Mirror of https://github.com/Foundry376/Mailspring.git, synced 2025-02-22 15:15:12 +08:00
fix(initial-sync): When an error is encountered, do not start fetching from zero again
Summary: Previously, when an error was encountered during the initial mailbox sync, we simply started the sync over after a retry delay. Recent API uptime issues meant this was happening often, and many people were seeing sync restart over and over. That is bad because the app is less performant while it is syncing mail, and it also generates unnecessary load as the app re-fetches threads it already has.

This diff adds specs and functionality to nylas-sync-worker so that fetching resumes where it left off. This is typically safe because the default sort order of the threads endpoint is newest to oldest, so if new items have arrived since we started fetching and page boundaries have shifted, we get duplicate data rather than missing data (a thread we already fetched may simply appear again on a later page and be re-saved). Connecting to the streaming API as soon as the sync starts also ensures that we roll up any changes to data we have already paginated over.

Test Plan: Run tests

Reviewers: drew, evan

Reviewed By: evan

Differential Revision: https://phab.nylas.com/D2132
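As a rough illustration of the resume behavior this commit introduces (a sketch, not code from the diff; INITIAL_PAGE_SIZE = 30 matches the value the specs below expect), the worker's persisted per-collection state now carries an errorRequestRange, and the next fetch starts from that range when one is present:

# Illustrative sketch only: pick the params for the next page fetch from the
# persisted per-collection state ({count, fetched, busy, complete, error,
# errorRequestRange}), the shape NylasSyncWorker writes via writeState().
INITIAL_PAGE_SIZE = 30

resumeParams = (state = {}, initialPageSize = INITIAL_PAGE_SIZE) ->
  if state.errorRequestRange
    # A page failed last time; retry exactly that range instead of restarting at 0.
    {limit, offset} = state.errorRequestRange
    {limit, offset}
  else
    # No recorded failure: begin at the top of the collection.
    {limit: initialPageSize, offset: 0}

# A sync that failed while requesting items 100..149 resumes right there:
console.log resumeParams({fetched: 100, errorRequestRange: {offset: 100, limit: 50}})
# => { limit: 50, offset: 100 }
console.log resumeParams({})
# => { limit: 30, offset: 0 }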
Parent: fc8b1bae3d
Commit: d5e04295b2
3 changed files with 99 additions and 46 deletions
@@ -47,8 +47,8 @@ class InitialSyncActivity extends React.Component
       return false
     else if error
       <div className={classSet} key="initial-sync">
-        <div className="inner">Initial sync encountered an error. Waiting to retry...
-          <div className="btn btn-emphasis" onClick={@_onTryAgain}>Try Again</div>
+        <div className="inner">An error occurred while syncing your mailbox. Sync will resume in a moment…
+          <div className="btn" style={marginTop:10} onClick={@_onTryAgain}>Try Again Now</div>
         </div>
         {@_expandedSyncState()}
       </div>
@@ -104,23 +104,33 @@ class NylasSyncWorker
   fetchCollection: (model, options = {}) ->
     return unless @_state
-    return if @_state[model]?.complete and not options.force?
-    return if @_state[model]?.busy
+    state = @_state[model] ? {}
+
+    return if state.complete and not options.force?
+    return if state.busy
 
-    @_state[model] =
-      complete: false
-      error: null
-      busy: true
-      count: 0
-      fetched: 0
+    state.complete = false
+    state.error = null
+    state.busy = true
+    state.fetched ?= 0
+
+    if not state.count
+      state.count = 0
+      @fetchCollectionCount(model)
+
+    if state.errorRequestRange
+      {limit, offset} = state.errorRequestRange
+      state.errorRequestRange = null
+      @fetchCollectionPage(model, {limit, offset})
+    else
+      @fetchCollectionPage(model, {
+        limit: options.initialPageSize ? INITIAL_PAGE_SIZE,
+        offset: 0
+      })
+
+    @_state[model] = state
     @writeState()
 
-    @fetchCollectionCount(model)
-    @fetchCollectionPage(model, {
-      limit: options.initialPageSize ? INITIAL_PAGE_SIZE,
-      offset: 0
-    })
-
   fetchCollectionCount: (model) ->
     @_api.makeRequest
       accountId: @_account.id
 
@@ -141,23 +151,31 @@ class NylasSyncWorker
     requestOptions =
       error: (err) =>
         return if @_terminated
-        @_fetchCollectionPageError(model, err)
+        @_fetchCollectionPageError(model, params, err)
 
       success: (json) =>
         return if @_terminated
 
         if model in ["labels", "folders"] and @_hasNoInbox(json)
-          @_fetchCollectionPageError(model, "No inbox in #{model}")
+          @_fetchCollectionPageError(model, params, "No inbox in #{model}")
           return
 
         lastReceivedIndex = params.offset + json.length
-        if json.length is params.limit
+        moreToFetch = json.length is params.limit
+
+        if moreToFetch
           nextParams = _.extend({}, params, {offset: lastReceivedIndex})
           nextParams.limit = Math.min(Math.round(params.limit * 1.5), MAX_PAGE_SIZE)
           nextDelay = Math.max(0, 1000 - (Date.now() - requestStartTime))
           setTimeout(( => @fetchCollectionPage(model, nextParams)), nextDelay)
-          @updateTransferState(model, {fetched: lastReceivedIndex})
-        else
-          @updateTransferState(model, {fetched: lastReceivedIndex, busy: false, complete: true})
+
+        @updateTransferState(model, {
+          fetched: lastReceivedIndex,
+          busy: moreToFetch,
+          complete: !moreToFetch,
+          error: null,
+          errorRequestRange: null
+        })
+
     if model is 'threads'
       @_api.getThreads(@_account.id, params, requestOptions)
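Not part of the change itself, but helpful for reading the success handler above: a small sketch of how the existing page-size ramp grows each subsequent request. MAX_PAGE_SIZE is assumed to be 200 here; the real constant is defined elsewhere in nylas-sync-worker.

# Sketch of the page-size ramp used by fetchCollectionPage's success handler.
# Assumes MAX_PAGE_SIZE = 200; the actual constant lives outside this hunk.
MAX_PAGE_SIZE = 200

nextLimit = (limit) -> Math.min(Math.round(limit * 1.5), MAX_PAGE_SIZE)

limit = 30
sizes = []
for i in [0...8]
  sizes.push limit
  limit = nextLimit(limit)
console.log sizes.join(', ')
# 30, 45, 68, 102, 153, 200, 200, 200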
@@ -171,13 +189,18 @@ class NylasSyncWorker
   _hasNoInbox: (json) ->
     return not _.any(json, (obj) -> obj.name is "inbox")
 
-  _fetchCollectionPageError: (model, err) ->
+  _fetchCollectionPageError: (model, params, err) ->
     @_resumeTimer.backoff()
     @_resumeTimer.start()
-    @updateTransferState(model, {busy: false, complete: false, error: err.toString()})
+    @updateTransferState(model, {
+      busy: false,
+      complete: false,
+      error: err.toString()
+      errorRequestRange: {offset: params.offset, limit: params.limit}
+    })
 
-  updateTransferState: (model, {busy, error, complete, fetched, count}) ->
-    @_state[model] = _.defaults({busy, error, complete, fetched, count}, @_state[model])
+  updateTransferState: (model, updatedKeys) ->
+    @_state[model] = _.extend(@_state[model], updatedKeys)
     @writeState()
 
   writeState: ->
@@ -143,6 +143,11 @@ describe "NylasSyncWorker", ->
       @worker.resumeFetches()
       expect(@worker.fetchCollection.calls.map (call) -> call.args[0]).toEqual(['threads', 'labels', 'drafts', 'contacts', 'calendars', 'events'])
 
+    it "should be called when Actions.retryInitialSync is received", ->
+      spyOn(@worker, 'resumeFetches').andCallThrough()
+      Actions.retryInitialSync()
+      expect(@worker.resumeFetches).toHaveBeenCalled()
+
   describe "fetchCollection", ->
     beforeEach ->
       @apiRequests = []
@@ -172,14 +177,39 @@ describe "NylasSyncWorker", ->
       expect(@apiRequests[0].requestOptions.path).toBe('/threads')
       expect(@apiRequests[0].requestOptions.qs.view).toBe('count')
 
-    it "should start the first request for models", ->
-      @worker._state.threads = {
-        'busy': false
-        'complete': false
-      }
-      @worker.fetchCollection('threads')
-      expect(@apiRequests[1].model).toBe('threads')
-      expect(@apiRequests[1].params.offset).toBe(0)
+    describe "when there is no errorRequestRange saved", ->
+      it "should start the first request for models", ->
+        @worker._state.threads = {
+          'busy': false
+          'complete': false
+        }
+        @worker.fetchCollection('threads')
+        expect(@apiRequests[1].model).toBe('threads')
+        expect(@apiRequests[1].params.offset).toBe(0)
+
+    describe "when there is an errorRequestRange saved", ->
+      beforeEach ->
+        @worker._state.threads =
+          'count': 1200
+          'fetched': 100
+          'busy': false
+          'complete': false
+          'error': new Error("Something bad")
+          'errorRequestRange':
+            offset: 100
+            limit: 50
+
+      it "should start paginating from the request that failed", ->
+        @worker.fetchCollection('threads')
+        expect(@apiRequests[0].model).toBe('threads')
+        expect(@apiRequests[0].params.offset).toBe(100)
+        expect(@apiRequests[0].params.limit).toBe(50)
+
+      it "should not reset the `count`, `fetched` or start fetching the count", ->
+        @worker.fetchCollection('threads')
+        expect(@worker._state.threads.fetched).toBe(100)
+        expect(@worker._state.threads.count).toBe(1200)
+        expect(@apiRequests.length).toBe(1)
 
   describe "when an API request completes", ->
     beforeEach ->
@@ -254,28 +284,28 @@ describe "NylasSyncWorker", ->
         expect(@worker.state().threads.complete).toEqual(true)
 
     describe "with an error", ->
-      it "should log the error to the state", ->
+      it "should log the error to the state, along with the range that failed", ->
        err = new Error("Oh no a network error")
        @request.requestOptions.error(err)
        expect(@worker.state().threads.busy).toEqual(false)
        expect(@worker.state().threads.complete).toEqual(false)
        expect(@worker.state().threads.error).toEqual(err.toString())
+        expect(@worker.state().threads.errorRequestRange).toEqual({offset: 0, limit: 30})
 
       it "should not request another page", ->
         @request.requestOptions.error(new Error("Oh no a network error"))
         expect(@apiRequests.length).toBe(0)
 
-      it "resumes when a action forces it to", ->
-        err = new Error("Oh no a network error")
-        @request.requestOptions.error(err)
-        expect(@worker.state().threads.busy).toEqual(false)
-        expect(@worker.state().threads.complete).toEqual(false)
-        spyOn(@worker, 'resumeFetches').andCallThrough()
-        Actions.retryInitialSync()
-        expect(@worker.resumeFetches).toHaveBeenCalled()
-        expect(@worker.resumeFetches.calls.length).toBe 1
-        expect(@worker.state().threads.busy).toEqual(true)
-        expect(@worker.state().threads.error).toBe(null)
+    describe "succeeds after a previous error", ->
+      beforeEach ->
+        @worker._state.threads.error = new Error("Something bad happened")
+        @worker._state.threads.errorRequestRange = {limit: 10, offset: 10}
+        @request.requestOptions.success([])
+        advanceClock(1)
+
+      it "should clear any previous error", ->
+        expect(@worker.state().threads.error).toEqual(null)
+        expect(@worker.state().threads.errorRequestRange).toEqual(null)
 
   describe "cleanup", ->
     it "should termiate the long polling connection", ->