_ = require 'underscore'
{Actions, DatabaseStore, DatabaseTransaction, Account, Thread} = require 'nylas-exports'
DeltaStreamingConnection = require('../lib/delta-streaming-connection').default
NylasSyncWorker = require '../lib/nylas-sync-worker'

describe "NylasSyncWorker", ->
  beforeEach ->
    @apiRequests = []
    @api =
      APIRoot: 'https://api.nylas.com'
      LongConnectionStatus: {'Closed', 'Connected'}
      pluginsSupported: true
      accessTokenForAccountId: =>
        '123'
      makeRequest: (requestOptions) =>
        @apiRequests.push({requestOptions})
      getCollection: (account, model, params, requestOptions) =>
        @apiRequests.push({account, model, params, requestOptions})
      getThreads: (account, params, requestOptions) =>
        @apiRequests.push({account, model: 'threads', params, requestOptions})
      longConnection: -> {
        start: ->
        _status: 'Closed'
      }

    @apiCursorStub = undefined
    spyOn(NylasSyncWorker.prototype, 'fetchAllMetadata').andCallFake (cb) -> cb()
    spyOn(DatabaseTransaction.prototype, 'persistJSONBlob').andReturn(Promise.resolve())
    spyOn(DatabaseStore, 'findJSONBlob').andCallFake (key) =>
      if key is "NylasSyncWorker:#{TEST_ACCOUNT_ID}"
        return Promise.resolve _.extend {}, {
          "cursor": @apiCursorStub
          "contacts":
            busy: true
            complete: false
          "calendars":
            busy: false
            complete: true
        }
      else if key.indexOf('ContactRankings') is 0
        return Promise.resolve([])
      else
        throw new Error("Not stubbed! #{key}")

    spyOn(DeltaStreamingConnection.prototype, 'start')
    @account = new Account(clientId: TEST_ACCOUNT_CLIENT_ID, serverId: TEST_ACCOUNT_ID, organizationUnit: 'label')
    @worker = new NylasSyncWorker(@api, @account)
    @worker._metadata = {"a": [{"id": "b"}]}
    @connection = @worker.connection()
    advanceClock()

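  # Note on the harness: the stubbed `@api` above never touches the network.
  # Every call is captured in `@apiRequests`, and specs complete a request by
  # invoking its `success`/`error` callback by hand, e.g.:
  #
  #   request = _.findWhere(@apiRequests, model: 'labels')
  #   request.requestOptions.success([{name: 'inbox'}])
  #
  # (Illustrative only; the actual lookups live in the specs below.)
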
it "should reset `busy` to false when reading state from disk", ->
|
feat(accounts): Kill namespaces, long live accounts
Summary:
This diff replaces the Namespace object with the Account object, and changes all references to namespace_id => account_id, etc. The endpoints are now `/threads` instead of `/n/<id>/threads`.
This diff also adds preliminary support for multiple accounts. When you log in, we now log you in to all the attached accounts on edgehill server. From the preferences panel, you can auth with / unlink additional accounts. Shockingly, this all seems to pretty much work.
When replying to a thread, you cannot switch from addresses. However, when creating a new message in a popout composer, you can change the from address and the SaveDraftTask will delete/re-root the draft on the new account.
Search bar doesn't need to do full refresh on clear if it never committed
Allow drafts to be switched to a different account when not in reply to an existing thread
Fix edge case where ChangeMailTask throws exception if no models are modified during performLocal
Show many dots for many accounts in long polling status bar
add/remove accounts from prefs
Spec fixes!
Test Plan: Run tests, none broken!
Reviewers: evan, dillon
Reviewed By: evan, dillon
Differential Revision: https://phab.nylas.com/D1928
2015-08-22 06:29:58 +08:00
|
|
|
@worker = new NylasSyncWorker(@api, @account)
|
2016-04-05 08:11:09 +08:00
|
|
|
spyOn(@worker, 'resume')
|
2015-08-07 05:35:52 +08:00
|
|
|
advanceClock()
|
|
|
|
expect(@worker.state().contacts.busy).toEqual(false)
|
2015-05-20 06:59:37 +08:00
|
|
|
|
2015-04-07 02:46:20 +08:00
|
|
|
describe "start", ->
|
2016-01-30 08:06:33 +08:00
|
|
|
it "should open the delta connection", ->
|
2015-04-07 02:46:20 +08:00
|
|
|
@worker.start()
|
2015-08-07 05:35:52 +08:00
|
|
|
advanceClock()
|
2015-04-07 02:46:20 +08:00
|
|
|
expect(@connection.start).toHaveBeenCalled()
|
|
|
|
|
2015-05-20 06:59:37 +08:00
|
|
|
it "should start querying for model collections and counts that haven't been fully cached", ->
|
2015-04-07 02:46:20 +08:00
|
|
|
@worker.start()
|
2015-08-07 05:35:52 +08:00
|
|
|
advanceClock()
|
2016-04-01 05:58:16 +08:00
|
|
|
expect(@apiRequests.length).toBe(12)
|
2015-05-20 06:59:37 +08:00
|
|
|
modelsRequested = _.compact _.map @apiRequests, ({model}) -> model
|
2016-04-01 05:58:16 +08:00
|
|
|
expect(modelsRequested).toEqual(['threads', 'messages', 'labels', 'drafts', 'contacts', 'events'])
|
2015-05-20 06:59:37 +08:00
|
|
|
|
|
|
|
countsRequested = _.compact _.map @apiRequests, ({requestOptions}) ->
|
|
|
|
if requestOptions.qs?.view is 'count'
|
|
|
|
return requestOptions.path
|
|
|
|
|
2016-04-01 05:58:16 +08:00
|
|
|
expect(modelsRequested).toEqual(['threads', 'messages', 'labels', 'drafts', 'contacts', 'events'])
|
|
|
|
expect(countsRequested).toEqual(['/threads', '/messages', '/labels', '/drafts', '/contacts', '/events'])
|
2015-04-07 02:46:20 +08:00
|
|
|
|
2015-10-06 07:22:22 +08:00
|
|
|
it "should fetch 1000 labels and folders, to prevent issues where Inbox is not in the first page", ->
|
2015-10-04 14:53:59 +08:00
|
|
|
labelsRequest = _.find @apiRequests, (r) -> r.model is 'labels'
|
|
|
|
expect(labelsRequest.params.limit).toBe(1000)
|
|
|
|
|
2015-04-07 02:46:20 +08:00
|
|
|
it "should mark incomplete collections as `busy`", ->
|
|
|
|
@worker.start()
|
2015-08-07 05:35:52 +08:00
|
|
|
advanceClock()
|
2015-05-20 06:59:37 +08:00
|
|
|
nextState = @worker.state()
|
2015-04-07 02:46:20 +08:00
|
|
|
|
2015-07-23 02:18:23 +08:00
|
|
|
for collection in ['contacts','threads','drafts', 'labels']
|
2015-05-20 06:59:37 +08:00
|
|
|
expect(nextState[collection].busy).toEqual(true)
|
|
|
|
|
|
|
|
it "should initialize count and fetched to 0", ->
|
|
|
|
@worker.start()
|
2015-08-07 05:35:52 +08:00
|
|
|
advanceClock()
|
2015-05-20 06:59:37 +08:00
|
|
|
nextState = @worker.state()
|
|
|
|
|
2015-07-23 02:18:23 +08:00
|
|
|
for collection in ['contacts','threads','drafts', 'labels']
|
2015-05-20 06:59:37 +08:00
|
|
|
expect(nextState[collection].fetched).toEqual(0)
|
|
|
|
expect(nextState[collection].count).toEqual(0)
|
|
|
|
|
2015-08-14 02:20:36 +08:00
|
|
|
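    # A sketch of the expected backoff, inferred from the assertions below
    # rather than from the worker's internals: each consecutive failure makes
    # the worker wait more 4-second ticks before `resume` fires again --
    # 1 tick after the first failure, 2 after the second, 3 after the third,
    # then 5 -- and `Math.random` (stubbed to 1.0 here) is consulted once per
    # failure to jitter the delay.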
it "after failures, it should attempt to resume periodically but back off as failures continue", ->
|
|
|
|
simulateNetworkFailure = =>
|
|
|
|
@apiRequests[1].requestOptions.error({statusCode: 400})
|
|
|
|
@apiRequests = []
|
|
|
|
|
2016-04-05 08:11:09 +08:00
|
|
|
spyOn(@worker, 'resume').andCallThrough()
|
2016-09-23 07:17:02 +08:00
|
|
|
spyOn(Math, 'random').andReturn(1.0)
|
2015-05-20 06:59:37 +08:00
|
|
|
@worker.start()
|
2015-08-14 02:20:36 +08:00
|
|
|
|
2016-09-23 07:17:02 +08:00
|
|
|
expectThings = (resumeCallCount, randomCallCount) =>
|
2016-09-23 07:17:02 +08:00
|
|
|
expect(@worker.resume.callCount).toBe(resumeCallCount)
|
2016-09-23 07:17:02 +08:00
|
|
|
expect(Math.random.callCount).toBe(randomCallCount)
|
|
|
|
|
|
|
|
expect(@worker.resume.callCount).toBe(1, 1)
|
|
|
|
simulateNetworkFailure(); expectThings(1, 1)
|
|
|
|
advanceClock(4000); expectThings(2, 1)
|
|
|
|
simulateNetworkFailure(); expectThings(2, 2)
|
|
|
|
advanceClock(4000); expectThings(2, 2)
|
|
|
|
advanceClock(4000); expectThings(3, 2)
|
|
|
|
simulateNetworkFailure(); expectThings(3, 3)
|
|
|
|
advanceClock(4000); expectThings(3, 3)
|
|
|
|
advanceClock(4000); expectThings(3, 3)
|
|
|
|
advanceClock(4000); expectThings(4, 3)
|
|
|
|
simulateNetworkFailure(); expectThings(4, 4)
|
|
|
|
advanceClock(4000); expectThings(4, 4)
|
|
|
|
advanceClock(4000); expectThings(4, 4)
|
|
|
|
advanceClock(4000); expectThings(4, 4)
|
|
|
|
advanceClock(4000); expectThings(4, 4)
|
|
|
|
advanceClock(4000); expectThings(5, 4)
|
2015-05-20 06:59:37 +08:00
|
|
|
|
2015-09-24 01:46:07 +08:00
|
|
|
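    # The next two specs cover what looks like a consistency guard: a
    # labels/folders page that lacks the special 'inbox' category is treated
    # as a bad response and retried, while a page containing it is accepted.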
it "handles the request as a failure if we try and grab labels or folders without an 'inbox'", ->
|
2016-04-05 08:11:09 +08:00
|
|
|
spyOn(@worker, 'resume').andCallThrough()
|
2015-09-24 01:46:07 +08:00
|
|
|
@worker.start()
|
2016-04-05 08:11:09 +08:00
|
|
|
expect(@worker.resume.callCount).toBe(1)
|
2015-09-24 01:46:07 +08:00
|
|
|
request = _.findWhere(@apiRequests, model: 'labels')
|
|
|
|
request.requestOptions.success([])
|
2016-04-05 08:11:09 +08:00
|
|
|
expect(@worker.resume.callCount).toBe(1)
|
2015-09-24 01:46:07 +08:00
|
|
|
advanceClock(30000)
|
2016-04-05 08:11:09 +08:00
|
|
|
expect(@worker.resume.callCount).toBe(2)
|
2015-09-24 01:46:07 +08:00
|
|
|
|
|
|
|
it "handles the request as a success if we try and grab labels or folders and it includes the 'inbox'", ->
|
2016-04-05 08:11:09 +08:00
|
|
|
spyOn(@worker, 'resume').andCallThrough()
|
2015-09-24 01:46:07 +08:00
|
|
|
@worker.start()
|
2016-04-05 08:11:09 +08:00
|
|
|
expect(@worker.resume.callCount).toBe(1)
|
2015-09-24 01:46:07 +08:00
|
|
|
request = _.findWhere(@apiRequests, model: 'labels')
|
|
|
|
request.requestOptions.success([{name: "inbox"}, {name: "archive"}])
|
2016-04-05 08:11:09 +08:00
|
|
|
expect(@worker.resume.callCount).toBe(1)
|
2015-09-24 01:46:07 +08:00
|
|
|
advanceClock(30000)
|
2016-04-05 08:11:09 +08:00
|
|
|
expect(@worker.resume.callCount).toBe(1)
|
2015-09-24 01:46:07 +08:00
|
|
|
|
2016-02-27 05:52:19 +08:00
|
|
|
describe "delta streaming cursor", ->
|
|
|
|
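    # Cursor precedence, as exercised below: a cursor persisted in the
    # database wins over the legacy `nylas.<account-id>.cursor` config entry,
    # which in turn is only consulted when the database has nothing.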
it "should read the cursor from the database, and the old config format", ->
|
2016-07-27 17:56:55 +08:00
|
|
|
spyOn(DeltaStreamingConnection.prototype, 'latestCursor').andReturn Promise.resolve()
|
2016-02-27 05:52:19 +08:00
|
|
|
|
|
|
|
@apiCursorStub = undefined
|
|
|
|
|
|
|
|
# no cursor present
|
|
|
|
worker = new NylasSyncWorker(@api, @account)
|
|
|
|
connection = worker.connection()
|
|
|
|
expect(connection.hasCursor()).toBe(false)
|
|
|
|
advanceClock()
|
|
|
|
expect(connection.hasCursor()).toBe(false)
|
|
|
|
|
|
|
|
# cursor present in config
|
|
|
|
spyOn(NylasEnv.config, 'get').andCallFake (key) =>
|
|
|
|
return 'old-school' if key is "nylas.#{@account.id}.cursor"
|
|
|
|
return undefined
|
|
|
|
|
|
|
|
worker = new NylasSyncWorker(@api, @account)
|
|
|
|
connection = worker.connection()
|
|
|
|
advanceClock()
|
|
|
|
expect(connection.hasCursor()).toBe(true)
|
2016-07-27 17:56:55 +08:00
|
|
|
expect(connection._getCursor()).toEqual('old-school')
|
2016-02-27 05:52:19 +08:00
|
|
|
|
|
|
|
# cursor present in database, overrides cursor in config
|
|
|
|
@apiCursorStub = "new-school"
|
|
|
|
|
|
|
|
worker = new NylasSyncWorker(@api, @account)
|
|
|
|
connection = worker.connection()
|
|
|
|
expect(connection.hasCursor()).toBe(false)
|
|
|
|
advanceClock()
|
|
|
|
expect(connection.hasCursor()).toBe(true)
|
2016-07-27 17:56:55 +08:00
|
|
|
expect(connection._getCursor()).toEqual('new-school')
|
2016-02-27 05:52:19 +08:00
|
|
|
|
2016-07-28 05:29:35 +08:00
|
|
|
it "should set the cursor to the last cursor after receiving deltas", ->
|
|
|
|
spyOn(DeltaStreamingConnection.prototype, 'latestCursor').andReturn Promise.resolve()
|
|
|
|
worker = new NylasSyncWorker(@api, @account)
|
|
|
|
advanceClock()
|
|
|
|
connection = worker.connection()
|
|
|
|
deltas = [{cursor: '1'}, {cursor: '2'}]
|
|
|
|
connection._emitter.emit('results-stopped-arriving', deltas)
|
|
|
|
advanceClock()
|
|
|
|
expect(connection._getCursor()).toEqual('2')
|
|
|
|
|
|
|
|
|
2015-05-20 06:59:37 +08:00
|
|
|
describe "when a count request completes", ->
|
2015-04-07 02:46:20 +08:00
|
|
|
beforeEach ->
|
|
|
|
@worker.start()
|
2015-08-07 05:35:52 +08:00
|
|
|
advanceClock()
|
2015-04-07 02:46:20 +08:00
|
|
|
@request = @apiRequests[0]
|
|
|
|
@apiRequests = []
|
|
|
|
|
2015-05-20 06:59:37 +08:00
|
|
|
it "should update the count on the collection", ->
|
|
|
|
@request.requestOptions.success({count: 1001})
|
|
|
|
nextState = @worker.state()
|
|
|
|
expect(nextState.threads.count).toEqual(1001)
|
|
|
|
|
2016-04-05 08:11:09 +08:00
|
|
|
describe "resume", ->
|
2016-03-11 03:06:06 +08:00
|
|
|
it "should fetch metadata first and fetch other collections when metadata is ready", ->
|
|
|
|
fetchAllMetadataCallback = null
|
|
|
|
jasmine.unspy(NylasSyncWorker.prototype, 'fetchAllMetadata')
|
|
|
|
spyOn(NylasSyncWorker.prototype, 'fetchAllMetadata').andCallFake (cb) =>
|
|
|
|
fetchAllMetadataCallback = cb
|
2015-05-20 06:59:37 +08:00
|
|
|
spyOn(@worker, 'fetchCollection')
|
2016-03-11 03:06:06 +08:00
|
|
|
@worker._state = {}
|
2016-04-05 08:11:09 +08:00
|
|
|
@worker.resume()
|
2016-03-11 03:06:06 +08:00
|
|
|
expect(@worker.fetchAllMetadata).toHaveBeenCalled()
|
|
|
|
expect(@worker.fetchCollection.calls.length).toBe(0)
|
|
|
|
fetchAllMetadataCallback()
|
|
|
|
expect(@worker.fetchCollection.calls.length).not.toBe(0)
|
|
|
|
|
2016-03-11 08:03:32 +08:00
|
|
|
it "should not fetch metadata pages if pluginsSupported is false", ->
|
|
|
|
@api.pluginsSupported = false
|
|
|
|
spyOn(NylasSyncWorker.prototype, '_fetchWithErrorHandling')
|
|
|
|
spyOn(@worker, 'fetchCollection')
|
|
|
|
@worker._state = {}
|
2016-04-05 08:11:09 +08:00
|
|
|
@worker.resume()
|
2016-03-11 08:03:32 +08:00
|
|
|
expect(@worker._fetchWithErrorHandling).not.toHaveBeenCalled()
|
|
|
|
expect(@worker.fetchCollection.calls.length).not.toBe(0)
|
|
|
|
|
2016-03-11 03:06:06 +08:00
|
|
|
it "should fetch collections for which `shouldFetchCollection` returns true", ->
|
|
|
|
spyOn(@worker, 'fetchCollection')
|
|
|
|
spyOn(@worker, 'shouldFetchCollection').andCallFake (collection) =>
|
|
|
|
return collection in ['threads', 'labels', 'drafts']
|
2016-04-05 08:11:09 +08:00
|
|
|
@worker.resume()
|
2016-03-11 03:06:06 +08:00
|
|
|
expect(@worker.fetchCollection.calls.map (call) -> call.args[0]).toEqual(['threads', 'labels', 'drafts'])
|
2015-05-20 06:59:37 +08:00
|
|
|
|
2016-04-05 08:11:09 +08:00
|
|
|
it "should be called when Actions.retrySync is received", ->
|
2016-07-27 17:56:55 +08:00
|
|
|
spyOn(DeltaStreamingConnection.prototype, 'latestCursor').andReturn Promise.resolve()
|
|
|
|
|
|
|
|
# TODO why do we need to call through?
|
2016-04-05 08:11:09 +08:00
|
|
|
spyOn(@worker, 'resume').andCallThrough()
|
|
|
|
Actions.retrySync()
|
|
|
|
expect(@worker.resume).toHaveBeenCalled()
|
2015-10-09 10:02:54 +08:00
|
|
|
|
2016-03-11 03:06:06 +08:00
|
|
|
describe "shouldFetchCollection", ->
|
|
|
|
it "should return false if the collection sync is already in progress", ->
|
2015-05-20 06:59:37 +08:00
|
|
|
@worker._state.threads = {
|
|
|
|
'busy': true
|
|
|
|
'complete': false
|
|
|
|
}
|
2016-03-11 03:06:06 +08:00
|
|
|
expect(@worker.shouldFetchCollection('threads')).toBe(false)
|
2015-05-20 06:59:37 +08:00
|
|
|
|
2016-03-11 03:06:06 +08:00
|
|
|
it "should return false if the collection sync is already complete", ->
|
2015-05-20 06:59:37 +08:00
|
|
|
@worker._state.threads = {
|
|
|
|
'busy': false
|
|
|
|
'complete': true
|
|
|
|
}
|
2016-03-11 03:06:06 +08:00
|
|
|
expect(@worker.shouldFetchCollection('threads')).toBe(false)
|
|
|
|
|
|
|
|
it "should return true otherwise", ->
|
|
|
|
@worker._state.threads = {
|
|
|
|
'busy': false
|
|
|
|
'complete': false
|
|
|
|
}
|
|
|
|
expect(@worker.shouldFetchCollection('threads')).toBe(true)
|
|
|
|
@worker._state.threads = undefined
|
|
|
|
expect(@worker.shouldFetchCollection('threads')).toBe(true)
|
|
|
|
|
|
|
|
describe "fetchCollection", ->
|
|
|
|
beforeEach ->
|
|
|
|
@apiRequests = []
|
2015-05-20 06:59:37 +08:00
|
|
|
|
|
|
|
it "should start the request for the model count", ->
|
|
|
|
@worker._state.threads = {
|
|
|
|
'busy': false
|
|
|
|
'complete': false
|
|
|
|
}
|
|
|
|
@worker.fetchCollection('threads')
|
feat(accounts): Kill namespaces, long live accounts
Summary:
This diff replaces the Namespace object with the Account object, and changes all references to namespace_id => account_id, etc. The endpoints are now `/threads` instead of `/n/<id>/threads`.
This diff also adds preliminary support for multiple accounts. When you log in, we now log you in to all the attached accounts on edgehill server. From the preferences panel, you can auth with / unlink additional accounts. Shockingly, this all seems to pretty much work.
When replying to a thread, you cannot switch from addresses. However, when creating a new message in a popout composer, you can change the from address and the SaveDraftTask will delete/re-root the draft on the new account.
Search bar doesn't need to do full refresh on clear if it never committed
Allow drafts to be switched to a different account when not in reply to an existing thread
Fix edge case where ChangeMailTask throws exception if no models are modified during performLocal
Show many dots for many accounts in long polling status bar
add/remove accounts from prefs
Spec fixes!
Test Plan: Run tests, none broken!
Reviewers: evan, dillon
Reviewed By: evan, dillon
Differential Revision: https://phab.nylas.com/D1928
2015-08-22 06:29:58 +08:00
|
|
|
expect(@apiRequests[0].requestOptions.path).toBe('/threads')
|
2015-05-20 06:59:37 +08:00
|
|
|
expect(@apiRequests[0].requestOptions.qs.view).toBe('count')
|
|
|
|
|
2016-03-11 03:06:06 +08:00
|
|
|
it "should pass any metadata it preloaded", ->
|
|
|
|
@worker._state.threads = {
|
|
|
|
'busy': false
|
|
|
|
'complete': false
|
|
|
|
}
|
|
|
|
@worker.fetchCollection('threads')
|
|
|
|
expect(@apiRequests[1].model).toBe('threads')
|
|
|
|
expect(@apiRequests[1].requestOptions.metadataToAttach).toBe(@worker._metadata)
|
|
|
|
|
2016-06-03 09:46:43 +08:00
|
|
|
describe "when there is no request history (`lastRequestRange`)", ->
|
2015-10-09 10:02:54 +08:00
|
|
|
it "should start the first request for models", ->
|
|
|
|
@worker._state.threads = {
|
|
|
|
'busy': false
|
|
|
|
'complete': false
|
|
|
|
}
|
|
|
|
@worker.fetchCollection('threads')
|
|
|
|
expect(@apiRequests[1].model).toBe('threads')
|
|
|
|
expect(@apiRequests[1].params.offset).toBe(0)
|
|
|
|
|
2016-06-03 09:46:43 +08:00
|
|
|
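    # `lastRequestRange` appears to record the page the worker was fetching
    # when a sync was interrupted; the specs below check that a restarted
    # fetch resumes from that exact {offset, limit} rather than starting
    # over from zero.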
describe "when it was previously trying to fetch a page (`lastRequestRange`)", ->
|
2015-10-09 10:02:54 +08:00
|
|
|
beforeEach ->
|
|
|
|
@worker._state.threads =
|
|
|
|
'count': 1200
|
|
|
|
'fetched': 100
|
|
|
|
'busy': false
|
|
|
|
'complete': false
|
|
|
|
'error': new Error("Something bad")
|
2016-06-03 09:46:43 +08:00
|
|
|
'lastRequestRange':
|
2015-10-09 10:02:54 +08:00
|
|
|
offset: 100
|
|
|
|
limit: 50
|
|
|
|
|
2016-06-03 09:46:43 +08:00
|
|
|
it "should start paginating from the request that was interrupted", ->
|
2015-10-09 10:02:54 +08:00
|
|
|
@worker.fetchCollection('threads')
|
|
|
|
expect(@apiRequests[0].model).toBe('threads')
|
|
|
|
expect(@apiRequests[0].params.offset).toBe(100)
|
|
|
|
expect(@apiRequests[0].params.limit).toBe(50)
|
|
|
|
|
|
|
|
it "should not reset the `count`, `fetched` or start fetching the count", ->
|
|
|
|
@worker.fetchCollection('threads')
|
|
|
|
expect(@worker._state.threads.fetched).toBe(100)
|
|
|
|
expect(@worker._state.threads.count).toBe(1200)
|
|
|
|
expect(@apiRequests.length).toBe(1)
|
2015-05-20 06:59:37 +08:00
|
|
|
|
2016-04-01 05:58:16 +08:00
|
|
|
    describe 'when maxFetchCount option is specified', ->
      it "should only fetch maxFetchCount on the first request if it is less than initialPageSize", ->
        @worker._state.messages =
          count: 1000
          fetched: 0
        @worker.fetchCollection('messages', {initialPageSize: 30, maxFetchCount: 25})
        expect(@apiRequests[0].params.offset).toBe 0
        expect(@apiRequests[0].params.limit).toBe 25

it "sould only fetch the maxFetchCount when restoring from saved state", ->
|
|
|
|
@worker._state.messages =
|
|
|
|
count: 1000
|
|
|
|
fetched: 470
|
2016-06-03 09:46:43 +08:00
|
|
|
lastRequestRange: {
|
2016-04-01 05:58:16 +08:00
|
|
|
limit: 50,
|
|
|
|
offset: 470,
|
|
|
|
}
|
|
|
|
@worker.fetchCollection('messages', {maxFetchCount: 500})
|
|
|
|
expect(@apiRequests[0].params.offset).toBe 470
|
|
|
|
expect(@apiRequests[0].params.limit).toBe 30
|
|
|
|
|
|
|
|
describe "fetchCollectionPage", ->
|
|
|
|
beforeEach ->
|
|
|
|
@apiRequests = []
|
|
|
|
|
|
|
|
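    # The cap arithmetic under test: with maxFetchCount = 500 and 470 items
    # already fetched, a 30-item page lands exactly on the cap (470 + 30 =
    # 500), so no further page may be requested; from 450, the follow-up page
    # is trimmed to limit = 500 - 480 = 20.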
    describe 'when maxFetchCount option is specified', ->
      it 'should not fetch next page if maxFetchCount has been reached', ->
        @worker._state.messages =
          count: 1000
          fetched: 470
        @worker.fetchCollectionPage('messages', {limit: 30, offset: 470}, {maxFetchCount: 500})
        {success} = @apiRequests[0].requestOptions
        success({length: 30})
        expect(@worker._state.messages.fetched).toBe 500
        advanceClock(2000)
        expect(@apiRequests.length).toBe 1

      it 'should limit by maxFetchCount when requesting the next page', ->
        @worker._state.messages =
          count: 1000
          fetched: 450
        @worker.fetchCollectionPage('messages', {limit: 30, offset: 450}, {maxFetchCount: 500})
        {success} = @apiRequests[0].requestOptions
        success({length: 30})
        expect(@worker._state.messages.fetched).toBe 480
        advanceClock(2000)
        expect(@apiRequests[1].params.offset).toBe 480
        expect(@apiRequests[1].params.limit).toBe 20

describe "when an API request completes", ->
|
|
|
|
beforeEach ->
|
|
|
|
@worker.start()
|
2015-08-07 05:35:52 +08:00
|
|
|
advanceClock()
|
2015-05-20 06:59:37 +08:00
|
|
|
@request = @apiRequests[1]
|
|
|
|
@apiRequests = []
|
|
|
|
|
2015-04-07 02:46:20 +08:00
|
|
|
describe "successfully, with models", ->
|
2015-10-01 01:47:33 +08:00
|
|
|
it "should start out by requesting a small number of items", ->
|
|
|
|
expect(@request.params.limit).toBe NylasSyncWorker.INITIAL_PAGE_SIZE
|
|
|
|
|
2015-04-07 02:46:20 +08:00
|
|
|
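      # Paging behavior exercised below: when a full page comes back, the
      # next request starts where the last one ended and the page size grows
      # by 50% per page (30 -> 45 -> ..., judging by these specs), clamped at
      # NylasSyncWorker.MAX_PAGE_SIZE.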
it "should request the next page", ->
|
2015-05-20 06:59:37 +08:00
|
|
|
pageSize = @request.params.limit
|
2015-04-07 02:46:20 +08:00
|
|
|
models = []
|
2015-05-20 06:59:37 +08:00
|
|
|
models.push(new Thread) for i in [0..(pageSize-1)]
|
2015-04-07 02:46:20 +08:00
|
|
|
@request.requestOptions.success(models)
|
2015-09-15 04:30:45 +08:00
|
|
|
advanceClock(2000)
|
2015-04-07 02:46:20 +08:00
|
|
|
expect(@apiRequests.length).toBe(1)
|
2015-10-01 01:47:33 +08:00
|
|
|
expect(@apiRequests[0].params.offset).toEqual @request.params.offset + pageSize
|
|
|
|
|
|
|
|
it "increase the limit on the next page load by 50%", ->
|
|
|
|
pageSize = @request.params.limit
|
|
|
|
models = []
|
|
|
|
models.push(new Thread) for i in [0..(pageSize-1)]
|
|
|
|
@request.requestOptions.success(models)
|
|
|
|
advanceClock(2000)
|
|
|
|
expect(@apiRequests.length).toBe(1)
|
|
|
|
expect(@apiRequests[0].params.limit).toEqual pageSize * 1.5,
|
|
|
|
|
|
|
|
it "never requests more then MAX_PAGE_SIZE", ->
|
|
|
|
pageSize = @request.params.limit = NylasSyncWorker.MAX_PAGE_SIZE
|
|
|
|
models = []
|
|
|
|
models.push(new Thread) for i in [0..(pageSize-1)]
|
|
|
|
@request.requestOptions.success(models)
|
|
|
|
advanceClock(2000)
|
|
|
|
expect(@apiRequests.length).toBe(1)
|
|
|
|
expect(@apiRequests[0].params.limit).toEqual NylasSyncWorker.MAX_PAGE_SIZE
|
2015-05-20 06:59:37 +08:00
|
|
|
|
|
|
|
it "should update the fetched count on the collection", ->
|
|
|
|
expect(@worker.state().threads.fetched).toEqual(0)
|
|
|
|
pageSize = @request.params.limit
|
|
|
|
models = []
|
|
|
|
models.push(new Thread) for i in [0..(pageSize-1)]
|
|
|
|
@request.requestOptions.success(models)
|
|
|
|
expect(@worker.state().threads.fetched).toEqual(pageSize)
|
2015-04-07 02:46:20 +08:00
|
|
|
|
|
|
|
describe "successfully, with fewer models than requested", ->
|
|
|
|
beforeEach ->
|
|
|
|
models = []
|
|
|
|
models.push(new Thread) for i in [0..100]
|
|
|
|
@request.requestOptions.success(models)
|
|
|
|
|
|
|
|
it "should not request another page", ->
|
|
|
|
expect(@apiRequests.length).toBe(0)
|
|
|
|
|
|
|
|
it "should update the state to complete", ->
|
2015-05-20 06:59:37 +08:00
|
|
|
expect(@worker.state().threads.busy).toEqual(false)
|
|
|
|
expect(@worker.state().threads.complete).toEqual(true)
|
|
|
|
|
|
|
|
it "should update the fetched count on the collection", ->
|
|
|
|
expect(@worker.state().threads.fetched).toEqual(101)
|
2015-04-07 02:46:20 +08:00
|
|
|
|
|
|
|
describe "successfully, with no models", ->
|
|
|
|
it "should not request another page", ->
|
|
|
|
@request.requestOptions.success([])
|
|
|
|
expect(@apiRequests.length).toBe(0)
|
|
|
|
|
|
|
|
it "should update the state to complete", ->
|
|
|
|
@request.requestOptions.success([])
|
2015-05-20 06:59:37 +08:00
|
|
|
expect(@worker.state().threads.busy).toEqual(false)
|
|
|
|
expect(@worker.state().threads.complete).toEqual(true)
|
2015-04-07 02:46:20 +08:00
|
|
|
|
|
|
|
describe "with an error", ->
|
2015-10-09 10:02:54 +08:00
|
|
|
it "should log the error to the state, along with the range that failed", ->
|
2015-04-07 02:46:20 +08:00
|
|
|
err = new Error("Oh no a network error")
|
|
|
|
@request.requestOptions.error(err)
|
2015-05-20 06:59:37 +08:00
|
|
|
expect(@worker.state().threads.busy).toEqual(false)
|
|
|
|
expect(@worker.state().threads.complete).toEqual(false)
|
|
|
|
expect(@worker.state().threads.error).toEqual(err.toString())
|
2016-06-03 09:46:43 +08:00
|
|
|
expect(@worker.state().threads.lastRequestRange).toEqual({offset: 0, limit: 30})
|
2015-04-07 02:46:20 +08:00
|
|
|
|
|
|
|
it "should not request another page", ->
|
|
|
|
@request.requestOptions.error(new Error("Oh no a network error"))
|
|
|
|
expect(@apiRequests.length).toBe(0)
|
|
|
|
|
2015-10-09 10:02:54 +08:00
|
|
|
describe "succeeds after a previous error", ->
|
|
|
|
beforeEach ->
|
|
|
|
@worker._state.threads.error = new Error("Something bad happened")
|
2016-06-03 09:46:43 +08:00
|
|
|
@worker._state.threads.lastRequestRange = {limit: 10, offset: 10}
|
2015-10-09 10:02:54 +08:00
|
|
|
@request.requestOptions.success([])
|
|
|
|
advanceClock(1)
|
|
|
|
|
2016-06-03 09:46:43 +08:00
|
|
|
it "should clear any previous error and updates lastRequestRange", ->
|
2015-10-09 10:02:54 +08:00
|
|
|
expect(@worker.state().threads.error).toEqual(null)
|
2016-06-03 09:46:43 +08:00
|
|
|
expect(@worker.state().threads.lastRequestRange).toEqual({offset: 0, limit: 30})
|
2015-08-29 04:24:05 +08:00
|
|
|
|
2015-04-07 02:46:20 +08:00
|
|
|
describe "cleanup", ->
|
2016-01-30 08:06:33 +08:00
|
|
|
it "should termiate the delta connection", ->
|
2015-04-07 02:46:20 +08:00
|
|
|
spyOn(@connection, 'end')
|
|
|
|
@worker.cleanup()
|
|
|
|
expect(@connection.end).toHaveBeenCalled()
|
2015-05-20 06:59:37 +08:00
|
|
|
|
|
|
|
it "should stop trying to restart failed collection syncs", ->
|
2015-06-16 09:29:59 +08:00
|
|
|
spyOn(console, 'log')
|
2016-04-05 08:11:09 +08:00
|
|
|
spyOn(@worker, 'resume').andCallThrough()
|
2015-05-20 06:59:37 +08:00
|
|
|
@worker.cleanup()
|
|
|
|
advanceClock(50000)
|
2016-04-05 08:11:09 +08:00
|
|
|
expect(@worker.resume.callCount).toBe(0)
|