2015-08-06 06:53:08 +08:00
|
|
|
_ = require 'underscore'
|
2015-12-18 03:46:05 +08:00
|
|
|
|
|
|
|
{APIError,
|
|
|
|
Folder,
|
|
|
|
Thread,
|
|
|
|
Message,
|
|
|
|
ACtions,
|
|
|
|
NylasAPI,
|
|
|
|
Query,
|
|
|
|
DatabaseStore,
|
|
|
|
DatabaseTransaction,
|
|
|
|
Task,
|
|
|
|
Utils,
|
|
|
|
ChangeMailTask} = require 'nylas-exports'
|
2015-08-06 06:53:08 +08:00
|
|
|
|
|
|
|
describe "ChangeMailTask", ->
  beforeEach ->
    # Thread fixtures. threadAChanged shares thread A's id but carries a
    # different folder, simulating the post-change version of thread A.
    @threadA = new Thread(id: 'A', folders: [new Folder(id: 'folderA')])
    @threadB = new Thread(id: 'B', folders: [new Folder(id: 'folderB')])
    @threadC = new Thread(id: 'C', folders: [new Folder(id: 'folderC')])
    @threadAChanged = new Thread(id: 'A', folders: [new Folder(id: 'folderC')])

    # Message fixtures. (sic: the "Mesage" spelling is used consistently
    # throughout this spec, so it is preserved here.)
    @threadAMesage1 = new Message(id: 'A1', threadId: 'A')
    @threadAMesage2 = new Message(id: 'A2', threadId: 'A')
    @threadBMesage1 = new Message(id: 'B1', threadId: 'B')

    allThreads = [@threadA, @threadB, @threadC]
    allMessages = [@threadAMesage1, @threadAMesage2, @threadBMesage1]

    # Instead of spying on find/findAll, fake the evaluation of the query
    # itself. That lets queries be composed (findAll().where()...) without a
    # deep stub chain, since query matchers can be evaluated right here in JS.
    spyOn(DatabaseStore, 'run').andCallFake (query) =>
      if query._klass is Message
        pool = allMessages
      else if query._klass is Thread
        pool = allThreads
      else
        throw new Error("Not stubbed!")

      # A model matches when none of the query's matchers evaluates to false.
      matching = pool.filter (model) ->
        query._matchers.every (matcher) -> matcher.evaluate(model) isnt false

      matching = matching[0] if query._singular
      Promise.resolve(matching)

    # Keep persistence calls observable but inert.
    spyOn(DatabaseTransaction.prototype, 'persistModels').andReturn(Promise.resolve())
    spyOn(DatabaseTransaction.prototype, 'persistModel').andReturn(Promise.resolve())
|
|
2015-10-30 01:25:34 +08:00
|
|
|
it "leaves subclasses to implement changesToModel", ->
|
2015-08-06 06:53:08 +08:00
|
|
|
task = new ChangeMailTask()
|
2015-10-30 01:25:34 +08:00
|
|
|
expect( => task.changesToModel() ).toThrow()
|
2015-08-06 06:53:08 +08:00
|
|
|
|
2015-10-30 01:25:34 +08:00
|
|
|
it "leaves subclasses to implement requestBodyForModel", ->
|
2015-08-06 06:53:08 +08:00
|
|
|
task = new ChangeMailTask()
|
2015-10-30 01:25:34 +08:00
|
|
|
expect( => task.requestBodyForModel() ).toThrow()
|
2015-08-06 06:53:08 +08:00
|
|
|
|
|
|
|
describe "performLocal", ->
|
|
|
|
it "rejects if it's an undo task and no restore values are present", ->
|
|
|
|
task = new ChangeMailTask()
|
|
|
|
task._isUndoTask = true
|
|
|
|
spyOn(task, '_performLocalThreads').andReturn(Promise.resolve())
|
|
|
|
|
|
|
|
waitsForPromise =>
|
|
|
|
task.performLocal().catch (err) =>
|
|
|
|
expect(err.message).toEqual("ChangeMailTask: No _restoreValues provided for undo task.")
|
|
|
|
|
|
|
|
it "should always call _performLocalThreads and then _performLocalMessages", ->
|
|
|
|
task = new ChangeMailTask()
|
|
|
|
task.threads = [@threadA]
|
|
|
|
|
|
|
|
@messagesResolve = null
|
|
|
|
spyOn(task, '_performLocalThreads').andCallFake => Promise.resolve()
|
|
|
|
spyOn(task, '_performLocalMessages').andCallFake =>
|
|
|
|
new Promise (resolve, reject) => @messagesResolve = resolve
|
|
|
|
|
|
|
|
runs ->
|
|
|
|
task.performLocal()
|
|
|
|
waitsFor ->
|
|
|
|
task._performLocalThreads.callCount > 0
|
|
|
|
runs ->
|
|
|
|
advanceClock()
|
|
|
|
@messagesResolve()
|
|
|
|
waitsFor ->
|
|
|
|
task._performLocalMessages.callCount > 0
|
|
|
|
runs ->
|
|
|
|
expect(task._performLocalThreads).toHaveBeenCalled()
|
|
|
|
expect(task._performLocalMessages).toHaveBeenCalled()
|
|
|
|
|
|
|
|
describe "_performLocalThreads", ->
|
|
|
|
beforeEach ->
|
|
|
|
@task = new ChangeMailTask()
|
|
|
|
@task.threads = [@threadA, @threadB]
|
|
|
|
# Note: Simulate applyChanges only changing threadA, not threadB
|
|
|
|
spyOn(@task, '_applyChanges').andReturn([@threadAChanged])
|
|
|
|
|
|
|
|
it "calls _applyChanges and writes changed threads to the database", ->
|
|
|
|
waitsForPromise =>
|
|
|
|
@task._performLocalThreads().then =>
|
|
|
|
expect(@task._applyChanges).toHaveBeenCalledWith(@task.threads)
|
2015-12-18 03:46:05 +08:00
|
|
|
expect(DatabaseTransaction.prototype.persistModels).toHaveBeenCalledWith([@threadAChanged])
|
2015-08-06 06:53:08 +08:00
|
|
|
|
2015-10-30 01:25:34 +08:00
|
|
|
describe "when processNestedMessages is overridden to return true", ->
|
2015-08-06 08:39:48 +08:00
|
|
|
it "fetches messages on changed threads and appends them to the messages to update", ->
|
|
|
|
waitsForPromise =>
|
2015-10-30 01:25:34 +08:00
|
|
|
@task.processNestedMessages = => true
|
2015-08-06 08:39:48 +08:00
|
|
|
@task._performLocalThreads().then =>
|
|
|
|
expect(@task._applyChanges).toHaveBeenCalledWith(@task.threads)
|
|
|
|
expect(@task.messages).toEqual([@threadAMesage1, @threadAMesage2])
|
2015-08-06 06:53:08 +08:00
|
|
|
|
|
|
|
describe "_performLocalMessages", ->
|
|
|
|
beforeEach ->
|
|
|
|
@task = new ChangeMailTask()
|
|
|
|
@task.messages = [@threadAMesage1, @threadAMesage2, @threadBMesage1]
|
|
|
|
# Note: Simulate applyChanges only changing threadBMesage1
|
|
|
|
spyOn(@task, '_applyChanges').andReturn([@threadBMesage1])
|
|
|
|
|
|
|
|
it "calls _applyChanges and writes changed messages to the database", ->
|
|
|
|
waitsForPromise =>
|
|
|
|
@task._performLocalMessages().then =>
|
|
|
|
expect(@task._applyChanges).toHaveBeenCalledWith(@task.messages)
|
2015-12-18 03:46:05 +08:00
|
|
|
expect(DatabaseTransaction.prototype.persistModels).toHaveBeenCalledWith([@threadBMesage1])
|
2015-08-06 06:53:08 +08:00
|
|
|
|
|
|
|
describe "_applyChanges", ->
|
|
|
|
beforeEach ->
|
|
|
|
@task = new ChangeMailTask()
|
|
|
|
|
|
|
|
describe "when applying forwards", ->
|
|
|
|
beforeEach ->
|
|
|
|
spyOn(@task, '_shouldChangeBackwards').andReturn(false)
|
2015-10-30 01:25:34 +08:00
|
|
|
spyOn(@task, 'changesToModel').andCallFake (thread) =>
|
2015-08-06 06:53:08 +08:00
|
|
|
if thread is @threadC
|
|
|
|
return {folders: [new Folder(id: "different!")]}
|
|
|
|
else
|
|
|
|
return {folders: thread.folders}
|
|
|
|
|
2015-10-30 01:25:34 +08:00
|
|
|
it "should call changesToModel on each model", ->
|
2015-08-06 06:53:08 +08:00
|
|
|
@task._applyChanges([@threadA, @threadB])
|
2015-10-30 01:25:34 +08:00
|
|
|
expect(@task.changesToModel.callCount).toBe(2)
|
|
|
|
expect(@task.changesToModel.calls[0].args[0]).toBe(@threadA)
|
|
|
|
expect(@task.changesToModel.calls[1].args[0]).toBe(@threadB)
|
2015-08-06 06:53:08 +08:00
|
|
|
|
|
|
|
it "should return only the models with new values", ->
|
|
|
|
out = @task._applyChanges([@threadA, @threadB, @threadC])
|
|
|
|
expect(_.isArray(out)).toBe(true)
|
|
|
|
expect(out.length).toBe(1)
|
|
|
|
expect(out[0].id).toBe('C')
|
|
|
|
expect(out[0].folders[0].id).toBe('different!')
|
|
|
|
|
|
|
|
it "should save restore values only for changed items", ->
|
|
|
|
out = @task._applyChanges([@threadA, @threadB, @threadC])
|
|
|
|
expect(@task._restoreValues['A']).toBe(undefined)
|
|
|
|
expect(@task._restoreValues['B']).toBe(undefined)
|
|
|
|
expect(@task._restoreValues['C']).toEqual(folders: @threadC.folders)
|
|
|
|
|
|
|
|
it "should treat models as if they're frozen, returning new models", ->
|
|
|
|
out = @task._applyChanges([@threadA, @threadB, @threadC])
|
|
|
|
expect(out[0]).not.toBe(@threadC)
|
|
|
|
expect(out[0].id).toBe(@threadC.id)
|
|
|
|
expect(@task._restoreValues['C']).toEqual(folders: @threadC.folders)
|
|
|
|
|
|
|
|
describe "when applying backwards (reverting or undoing)", ->
|
|
|
|
beforeEach ->
|
|
|
|
spyOn(@task, '_shouldChangeBackwards').andReturn(true)
|
|
|
|
@task._restoreValues =
|
|
|
|
'C': {folders: [new Folder(id:'oldFolderC')]}
|
|
|
|
|
|
|
|
it "should return only models with the restore values, with the restore values applied", ->
|
|
|
|
out = @task._applyChanges([@threadA, @threadB, @threadC])
|
|
|
|
expect(_.isArray(out)).toBe(true)
|
|
|
|
expect(out.length).toBe(1)
|
|
|
|
expect(out[0].id).toBe('C')
|
|
|
|
expect(out[0].folders[0].id).toBe('oldFolderC')
|
|
|
|
|
|
|
|
describe "performRemote", ->
|
|
|
|
describe "if threads are set", ->
|
2015-10-30 01:25:34 +08:00
|
|
|
it "should only call _performRequests with threads", ->
|
2015-08-06 06:53:08 +08:00
|
|
|
@task = new ChangeMailTask()
|
|
|
|
@task.threads = [@threadA, @threadB]
|
|
|
|
@task.messages = [@threadAMesage1, @threadAMesage2]
|
2015-10-30 01:25:34 +08:00
|
|
|
spyOn(@task, '_performRequests').andReturn(Promise.resolve())
|
2015-08-06 06:53:08 +08:00
|
|
|
waitsForPromise =>
|
|
|
|
@task.performRemote().then =>
|
2015-10-30 01:25:34 +08:00
|
|
|
expect(@task._performRequests).toHaveBeenCalledWith(Thread, @task.threads)
|
|
|
|
expect(@task._performRequests.callCount).toBe(1)
|
2015-08-06 06:53:08 +08:00
|
|
|
|
|
|
|
describe "if only messages are set", ->
|
2015-10-30 01:25:34 +08:00
|
|
|
it "should only call _performRequests with messages", ->
|
2015-08-06 06:53:08 +08:00
|
|
|
@task = new ChangeMailTask()
|
|
|
|
@task.threads = []
|
|
|
|
@task.messages = [@threadAMesage1, @threadAMesage2]
|
2015-10-30 01:25:34 +08:00
|
|
|
spyOn(@task, '_performRequests').andReturn(Promise.resolve())
|
2015-08-06 06:53:08 +08:00
|
|
|
waitsForPromise =>
|
|
|
|
@task.performRemote().then =>
|
2015-10-30 01:25:34 +08:00
|
|
|
expect(@task._performRequests).toHaveBeenCalledWith(Message, @task.messages)
|
|
|
|
expect(@task._performRequests.callCount).toBe(1)
|
2015-08-06 06:53:08 +08:00
|
|
|
|
2015-10-01 05:09:06 +08:00
|
|
|
describe "if somehow there are no threads or messages", ->
|
|
|
|
it "should resolve", ->
|
|
|
|
@task = new ChangeMailTask()
|
|
|
|
@task.threads = []
|
|
|
|
@task.messages = []
|
|
|
|
waitsForPromise =>
|
|
|
|
@task.performRemote().then (code) =>
|
fix(tasks): don't continue if dependent task fails
Summary:
Fixes T4291
If I made a final edit to a pre-existing draft and sent, we'd queue a
`SyncbackDraftTask` before a `SendDraftTask`. This is important because
since we have a valid draft `server_id`, the `SendDraftTask` will send by
server_id, not by POSTing the whole body.
If the `SyncbackDraftTask` fails, then we had a very serious issue whereby
the `SendDraftTask` would keep on sending. Unfortunately the server never
got the latest changes and sent the wrong version of the draft. This
incorrect version would show up later when the `/send` endpoint returned
the message that got actually sent.
The solution was to make any queued `SendDraftTask` fail if a dependent
`SyncbackDraftTask` failed.
This meant we needed to make the requirements for `shouldWaitForTask`
stricter, and block if tasks failed.
Unfortunatley there was no infrastructure in place to do this.
The first change was to change `shouldWaitForTask` to `isDependentTask`.
If we're going to fail when a dependent task fails, I wanted the method
name to reflect this.
Now, if a dependent task fails, we recursively check the dependency tree
(and check for cycles) and `dequeue` anything that needed that to succeed.
I chose `dequeue` as the default action because it seemed as though all
current uses of `shouldWaitForTask` really should bail if their
dependencies fail. It's possible you don't want your task dequeued in this
dependency case. You can return the special `Task.DO_NOT_DEQUEUE_ME`
constant from the `onDependentTaskError` method.
When a task gets dequeued because of the reason above, the
`onDependentTaskError` callback gets fired. This gives tasks like the
`SendDraftTask` a chance to notify the user that it bailed. Not all tasks
need to notify.
The next big issue was a better way to determine if a task truely errored
to the point that we need to dequeue dependencies. In the Developer Status
area we were showing tasks that had errored as "Green" because we caught
the error and resolved with `Task.Status.Finished`. This used to be fine
since nothing life-or-death cared if a task errored or not. Now that it
might cause abortions down the line, we needed a more robust method then
this.
For one I changed `Task.Status.Finished` to a variety of finish types
including `Task.Status.Success`. The way you "error" out is to `throw` or
`Promise.reject` an `Error` object from the `performRemote` method. This
allows us to propagate API errors up, and acts as a safety net that can
catch any malformed code or unexpected responses.
The developer bar now shows a much richer set of statuses instead of a
binary one, which was REALLY helpful in debugging this. We also record
when a Task got dequeued because of the conditions introduced here.
Once all this was working we still had an issue of sending old drafts.
If after a `SyncbackDraftTask` failed, now we'd block the send and notify
the users as such. However, if we tried to send again, there was a
separate issue whereby we wouldn't queue another `SyncbackDraftTask` to
update the server with the latest information. Since our changes were
persisted to the DB, we thought we had no changes, and therefore didn't
need to queue a `SyncbackDraftTask`.
The fix to this is to always force the creation of a `SyncbackDraftTask`
before send regardless of the state of the `DraftStoreProxy`.
Test Plan: new tests. Lots of manual testing
Reviewers: bengotow
Reviewed By: bengotow
Subscribers: mg
Maniphest Tasks: T4291
Differential Revision: https://phab.nylas.com/D2156
2015-10-22 01:33:43 +08:00
|
|
|
expect(code).toEqual(Task.Status.Success)
|
2015-10-01 05:09:06 +08:00
|
|
|
|
2015-10-30 01:25:34 +08:00
|
|
|
describe "if _performRequests resolves", ->
|
fix(tasks): don't continue if dependent task fails
Summary:
Fixes T4291
If I made a final edit to a pre-existing draft and sent, we'd queue a
`SyncbackDraftTask` before a `SendDraftTask`. This is important because
since we have a valid draft `server_id`, the `SendDraftTask` will send by
server_id, not by POSTing the whole body.
If the `SyncbackDraftTask` fails, then we had a very serious issue whereby
the `SendDraftTask` would keep on sending. Unfortunately the server never
got the latest changes and sent the wrong version of the draft. This
incorrect version would show up later when the `/send` endpoint returned
the message that got actually sent.
The solution was to make any queued `SendDraftTask` fail if a dependent
`SyncbackDraftTask` failed.
This meant we needed to make the requirements for `shouldWaitForTask`
stricter, and block if tasks failed.
Unfortunatley there was no infrastructure in place to do this.
The first change was to change `shouldWaitForTask` to `isDependentTask`.
If we're going to fail when a dependent task fails, I wanted the method
name to reflect this.
Now, if a dependent task fails, we recursively check the dependency tree
(and check for cycles) and `dequeue` anything that needed that to succeed.
I chose `dequeue` as the default action because it seemed as though all
current uses of `shouldWaitForTask` really should bail if their
dependencies fail. It's possible you don't want your task dequeued in this
dependency case. You can return the special `Task.DO_NOT_DEQUEUE_ME`
constant from the `onDependentTaskError` method.
When a task gets dequeued because of the reason above, the
`onDependentTaskError` callback gets fired. This gives tasks like the
`SendDraftTask` a chance to notify the user that it bailed. Not all tasks
need to notify.
The next big issue was a better way to determine if a task truely errored
to the point that we need to dequeue dependencies. In the Developer Status
area we were showing tasks that had errored as "Green" because we caught
the error and resolved with `Task.Status.Finished`. This used to be fine
since nothing life-or-death cared if a task errored or not. Now that it
might cause abortions down the line, we needed a more robust method then
this.
For one I changed `Task.Status.Finished` to a variety of finish types
including `Task.Status.Success`. The way you "error" out is to `throw` or
`Promise.reject` an `Error` object from the `performRemote` method. This
allows us to propagate API errors up, and acts as a safety net that can
catch any malformed code or unexpected responses.
The developer bar now shows a much richer set of statuses instead of a
binary one, which was REALLY helpful in debugging this. We also record
when a Task got dequeued because of the conditions introduced here.
Once all this was working we still had an issue of sending old drafts.
If after a `SyncbackDraftTask` failed, now we'd block the send and notify
the users as such. However, if we tried to send again, there was a
separate issue whereby we wouldn't queue another `SyncbackDraftTask` to
update the server with the latest information. Since our changes were
persisted to the DB, we thought we had no changes, and therefore didn't
need to queue a `SyncbackDraftTask`.
The fix to this is to always force the creation of a `SyncbackDraftTask`
before send regardless of the state of the `DraftStoreProxy`.
Test Plan: new tests. Lots of manual testing
Reviewers: bengotow
Reviewed By: bengotow
Subscribers: mg
Maniphest Tasks: T4291
Differential Revision: https://phab.nylas.com/D2156
2015-10-22 01:33:43 +08:00
|
|
|
it "should resolve with Task.Status.Success", ->
|
2015-08-06 06:53:08 +08:00
|
|
|
@task = new ChangeMailTask()
|
2015-10-30 01:25:34 +08:00
|
|
|
spyOn(@task, '_performRequests').andReturn(Promise.resolve())
|
2015-08-06 06:53:08 +08:00
|
|
|
waitsForPromise =>
|
|
|
|
@task.performRemote().then (result) =>
|
fix(tasks): don't continue if dependent task fails
Summary:
Fixes T4291
If I made a final edit to a pre-existing draft and sent, we'd queue a
`SyncbackDraftTask` before a `SendDraftTask`. This is important because
since we have a valid draft `server_id`, the `SendDraftTask` will send by
server_id, not by POSTing the whole body.
If the `SyncbackDraftTask` fails, then we had a very serious issue whereby
the `SendDraftTask` would keep on sending. Unfortunately the server never
got the latest changes and sent the wrong version of the draft. This
incorrect version would show up later when the `/send` endpoint returned
the message that got actually sent.
The solution was to make any queued `SendDraftTask` fail if a dependent
`SyncbackDraftTask` failed.
This meant we needed to make the requirements for `shouldWaitForTask`
stricter, and block if tasks failed.
Unfortunatley there was no infrastructure in place to do this.
The first change was to change `shouldWaitForTask` to `isDependentTask`.
If we're going to fail when a dependent task fails, I wanted the method
name to reflect this.
Now, if a dependent task fails, we recursively check the dependency tree
(and check for cycles) and `dequeue` anything that needed that to succeed.
I chose `dequeue` as the default action because it seemed as though all
current uses of `shouldWaitForTask` really should bail if their
dependencies fail. It's possible you don't want your task dequeued in this
dependency case. You can return the special `Task.DO_NOT_DEQUEUE_ME`
constant from the `onDependentTaskError` method.
When a task gets dequeued because of the reason above, the
`onDependentTaskError` callback gets fired. This gives tasks like the
`SendDraftTask` a chance to notify the user that it bailed. Not all tasks
need to notify.
The next big issue was a better way to determine if a task truely errored
to the point that we need to dequeue dependencies. In the Developer Status
area we were showing tasks that had errored as "Green" because we caught
the error and resolved with `Task.Status.Finished`. This used to be fine
since nothing life-or-death cared if a task errored or not. Now that it
might cause abortions down the line, we needed a more robust method then
this.
For one I changed `Task.Status.Finished` to a variety of finish types
including `Task.Status.Success`. The way you "error" out is to `throw` or
`Promise.reject` an `Error` object from the `performRemote` method. This
allows us to propagate API errors up, and acts as a safety net that can
catch any malformed code or unexpected responses.
The developer bar now shows a much richer set of statuses instead of a
binary one, which was REALLY helpful in debugging this. We also record
when a Task got dequeued because of the conditions introduced here.
Once all this was working we still had an issue of sending old drafts.
If after a `SyncbackDraftTask` failed, now we'd block the send and notify
the users as such. However, if we tried to send again, there was a
separate issue whereby we wouldn't queue another `SyncbackDraftTask` to
update the server with the latest information. Since our changes were
persisted to the DB, we thought we had no changes, and therefore didn't
need to queue a `SyncbackDraftTask`.
The fix to this is to always force the creation of a `SyncbackDraftTask`
before send regardless of the state of the `DraftStoreProxy`.
Test Plan: new tests. Lots of manual testing
Reviewers: bengotow
Reviewed By: bengotow
Subscribers: mg
Maniphest Tasks: T4291
Differential Revision: https://phab.nylas.com/D2156
2015-10-22 01:33:43 +08:00
|
|
|
expect(result).toBe(Task.Status.Success)
|
2015-08-06 06:53:08 +08:00
|
|
|
|
2015-10-30 01:25:34 +08:00
|
|
|
describe "if _performRequests rejects with a permanent network error", ->
|
2015-08-06 06:53:08 +08:00
|
|
|
beforeEach ->
|
|
|
|
@task = new ChangeMailTask()
|
2015-10-30 01:25:34 +08:00
|
|
|
spyOn(@task, '_performRequests').andReturn(Promise.reject(new APIError(statusCode: 400)))
|
2015-08-06 06:53:08 +08:00
|
|
|
spyOn(@task, 'performLocal').andReturn(Promise.resolve())
|
|
|
|
|
|
|
|
it "should set isReverting and call performLocal", ->
|
|
|
|
waitsForPromise =>
|
|
|
|
@task.performRemote().then (result) =>
|
|
|
|
expect(@task.performLocal).toHaveBeenCalled()
|
|
|
|
expect(@task._isReverting).toBe(true)
|
|
|
|
|
fix(tasks): don't continue if dependent task fails
Summary:
Fixes T4291
If I made a final edit to a pre-existing draft and sent, we'd queue a
`SyncbackDraftTask` before a `SendDraftTask`. This is important because
since we have a valid draft `server_id`, the `SendDraftTask` will send by
server_id, not by POSTing the whole body.
If the `SyncbackDraftTask` fails, then we had a very serious issue whereby
the `SendDraftTask` would keep on sending. Unfortunately the server never
got the latest changes and sent the wrong version of the draft. This
incorrect version would show up later when the `/send` endpoint returned
the message that got actually sent.
The solution was to make any queued `SendDraftTask` fail if a dependent
`SyncbackDraftTask` failed.
This meant we needed to make the requirements for `shouldWaitForTask`
stricter, and block if tasks failed.
Unfortunatley there was no infrastructure in place to do this.
The first change was to change `shouldWaitForTask` to `isDependentTask`.
If we're going to fail when a dependent task fails, I wanted the method
name to reflect this.
Now, if a dependent task fails, we recursively check the dependency tree
(and check for cycles) and `dequeue` anything that needed that to succeed.
I chose `dequeue` as the default action because it seemed as though all
current uses of `shouldWaitForTask` really should bail if their
dependencies fail. It's possible you don't want your task dequeued in this
dependency case. You can return the special `Task.DO_NOT_DEQUEUE_ME`
constant from the `onDependentTaskError` method.
When a task gets dequeued because of the reason above, the
`onDependentTaskError` callback gets fired. This gives tasks like the
`SendDraftTask` a chance to notify the user that it bailed. Not all tasks
need to notify.
The next big issue was a better way to determine if a task truely errored
to the point that we need to dequeue dependencies. In the Developer Status
area we were showing tasks that had errored as "Green" because we caught
the error and resolved with `Task.Status.Finished`. This used to be fine
since nothing life-or-death cared if a task errored or not. Now that it
might cause abortions down the line, we needed a more robust method then
this.
For one I changed `Task.Status.Finished` to a variety of finish types
including `Task.Status.Success`. The way you "error" out is to `throw` or
`Promise.reject` an `Error` object from the `performRemote` method. This
allows us to propagate API errors up, and acts as a safety net that can
catch any malformed code or unexpected responses.
The developer bar now shows a much richer set of statuses instead of a
binary one, which was REALLY helpful in debugging this. We also record
when a Task got dequeued because of the conditions introduced here.
Once all this was working we still had an issue of sending old drafts.
If after a `SyncbackDraftTask` failed, now we'd block the send and notify
the users as such. However, if we tried to send again, there was a
separate issue whereby we wouldn't queue another `SyncbackDraftTask` to
update the server with the latest information. Since our changes were
persisted to the DB, we thought we had no changes, and therefore didn't
need to queue a `SyncbackDraftTask`.
The fix to this is to always force the creation of a `SyncbackDraftTask`
before send regardless of the state of the `DraftStoreProxy`.
Test Plan: new tests. Lots of manual testing
Reviewers: bengotow
Reviewed By: bengotow
Subscribers: mg
Maniphest Tasks: T4291
Differential Revision: https://phab.nylas.com/D2156
2015-10-22 01:33:43 +08:00
|
|
|
it "should resolve with Task.Status.Failed after reverting", ->
|
2015-08-06 06:53:08 +08:00
|
|
|
waitsForPromise =>
|
|
|
|
@task.performRemote().then (result) =>
|
fix(tasks): don't continue if dependent task fails
Summary:
Fixes T4291
If I made a final edit to a pre-existing draft and sent, we'd queue a
`SyncbackDraftTask` before a `SendDraftTask`. This is important because
since we have a valid draft `server_id`, the `SendDraftTask` will send by
server_id, not by POSTing the whole body.
If the `SyncbackDraftTask` fails, then we had a very serious issue whereby
the `SendDraftTask` would keep on sending. Unfortunately the server never
got the latest changes and sent the wrong version of the draft. This
incorrect version would show up later when the `/send` endpoint returned
the message that got actually sent.
The solution was to make any queued `SendDraftTask` fail if a dependent
`SyncbackDraftTask` failed.
This meant we needed to make the requirements for `shouldWaitForTask`
stricter, and block if tasks failed.
Unfortunatley there was no infrastructure in place to do this.
The first change was to change `shouldWaitForTask` to `isDependentTask`.
If we're going to fail when a dependent task fails, I wanted the method
name to reflect this.
Now, if a dependent task fails, we recursively check the dependency tree
(and check for cycles) and `dequeue` anything that needed that to succeed.
I chose `dequeue` as the default action because it seemed as though all
current uses of `shouldWaitForTask` really should bail if their
dependencies fail. It's possible you don't want your task dequeued in this
dependency case. You can return the special `Task.DO_NOT_DEQUEUE_ME`
constant from the `onDependentTaskError` method.
When a task gets dequeued because of the reason above, the
`onDependentTaskError` callback gets fired. This gives tasks like the
`SendDraftTask` a chance to notify the user that it bailed. Not all tasks
need to notify.
The next big issue was a better way to determine if a task truely errored
to the point that we need to dequeue dependencies. In the Developer Status
area we were showing tasks that had errored as "Green" because we caught
the error and resolved with `Task.Status.Finished`. This used to be fine
since nothing life-or-death cared if a task errored or not. Now that it
might cause abortions down the line, we needed a more robust method then
this.
For one I changed `Task.Status.Finished` to a variety of finish types
including `Task.Status.Success`. The way you "error" out is to `throw` or
`Promise.reject` an `Error` object from the `performRemote` method. This
allows us to propagate API errors up, and acts as a safety net that can
catch any malformed code or unexpected responses.
The developer bar now shows a much richer set of statuses instead of a
binary one, which was REALLY helpful in debugging this. We also record
when a Task got dequeued because of the conditions introduced here.
Once all this was working we still had an issue of sending old drafts.
If after a `SyncbackDraftTask` failed, now we'd block the send and notify
the users as such. However, if we tried to send again, there was a
separate issue whereby we wouldn't queue another `SyncbackDraftTask` to
update the server with the latest information. Since our changes were
persisted to the DB, we thought we had no changes, and therefore didn't
need to queue a `SyncbackDraftTask`.
The fix to this is to always force the creation of a `SyncbackDraftTask`
before send regardless of the state of the `DraftStoreProxy`.
Test Plan: new tests. Lots of manual testing
Reviewers: bengotow
Reviewed By: bengotow
Subscribers: mg
Maniphest Tasks: T4291
Differential Revision: https://phab.nylas.com/D2156
2015-10-22 01:33:43 +08:00
|
|
|
expect(result).toBe(Task.Status.Failed)
|
2015-08-06 06:53:08 +08:00
|
|
|
|
2015-10-30 01:25:34 +08:00
|
|
|
describe "if _performRequests rejects with a temporary network error", ->
|
2015-08-06 06:53:08 +08:00
|
|
|
beforeEach ->
|
|
|
|
@task = new ChangeMailTask()
|
2015-10-30 01:25:34 +08:00
|
|
|
spyOn(@task, '_performRequests').andReturn(Promise.reject(new APIError(statusCode: NylasAPI.SampleTemporaryErrorCode)))
|
2015-08-06 06:53:08 +08:00
|
|
|
spyOn(@task, 'performLocal').andReturn(Promise.resolve())
|
|
|
|
|
|
|
|
it "should not revert", ->
|
|
|
|
waitsForPromise =>
|
|
|
|
@task.performRemote().then (result) =>
|
|
|
|
expect(@task.performLocal).not.toHaveBeenCalled()
|
|
|
|
expect(@task._isReverting).not.toBe(true)
|
|
|
|
|
|
|
|
it "should resolve with Task.Status.Retry", ->
|
|
|
|
waitsForPromise =>
|
|
|
|
@task.performRemote().then (result) =>
|
|
|
|
expect(result).toBe(Task.Status.Retry)
|
|
|
|
|
|
|
|
|
2015-10-30 01:25:34 +08:00
|
|
|
describe "_performRequests", ->
  beforeEach ->
    @task = new ChangeMailTask()
    # Pretend performLocal already ran: restore data is present for every
    # model these tests reference (keys match the model ids declared in the
    # outer beforeEach — 'A', 'B', 'C' are threads, 'A1' is a message).
    @task._restoreValues =
      'A': {}
      'B': {}
      'C': {}
      'A1': {}
    spyOn(@task, 'requestBodyForModel').andCallFake (model) =>
      if model is @threadA
        return {field: 'thread-a-body'}
      if model is @threadB
        return {field: 'thread-b-body'}
      if model is @threadAMesage1
        return {field: 'message-1'}

  it "should call NylasAPI.makeRequest for each model, passing the result of requestBodyForModel", ->
    spyOn(NylasAPI, 'makeRequest').andReturn(Promise.resolve())
    runs ->
      @task._performRequests(Thread, [@threadA, @threadB])
    waitsFor ->
      NylasAPI.makeRequest.callCount is 2
    runs ->
      expect(NylasAPI.makeRequest.calls[0].args[0].body).toEqual({field: 'thread-a-body'})
      expect(NylasAPI.makeRequest.calls[1].args[0].body).toEqual({field: 'thread-b-body'})

  it "should resolve when all of the requests complete", ->
    promises = []
    spyOn(NylasAPI, 'makeRequest').andCallFake ->
      new Promise (resolve, reject) -> promises.push({resolve, reject})

    resolved = false
    runs ->
      @task._performRequests(Thread, [@threadA, @threadB]).then =>
        resolved = true
    waitsFor ->
      NylasAPI.makeRequest.callCount is 2
    runs ->
      # Resolving only one of the two requests must not settle the batch.
      expect(resolved).toBe(false)
      promises[0].resolve()
      advanceClock()
      expect(resolved).toBe(false)
      promises[1].resolve()
      advanceClock()
      expect(resolved).toBe(true)

  it "should carry on and resolve if a request 404s, since the NylasAPI manager will clean the object from the cache", ->
    promises = []
    spyOn(NylasAPI, 'makeRequest').andCallFake ->
      new Promise (resolve, reject) -> promises.push({resolve, reject})

    resolved = false
    runs ->
      @task._performRequests(Thread, [@threadA, @threadB]).then =>
        resolved = true
    waitsFor ->
      NylasAPI.makeRequest.callCount is 2
    runs ->
      # A 404 is treated as "object already gone" and must not fail the batch.
      promises[0].resolve()
      promises[1].reject(new APIError(statusCode: 404))
      advanceClock()
      expect(resolved).toBe(true)

  it "should reject with the request error encountered by any request", ->
    promises = []
    spyOn(NylasAPI, 'makeRequest').andCallFake ->
      new Promise (resolve, reject) -> promises.push({resolve, reject})

    err = null
    runs ->
      @task._performRequests(Thread, [@threadA, @threadB]).catch (error) =>
        err = error
    waitsFor ->
      NylasAPI.makeRequest.callCount is 2
    runs ->
      expect(err).toBe(null)
      promises[0].resolve()
      advanceClock()
      expect(err).toBe(null)
      # Any non-404 API error propagates out of _performRequests unchanged.
      apiError = new APIError(statusCode: NylasAPI.SampleTemporaryErrorCode)
      promises[1].reject(apiError)
      advanceClock()
      expect(err).toBe(apiError)

  it "should use /threads when the klass provided is Thread", ->
    spyOn(NylasAPI, 'makeRequest').andCallFake ->
      new Promise (resolve, reject) -> #noop
    runs ->
      @task._performRequests(Thread, [@threadA, @threadB])
    waitsFor ->
      NylasAPI.makeRequest.callCount is 2
    runs ->
      path = "/threads/#{@threadA.id}"
      expect(NylasAPI.makeRequest.calls[0].args[0].path).toBe(path)
      expect(NylasAPI.makeRequest.calls[0].args[0].accountId).toBe(@threadA.accountId)

  it "should use /messages when the klass provided is Message", ->
    spyOn(NylasAPI, 'makeRequest').andCallFake ->
      new Promise (resolve, reject) -> #noop
    runs ->
      @task._performRequests(Message, [@threadAMesage1])
    waitsFor ->
      NylasAPI.makeRequest.callCount is 1
    runs ->
      path = "/messages/#{@threadAMesage1.id}"
      expect(NylasAPI.makeRequest.calls[0].args[0].path).toBe(path)
      expect(NylasAPI.makeRequest.calls[0].args[0].accountId).toBe(@threadAMesage1.accountId)

  it "should decrement change counts as requests complete", ->
    spyOn(NylasAPI, 'makeRequest').andCallFake ->
      new Promise (resolve, reject) -> #noop
    spyOn(@task, '_removeLock')
    runs ->
      # Fixed: the model under test is a Message, so pass the Message klass
      # (previously passed Thread, which contradicted the model's type).
      @task._performRequests(Message, [@threadAMesage1])
    waitsFor ->
      NylasAPI.makeRequest.callCount is 1
    runs ->
      NylasAPI.makeRequest.calls[0].args[0].beforeProcessing({})
      expect(@task._removeLock).toHaveBeenCalledWith(@threadAMesage1)

  # Title fixed: the assertions below pin the concurrency window at 5
  # in-flight requests, not 10.
  it "should make no more than 5 requests at once", ->
    resolves = []
    spyOn(@task, '_removeLock')
    spyOn(NylasAPI, 'makeRequest').andCallFake ->
      new Promise (resolve, reject) -> resolves.push(resolve)

    threads = []
    threads.push new Thread(id: "#{idx}", subject: idx) for idx in [0..100]
    @task._restoreValues = _.map threads, (t) -> {some: 'data'}
    @task._performRequests(Thread, threads)
    advanceClock()
    expect(resolves.length).toEqual(5)
    advanceClock()
    expect(resolves.length).toEqual(5)
    # Completing two requests should admit exactly two more.
    resolves[0]()
    resolves[1]()
    advanceClock()
    expect(resolves.length).toEqual(7)
    resolves[idx]() for idx in [2...7]
    advanceClock()
    expect(resolves.length).toEqual(12)

  it "should stop making requests after non-404 network errors", ->
    resolves = []
    rejects = []
    spyOn(@task, '_removeLock')
    spyOn(NylasAPI, 'makeRequest').andCallFake ->
      new Promise (resolve, reject) ->
        resolves.push(resolve)
        rejects.push(reject)

    threads = []
    threads.push new Thread(id: "#{idx}", subject: idx) for idx in [0..100]
    @task._restoreValues = _.map threads, (t) -> {some: 'data'}
    @task._performRequests(Thread, threads).catch (err) ->
      # Need to catch the error so it's not a
      # Promise.possiblyUnhandledRejection, which will stop the tests.
      expect(err.statusCode).toBe 400

    advanceClock()
    expect(resolves.length).toEqual(5)
    resolves[idx]() for idx in [0...4]
    advanceClock()
    expect(resolves.length).toEqual(9)

    # simulate request failure
    reject = rejects[rejects.length - 1]
    reject(new APIError(statusCode: 400))
    advanceClock()

    # simulate more requests succeeding
    resolves[idx]() for idx in [5...9]
    advanceClock()

    # check that no more requests have been queued
    expect(resolves.length).toEqual(9)
2015-08-06 06:53:08 +08:00
|
|
|
describe "optimistic object locking", ->
  beforeEach ->
    @task = new ChangeMailTask()
    spyOn(@task, '_performLocalThreads').andReturn(Promise.resolve())
    spyOn(@task, '_lockAll')

  it "increments the locks in performLocal", ->
    waitsForPromise =>
      @task.performLocal().then =>
        expect(@task._lockAll).toHaveBeenCalled()

  describe "when the task is reverting after request failures", ->
    it "should not increment change locks", ->
      # A reverting pass re-applies old values and must not re-lock.
      @task._isReverting = true
      waitsForPromise =>
        @task.performLocal().then =>
          expect(@task._lockAll).not.toHaveBeenCalled()

  describe "when the task is undoing", ->
    it "should increment change locks", ->
      # Undo tasks carry _restoreValues from the original task and still lock.
      @task._isUndoTask = true
      @task._restoreValues = {}
      waitsForPromise =>
        @task.performLocal().then =>
          expect(@task._lockAll).toHaveBeenCalled()

  describe "when performRemote is returning Task.Status.Success", ->
    it "should clean up locks", ->
      spyOn(@task, '_performRequests').andReturn(Promise.resolve())
      spyOn(@task, '_ensureLocksRemoved')
      waitsForPromise =>
        @task.performRemote().then =>
          expect(@task._ensureLocksRemoved).toHaveBeenCalled()

  describe "when performRemote is returning Task.Status.Failed after reverting", ->
    it "should clean up locks", ->
      # A permanent (400) failure reverts, then must still release locks.
      spyOn(@task, '_performRequests').andReturn(Promise.reject(new APIError(statusCode: 400)))
      spyOn(@task, '_ensureLocksRemoved')
      waitsForPromise =>
        @task.performRemote().then =>
          expect(@task._ensureLocksRemoved).toHaveBeenCalled()

  describe "when performRemote is returning Task.Status.Retry", ->
    it "should not clean up locks", ->
      # Transient errors keep the locks so the retry still owns the objects.
      spyOn(@task, '_performRequests').andReturn(Promise.reject(new APIError(statusCode: NylasAPI.SampleTemporaryErrorCode)))
      spyOn(@task, '_ensureLocksRemoved')
      waitsForPromise =>
        @task.performRemote().then =>
          expect(@task._ensureLocksRemoved).not.toHaveBeenCalled()
|
|
|
describe "_lockAll", ->
  beforeEach ->
    @task = new ChangeMailTask()
    @task.threads = [@threadA, @threadB]
    spyOn(NylasAPI, 'incrementOptimisticChangeCount')

  it "should keep a hash of the items that it locks", ->
    @task._lockAll()
    expect(NylasAPI.incrementOptimisticChangeCount.callCount).toBe(2)
    expect(@task._locked).toEqual('A': 1, 'B': 1)

  it "should not break anything if it's accidentally called twice", ->
    # Each call bumps every thread's lock count; counts accumulate rather
    # than error out.
    @task._lockAll()
    @task._lockAll()
    expect(NylasAPI.incrementOptimisticChangeCount.callCount).toBe(4)
    expect(@task._locked).toEqual('A': 2, 'B': 2)
|
|
|
describe "_ensureLocksRemoved", ->
  # Typo fixed in the title: "aribtrarily" -> "arbitrarily".
  it "should decrement locks given any arbitrarily messed up lock state and reset the locked array", ->
    @task = new ChangeMailTask()
    @task.threads = [@threadA, @threadB, @threadC]
    spyOn(NylasAPI, 'decrementOptimisticChangeCount')
    # Seed unbalanced lock counts; cleanup must decrement once per count.
    @task._locked = {'A': 2, 'B': 2, 'C': 1}
    @task._ensureLocksRemoved()
    expect(NylasAPI.decrementOptimisticChangeCount.callCount).toBe(5)
    expect(NylasAPI.decrementOptimisticChangeCount.calls[0].args[1]).toBe('A')
    expect(NylasAPI.decrementOptimisticChangeCount.calls[1].args[1]).toBe('A')
    expect(NylasAPI.decrementOptimisticChangeCount.calls[2].args[1]).toBe('B')
    expect(NylasAPI.decrementOptimisticChangeCount.calls[3].args[1]).toBe('B')
    expect(NylasAPI.decrementOptimisticChangeCount.calls[4].args[1]).toBe('C')
    expect(@task._locked).toEqual(null)
|
|
|
describe "createIdenticalTask", ->
  it "should return a copy of the task, but with the objects converted into object ids", ->
    # Messages are flattened to ids.
    task = new ChangeMailTask()
    task.messages = [@threadAMesage1, @threadAMesage2]
    clone = task.createIdenticalTask()
    expect(clone.messages).toEqual([@threadAMesage1.id, @threadAMesage2.id])

    # Threads are flattened to ids as well.
    task = new ChangeMailTask()
    task.threads = [@threadA, @threadB]
    clone = task.createIdenticalTask()
    expect(clone.threads).toEqual([@threadA.id, @threadB.id])

    # Already-flattened id arrays pass through unchanged.
    task = new ChangeMailTask()
    task.threads = [@threadA.id, @threadB.id]
    clone = task.createIdenticalTask()
    expect(clone.threads).toEqual([@threadA.id, @threadB.id])
|
|
|
describe "createUndoTask", ->
  it "should return a task initialized with _isUndoTask and _restoreValues", ->
    task = new ChangeMailTask()
    task.messages = [@threadAMesage1, @threadAMesage2]
    task._restoreValues = {'A': 'bla'}
    undo = task.createUndoTask()
    expect(undo.messages).toEqual([@threadAMesage1.id, @threadAMesage2.id])
    # The undo task shares the original's restore data by reference.
    expect(undo._restoreValues).toBe(task._restoreValues)
    expect(undo._isUndoTask).toBe(true)

  it "should throw if you try to make an undo task of an undo task", ->
    task = new ChangeMailTask()
    task._isUndoTask = true
    expect( -> task.createUndoTask()).toThrow()

  # Typo fixed in the title: "availble" -> "available".
  it "should throw if _restoreValues are not available", ->
    task = new ChangeMailTask()
    task.messages = [@threadAMesage1, @threadAMesage2]
    task._restoreValues = null
    expect( -> task.createUndoTask()).toThrow()
|
fix(tasks): don't continue if dependent task fails
Summary:
Fixes T4291
If I made a final edit to a pre-existing draft and sent, we'd queue a
`SyncbackDraftTask` before a `SendDraftTask`. This is important because
since we have a valid draft `server_id`, the `SendDraftTask` will send by
server_id, not by POSTing the whole body.
If the `SyncbackDraftTask` fails, then we had a very serious issue whereby
the `SendDraftTask` would keep on sending. Unfortunately the server never
got the latest changes and sent the wrong version of the draft. This
incorrect version would show up later when the `/send` endpoint returned
the message that got actually sent.
The solution was to make any queued `SendDraftTask` fail if a dependent
`SyncbackDraftTask` failed.
This meant we needed to make the requirements for `shouldWaitForTask`
stricter, and block if tasks failed.
Unfortunatley there was no infrastructure in place to do this.
The first change was to change `shouldWaitForTask` to `isDependentTask`.
If we're going to fail when a dependent task fails, I wanted the method
name to reflect this.
Now, if a dependent task fails, we recursively check the dependency tree
(and check for cycles) and `dequeue` anything that needed that to succeed.
I chose `dequeue` as the default action because it seemed as though all
current uses of `shouldWaitForTask` really should bail if their
dependencies fail. It's possible you don't want your task dequeued in this
dependency case. You can return the special `Task.DO_NOT_DEQUEUE_ME`
constant from the `onDependentTaskError` method.
When a task gets dequeued because of the reason above, the
`onDependentTaskError` callback gets fired. This gives tasks like the
`SendDraftTask` a chance to notify the user that it bailed. Not all tasks
need to notify.
The next big issue was a better way to determine if a task truely errored
to the point that we need to dequeue dependencies. In the Developer Status
area we were showing tasks that had errored as "Green" because we caught
the error and resolved with `Task.Status.Finished`. This used to be fine
since nothing life-or-death cared if a task errored or not. Now that it
might cause abortions down the line, we needed a more robust method then
this.
For one I changed `Task.Status.Finished` to a variety of finish types
including `Task.Status.Success`. The way you "error" out is to `throw` or
`Promise.reject` an `Error` object from the `performRemote` method. This
allows us to propagate API errors up, and acts as a safety net that can
catch any malformed code or unexpected responses.
The developer bar now shows a much richer set of statuses instead of a
binary one, which was REALLY helpful in debugging this. We also record
when a Task got dequeued because of the conditions introduced here.
Once all this was working we still had an issue of sending old drafts.
If after a `SyncbackDraftTask` failed, now we'd block the send and notify
the users as such. However, if we tried to send again, there was a
separate issue whereby we wouldn't queue another `SyncbackDraftTask` to
update the server with the latest information. Since our changes were
persisted to the DB, we thought we had no changes, and therefore didn't
need to queue a `SyncbackDraftTask`.
The fix to this is to always force the creation of a `SyncbackDraftTask`
before send regardless of the state of the `DraftStoreProxy`.
Test Plan: new tests. Lots of manual testing
Reviewers: bengotow
Reviewed By: bengotow
Subscribers: mg
Maniphest Tasks: T4291
Differential Revision: https://phab.nylas.com/D2156
2015-10-22 01:33:43 +08:00
|
|
|
describe "isDependentTask", ->
  it "should return true if another, older ChangeMailTask involves the same threads", ->
    # Three tasks with ascending creation dates; b overlaps a on 't3',
    # c overlaps b on 't7', and c shares nothing with a.
    a = new ChangeMailTask()
    a.threads = ['t1', 't2', 't3']
    a.creationDate = new Date(1000)
    b = new ChangeMailTask()
    b.threads = ['t3', 't4', 't7']
    b.creationDate = new Date(2000)
    c = new ChangeMailTask()
    c.threads = ['t0', 't7']
    c.creationDate = new Date(3000)

    # Dependence only flows from newer tasks onto overlapping older ones.
    expect(a.isDependentTask(b)).toEqual(false)
    expect(a.isDependentTask(c)).toEqual(false)
    expect(b.isDependentTask(a)).toEqual(true)
    expect(c.isDependentTask(a)).toEqual(false)
    expect(c.isDependentTask(b)).toEqual(true)