2016-11-29 09:33:14 +08:00
|
|
|
import _ from 'underscore';
|
|
|
|
import {
|
|
|
|
Actions,
|
2016-12-03 06:13:13 +08:00
|
|
|
Thread,
|
|
|
|
Message,
|
2016-12-03 07:50:17 +08:00
|
|
|
NylasAPIHelpers,
|
2016-11-29 09:33:14 +08:00
|
|
|
DatabaseStore,
|
|
|
|
MailRulesProcessor,
|
|
|
|
} from 'nylas-exports';
|
|
|
|
|
|
|
|
/**
|
|
|
|
 * This ingests deltas from multiple sources. One is from local-sync, the
|
|
|
|
* other is from n1-cloud. Both sources use
|
|
|
|
* isomorphic-core/src/delta-stream-builder to generate the delta stream.
|
|
|
|
*
|
|
|
|
* In both cases we are given the JSON serialized form of a `Transaction`
|
|
|
|
* model. An example Thread delta would look like:
|
|
|
|
*
|
|
|
|
* modelDelta = {
|
|
|
|
* id: 518,
|
|
|
|
* event: "modify",
|
|
|
|
* object: "thread",
|
|
|
|
* objectId: 2887,
|
|
|
|
* changedFields: ["subject", "unread"],
|
|
|
|
* attributes: {
|
|
|
|
* id: 2887,
|
|
|
|
* object: 'thread',
|
|
|
|
* account_id: 2,
|
|
|
|
* subject: "Hello World",
|
|
|
|
* unread: true,
|
|
|
|
* ...
|
|
|
|
* }
|
|
|
|
* }
|
|
|
|
*
|
|
|
|
* An example Metadata delta would look like:
|
|
|
|
*
|
|
|
|
* metadataDelta = {
|
|
|
|
* id: 519,
|
|
|
|
* event: "create",
|
|
|
|
* object: "metadata",
|
|
|
|
* objectId: 8876,
|
|
|
|
* changedFields: ["version", "object"],
|
|
|
|
* attributes: {
|
|
|
|
* id: 8876,
|
|
|
|
* value: {link_clicks: 1},
|
|
|
|
* object: "metadata",
|
|
|
|
* version: 2,
|
|
|
|
* plugin_id: "link-tracking",
|
|
|
|
* object_id: 2887,
|
|
|
|
 *       object_type: "thread",
|
|
|
|
* account_id: 2,
|
|
|
|
* }
|
|
|
|
* }
|
|
|
|
*
|
|
|
|
* The `object` may be "thread", "message", "metadata", or any other model
|
|
|
|
* type we support
|
|
|
|
*/
|
|
|
|
class DeltaProcessor {
|
2016-12-03 03:40:59 +08:00
|
|
|
async process(rawDeltas = []) {
|
|
|
|
try {
|
|
|
|
const deltas = await this._decorateDeltas(rawDeltas);
|
|
|
|
Actions.longPollReceivedRawDeltas(deltas);
|
|
|
|
Actions.longPollReceivedRawDeltasPing(deltas.length);
|
|
|
|
|
[local-sync] Correctly sync folders and labels
This commit will correctly keep track of folder and label ids when
creating them from N1.
Previously, when we sent the request to create a folder or label to our api,
we would immediately get back a serverId because it was created optimistically
in the back end— given that K2 is strictly non-optimistic, we won’t have a serverId
until some undetermined time in the future, and we need to somehow reference
the object that /was/ optimistically created in N1 to update the ui when
we do get the server id.
Since we can deterministically generate ids for folders and labels,
we "guess" what its going to be, and include it in the props of the syncback request
returned to N1. This is the simplest solution to get thing working
correctly right now, but we’ll need to revisit this in the future for
other types of objects (drafts, contacts, events), and revisit how we
will manage optimistic updates in N1 when we merge the 2 codebases
with K2 (given that K2 was designed to be non-optimisitc).
2016-12-06 10:50:26 +08:00
|
|
|
const {
|
|
|
|
modelDeltas,
|
|
|
|
accountDeltas,
|
|
|
|
metadataDeltas,
|
|
|
|
syncbackRequestDeltas,
|
|
|
|
} = this._extractDeltaTypes(deltas);
|
2016-12-03 03:40:59 +08:00
|
|
|
this._handleAccountDeltas(accountDeltas);
|
|
|
|
|
|
|
|
const models = await this._saveModels(modelDeltas);
|
|
|
|
await this._saveMetadata(metadataDeltas);
|
|
|
|
await this._notifyOfNewMessages(models.created);
|
[local-sync] Correctly sync folders and labels
This commit will correctly keep track of folder and label ids when
creating them from N1.
Previously, when we sent the request to create a folder or label to our api,
we would immediately get back a serverId because it was created optimistically
in the back end— given that K2 is strictly non-optimistic, we won’t have a serverId
until some undetermined time in the future, and we need to somehow reference
the object that /was/ optimistically created in N1 to update the ui when
we do get the server id.
Since we can deterministically generate ids for folders and labels,
we "guess" what its going to be, and include it in the props of the syncback request
returned to N1. This is the simplest solution to get thing working
correctly right now, but we’ll need to revisit this in the future for
other types of objects (drafts, contacts, events), and revisit how we
will manage optimistic updates in N1 when we merge the 2 codebases
with K2 (given that K2 was designed to be non-optimisitc).
2016-12-06 10:50:26 +08:00
|
|
|
this._notifyOfSyncbackRequestDeltas(syncbackRequestDeltas)
|
2016-12-03 03:40:59 +08:00
|
|
|
} catch (err) {
|
2016-12-03 04:11:48 +08:00
|
|
|
console.error(rawDeltas)
|
2016-12-03 03:40:59 +08:00
|
|
|
console.error("DeltaProcessor: Process failed.", err)
|
|
|
|
NylasEnv.reportError(err);
|
|
|
|
} finally {
|
|
|
|
Actions.longPollProcessedDeltas()
|
|
|
|
}
|
2016-11-29 09:33:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
/**
|
|
|
|
* Create a (non-enumerable) reference from the attributes which we
|
|
|
|
* carry forward back to their original deltas. This allows us to
|
|
|
|
* mark the deltas that the app ignores later in the process.
|
|
|
|
*/
|
2016-12-03 03:40:59 +08:00
|
|
|
_decorateDeltas(rawDeltas) {
|
2016-11-29 09:33:14 +08:00
|
|
|
rawDeltas.forEach((delta) => {
|
|
|
|
if (!delta.attributes) return;
|
|
|
|
Object.defineProperty(delta.attributes, '_delta', {
|
|
|
|
get() { return delta; },
|
|
|
|
});
|
|
|
|
})
|
|
|
|
return rawDeltas
|
|
|
|
}
|
|
|
|
|
2016-12-03 03:40:59 +08:00
|
|
|
_extractDeltaTypes(rawDeltas) {
|
2016-11-29 09:33:14 +08:00
|
|
|
const modelDeltas = []
|
|
|
|
const accountDeltas = []
|
[local-sync] Correctly sync folders and labels
This commit will correctly keep track of folder and label ids when
creating them from N1.
Previously, when we sent the request to create a folder or label to our api,
we would immediately get back a serverId because it was created optimistically
in the back end— given that K2 is strictly non-optimistic, we won’t have a serverId
until some undetermined time in the future, and we need to somehow reference
the object that /was/ optimistically created in N1 to update the ui when
we do get the server id.
Since we can deterministically generate ids for folders and labels,
we "guess" what its going to be, and include it in the props of the syncback request
returned to N1. This is the simplest solution to get thing working
correctly right now, but we’ll need to revisit this in the future for
other types of objects (drafts, contacts, events), and revisit how we
will manage optimistic updates in N1 when we merge the 2 codebases
with K2 (given that K2 was designed to be non-optimisitc).
2016-12-06 10:50:26 +08:00
|
|
|
const syncbackRequestDeltas = []
|
2016-11-29 09:33:14 +08:00
|
|
|
const metadataDeltas = []
|
|
|
|
rawDeltas.forEach((delta) => {
|
|
|
|
if (delta.object === "metadata") {
|
|
|
|
metadataDeltas.push(delta)
|
|
|
|
} else if (delta.object === "account") {
|
|
|
|
accountDeltas.push(delta)
|
[local-sync] Correctly sync folders and labels
This commit will correctly keep track of folder and label ids when
creating them from N1.
Previously, when we sent the request to create a folder or label to our api,
we would immediately get back a serverId because it was created optimistically
in the back end— given that K2 is strictly non-optimistic, we won’t have a serverId
until some undetermined time in the future, and we need to somehow reference
the object that /was/ optimistically created in N1 to update the ui when
we do get the server id.
Since we can deterministically generate ids for folders and labels,
we "guess" what its going to be, and include it in the props of the syncback request
returned to N1. This is the simplest solution to get thing working
correctly right now, but we’ll need to revisit this in the future for
other types of objects (drafts, contacts, events), and revisit how we
will manage optimistic updates in N1 when we merge the 2 codebases
with K2 (given that K2 was designed to be non-optimisitc).
2016-12-06 10:50:26 +08:00
|
|
|
} else if (delta.object === "syncbackRequest") {
|
|
|
|
syncbackRequestDeltas.push(delta)
|
|
|
|
modelDeltas.push(delta)
|
2016-11-29 09:33:14 +08:00
|
|
|
} else {
|
|
|
|
modelDeltas.push(delta)
|
|
|
|
}
|
|
|
|
})
|
[local-sync] Correctly sync folders and labels
This commit will correctly keep track of folder and label ids when
creating them from N1.
Previously, when we sent the request to create a folder or label to our api,
we would immediately get back a serverId because it was created optimistically
in the back end— given that K2 is strictly non-optimistic, we won’t have a serverId
until some undetermined time in the future, and we need to somehow reference
the object that /was/ optimistically created in N1 to update the ui when
we do get the server id.
Since we can deterministically generate ids for folders and labels,
we "guess" what its going to be, and include it in the props of the syncback request
returned to N1. This is the simplest solution to get thing working
correctly right now, but we’ll need to revisit this in the future for
other types of objects (drafts, contacts, events), and revisit how we
will manage optimistic updates in N1 when we merge the 2 codebases
with K2 (given that K2 was designed to be non-optimisitc).
2016-12-06 10:50:26 +08:00
|
|
|
return {modelDeltas, metadataDeltas, accountDeltas, syncbackRequestDeltas}
|
2016-11-29 09:33:14 +08:00
|
|
|
}
|
|
|
|
|
|
|
|
_handleAccountDeltas = (accountDeltas) => {
|
|
|
|
const {modify} = this._clusterDeltas(accountDeltas);
|
|
|
|
if (!modify.account) return;
|
|
|
|
for (const accountJSON of _.values(modify.account)) {
|
|
|
|
Actions.updateAccount(accountJSON.account_id, {syncState: accountJSON.sync_state});
|
|
|
|
if (accountJSON.sync_state !== "running") {
|
|
|
|
Actions.recordUserEvent('Account Sync Errored', {
|
|
|
|
accountId: accountJSON.account_id,
|
|
|
|
syncState: accountJSON.sync_state,
|
|
|
|
});
|
|
|
|
}
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
[local-sync] Correctly sync folders and labels
This commit will correctly keep track of folder and label ids when
creating them from N1.
Previously, when we sent the request to create a folder or label to our api,
we would immediately get back a serverId because it was created optimistically
in the back end— given that K2 is strictly non-optimistic, we won’t have a serverId
until some undetermined time in the future, and we need to somehow reference
the object that /was/ optimistically created in N1 to update the ui when
we do get the server id.
Since we can deterministically generate ids for folders and labels,
we "guess" what its going to be, and include it in the props of the syncback request
returned to N1. This is the simplest solution to get thing working
correctly right now, but we’ll need to revisit this in the future for
other types of objects (drafts, contacts, events), and revisit how we
will manage optimistic updates in N1 when we merge the 2 codebases
with K2 (given that K2 was designed to be non-optimisitc).
2016-12-06 10:50:26 +08:00
|
|
|
_notifyOfSyncbackRequestDeltas(syncbackRequestDeltas = []) {
|
|
|
|
if (syncbackRequestDeltas.length === 0) { return }
|
|
|
|
const groupedDeltas = {succeeded: [], failed: [], created: []}
|
|
|
|
|
|
|
|
syncbackRequestDeltas.forEach((delta) => {
|
|
|
|
const {status} = delta.attributes
|
|
|
|
if (status === 'FAILED') {
|
|
|
|
groupedDeltas.failed.push(delta)
|
|
|
|
}
|
|
|
|
if (status === 'SUCCEEDED') {
|
|
|
|
groupedDeltas.succeeded.push(delta)
|
|
|
|
}
|
|
|
|
if (status === 'NEW') {
|
|
|
|
groupedDeltas.created.push(delta)
|
|
|
|
}
|
|
|
|
})
|
|
|
|
Actions.didReceiveSyncbackRequestDeltas(groupedDeltas)
|
|
|
|
}
|
|
|
|
|
2016-12-03 03:40:59 +08:00
|
|
|
async _saveModels(modelDeltas) {
|
2016-11-29 09:33:14 +08:00
|
|
|
const {create, modify, destroy} = this._clusterDeltas(modelDeltas);
|
2016-12-03 03:40:59 +08:00
|
|
|
|
|
|
|
const created = await Promise.props(Object.keys(create).map((type) =>
|
2016-12-03 07:50:17 +08:00
|
|
|
NylasAPIHelpers.handleModelResponse(_.values(create[type]))
|
2016-12-03 03:40:59 +08:00
|
|
|
));
|
|
|
|
const updated = await Promise.props(Object.keys(modify).map((type) =>
|
2016-12-03 07:50:17 +08:00
|
|
|
NylasAPIHelpers.handleModelResponse(_.values(modify[type]))
|
2016-12-03 03:40:59 +08:00
|
|
|
));
|
|
|
|
await Promise.map(destroy, this._handleDestroyDelta);
|
|
|
|
|
|
|
|
return {created, updated};
|
2016-11-29 09:33:14 +08:00
|
|
|
}
|
|
|
|
|
2016-12-03 03:40:59 +08:00
|
|
|
  /**
   * Persists metadata deltas: applies each metadata JSON blob to the
   * model it annotates and saves the updated models, one database
   * transaction per object type.
   *
   * @param {Array} deltas - metadata deltas (event 'create' or 'modify')
   * @returns {Promise} resolves when all affected models are persisted
   */
  async _saveMetadata(deltas) {
    // Index metadata attributes by the id of the object they annotate.
    // 'modify' events are looped second so they overwrite 'create'
    // events for the same object_id (last-write-wins de-dupe).
    const all = {};

    for (const delta of deltas.filter(d => d.event === 'create')) {
      all[delta.attributes.object_id] = delta.attributes;
    }
    for (const delta of deltas.filter(d => d.event === 'modify')) {
      all[delta.attributes.object_id] = delta.attributes;
    }
    // e.g. { thread: [...], message: [...] }
    const allByObjectType = _.groupBy(_.values(all), "object_type")

    // NOTE(review): `Promise.map` is Bluebird-specific — assumes the
    // global Promise is Bluebird-patched in this environment; confirm.
    return Promise.map(Object.keys(allByObjectType), (objType) => {
      const jsons = allByObjectType[objType]
      const klass = NylasAPIHelpers.apiObjectToClassMap[objType];
      const objectIds = jsons.map(j => j.object_id)

      return DatabaseStore.inTransaction((t) => {
        return this._findModelsForMetadata(t, klass, objectIds).then((modelsByObjectId) => {
          const models = [];
          Object.keys(modelsByObjectId).forEach((objectId) => {
            const model = modelsByObjectId[objectId];
            const metadataJSON = all[objectId];
            // NOTE(review): `model` (not `modelWithMetadata`) is what is
            // persisted below — this assumes applyPluginMetadata returns
            // or mutates the same instance; verify against the model class.
            const modelWithMetadata = model.applyPluginMetadata(metadataJSON.plugin_id, metadataJSON.value);
            const localMetadatum = modelWithMetadata.metadataObjectForPluginId(metadataJSON.plugin_id);
            // Carry the remote version forward on the local metadatum.
            localMetadatum.version = metadataJSON.version;
            models.push(model);
          })
          return t.persistModels(models)
        });
      });
    })
  }
|
|
|
|
|
2016-12-06 02:19:37 +08:00
|
|
|
/**
|
|
|
|
@param ids An array of metadata object_ids
|
|
|
|
@returns A map of the object_ids to models in the database, resolving the
|
|
|
|
IDs as necessary. Must be a hashmap because the metadata object_ids may not
|
|
|
|
actually be present in the resulting models.
|
|
|
|
*/
|
2016-12-03 06:13:13 +08:00
|
|
|
  _findModelsForMetadata(t, klass, ids) {
    if (klass === Thread) {
      // go through the Message table first, since local Thread IDs may be
      // the (static) ID of any Message in the thread
      // We prepend 't:' to thread IDs to avoid global object ID conflicts
      const messageIds = ids.map(i => i.slice(2))  // strip the 't:' prefix
      return t.findAll(Message, {id: messageIds}).then((messages) => {
        if (messages.length !== messageIds.length) {
          throw new Error(`Didn't find message for each thread. Thread IDs from remote: ${ids}`);
        }
        const threadIds = messages.map(m => m.threadId);
        // NOTE(review): `ids[threadIds.indexOf(thread.id)]` assumes the
        // `messages` array came back in the same order as `messageIds`
        // (and hence `ids`) — if findAll does not guarantee order,
        // metadata could be mapped to the wrong thread. Confirm.
        return t.findAll(Thread, {id: threadIds}).then((threads) => {
          const map = {};
          for (const thread of threads) {
            const pluginObjectId = ids[threadIds.indexOf(thread.id)];
            map[pluginObjectId] = thread;
          }
          return map;
        });
      });
    }
    // Non-thread types: the metadata object_id is the model's own id, so
    // a direct lookup suffices. Missing models are simply absent from the
    // returned map (per the contract documented above).
    return t.findAll(klass, {id: ids}).then((models) => {
      const map = {};
      for (const model of models) {
        const pluginObjectId = model.id;
        map[pluginObjectId] = model;
      }
      return map;
    });
  }
|
|
|
|
|
2016-11-29 09:33:14 +08:00
|
|
|
/**
|
|
|
|
* Group deltas by object type so we can mutate the cache efficiently.
|
|
|
|
* NOTE: This code must not just accumulate creates, modifies and
|
|
|
|
* destroys but also de-dupe them. We cannot call
|
|
|
|
* "persistModels(itemA, itemA, itemB)" or it will throw an exception
|
|
|
|
*/
|
|
|
|
_clusterDeltas(deltas) {
|
|
|
|
const create = {};
|
|
|
|
const modify = {};
|
|
|
|
const destroy = [];
|
|
|
|
for (const delta of deltas) {
|
|
|
|
if (delta.event === 'create') {
|
|
|
|
if (!create[delta.object]) { create[delta.object] = {}; }
|
|
|
|
create[delta.object][delta.attributes.id] = delta.attributes;
|
|
|
|
} else if (delta.event === 'modify') {
|
|
|
|
if (!modify[delta.object]) { modify[delta.object] = {}; }
|
|
|
|
modify[delta.object][delta.attributes.id] = delta.attributes;
|
|
|
|
} else if (delta.event === 'delete') {
|
|
|
|
destroy.push(delta);
|
|
|
|
}
|
|
|
|
}
|
|
|
|
|
|
|
|
return {create, modify, destroy};
|
|
|
|
}
|
|
|
|
|
2016-12-03 03:40:59 +08:00
|
|
|
async _notifyOfNewMessages(created) {
|
|
|
|
try {
|
|
|
|
await MailRulesProcessor.processMessages(created.message || [])
|
|
|
|
} catch (err) {
|
|
|
|
console.error("DeltaProcessor: Running mail rules on incoming mail failed.")
|
|
|
|
}
|
|
|
|
Actions.didPassivelyReceiveCreateDeltas(created)
|
|
|
|
}
|
|
|
|
|
|
|
|
_handleDestroyDelta(delta) {
|
2016-12-03 07:50:17 +08:00
|
|
|
const klass = NylasAPIHelpers.apiObjectToClassMap[delta.object];
|
2016-12-01 09:00:11 +08:00
|
|
|
if (!klass) { return Promise.resolve(); }
|
2016-11-29 09:33:14 +08:00
|
|
|
|
2016-12-01 09:00:11 +08:00
|
|
|
return DatabaseStore.inTransaction(t => {
|
|
|
|
return t.find(klass, delta.objectId).then((model) => {
|
|
|
|
if (!model) { return Promise.resolve(); }
|
|
|
|
return t.unpersistModel(model);
|
2016-11-29 09:33:14 +08:00
|
|
|
});
|
2016-12-01 09:00:11 +08:00
|
|
|
});
|
2016-11-29 09:33:14 +08:00
|
|
|
}
|
|
|
|
}
|
2016-12-03 07:50:17 +08:00
|
|
|
|
2016-11-29 09:33:14 +08:00
|
|
|
// Singleton: the app shares one DeltaProcessor instance across all consumers.
export default new DeltaProcessor()
|