mirror of
https://github.com/Foundry376/Mailspring.git
synced 2025-12-09 06:01:09 +08:00
fix(counts): Switch to simpler SQL-based counters, introduce total counts
Summary: The old approach we were using to track unread counts by category was really complicated, because it involved computing changes to the counts in JavaScript and then syncing them back to the database, from each process that was making queries. Rather than try to fix that, this diff moves us to a new approach in which the counts are maintained by executing queries before and after threads are modified, to unapply and then reapply their contributions to the counters. Doing this in the database, in the same transactions as the thread modifications themselves, ensures the counts are internally consistent with the Threads table. This SQL approach is also able to compute initial counts far faster — initializing totals and unreads in a 1GB edgehill.db takes about 1 second on my machine. Test Plan: All old tests removed; new tests coming. Reviewers: evan, juan Reviewed By: juan Differential Revision: https://phab.nylas.com/D2757
This commit is contained in:
parent
3c4702d40d
commit
3b9ba55bbf
3 changed files with 77 additions and 444 deletions
|
|
@ -1,333 +0,0 @@
|
|||
_ = require 'underscore'
|
||||
DatabaseStore = require '../../src/flux/stores/database-store'
|
||||
DatabaseTransaction = require '../../src/flux/stores/database-transaction'
|
||||
ThreadCountsStore = require '../../src/flux/stores/thread-counts-store'
|
||||
Thread = require '../../src/flux/models/thread'
|
||||
Category = require '../../src/flux/models/category'
|
||||
Matcher = require '../../src/flux/attributes/matcher'
|
||||
WindowBridge = require '../../src/window-bridge'
|
||||
|
||||
category1 = new Category(id: "l1", name: "inbox", displayName: "Inbox")
|
||||
category2 = new Category(id: "l2", name: "archive", displayName: "Archive")
|
||||
category3 = new Category(id: "l3", displayName: "Happy Days")
|
||||
category4 = new Category(id: "l4", displayName: "Sad Days")
|
||||
category5 = new Category(id: "l5", name: 'all', displayName: "All Mail")
|
||||
category6 = new Category(id: "l6", name: 'trash', displayName: "Trash")
|
||||
|
||||
# Values here are the "after" state. Below, the spy on the query returns the
|
||||
# "current" state.
|
||||
threadA = new Thread
|
||||
id: "A"
|
||||
unread: true
|
||||
categories: [category1, category4, category5]
|
||||
categoriesType: 'labels'
|
||||
threadB = new Thread
|
||||
id: "B"
|
||||
unread: true
|
||||
categories: [category3, category5]
|
||||
categoriesType: 'labels'
|
||||
threadC = new Thread
|
||||
id: "C"
|
||||
unread: false
|
||||
categories: [category1, category3, category5]
|
||||
categoriesType: 'labels'
|
||||
threadD = new Thread
|
||||
id: "D"
|
||||
unread: true
|
||||
categories: [category6]
|
||||
categoriesType: 'labels'
|
||||
threadE = new Thread
|
||||
id: "E"
|
||||
unread: true
|
||||
categories: [category1, category5]
|
||||
categoriesType: 'labels'
|
||||
|
||||
|
||||
describe "ThreadCountsStore", ->
|
||||
describe "unreadCountForCategoryId", ->
|
||||
it "returns null if no count exists for the category id", ->
|
||||
expect(ThreadCountsStore.unreadCountForCategoryId('nan')).toBe(null)
|
||||
|
||||
it "returns the count plus any unsaved deltas", ->
|
||||
ThreadCountsStore._counts =
|
||||
'b': 3
|
||||
'a': 5
|
||||
ThreadCountsStore._deltas =
|
||||
'a': -1
|
||||
expect(ThreadCountsStore.unreadCountForCategoryId('a')).toBe(4)
|
||||
expect(ThreadCountsStore.unreadCountForCategoryId('b')).toBe(3)
|
||||
|
||||
describe "when the mutation observer reports count changes", ->
|
||||
describe "in the work window", ->
|
||||
beforeEach ->
|
||||
spyOn(NylasEnv, 'isWorkWindow').andReturn(true)
|
||||
|
||||
it "should merge count deltas into existing count detlas", ->
|
||||
ThreadCountsStore._deltas =
|
||||
'l1': -1
|
||||
'l2': 2
|
||||
ThreadCountsStore._onCountsChanged({'l1': -1, 'l2': 1, 'l3': 2})
|
||||
expect(ThreadCountsStore._deltas).toEqual({
|
||||
'l1': -2,
|
||||
'l2': 3,
|
||||
'l3': 2
|
||||
})
|
||||
|
||||
it "should queue a save of the counts", ->
|
||||
spyOn(ThreadCountsStore, '_saveCountsSoon')
|
||||
ThreadCountsStore._onCountsChanged({'l1': -1, 'l2': 1, 'l3': 2})
|
||||
expect(ThreadCountsStore._saveCountsSoon).toHaveBeenCalled()
|
||||
|
||||
describe "in other windows", ->
|
||||
beforeEach ->
|
||||
spyOn(NylasEnv, 'isWorkWindow').andReturn(false)
|
||||
|
||||
it "should use the WindowBridge to forward the invocation to the work window", ->
|
||||
spyOn(WindowBridge, 'runInWorkWindow')
|
||||
payload = {'l1': -1, 'l2': 1, 'l3': 2}
|
||||
ThreadCountsStore._onCountsChanged(payload)
|
||||
expect(WindowBridge.runInWorkWindow).toHaveBeenCalledWith('ThreadCountsStore', '_onCountsChanged', [payload])
|
||||
|
||||
describe "when counts are persisted", ->
|
||||
it "should update it's _counts cache and trigger", ->
|
||||
newCounts = {
|
||||
'abc': 1
|
||||
}
|
||||
spyOn(ThreadCountsStore, 'trigger')
|
||||
ThreadCountsStore._onCountsBlobRead(newCounts)
|
||||
expect(ThreadCountsStore._counts).toEqual(newCounts)
|
||||
expect(ThreadCountsStore.trigger).toHaveBeenCalled()
|
||||
|
||||
describe "_fetchCountsMissing", ->
|
||||
beforeEach ->
|
||||
ThreadCountsStore._categories = [
|
||||
new Category(id: "l1", name: "inbox", displayName: "Inbox", accountId: 'a1'),
|
||||
new Category(id: "l2", name: "archive", displayName: "Archive", accountId: 'a1'),
|
||||
new Category(id: "l3", displayName: "Happy Days", accountId: 'a1'),
|
||||
new Category(id: "l4", displayName: "Sad Days", accountId: 'a1')
|
||||
]
|
||||
ThreadCountsStore._deltas =
|
||||
l1: 10
|
||||
l2: 0
|
||||
l3: 3
|
||||
l4: 12
|
||||
ThreadCountsStore._counts =
|
||||
l1: 10
|
||||
l2: 0
|
||||
|
||||
@countResolve = null
|
||||
@countReject = null
|
||||
spyOn(ThreadCountsStore, '_fetchCountForCategory').andCallFake =>
|
||||
new Promise (resolve, reject) =>
|
||||
@countResolve = resolve
|
||||
@countReject = reject
|
||||
|
||||
it "should call _fetchCountForCategory for the first category not already in the counts cache", ->
|
||||
ThreadCountsStore._fetchCountsMissing()
|
||||
calls = ThreadCountsStore._fetchCountForCategory.calls
|
||||
expect(calls.length).toBe(1)
|
||||
expect(calls[0].args[0]).toBe(ThreadCountsStore._categories[2])
|
||||
|
||||
it "should set the _deltas for the category it's counting back to zero", ->
|
||||
ThreadCountsStore._fetchCountsMissing()
|
||||
expect(ThreadCountsStore._deltas.l3).toBe(0)
|
||||
|
||||
describe "when the count promise finishes", ->
|
||||
it "should add it to the count cache", ->
|
||||
ThreadCountsStore._fetchCountsMissing()
|
||||
advanceClock()
|
||||
@countResolve(4)
|
||||
advanceClock()
|
||||
expect(ThreadCountsStore._counts.l3).toEqual(4)
|
||||
|
||||
it "should call _fetchCountsMissing again to populate the next missing count", ->
|
||||
ThreadCountsStore._fetchCountsMissing()
|
||||
spyOn(ThreadCountsStore, '_fetchCountsMissing')
|
||||
advanceClock()
|
||||
@countResolve(4)
|
||||
advanceClock()
|
||||
advanceClock(10001)
|
||||
expect(ThreadCountsStore._fetchCountsMissing).toHaveBeenCalled()
|
||||
|
||||
describe "when deltas appear during a count", ->
|
||||
it "should not set the count and count again in 10 seconds", ->
|
||||
ThreadCountsStore._fetchCountsMissing()
|
||||
spyOn(ThreadCountsStore, '_fetchCountsMissing')
|
||||
advanceClock()
|
||||
ThreadCountsStore._deltas.l3 = -1
|
||||
@countResolve(4)
|
||||
advanceClock()
|
||||
expect(ThreadCountsStore._counts.l3).toBeUndefined()
|
||||
expect(ThreadCountsStore._fetchCountsMissing).not.toHaveBeenCalled()
|
||||
advanceClock(10001)
|
||||
expect(ThreadCountsStore._fetchCountsMissing).toHaveBeenCalled()
|
||||
|
||||
describe "when a count fails", ->
|
||||
it "should not immediately try to count any other categories", ->
|
||||
spyOn(console, "warn")
|
||||
ThreadCountsStore._fetchCountsMissing()
|
||||
spyOn(ThreadCountsStore, '_fetchCountsMissing')
|
||||
spyOn(console, 'error')
|
||||
advanceClock()
|
||||
@countReject(new Error("Oh man something really bad."))
|
||||
advanceClock()
|
||||
expect(console.warn).toHaveBeenCalled()
|
||||
expect(ThreadCountsStore._fetchCountsMissing).not.toHaveBeenCalled()
|
||||
|
||||
describe "_fetchCountForCategory", ->
|
||||
it "should make the appropriate category database query", ->
|
||||
spyOn(DatabaseStore, 'count')
|
||||
Matcher.muid = 0
|
||||
ThreadCountsStore._fetchCountForCategory(new Category(id: 'l1', accountId: 'a1'))
|
||||
Matcher.muid = 0
|
||||
expect(DatabaseStore.count).toHaveBeenCalledWith(Thread, [
|
||||
Thread.attributes.categories.contains('l1'),
|
||||
Thread.attributes.accountId.equal('a1'),
|
||||
Thread.attributes.unread.equal(true),
|
||||
])
|
||||
|
||||
describe "_saveCounts", ->
|
||||
beforeEach ->
|
||||
ThreadCountsStore._counts =
|
||||
'b': 3
|
||||
'a': 5
|
||||
ThreadCountsStore._deltas =
|
||||
'a': -1
|
||||
'c': 2
|
||||
|
||||
it "should merge the deltas into the counts and reset the deltas, ignoring any deltas for which the initial count has not been run", ->
|
||||
ThreadCountsStore._saveCounts()
|
||||
expect(ThreadCountsStore._counts).toEqual({
|
||||
'b': 3
|
||||
'a': 4
|
||||
})
|
||||
|
||||
it "should persist the new counts to the database", ->
|
||||
spyOn(DatabaseTransaction.prototype, 'persistJSONBlob')
|
||||
runs =>
|
||||
ThreadCountsStore._saveCounts()
|
||||
waitsFor =>
|
||||
DatabaseTransaction.prototype.persistJSONBlob.callCount > 0
|
||||
runs =>
|
||||
expect(DatabaseTransaction.prototype.persistJSONBlob).toHaveBeenCalledWith(ThreadCountsStore.JSONBlobKey, ThreadCountsStore._counts)
|
||||
|
||||
describe "CategoryDatabaseMutationObserver", ->
|
||||
beforeEach ->
|
||||
@queryResolves = []
|
||||
@query = jasmine.createSpy('query').andCallFake =>
|
||||
new Promise (resolve, reject) =>
|
||||
@queryResolves.push(resolve)
|
||||
|
||||
@countsDidChange = jasmine.createSpy('countsDidChange')
|
||||
@m = new ThreadCountsStore.CategoryDatabaseMutationObserver(@countsDidChange)
|
||||
|
||||
describe "given a set of modifying models", ->
|
||||
scenarios = [
|
||||
{
|
||||
name: 'Persisting a three threads, two unread, all in all mail'
|
||||
type: 'persist',
|
||||
threads: [threadA, threadB, threadC],
|
||||
beforePersistQueryResults: [
|
||||
{id: threadA.id, catId: category1.id},
|
||||
{id: threadA.id, catId: category3.id},
|
||||
{id: threadA.id, catId: category5.id},
|
||||
{id: threadB.id, catId: category2.id},
|
||||
{id: threadB.id, catId: category5.id},
|
||||
{id: threadB.id, catId: category3.id},
|
||||
{id: threadC.id, catId: category5.id},
|
||||
]
|
||||
beforePersistExpected: {
|
||||
l1: -1,
|
||||
l3: -2,
|
||||
l2: -1,
|
||||
l5: -3
|
||||
}
|
||||
afterPersistExpected: {
|
||||
l3: -1,
|
||||
l5: -1,
|
||||
l2: -1,
|
||||
l4: 1,
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'Unpersisting a normal set of threads, all in all mail'
|
||||
type: 'unpersist',
|
||||
threads: [threadA, threadB, threadC],
|
||||
beforePersistQueryResults: [
|
||||
{id: threadA.id, catId: category1.id},
|
||||
{id: threadA.id, catId: category3.id},
|
||||
{id: threadA.id, catId: category5.id},
|
||||
{id: threadB.id, catId: category2.id},
|
||||
{id: threadB.id, catId: category5.id},
|
||||
{id: threadB.id, catId: category3.id},
|
||||
{id: threadC.id, catId: category5.id},
|
||||
]
|
||||
beforePersistExpected: {
|
||||
l1: -1,
|
||||
l3: -2,
|
||||
l2: -1,
|
||||
l5: -3
|
||||
}
|
||||
afterPersistExpected: {
|
||||
l1: -1,
|
||||
l5: -3,
|
||||
l3: -2,
|
||||
l2: -1
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'Thread D going from inbox to trash'
|
||||
type: 'persist',
|
||||
threads: [threadD],
|
||||
beforePersistQueryResults: [
|
||||
{id: threadD.id, catId: category1.id},
|
||||
{id: threadD.id, catId: category3.id},
|
||||
{id: threadD.id, catId: category4.id},
|
||||
]
|
||||
beforePersistExpected: {
|
||||
l1: -1,
|
||||
l3: -1,
|
||||
l4: -1
|
||||
}
|
||||
afterPersistExpected: {
|
||||
l1: -1,
|
||||
l3: -1,
|
||||
l4: -1,
|
||||
}
|
||||
},
|
||||
{
|
||||
name: 'Thread E going from trash to inbox'
|
||||
type: 'persist',
|
||||
threads: [threadE],
|
||||
beforePersistQueryResults: [
|
||||
]
|
||||
beforePersistExpected: {
|
||||
}
|
||||
afterPersistExpected: {
|
||||
l1: 1,
|
||||
l5: 1
|
||||
}
|
||||
},
|
||||
]
|
||||
scenarios.forEach ({name, type, threads, beforePersistQueryResults, beforePersistExpected, afterPersistExpected}) ->
|
||||
it "should call countsDidChange with the category membership deltas (#{name})", ->
|
||||
beforePromise = @m.beforeDatabaseChange(@query, {
|
||||
type: type
|
||||
objects: threads,
|
||||
objectIds: _.pluck(threads, 'id'),
|
||||
objectClass: Thread.name
|
||||
})
|
||||
expect(@query.callCount).toBe(1)
|
||||
expect(@query.calls[0].args[0]).toEqual("SELECT `Thread`.id as id, `Thread-Category`.`value` as catId FROM `Thread` INNER JOIN `Thread-Category` ON `Thread`.`id` = `Thread-Category`.`id` WHERE `Thread`.id IN ('#{_.pluck(threads, 'id').join("','")}') AND `Thread`.unread = 1 AND `Thread`.in_all_mail = 1")
|
||||
@queryResolves[0](beforePersistQueryResults)
|
||||
|
||||
waitsForPromise =>
|
||||
beforePromise.then (result) =>
|
||||
expect(result).toEqual({categories: beforePersistExpected})
|
||||
@m.afterDatabaseChange(@query, {
|
||||
type: type
|
||||
objects: threads,
|
||||
objectIds: _.pluck(threads, 'id'),
|
||||
objectClass: Thread.name
|
||||
}, result)
|
||||
expect(@countsDidChange).toHaveBeenCalledWith(afterPersistExpected)
|
||||
|
|
@ -127,7 +127,9 @@ class Thread extends ModelWithMetadata
|
|||
|
||||
@additionalSQLiteConfig:
|
||||
setup: ->
|
||||
['CREATE INDEX IF NOT EXISTS ThreadListIndex ON Thread(last_message_received_timestamp DESC, id)',
|
||||
['CREATE TABLE IF NOT EXISTS `Thread-Counts` (`category_id` TEXT PRIMARY KEY, `unread` INTEGER, `total` INTEGER)',
|
||||
'CREATE UNIQUE INDEX IF NOT EXISTS ThreadCountsIndex ON `Thread-Counts` (category_id DESC)',
|
||||
'CREATE INDEX IF NOT EXISTS ThreadListIndex ON Thread(last_message_received_timestamp DESC, id)',
|
||||
'CREATE INDEX IF NOT EXISTS ThreadListSentIndex ON Thread(last_message_sent_timestamp DESC, id)',
|
||||
'CREATE INDEX IF NOT EXISTS ThreadStarIndex ON Thread(account_id, starred)']
|
||||
|
||||
|
|
|
|||
|
|
@ -1,140 +1,104 @@
|
|||
Reflux = require 'reflux'
|
||||
Rx = require 'rx-lite'
|
||||
_ = require 'underscore'
|
||||
NylasStore = require 'nylas-store'
|
||||
CategoryStore = require './category-store'
|
||||
AccountStore = require './account-store'
|
||||
DatabaseStore = require './database-store'
|
||||
Actions = require '../actions'
|
||||
Thread = require '../models/thread'
|
||||
Category = require '../models/category'
|
||||
WindowBridge = require '../../window-bridge'
|
||||
|
||||
JSONBlobKey = 'UnreadCounts-V3'
|
||||
###
|
||||
Are running two nested SELECT statements really the best option? Yup.
|
||||
For a performance assessment of these queries and other options, see:
|
||||
https://gist.github.com/bengotow/c8b5cd8989c9149ded56
|
||||
|
||||
Note: SUM(unread) works because unread is represented as an int: 0 or 1.
|
||||
###
|
||||
|
||||
# Builds the SQL used to load every per-category counter row from the
# `Thread-Counts` side table maintained by Set/UpdateCountsQuery.
ReadCountsQuery = ->
  sql = "SELECT * FROM `Thread-Counts`"
  sql
|
||||
|
||||
# SQL that (re)builds the entire `Thread-Counts` table in a single pass.
# REPLACE INTO upserts one row per category. SUM(unread) works because
# `unread` is stored as an integer 0/1; COUNT(*) yields the total.
# Only threads with in_all_mail = 1 participate in the counters.
SetCountsQuery = ->
  sql = """
  REPLACE INTO `Thread-Counts` (category_id, unread, total)
  SELECT
  `Thread-Category`.`value` as category_id,
  SUM(unread) as unread,
  COUNT(*) as total
  FROM `Thread`
  INNER JOIN `Thread-Category` ON `Thread`.`id` = `Thread-Category`.`id`
  WHERE
  `Thread`.in_all_mail = 1
  GROUP BY `Thread-Category`.`value`;
  """
  sql
|
||||
|
||||
# SQL that incrementally adjusts `Thread-Counts` for the given thread ids.
# `operator` is '-' to unapply the threads' contributions (run before a
# change) or '+' to reapply them (run after). The COALESCE subselects read
# the current counter value — 0 when the category has no row yet — so that
# REPLACE INTO can upsert the adjusted value.
# NOTE(review): objectIds are interpolated directly into the SQL; assumed to
# be locally generated thread ids, never user input — verify against callers.
UpdateCountsQuery = (objectIds, operator) ->
  objectIdsString = "'#{objectIds.join("','")}'"
  """
  REPLACE INTO `Thread-Counts` (category_id, unread, total)
  SELECT
  `Thread-Category`.`value` as category_id,
  COALESCE((SELECT unread FROM `Thread-Counts` WHERE category_id = `Thread-Category`.`value`), 0) #{operator} SUM(unread) as unread,
  COALESCE((SELECT total FROM `Thread-Counts` WHERE category_id = `Thread-Category`.`value`), 0) #{operator} COUNT(*) as total
  FROM `Thread`
  INNER JOIN `Thread-Category` ON `Thread`.`id` = `Thread-Category`.`id`
  WHERE
  `Thread`.id IN (#{objectIdsString}) AND
  `Thread`.in_all_mail = 1
  GROUP BY `Thread-Category`.`value`
  """
|
||||
|
||||
class CategoryDatabaseMutationObserver
|
||||
constructor: (@_countsDidChange) ->
|
||||
|
||||
beforeDatabaseChange: (query, {type, objects, objectIds, objectClass}) =>
|
||||
if objectClass is Thread.name
|
||||
idString = "'" + objectIds.join("','") + "'"
|
||||
query("SELECT `Thread`.id as id, `Thread-Category`.`value` as catId FROM `Thread` INNER JOIN `Thread-Category` ON `Thread`.`id` = `Thread-Category`.`id` WHERE `Thread`.id IN (#{idString}) AND `Thread`.unread = 1 AND `Thread`.in_all_mail = 1", [])
|
||||
.then (categoryData) =>
|
||||
categories = {}
|
||||
for {id, catId} in categoryData
|
||||
categories[catId] ?= 0
|
||||
categories[catId] -= 1
|
||||
Promise.resolve({categories})
|
||||
query(UpdateCountsQuery(objectIds, '-'))
|
||||
else
|
||||
Promise.resolve()
|
||||
|
||||
afterDatabaseChange: (query, {type, objects, objectIds, objectClass}, beforeResolveValue) =>
|
||||
if objectClass is Thread.name
|
||||
{categories} = beforeResolveValue
|
||||
|
||||
if type is 'persist'
|
||||
for thread in objects
|
||||
continue unless thread.unread
|
||||
continue unless thread.inAllMail
|
||||
for cat in thread.categories
|
||||
categories[cat.id] ?= 0
|
||||
categories[cat.id] += 1
|
||||
|
||||
for key, val of categories
|
||||
delete categories[key] if val is 0
|
||||
|
||||
if Object.keys(categories).length > 0
|
||||
@_countsDidChange(categories)
|
||||
|
||||
Promise.resolve()
|
||||
|
||||
query(UpdateCountsQuery(objectIds, '+'))
|
||||
else
|
||||
Promise.resolve()
|
||||
|
||||
class ThreadCountsStore extends NylasStore
|
||||
CategoryDatabaseMutationObserver: CategoryDatabaseMutationObserver
|
||||
JSONBlobKey: JSONBlobKey
|
||||
|
||||
constructor: ->
|
||||
@_counts = {}
|
||||
@_deltas = {}
|
||||
@_saveCountsSoon ?= _.throttle(@_saveCounts, 1000)
|
||||
|
||||
@_observer = new CategoryDatabaseMutationObserver(@_onCountsChanged)
|
||||
@_observer = new CategoryDatabaseMutationObserver()
|
||||
DatabaseStore.addMutationHook(@_observer)
|
||||
|
||||
if NylasEnv.isWorkWindow()
|
||||
DatabaseStore.findJSONBlob(JSONBlobKey).then(@_onCountsBlobRead)
|
||||
Rx.Observable.fromQuery(DatabaseStore.findAll(Category)).subscribe (categories) =>
|
||||
@_categories = [].concat(categories)
|
||||
@_fetchCountsMissing()
|
||||
if NylasEnv.isMainWindow()
|
||||
# For now, unread counts are only retrieved in the main window.
|
||||
@_onCountsChangedDebounced = _.throttle(@_onCountsChanged, 1000)
|
||||
DatabaseStore.listen (change) =>
|
||||
if change.objectClass is Thread.name
|
||||
@_onCountsChangedDebounced()
|
||||
@_onCountsChangedDebounced()
|
||||
|
||||
else
|
||||
query = DatabaseStore.findJSONBlob(JSONBlobKey)
|
||||
Rx.Observable.fromQuery(query).subscribe(@_onCountsBlobRead)
|
||||
if NylasEnv.isWorkWindow() and not NylasEnv.config.get('nylas.threadCountsValid')
|
||||
@reset()
|
||||
|
||||
# Recomputes every per-category counter from scratch with a single SQL pass.
# Runs inside a transaction so the counters stay consistent with the Thread
# table, then marks the stored counts valid so we skip this on next launch.
reset: =>
  countsStartTime = null
  DatabaseStore.inTransaction (t) =>
    countsStartTime = Date.now()
    DatabaseStore._query(SetCountsQuery())
  .then =>
    NylasEnv.config.set('nylas.threadCountsValid', true)
    # BUG FIX: the original logged the raw start timestamp (epoch ms) labeled
    # as seconds; report the actual elapsed time instead.
    console.log("Recomputed all thread counts in #{(Date.now() - countsStartTime) / 1000}s")
|
||||
|
||||
# Reloads the in-memory counter cache from the `Thread-Counts` table and
# notifies listeners. Invoked (throttled upstream) when Thread rows change.
_onCountsChanged: =>
  DatabaseStore._query(ReadCountsQuery()).then (rows) =>
    fresh = {}
    for row in rows
      fresh[row.category_id] = {unread: row.unread, total: row.total}
    @_counts = fresh
    @trigger()
|
||||
|
||||
# Returns the unread thread count for the given category id, or null when
# no counter row has been loaded for it yet.
unreadCountForCategoryId: (catId) =>
  return null if @_counts[catId] is undefined
  # FIX: removed the stale old-implementation expression
  # `@_counts[catId] + (@_deltas[catId] || 0)` that was left stacked above
  # this return. It was evaluated and discarded (CoffeeScript returns the
  # last expression), and is meaningless now that each cache entry is an
  # {unread, total} object rather than a bare number.
  @_counts[catId]['unread']
|
||||
|
||||
# Exposes the raw counter cache keyed by category id.
unreadCounts: =>
  counts = @_counts
  counts
|
||||
|
||||
# Folds a {categoryId -> unread delta} payload into @_deltas and schedules a
# throttled save. Only the work window owns the counters; every other window
# forwards the call there over the window bridge instead of applying it.
_onCountsChanged: (metadata) =>
  unless NylasEnv.isWorkWindow()
    WindowBridge.runInWorkWindow("ThreadCountsStore", "_onCountsChanged", [metadata])
    return

  for catId, delta of metadata
    existing = @_deltas[catId] ? 0
    @_deltas[catId] = existing + delta
  @_saveCountsSoon()
|
||||
|
||||
# Replaces the counter cache with the JSON blob read from the database
# (falling back to an empty object when the blob is missing) and notifies.
_onCountsBlobRead: (json) =>
  @_counts = if json? then json else {}
  @trigger()
|
||||
|
||||
# Finds the first category with no cached count, fetches its absolute unread
# count, and then schedules itself to fill in the next gap.
_fetchCountsMissing: =>
  # Pick a category whose count has not been computed yet.
  missing = _.find @_categories, (cat) => !@_counts[cat.id]?
  return unless missing

  # Zero the delta before counting: the query result is an absolute value.
  @_deltas[missing.id] = 0

  @_fetchCountForCategory(missing)
  .then (absolute) =>
    # A non-zero delta means threads changed while the COUNT query ran, so
    # `absolute` may or may not include those changes. Leave the cache slot
    # empty; the rescheduled pass below will count this category again.
    if @_deltas[missing.id] is 0
      @_counts[missing.id] = absolute

    # Space out the expensive SELECT COUNT queries rather than flooding the
    # database — populating every count takes a while, by design.
    _.delay(@_fetchCountsMissing, 3000)
    @_saveCountsSoon()
  .catch (err) ->
    console.warn(err)

  # Deliberately return null: this method is not meant to be chained on,
  # and returning the promise above could cause strange chaining.
  return null
|
||||
|
||||
# Merges pending deltas into the counter cache — skipping categories whose
# initial absolute count has not been computed yet — then persists the cache
# as a JSON blob and notifies listeners.
_saveCounts: =>
  for catId, delta of @_deltas
    unless @_counts[catId] is undefined
      @_counts[catId] += delta
      delete @_deltas[catId]

  DatabaseStore.inTransaction (t) =>
    t.persistJSONBlob(JSONBlobKey, @_counts)
  @trigger()
|
||||
|
||||
# Issues a COUNT query for unread threads in the given category, scoped to
# that category's account. Returns the DatabaseStore promise.
_fetchCountForCategory: (cat) =>
  matchers = [
    Thread.attributes.categories.contains(cat.id)
    Thread.attributes.accountId.equal(cat.accountId)
    Thread.attributes.unread.equal(true)
  ]
  DatabaseStore.count(Thread, matchers)
|
||||
# Returns the total (read + unread) thread count for the given category id,
# or null when no counter row has been loaded for it.
totalCountForCategoryId: (catId) =>
  entry = @_counts[catId]
  return null if entry is undefined
  entry['total']
|
||||
|
||||
# Export a single shared store instance (stores are singletons per window).
module.exports = new ThreadCountsStore()
|
||||
|
|
|
|||
Loading…
Add table
Reference in a new issue