mirror of
https://github.com/zadam/trilium.git
synced 2025-01-17 20:48:12 +08:00
small sync fixes
This commit is contained in:
parent
b0a3f828fb
commit
aff9ce97ee
4 changed files with 26 additions and 15 deletions
|
@ -6,7 +6,6 @@ class Branch {
|
|||
this.branchId = row.branchId;
|
||||
/** @param {string} */
|
||||
this.noteId = row.noteId;
|
||||
this.note = null;
|
||||
/** @param {string} */
|
||||
this.parentNoteId = row.parentNoteId;
|
||||
/** @param {int} */
|
||||
|
|
|
@ -133,7 +133,7 @@ class TreeCache {
|
|||
|
||||
/** @return {Promise<NoteShort[]>} */
|
||||
async getNotes(noteIds, silentNotFoundError = false) {
|
||||
const missingNoteIds = noteIds.filter(noteId => this.notes[noteId] === undefined);
|
||||
const missingNoteIds = noteIds.filter(noteId => !this.notes[noteId]);
|
||||
|
||||
if (missingNoteIds.length > 0) {
|
||||
await this.reloadNotes(missingNoteIds);
|
||||
|
|
|
@ -64,15 +64,18 @@ async function handleMessage(event) {
|
|||
await consumeQueuePromise;
|
||||
}
|
||||
|
||||
try {
|
||||
// it's my turn so start it up
|
||||
consumeQueuePromise = consumeSyncData();
|
||||
|
||||
await consumeQueuePromise;
|
||||
|
||||
}
|
||||
finally {
|
||||
// finish and set to null to signal somebody else can pick it up
|
||||
consumeQueuePromise = null;
|
||||
}
|
||||
}
|
||||
}
|
||||
else if (message.type === 'sync-hash-check-failed') {
|
||||
toastService.showError("Sync check failed!", 60000);
|
||||
}
|
||||
|
@ -113,6 +116,15 @@ function checkSyncIdListeners() {
|
|||
.forEach(l => console.log(`Waiting for syncId ${l.desiredSyncId} while current is ${lastProcessedSyncId} for ${Math.floor((Date.now() - l.start) / 1000)}s`));
|
||||
}
|
||||
|
||||
/**
 * Invokes a single sync message handler, isolating failures: any error the
 * handler throws is logged and swallowed so that one broken handler cannot
 * abort the surrounding sync-consumption batch.
 *
 * @param {Function} syncHandler - async handler to invoke
 * @param {Object[]} syncData - sync rows passed through to the handler
 * @returns {Promise<*>} the handler's result, or undefined if it threw
 */
async function runSafely(syncHandler, syncData) {
    let result;

    try {
        result = await syncHandler(syncData);
    }
    catch (err) {
        // best-effort by design: record the failure and keep going
        console.log(`Sync handler failed with ${err.message}: ${err.stack}`);
        return undefined;
    }

    return result;
}
|
||||
|
||||
async function consumeSyncData() {
|
||||
if (syncDataQueue.length > 0) {
|
||||
const allSyncData = syncDataQueue;
|
||||
|
@ -126,8 +138,8 @@ async function consumeSyncData() {
|
|||
|
||||
// the update process should be synchronous as a whole but individual handlers can run in parallel
|
||||
await Promise.all([
|
||||
...allSyncMessageHandlers.map(syncHandler => syncHandler(allSyncData)),
|
||||
...outsideSyncMessageHandlers.map(syncHandler => syncHandler(outsideSyncData))
|
||||
...allSyncMessageHandlers.map(syncHandler => runSafely(syncHandler, allSyncData)),
|
||||
...outsideSyncMessageHandlers.map(syncHandler => runSafely(syncHandler, outsideSyncData))
|
||||
]);
|
||||
|
||||
lastProcessedSyncId = Math.max(lastProcessedSyncId, allSyncData[allSyncData.length - 1].id);
|
||||
|
@ -171,8 +183,6 @@ async function sendPing() {
|
|||
setTimeout(() => {
|
||||
ws = connectWebSocket();
|
||||
|
||||
lastAcceptedSyncId = glob.maxSyncIdAtLoad;
|
||||
lastProcessedSyncId = glob.maxSyncIdAtLoad;
|
||||
lastPingTs = Date.now();
|
||||
|
||||
setInterval(sendPing, 1000);
|
||||
|
|
|
@ -3,7 +3,6 @@ const sqlInit = require('./sql_init');
|
|||
const optionService = require('./options');
|
||||
const dateUtils = require('./date_utils');
|
||||
const syncTableService = require('./sync_table');
|
||||
const attributeService = require('./attributes');
|
||||
const eventService = require('./events');
|
||||
const repository = require('./repository');
|
||||
const cls = require('../services/cls');
|
||||
|
@ -462,8 +461,11 @@ async function eraseDeletedNotes() {
|
|||
return;
|
||||
}
|
||||
|
||||
// it's better to not use repository for this because it will complain about saving protected notes
|
||||
// out of protected session, also we don't want these changes to be synced (since they are done on all instances anyway)
|
||||
// it's better to not use repository for this because:
|
||||
// - it would complain about saving protected notes out of protected session
|
||||
// - we don't want these changes to be synced (since they are done on all instances anyway)
|
||||
// - we don't want change the hash since this erasing happens on each instance separately
|
||||
// and changing the hash would fire up the sync errors temporarily
|
||||
|
||||
// setting contentLength to zero would serve no benefit and it leaves potentially useful trail
|
||||
await sql.executeMany(`
|
||||
|
|
Loading…
Reference in a new issue