reload frontend when consistency is fixed

zadam 2021-09-12 11:18:06 +02:00
parent 3f2ee4aefd
commit 9b9be5d155
4 changed files with 69 additions and 49 deletions

src/becca/becca_loader.js

@@ -50,6 +50,12 @@ function load() {
     log.info(`Becca (note cache) load took ${Date.now() - start}ms`);
 }
 
+function reload() {
+    load();
+
+    require('../services/ws').reloadFrontend();
+}
+
 function postProcessEntityUpdate(entityName, entity) {
     if (entityName === 'branches') {
         branchUpdated(entity);
@@ -221,5 +227,6 @@ eventService.subscribe(eventService.LEAVE_PROTECTED_SESSION, load);
 
 module.exports = {
     load,
+    reload,
     beccaLoaded
 };

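The new `reload()` couples a full note-cache rebuild with a broadcast telling every connected client to reload itself. A minimal sketch of that rebuild-then-notify pattern, with `loadCache()` and `broadcast()` as hypothetical stand-ins for Trilium's `load()` and `ws.reloadFrontend()`:

```js
// Sketch of the rebuild-then-notify pattern behind reload().
// loadCache() and broadcast() are illustrative stand-ins, not Trilium APIs.
let cache = {};

function loadCache() {
    cache = { loadedAt: Date.now() };   // rebuild everything from scratch
}

function broadcast(message) {
    console.log('would send to all websocket clients:', message);
}

function reload() {
    loadCache();                              // rebuild the backend cache first...
    broadcast({ type: 'reload-frontend' });   // ...then tell every client to reload
}

reload();
```

The ordering matters: the cache is rebuilt before clients are notified, so a reloading frontend never refetches from a stale backend.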
src/public/app/services/ws.js

@@ -47,6 +47,42 @@ function logRows(entityChanges) {
     }
 }
 
+async function executeFrontendUpdate(entityChanges) {
+    lastPingTs = Date.now();
+
+    if (entityChanges.length > 0) {
+        logRows(entityChanges);
+
+        frontendUpdateDataQueue.push(...entityChanges);
+
+        // we set lastAcceptedEntityChangeId even before frontend update processing and send ping so that backend can start sending more updates
+        lastAcceptedEntityChangeId = Math.max(lastAcceptedEntityChangeId, entityChanges[entityChanges.length - 1].id);
+
+        const lastSyncEntityChange = entityChanges.slice().reverse().find(ec => ec.isSynced);
+
+        if (lastSyncEntityChange) {
+            lastAcceptedEntityChangeSyncId = Math.max(lastAcceptedEntityChangeSyncId, lastSyncEntityChange.id);
+        }
+
+        sendPing();
+
+        // first wait for all the preceding consumers to finish
+        while (consumeQueuePromise) {
+            await consumeQueuePromise;
+        }
+
+        try {
+            // it's my turn so start it up
+            consumeQueuePromise = consumeFrontendUpdateData();
+
+            await consumeQueuePromise;
+        } finally {
+            // finish and set to null to signal somebody else can pick it up
+            consumeQueuePromise = null;
+        }
+    }
+}
+
 async function handleMessage(event) {
     const message = JSON.parse(event.data);
@@ -54,42 +90,11 @@ async function handleMessage(event) {
         messageHandler(message);
     }
 
-    if (message.type === 'frontend-update') {
-        let {entityChanges} = message.data;
-        lastPingTs = Date.now();
-
-        if (entityChanges.length > 0) {
-            logRows(entityChanges);
-
-            frontendUpdateDataQueue.push(...entityChanges);
-
-            // we set lastAcceptedEntityChangeId even before frontend update processing and send ping so that backend can start sending more updates
-            lastAcceptedEntityChangeId = Math.max(lastAcceptedEntityChangeId, entityChanges[entityChanges.length - 1].id);
-
-            const lastSyncEntityChange = entityChanges.slice().reverse().find(ec => ec.isSynced);
-
-            if (lastSyncEntityChange) {
-                lastAcceptedEntityChangeSyncId = Math.max(lastAcceptedEntityChangeSyncId, lastSyncEntityChange.id);
-            }
-
-            sendPing();
-
-            // first wait for all the preceding consumers to finish
-            while (consumeQueuePromise) {
-                await consumeQueuePromise;
-            }
-
-            try {
-                // it's my turn so start it up
-                consumeQueuePromise = consumeFrontendUpdateData();
-
-                await consumeQueuePromise;
-            }
-            finally {
-                // finish and set to null to signal somebody else can pick it up
-                consumeQueuePromise = null;
-            }
-        }
-    }
+    if (message.type === 'reload-frontend') {
+        utils.reloadFrontendApp();
+    }
+    else if (message.type === 'frontend-update') {
+        await executeFrontendUpdate(message.data.entityChanges);
+    }
     else if (message.type === 'sync-hash-check-failed') {
         toastService.showError("Sync check failed!", 60000);
@@ -211,7 +216,6 @@ setTimeout(() => {
 export default {
     logError,
     subscribeToMessages,
-    waitForEntityChangeId,
     waitForMaxKnownEntityChangeId,
     getMaxKnownEntityChangeSyncId: () => lastAcceptedEntityChangeSyncId
 };

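`executeFrontendUpdate()` serializes queue consumption with a shared promise rather than an explicit lock: whoever finds `consumeQueuePromise` null starts a consumer, everyone else awaits it in a loop. A self-contained sketch of that pattern (names are illustrative, not Trilium's):

```js
// Sketch of the shared-promise serialization used by executeFrontendUpdate():
// the caller that finds consumePromise null becomes the consumer; others wait.
const queue = [];
let consumePromise = null;

async function consumeAll() {
    while (queue.length > 0) {
        const item = queue.shift();
        await new Promise(resolve => setTimeout(resolve, 10)); // simulate async processing
        console.log('processed', item);
    }
}

async function pushAndConsume(items) {
    queue.push(...items);

    // first wait for all the preceding consumers to finish
    while (consumePromise) {
        await consumePromise;
    }

    try {
        consumePromise = consumeAll();   // our turn, so start it up
        await consumePromise;
    } finally {
        consumePromise = null;           // signal that the next caller may start
    }
}

// two overlapping producers still result in strictly serialized consumption
pushAndConsume([1, 2]);
pushAndConsume([3, 4]);
```

Because JavaScript is single-threaded, the check-then-set on the promise cannot race; the `while` loop only has to re-check after each await in case another caller claimed the slot in the meantime.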
src/services/consistency_checks.js

@@ -288,14 +288,17 @@ class ConsistencyChecks {
                    WHERE note_contents.noteId IS NULL`,
             ({noteId, isProtected, type, mime}) => {
                 if (this.autoFix) {
-                    const utcDateModified = dateUtils.utcNowDateTime();
+                    // it might be possible that the note_content is not available only because of the interrupted
+                    // sync and it will come later. It's therefore important to guarantee that this artificial
+                    // record won't overwrite the real one coming from the sync.
+                    const fakeDate = "2000-01-01 00:00:00Z";
 
                     // manually creating row since this can also affect deleted notes
                     sql.upsert("note_contents", "noteId", {
                         noteId: noteId,
                         content: getBlankContent(isProtected, type, mime),
-                        utcDateModified: utcDateModified,
-                        dateModified: dateUtils.localNowDateTime()
+                        utcDateModified: fakeDate,
+                        dateModified: fakeDate
                     });
 
                     const hash = utils.hash(utils.randomString(10));
@@ -305,7 +308,7 @@ class ConsistencyChecks {
                         entityId: noteId,
                         hash: hash,
                         isErased: false,
-                        utcDateChanged: utcDateModified,
+                        utcDateChanged: fakeDate,
                         isSynced: true
                     });
@@ -358,10 +361,11 @@ class ConsistencyChecks {
                       AND branches.isDeleted = 0`,
             ({parentNoteId}) => {
                 if (this.autoFix) {
-                    const branchIds = sql.getColumn(`SELECT branchId
-                                                     FROM branches
-                                                     WHERE isDeleted = 0
-                                                       AND parentNoteId = ?`, [parentNoteId]);
+                    const branchIds = sql.getColumn(`
+                        SELECT branchId
+                        FROM branches
+                        WHERE isDeleted = 0
+                          AND parentNoteId = ?`, [parentNoteId]);
 
                     const branches = branchIds.map(branchId => becca.getBranch(branchId));
@@ -416,7 +420,7 @@ class ConsistencyChecks {
                    SELECT attributeId,
                           attributes.noteId
                      FROM attributes
-                     JOIN notes ON attributes.noteId = notes.noteId
+                      JOIN notes ON attributes.noteId = notes.noteId
                     WHERE attributes.isDeleted = 0
                       AND notes.isDeleted = 1`,
             ({attributeId, noteId}) => {
@@ -434,7 +438,7 @@ class ConsistencyChecks {
                    SELECT attributeId,
                           attributes.value AS targetNoteId
                      FROM attributes
-                     JOIN notes ON attributes.value = notes.noteId
+                      JOIN notes ON attributes.value = notes.noteId
                     WHERE attributes.type = 'relation'
                       AND attributes.isDeleted = 0
                       AND notes.isDeleted = 1`,
@@ -584,7 +588,7 @@ class ConsistencyChecks {
         }
 
         if (this.fixedIssues) {
-            require("../becca/becca_loader").load();
+            require("../becca/becca_loader").reload();
         }
 
         return !this.unrecoveredConsistencyErrors;

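Backdating the placeholder row to 2000-01-01 leans on the sync conflict rule that a newer `utcDateModified` wins: if the missing `note_contents` row was only a casualty of an interrupted sync, the real row arriving later always carries a newer timestamp and overwrites the artificial one. A toy illustration of that newer-wins resolution (`applyIncoming()` is an assumption for illustration, not Trilium's actual sync code):

```js
// Toy newer-wins merge: the backdated placeholder always loses to a real row.
// applyIncoming() is an illustrative assumption, not Trilium's sync implementation.
const FAKE_DATE = "2000-01-01 00:00:00Z";

const local = { noteId: "abc", content: "", utcDateModified: FAKE_DATE };
const incoming = { noteId: "abc", content: "real text", utcDateModified: "2021-09-12 09:18:06Z" };

function applyIncoming(localRow, incomingRow) {
    // ISO-like timestamps compare correctly as plain strings
    return incomingRow.utcDateModified > localRow.utcDateModified ? incomingRow : localRow;
}

console.log(applyIncoming(local, incoming).content); // "real text" — the placeholder is replaced
```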
src/services/ws.js

@@ -178,6 +178,10 @@ function syncFailed() {
     sendMessageToAllClients({ type: 'sync-failed', lastSyncedPush });
 }
 
+function reloadFrontend() {
+    sendMessageToAllClients({ type: 'reload-frontend' });
+}
+
 function setLastSyncedPush(entityChangeId) {
     lastSyncedPush = entityChangeId;
 }
@@ -190,5 +194,6 @@ module.exports = {
     syncFinished,
     syncFailed,
     sendTransactionEntityChangesToAllClients,
-    setLastSyncedPush
+    setLastSyncedPush,
+    reloadFrontend
 };
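On the backend, `reloadFrontend()` is just a broadcast over the existing websocket channel; the frontend's `handleMessage()` above maps the `reload-frontend` type to `utils.reloadFrontendApp()`. A sketch of the broadcast half using the `ws` npm package (the server wiring is illustrative; only the `{ type: 'reload-frontend' }` payload comes from the commit):

```js
// Sketch of the broadcast half, built on the 'ws' npm package.
// Only the { type: 'reload-frontend' } message shape matches the commit.
const WebSocket = require('ws');

const server = new WebSocket.Server({ port: 8089 });

function sendMessageToAllClients(message) {
    const json = JSON.stringify(message);

    for (const client of server.clients) {
        if (client.readyState === WebSocket.OPEN) {
            client.send(json);
        }
    }
}

function reloadFrontend() {
    sendMessageToAllClients({ type: 'reload-frontend' });
}

// e.g. invoked after ConsistencyChecks fixes issues and the becca cache is reloaded
reloadFrontend();
```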