Mirror of https://github.com/zadam/trilium.git, synced 2025-11-10 05:40:49 +08:00
Improve test reliability by using vi.waitFor() for async checks
Replaced hardcoded timeouts with vi.waitFor() for better test reliability.

Co-authored-by: eliandoran <21236836+eliandoran@users.noreply.github.com>
This commit is contained in:
parent 18a198496b
commit 730e2da932

1 changed file with 25 additions and 28 deletions
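As context for the diff below, this is the shape of the change: a fixed setTimeout sleep asserts exactly once and races against the asynchronous streaming work, while vi.waitFor() retries the assertion until it passes or a timeout elapses. A minimal, self-contained sketch of the pattern follows; it is illustrative rather than code from the diff, and the onMessage spy is hypothetical:

    // Sketch only: `onMessage` is a stand-in spy, not part of the tests.
    import { expect, it, vi } from "vitest";

    it("polls the assertion instead of sleeping", async () => {
        const onMessage = vi.fn();

        // Simulate async streaming work finishing at an unpredictable time.
        setTimeout(() => onMessage({ type: "llm-stream", done: true }), 30);

        // Before: await new Promise(resolve => setTimeout(resolve, 100));
        // After: re-run the assertion every 50 ms for up to 1000 ms.
        await vi.waitFor(() => {
            expect(onMessage).toHaveBeenCalledWith({ type: "llm-stream", done: true });
        }, { timeout: 1000, interval: 50 });
    });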
@@ -625,19 +625,18 @@ describe("LLM API Tests", () => {
 
         expect(response.status).toBe(200); // Still returns 200
 
-        // Wait for async streaming operations to complete
-        await new Promise(resolve => setTimeout(resolve, 100));
-
         // Import ws service to access mock
         const ws = (await import("../../services/ws.js")).default;
 
-        // Verify error message was sent via WebSocket
-        expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
-            type: 'llm-stream',
-            chatNoteId: testChatId,
-            error: 'Error during streaming: Pipeline error',
-            done: true
-        });
+        // Wait for async streaming operations to complete
+        await vi.waitFor(() => {
+            expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
+                type: 'llm-stream',
+                chatNoteId: testChatId,
+                error: 'Error during streaming: Pipeline error',
+                done: true
+            });
+        }, { timeout: 1000, interval: 50 });
     });
 
     it("should handle AI disabled state", async () => {
@@ -656,19 +655,18 @@ describe("LLM API Tests", () => {
 
         expect(response.status).toBe(200);
 
-        // Wait for async streaming operations to complete
-        await new Promise(resolve => setTimeout(resolve, 100));
-
         // Import ws service to access mock
         const ws = (await import("../../services/ws.js")).default;
 
-        // Verify error message about AI being disabled
-        expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
-            type: 'llm-stream',
-            chatNoteId: testChatId,
-            error: 'Error during streaming: AI features are disabled. Please enable them in the settings.',
-            done: true
-        });
+        // Wait for async streaming operations to complete
+        await vi.waitFor(() => {
+            expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
+                type: 'llm-stream',
+                chatNoteId: testChatId,
+                error: 'Error during streaming: AI features are disabled. Please enable them in the settings.',
+                done: true
+            });
+        }, { timeout: 1000, interval: 50 });
     });
 
     it("should save chat messages after streaming completion", async () => {
@@ -758,17 +756,16 @@ describe("LLM API Tests", () => {
 
         expect(response.status).toBe(200);
 
-        // Wait for async streaming operations to complete
-        await new Promise(resolve => setTimeout(resolve, 100));
-
         // Import ws service to access mock
         const ws = (await import("../../services/ws.js")).default;
 
-        // Verify multiple chunks were sent
-        const streamCalls = (ws.sendMessageToAllClients as any).mock.calls.filter(
-            call => call[0].type === 'llm-stream' && call[0].content
-        );
-        expect(streamCalls.length).toBeGreaterThan(5);
+        // Wait for async streaming operations to complete and verify multiple chunks were sent
+        await vi.waitFor(() => {
+            const streamCalls = (ws.sendMessageToAllClients as any).mock.calls.filter(
+                call => call[0].type === 'llm-stream' && call[0].content
+            );
+            expect(streamCalls.length).toBeGreaterThan(5);
+        }, { timeout: 1000, interval: 50 });
     });
 });
 
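A note on the options used throughout: { timeout: 1000, interval: 50 } asks vi.waitFor() to re-run the callback every 50 ms until it stops throwing, giving up after 1000 ms; these values match Vitest's documented defaults for vi.waitFor(), so passing them here is mainly for explicitness. Because the callback re-executes on every poll, the filtered mock.calls array in the last hunk is recomputed on each retry, whereas the old fixed 100 ms sleep asserted exactly once against whatever had happened by then.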