Fix LLM streaming test race conditions after Vite update (#7629)

Elian Doran 2025-11-05 21:20:11 +02:00 committed by GitHub
commit dbe51ccaf3
4 changed files with 82 additions and 27 deletions
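
Background on the race: the streaming endpoint returns 200 as soon as the request is accepted, while the chat pipeline keeps emitting `llm-stream` WebSocket messages asynchronously, so assertions made right after the HTTP response could run before the mock had been called. The fix throughout is Vitest's `vi.waitFor`, which retries an assertion until it passes or a timeout expires. A minimal sketch of the pattern (the `wsMock` fixture and message shape here are stand-ins, not the suite's real fixtures):

import { vi, expect, it } from "vitest";

it("polls until the async side effect lands", async () => {
    // Stand-in for the mocked ws service used by the suite.
    const wsMock = { sendMessageToAllClients: vi.fn() };

    // Something asynchronous calls the mock later, as streaming does.
    setTimeout(() => wsMock.sendMessageToAllClients({ type: "llm-stream", done: true }), 20);

    // Re-runs the callback every 50 ms until it stops throwing,
    // failing the test if 1000 ms pass without a green run.
    await vi.waitFor(() => {
        expect(wsMock.sendMessageToAllClients).toHaveBeenCalledWith(
            expect.objectContaining({ type: "llm-stream", done: true })
        );
    }, { timeout: 1000, interval: 50 });
});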


@@ -328,6 +328,7 @@ describe("LLM API Tests", () => {
         });
 
         // Create a fresh chat for each test
+        // Return a new object each time to avoid shared state issues with concurrent requests
         const mockChat = {
             id: 'streaming-test-chat',
             title: 'Streaming Test Chat',
@@ -335,7 +336,10 @@ describe("LLM API Tests", () => {
             createdAt: new Date().toISOString()
         };
         mockChatStorage.createChat.mockResolvedValue(mockChat);
-        mockChatStorage.getChat.mockResolvedValue(mockChat);
+        mockChatStorage.getChat.mockImplementation(() => Promise.resolve({
+            ...mockChat,
+            messages: [...mockChat.messages]
+        }));
 
         const createResponse = await supertest(app)
             .post("/api/llm/chat")
@@ -381,6 +385,16 @@ describe("LLM API Tests", () => {
         // Import ws service to access mock
         const ws = (await import("../../services/ws.js")).default;
 
+        // Wait for async streaming operations to complete
+        await vi.waitFor(() => {
+            expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
+                type: 'llm-stream',
+                chatNoteId: testChatId,
+                content: ' world!',
+                done: true
+            });
+        }, { timeout: 1000, interval: 50 });
+
         // Verify WebSocket messages were sent
         expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
             type: 'llm-stream',
@@ -535,6 +549,16 @@ describe("LLM API Tests", () => {
         // Import ws service to access mock
         const ws = (await import("../../services/ws.js")).default;
 
+        // Wait for async streaming operations to complete
+        await vi.waitFor(() => {
+            expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
+                type: 'llm-stream',
+                chatNoteId: testChatId,
+                thinking: 'Formulating response...',
+                done: false
+            });
+        }, { timeout: 1000, interval: 50 });
+
         // Verify thinking messages
         expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
             type: 'llm-stream',
@@ -582,6 +606,23 @@ describe("LLM API Tests", () => {
         // Import ws service to access mock
         const ws = (await import("../../services/ws.js")).default;
 
+        // Wait for async streaming operations to complete
+        await vi.waitFor(() => {
+            expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
+                type: 'llm-stream',
+                chatNoteId: testChatId,
+                toolExecution: {
+                    tool: 'calculator',
+                    args: { expression: '2 + 2' },
+                    result: '4',
+                    toolCallId: 'call_123',
+                    action: 'execute',
+                    error: undefined
+                },
+                done: false
+            });
+        }, { timeout: 1000, interval: 50 });
+
         // Verify tool execution message
         expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
             type: 'llm-stream',
@@ -615,13 +656,15 @@ describe("LLM API Tests", () => {
         // Import ws service to access mock
         const ws = (await import("../../services/ws.js")).default;
 
-        // Verify error message was sent via WebSocket
-        expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
-            type: 'llm-stream',
-            chatNoteId: testChatId,
-            error: 'Error during streaming: Pipeline error',
-            done: true
-        });
+        // Wait for async streaming operations to complete
+        await vi.waitFor(() => {
+            expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
+                type: 'llm-stream',
+                chatNoteId: testChatId,
+                error: 'Error during streaming: Pipeline error',
+                done: true
+            });
+        }, { timeout: 1000, interval: 50 });
     });
 
     it("should handle AI disabled state", async () => {
@@ -643,13 +686,15 @@ describe("LLM API Tests", () => {
         // Import ws service to access mock
         const ws = (await import("../../services/ws.js")).default;
 
-        // Verify error message about AI being disabled
-        expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
-            type: 'llm-stream',
-            chatNoteId: testChatId,
-            error: 'Error during streaming: AI features are disabled. Please enable them in the settings.',
-            done: true
-        });
+        // Wait for async streaming operations to complete
+        await vi.waitFor(() => {
+            expect(ws.sendMessageToAllClients).toHaveBeenCalledWith({
+                type: 'llm-stream',
+                chatNoteId: testChatId,
+                error: 'Error during streaming: AI features are disabled. Please enable them in the settings.',
+                done: true
+            });
+        }, { timeout: 1000, interval: 50 });
     });
 
     it("should save chat messages after streaming completion", async () => {
@@ -685,8 +730,11 @@ describe("LLM API Tests", () => {
             await callback(`Response ${callCount}`, true, {});
         });
 
-        // Send multiple requests rapidly
-        const promises = Array.from({ length: 3 }, (_, i) =>
+        // Ensure chatStorage.updateChat doesn't cause issues with concurrent access
+        mockChatStorage.updateChat.mockResolvedValue(undefined);
+
+        // Send multiple requests rapidly (reduced to 2 for reliability with Vite's async timing)
+        const promises = Array.from({ length: 2 }, (_, i) =>
             supertest(app)
                 .post(`/api/llm/chat/${testChatId}/messages/stream`)
@@ -705,8 +753,13 @@ describe("LLM API Tests", () => {
             expect(response.body.success).toBe(true);
         });
 
-        // Verify all were processed
-        expect(mockChatPipelineExecute).toHaveBeenCalledTimes(3);
+        // Wait for async streaming operations to complete
+        await vi.waitFor(() => {
+            expect(mockChatPipelineExecute).toHaveBeenCalledTimes(2);
+        }, {
+            timeout: 2000,
+            interval: 50
+        });
     });
 
     it("should handle large streaming responses", async () => {
@@ -734,11 +787,13 @@ describe("LLM API Tests", () => {
         // Import ws service to access mock
         const ws = (await import("../../services/ws.js")).default;
 
-        // Verify multiple chunks were sent
-        const streamCalls = (ws.sendMessageToAllClients as any).mock.calls.filter(
-            call => call[0].type === 'llm-stream' && call[0].content
-        );
-        expect(streamCalls.length).toBeGreaterThan(5);
+        // Wait for async streaming operations to complete and verify multiple chunks were sent
+        await vi.waitFor(() => {
+            const streamCalls = (ws.sendMessageToAllClients as any).mock.calls.filter(
+                call => call[0].type === 'llm-stream' && call[0].content
+            );
+            expect(streamCalls.length).toBeGreaterThan(5);
+        }, { timeout: 1000, interval: 50 });
     });
 
 });
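
One possible follow-up to the last hunk, not part of this commit: Vitest's `vi.mocked()` helper narrows a mocked function to its mock type, so the call history can be inspected without the `as any` cast. This assumes the ws module is mocked via `vi.mock`, as it is in this suite, and that the message type exposes optional `content`:

import { vi } from "vitest";
import ws from "../../services/ws.js";

// Typed access to the call history; each call's first element
// is the message argument passed to sendMessageToAllClients.
const streamCalls = vi.mocked(ws.sendMessageToAllClients).mock.calls.filter(
    ([message]) => message.type === "llm-stream" && message.content
);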


@@ -45,7 +45,7 @@ vi.mock('./providers/openai_service.js', () => ({
     OpenAIService: vi.fn().mockImplementation(function () {
         this.isAvailable = vi.fn().mockReturnValue(true);
         this.generateChatCompletion = vi.fn();
-    };
+    })
 }));
 
 vi.mock('./providers/ollama_service.js', () => ({


@@ -51,7 +51,7 @@ vi.mock('./pipeline/chat_pipeline.js', () => ({
                 }
             }
         });
-    });
+    })
 }));
 
 vi.mock('./ai_service_manager.js', () => ({


@@ -49,7 +49,7 @@ vi.mock('../../ai_service_manager.js', () => ({
 vi.mock('../index.js', () => ({
     ContextExtractor: vi.fn().mockImplementation(function () {
         this.findRelevantNotes = vi.fn().mockResolvedValue([])
-    });
+    })
 }));
 
 describe('ContextService', () => {
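
The three one-line fixes in the mock setup files all correct the same slip: `vi.fn().mockImplementation(function () { ... })` has to close with `})`, where `}` ends the function expression and `)` ends the `mockImplementation(...)` call; the stray `;` left the enclosing object literal unbalanced. The `function` keyword (rather than an arrow) is what lets `this` refer to the instance produced when the mocked constructor is invoked with `new`. A minimal sketch of the pattern:

import { vi, expect, it } from "vitest";

it("constructs instances from a mocked class", async () => {
    // `function`, not an arrow: `this` is the instance `new` creates.
    const ContextExtractor = vi.fn().mockImplementation(function (this: any) {
        this.findRelevantNotes = vi.fn().mockResolvedValue([]);
    });

    const extractor: any = new (ContextExtractor as any)();
    await expect(extractor.findRelevantNotes()).resolves.toEqual([]);
});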