Skip to content

Commit f67caf0

Browse files
committed
Fix tests
1 parent 0b853a7 commit f67caf0

File tree

1 file changed

+21
-21
lines changed

1 file changed

+21
-21
lines changed

apps/sim/executor/handlers/agent/memory.test.ts

Lines changed: 21 additions & 21 deletions
Original file line number | Diff line number | Diff line change
@@ -32,7 +32,7 @@ describe('Memory', () => {
3232
})
3333

3434
describe('applySlidingWindow (message-based)', () => {
35-
it('should keep last N conversation messages', () => {
35+
it('should keep last N turns (turn = user message + assistant response)', () => {
3636
const messages: Message[] = [
3737
{ role: 'system', content: 'System prompt' },
3838
{ role: 'user', content: 'Message 1' },
@@ -43,9 +43,10 @@ describe('Memory', () => {
4343
{ role: 'assistant', content: 'Response 3' },
4444
]
4545

46-
const result = (memoryService as any).applySlidingWindow(messages, '4')
46+
// Limit to 2 turns: should keep turns 2 and 3
47+
const result = (memoryService as any).applySlidingWindow(messages, '2')
4748

48-
expect(result.length).toBe(5)
49+
expect(result.length).toBe(5) // system + 2 turns (4 messages)
4950
expect(result[0].role).toBe('system')
5051
expect(result[0].content).toBe('System prompt')
5152
expect(result[1].content).toBe('Message 2')
@@ -113,19 +114,18 @@ describe('Memory', () => {
113114
it('should preserve first system message and exclude it from token count', () => {
114115
const messages: Message[] = [
115116
{ role: 'system', content: 'A' }, // System message - always preserved
116-
{ role: 'user', content: 'B' }, // ~1 token
117-
{ role: 'assistant', content: 'C' }, // ~1 token
118-
{ role: 'user', content: 'D' }, // ~1 token
117+
{ role: 'user', content: 'B' }, // ~1 token (turn 1)
118+
{ role: 'assistant', content: 'C' }, // ~1 token (turn 1)
119+
{ role: 'user', content: 'D' }, // ~1 token (turn 2)
119120
]
120121

121-
// Limit to 2 tokens - should fit system message + last 2 conversation messages (D, C)
122+
// Limit to 2 tokens - fits turn 2 (D=1 token), but turn 1 (B+C=2 tokens) would exceed
122123
const result = (memoryService as any).applySlidingWindowByTokens(messages, '2', 'gpt-4o')
123124

124-
// Should have: system message + 2 conversation messages = 3 total
125-
expect(result.length).toBe(3)
125+
// Should have: system message + turn 2 (1 message) = 2 total
126+
expect(result.length).toBe(2)
126127
expect(result[0].role).toBe('system') // First system message preserved
127-
expect(result[1].content).toBe('C') // Second most recent conversation message
128-
expect(result[2].content).toBe('D') // Most recent conversation message
128+
expect(result[1].content).toBe('D') // Most recent turn
129129
})
130130

131131
it('should process messages from newest to oldest', () => {
@@ -249,29 +249,29 @@ describe('Memory', () => {
249249
})
250250

251251
describe('Token-based vs Message-based comparison', () => {
252-
it('should produce different results for same message count limit', () => {
252+
it('should produce different results based on turn limits vs token limits', () => {
253253
const messages: Message[] = [
254-
{ role: 'user', content: 'A' }, // Short message (~1 token)
254+
{ role: 'user', content: 'A' }, // Short message (~1 token) - turn 1
255255
{
256256
role: 'assistant',
257257
content: 'This is a much longer response that takes many more tokens',
258-
}, // Long message (~15 tokens)
259-
{ role: 'user', content: 'B' }, // Short message (~1 token)
258+
}, // Long message (~15 tokens) - turn 1
259+
{ role: 'user', content: 'B' }, // Short message (~1 token) - turn 2
260260
]
261261

262-
// Message-based: last 2 messages
263-
const messageResult = (memoryService as any).applySlidingWindow(messages, '2')
264-
expect(messageResult.length).toBe(2)
262+
// Turn-based with limit 1: keeps last turn only
263+
const messageResult = (memoryService as any).applySlidingWindow(messages, '1')
264+
expect(messageResult.length).toBe(1) // Only turn 2 (message B)
265265

266-
// Token-based: with limit of 10 tokens, might fit all 3 messages or just last 2
266+
// Token-based: with limit of 10 tokens, fits turn 2 (1 token) but not turn 1 (~16 tokens)
267267
const tokenResult = (memoryService as any).applySlidingWindowByTokens(
268268
messages,
269269
'10',
270270
'gpt-4o'
271271
)
272272

273-
// The long message should affect what fits
274-
expect(tokenResult.length).toBeGreaterThanOrEqual(1)
273+
// Both should only fit the last turn due to the long assistant message
274+
expect(tokenResult.length).toBe(1)
275275
})
276276
})
277277
})

0 commit comments

Comments (0)