chore: cosmetic polish following #161 (#162)

- Restore backticks around `prompt` in `Agent.prompt()` comment.
- Drop two stray blank lines around `mergedRecent` in `summarizeMessages`.
- Collapse deliberation comments in the new context-strategy test to one line.
- Add JSDoc note on `contextStrategy.custom.compress` that it fires every turn including the first; implementations must self-gate.
This commit is contained in:
Jack Chen 2026-04-24 02:30:50 +08:00 committed by GitHub
parent 11a1fb0ced
commit 8e6bf9bde1
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
4 changed files with 7 additions and 10 deletions

View File

@@ -206,7 +206,7 @@ export class Agent {
const result = await this.executeRun([...this.messageHistory]) const result = await this.executeRun([...this.messageHistory])
// Persist the new messages into history so the next prompt sees them. // Persist the new messages into history so the next `prompt` sees them.
for (const msg of result.messages) { for (const msg of result.messages) {
this.messageHistory.push(msg) this.messageHistory.push(msg)
} }

View File

@@ -374,12 +374,10 @@ export class AgentRunner {
: '[Conversation summary unavailable]' : '[Conversation summary unavailable]'
this.summarizeCache = { oldSignature, summaryPrefix } this.summarizeCache = { oldSignature, summaryPrefix }
const mergedRecent = prependSyntheticPrefixToFirstUser( const mergedRecent = prependSyntheticPrefixToFirstUser(
recentPortion, recentPortion,
`${summaryPrefix}\n\n`, `${summaryPrefix}\n\n`,
) )
return { return {
messages: [firstUser, ...mergedRecent], messages: [firstUser, ...mergedRecent],
usage: summaryResponse.usage, usage: summaryResponse.usage,

View File

@@ -85,6 +85,11 @@ export type ContextStrategy =
} }
| { | {
type: 'custom' type: 'custom'
/**
* Compaction callback. Invoked before every LLM turn including the first,
* so implementations that should only fire past a token threshold must
* self-gate inside this function.
*/
compress: ( compress: (
messages: LLMMessage[], messages: LLMMessage[],
estimatedTokens: number, estimatedTokens: number,

View File

@@ -205,13 +205,7 @@ describe('AgentRunner contextStrategy', () => {
const result = await runner.run(initialMessages) const result = await runner.run(initialMessages)
// 2 new messages were generated (the tool use, and the tool result). // Three new messages produced: assistant tool_use, user tool_result, assistant text.
// The `done` response is returned but not pushed as a new message to the list in `run()`.
// Wait, the `done` text response *is* pushed.
// Let's verify the exact length of new messages.
// The stream loop pushes the assistant message (tool use), then the user message (tool result),
// then loops back and pushes the final assistant message (text).
// So 3 new messages are added during this run.
expect(result.messages).toHaveLength(3) expect(result.messages).toHaveLength(3)
expect(result.messages[0]!.role).toBe('assistant') expect(result.messages[0]!.role).toBe('assistant')
expect(result.messages[1]!.role).toBe('user') // The tool_result expect(result.messages[1]!.role).toBe('user') // The tool_result