feat: implement synthetic framing for user messages and enhance context strategy handling

This commit is contained in:
MrAvalonApple 2026-04-12 00:18:36 +03:00
parent eb484d9bbf
commit 629d9c8253
2 changed files with 78 additions and 30 deletions

View File

@ -176,6 +176,31 @@ function addTokenUsage(a: TokenUsage, b: TokenUsage): TokenUsage {
const ZERO_USAGE: TokenUsage = { input_tokens: 0, output_tokens: 0 } const ZERO_USAGE: TokenUsage = { input_tokens: 0, output_tokens: 0 }
/**
 * Folds a synthetic framing prefix into the first `user` message so the
 * transcript never contains back-to-back `user` turns (a Bedrock
 * constraint) and summary text is not concatenated onto the original
 * user prompt (direct API). When no `user` message exists yet, a single
 * assistant text preamble is inserted at the front instead.
 *
 * Returns a new array; the input `messages` and its elements are not
 * mutated (the first user message is replaced by a shallow copy).
 */
function prependSyntheticPrefixToFirstUser(
  messages: LLMMessage[],
  prefix: string,
): LLMMessage[] {
  const firstUser = messages.findIndex(msg => msg.role === 'user')

  // No user turn to fold into: emit the prefix as a standalone
  // assistant preamble ahead of everything else.
  if (firstUser === -1) {
    const preamble: LLMMessage = {
      role: 'assistant',
      content: [{ type: 'text', text: prefix.trimEnd() }],
    }
    return [preamble, ...messages]
  }

  // Rebuild the list, swapping only the first user message for a copy
  // whose content begins with the synthetic prefix block.
  return messages.map((msg, idx) => {
    if (idx !== firstUser) {
      return msg
    }
    const merged: LLMMessage = {
      role: 'user',
      content: [{ type: 'text', text: prefix }, ...msg.content],
    }
    return merged
  })
}
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
// AgentRunner // AgentRunner
// --------------------------------------------------------------------------- // ---------------------------------------------------------------------------
@ -197,7 +222,7 @@ export class AgentRunner {
private readonly maxTurns: number private readonly maxTurns: number
private summarizeCache: { private summarizeCache: {
oldSignature: string oldSignature: string
summaryMessage: LLMMessage summaryPrefix: string
} | null = null } | null = null
constructor( constructor(
@ -237,13 +262,10 @@ export class AgentRunner {
const droppedPairs = Math.floor((afterFirst.length - kept.length) / 2) const droppedPairs = Math.floor((afterFirst.length - kept.length) / 2)
if (droppedPairs > 0) { if (droppedPairs > 0) {
result.push({ const notice =
role: 'user', `[Earlier conversation history truncated — ${droppedPairs} turn(s) removed]\n\n`
content: [{ result.push(...prependSyntheticPrefixToFirstUser(kept, notice))
type: 'text', return result
text: `[Earlier conversation history truncated — ${droppedPairs} turn(s) removed]`,
}],
})
} }
result.push(...kept) result.push(...kept)
@ -257,30 +279,36 @@ export class AgentRunner {
baseChatOptions: LLMChatOptions, baseChatOptions: LLMChatOptions,
turns: number, turns: number,
options: RunOptions, options: RunOptions,
): Promise<LLMMessage[]> { ): Promise<{ messages: LLMMessage[]; usage: TokenUsage }> {
const estimated = estimateTokens(messages) const estimated = estimateTokens(messages)
if (estimated <= maxTokens || messages.length < 4) { if (estimated <= maxTokens || messages.length < 4) {
return messages return { messages, usage: ZERO_USAGE }
} }
const firstUserIndex = messages.findIndex(m => m.role === 'user') const firstUserIndex = messages.findIndex(m => m.role === 'user')
if (firstUserIndex < 0 || firstUserIndex === messages.length - 1) { if (firstUserIndex < 0 || firstUserIndex === messages.length - 1) {
return messages return { messages, usage: ZERO_USAGE }
} }
const firstUser = messages[firstUserIndex]! const firstUser = messages[firstUserIndex]!
const rest = messages.slice(firstUserIndex + 1) const rest = messages.slice(firstUserIndex + 1)
if (rest.length < 2) { if (rest.length < 2) {
return messages return { messages, usage: ZERO_USAGE }
} }
const splitAt = Math.max(2, Math.floor(rest.length / 2)) // Split on an even boundary so we never separate a tool_use assistant turn
// from its tool_result user message (rest is user/assistant pairs).
const splitAt = Math.max(2, Math.floor(rest.length / 4) * 2)
const oldPortion = rest.slice(0, splitAt) const oldPortion = rest.slice(0, splitAt)
const recentPortion = rest.slice(splitAt) const recentPortion = rest.slice(splitAt)
const oldSignature = oldPortion.map(m => this.serializeMessage(m)).join('\n') const oldSignature = oldPortion.map(m => this.serializeMessage(m)).join('\n')
if (this.summarizeCache !== null && this.summarizeCache.oldSignature === oldSignature) { if (this.summarizeCache !== null && this.summarizeCache.oldSignature === oldSignature) {
return [firstUser, this.summarizeCache.summaryMessage, ...recentPortion] const mergedRecent = prependSyntheticPrefixToFirstUser(
recentPortion,
`${this.summarizeCache.summaryPrefix}\n\n`,
)
return { messages: [firstUser, ...mergedRecent], usage: ZERO_USAGE }
} }
const summaryPrompt = [ const summaryPrompt = [
@ -327,18 +355,19 @@ export class AgentRunner {
} }
const summaryText = extractText(summaryResponse.content).trim() const summaryText = extractText(summaryResponse.content).trim()
const summaryMessage: LLMMessage = { const summaryPrefix = summaryText.length > 0
role: 'user', ? `[Conversation summary]\n${summaryText}`
content: [{ : '[Conversation summary unavailable]'
type: 'text',
text: summaryText.length > 0
? `[Conversation summary]\n${summaryText}`
: '[Conversation summary unavailable]',
}],
}
this.summarizeCache = { oldSignature, summaryMessage } this.summarizeCache = { oldSignature, summaryPrefix }
return [firstUser, summaryMessage, ...recentPortion] const mergedRecent = prependSyntheticPrefixToFirstUser(
recentPortion,
`${summaryPrefix}\n\n`,
)
return {
messages: [firstUser, ...mergedRecent],
usage: summaryResponse.usage,
}
} }
private async applyContextStrategy( private async applyContextStrategy(
@ -347,9 +376,9 @@ export class AgentRunner {
baseChatOptions: LLMChatOptions, baseChatOptions: LLMChatOptions,
turns: number, turns: number,
options: RunOptions, options: RunOptions,
): Promise<LLMMessage[]> { ): Promise<{ messages: LLMMessage[]; usage: TokenUsage }> {
if (strategy.type === 'sliding-window') { if (strategy.type === 'sliding-window') {
return this.truncateToSlidingWindow(messages, strategy.maxTurns) return { messages: this.truncateToSlidingWindow(messages, strategy.maxTurns), usage: ZERO_USAGE }
} }
if (strategy.type === 'summarize') { if (strategy.type === 'summarize') {
@ -368,7 +397,7 @@ export class AgentRunner {
if (!Array.isArray(compressed) || compressed.length === 0) { if (!Array.isArray(compressed) || compressed.length === 0) {
throw new Error('contextStrategy.custom.compress must return a non-empty LLMMessage[]') throw new Error('contextStrategy.custom.compress must return a non-empty LLMMessage[]')
} }
return compressed return { messages: compressed, usage: ZERO_USAGE }
} }
// ------------------------------------------------------------------------- // -------------------------------------------------------------------------
@ -535,13 +564,15 @@ export class AgentRunner {
// Optionally compact context before each LLM call after the first turn. // Optionally compact context before each LLM call after the first turn.
if (this.options.contextStrategy && turns > 1) { if (this.options.contextStrategy && turns > 1) {
conversationMessages = await this.applyContextStrategy( const compacted = await this.applyContextStrategy(
conversationMessages, conversationMessages,
this.options.contextStrategy, this.options.contextStrategy,
baseChatOptions, baseChatOptions,
turns, turns,
options, options,
) )
conversationMessages = compacted.messages
totalUsage = addTokenUsage(totalUsage, compacted.usage)
} }
// ------------------------------------------------------------------ // ------------------------------------------------------------------

View File

@ -137,7 +137,7 @@ describe('AgentRunner contextStrategy', () => {
contextStrategy: { type: 'summarize', maxTokens: 20 }, contextStrategy: { type: 'summarize', maxTokens: 20 },
}) })
await runner.run( const result = await runner.run(
[{ role: 'user', content: [{ type: 'text', text: 'start' }] }], [{ role: 'user', content: [{ type: 'text', text: 'start' }] }],
{ onTrace: (e) => { traces.push(e) }, runId: 'run-summary', traceAgent: 'context-agent' }, { onTrace: (e) => { traces.push(e) }, runId: 'run-summary', traceAgent: 'context-agent' },
) )
@ -146,6 +146,23 @@ describe('AgentRunner contextStrategy', () => {
expect(summaryCall).toBeDefined() expect(summaryCall).toBeDefined()
const llmTraces = traces.filter(t => t.type === 'llm_call') const llmTraces = traces.filter(t => t.type === 'llm_call')
expect(llmTraces.some(t => t.type === 'llm_call' && t.phase === 'summary')).toBe(true) expect(llmTraces.some(t => t.type === 'llm_call' && t.phase === 'summary')).toBe(true)
// Summary adapter usage must count toward RunResult.tokenUsage (maxTokenBudget).
expect(result.tokenUsage.input_tokens).toBe(15 + 15 + 10 + 10)
expect(result.tokenUsage.output_tokens).toBe(25 + 25 + 20 + 20)
// After compaction, summary text is folded into the next user turn (not a
// standalone user message), preserving user/assistant alternation.
const turnAfterSummary = calls.find(
c => c.messages.some(
m => m.role === 'user' && m.content.some(
b => b.type === 'text' && b.text.includes('[Conversation summary]'),
),
),
)
expect(turnAfterSummary).toBeDefined()
const rolesAfterFirstUser = turnAfterSummary!.messages.map(m => m.role).join(',')
expect(rolesAfterFirstUser).not.toMatch(/^user,user/)
}) })
it('custom strategy calls compress callback and uses returned messages', async () => { it('custom strategy calls compress callback and uses returned messages', async () => {