fix: update test_stats_handler.py for langchain_core >=1.0 compatibility

In langchain_core >=1.0 plain Generation no longer stores a .message
attribute - that only exists on ChatGeneration. Tests were constructing
Generation(message=AIMessage(...)) which silently dropped the message,
making hasattr(generation, "message") return False and skipping the
token-counting path (all usage assertions failed with 0).

- Replace Generation(message=...) with ChatGeneration(message=AIMessage(...))
  in test_stats_handler_on_llm_end_with_usage and thread_safety test
- Use UsageMetadata(input_tokens=N, output_tokens=N, total_tokens=N)
  instead of bare dict (total_tokens is required in langchain_core 1.2+)
- Pass usage_metadata via AIMessage constructor instead of post-init
  attribute assignment (avoids pydantic validation bypass)
- Keep Generation(text=...) in test_stats_handler_on_llm_end_no_usage
  (correctly tests the "no usage" branch — plain Generation has no .message)

Co-authored-by: aguzererler <6199053+aguzererler@users.noreply.github.com>
Agent-Logs-Url: https://github.com/aguzererler/TradingAgents/sessions/ce079791-08ef-4f2e-9f31-a1ae6a26b4cb
This commit is contained in:
copilot-swe-agent[bot] 2026-03-22 06:58:38 +00:00
parent a8b909e2ca
commit 0e3edcdf5a
1 changed file with 10 additions and 11 deletions

View File

@@ -1,8 +1,9 @@
import threading
import pytest
from cli.stats_handler import StatsCallbackHandler
from langchain_core.outputs import LLMResult, Generation
from langchain_core.outputs import LLMResult, Generation, ChatGeneration
from langchain_core.messages import AIMessage
from langchain_core.messages.ai import UsageMetadata
def test_stats_handler_initial_state():
handler = StatsCallbackHandler()
@@ -35,11 +36,10 @@ def test_stats_handler_on_tool_start():
def test_stats_handler_on_llm_end_with_usage():
handler = StatsCallbackHandler()
# Mock usage metadata
usage_metadata = {"input_tokens": 10, "output_tokens": 20}
message = AIMessage(content="test response")
message.usage_metadata = usage_metadata
generation = Generation(message=message, text="test response")
# ChatGeneration wraps chat messages; Generation (plain text) has no .message attr.
usage_metadata = UsageMetadata(input_tokens=10, output_tokens=20, total_tokens=30)
message = AIMessage(content="test response", usage_metadata=usage_metadata)
generation = ChatGeneration(message=message)
response = LLMResult(generations=[[generation]])
handler.on_llm_end(response)
@@ -83,11 +83,10 @@ def test_stats_handler_thread_safety():
handler.on_llm_start({}, [])
handler.on_tool_start({}, "")
# Mock usage metadata for on_llm_end
usage_metadata = {"input_tokens": 1, "output_tokens": 1}
message = AIMessage(content="x")
message.usage_metadata = usage_metadata
generation = Generation(message=message, text="x")
# ChatGeneration wraps chat messages with usage_metadata
usage_metadata = UsageMetadata(input_tokens=1, output_tokens=1, total_tokens=2)
message = AIMessage(content="x", usage_metadata=usage_metadata)
generation = ChatGeneration(message=message)
response = LLMResult(generations=[[generation]])
handler.on_llm_end(response)