Clean up debug test files

🤖 Generated with Claude Code

Co-Authored-By: Claude <noreply@anthropic.com>
佐藤優一 2025-08-11 11:51:34 +09:00
parent f658154f1c
commit 9219fba1b4
5 changed files with 0 additions and 252 deletions

@@ -1,39 +0,0 @@
#!/usr/bin/env python
"""Test the actual market analyst test."""
import pytest

from tests.conftest import MockResult

# Import test fixtures
pytest_plugins = ["tests.conftest"]


def test_debug():
    from tests.conftest import mock_llm, mock_toolkit, sample_agent_state
    from tradingagents.agents.analysts.market_analyst import create_market_analyst

    # Create fixtures
    llm = mock_llm()
    toolkit = mock_toolkit()
    state = sample_agent_state()

    # Setup like the test does
    toolkit.config = {"online_tools": False}
    mock_result = MockResult(content="Market analysis complete", tool_calls=[])
    llm._chain_mock.invoke.return_value = mock_result

    print(f"llm: {llm}")
    print(f"llm._chain_mock: {llm._chain_mock}")
    print(f"llm._chain_mock.invoke: {llm._chain_mock.invoke}")
    print(f"llm._chain_mock.invoke.return_value: {llm._chain_mock.invoke.return_value}")

    analyst_node = create_market_analyst(llm, toolkit)

    # Execute
    result = analyst_node(state)

    print(f"Result messages: {result['messages']}")
    print(f"Expected: {[mock_result]}")
    print(f"Are they equal? {result['messages'] == [mock_result]}")


if __name__ == "__main__":
    test_debug()

@@ -1,71 +0,0 @@
#!/usr/bin/env python
"""Test the actual market analyst test."""
from unittest.mock import Mock

from tests.conftest import MockResult
from tradingagents.agents.analysts.market_analyst import create_market_analyst

# Recreate the mock_llm fixture logic
mock = Mock()
mock.model_name = "test-model"

# Create a default mock result with proper tool_calls
default_result = MockResult()

# Create a chain mock (what prompt | llm.bind_tools(tools) returns)
chain_mock = Mock()
chain_mock.invoke = Mock(return_value=default_result)

# Store the chain_mock on the mock_llm so tests can configure it
mock._chain_mock = chain_mock

# Mock the bind_tools to return a mock that handles piping
bound_tools_mock = Mock()


# Handle the pipe operation (prompt | llm.bind_tools(tools))
def handle_pipe(self, other):
    # Return the chain_mock that tests can configure
    print(f"handle_pipe called with self={self}, other={other}")
    print(f"Returning chain_mock: {chain_mock}")
    return chain_mock


bound_tools_mock.__ror__ = handle_pipe  # Right-side or (other | bound_tools_mock)
mock.bind_tools = Mock(return_value=bound_tools_mock)

# Create toolkit
toolkit = Mock()
toolkit.config = {"online_tools": False}

# Set up toolkit methods with proper name attributes
toolkit.get_YFin_data = Mock()
toolkit.get_YFin_data.name = "get_YFin_data"
toolkit.get_stockstats_indicators_report = Mock()
toolkit.get_stockstats_indicators_report.name = "get_stockstats_indicators_report"

# Setup like the test does
mock_result = MockResult(content="Market analysis complete", tool_calls=[])
mock._chain_mock.invoke.return_value = mock_result

# Create state
state = {
    "company_of_interest": "AAPL",
    "trade_date": "2024-05-10",
    "messages": [],
}

print(f"mock: {mock}")
print(f"mock._chain_mock: {mock._chain_mock}")
print(f"mock._chain_mock.invoke: {mock._chain_mock.invoke}")
print(f"mock._chain_mock.invoke.return_value: {mock._chain_mock.invoke.return_value}")

analyst_node = create_market_analyst(mock, toolkit)

# Execute
result = analyst_node(state)

print(f"Result messages: {result['messages']}")
print(f"Expected: {[mock_result]}")
print(f"First message: {result['messages'][0]}")
print(f"Are they equal? {result['messages'] == [mock_result]}")
print(f"First item equal? {result['messages'][0] == mock_result}")
print(f"Are they the same object? {result['messages'][0] is mock_result}")

@@ -1,45 +0,0 @@
#!/usr/bin/env python
"""Debug script to understand the mock chain behavior."""
from unittest.mock import Mock

from tests.conftest import MockResult

# Recreate the mock_llm setup
mock = Mock()
mock.model_name = "test-model"

# Create a default mock result with proper tool_calls
default_result = MockResult()

# Mock the bind_tools to return a mock that handles piping
bound_mock = Mock()
bound_mock.invoke = Mock(return_value=default_result)


# Handle the pipe operation (prompt | llm.bind_tools(tools))
def handle_pipe(self, other):
    # Return a mock that will use the bound_mock's invoke method
    pipe_result = Mock()
    pipe_result.invoke = bound_mock.invoke
    return pipe_result


bound_mock.__ror__ = handle_pipe
mock.bind_tools.return_value = bound_mock

# Now simulate what a test does
mock_result = MockResult(content="Test content", tool_calls=[])
mock.bind_tools.return_value.invoke.return_value = mock_result

# Simulate what the code does
prompt = Mock()  # Simulate a prompt
tools = []  # Simulate tools
chain = prompt | mock.bind_tools(tools)

# Invoke the chain
result = chain.invoke([])

print(f"Expected: {mock_result}")
print(f"Got: {result}")
print(f"Are they the same? {result is mock_result}")
print(f"bound_mock.invoke: {bound_mock.invoke}")
print(f"chain.invoke: {chain.invoke}")
print(f"Are invoke methods the same? {chain.invoke is bound_mock.invoke}")

@@ -1,42 +0,0 @@
#!/usr/bin/env python
"""Debug script to understand the mock chain behavior."""
from unittest.mock import Mock, patch

from tests.conftest import MockResult


# Let's create a test to see what happens
def test_mock():
    mock_llm = Mock()
    mock_llm.model_name = "test-model"

    # Create a mock result
    test_result = MockResult(content="Test content", tool_calls=[])

    # Create the chain mock (what prompt | llm.bind_tools(tools) returns)
    chain_mock = Mock()
    chain_mock.invoke = Mock(return_value=test_result)

    # Make bind_tools return a mock that when piped, returns our chain_mock
    bound_tools_mock = Mock()

    # This is the key: when something is piped to bound_tools_mock,
    # it should return our chain_mock
    def pipe_handler(self, other):
        return chain_mock

    bound_tools_mock.__ror__ = pipe_handler
    mock_llm.bind_tools = Mock(return_value=bound_tools_mock)

    # Now simulate what the production code does
    prompt = Mock()
    tools = []

    # This is what happens in the actual code
    chain = prompt | mock_llm.bind_tools(tools)
    result = chain.invoke([])

    print(f"Expected: {test_result}")
    print(f"Got: {result}")
    print(f"Are they the same? {result is test_result}")


test_mock()

@@ -1,55 +0,0 @@
#!/usr/bin/env python
"""Debug script to test our mock setup."""
from unittest.mock import Mock

from tests.conftest import MockResult

# Recreate our fixture setup
mock = Mock()
mock.model_name = "test-model"

# Create a default mock result with proper tool_calls
default_result = MockResult()

# Create a chain mock (what prompt | llm.bind_tools(tools) returns)
chain_mock = Mock()
chain_mock.invoke = Mock(return_value=default_result)

# Store the chain_mock on the mock_llm so tests can configure it
mock._chain_mock = chain_mock

# Mock the bind_tools to return a mock that handles piping
bound_tools_mock = Mock()


# Handle the pipe operation (prompt | llm.bind_tools(tools))
def handle_pipe(self, other):
    # Return the chain_mock that tests can configure
    return chain_mock


bound_tools_mock.__ror__ = handle_pipe  # Right-side or (other | bound_tools_mock)
mock.bind_tools = Mock(return_value=bound_tools_mock)

# Now simulate what a test does
mock_result = MockResult(content="Test content", tool_calls=[])
mock._chain_mock.invoke.return_value = mock_result

# Simulate what the production code does
prompt = Mock()
tools = []

# This is what happens in the actual code:
# chain = prompt | llm.bind_tools(tools)
print(f"bind_tools is called with tools: {tools}")
bound_result = mock.bind_tools(tools)
print(f"bind_tools returns: {bound_result}")
print(f"Has __ror__? {hasattr(bound_result, '__ror__')}")

chain = prompt | bound_result
print(f"chain is: {chain}")
print(f"chain_mock is: {chain_mock}")
print(f"Are they the same? {chain is chain_mock}")

result = chain.invoke([])
print(f"Result: {result}")
print(f"Expected: {mock_result}")
print(f"Are they the same? {result is mock_result}")