chore: consolidate install, fix CLI portability, normalize LLM responses

- Point requirements.txt to pyproject.toml as single source of truth
- Resolve welcome.txt path relative to module for CLI portability
- Include cli/static files in package build
- Extract shared normalize_content for OpenAI Responses API and
  Gemini 3 list-format responses into base_client.py
- Update README install and CLI usage instructions
This commit is contained in:
Yijia-Xiao 2026-03-22 21:38:01 +00:00
parent 0b13145dc0
commit 77755f0431
7 changed files with 47 additions and 43 deletions

View File

@@ -112,9 +112,9 @@ conda create -n tradingagents python=3.13
conda activate tradingagents
```
Install dependencies:
Install the package and its dependencies:
```bash
pip install -r requirements.txt
pip install .
```
### Required APIs
@@ -139,11 +139,12 @@ cp .env.example .env
### CLI Usage
You can also try out the CLI directly by running:
Launch the interactive CLI:
```bash
python -m cli.main
tradingagents # installed command
python -m cli.main # alternative: run directly from source
```
You will see a screen where you can select your desired tickers, date, LLMs, research depth, etc.
You will see a screen where you can select your desired tickers, analysis date, LLM provider, research depth, and more.
<p align="center">
<img src="assets/cli/cli_init.png" width="100%" style="display: inline-block; margin: 0 2%;">

View File

@@ -462,7 +462,7 @@ def update_display(layout, spinner_text=None, stats_handler=None, start_time=Non
def get_user_selections():
"""Get all user selections before starting the analysis display."""
# Display ASCII art welcome message
with open("./cli/static/welcome.txt", "r", encoding="utf-8") as f:
with open(Path(__file__).parent / "static" / "welcome.txt", "r", encoding="utf-8") as f:
welcome_ascii = f.read()
# Create welcome box content

View File

@@ -37,3 +37,6 @@ tradingagents = "cli.main:app"
[tool.setuptools.packages.find]
include = ["tradingagents*", "cli*"]
[tool.setuptools.package-data]
cli = ["static/*"]

View File

@@ -1,21 +1 @@
typing-extensions
langchain-core
langchain-openai
langchain-experimental
pandas
yfinance
stockstats
langgraph
rank-bm25
setuptools
backtrader
parsel
requests
tqdm
pytz
redis
rich
typer
questionary
langchain_anthropic
langchain-google-genai
.

View File

@@ -2,6 +2,25 @@ from abc import ABC, abstractmethod
from typing import Any, Optional
def normalize_content(response):
    """Normalize an LLM response's content to a plain string.

    Multiple providers (OpenAI Responses API, Google Gemini 3) return content
    as a list of typed blocks, e.g. ``[{'type': 'reasoning', ...},
    {'type': 'text', 'text': '...'}]``. Downstream agents expect
    ``response.content`` to be a string, so this extracts and joins the text
    blocks, discarding reasoning/metadata blocks. The response object is
    mutated in place and returned.
    """
    blocks = response.content
    if not isinstance(blocks, list):
        # Already a plain string (or other scalar) -- nothing to do.
        return response

    def _text_of(block):
        # Bare strings pass through; only 'text'-typed dict blocks keep
        # their payload, everything else (reasoning, metadata) is dropped.
        if isinstance(block, str):
            return block
        if isinstance(block, dict) and block.get("type") == "text":
            return block.get("text", "")
        return ""

    pieces = (_text_of(b) for b in blocks)
    response.content = "\n".join(p for p in pieces if p)
    return response
class BaseLLMClient(ABC):
"""Abstract base class for LLM clients."""

View File

@@ -2,30 +2,19 @@ from typing import Any, Optional
from langchain_google_genai import ChatGoogleGenerativeAI
from .base_client import BaseLLMClient
from .base_client import BaseLLMClient, normalize_content
from .validators import validate_model
class NormalizedChatGoogleGenerativeAI(ChatGoogleGenerativeAI):
"""ChatGoogleGenerativeAI with normalized content output.
Gemini 3 models return content as list: [{'type': 'text', 'text': '...'}]
Gemini 3 models return content as list of typed blocks.
This normalizes to string for consistent downstream handling.
"""
def _normalize_content(self, response):
content = response.content
if isinstance(content, list):
texts = [
item.get("text", "") if isinstance(item, dict) and item.get("type") == "text"
else item if isinstance(item, str) else ""
for item in content
]
response.content = "\n".join(t for t in texts if t)
return response
def invoke(self, input, config=None, **kwargs):
return self._normalize_content(super().invoke(input, config, **kwargs))
return normalize_content(super().invoke(input, config, **kwargs))
class GoogleClient(BaseLLMClient):

View File

@@ -3,9 +3,21 @@ from typing import Any, Optional
from langchain_openai import ChatOpenAI
from .base_client import BaseLLMClient
from .base_client import BaseLLMClient, normalize_content
from .validators import validate_model
class NormalizedChatOpenAI(ChatOpenAI):
    """ChatOpenAI whose ``invoke`` always yields string content.

    The Responses API returns content as a list of typed blocks
    (reasoning, text, etc.). This normalizes to string for consistent
    downstream handling.
    """

    def invoke(self, input, config=None, **kwargs):
        # Delegate to ChatOpenAI, then flatten any list-of-blocks content.
        raw_response = super().invoke(input, config, **kwargs)
        return normalize_content(raw_response)
# Kwargs forwarded from user config to ChatOpenAI
_PASSTHROUGH_KWARGS = (
"timeout", "max_retries", "reasoning_effort",
@@ -66,7 +78,7 @@ class OpenAIClient(BaseLLMClient):
if self.provider == "openai":
llm_kwargs["use_responses_api"] = True
return ChatOpenAI(**llm_kwargs)
return NormalizedChatOpenAI(**llm_kwargs)
def validate_model(self) -> bool:
"""Validate model for the provider."""