chore: consolidate install, fix CLI portability, normalize LLM responses
- Point requirements.txt to pyproject.toml as single source of truth - Resolve welcome.txt path relative to module for CLI portability - Include cli/static files in package build - Extract shared normalize_content for OpenAI Responses API and Gemini 3 list-format responses into base_client.py - Update README install and CLI usage instructions
This commit is contained in:
parent
0b13145dc0
commit
77755f0431
11
README.md
11
README.md
|
|
@ -112,9 +112,9 @@ conda create -n tradingagents python=3.13
|
||||||
conda activate tradingagents
|
conda activate tradingagents
|
||||||
```
|
```
|
||||||
|
|
||||||
Install dependencies:
|
Install the package and its dependencies:
|
||||||
```bash
|
```bash
|
||||||
pip install -r requirements.txt
|
pip install .
|
||||||
```
|
```
|
||||||
|
|
||||||
### Required APIs
|
### Required APIs
|
||||||
|
|
@ -139,11 +139,12 @@ cp .env.example .env
|
||||||
|
|
||||||
### CLI Usage
|
### CLI Usage
|
||||||
|
|
||||||
You can also try out the CLI directly by running:
|
Launch the interactive CLI:
|
||||||
```bash
|
```bash
|
||||||
python -m cli.main
|
tradingagents # installed command
|
||||||
|
python -m cli.main # alternative: run directly from source
|
||||||
```
|
```
|
||||||
You will see a screen where you can select your desired tickers, date, LLMs, research depth, etc.
|
You will see a screen where you can select your desired tickers, analysis date, LLM provider, research depth, and more.
|
||||||
|
|
||||||
<p align="center">
|
<p align="center">
|
||||||
<img src="assets/cli/cli_init.png" width="100%" style="display: inline-block; margin: 0 2%;">
|
<img src="assets/cli/cli_init.png" width="100%" style="display: inline-block; margin: 0 2%;">
|
||||||
|
|
|
||||||
|
|
@ -462,7 +462,7 @@ def update_display(layout, spinner_text=None, stats_handler=None, start_time=Non
|
||||||
def get_user_selections():
|
def get_user_selections():
|
||||||
"""Get all user selections before starting the analysis display."""
|
"""Get all user selections before starting the analysis display."""
|
||||||
# Display ASCII art welcome message
|
# Display ASCII art welcome message
|
||||||
with open("./cli/static/welcome.txt", "r", encoding="utf-8") as f:
|
with open(Path(__file__).parent / "static" / "welcome.txt", "r", encoding="utf-8") as f:
|
||||||
welcome_ascii = f.read()
|
welcome_ascii = f.read()
|
||||||
|
|
||||||
# Create welcome box content
|
# Create welcome box content
|
||||||
|
|
|
||||||
|
|
@ -37,3 +37,6 @@ tradingagents = "cli.main:app"
|
||||||
|
|
||||||
[tool.setuptools.packages.find]
|
[tool.setuptools.packages.find]
|
||||||
include = ["tradingagents*", "cli*"]
|
include = ["tradingagents*", "cli*"]
|
||||||
|
|
||||||
|
[tool.setuptools.package-data]
|
||||||
|
cli = ["static/*"]
|
||||||
|
|
|
||||||
|
|
@ -1,21 +1 @@
|
||||||
typing-extensions
|
.
|
||||||
langchain-core
|
|
||||||
langchain-openai
|
|
||||||
langchain-experimental
|
|
||||||
pandas
|
|
||||||
yfinance
|
|
||||||
stockstats
|
|
||||||
langgraph
|
|
||||||
rank-bm25
|
|
||||||
setuptools
|
|
||||||
backtrader
|
|
||||||
parsel
|
|
||||||
requests
|
|
||||||
tqdm
|
|
||||||
pytz
|
|
||||||
redis
|
|
||||||
rich
|
|
||||||
typer
|
|
||||||
questionary
|
|
||||||
langchain_anthropic
|
|
||||||
langchain-google-genai
|
|
||||||
|
|
|
||||||
|
|
@ -2,6 +2,25 @@ from abc import ABC, abstractmethod
|
||||||
from typing import Any, Optional
|
from typing import Any, Optional
|
||||||
|
|
||||||
|
|
||||||
|
def normalize_content(response):
    """Normalize LLM response content to a plain string.

    Multiple providers (OpenAI Responses API, Google Gemini 3) return content
    as a list of typed blocks, e.g. [{'type': 'reasoning', ...},
    {'type': 'text', 'text': '...'}]. Downstream agents expect
    ``response.content`` to be a string, so this extracts and joins the text
    blocks, discarding reasoning/metadata blocks.

    The response object is mutated in place and returned for chaining.
    """
    raw = response.content
    if not isinstance(raw, list):
        # Already a plain string (or other scalar) -- nothing to normalize.
        return response

    pieces = []
    for block in raw:
        if isinstance(block, str):
            # Bare string blocks are kept verbatim.
            pieces.append(block)
        elif isinstance(block, dict) and block.get("type") == "text":
            pieces.append(block.get("text", ""))
        # Any other block type (reasoning, metadata, ...) is dropped.

    # Empty fragments are filtered out so joins stay clean.
    response.content = "\n".join(piece for piece in pieces if piece)
    return response
|
||||||
|
|
||||||
|
|
||||||
class BaseLLMClient(ABC):
|
class BaseLLMClient(ABC):
|
||||||
"""Abstract base class for LLM clients."""
|
"""Abstract base class for LLM clients."""
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -2,30 +2,19 @@ from typing import Any, Optional
|
||||||
|
|
||||||
from langchain_google_genai import ChatGoogleGenerativeAI
|
from langchain_google_genai import ChatGoogleGenerativeAI
|
||||||
|
|
||||||
from .base_client import BaseLLMClient
|
from .base_client import BaseLLMClient, normalize_content
|
||||||
from .validators import validate_model
|
from .validators import validate_model
|
||||||
|
|
||||||
|
|
||||||
class NormalizedChatGoogleGenerativeAI(ChatGoogleGenerativeAI):
    """ChatGoogleGenerativeAI with normalized content output.

    Gemini 3 models return content as a list of typed blocks; downstream
    agents expect ``response.content`` to be a string. Every ``invoke``
    result is therefore passed through the shared ``normalize_content``
    helper before being handed back to the caller.
    """

    def invoke(self, input, config=None, **kwargs):
        # Delegate to the underlying client, then flatten list-form content.
        raw_response = super().invoke(input, config, **kwargs)
        return normalize_content(raw_response)
||||||
|
|
||||||
|
|
||||||
class GoogleClient(BaseLLMClient):
|
class GoogleClient(BaseLLMClient):
|
||||||
|
|
|
||||||
|
|
@ -3,9 +3,21 @@ from typing import Any, Optional
|
||||||
|
|
||||||
from langchain_openai import ChatOpenAI
|
from langchain_openai import ChatOpenAI
|
||||||
|
|
||||||
from .base_client import BaseLLMClient
|
from .base_client import BaseLLMClient, normalize_content
|
||||||
from .validators import validate_model
|
from .validators import validate_model
|
||||||
|
|
||||||
|
|
||||||
|
class NormalizedChatOpenAI(ChatOpenAI):
    """ChatOpenAI with normalized content output.

    The Responses API returns content as a list of typed blocks
    (reasoning, text, etc.); downstream agents expect ``response.content``
    to be a string. Every ``invoke`` result is therefore passed through
    the shared ``normalize_content`` helper before being handed back.
    """

    def invoke(self, input, config=None, **kwargs):
        # Delegate to the underlying client, then flatten list-form content.
        raw_response = super().invoke(input, config, **kwargs)
        return normalize_content(raw_response)
|
||||||
|
|
||||||
# Kwargs forwarded from user config to ChatOpenAI
|
# Kwargs forwarded from user config to ChatOpenAI
|
||||||
_PASSTHROUGH_KWARGS = (
|
_PASSTHROUGH_KWARGS = (
|
||||||
"timeout", "max_retries", "reasoning_effort",
|
"timeout", "max_retries", "reasoning_effort",
|
||||||
|
|
@ -66,7 +78,7 @@ class OpenAIClient(BaseLLMClient):
|
||||||
if self.provider == "openai":
|
if self.provider == "openai":
|
||||||
llm_kwargs["use_responses_api"] = True
|
llm_kwargs["use_responses_api"] = True
|
||||||
|
|
||||||
return ChatOpenAI(**llm_kwargs)
|
return NormalizedChatOpenAI(**llm_kwargs)
|
||||||
|
|
||||||
def validate_model(self) -> bool:
|
def validate_model(self) -> bool:
|
||||||
"""Validate model for the provider."""
|
"""Validate model for the provider."""
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue