diff --git a/README.md b/README.md
index 8cf085e8..e31c43ad 100644
--- a/README.md
+++ b/README.md
@@ -112,9 +112,9 @@ conda create -n tradingagents python=3.13
 conda activate tradingagents
 ```
 
-Install dependencies:
+Install the package and its dependencies:
 ```bash
-pip install -r requirements.txt
+pip install .
 ```
 
 ### Required APIs
@@ -139,11 +139,12 @@ cp .env.example .env
 ```
 
 ### CLI Usage
 
-You can also try out the CLI directly by running:
+Launch the interactive CLI:
 
 ```bash
-python -m cli.main
+tradingagents # installed command
+python -m cli.main # alternative: run directly from source
 ```
 
-You will see a screen where you can select your desired tickers, date, LLMs, research depth, etc.
+You will see a screen where you can select your desired tickers, analysis date, LLM provider, research depth, and more.
diff --git a/cli/main.py b/cli/main.py
index df6dc891..a706f11d 100644
--- a/cli/main.py
+++ b/cli/main.py
@@ -462,7 +462,7 @@ def update_display(layout, spinner_text=None, stats_handler=None, start_time=Non
def get_user_selections():
"""Get all user selections before starting the analysis display."""
# Display ASCII art welcome message
- with open("./cli/static/welcome.txt", "r", encoding="utf-8") as f:
+ with open(Path(__file__).parent / "static" / "welcome.txt", "r", encoding="utf-8") as f:
welcome_ascii = f.read()
# Create welcome box content
diff --git a/pyproject.toml b/pyproject.toml
index 4c91a733..256d21d9 100644
--- a/pyproject.toml
+++ b/pyproject.toml
@@ -37,3 +37,6 @@ tradingagents = "cli.main:app"
[tool.setuptools.packages.find]
include = ["tradingagents*", "cli*"]
+
+[tool.setuptools.package-data]
+cli = ["static/*"]
diff --git a/requirements.txt b/requirements.txt
index 184468b8..9c558e35 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,21 +1 @@
-typing-extensions
-langchain-core
-langchain-openai
-langchain-experimental
-pandas
-yfinance
-stockstats
-langgraph
-rank-bm25
-setuptools
-backtrader
-parsel
-requests
-tqdm
-pytz
-redis
-rich
-typer
-questionary
-langchain_anthropic
-langchain-google-genai
+.
diff --git a/tradingagents/llm_clients/base_client.py b/tradingagents/llm_clients/base_client.py
index 43845575..9c3dd17c 100644
--- a/tradingagents/llm_clients/base_client.py
+++ b/tradingagents/llm_clients/base_client.py
@@ -2,6 +2,25 @@ from abc import ABC, abstractmethod
from typing import Any, Optional
+def normalize_content(response):
+ """Normalize LLM response content to a plain string.
+
+ Multiple providers (OpenAI Responses API, Google Gemini 3) return content
+ as a list of typed blocks, e.g. [{'type': 'reasoning', ...}, {'type': 'text', 'text': '...'}].
+ Downstream agents expect response.content to be a string. This extracts
+ and joins the text blocks, discarding reasoning/metadata blocks.
+ """
+ content = response.content
+ if isinstance(content, list):
+ texts = [
+ item.get("text", "") if isinstance(item, dict) and item.get("type") == "text"
+ else item if isinstance(item, str) else ""
+ for item in content
+ ]
+ response.content = "\n".join(t for t in texts if t)
+ return response
+
+
class BaseLLMClient(ABC):
"""Abstract base class for LLM clients."""
diff --git a/tradingagents/llm_clients/google_client.py b/tradingagents/llm_clients/google_client.py
index 3dd85e3f..7401df0e 100644
--- a/tradingagents/llm_clients/google_client.py
+++ b/tradingagents/llm_clients/google_client.py
@@ -2,30 +2,19 @@ from typing import Any, Optional
from langchain_google_genai import ChatGoogleGenerativeAI
-from .base_client import BaseLLMClient
+from .base_client import BaseLLMClient, normalize_content
from .validators import validate_model
class NormalizedChatGoogleGenerativeAI(ChatGoogleGenerativeAI):
"""ChatGoogleGenerativeAI with normalized content output.
- Gemini 3 models return content as list: [{'type': 'text', 'text': '...'}]
+ Gemini 3 models return content as list of typed blocks.
This normalizes to string for consistent downstream handling.
"""
- def _normalize_content(self, response):
- content = response.content
- if isinstance(content, list):
- texts = [
- item.get("text", "") if isinstance(item, dict) and item.get("type") == "text"
- else item if isinstance(item, str) else ""
- for item in content
- ]
- response.content = "\n".join(t for t in texts if t)
- return response
-
def invoke(self, input, config=None, **kwargs):
- return self._normalize_content(super().invoke(input, config, **kwargs))
+ return normalize_content(super().invoke(input, config, **kwargs))
class GoogleClient(BaseLLMClient):
diff --git a/tradingagents/llm_clients/openai_client.py b/tradingagents/llm_clients/openai_client.py
index c314d077..fd9b4e33 100644
--- a/tradingagents/llm_clients/openai_client.py
+++ b/tradingagents/llm_clients/openai_client.py
@@ -3,9 +3,21 @@ from typing import Any, Optional
from langchain_openai import ChatOpenAI
-from .base_client import BaseLLMClient
+from .base_client import BaseLLMClient, normalize_content
from .validators import validate_model
+
+class NormalizedChatOpenAI(ChatOpenAI):
+ """ChatOpenAI with normalized content output.
+
+ The Responses API returns content as a list of typed blocks
+ (reasoning, text, etc.). This normalizes to string for consistent
+ downstream handling.
+ """
+
+ def invoke(self, input, config=None, **kwargs):
+ return normalize_content(super().invoke(input, config, **kwargs))
+
# Kwargs forwarded from user config to ChatOpenAI
_PASSTHROUGH_KWARGS = (
"timeout", "max_retries", "reasoning_effort",
@@ -66,7 +78,7 @@ class OpenAIClient(BaseLLMClient):
if self.provider == "openai":
llm_kwargs["use_responses_api"] = True
- return ChatOpenAI(**llm_kwargs)
+ return NormalizedChatOpenAI(**llm_kwargs)
def validate_model(self) -> bool:
"""Validate model for the provider."""