From e42a7edea3f7d75229f5f1ea0b62e85a15a7be57 Mon Sep 17 00:00:00 2001 From: Quanliang Liu Date: Thu, 6 Nov 2025 15:00:23 -0600 Subject: [PATCH] global news OpenAI bug fixed --- evaluation/run_evaluation.py | 16 +++---- tradingagents/dataflows/openai.py | 78 ++++++++++++++++++++----------- 2 files changed, 59 insertions(+), 35 deletions(-) diff --git a/evaluation/run_evaluation.py b/evaluation/run_evaluation.py index 2d43192d..070799b2 100644 --- a/evaluation/run_evaluation.py +++ b/evaluation/run_evaluation.py @@ -83,12 +83,12 @@ def run_evaluation( try: cfg = (config or DEFAULT_CONFIG).copy() # Fast eval defaults (you can override from CLI) - cfg["deep_think_llm"] = cfg.get("deep_think_llm", "gpt-5-nano") - cfg["quick_think_llm"] = cfg.get("quick_think_llm", "gpt-5-nano") + cfg["deep_think_llm"] = cfg.get("deep_think_llm", "o4-mini") + cfg["quick_think_llm"] = cfg.get("quick_think_llm", "gpt-4o-mini") cfg["max_debate_rounds"] = cfg.get("max_debate_rounds", 1) cfg["max_risk_discuss_rounds"] = cfg.get("max_risk_discuss_rounds", 1) # Deterministic-ish decoding for reproducibility - cfg.setdefault("llm_params", {}).update({"temperature": 0, "top_p": 1.0, "seed": 42}) + cfg.setdefault("llm_params", {}).update({"temperature": 0.7, "top_p": 1.0, "seed": 42}) print(f"\nInitializing TradingAgents...") print(f" Deep Thinking LLM: {cfg['deep_think_llm']}") @@ -161,7 +161,7 @@ def main(): parser.add_argument("--no-tradingagents", action="store_true", help="Skip TradingAgents") parser.add_argument("--output-dir", type=str, default=None, help="Output directory for results") parser.add_argument("--deep-llm", type=str, default="gpt-4o-mini", help="Deep thinking LLM model") - parser.add_argument("--quick-llm", type=str, default="gpt-4o-mini", help="Quick thinking LLM model") + parser.add_argument("--quick-llm", type=str, default="gpt-5-nano", help="Quick thinking LLM model") parser.add_argument("--debate-rounds", type=int, default=1, help="Number of debate rounds (default: 1)") # 
Used for debugging @@ -169,16 +169,16 @@ def main(): if is_debugging(): config = DEFAULT_CONFIG.copy() config.update({ - "deep_think_llm": "gpt-5-nano", - "quick_think_llm": "gpt-5-nano", + "deep_think_llm": "o4-mini", + "quick_think_llm": "gpt-4o-mini", "max_debate_rounds": 1, "max_risk_discuss_rounds": 1, - "llm_params": {"temperature": 0, "top_p": 1.0, "seed": 42}, + "llm_params": {"temperature": 0.7, "top_p": 1.0, "seed": 42}, }) run_evaluation( ticker="AAPL", start_date="2024-01-01", - end_date="2024-01-04", + end_date="2024-01-10", initial_capital=1000, include_tradingagents=True, output_dir="./evaluation/results", diff --git a/tradingagents/dataflows/openai.py b/tradingagents/dataflows/openai.py index 91a2258b..72fbeddf 100644 --- a/tradingagents/dataflows/openai.py +++ b/tradingagents/dataflows/openai.py @@ -1,3 +1,5 @@ +from datetime import datetime, timedelta + from openai import OpenAI from .config import get_config @@ -38,38 +40,60 @@ def get_stock_news_openai(query, start_date, end_date): def get_global_news_openai(curr_date, look_back_days=7, limit=5): + + def _extract_text(resp): + # 1) Preferred field for the Responses API + if hasattr(resp, "output_text") and resp.output_text: + return resp.output_text + + # 2) Structured outputs (some SDK builds) + try: + if resp.output and len(resp.output) > 0: + parts = resp.output[0].content or [] + texts = [] + for p in parts: + # p may be a plain object with .text, or a dict + t = getattr(p, "text", None) or (p.get("text") if isinstance(p, dict) else None) + if t: + texts.append(t) + if texts: + return "\n".join(texts) + except Exception: + pass + + # 3) Chat Completions style fallback (just in case) + try: + return resp.choices[0].message.content + except Exception: + pass + + # 4) Last resort: stringify the whole object + return str(resp) + + config = get_config() client = OpenAI(base_url=config["backend_url"]) - response = client.responses.create( - model=config["quick_think_llm"], - input=[ - { - "role": 
"system", - "content": [ - { - "type": "input_text", - "text": f"Can you search global or macroeconomics news from {look_back_days} days before {curr_date} to {curr_date} that would be informative for trading purposes? Make sure you only get the data posted during that period. Limit the results to {limit} articles.", - } - ], - } - ], - text={"format": {"type": "text"}}, - reasoning={}, - tools=[ - { - "type": "web_search_preview", - "user_location": {"type": "approximate"}, - "search_context_size": "low", - } - ], - temperature=1, - max_output_tokens=4096, - top_p=1, - store=True, + # Build a clean date window + end = datetime.strptime(curr_date, "%Y-%m-%d").date() + start = end - timedelta(days=look_back_days) + + prompt = ( + f"List {limit} global or macroeconomic news items helpful for trading, " + f"strictly published between {start.isoformat()} and {end.isoformat()} (inclusive). " + "For each item, give: date, headline, 1-2 sentence trading relevance. " + "Do not include articles outside the window." ) - return response.output[1].content[0].text + resp = client.responses.create( + model=config["quick_think_llm"], + input=prompt, + reasoning={}, + tools=[{"type": "web_search_preview"}], + max_output_tokens=4096, + store=False, + ) + + return _extract_text(resp) def get_fundamentals_openai(ticker, curr_date):