Fix OpenAI global news bug
parent c29ca882af
commit e42a7edea3
@@ -83,12 +83,12 @@ def run_evaluation(
     try:
         cfg = (config or DEFAULT_CONFIG).copy()
         # Fast eval defaults (you can override from CLI)
-        cfg["deep_think_llm"] = cfg.get("deep_think_llm", "gpt-5-nano")
-        cfg["quick_think_llm"] = cfg.get("quick_think_llm", "gpt-5-nano")
+        cfg["deep_think_llm"] = cfg.get("deep_think_llm", "o4-mini")
+        cfg["quick_think_llm"] = cfg.get("quick_think_llm", "gpt-4o-mini")
         cfg["max_debate_rounds"] = cfg.get("max_debate_rounds", 1)
         cfg["max_risk_discuss_rounds"] = cfg.get("max_risk_discuss_rounds", 1)
         # Deterministic-ish decoding for reproducibility
-        cfg.setdefault("llm_params", {}).update({"temperature": 0, "top_p": 1.0, "seed": 42})
+        cfg.setdefault("llm_params", {}).update({"temperature": 0.7, "top_p": 1.0, "seed": 42})
 
         print(f"\nInitializing TradingAgents...")
         print(f"  Deep Thinking LLM: {cfg['deep_think_llm']}")
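Note: cfg.get(key, default) only applies the fallback when the key is absent, so values set upstream (for example from the CLI flags below) still win. A minimal sketch of that behavior, not part of the commit:

    cfg = {"quick_think_llm": "gpt-4o"}  # value supplied by the caller
    cfg["quick_think_llm"] = cfg.get("quick_think_llm", "gpt-4o-mini")
    assert cfg["quick_think_llm"] == "gpt-4o"  # caller's value preserved

    cfg.pop("quick_think_llm")  # only a missing key picks up the default
    cfg["quick_think_llm"] = cfg.get("quick_think_llm", "gpt-4o-mini")
    assert cfg["quick_think_llm"] == "gpt-4o-mini"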
@@ -161,7 +161,7 @@ def main():
     parser.add_argument("--no-tradingagents", action="store_true", help="Skip TradingAgents")
     parser.add_argument("--output-dir", type=str, default=None, help="Output directory for results")
     parser.add_argument("--deep-llm", type=str, default="gpt-4o-mini", help="Deep thinking LLM model")
-    parser.add_argument("--quick-llm", type=str, default="gpt-4o-mini", help="Quick thinking LLM model")
+    parser.add_argument("--quick-llm", type=str, default="gpt-5-nano", help="Quick thinking LLM model")
     parser.add_argument("--debate-rounds", type=int, default=1, help="Number of debate rounds (default: 1)")
 
     # Used for debugging
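Note: argparse fills in the default only when the flag is omitted, so invocations that pass --quick-llm explicitly are unaffected by the new default. A self-contained sketch:

    import argparse

    parser = argparse.ArgumentParser()
    parser.add_argument("--quick-llm", type=str, default="gpt-5-nano")
    print(parser.parse_args([]).quick_llm)                              # gpt-5-nano
    print(parser.parse_args(["--quick-llm", "gpt-4o-mini"]).quick_llm)  # gpt-4o-mini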
@@ -169,16 +169,16 @@ def main():
     if is_debugging():
         config = DEFAULT_CONFIG.copy()
         config.update({
-            "deep_think_llm": "gpt-5-nano",
-            "quick_think_llm": "gpt-5-nano",
+            "deep_think_llm": "o4-mini",
+            "quick_think_llm": "gpt-4o-mini",
             "max_debate_rounds": 1,
             "max_risk_discuss_rounds": 1,
-            "llm_params": {"temperature": 0, "top_p": 1.0, "seed": 42},
+            "llm_params": {"temperature": 0.7, "top_p": 1.0, "seed": 42},
         })
         run_evaluation(
             ticker="AAPL",
             start_date="2024-01-01",
-            end_date="2024-01-04",
+            end_date="2024-01-10",
             initial_capital=1000,
             include_tradingagents=True,
             output_dir="./evaluation/results",
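Note: is_debugging() is referenced but not defined in this diff; a common implementation, assumed here as a stand-in rather than the project's actual helper, checks whether a trace function is installed:

    import sys

    def is_debugging():
        # Debuggers such as pdb or an IDE install a global trace function.
        return sys.gettrace() is not None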
@@ -1,3 +1,5 @@
+from datetime import datetime, timedelta
+
 from openai import OpenAI
 from .config import get_config
 
@@ -38,38 +40,60 @@ def get_stock_news_openai(query, start_date, end_date):
 
 
 def get_global_news_openai(curr_date, look_back_days=7, limit=5):
+    def _extract_text(resp):
+        # 1) Preferred field for the Responses API
+        if hasattr(resp, "output_text") and resp.output_text:
+            return resp.output_text
+
+        # 2) Structured outputs (some SDK builds)
+        try:
+            if resp.output and len(resp.output) > 0:
+                parts = resp.output[0].content or []
+                texts = []
+                for p in parts:
+                    # p may be a plain object with .text, or a dict
+                    t = getattr(p, "text", None) or (p.get("text") if isinstance(p, dict) else None)
+                    if t:
+                        texts.append(t)
+                if texts:
+                    return "\n".join(texts)
+        except Exception:
+            pass
+
+        # 3) Chat Completions style fallback (just in case)
+        try:
+            return resp.choices[0].message["content"]
+        except Exception:
+            pass
+
+        # 4) Last resort: stringify the whole object
+        return str(resp)
+
     config = get_config()
     client = OpenAI(base_url=config["backend_url"])
 
-    response = client.responses.create(
-        model=config["quick_think_llm"],
-        input=[
-            {
-                "role": "system",
-                "content": [
-                    {
-                        "type": "input_text",
-                        "text": f"Can you search global or macroeconomics news from {look_back_days} days before {curr_date} to {curr_date} that would be informative for trading purposes? Make sure you only get the data posted during that period. Limit the results to {limit} articles.",
-                    }
-                ],
-            }
-        ],
-        text={"format": {"type": "text"}},
-        reasoning={},
-        tools=[
-            {
-                "type": "web_search_preview",
-                "user_location": {"type": "approximate"},
-                "search_context_size": "low",
-            }
-        ],
-        temperature=1,
-        max_output_tokens=4096,
-        top_p=1,
-        store=True,
-    )
-
-    return response.output[1].content[0].text
+    # Build a clean date window
+    end = datetime.strptime(curr_date, "%Y-%m-%d").date()
+    start = end - timedelta(days=look_back_days)
+
+    prompt = (
+        f"List {limit} global or macroeconomic news items helpful for trading, "
+        f"strictly published between {start.isoformat()} and {end.isoformat()} (inclusive). "
+        "For each item, give: date, headline, 1-2 sentence trading relevance. "
+        "Do not include articles outside the window."
+    )
+
+    resp = client.responses.create(
+        model=config["quick_think_llm"],
+        input=prompt,
+        reasoning={},
+        tools=[{"type": "web_search_preview"}],
+        max_output_tokens=4096,
+        store=False,
+    )
+
+    return _extract_text(resp)
 
 
 def get_fundamentals_openai(ticker, curr_date):
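Note: a minimal usage sketch of the rewritten helper, not part of the commit. It assumes an OpenAI API key in the environment, a backend_url that supports the Responses API, and a guessed import path:

    from tradingagents.dataflows.interface import get_global_news_openai  # assumed path

    # For curr_date="2024-01-10" with look_back_days=7, the prompt pins the
    # window to 2024-01-03 .. 2024-01-10, and _extract_text normalizes the
    # SDK's response shape to plain text.
    news = get_global_news_openai("2024-01-10", look_back_days=7, limit=5)
    print(news)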