This commit is contained in:
nornen0202 2026-04-07 15:02:17 +02:00 committed by GitHub
commit 7dacb933b2
No known key found for this signature in database
GPG Key ID: B5690EEEBB952194
42 changed files with 5947 additions and 133 deletions

View File

@ -0,0 +1,274 @@
# Daily scheduled TradingAgents run on a self-hosted Windows runner,
# followed by a GitHub Pages deployment of the generated static site.
name: Daily Codex Analysis

on:
  schedule:
    # 00:13 UTC = 09:13 Asia/Seoul
    - cron: "13 0 * * *"
  workflow_dispatch:
    inputs:
      tickers:
        description: "Optional comma-separated tickers override"
        required: false
        type: string
      trade_date:
        description: "Optional YYYY-MM-DD trade date override"
        required: false
        type: string
      site_only:
        description: "Only rebuild GitHub Pages from archived runs"
        required: false
        type: boolean
        default: false

permissions:
  contents: read
  pages: write
  id-token: write

# Never run two analyses at once; let an in-flight run finish.
concurrency:
  group: daily-codex-analysis
  cancel-in-progress: false

jobs:
  analyze:
    runs-on: [self-hosted, Windows]
    timeout-minutes: 240
    defaults:
      run:
        # Run each step script from a file so multi-line PowerShell works
        # without profile startup or interactive prompts.
        shell: powershell -NoProfile -NonInteractive -ExecutionPolicy Bypass -File {0}
    env:
      PYTHONUTF8: "1"
      PIP_DISABLE_PIP_VERSION_CHECK: "1"
      TRADINGAGENTS_SITE_DIR: ${{ github.workspace }}\site
      TRADINGAGENTS_ARCHIVE_DIR: ${{ vars.TRADINGAGENTS_ARCHIVE_DIR }}
      CODEX_BINARY: ${{ vars.CODEX_BINARY }}
      CODEX_HOME: ${{ vars.CODEX_HOME }}
      # Workflow inputs are surfaced as environment variables instead of
      # being interpolated into step scripts, to avoid script injection
      # through attacker-controlled dispatch inputs.
      SITE_ONLY: ${{ github.event.inputs.site_only }}
      MANUAL_TICKERS: ${{ github.event.inputs.tickers }}
      MANUAL_TRADE_DATE: ${{ github.event.inputs.trade_date }}
    steps:
      - name: Check out repository
        uses: actions/checkout@v4

      - name: Configure GitHub Pages
        uses: actions/configure-pages@v5

      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.13"

      - name: Install TradingAgents
        run: |
          python -m pip install --upgrade pip
          if ($LASTEXITCODE) { exit $LASTEXITCODE }
          python -m pip install -e .
          if ($LASTEXITCODE) { exit $LASTEXITCODE }

      - name: Resolve Codex runtime
        run: |
          if ($env:SITE_ONLY -eq "true") {
            Write-Host "Skipping Codex runtime resolution because site_only=true."
            exit 0
          }
          # A candidate is usable only if it exists and `--version` succeeds.
          function Test-CodexCandidate {
            param([string]$Candidate)
            if ([string]::IsNullOrWhiteSpace($Candidate) -or -not (Test-Path $Candidate)) {
              return $false
            }
            try {
              & $Candidate --version | Out-Null
              return ($LASTEXITCODE -eq 0)
            } catch {
              Write-Warning "Codex candidate failed: $Candidate :: $($_.Exception.Message)"
              return $false
            }
          }
          # Probe order: explicit CODEX_BINARY variable, then the current
          # user's sandbox binary, then any user's sandbox/VS Code binaries
          # (newest first).
          $candidates = [System.Collections.Generic.List[string]]::new()
          if (-not [string]::IsNullOrWhiteSpace($env:CODEX_BINARY)) {
            $candidates.Add($env:CODEX_BINARY)
          }
          $candidates.Add((Join-Path $env:USERPROFILE ".codex\.sandbox-bin\codex.exe"))
          Get-ChildItem -Path "C:\Users\*\.codex\.sandbox-bin\codex.exe" -ErrorAction SilentlyContinue |
            Sort-Object LastWriteTime -Descending |
            ForEach-Object { $candidates.Add($_.FullName) }
          Get-ChildItem -Path "C:\Users\*\.vscode\extensions\openai.chatgpt-*\bin\windows-x86_64\codex.exe" -ErrorAction SilentlyContinue |
            Sort-Object LastWriteTime -Descending |
            ForEach-Object { $candidates.Add($_.FullName) }
          $resolvedBinary = $null
          foreach ($candidate in $candidates) {
            if (Test-CodexCandidate $candidate) {
              $resolvedBinary = $candidate
              break
            }
          }
          if (-not $resolvedBinary) {
            throw "Could not find a usable Codex binary. Set the CODEX_BINARY repository variable or install Codex for the runner service account."
          }
          # Export for later steps via GITHUB_ENV.
          Add-Content -Path $env:GITHUB_ENV -Value "CODEX_BINARY=$resolvedBinary"
          Write-Host "Resolved Codex binary: $resolvedBinary"
          # Derive CODEX_HOME from a sandbox-bin binary path when not set:
          # <home>\.codex\.sandbox-bin\codex.exe -> <home>\.codex
          $resolvedHome = $env:CODEX_HOME
          if ([string]::IsNullOrWhiteSpace($resolvedHome) -and $resolvedBinary -like "*.codex\.sandbox-bin\codex.exe") {
            $resolvedHome = Split-Path (Split-Path $resolvedBinary -Parent) -Parent
          }
          if (-not [string]::IsNullOrWhiteSpace($resolvedHome)) {
            Add-Content -Path $env:GITHUB_ENV -Value "CODEX_HOME=$resolvedHome"
            Write-Host "Using CODEX_HOME: $resolvedHome"
          }

      - name: Verify Codex login and model availability
        if: ${{ github.event.inputs.site_only != 'true' }}
        run: |
          if ($env:SITE_ONLY -eq "true") {
            Write-Host "Skipping Codex preflight because site_only=true."
            exit 0
          }
          $workspaceDir = Join-Path $env:GITHUB_WORKSPACE ".codex-preflight"
          # Inline Python preflight; $workspaceDir is interpolated into the
          # here-string, everything else is passed to Python verbatim.
          $script = @"
          import os
          from tradingagents.llm_clients.codex_app_server import CodexAppServerAuthError, CodexAppServerBinaryError
          from tradingagents.llm_clients.codex_preflight import run_codex_preflight

          workspace_dir = r"$workspaceDir"
          summary_path = os.getenv("GITHUB_STEP_SUMMARY")

          def write_summary(lines):
              if not summary_path:
                  return
              with open(summary_path, "a", encoding="utf-8") as handle:
                  handle.write("\n".join(lines) + "\n")

          try:
              result = run_codex_preflight(
                  codex_binary=None,
                  model="gpt-5.4",
                  request_timeout=30.0,
                  workspace_dir=workspace_dir,
                  cleanup_threads=True,
              )
          except CodexAppServerAuthError as exc:
              message = (
                  "Codex is installed but not logged in for the runner. "
                  "Run `codex login` or `codex login --device-auth` on the runner machine, "
                  "then retry the workflow."
              )
              print(f"::error::{message}")
              print(exc)
              write_summary(
                  [
                      "## Codex login required",
                      "",
                      message,
                  ]
              )
              raise SystemExit(1)
          except CodexAppServerBinaryError as exc:
              message = (
                  "A usable Codex binary is not available for the runner. "
                  "Check the `CODEX_BINARY` repository variable or install Codex for the runner service account."
              )
              print(f"::error::{message}")
              print(exc)
              write_summary(
                  [
                      "## Codex runtime issue",
                      "",
                      message,
                      "",
                      str(exc),
                  ]
              )
              raise SystemExit(1)

          print("Codex account:", result.account)
          print("First available models:", ", ".join(result.models[:8]))
          write_summary(
              [
                  "## Codex preflight passed",
                  "",
                  f"- Account: {result.account}",
                  f"- Models: {', '.join(result.models[:8])}",
                  f"- Binary: {os.getenv('CODEX_BINARY', '(auto)')}",
              ]
          )
          "@
          $script | python -
          if ($LASTEXITCODE) { exit $LASTEXITCODE }

      - name: Run scheduled analysis and build site
        run: |
          $configPath = "config/scheduled_analysis.toml"
          if (-not (Test-Path $configPath)) {
            throw "Missing config/scheduled_analysis.toml. Copy config/scheduled_analysis.example.toml, set your real tickers, and commit the file before enabling the schedule."
          }
          # Build the python CLI argument list explicitly; do not assign to
          # the automatic $args variable.
          $cliArgs = @("-m", "tradingagents.scheduled", "--config", $configPath, "--site-dir", $env:TRADINGAGENTS_SITE_DIR, "--label", "github-actions")
          if (-not [string]::IsNullOrWhiteSpace($env:TRADINGAGENTS_ARCHIVE_DIR)) {
            $cliArgs += @("--archive-dir", $env:TRADINGAGENTS_ARCHIVE_DIR)
          } else {
            Write-Warning "TRADINGAGENTS_ARCHIVE_DIR is not set. Run history will live under the repository checkout unless the config overrides it."
          }
          # Manual dispatch overrides arrive via env, never string-interpolated.
          if (-not [string]::IsNullOrWhiteSpace($env:MANUAL_TICKERS)) {
            $cliArgs += @("--tickers", $env:MANUAL_TICKERS)
          }
          if (-not [string]::IsNullOrWhiteSpace($env:MANUAL_TRADE_DATE)) {
            $cliArgs += @("--trade-date", $env:MANUAL_TRADE_DATE)
          }
          if ($env:SITE_ONLY -eq "true") {
            $cliArgs += "--site-only"
          }
          python @cliArgs
          if ($LASTEXITCODE) { exit $LASTEXITCODE }

      - name: Add Git Bash to PATH for Pages packaging
        run: |
          # upload-pages-artifact packages with tar via bash; make sure a
          # Git for Windows bash.exe is reachable on PATH.
          $gitBinCandidates = @(
            "C:\Program Files\Git\bin",
            "C:\Program Files\Git\usr\bin"
          )
          foreach ($candidate in $gitBinCandidates) {
            if (Test-Path (Join-Path $candidate "bash.exe")) {
              Add-Content -Path $env:GITHUB_PATH -Value $candidate
              Write-Host "Added Git Bash path: $candidate"
              exit 0
            }
          }
          throw "bash.exe was not found under the expected Git for Windows directories."

      - name: Upload GitHub Pages artifact
        uses: actions/upload-pages-artifact@v3
        with:
          path: site

  deploy:
    needs: analyze
    runs-on: ubuntu-latest
    environment:
      name: github-pages
      url: ${{ steps.deployment.outputs.page_url }}
    steps:
      - name: Deploy to GitHub Pages
        id: deployment
        uses: actions/deploy-pages@v4

View File

@ -0,0 +1,163 @@
# Codex 작업 프롬프트 모음
## 프롬프트 1 — 구현 메인 프롬프트
You are working inside the local TradingAgents repository.
Goal:
Implement a new LLM provider named `codex` so TradingAgents can use the local Codex CLI/app-server authenticated with ChatGPT/Codex login instead of an OpenAI API key.
High-level constraints:
1. Do NOT build an OpenAI-compatible HTTP proxy.
2. Do NOT call raw OAuth endpoints yourself.
3. Do NOT depend on Codex dynamicTools for TradingAgents tool execution.
4. Keep TradingAgents existing LangGraph / ToolNode flow intact.
5. The integration must work for both:
- analyst nodes that use `prompt | llm.bind_tools(tools)`
- non-tool nodes that call `llm.invoke(...)` directly
6. Prefer minimal, coherent changes over broad refactors.
7. Add tests and documentation.
8. No unrelated cleanup.
Architecture to implement:
- Add a new provider `codex` in `tradingagents/llm_clients/factory.py`.
- Add a `CodexClient` implementing the existing BaseLLMClient contract.
- Add a custom LangChain chat model that talks to `codex app-server` over stdio JSONL.
- Reuse a long-lived app-server process per model instance, but create a fresh Codex thread per model invocation to avoid context bleed across agents.
- After each invocation, `thread/unsubscribe`.
- Use `initialize` / `initialized` on startup.
- Add a preflight helper that checks:
- `codex` binary exists
- app-server starts
- `account/read` succeeds
- requested models are available from `model/list`
- Do not require API keys for the `codex` provider.
Authentication assumptions:
- The supported user path is `codex login` or `codex login --device-auth`.
- If file-backed auth is used, Codex-managed credentials may be stored in `~/.codex/auth.json`.
- Do not implement direct OAuth token refresh.
- If auth is missing, fail with a clear actionable message telling the user to run `codex login`.
Important implementation choice:
Do NOT use app-server dynamic tools.
Instead, emulate tool calling at the model boundary with strict structured output:
- For plain non-tool calls, request JSON schema: `{ "answer": string }`
- For tool-capable calls, request a root `oneOf` schema:
- final:
`{ "mode": "final", "content": string, "tool_calls": [] }`
- tool batch:
`{ "mode": "tool_calls", "content": string, "tool_calls": [ ... ] }`
- For `tool_calls[].items`, use `oneOf` with one branch per tool so each tool name has its own exact arguments JSON schema.
- This is required so TradingAgents ToolNode can execute the selected tool calls after receiving an `AIMessage.tool_calls`.
Files to add:
- `tradingagents/llm_clients/codex_client.py`
- `tradingagents/llm_clients/codex_chat_model.py`
- `tradingagents/llm_clients/codex_app_server.py`
- `tradingagents/llm_clients/codex_schema.py`
- `tradingagents/llm_clients/codex_message_codec.py`
- `tradingagents/llm_clients/codex_preflight.py`
Files to modify:
- `tradingagents/llm_clients/factory.py`
- `tradingagents/default_config.py`
- `tradingagents/llm_clients/__init__.py`
- CLI / UI config surfaces if present
- README and/or docs
Model behavior requirements:
- Normalize input from:
- `str`
- LangChain `BaseMessage` sequences
- OpenAI-style dict message sequences
- The custom model must support `bind_tools()`.
- `bind_tools()` should preserve LangChain semantics by binding tool schemas into `_generate(...)`.
- Return `AIMessage` objects.
- If tool calls are requested, populate `AIMessage.tool_calls` with stable ids like `call_<uuid>`.
Safety / hardening requirements:
- Default to a neutral dedicated workspace directory for Codex, not the repo root.
- Add config knobs for:
- `codex_binary`
- `codex_reasoning_effort`
- `codex_summary`
- `codex_personality`
- `codex_workspace_dir`
- `codex_request_timeout`
- `codex_max_retries`
- `codex_cleanup_threads`
- Document a recommended `.codex/config.toml` with:
- `approval_policy = "never"`
- `sandbox_mode = "read-only"`
- `web_search = "disabled"`
- `personality = "none"`
- `cli_auth_credentials_store = "file"`
Testing requirements:
1. Unit tests for message normalization.
2. Unit tests for output schema construction.
3. Unit tests for plain final response parsing.
4. Unit tests for tool-call response parsing.
5. Unit tests for malformed JSON retry / error reporting.
6. Integration smoke test for provider `codex`.
7. Preflight test for missing auth / missing binary.
Acceptance criteria:
- `llm_provider="codex"` works without API keys after `codex login`.
- At least one analyst node using `bind_tools()` works.
- At least one non-tool node using `llm.invoke(...)` works.
- A minimal smoke run can produce a final report / final decision.
- Documentation explains installation, auth, usage, and limitations.
Implementation style:
- Read the existing code first and align with project style.
- Make the smallest set of clean, composable changes.
- Include comments only where they add real value.
- Avoid speculative abstractions.
- Keep the code production-oriented and debuggable.
Working method:
1. Inspect the current LLM client factory and how agents call `bind_tools()` vs `invoke()`.
2. Implement the connection layer.
3. Implement the chat model.
4. Wire the provider.
5. Add preflight + docs.
6. Add tests.
7. Run the relevant tests / smoke checks.
8. Summarize exactly what changed and any limitations that remain.
Do the work now.
---
## 프롬프트 2 — 검증/수정 프롬프트
Review the `codex` provider implementation you just added to TradingAgents.
Your job:
1. Find correctness bugs, interface mismatches, race conditions, and integration gaps.
2. Pay special attention to:
- LangChain `bind_tools()` semantics
- `AIMessage.tool_calls` structure
- support for `llm.invoke(str)`, `llm.invoke(list[BaseMessage])`, and `llm.invoke(list[dict])`
- app-server request/response matching
- thread cleanup with `thread/unsubscribe`
- malformed JSON retries
- missing auth / missing binary / missing model diagnostics
3. Run or update tests as needed.
4. Fix only what is necessary; do not refactor unrelated code.
5. Update docs if behavior changed.
Definition of done:
- the provider is internally consistent,
- tests pass,
- smoke run works,
- error messages are actionable,
- no obvious context-bleed or tool-calling contract issues remain.
Return:
- a concise changelog,
- exact files modified,
- exact commands/tests run,
- any remaining known limitations.

View File

@ -0,0 +1,412 @@
# TradingAgents 일일 Codex 리포트 운영 가이드
이 문서는 `self-hosted Windows runner + Codex + GitHub Actions + GitHub Pages` 조합으로 TradingAgents를 매일 자동 실행하고, 웹페이지에서 결과를 확인하는 운영 절차를 정리한 문서입니다.
기준 저장소:
- `https://github.com/nornen0202/TradingAgents`
기본 분석 설정:
- 티커: `GOOGL`, `NVDA`
- provider: `codex`
- model: `gpt-5.4`
- analyst: `market`, `social`, `news`, `fundamentals`
- 출력 언어: `Korean`
관련 파일:
- 설정 파일: [config/scheduled_analysis.toml](/C:/Projects/TradingAgents/config/scheduled_analysis.toml)
- 예시 설정: [config/scheduled_analysis.example.toml](/C:/Projects/TradingAgents/config/scheduled_analysis.example.toml)
- 스케줄 러너: [runner.py](/C:/Projects/TradingAgents/tradingagents/scheduled/runner.py)
- 정적 사이트 생성기: [site.py](/C:/Projects/TradingAgents/tradingagents/scheduled/site.py)
- GitHub Actions 워크플로: [daily-codex-analysis.yml](/C:/Projects/TradingAgents/.github/workflows/daily-codex-analysis.yml)
## 1. 현재 운영 상태
2026-04-07 기준 현재 상태는 아래와 같습니다.
- self-hosted Windows runner 등록 완료
- runner 이름: `desktop-gheeibb-codex`
- runner 상태: `online`
- GitHub Pages 소스: `GitHub Actions`
- Actions 변수 `TRADINGAGENTS_ARCHIVE_DIR` 설정 완료
- 변수 값: `C:\TradingAgentsData\archive`
- `GOOGL`, `NVDA` 설정 파일 작성 완료
- 실제 원격 GitHub Actions 실행 성공 검증 완료
검증된 성공 실행:
- run URL: `https://github.com/nornen0202/TradingAgents/actions/runs/24013668241`
- 상태: `success`
- 실행 시작: `2026-04-06 09:15:42 KST`
- 분석 완료: `2026-04-06 09:28:35 KST`
- Pages 배포 완료: `2026-04-06 09:28:47 KST`
검증된 결과:
- archive manifest: `C:\TradingAgentsData\archive\latest-run.json`
- Pages URL: `https://nornen0202.github.io/TradingAgents/`
- 이번 성공 실행 결과: `GOOGL = BUY`, `NVDA = SELL`
- trade date: 두 티커 모두 `2026-04-02`
중요:
- 현재 runner는 정상 동작 중입니다.
- 서비스 모드 전환은 아직 완료된 상태로 가정하지 않습니다.
- 지금도 PC가 켜져 있고 로그인된 상태라면 자동 실행은 가능합니다.
## 2. 전체 동작 구조
동작 흐름은 아래와 같습니다.
1. GitHub Actions가 매일 `09:13 KST`에 `daily-codex-analysis.yml`을 실행합니다.
2. self-hosted Windows runner가 잡을 받아 TradingAgents를 실행합니다.
3. Codex `gpt-5.4`로 4개 analyst 조합 분석을 수행합니다.
4. 결과를 `TRADINGAGENTS_ARCHIVE_DIR` 아래에 누적 저장합니다.
5. 정적 사이트를 생성합니다.
6. GitHub Pages로 배포합니다.
사용자가 보는 위치:
- 웹: [https://nornen0202.github.io/TradingAgents/](https://nornen0202.github.io/TradingAgents/)
- 로컬 archive: `C:\TradingAgentsData\archive`
## 3. 가장 중요한 개념 3가지
### 3-1. runner token
runner token은 self-hosted runner를 GitHub 저장소에 등록할 때 쓰는 짧은 수명 토큰입니다.
중요:
- 영구 토큰이 아닙니다.
- 보통 1시간 내외로 만료됩니다.
- runner를 새로 등록하거나 재등록할 때만 사용합니다.
### 3-2. Codex 로그인 위치
`codex login`은 GitHub가 아니라 실제 self-hosted runner가 돌아가는 로컬 Windows PC에서 해야 합니다.
즉 이 구성에서는:
- 이 로컬 PC에서 로그인해야 합니다.
- GitHub-hosted runner에서는 이 로그인 상태를 유지할 수 없습니다.
### 3-3. `TRADINGAGENTS_ARCHIVE_DIR`
이 변수는 GitHub 경로나 저장소 경로가 아니라, self-hosted runner가 실행되는 로컬 PC의 절대 경로여야 합니다.
권장 예:
- `C:\TradingAgentsData\archive`
- `D:\TradingAgents\archive`
권장하지 않는 예:
- `C:\Projects\TradingAgents`
- GitHub URL
- 상대 경로
이유:
- 결과 이력을 저장소 checkout 폴더와 분리해야 안전합니다.
- archive는 저장소 밖의 영속 경로에 있어야 이전 실행 이력이 누적됩니다.
## 4. runner token 발급 방법
### 방법 A. GitHub 웹 UI
1. 저장소 [TradingAgents](https://github.com/nornen0202/TradingAgents)로 이동
2. `Settings`
3. `Actions`
4. `Runners`
5. `New self-hosted runner`
6. `Windows` 선택
7. 화면에 표시되는 명령의 `--token` 값을 사용
### 방법 B. GitHub CLI
등록용 token:
```powershell
gh auth status
gh api -X POST repos/nornen0202/TradingAgents/actions/runners/registration-token
```
삭제용 token:
```powershell
gh api -X POST repos/nornen0202/TradingAgents/actions/runners/remove-token
```
중요:
- `registration token`과 `remove token`은 서로 다릅니다.
- `.\config.cmd remove`에는 `remove token`이 필요합니다.
- `.\config.cmd --token ...` 등록에는 `registration token`이 필요합니다.
## 5. Codex 로그인 방법
먼저 확인:
```powershell
where.exe codex
codex --help
```
이 PC에서 확인된 실제 Codex 바이너리:
```powershell
C:\Users\JY\.vscode\extensions\openai.chatgpt-26.325.31654-win32-x64\bin\windows-x86_64\codex.exe
```
브라우저 로그인:
```powershell
codex login
```
또는 실제 경로 직접 실행:
```powershell
& 'C:\Users\JY\.vscode\extensions\openai.chatgpt-26.325.31654-win32-x64\bin\windows-x86_64\codex.exe' login
```
디바이스 인증:
```powershell
codex login --device-auth
```
상태 확인:
```powershell
codex login status
```
현재 이 PC에서 실제 확인된 상태:
```text
Logged in using ChatGPT
```
## 6. 서비스 모드 이해
### 서비스 모드가 의미하는 것
서비스 모드로 전환하면:
- Windows에 로그인하지 않아도 runner가 자동 시작될 수 있습니다.
- 로그아웃 상태에서도 GitHub Actions 잡을 받을 수 있습니다.
### 서비스 모드로도 안 되는 것
서비스 모드여도 아래 상태에서는 동작하지 않습니다.
- PC 전원이 꺼져 있음
- 절전 또는 최대 절전 상태
- 네트워크 끊김
즉 핵심은:
- 서비스 모드 = 로그아웃 상태 대응
- 전원 꺼짐 대응은 아님
### Codex 로그인 유지 여부
보통 같은 PC, 같은 사용자 환경이라면 Codex 로그인은 유지됩니다.
다만 아래 경우에는 재로그인이 필요할 수 있습니다.
- 인증 만료
- Codex 앱/CLI 업데이트 후 인증 재요구
- runner를 다른 사용자 계정으로 실행
- 인증 파일 삭제
## 7. 서비스 모드 전환 절차
현재 질문 흐름상 아직 서비스 모드 전환은 완료하지 않은 상태를 기준으로 설명합니다.
### 7-1. 기존 등록 제거
PowerShell:
```powershell
gh api -X POST repos/nornen0202/TradingAgents/actions/runners/remove-token
Set-Location C:\actions-runner
.\config.cmd remove
```
프롬프트가 뜨면:
- 방금 받은 `remove token` 값을 입력합니다.
주의:
- `registration token`을 넣으면 안 됩니다.
### 7-2. 서비스 모드 재등록
관리자 PowerShell:
```powershell
gh api -X POST repos/nornen0202/TradingAgents/actions/runners/registration-token
Set-Location C:\actions-runner
.\config.cmd --unattended --url https://github.com/nornen0202/TradingAgents --token <REGISTRATION_TOKEN> --name desktop-gheeibb-codex --work _work --replace --labels codex --runasservice
```
### 7-3. 확인
확인 항목:
- GitHub `Settings > Actions > Runners`에서 `online`
- `services.msc`에서 runner 서비스 확인
- `gh api repos/nornen0202/TradingAgents/actions/runners`
## 8. 운영 체크리스트
### 매일 자동 실행 전제 조건
- PC 전원이 켜져 있음
- 인터넷 연결 정상
- runner가 `online`
- `codex login status`가 정상
- `TRADINGAGENTS_ARCHIVE_DIR` 경로 존재
### 수동 점검 체크리스트
```powershell
gh auth status
gh variable list --repo nornen0202/TradingAgents
gh run list --repo nornen0202/TradingAgents --workflow daily-codex-analysis.yml --limit 5
gh api repos/nornen0202/TradingAgents/actions/runners
codex login status
Test-Path C:\TradingAgentsData\archive
```
### 수동 실행 체크리스트
1. 저장소 `Actions`
2. `Daily Codex Analysis`
3. `Run workflow`
4. 필요시 입력:
- `tickers`: `GOOGL,NVDA`
- `trade_date`: `2026-04-02`
- `site_only`: `false`
## 9. 최근 질의응답 정리
### Q. `registration token`의 `expires_at`은 서비스 모드 만료 시간인가
A. 아닙니다.
- `expires_at`은 토큰 만료 시각입니다.
- 서비스 모드 자체의 만료 시각이 아닙니다.
- 만료 전에 등록만 완료하면 이후 서비스는 계속 동작합니다.
### Q. 서비스 모드로 바꾸면 PC를 꺼도 동작하나
A. 아닙니다.
- 서비스 모드는 로그아웃 상태 대응입니다.
- PC 전원이 꺼져 있으면 동작하지 않습니다.
### Q. 서비스 모드로 바꾸면 Codex 로그인은 유지되나
A. 보통 유지됩니다.
- 같은 PC와 같은 사용자 기준이면 대체로 유지됩니다.
- 다만 인증 만료나 사용자 계정 변경 시 재로그인이 필요할 수 있습니다.
### Q. `.\config.cmd remove`에서 무엇을 입력해야 하나
A. `remove token`을 입력해야 합니다.
명령:
```powershell
gh api -X POST repos/nornen0202/TradingAgents/actions/runners/remove-token
```
중요:
- `registration token`이 아닙니다.
- `remove token`과 `registration token`은 별개입니다.
### Q. `TRADINGAGENTS_ARCHIVE_DIR`는 프로젝트 경로인가
A. 아니고, 이 로컬 PC의 영속 archive 폴더 경로입니다.
현재 설정값:
```text
C:\TradingAgentsData\archive
```
## 10. 티커 변경 방법
수정 파일:
- [config/scheduled_analysis.toml](/C:/Projects/TradingAgents/config/scheduled_analysis.toml)
예시:
```toml
[run]
tickers = ["GOOGL", "NVDA"]
```
다른 티커로 바꾸려면:
```toml
[run]
tickers = ["AAPL", "MSFT", "TSLA"]
```
일회성 테스트는 GitHub Actions 수동 실행에서 `tickers` 입력칸으로 덮어쓸 수 있습니다.
## 11. 장애 대응 순서
문제가 생기면 아래 순서로 확인합니다.
1. runner 온라인 여부 확인
2. Codex 로그인 상태 확인
3. archive 경로 존재 여부 확인
4. 최근 Actions run 로그 확인
5. GitHub Pages 최신 페이지 반영 확인
자주 발생하는 원인:
- runner 오프라인
- Windows 로그아웃 또는 전원 꺼짐
- Codex 로그인 만료
- archive 경로 권한 문제
- workflow 수정 후 미푸시
## 12. 실제 검증 완료 항목
이번 작업에서 직접 검증한 항목:
- GitHub CLI 인증 정상
- self-hosted runner 온라인 확인
- Codex 로그인 상태 확인
- Actions 변수 설정 확인
- GitHub Pages 설정 확인
- 원격 workflow dispatch 성공
- `GOOGL`, `NVDA` 분석 성공
- Pages artifact 업로드 성공
- GitHub Pages 배포 성공
- 실제 Pages URL HTTP 200 확인
성공 링크:
- [GitHub Actions run](https://github.com/nornen0202/TradingAgents/actions/runs/24013668241)
- [GitHub Pages](https://nornen0202.github.io/TradingAgents/)
## 13. 지금 꼭 해야 하는 일
즉시 사용 기준으로는 추가 필수 작업이 없습니다.
다만 아래 상황이면 추가 작업이 필요합니다.
- 로그아웃 상태에서도 항상 돌리고 싶다
- 관리자 PowerShell에서 서비스 모드 전환 필요
- 티커를 바꾸고 싶다
- [config/scheduled_analysis.toml](/C:/Projects/TradingAgents/config/scheduled_analysis.toml) 수정
- 다른 PC로 runner를 옮기고 싶다
- 그 PC에서 다시 `codex login`과 runner 등록 필요
## 14. 자주 쓰는 명령
```powershell
gh auth status
gh variable list --repo nornen0202/TradingAgents
gh run list --repo nornen0202/TradingAgents --workflow daily-codex-analysis.yml --limit 5
gh api repos/nornen0202/TradingAgents/actions/runners
codex login status
```
실제 Codex 바이너리 직접 실행:
```powershell
& 'C:\Users\JY\.vscode\extensions\openai.chatgpt-26.325.31654-win32-x64\bin\windows-x86_64\codex.exe' login status
```

View File

@ -0,0 +1,750 @@
# TradingAgents × Codex 브리지 구현 설계서
## 1. 목표
TradingAgents가 요구하는 LLM 호출을 OpenAI API key 대신 **로컬 Codex app-server + ChatGPT/Codex 인증**으로 처리한다.
핵심 목표는 다음과 같다.
1. TradingAgents의 **기존 LangGraph / ToolNode 구조를 유지**한다.
2. OpenAI 호환 프록시를 억지로 에뮬레이션하지 않고, **새 provider(`codex`)를 추가**한다.
3. ChatGPT Pro 사용자는 **Codex 로그인(`codex login` / `codex login --device-auth`) 또는 Codex의 managed auth cache(`~/.codex/auth.json`)**를 통해 인증한다.
4. `bind_tools()`가 필요한 analyst 노드와 plain `invoke()`만 필요한 debate / manager / trader 노드가 모두 동작해야 한다.
5. Codex의 자체 shell/web/tool 생태계에 의존하지 않고, TradingAgents가 이미 가진 도구 실행 루프를 그대로 사용한다.
---
## 2. 왜 이 방식이 최선인가
### 채택할 방식
**권장안:** `codex app-server`를 로컬에 띄우고, Python에서 stdio(JSONL)로 통신하는 **Custom LangChain ChatModel**을 만든다.
### 채택하지 않을 방식
#### A. OpenAI-compatible `/v1/responses` 프록시
비추천. TradingAgents는 현재 `openai` provider에서 `langchain_openai.ChatOpenAI`를 사용하고 native OpenAI일 때 `use_responses_api=True`를 켠다.
`/v1/responses`와 tool-calling semantics를 꽤 정확히 흉내 내야 한다. 구현 난도가 높고 유지보수 비용이 크다.
#### B. Codex dynamic tools 직접 사용
비추천. app-server의 `dynamicTools``item/tool/call`**experimental** 이다.
TradingAgents는 이미 `ToolNode`로 툴 실행을 잘 처리하므로, 여기까지 Codex에 넘길 이유가 없다.
#### C. Codex SDK 직접 내장
부분적으로 가능하지만 비권장. SDK는 TypeScript 중심이다. Python 프로젝트인 TradingAgents에선 app-server stdio 브리지가 더 단순하다.
### 설계 핵심
Codex는 **모델 추론만 담당**하고, 실제 툴 실행은 여전히 TradingAgents/LangGraph가 담당한다.
따라서 Codex 쪽에는 tool schema를 설명하고, 응답은 **엄격한 JSON schema**로만 받는다.
- 툴이 필요하면: `{"mode":"tool_calls", ...}`
- 툴이 더 이상 필요 없으면: `{"mode":"final", ...}`
이렇게 하면 analyst 노드의 `bind_tools()` 요구사항을 만족시키면서도 Codex의 experimental dynamic tool API를 피할 수 있다.
---
## 3. 구현 아키텍처
## 3.1 새 provider 추가
### 수정 파일
- `tradingagents/llm_clients/factory.py`
- `tradingagents/default_config.py`
- `tradingagents/llm_clients/__init__.py`
- CLI/UI 관련 파일(선택 사항이 아니라 사실상 권장)
### 추가 파일
- `tradingagents/llm_clients/codex_client.py`
- `tradingagents/llm_clients/codex_chat_model.py`
- `tradingagents/llm_clients/codex_app_server.py`
- `tradingagents/llm_clients/codex_schema.py`
- `tradingagents/llm_clients/codex_message_codec.py`
- `tradingagents/llm_clients/codex_preflight.py`
- `tests/llm_clients/test_codex_chat_model.py`
- `tests/llm_clients/test_codex_app_server.py`
- `tests/integration/test_codex_provider_smoke.py`
---
## 3.2 런타임 구성
### TradingAgents 측
`TradingAgentsGraph.__init__()`는 deep/quick 두 개 LLM을 한 번 생성해 재사용한다.
따라서 `CodexChatModel`도 **모델 인스턴스당 app-server 세션 1개**를 유지하는 것이 적절하다.
- quick_thinking_llm → Codex app-server session A
- deep_thinking_llm → Codex app-server session B
### 중요 원칙
- **세션은 재사용**
- **thread는 per-invoke 새로 생성**
- 이유: 여러 analyst / debate agent가 같은 LLM 인스턴스를 공유하므로 thread까지 재사용하면 문맥 오염이 발생한다.
즉:
- app-server process: 재사용
- Codex thread: 매 호출마다 새로 생성 후 `thread/unsubscribe`
---
## 3.3 인증 전략
### 기본/권장
사용자가 먼저 로컬에서:
```bash
codex login
```
브라우저 callback이 막히거나 headless면:
```bash
codex login --device-auth
```
### headless / container / 원격 머신
- `cli_auth_credentials_store = "file"` 로 설정해서 `~/.codex/auth.json`을 사용
- 신뢰 가능한 머신에서 생성한 `auth.json`을 복사
- refresh는 직접 구현하지 말고 Codex가 하게 둔다
- `auth.json`은 절대 커밋 금지
### 고급 옵션: OAuth URL helper
원한다면 Python helper에서 app-server로 아래를 호출해 브라우저 login URL을 직접 받아 출력할 수 있다.
- `account/read`
- `account/login/start` with `type="chatgpt"`
하지만 **v1 구현은 이 helper 없이도 충분**하다. 실제 운영에서는 `codex login`이 더 단순하고 안정적이다.
---
## 3.4 보안 / 하드닝
Codex를 “코딩 에이전트”가 아니라 “모델 백엔드”로만 쓰기 위해 다음을 권장한다.
### `.codex/config.toml` 예시
```toml
model = "gpt-5.4"
model_reasoning_effort = "medium"
approval_policy = "never"
sandbox_mode = "read-only"
web_search = "disabled"
personality = "none"
log_dir = ".codex-log"
cli_auth_credentials_store = "file"
```
### 선택적 하드닝
```toml
[features]
apps = false
shell_tool = false
multi_agent = false
```
### 추가 권장
`cwd`를 프로젝트 루트가 아니라 **비어 있는 전용 workspace**로 준다.
예:
- `~/.cache/tradingagents/codex_workspace`
- 또는 repo 내 `./.tradingagents_codex_workspace`
이렇게 해야 Codex가 리포지토리를 뒤지거나 파일을 읽는 쪽으로 샐 가능성을 낮출 수 있다.
---
## 4. 메시지/툴 호출 설계
## 4.1 입력 정규화
`CodexChatModel`은 아래 입력을 모두 받아야 한다.
1. `str`
2. `list[BaseMessage]`
3. `list[dict(role=..., content=...)]`
이유:
- analyst 체인은 prompt pipeline 때문에 `BaseMessage` 시퀀스를 넘길 가능성이 높다
- trader / manager 쪽은 OpenAI-style dict list를 직접 `llm.invoke(messages)`로 넘긴다
### 내부 정규화 포맷 예시
```text
[SYSTEM]
...
[USER]
...
[ASSISTANT]
...
[ASSISTANT_TOOL_CALL]
name=get_news
args={"query":"AAPL",...}
[TOOL_RESULT]
name=get_news
call_id=call_xxx
content=...
```
---
## 4.2 bind_tools 처리
TradingAgents analyst 노드는 다음 패턴을 사용한다.
```python
chain = prompt | llm.bind_tools(tools)
result = chain.invoke(state["messages"])
```
따라서 `CodexChatModel.bind_tools()`는 반드시 구현해야 한다.
### 구현 방식
- LangChain tool 객체를 OpenAI-style tool schema로 변환
- 내부적으로 `self.bind(tools=formatted_tools, tool_choice=...)` 형태로 바인딩
- `_generate(..., tools=..., tool_choice=...)`에서 그 schema를 읽어 사용
### tool schema 변환
가능한 한 LangChain의 표준 helper(`convert_to_openai_tool` 계열)를 사용한다.
각 tool에 대해 다음 정보를 확보한다.
- `name`
- `description`
- `parameters` JSON schema
---
## 4.3 output schema 설계
### plain invoke용
```json
{
"type": "object",
"properties": {
"answer": { "type": "string" }
},
"required": ["answer"],
"additionalProperties": false
}
```
### tool-capable invoke용
루트는 **final** 또는 **tool_calls** 중 하나가 되도록 강제한다.
```json
{
"oneOf": [
{
"type": "object",
"properties": {
"mode": { "const": "final" },
"content": { "type": "string" },
"tool_calls": {
"type": "array",
"maxItems": 0
}
},
"required": ["mode", "content", "tool_calls"],
"additionalProperties": false
},
{
"type": "object",
"properties": {
"mode": { "const": "tool_calls" },
"content": { "type": "string" },
"tool_calls": {
"type": "array",
"minItems": 1,
"items": {
"oneOf": [
{
"type": "object",
"properties": {
"name": { "const": "get_news" },
"arguments": { "...": "get_news parameters schema" }
},
"required": ["name", "arguments"],
"additionalProperties": false
}
]
}
}
},
"required": ["mode", "content", "tool_calls"],
"additionalProperties": false
}
]
}
```
### 중요한 포인트
`tool_calls.items.oneOf` 안에 **툴별 arguments schema**를 넣는다.
그래야 Codex가 tool 이름과 인자를 아무렇게나 생성하지 못한다.
---
## 4.4 tool-call 정책
Codex에게 항상 다음 규칙을 준다.
1. 지금 당장 필요한 **다음 단계 툴 호출만** 요청할 것
2. speculative call 금지
3. tool result를 아직 보지 않은 상태에서 downstream tool을 미리 호출하지 말 것
4. 툴이 필요 없으면 final로 답할 것
5. 응답은 output schema에 맞는 JSON만 낼 것
### 왜 필요한가
예를 들어 market analyst는 `get_stock_data` 이후에 `get_indicators`가 자연스럽다.
하지만 CSV 생성/캐시 같은 간접 의존성이 있으므로 한 번에 여러 단계를 추측 호출하게 두는 것보다 **최소 다음 호출만** 받는 편이 안전하다.
---
## 5. Codex app-server 통신 계층 설계
## 5.1 `CodexAppServerConnection`
책임:
- `codex app-server` subprocess 시작/종료
- `initialize` / `initialized`
- request/response correlation (`id`)
- stdout JSONL reader thread
- notifications 수집
- timeout / error propagation
- graceful shutdown
### 핵심 메서드
- `start()`
- `close()`
- `request(method, params, timeout)`
- `wait_for_turn_completion(thread_id, turn_id, timeout)`
- `read_account()`
- `read_models()`
- `read_rate_limits()`
### transport
- **stdio(JSONL)** 사용
- websocket transport는 실익이 적으므로 v1에서 제외
---
## 5.2 초기 handshake
시작 직후:
1. subprocess spawn: `codex app-server`
2. `initialize`
3. `initialized`
4. `account/read`
5. 필요 시 `model/list`
### `initialize` 예시
```json
{
"method": "initialize",
"id": 1,
"params": {
"clientInfo": {
"name": "tradingagents_codex_bridge",
"title": "TradingAgents Codex Bridge",
"version": "0.1.0"
}
}
}
```
---
## 5.3 preflight 체크
`codex_preflight.py` 또는 helper 함수에서:
1. `codex` binary 존재 여부 확인
2. app-server 시작 가능 여부 확인
3. `account/read(refreshToken=false)` 실행
4. `account.type == "chatgpt"` 또는 `"apiKey"`인지 확인
5. 가능하면 `planType == "pro"` 확인
6. `model/list`에서 `deep_think_llm`, `quick_think_llm` 가용성 확인
7. `account/rateLimits/read` 가능하면 출력
### 실패 시 메시지 예시
- `Codex not installed. Install with npm i -g @openai/codex`
- `No ChatGPT/API auth found. Run codex login`
- `Requested model gpt-5.4-mini is not available under current Codex account`
---
## 6. LangChain 커스텀 모델 설계
## 6.1 `CodexChatModel`
상속:
- `langchain_core.language_models.chat_models.BaseChatModel`
필수 구현:
- `_generate(...)`
- `_llm_type`
- `bind_tools(...)`
권장 추가:
- `_identifying_params`
- `invoke(...)` 입력 정규화 보강
- 에러 래핑
### 내부 필드 예시
- `model`
- `reasoning_effort`
- `summary`
- `personality`
- `request_timeout`
- `max_retries`
- `server: CodexAppServerConnection`
- `workspace_dir`
- `cleanup_threads`
- `service_name`
---
## 6.2 `_generate()` 동작
### tools 없는 경우
1. 입력 messages 정규화
2. plain schema 생성 (`answer`)
3. thread/start
4. turn/start with `outputSchema`
5. 최종 agent message JSON 파싱
6. `AIMessage(content=answer)` 반환
### tools 있는 경우
1. 입력 messages 정규화
2. tool schema 생성
3. root oneOf output schema 생성
4. thread/start
5. turn/start with `outputSchema`
6. 최종 agent message JSON 파싱
7. `mode == "tool_calls"` 면:
- 각 call에 `id = "call_" + uuid`
- `AIMessage(content=content or "", tool_calls=[...])`
8. `mode == "final"` 면:
- `AIMessage(content=content, tool_calls=[])`
### 종료 처리
- `thread/unsubscribe`
- reader queue cleanup
- 필요 시 thread archive는 선택 옵션
---
## 6.3 app-server 호출 파라미터
### thread/start
```json
{
"method": "thread/start",
"params": {
"model": "gpt-5.4",
"cwd": "/abs/path/to/.tradingagents_codex_workspace",
"approvalPolicy": "never",
"serviceName": "tradingagents_codex_bridge"
}
}
```
### turn/start
```json
{
"method": "turn/start",
"params": {
"threadId": "...",
"input": [
{ "type": "text", "text": "<serialized prompt>" }
],
"model": "gpt-5.4",
"effort": "medium",
"summary": "concise",
"personality": "none",
"sandboxPolicy": {
"type": "readOnly",
"access": { "type": "fullAccess" }
},
"outputSchema": { ... }
}
}
```
---
## 6.4 프롬프트 래퍼 템플릿
### plain invoke wrapper
```text
You are the language model backend for a LangGraph-based financial multi-agent system.
Rules:
1. Answer only from the provided conversation transcript.
2. Do not inspect files.
3. Do not run commands.
4. Do not use web search.
5. Return ONLY JSON that matches the provided schema.
Conversation transcript:
<...serialized messages...>
```
### tool-capable wrapper
```text
You are the language model backend for a LangGraph-based financial multi-agent system.
You may either:
- request the next necessary tool call(s), or
- provide the final assistant response.
Hard rules:
1. Use only the allowed tools listed below.
2. Arguments must conform exactly to the JSON schema for that tool.
3. Request only the next required tool call batch.
4. Do not speculate past missing tool results.
5. Do not inspect files.
6. Do not run commands.
7. Do not use web search.
8. Return ONLY JSON that matches the provided schema.
Allowed tools:
<tool schemas pretty-printed>
Conversation transcript:
<...serialized messages...>
```
### 안정화 팁
- tool schema를 pretty JSON으로 포함
- 1~2개의 few-shot example을 포함할 수 있음
- 단, prompt를 너무 길게 만들어 토큰 낭비하지 않도록 주의
---
## 7. TradingAgents 코드 변경 체크리스트
## 7.1 `default_config.py`
추가 권장 key:
```python
"llm_provider": "openai",
"codex_binary": "codex",
"codex_reasoning_effort": "medium",
"codex_summary": "concise",
"codex_personality": "none",
"codex_workspace_dir": os.getenv("TRADINGAGENTS_CODEX_WORKSPACE", "./.tradingagents_codex_workspace"),
"codex_request_timeout": 120,
"codex_max_retries": 2,
"codex_cleanup_threads": True,
```
호환성 위해:
- `openai_reasoning_effort`가 설정돼 있고 `codex_reasoning_effort`가 비어 있으면 fallback 하도록 해도 좋다.
---
## 7.2 `factory.py`
대략:
```python
if provider_lower == "codex":
return CodexClient(model, base_url, **kwargs)
```
---
## 7.3 `codex_client.py`
책임:
- `BaseLLMClient` 구현
- kwargs를 `CodexChatModel` 생성자에 전달
- `validate_model()`에서 preflight/model list 확인
---
## 7.4 CLI / UI
반드시 추가할 항목:
- provider 목록에 `codex`
- backend_url 입력은 codex일 때 숨기거나 무시
- advanced options:
- `codex_reasoning_effort`
- `codex_summary`
- `codex_personality`
- `codex_workspace_dir`
---
## 7.5 README / docs
반드시 문서화:
1. ChatGPT Pro/Codex auth와 API key의 차이
2. `codex login`
3. headless auth cache 사용법
4. `.codex/config.toml` 예시
5. provider 선택 방법
6. known limitations
---
## 8. 테스트 전략
## 8.1 단위 테스트
### `test_codex_message_codec.py`
- `str` 입력 정규화
- `BaseMessage` 시퀀스 정규화
- dict message 시퀀스 정규화
- `ToolMessage` 직렬화
### `test_codex_schema.py`
- plain schema 생성
- tool oneOf schema 생성
- tool args const / required / additionalProperties 검증
### `test_codex_chat_model.py`
mock app-server 응답으로:
- plain final answer
- tool_calls answer
- malformed JSON retry
- timeout
- unsupported model error
### `test_codex_app_server.py`
- initialize handshake
- request/response correlation
- notification draining
- turn completed / failed 처리
---
## 8.2 통합 테스트
### smoke
- provider=`codex`
- analyst=`news` 한 개만 선택
- ticker=`AAPL`
- research depth=1
- 최종 리포트 파일 생성 확인
### tool loop
- market analyst만 실행
- 첫 응답이 `get_stock_data` tool call
- tool result 후 다음 응답이 `get_indicators` 또는 final
### multi-agent
- `market + news`
- graph 전체 완주
- `final_trade_decision` 비어 있지 않음
### auth preflight
- 로그인 안 된 환경 → 친절한 실패
- 로그인 된 환경 → account/read 성공
---
## 8.3 운영 검증
실제 실행 전 아래 순서 권장:
```bash
codex login
python -m tradingagents.llm_clients.codex_preflight
python main.py
```
또는 CLI/UI에서 provider를 `codex`로 선택.
---
## 9. 장애 대응
## 9.1 malformed JSON
대응:
- 1회 재시도
- 재시도 prompt:
- “Your previous output was invalid JSON. Return valid JSON matching the schema only.”
- 그래도 실패하면 예외 raise
## 9.2 app-server 시작 실패
대응:
- binary path 재확인
- `codex --version` 확인
- PATH 문제면 `codex_binary` 절대경로 사용
## 9.3 로그인/권한 문제
대응:
- `codex login`
- headless면 `codex login --device-auth`
- `cli_auth_credentials_store="file"` 설정
- `~/.codex/auth.json` 존재 여부 확인
## 9.4 rate limit
대응:
- `account/rateLimits/read` 노출
- 재시도(backoff)
- 긴 배치 작업은 serialized run
- 필요 시 Codex credits 사용 고려
## 9.5 thread log 과다 생성
대응:
- `thread/unsubscribe` 기본 수행
- `.codex-log` 별도 디렉터리 사용
- 오래된 로그 cleanup script 추가
---
## 10. 권장 구현 순서
### Phase 1
- provider 추가
- app-server connection 추가
- plain invoke만 먼저 연결
- preflight 추가
### Phase 2
- `bind_tools()` + tool schema oneOf 구현
- analyst nodes smoke test
### Phase 3
- CLI/UI 옵션 추가
- README/docs 작성
- 통합 테스트 보강
### Phase 4
- malformed JSON retry
- rate limit/backoff
- log cleanup / diagnostics
---
## 11. 최종 권장안 요약
### 가장 좋은 구현 방식
**TradingAgents에 `codex` provider를 새로 추가하고, 내부에서 `codex app-server`와 stdio(JSONL)로 통신하는 LangChain 커스텀 ChatModel을 구현한다.**
tool calling은 Codex dynamicTools를 쓰지 말고, **outputSchema + JSON oneOf** 방식으로 모델 응답을 `final` 또는 `tool_calls` 형태로 강제한다.
### 이 방식의 장점
- OpenAI API key 불필요
- ChatGPT Pro / Codex 로그인 재사용 가능
- TradingAgents의 기존 ToolNode / graph 구조 유지
- Python 프로젝트에 자연스럽게 통합 가능
- dynamicTools 실험 API 의존 최소화
- 추후 유지보수 포인트가 명확함
### 반드시 지켜야 할 운영 원칙
- 직접 OAuth refresh 구현 금지
- `auth.json`은 비밀 취급
- `codex login` 또는 device-auth 우선
- one auth cache per trusted runner / serialized workflow
- Codex를 모델 백엔드로만 쓰고 shell/web 기능은 최대한 비활성화
---
## 12. 최소 수용 기준(Acceptance Criteria)
아래가 모두 충족되면 구현 성공으로 간주한다.
1. `llm_provider="codex"` 설정으로 TradingAgents가 실행된다.
2. API key 없이 `codex login` 상태에서 동작한다.
3. analyst 노드가 `bind_tools()`를 통해 tool call을 생성하고 ToolNode가 이를 실행한다.
4. manager/trader/risk nodes가 plain `invoke()`로 정상 응답한다.
5. `AAPL` 또는 `SPY`에 대해 최소 1개 analyst + 전체 graph smoke run이 성공한다.
6. malformed JSON, auth missing, binary missing, model missing에 대한 에러 메시지가 명확하다.
7. README와 preflight가 포함된다.

213
README.ko.md Normal file
View File

@ -0,0 +1,213 @@
# TradingAgents: 멀티 에이전트 LLM 금융 트레이딩 프레임워크
영문 문서: [README.md](README.md)
## 개요
TradingAgents는 실제 트레이딩 조직의 협업 흐름을 반영한 멀티 에이전트 프레임워크입니다. 펀더멘털 분석가, 뉴스 분석가, 시장 분석가, 리서처, 트레이더, 리스크 관리 팀이 역할별로 나뉘어 시장을 분석하고, 토론을 거쳐 최종 매매 결정을 도출합니다.
이 프로젝트는 연구 목적입니다. 결과는 사용한 모델, 데이터 품질, 분석 기간, 프롬프트, 외부 환경에 따라 달라질 수 있으며 투자 자문 용도가 아닙니다.
## 팀 구성
### 애널리스트 팀
- 펀더멘털 분석가: 기업 재무 상태와 성과 지표를 평가합니다.
- 센티먼트 분석가: 소셜 미디어와 대중 심리를 분석합니다.
- 뉴스 분석가: 뉴스와 거시경제 이벤트의 영향을 해석합니다.
- 시장 분석가: 기술적 지표와 가격 흐름을 분석합니다.
### 리서처 팀
- 강세 관점과 약세 관점의 리서처가 애널리스트 보고서를 바탕으로 토론합니다.
### 트레이더
- 애널리스트와 리서처의 결과를 종합해 매매 타이밍과 비중을 판단합니다.
### 리스크 관리 및 포트폴리오 매니저
- 리스크를 평가하고 최종 거래 제안을 승인하거나 거절합니다.
## 설치
### 저장소 클론
```powershell
git clone https://github.com/TauricResearch/TradingAgents.git
Set-Location TradingAgents
```
### Windows PowerShell 빠른 시작
이 저장소에서 실제로 검증한 설치 절차입니다.
```powershell
Set-Location C:\Projects\TradingAgents
py -3.13 -m venv .venv-codex
.\.venv-codex\Scripts\Activate.ps1
python -m pip install --upgrade pip
python -m pip install -e . --no-cache-dir
tradingagents --help
```
참고:
- 현재 환경에서는 `.venv-codex`를 기본 가상환경으로 사용하는 흐름을 검증했습니다.
- `tradingagents` 명령이 없으면 대개 패키지가 아직 가상환경에 설치되지 않은 상태입니다.
### Docker
```powershell
Copy-Item .env.example .env
notepad .env
docker compose run --rm tradingagents
```
Ollama 프로필:
```powershell
docker compose --profile ollama run --rm tradingagents-ollama
```
## API 및 인증
TradingAgents는 여러 LLM 제공자를 지원합니다.
### 일반 제공자용 환경 변수
```powershell
$env:OPENAI_API_KEY = "..."
$env:GOOGLE_API_KEY = "..."
$env:ANTHROPIC_API_KEY = "..."
$env:XAI_API_KEY = "..."
$env:OPENROUTER_API_KEY = "..."
$env:ALPHA_VANTAGE_API_KEY = "..."
```
### Codex 제공자
`codex` 제공자는 OpenAI API 키가 필요 없습니다. 대신 Codex CLI 로그인만 되어 있으면 됩니다.
```powershell
where.exe codex
codex --version
codex login
```
또는:
```powershell
codex login --device-auth
```
TradingAgents는 `codex app-server`와 stdio로 직접 통신하며, Codex가 관리하는 인증 정보를 사용합니다. 파일 기반 인증을 쓰는 경우 보통 `~/.codex/auth.json`이 사용될 수 있습니다.
권장 `~/.codex/config.toml`:
```toml
approval_policy = "never"
sandbox_mode = "read-only"
web_search = "disabled"
personality = "none"
cli_auth_credentials_store = "file"
```
중요한 점:
- TradingAgents는 자체 LangGraph `ToolNode`를 유지합니다.
- Codex dynamic tools는 사용하지 않습니다.
- 에이전트 간 컨텍스트 오염을 막기 위해 호출마다 새로운 ephemeral Codex thread를 사용합니다.
- 기본 Codex 작업 디렉터리는 `~/.codex/tradingagents-workspace`입니다.
VS Code 터미널에서 `codex`가 인식되지 않으면:
- `where.exe codex`로 경로를 확인합니다.
- VS Code 창을 다시 로드합니다.
- 필요하면 `where.exe codex`가 반환한 전체 경로로 `codex.exe`를 직접 실행합니다.
TradingAgents는 Windows에서 VS Code OpenAI 확장 설치 경로 같은 일반적인 위치의 `codex.exe`도 자동 탐지합니다. 자동 탐지를 덮어쓰고 싶다면:
```powershell
$env:CODEX_BINARY = "C:\full\path\to\codex.exe"
```
## CLI 실행
설치 후 인터랙티브 CLI 실행:
```powershell
Set-Location C:\Projects\TradingAgents
.\.venv-codex\Scripts\Activate.ps1
tradingagents
```
대안:
```powershell
Set-Location C:\Projects\TradingAgents
.\.venv-codex\Scripts\Activate.ps1
python -m cli.main
```
도움말 확인:
```powershell
Set-Location C:\Projects\TradingAgents
.\.venv-codex\Scripts\Activate.ps1
tradingagents --help
```
## Python 패키지로 사용
### 기본 예시
```python
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
ta = TradingAgentsGraph(debug=True, config=DEFAULT_CONFIG.copy())
_, decision = ta.propagate("NVDA", "2026-01-15")
print(decision)
```
### 설정 예시
```python
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
config = DEFAULT_CONFIG.copy()
config["llm_provider"] = "codex"
config["quick_think_llm"] = "gpt-5.4-mini"
config["deep_think_llm"] = "gpt-5.4-mini"
config["max_debate_rounds"] = 1
ta = TradingAgentsGraph(debug=True, config=config)
_, decision = ta.propagate("NVDA", "2026-01-15")
print(decision)
```
`llm_provider = "codex"`에서 추가로 조정할 수 있는 설정:
- `codex_binary`
- `codex_reasoning_effort`
- `codex_summary`
- `codex_personality`
- `codex_workspace_dir`
- `codex_request_timeout`
- `codex_max_retries`
- `codex_cleanup_threads`
## 이번 검증에서 확인한 항목
실제 Windows PowerShell 환경에서 다음 항목을 검증했습니다.
- `.venv-codex`에 패키지 설치
- `tradingagents --help` 실행
- 로그인된 Codex 계정으로 plain `llm.invoke(...)` 호출
- OpenAI 스타일 `list[dict]` 입력 경로
- `bind_tools()` 기반 tool-call 경로
- 최소 `TradingAgentsGraph(...).propagate(...)` smoke run으로 final decision 생성
최소 그래프 smoke run에서는 `FINAL_DECISION= HOLD`가 반환되는 것을 확인했습니다.
## 기여
버그 수정, 문서 개선, 기능 제안 등 모든 형태의 기여를 환영합니다.
## 인용
인용 정보는 [README.md](README.md)의 citation 섹션을 참고해 주세요.

View File

@ -27,6 +27,8 @@
# TradingAgents: Multi-Agents LLM Financial Trading Framework
Korean documentation: [README.ko.md](README.ko.md)
## News
- [2026-03] **TradingAgents v0.2.3** released with multi-language support, GPT-5.4 family models, unified model catalog, backtesting date fidelity, and proxy support.
- [2026-03] **TradingAgents v0.2.2** released with GPT-5.4/Gemini 3.1/Claude 4.6 model coverage, five-tier rating scale, OpenAI Responses API, Anthropic effort control, and cross-platform stability.
@ -118,6 +120,16 @@ Install the package and its dependencies:
pip install .
```
Windows PowerShell quickstart (validated in this repository):
```powershell
Set-Location C:\Projects\TradingAgents
py -3.13 -m venv .venv-codex
.\.venv-codex\Scripts\Activate.ps1
python -m pip install --upgrade pip
python -m pip install -e . --no-cache-dir
tradingagents --help
```
### Docker
Alternatively, run with Docker:
@ -146,6 +158,42 @@ export ALPHA_VANTAGE_API_KEY=... # Alpha Vantage
For local models, configure Ollama with `llm_provider: "ollama"` in your config.
For the local `codex` provider, no API key is required. Authenticate once with Codex instead:
```bash
codex login
# or
codex login --device-auth
```
TradingAgents talks directly to `codex app-server` over stdio and relies on Codex-managed credentials (for example `~/.codex/auth.json` when file-backed auth is enabled). If auth is missing, the provider fails with a message telling you to run `codex login`.
Recommended `~/.codex/config.toml` for TradingAgents:
```toml
approval_policy = "never"
sandbox_mode = "read-only"
web_search = "disabled"
personality = "none"
cli_auth_credentials_store = "file"
```
Important notes for `codex`:
- TradingAgents keeps its own LangGraph `ToolNode` execution. It does not use Codex dynamic tools.
- Each model invocation uses a fresh ephemeral Codex thread to avoid context bleed across agents.
- The default Codex workspace is a dedicated neutral directory under `~/.codex/tradingagents-workspace`, not your repo root.
Windows PowerShell notes for `codex`:
```powershell
where.exe codex
codex --version
codex login
```
If `codex` is not recognized in the VS Code terminal, reload the VS Code window after updating your terminal PATH or use the full `codex.exe` path returned by `where.exe codex`.
TradingAgents also tries to auto-discover `codex.exe` from common Windows locations such as the VS Code OpenAI extension install path. You can override detection explicitly with:
```powershell
$env:CODEX_BINARY = "C:\full\path\to\codex.exe"
```
Alternatively, copy `.env.example` to `.env` and fill in your keys:
```bash
cp .env.example .env
@ -160,6 +208,33 @@ python -m cli.main # alternative: run directly from source
```
You will see a screen where you can select your desired tickers, analysis date, LLM provider, research depth, and more.
Windows PowerShell run commands:
```powershell
Set-Location C:\Projects\TradingAgents
.\.venv-codex\Scripts\Activate.ps1
tradingagents
```
Alternative:
```powershell
Set-Location C:\Projects\TradingAgents
.\.venv-codex\Scripts\Activate.ps1
python -m cli.main
```
Validated Codex smoke checks:
```powershell
Set-Location C:\Projects\TradingAgents
.\.venv-codex\Scripts\Activate.ps1
tradingagents --help
```
The local Codex provider was also validated with:
- a plain `llm.invoke(...)` call
- an OpenAI-style `list[dict]` invoke path
- a `bind_tools()` tool-call path
- a minimal `TradingAgentsGraph(...).propagate(...)` smoke run that returned a final decision
<p align="center">
<img src="assets/cli/cli_init.png" width="100%" style="display: inline-block; margin: 0 2%;">
</p>
@ -178,7 +253,7 @@ An interface will appear showing results as they load, letting you track the age
### Implementation Details
We built TradingAgents with LangGraph to ensure flexibility and modularity. The framework supports multiple LLM providers: OpenAI, Google, Anthropic, xAI, OpenRouter, and Ollama.
We built TradingAgents with LangGraph to ensure flexibility and modularity. The framework supports multiple LLM providers: OpenAI, Codex, Google, Anthropic, xAI, OpenRouter, and Ollama.
### Python Usage
@ -202,7 +277,7 @@ from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
config = DEFAULT_CONFIG.copy()
config["llm_provider"] = "openai" # openai, google, anthropic, xai, openrouter, ollama
config["llm_provider"] = "openai" # openai, codex, google, anthropic, xai, openrouter, ollama
config["deep_think_llm"] = "gpt-5.4" # Model for complex reasoning
config["quick_think_llm"] = "gpt-5.4-mini" # Model for quick tasks
config["max_debate_rounds"] = 2
@ -214,6 +289,16 @@ print(decision)
See `tradingagents/default_config.py` for all configuration options.
When using `llm_provider = "codex"`, these extra config knobs are available:
- `codex_binary`
- `codex_reasoning_effort`
- `codex_summary`
- `codex_personality`
- `codex_workspace_dir`
- `codex_request_timeout`
- `codex_max_retries`
- `codex_cleanup_threads`
## Contributing
We welcome contributions from the community! Whether it's fixing a bug, improving documentation, or suggesting a new feature, your input helps make this project better. If you are interested in this line of research, please consider joining our open-source financial AI research community [Tauric Research](https://tauric.ai/).

View File

@ -25,6 +25,7 @@ from rich.rule import Rule
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
from tradingagents.reporting import save_report_bundle
from cli.models import AnalystType
from cli.utils import *
from cli.announcements import fetch_announcements, display_announcements
@ -462,7 +463,7 @@ def update_display(layout, spinner_text=None, stats_handler=None, start_time=Non
def get_user_selections():
"""Get all user selections before starting the analysis display."""
# Display ASCII art welcome message
with open(Path(__file__).parent / "static" / "welcome.txt", "r") as f:
with open(Path(__file__).parent / "static" / "welcome.txt", "r", encoding="utf-8") as f:
welcome_ascii = f.read()
# Create welcome box content
@ -568,6 +569,7 @@ def get_user_selections():
thinking_level = None
reasoning_effort = None
anthropic_effort = None
codex_reasoning_effort = None
provider_lower = selected_llm_provider.lower()
if provider_lower == "google":
@ -594,6 +596,14 @@ def get_user_selections():
)
)
anthropic_effort = ask_anthropic_effort()
elif provider_lower == "codex":
console.print(
create_question_box(
"Step 8: Reasoning Effort",
"Configure Codex reasoning effort level"
)
)
codex_reasoning_effort = ask_codex_reasoning_effort()
return {
"ticker": selected_ticker,
@ -607,6 +617,7 @@ def get_user_selections():
"google_thinking_level": thinking_level,
"openai_reasoning_effort": reasoning_effort,
"anthropic_effort": anthropic_effort,
"codex_reasoning_effort": codex_reasoning_effort,
"output_language": output_language,
}
@ -635,94 +646,9 @@ def get_analysis_date():
)
def save_report_to_disk(final_state, ticker: str, save_path: Path):
def save_report_to_disk(final_state, ticker: str, save_path: Path, *, language: str = "English"):
"""Save complete analysis report to disk with organized subfolders."""
save_path.mkdir(parents=True, exist_ok=True)
sections = []
# 1. Analysts
analysts_dir = save_path / "1_analysts"
analyst_parts = []
if final_state.get("market_report"):
analysts_dir.mkdir(exist_ok=True)
(analysts_dir / "market.md").write_text(final_state["market_report"])
analyst_parts.append(("Market Analyst", final_state["market_report"]))
if final_state.get("sentiment_report"):
analysts_dir.mkdir(exist_ok=True)
(analysts_dir / "sentiment.md").write_text(final_state["sentiment_report"])
analyst_parts.append(("Social Analyst", final_state["sentiment_report"]))
if final_state.get("news_report"):
analysts_dir.mkdir(exist_ok=True)
(analysts_dir / "news.md").write_text(final_state["news_report"])
analyst_parts.append(("News Analyst", final_state["news_report"]))
if final_state.get("fundamentals_report"):
analysts_dir.mkdir(exist_ok=True)
(analysts_dir / "fundamentals.md").write_text(final_state["fundamentals_report"])
analyst_parts.append(("Fundamentals Analyst", final_state["fundamentals_report"]))
if analyst_parts:
content = "\n\n".join(f"### {name}\n{text}" for name, text in analyst_parts)
sections.append(f"## I. Analyst Team Reports\n\n{content}")
# 2. Research
if final_state.get("investment_debate_state"):
research_dir = save_path / "2_research"
debate = final_state["investment_debate_state"]
research_parts = []
if debate.get("bull_history"):
research_dir.mkdir(exist_ok=True)
(research_dir / "bull.md").write_text(debate["bull_history"])
research_parts.append(("Bull Researcher", debate["bull_history"]))
if debate.get("bear_history"):
research_dir.mkdir(exist_ok=True)
(research_dir / "bear.md").write_text(debate["bear_history"])
research_parts.append(("Bear Researcher", debate["bear_history"]))
if debate.get("judge_decision"):
research_dir.mkdir(exist_ok=True)
(research_dir / "manager.md").write_text(debate["judge_decision"])
research_parts.append(("Research Manager", debate["judge_decision"]))
if research_parts:
content = "\n\n".join(f"### {name}\n{text}" for name, text in research_parts)
sections.append(f"## II. Research Team Decision\n\n{content}")
# 3. Trading
if final_state.get("trader_investment_plan"):
trading_dir = save_path / "3_trading"
trading_dir.mkdir(exist_ok=True)
(trading_dir / "trader.md").write_text(final_state["trader_investment_plan"])
sections.append(f"## III. Trading Team Plan\n\n### Trader\n{final_state['trader_investment_plan']}")
# 4. Risk Management
if final_state.get("risk_debate_state"):
risk_dir = save_path / "4_risk"
risk = final_state["risk_debate_state"]
risk_parts = []
if risk.get("aggressive_history"):
risk_dir.mkdir(exist_ok=True)
(risk_dir / "aggressive.md").write_text(risk["aggressive_history"])
risk_parts.append(("Aggressive Analyst", risk["aggressive_history"]))
if risk.get("conservative_history"):
risk_dir.mkdir(exist_ok=True)
(risk_dir / "conservative.md").write_text(risk["conservative_history"])
risk_parts.append(("Conservative Analyst", risk["conservative_history"]))
if risk.get("neutral_history"):
risk_dir.mkdir(exist_ok=True)
(risk_dir / "neutral.md").write_text(risk["neutral_history"])
risk_parts.append(("Neutral Analyst", risk["neutral_history"]))
if risk_parts:
content = "\n\n".join(f"### {name}\n{text}" for name, text in risk_parts)
sections.append(f"## IV. Risk Management Team Decision\n\n{content}")
# 5. Portfolio Manager
if risk.get("judge_decision"):
portfolio_dir = save_path / "5_portfolio"
portfolio_dir.mkdir(exist_ok=True)
(portfolio_dir / "decision.md").write_text(risk["judge_decision"])
sections.append(f"## V. Portfolio Manager Decision\n\n### Portfolio Manager\n{risk['judge_decision']}")
# Write consolidated report
header = f"# Trading Analysis Report: {ticker}\n\nGenerated: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
(save_path / "complete_report.md").write_text(header + "\n\n".join(sections))
return save_path / "complete_report.md"
return save_report_bundle(final_state, ticker, save_path, language=language)
def display_complete_report(final_state):
@ -941,6 +867,7 @@ def run_analysis():
config["google_thinking_level"] = selections.get("google_thinking_level")
config["openai_reasoning_effort"] = selections.get("openai_reasoning_effort")
config["anthropic_effort"] = selections.get("anthropic_effort")
config["codex_reasoning_effort"] = selections.get("codex_reasoning_effort")
config["output_language"] = selections.get("output_language", "English")
# Create stats callback handler for tracking LLM/tool calls
@ -979,7 +906,7 @@ def run_analysis():
func(*args, **kwargs)
timestamp, message_type, content = obj.messages[-1]
content = content.replace("\n", " ") # Replace newlines with spaces
with open(log_file, "a") as f:
with open(log_file, "a", encoding="utf-8") as f:
f.write(f"{timestamp} [{message_type}] {content}\n")
return wrapper
@ -990,7 +917,7 @@ def run_analysis():
func(*args, **kwargs)
timestamp, tool_name, args = obj.tool_calls[-1]
args_str = ", ".join(f"{k}={v}" for k, v in args.items())
with open(log_file, "a") as f:
with open(log_file, "a", encoding="utf-8") as f:
f.write(f"{timestamp} [Tool Call] {tool_name}({args_str})\n")
return wrapper
@ -1004,7 +931,7 @@ def run_analysis():
if content:
file_name = f"{section_name}.md"
text = "\n".join(str(item) for item in content) if isinstance(content, list) else content
with open(report_dir / file_name, "w") as f:
with open(report_dir / file_name, "w", encoding="utf-8") as f:
f.write(text)
return wrapper
@ -1187,7 +1114,12 @@ def run_analysis():
).strip()
save_path = Path(save_path_str)
try:
report_file = save_report_to_disk(final_state, selections["ticker"], save_path)
report_file = save_report_to_disk(
final_state,
selections["ticker"],
save_path,
language=selections.get("output_language", "English"),
)
console.print(f"\n[green]✓ Report saved to:[/green] {save_path.resolve()}")
console.print(f" [dim]Complete report:[/dim] {report_file.name}")
except Exception as e:

View File

@ -237,6 +237,7 @@ def select_llm_provider() -> tuple[str, str | None]:
"""Select the LLM provider and its API endpoint."""
BASE_URLS = [
("OpenAI", "https://api.openai.com/v1"),
("Codex", None),
("Google", None), # google-genai SDK manages its own endpoint
("Anthropic", "https://api.anthropic.com/"),
("xAI", "https://api.x.ai/v1"),
@ -288,6 +289,11 @@ def ask_openai_reasoning_effort() -> str:
).ask()
def ask_codex_reasoning_effort() -> str:
"""Ask for Codex reasoning effort level."""
return ask_openai_reasoning_effort()
def ask_anthropic_effort() -> str | None:
"""Ask for Anthropic effort level.

View File

@ -0,0 +1,36 @@
# Copy this file to `config/scheduled_analysis.toml` and adjust the values for your runner.
[run]
tickers = ["NVDA", "MSFT", "TSLA"]
analysts = ["market", "social", "news", "fundamentals"]
output_language = "Korean"
trade_date_mode = "latest_available"
timezone = "Asia/Seoul"
max_debate_rounds = 1
max_risk_discuss_rounds = 1
latest_market_data_lookback_days = 14
continue_on_ticker_error = true
[llm]
provider = "codex"
# TradingAgents' current Codex provider path uses the frontier model id `gpt-5.4`
# for Codex 5.4 sessions.
quick_model = "gpt-5.4"
deep_model = "gpt-5.4"
codex_reasoning_effort = "medium"
codex_summary = "none"
codex_personality = "none"
codex_request_timeout = 180.0
codex_max_retries = 2
codex_cleanup_threads = true
[storage]
# For stable run history on a self-hosted runner, prefer a persistent path outside the repo checkout.
# Example on Windows: "C:/TradingAgentsData/archive"
archive_dir = "./.runtime/tradingagents-archive"
site_dir = "./site"
[site]
title = "TradingAgents Daily Reports"
subtitle = "Self-hosted Codex automation for scheduled multi-ticker analysis"
max_runs_on_homepage = 30

View File

@ -0,0 +1,34 @@
[run]
tickers = ["GOOGL", "NVDA", "TSM", "AAPL", "ETN", "LLY", "GLDM", "VRT", "TSLA", "GEV", "VXUS", "RSP", "FANG", "ETHU", "ORCL", "MU"]
analysts = ["market", "social", "news", "fundamentals"]
output_language = "Korean"
trade_date_mode = "latest_available"
timezone = "Asia/Seoul"
max_debate_rounds = 1
max_risk_discuss_rounds = 1
latest_market_data_lookback_days = 14
continue_on_ticker_error = true
[llm]
provider = "codex"
# TradingAgents' current Codex provider path uses the frontier model id `gpt-5.4`
# for Codex 5.4 sessions.
quick_model = "gpt-5.4"
deep_model = "gpt-5.4"
codex_reasoning_effort = "medium"
codex_summary = "none"
codex_personality = "none"
codex_request_timeout = 180.0
codex_max_retries = 2
codex_cleanup_threads = true
[storage]
# For a self-hosted runner, it is better to override this to a persistent absolute path
# via the TRADINGAGENTS_ARCHIVE_DIR repository variable or by editing this file.
archive_dir = "./.runtime/tradingagents-archive"
site_dir = "./site"
[site]
title = "TradingAgents Daily Reports"
subtitle = "Self-hosted Codex automation for scheduled multi-ticker analysis"
max_runs_on_homepage = 30

View File

@ -34,6 +34,7 @@ dependencies = [
[project.scripts]
tradingagents = "cli.main:app"
tradingagents-scheduled = "tradingagents.scheduled.runner:main"
[tool.setuptools.packages.find]
include = ["tradingagents*", "cli*"]

View File

@ -0,0 +1,93 @@
import tempfile
import unittest
from pathlib import Path
from types import SimpleNamespace
from unittest.mock import patch
from cli.main import run_analysis
from cli.models import AnalystType
class _DummyLive:
def __init__(self, *args, **kwargs):
pass
def __enter__(self):
return self
def __exit__(self, exc_type, exc, tb):
return False
class _FakePropagator:
def create_initial_state(self, ticker, analysis_date):
return {"ticker": ticker, "analysis_date": analysis_date}
def get_graph_args(self, callbacks=None):
return {}
class _FakeGraphRunner:
def stream(self, init_state, **kwargs):
yield {
"messages": [SimpleNamespace(id="msg-1", tool_calls=[])],
"market_report": "시장 보고서 — 한글 검증",
"final_trade_decision": "HOLD — 포지션 유지",
}
class _FakeTradingAgentsGraph:
def __init__(self, *args, **kwargs):
self.propagator = _FakePropagator()
self.graph = _FakeGraphRunner()
def process_signal(self, signal):
return signal
class CliUnicodeLoggingTests(unittest.TestCase):
    """End-to-end check that the CLI writes logs and reports as UTF-8.

    Everything around `run_analysis` is patched out so only its file-writing
    side effects are exercised; the Korean strings act as a probe that would
    be corrupted if files were opened with a non-UTF-8 locale encoding.
    """

    def test_run_analysis_writes_logs_and_reports_as_utf8(self):
        # Route all CLI output under a throwaway results directory.
        with tempfile.TemporaryDirectory() as tmpdir:
            results_dir = Path(tmpdir) / "results"
            # Canned answers normally gathered interactively from the user.
            selections = {
                "ticker": "GOOGL",
                "analysis_date": "2026-04-05",
                "output_language": "Korean",
                "analysts": [AnalystType.MARKET],
                "research_depth": 1,
                "llm_provider": "codex",
                "backend_url": None,
                "shallow_thinker": "gpt-5.4",
                "deep_thinker": "gpt-5.4",
                "codex_reasoning_effort": "medium",
            }
            # Replace every interactive / networked collaborator of
            # run_analysis with an inert double so the test is hermetic.
            with (
                patch("cli.main.get_user_selections", return_value=selections),
                patch("cli.main.DEFAULT_CONFIG", {"results_dir": str(results_dir)}),
                patch("cli.main.TradingAgentsGraph", _FakeTradingAgentsGraph),
                patch("cli.main.StatsCallbackHandler", return_value=SimpleNamespace()),
                patch("cli.main.Live", _DummyLive),
                patch("cli.main.create_layout", return_value=object()),
                patch("cli.main.update_display"),
                patch("cli.main.update_analyst_statuses"),
                patch(
                    "cli.main.classify_message_type",
                    # Unicode payload that must survive the log round-trip.
                    return_value=("Agent", "유니코드 메시지 — 로그 저장 검증"),
                ),
                # Decline both end-of-run prompts (e.g. saving extra output).
                patch("cli.main.typer.prompt", side_effect=["N", "N"]),
                patch("cli.main.console.print"),
            ):
                run_analysis()
            # Paths follow the CLI's results layout: <ticker>/<date>/...
            log_file = results_dir / "GOOGL" / "2026-04-05" / "message_tool.log"
            report_file = results_dir / "GOOGL" / "2026-04-05" / "reports" / "market_report.md"
            self.assertTrue(log_file.exists())
            self.assertTrue(report_file.exists())
            # Reading back with an explicit UTF-8 codec proves the writer
            # did not fall back to the platform locale encoding.
            self.assertIn("유니코드 메시지 — 로그 저장 검증", log_file.read_text(encoding="utf-8"))
            self.assertIn("시장 보고서 — 한글 검증", report_file.read_text(encoding="utf-8"))
# Allow running this test module directly (python <file>.py) in addition
# to test-runner discovery.
if __name__ == "__main__":
    unittest.main()

View File

@ -0,0 +1,543 @@
import re
import unittest
from collections import deque
from pathlib import Path
from unittest.mock import patch
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage, ToolMessage
from langchain_core.prompts import ChatPromptTemplate
from tradingagents.llm_clients.codex_app_server import (
CodexAppServerAuthError,
CodexAppServerBinaryError,
CodexInvocationResult,
CodexStructuredOutputError,
)
from tradingagents.llm_clients.codex_message_codec import normalize_input_messages
from tradingagents.llm_clients.codex_binary import resolve_codex_binary
from tradingagents.llm_clients.codex_preflight import run_codex_preflight
from tradingagents.llm_clients.codex_schema import (
build_plain_response_schema,
build_tool_response_schema,
normalize_tools_for_codex,
)
from tradingagents.llm_clients.factory import create_llm_client
# Intentionally body-less tool stub (returns None at runtime); presumably only
# its name, signature, and docstring feed the Codex tool-schema conversion —
# TODO confirm against normalize_tools_for_codex.
def lookup_price(ticker: str) -> str:
    """Return the latest price snapshot for a ticker."""
# Intentionally body-less tool stub (returns None at runtime); presumably only
# its name, signature, and docstring feed the Codex tool-schema conversion —
# TODO confirm against normalize_tools_for_codex.
def lookup_volume(ticker: str) -> str:
    """Return the latest volume snapshot for a ticker."""
class FakeCodexSession:
    """In-memory stand-in for a Codex app-server session.

    Records lifecycle calls and every ``invoke`` argument, and replays a
    FIFO queue of canned final-text responses so tests can script the model.
    """

    def __init__(
        self,
        *,
        codex_binary=None,
        request_timeout=0,
        workspace_dir="",
        cleanup_threads=True,
        responses=None,
        account_payload=None,
        models_payload=None,
    ):
        self.codex_binary = codex_binary
        self.request_timeout = request_timeout
        self.workspace_dir = workspace_dir
        self.cleanup_threads = cleanup_threads
        # Canned responses are consumed front-to-back by invoke().
        self.responses = deque(responses if responses else [])
        if not account_payload:
            account_payload = {
                "account": {"type": "chatgpt"},
                "requiresOpenaiAuth": False,
            }
        self.account_payload = account_payload
        if not models_payload:
            models_payload = {
                "data": [{"id": "gpt-5.4", "model": "gpt-5.4"}]
            }
        self.models_payload = models_payload
        # Counters and captured invocations for test assertions.
        self.started = 0
        self.closed = 0
        self.invocations = []

    def start(self):
        # Count session starts; no real subprocess is launched.
        self.started += 1

    def close(self):
        # Count session shutdowns.
        self.closed += 1

    def account_read(self):
        # Mimic the account/read RPC with the configured payload.
        return self.account_payload

    def model_list(self, include_hidden=True):
        # Mimic the model/list RPC; `include_hidden` is accepted but unused.
        return self.models_payload

    def invoke(
        self,
        *,
        prompt,
        model,
        output_schema,
        reasoning_effort,
        summary,
        personality,
    ):
        # Capture every argument so tests can assert on what was sent.
        call_record = {
            "prompt": prompt,
            "model": model,
            "output_schema": output_schema,
            "reasoning_effort": reasoning_effort,
            "summary": summary,
            "personality": personality,
        }
        self.invocations.append(call_record)
        if not self.responses:
            raise AssertionError("No fake Codex responses left.")
        return CodexInvocationResult(final_text=self.responses.popleft(), notifications=[])
class CodexProviderTests(unittest.TestCase):
    """Tests for the Codex LLM provider: binary resolution, message
    normalization, output-schema construction, response parsing, retry
    behavior, and preflight checks, all driven through FakeCodexSession."""

    def test_resolve_codex_binary_uses_windows_vscode_fallback(self):
        """With no PATH hit on Windows, the VS Code extension binary is used."""
        fake_home = Path("C:/Users/tester")
        candidate = fake_home / ".vscode/extensions/openai.chatgpt-1.0.0/bin/windows-x86_64/codex.exe"
        with (
            patch("tradingagents.llm_clients.codex_binary.os.name", "nt"),
            patch("tradingagents.llm_clients.codex_binary.Path.home", return_value=fake_home),
            patch("tradingagents.llm_clients.codex_binary.shutil.which", return_value=None),
            patch(
                "tradingagents.llm_clients.codex_binary.Path.glob",
                return_value=[candidate],
            ),
            patch("pathlib.Path.is_file", return_value=True),
            patch("pathlib.Path.exists", return_value=True),
            patch("pathlib.Path.stat") as mocked_stat,
        ):
            mocked_stat.return_value.st_mtime = 1
            resolved = resolve_codex_binary(None)
            self.assertEqual(resolved, str(candidate))

    def test_resolve_codex_binary_skips_unusable_path_alias_on_windows(self):
        """An unusable PATH alias (e.g. a WindowsApps stub) is skipped in favor of a usable candidate."""
        fake_home = Path("C:/Users/tester")
        alias_path = "C:/Program Files/WindowsApps/OpenAI.Codex/app/resources/codex.exe"
        candidate = fake_home / ".vscode/extensions/openai.chatgpt-1.0.0/bin/windows-x86_64/codex.exe"
        with (
            patch("tradingagents.llm_clients.codex_binary.os.name", "nt"),
            patch("tradingagents.llm_clients.codex_binary.Path.home", return_value=fake_home),
            patch("tradingagents.llm_clients.codex_binary.shutil.which", return_value=alias_path),
            patch(
                "tradingagents.llm_clients.codex_binary.Path.glob",
                return_value=[candidate],
            ),
            patch("pathlib.Path.is_file", return_value=True),
            patch("pathlib.Path.exists", return_value=True),
            patch("pathlib.Path.stat") as mocked_stat,
            patch(
                "tradingagents.llm_clients.codex_binary._is_usable_codex_binary",
                side_effect=lambda path: path != alias_path,
            ),
        ):
            mocked_stat.return_value.st_mtime = 1
            resolved = resolve_codex_binary(None)
            self.assertEqual(resolved, str(candidate))

    def test_resolve_codex_binary_uses_env_override(self):
        """The CODEX_BINARY environment variable wins when no explicit path is given."""
        with (
            patch("tradingagents.llm_clients.codex_binary.os.name", "nt"),
            patch("tradingagents.llm_clients.codex_binary.shutil.which", return_value=None),
            patch.dict("os.environ", {"CODEX_BINARY": "C:/custom/codex.exe"}, clear=False),
            patch("pathlib.Path.is_file", return_value=True),
            patch(
                "tradingagents.llm_clients.codex_binary._is_usable_codex_binary",
                return_value=True,
            ),
        ):
            resolved = resolve_codex_binary(None)
            self.assertEqual(Path(resolved), Path("C:/custom/codex.exe"))

    def test_resolve_codex_binary_checks_explicit_binary_usability(self):
        """An explicitly-passed binary is returned even when the usability probe fails."""
        with (
            patch("tradingagents.llm_clients.codex_binary.os.name", "nt"),
            patch("pathlib.Path.is_file", return_value=True),
            patch(
                "tradingagents.llm_clients.codex_binary._is_usable_codex_binary",
                return_value=False,
            ),
        ):
            resolved = resolve_codex_binary("C:/custom/codex.exe")
            self.assertEqual(Path(resolved), Path("C:/custom/codex.exe"))

    def test_message_normalization_supports_str_messages_and_openai_dicts(self):
        """OpenAI-style role dicts (incl. tool calls) map onto LangChain message types."""
        normalized = normalize_input_messages(
            [
                {"role": "system", "content": "system"},
                {"role": "user", "content": "user"},
                {
                    "role": "assistant",
                    "content": "",
                    "tool_calls": [
                        {
                            "id": "call_123",
                            "type": "function",
                            "function": {
                                "name": "lookup_price",
                                "arguments": '{"ticker":"NVDA"}',
                            },
                        }
                    ],
                },
                {"role": "tool", "tool_call_id": "call_123", "content": "42"},
            ]
        )
        self.assertIsInstance(normalized[0], SystemMessage)
        self.assertIsInstance(normalized[1], HumanMessage)
        self.assertIsInstance(normalized[2], AIMessage)
        # JSON-encoded function arguments are decoded into a dict on the AIMessage.
        self.assertEqual(normalized[2].tool_calls[0]["name"], "lookup_price")
        self.assertEqual(normalized[2].tool_calls[0]["args"], {"ticker": "NVDA"})
        self.assertIsInstance(normalized[3], ToolMessage)

    def test_output_schema_construction_builds_exact_tool_branches(self):
        """Schema builders emit exact per-tool branches, a required-mode variant, and a plain-answer schema."""
        tool_schemas = normalize_tools_for_codex([lookup_price])
        schema = build_tool_response_schema(tool_schemas)
        required_schema = build_tool_response_schema(tool_schemas, allow_final=False)
        plain_schema = build_plain_response_schema()
        self.assertEqual(plain_schema["required"], ["answer"])
        self.assertEqual(schema["properties"]["mode"]["enum"], ["final", "tool_calls"])
        tool_branch = schema["properties"]["tool_calls"]["items"]
        # Single-tool schemas pin the tool name with `const`.
        self.assertEqual(tool_branch["properties"]["name"]["const"], "lookup_price")
        self.assertIn("arguments", tool_branch["required"])
        self.assertEqual(required_schema["properties"]["mode"]["const"], "tool_calls")
        generic_schema = build_tool_response_schema(
            normalize_tools_for_codex([lookup_price, lookup_volume])
        )
        generic_items = generic_schema["properties"]["tool_calls"]["items"]
        # Multi-tool schemas fall back to a string name constrained by an enum,
        # with arguments carried as a JSON string.
        self.assertEqual(generic_items["properties"]["name"]["type"], "string")
        self.assertIn("enum", generic_items["properties"]["name"])
        self.assertEqual(generic_items["properties"]["arguments_json"]["type"], "string")

    def test_plain_final_response_parsing(self):
        """A plain `{"answer": ...}` response becomes the AIMessage content."""
        session = FakeCodexSession(
            responses=['{"answer":"Final decision"}'],
        )
        llm = create_llm_client(
            "codex",
            "gpt-5.4",
            codex_binary="C:/fake/codex",
            codex_workspace_dir="C:/tmp/codex-workspace",
            session_factory=lambda **kwargs: session,
            preflight_runner=lambda **kwargs: None,
        ).get_llm()
        result = llm.invoke("Give me the final answer.")
        self.assertEqual(result.content, "Final decision")
        self.assertEqual(session.started, 1)

    def test_invoke_accepts_openai_style_message_dicts(self):
        """Role dicts are rendered into the Codex prompt as labeled transcript sections."""
        session = FakeCodexSession(
            responses=['{"answer":"From dict transcript"}'],
        )
        llm = create_llm_client(
            "codex",
            "gpt-5.4",
            codex_binary="C:/fake/codex",
            codex_workspace_dir="C:/tmp/codex-workspace",
            session_factory=lambda **kwargs: session,
            preflight_runner=lambda **kwargs: None,
        ).get_llm()
        result = llm.invoke(
            [
                {"role": "system", "content": "system"},
                {"role": "user", "content": "user"},
            ]
        )
        self.assertEqual(result.content, "From dict transcript")
        self.assertIn("[System]\nsystem", session.invocations[0]["prompt"])
        self.assertIn("[Human]\nuser", session.invocations[0]["prompt"])

    def test_invoke_accepts_langchain_message_sequences(self):
        """BaseMessage sequences produce the same labeled transcript as role dicts."""
        session = FakeCodexSession(
            responses=['{"answer":"From BaseMessage transcript"}'],
        )
        llm = create_llm_client(
            "codex",
            "gpt-5.4",
            codex_binary="C:/fake/codex",
            codex_workspace_dir="C:/tmp/codex-workspace",
            session_factory=lambda **kwargs: session,
            preflight_runner=lambda **kwargs: None,
        ).get_llm()
        result = llm.invoke(
            [
                SystemMessage(content="system"),
                HumanMessage(content="user"),
            ]
        )
        self.assertEqual(result.content, "From BaseMessage transcript")
        self.assertIn("[System]\nsystem", session.invocations[0]["prompt"])
        self.assertIn("[Human]\nuser", session.invocations[0]["prompt"])

    def test_tool_call_response_parsing_populates_ai_message_tool_calls(self):
        """A `mode=tool_calls` response is parsed into AIMessage.tool_calls with generated ids."""
        session = FakeCodexSession(
            responses=[
                '{"mode":"tool_calls","content":"Need data first","tool_calls":[{"name":"lookup_price","arguments":{"ticker":"NVDA"}}]}'
            ],
        )
        llm = create_llm_client(
            "codex",
            "gpt-5.4",
            codex_binary="C:/fake/codex",
            codex_workspace_dir="C:/tmp/codex-workspace",
            session_factory=lambda **kwargs: session,
            preflight_runner=lambda **kwargs: None,
        ).get_llm()
        prompt = ChatPromptTemplate.from_messages(
            [("system", "Use tools if needed."), ("human", "Analyze NVDA")]
        )
        result = (prompt | llm.bind_tools([lookup_price])).invoke({})
        self.assertEqual(result.content, "Need data first")
        self.assertEqual(result.tool_calls[0]["name"], "lookup_price")
        self.assertEqual(result.tool_calls[0]["args"], {"ticker": "NVDA"})
        # Generated ids follow the OpenAI-style `call_<32 hex>` pattern.
        self.assertRegex(result.tool_calls[0]["id"], r"^call_[0-9a-f]{32}$")

    def test_multi_tool_response_parses_arguments_json(self):
        """`arguments_json` strings (multi-tool schema) are decoded into argument dicts."""
        session = FakeCodexSession(
            responses=[
                '{"mode":"tool_calls","content":"","tool_calls":[{"name":"lookup_price","arguments_json":"{\\"ticker\\":\\"NVDA\\"}"}]}'
            ],
        )
        llm = create_llm_client(
            "codex",
            "gpt-5.4",
            codex_binary="C:/fake/codex",
            codex_workspace_dir="C:/tmp/codex-workspace",
            session_factory=lambda **kwargs: session,
            preflight_runner=lambda **kwargs: None,
        ).get_llm()
        result = llm.bind_tools([lookup_price, lookup_volume]).invoke("Analyze NVDA")
        self.assertEqual(result.tool_calls[0]["name"], "lookup_price")
        self.assertEqual(result.tool_calls[0]["args"], {"ticker": "NVDA"})

    def test_bind_tools_honors_required_and_named_tool_choice(self):
        """tool_choice="required" pins mode=tool_calls; a named choice pins the tool and adjusts the prompt."""
        required_session = FakeCodexSession(
            responses=[
                '{"mode":"tool_calls","content":"Calling tool","tool_calls":[{"name":"lookup_price","arguments":{"ticker":"NVDA"}}]}'
            ],
        )
        required_llm = create_llm_client(
            "codex",
            "gpt-5.4",
            codex_binary="C:/fake/codex",
            codex_workspace_dir="C:/tmp/codex-workspace",
            session_factory=lambda **kwargs: required_session,
            preflight_runner=lambda **kwargs: None,
        ).get_llm()
        required_result = required_llm.bind_tools([lookup_price], tool_choice="required").invoke(
            "Analyze NVDA"
        )
        self.assertTrue(required_result.tool_calls)
        self.assertEqual(
            required_session.invocations[0]["output_schema"]["properties"]["mode"]["const"],
            "tool_calls",
        )
        self.assertIn(
            "must respond with one or more tool calls",
            required_session.invocations[0]["prompt"].lower(),
        )
        named_session = FakeCodexSession(
            responses=[
                '{"mode":"tool_calls","content":"Calling named tool","tool_calls":[{"name":"lookup_price","arguments":{"ticker":"MSFT"}}]}'
            ],
        )
        named_llm = create_llm_client(
            "codex",
            "gpt-5.4",
            codex_binary="C:/fake/codex",
            codex_workspace_dir="C:/tmp/codex-workspace",
            session_factory=lambda **kwargs: named_session,
            preflight_runner=lambda **kwargs: None,
        ).get_llm()
        named_result = named_llm.bind_tools(
            [lookup_price],
            tool_choice={"type": "function", "function": {"name": "lookup_price"}},
        ).invoke("Analyze MSFT")
        self.assertEqual(named_result.tool_calls[0]["name"], "lookup_price")
        tool_item = named_session.invocations[0]["output_schema"]["properties"]["tool_calls"]["items"]
        self.assertEqual(tool_item["properties"]["name"]["const"], "lookup_price")
        self.assertIn(
            "must call the tool named `lookup_price`",
            named_session.invocations[0]["prompt"].lower(),
        )

    def test_malformed_json_retries_and_surfaces_error_when_exhausted(self):
        """Malformed JSON triggers a validation-feedback retry; exhaustion raises CodexStructuredOutputError."""
        session = FakeCodexSession(
            responses=["not json", '{"answer":"Recovered"}'],
        )
        llm = create_llm_client(
            "codex",
            "gpt-5.4",
            codex_binary="C:/fake/codex",
            codex_workspace_dir="C:/tmp/codex-workspace",
            codex_max_retries=1,
            session_factory=lambda **kwargs: session,
            preflight_runner=lambda **kwargs: None,
        ).get_llm()
        result = llm.invoke("Recover after malformed JSON.")
        self.assertEqual(result.content, "Recovered")
        self.assertEqual(len(session.invocations), 2)
        # The retry prompt tells the model why the first response was rejected.
        self.assertIn(
            "previous response did not satisfy tradingagents validation",
            session.invocations[1]["prompt"].lower(),
        )
        failing_session = FakeCodexSession(
            responses=["still bad", "still bad again"],
        )
        failing_llm = create_llm_client(
            "codex",
            "gpt-5.4",
            codex_binary="C:/fake/codex",
            codex_workspace_dir="C:/tmp/codex-workspace",
            codex_max_retries=1,
            session_factory=lambda **kwargs: failing_session,
            preflight_runner=lambda **kwargs: None,
        ).get_llm()
        with self.assertRaises(CodexStructuredOutputError):
            failing_llm.invoke("This should fail.")

    def test_runtime_errors_do_not_retry_as_json_failures(self):
        """Transport-level exceptions propagate immediately instead of consuming JSON retries."""
        class FailingSession(FakeCodexSession):
            def invoke(self, **kwargs):
                raise RuntimeError("transport exploded")
        session = FailingSession()
        llm = create_llm_client(
            "codex",
            "gpt-5.4",
            codex_binary="C:/fake/codex",
            codex_workspace_dir="C:/tmp/codex-workspace",
            codex_max_retries=2,
            session_factory=lambda **kwargs: session,
            preflight_runner=lambda **kwargs: None,
        ).get_llm()
        with self.assertRaisesRegex(RuntimeError, "transport exploded"):
            llm.invoke("fail fast")

    def test_provider_codex_smoke_covers_bind_tools_and_direct_invoke_paths(self):
        """End-to-end smoke: one tool-call round followed by a plain final answer on the same session."""
        session = FakeCodexSession(
            responses=[
                '{"mode":"tool_calls","content":"Fetching market data","tool_calls":[{"name":"lookup_price","arguments":{"ticker":"NVDA"}}]}',
                '{"answer":"Rating: Buy\\nExecutive Summary: Add gradually."}',
            ],
        )
        llm = create_llm_client(
            "codex",
            "gpt-5.4",
            codex_binary="C:/fake/codex",
            codex_workspace_dir="C:/tmp/codex-workspace",
            session_factory=lambda **kwargs: session,
            preflight_runner=lambda **kwargs: None,
        ).get_llm()
        analyst_prompt = ChatPromptTemplate.from_messages(
            [("system", "Use tools when you need extra data."), ("human", "Analyze NVDA.")]
        )
        market_result = (analyst_prompt | llm.bind_tools([lookup_price])).invoke({})
        self.assertTrue(market_result.tool_calls)
        self.assertEqual(market_result.tool_calls[0]["name"], "lookup_price")
        decision = llm.invoke("Produce the final trade decision.")
        self.assertIn("Rating: Buy", decision.content)
        self.assertEqual(len(session.invocations), 2)

    def test_preflight_detects_missing_auth_and_missing_binary(self):
        """Preflight succeeds with a valid account, raises on missing auth, and raises on an unresolvable binary."""
        valid_factory = lambda **kwargs: FakeCodexSession(
            account_payload={
                "account": {"type": "chatgpt", "email": "user@example.com"},
                "requiresOpenaiAuth": True,
            }
        )
        result = run_codex_preflight(
            codex_binary="C:\\fake\\codex.exe",
            model="gpt-5.4",
            request_timeout=10.0,
            workspace_dir="C:/tmp/codex-workspace",
            cleanup_threads=True,
            session_factory=valid_factory,
        )
        self.assertEqual(result.account["type"], "chatgpt")
        authless_factory = lambda **kwargs: FakeCodexSession(
            account_payload={"account": None, "requiresOpenaiAuth": True}
        )
        with self.assertRaises(CodexAppServerAuthError):
            run_codex_preflight(
                codex_binary="C:\\fake\\codex.exe",
                model="gpt-5.4",
                request_timeout=10.0,
                workspace_dir="C:/tmp/codex-workspace",
                cleanup_threads=True,
                session_factory=authless_factory,
            )
        with patch(
            "tradingagents.llm_clients.codex_preflight.resolve_codex_binary",
            return_value=None,
        ):
            with self.assertRaises(CodexAppServerBinaryError):
                run_codex_preflight(
                    codex_binary="definitely-missing-codex-binary",
                    model="gpt-5.4",
                    request_timeout=10.0,
                    workspace_dir="C:/tmp/codex-workspace",
                    cleanup_threads=True,
                )

    def test_preflight_uses_resolved_binary_path(self):
        """The session factory receives the resolved binary path, not the raw input."""
        captured = {}
        def factory(**kwargs):
            captured["codex_binary"] = kwargs["codex_binary"]
            return FakeCodexSession(**kwargs)
        with patch(
            "tradingagents.llm_clients.codex_preflight.resolve_codex_binary",
            return_value="C:/resolved/codex.exe",
        ):
            run_codex_preflight(
                codex_binary=None,
                model="gpt-5.4",
                request_timeout=10.0,
                workspace_dir="C:/tmp/codex-workspace",
                cleanup_threads=True,
                session_factory=factory,
            )
        self.assertEqual(captured["codex_binary"], "C:/resolved/codex.exe")
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()

View File

@ -50,3 +50,6 @@ class ModelValidationTests(unittest.TestCase):
client.get_llm()
self.assertEqual(caught, [])
    def test_validator_accepts_known_model_with_surrounding_whitespace(self):
        """Provider/model names are accepted even with surrounding whitespace."""
        self.assertTrue(validate_model(" openai ", " gpt-5.4 "))

View File

@ -0,0 +1,102 @@
import tempfile
import unittest
from pathlib import Path
from unittest.mock import patch
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.reporting import save_report_bundle
class ReportLocalizationTests(unittest.TestCase):
    """Tests for Korean localization of report bundles and final-state fields."""

    def test_save_report_bundle_uses_korean_labels(self):
        """A Korean-language report bundle renders Korean section labels and dates."""
        final_state = {
            "analysis_date": "2026-04-06",
            "trade_date": "2026-04-02",
            "market_report": "시장 보고서 본문",
            "sentiment_report": "소셜 보고서 본문",
            "news_report": "뉴스 보고서 본문",
            "fundamentals_report": "펀더멘털 보고서 본문",
            "investment_debate_state": {
                "bull_history": "강세 의견",
                "bear_history": "약세 의견",
                "judge_decision": "리서치 매니저 판단",
            },
            "trader_investment_plan": "트레이딩 계획",
            "risk_debate_state": {
                "aggressive_history": "공격적 의견",
                "conservative_history": "보수적 의견",
                "neutral_history": "중립 의견",
                "judge_decision": "포트폴리오 최종 판단",
            },
        }
        with tempfile.TemporaryDirectory() as tmpdir:
            report_path = save_report_bundle(
                final_state,
                "GOOGL",
                Path(tmpdir),
                language="Korean",
            )
            report_text = report_path.read_text(encoding="utf-8")
            self.assertIn("트레이딩 분석 리포트", report_text)
            self.assertIn("생성 시각", report_text)
            self.assertIn("분석 기준일: 2026-04-06", report_text)
            self.assertIn("시장 데이터 기준일: 2026-04-02", report_text)
            self.assertIn("애널리스트 팀 리포트", report_text)
            self.assertIn("포트폴리오 매니저 최종 판단", report_text)
            self.assertIn("시장 애널리스트", report_text)

    def test_localize_final_state_rewrites_user_facing_fields(self):
        """_localize_final_state passes each user-facing field through the rewriter with a content-type tag."""
        # __new__ bypasses __init__ so no real LLMs/config are constructed.
        graph = TradingAgentsGraph.__new__(TradingAgentsGraph)
        graph.quick_thinking_llm = object()
        final_state = {
            "market_report": "market",
            "sentiment_report": "social",
            "news_report": "news",
            "fundamentals_report": "fundamentals",
            "investment_plan": "investment plan",
            "trader_investment_plan": "trader plan",
            "final_trade_decision": "final decision",
            "investment_debate_state": {
                "bull_history": "bull",
                "bear_history": "bear",
                "history": "debate history",
                "current_response": "latest debate",
                "judge_decision": "manager decision",
            },
            "risk_debate_state": {
                "aggressive_history": "aggressive",
                "conservative_history": "conservative",
                "neutral_history": "neutral",
                "history": "risk history",
                "current_aggressive_response": "aggr latest",
                "current_conservative_response": "cons latest",
                "current_neutral_response": "neutral latest",
                "judge_decision": "portfolio decision",
            },
        }
        with (
            patch("tradingagents.graph.trading_graph.get_output_language", return_value="Korean"),
            patch(
                "tradingagents.graph.trading_graph.rewrite_in_output_language",
                side_effect=lambda llm, content, content_type="report": f"KO::{content_type}::{content}",
            ),
        ):
            localized = graph._localize_final_state(final_state)
            self.assertEqual(localized["market_report"], "KO::market analyst report::market")
            self.assertEqual(localized["investment_plan"], "KO::research manager investment plan::investment plan")
            self.assertEqual(
                localized["investment_debate_state"]["judge_decision"],
                "KO::research manager decision::manager decision",
            )
            self.assertEqual(
                localized["risk_debate_state"]["current_neutral_response"],
                "KO::neutral risk analyst latest response::neutral latest",
            )
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()

View File

@ -0,0 +1,215 @@
import json
import tempfile
import unittest
from pathlib import Path
from unittest.mock import patch
from tradingagents.scheduled.runner import execute_scheduled_run, load_scheduled_config, main
class _FakeStatsHandler:
def get_stats(self):
return {
"llm_calls": 12,
"tool_calls": 7,
"tokens_in": 1024,
"tokens_out": 2048,
}
class _FakeTradingAgentsGraph:
def __init__(self, selected_analysts, debug=False, config=None, callbacks=None):
self.selected_analysts = selected_analysts
self.debug = debug
self.config = config or {}
self.callbacks = callbacks or []
def propagate(self, ticker, trade_date, analysis_date=None):
if ticker == "FAIL":
raise RuntimeError("synthetic failure")
final_state = {
"company_of_interest": ticker,
"trade_date": trade_date,
"analysis_date": analysis_date or trade_date,
"market_report": f"## Market\n{ticker} market analysis",
"sentiment_report": f"## Sentiment\n{ticker} sentiment analysis",
"news_report": f"## News\n{ticker} news analysis",
"fundamentals_report": f"## Fundamentals\n{ticker} fundamentals analysis",
"investment_debate_state": {
"bull_history": f"{ticker} bull case",
"bear_history": f"{ticker} bear case",
"history": "debate transcript",
"current_response": "",
"judge_decision": f"{ticker} research manager decision",
},
"trader_investment_plan": f"{ticker} trading plan",
"investment_plan": f"{ticker} investment plan",
"risk_debate_state": {
"aggressive_history": f"{ticker} aggressive case",
"conservative_history": f"{ticker} conservative case",
"neutral_history": f"{ticker} neutral case",
"history": "risk transcript",
"judge_decision": f"{ticker} final portfolio decision",
},
"final_trade_decision": f"{ticker} final trade decision",
}
return final_state, "BUY"
class ScheduledAnalysisTests(unittest.TestCase):
    """Tests for the scheduled-run entry point: archiving, manifest contents,
    static-site generation, and the --site-only rebuild path."""

    def test_execute_scheduled_run_archives_outputs_and_builds_site(self):
        """A mixed success/failure run archives artifacts, records partial_failure, and builds the site."""
        with tempfile.TemporaryDirectory() as tmpdir:
            root = Path(tmpdir)
            config_path = root / "scheduled_analysis.toml"
            archive_dir = root / "archive"
            site_dir = root / "site"
            config_path.write_text(
                f"""
[run]
tickers = ["NVDA", "FAIL"]
analysts = ["market", "social", "news", "fundamentals"]
output_language = "Korean"
trade_date_mode = "latest_available"
timezone = "Asia/Seoul"
continue_on_ticker_error = true
[llm]
provider = "codex"
quick_model = "gpt-5.4"
deep_model = "gpt-5.4"
codex_reasoning_effort = "medium"
[storage]
archive_dir = "{archive_dir.as_posix()}"
site_dir = "{site_dir.as_posix()}"
[site]
title = "Daily Reports"
subtitle = "Automated"
""",
                encoding="utf-8",
            )
            config = load_scheduled_config(config_path)
            # Replace the real graph, stats handler, and trade-date resolver
            # so the run is deterministic and offline.
            with (
                patch("tradingagents.scheduled.runner.TradingAgentsGraph", _FakeTradingAgentsGraph),
                patch("tradingagents.scheduled.runner.StatsCallbackHandler", _FakeStatsHandler),
                patch("tradingagents.scheduled.runner.resolve_trade_date", return_value="2026-04-04"),
            ):
                manifest = execute_scheduled_run(config, run_label="test")
            self.assertEqual(manifest["status"], "partial_failure")
            self.assertEqual(manifest["summary"]["successful_tickers"], 1)
            self.assertEqual(manifest["summary"]["failed_tickers"], 1)
            self.assertEqual(manifest["settings"]["provider"], "codex")
            self.assertEqual(manifest["settings"]["deep_model"], "gpt-5.4")
            self.assertEqual(manifest["settings"]["quick_model"], "gpt-5.4")
            self.assertEqual(manifest["tickers"][0]["analysis_date"], manifest["started_at"][:10])
            # Archive layout: runs/<year>/<run_id>/...
            run_dir = archive_dir / "runs" / manifest["started_at"][:4] / manifest["run_id"]
            self.assertTrue((run_dir / "run.json").exists())
            self.assertTrue((run_dir / "tickers" / "NVDA" / "report" / "complete_report.md").exists())
            self.assertTrue((run_dir / "tickers" / "FAIL" / "error.json").exists())
            index_html = (site_dir / "index.html").read_text(encoding="utf-8")
            run_html = (site_dir / "runs" / manifest["run_id"] / "index.html").read_text(encoding="utf-8")
            ticker_html = (site_dir / "runs" / manifest["run_id"] / "NVDA.html").read_text(encoding="utf-8")
            self.assertIn("Daily Reports", index_html)
            self.assertIn("partial failure", index_html)
            self.assertIn("NVDA", run_html)
            self.assertIn("Rendered report", ticker_html)
            self.assertIn("Analysis date", ticker_html)
            self.assertTrue((site_dir / "downloads" / manifest["run_id"] / "NVDA" / "complete_report.md").exists())

    def test_main_site_only_rebuilds_from_existing_archive(self):
        """`main --site-only` regenerates the site purely from an archived run.json."""
        with tempfile.TemporaryDirectory() as tmpdir:
            root = Path(tmpdir)
            archive_dir = root / "archive"
            site_dir = root / "site"
            # Seed a minimal archived run by hand.
            run_dir = archive_dir / "runs" / "2026" / "20260405T091300_seed"
            ticker_dir = run_dir / "tickers" / "NVDA" / "report"
            ticker_dir.mkdir(parents=True, exist_ok=True)
            (ticker_dir / "complete_report.md").write_text("# Test report", encoding="utf-8")
            analysis_dir = run_dir / "tickers" / "NVDA"
            (analysis_dir / "analysis.json").write_text("{}", encoding="utf-8")
            (analysis_dir / "final_state.json").write_text("{}", encoding="utf-8")
            (run_dir / "run.json").write_text(
                json.dumps(
                    {
                        "version": 1,
                        "run_id": "20260405T091300_seed",
                        "label": "seed",
                        "status": "success",
                        "started_at": "2026-04-05T09:13:00+09:00",
                        "finished_at": "2026-04-05T09:20:00+09:00",
                        "timezone": "Asia/Seoul",
                        "settings": {
                            "provider": "codex",
                            "quick_model": "gpt-5.4",
                            "deep_model": "gpt-5.4",
                            "codex_reasoning_effort": "medium",
                            "output_language": "Korean",
                            "analysts": ["market", "social", "news", "fundamentals"],
                            "trade_date_mode": "latest_available",
                            "max_debate_rounds": 1,
                            "max_risk_discuss_rounds": 1,
                        },
                        "summary": {
                            "total_tickers": 1,
                            "successful_tickers": 1,
                            "failed_tickers": 0,
                        },
                        "tickers": [
                            {
                                "ticker": "NVDA",
                                "status": "success",
                                "analysis_date": "2026-04-05",
                                "trade_date": "2026-04-04",
                                "decision": "BUY",
                                "started_at": "2026-04-05T09:13:00+09:00",
                                "finished_at": "2026-04-05T09:20:00+09:00",
                                "duration_seconds": 420.0,
                                "metrics": {
                                    "llm_calls": 10,
                                    "tool_calls": 7,
                                    "tokens_in": 1000,
                                    "tokens_out": 2000,
                                },
                                "artifacts": {
                                    "analysis_json": "tickers/NVDA/analysis.json",
                                    "report_markdown": "tickers/NVDA/report/complete_report.md",
                                    "final_state_json": "tickers/NVDA/final_state.json",
                                    "graph_log_json": None,
                                },
                            }
                        ],
                    },
                    ensure_ascii=False,
                ),
                encoding="utf-8",
            )
            config_path = root / "scheduled_analysis.toml"
            config_path.write_text(
                f"""
[run]
tickers = ["NVDA"]
[storage]
archive_dir = "{archive_dir.as_posix()}"
site_dir = "{site_dir.as_posix()}"
""",
                encoding="utf-8",
            )
            exit_code = main(["--config", str(config_path), "--site-only"])
            self.assertEqual(exit_code, 0)
            self.assertTrue((site_dir / "index.html").exists())
            self.assertIn("NVDA", (site_dir / "runs" / "20260405T091300_seed" / "NVDA.html").read_text(encoding="utf-8"))
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()

View File

@ -0,0 +1,68 @@
import unittest
from unittest.mock import patch
from tradingagents.dataflows.yfinance_news import get_news_yfinance
def _article(date_value: str, title: str, link: str) -> dict:
return {
"content": {
"title": title,
"summary": f"Summary for {title}",
"provider": {"displayName": "Unit Test"},
"canonicalUrl": {"url": link},
"pubDate": f"{date_value}T12:00:00Z",
}
}
class _FakeTicker:
def __init__(self, full_news: list[dict]):
self.full_news = list(full_news)
def get_news(self, count=20):
return self.full_news[:count]
class YFinanceNewsTests(unittest.TestCase):
    """Tests for get_news_yfinance: feed-depth expansion and coverage reporting."""

    def test_get_news_yfinance_expands_feed_depth_to_cover_requested_window(self):
        """Older in-window articles beyond the first fetch batch are still found."""
        # 60 recent articles push the in-window ones past the default batch size,
        # forcing the progressively larger fetch counts to kick in.
        recent_articles = [
            _article(f"2026-04-{day:02d}", f"Recent article {day}", f"https://example.com/recent-{day}")
            for day in range(6, 2, -1)
            for _ in range(15)
        ]
        older_articles = [
            _article("2026-04-02", "Alphabet April 2 article", "https://example.com/apr2"),
            _article("2026-04-01", "Alphabet April 1 article", "https://example.com/apr1"),
        ]
        fake_ticker = _FakeTicker(recent_articles + older_articles)
        with (
            patch("tradingagents.dataflows.yfinance_news.yf.Ticker", return_value=fake_ticker),
            patch("tradingagents.dataflows.yfinance_news.yf_retry", side_effect=lambda fn: fn()),
        ):
            result = get_news_yfinance("GOOGL", "2026-03-26", "2026-04-02")
        self.assertIn("Alphabet April 2 article", result)
        self.assertIn("[2026-04-02]", result)

    def test_get_news_yfinance_reports_feed_coverage_when_window_is_unavailable(self):
        """When the feed cannot reach the window, the output states the feed's actual coverage."""
        fake_ticker = _FakeTicker(
            [
                _article("2026-04-06", "Fresh article", "https://example.com/fresh"),
                _article("2026-04-05", "Fresh article 2", "https://example.com/fresh-2"),
            ]
        )
        with (
            patch("tradingagents.dataflows.yfinance_news.yf.Ticker", return_value=fake_ticker),
            patch("tradingagents.dataflows.yfinance_news.yf_retry", side_effect=lambda fn: fn()),
        ):
            result = get_news_yfinance("GOOGL", "2026-03-26", "2026-04-02")
        self.assertIn("No news found for GOOGL between 2026-03-26 and 2026-04-02", result)
        self.assertIn("2026-04-05 to 2026-04-06", result)
if __name__ == "__main__":
    # Allow running this test module directly with `python <file>`.
    unittest.main()

View File

@ -10,7 +10,7 @@ from tradingagents.dataflows.config import get_config
def create_news_analyst(llm):
def news_analyst_node(state):
current_date = state["trade_date"]
current_date = state.get("analysis_date") or state["trade_date"]
instrument_context = build_instrument_context(state["company_of_interest"])
tools = [

View File

@ -5,7 +5,7 @@ from tradingagents.dataflows.config import get_config
def create_social_media_analyst(llm):
def social_media_analyst_node(state):
current_date = state["trade_date"]
current_date = state.get("analysis_date") or state["trade_date"]
instrument_context = build_instrument_context(state["company_of_interest"])
tools = [

View File

@ -46,6 +46,7 @@ class RiskDebateState(TypedDict):
class AgentState(MessagesState):
company_of_interest: Annotated[str, "Company that we are interested in trading"]
trade_date: Annotated[str, "What date we are trading at"]
analysis_date: Annotated[str, "What date the full analysis is being generated on"]
sender: Annotated[str, "Agent that sent this message"]

View File

@ -1,4 +1,5 @@
from langchain_core.messages import HumanMessage, RemoveMessage
import re
# Import tools from separate utility files
from tradingagents.agents.utils.core_stock_tools import (
@ -27,11 +28,87 @@ def get_language_instruction() -> str:
Only applied to user-facing agents (analysts, portfolio manager).
Internal debate agents stay in English for reasoning quality.
"""
from tradingagents.dataflows.config import get_config
lang = get_config().get("output_language", "English")
lang = get_output_language()
if lang.strip().lower() == "english":
return ""
return f" Write your entire response in {lang}."
return (
f" Write your entire response in {lang}. "
f"Do not mix in English for headings, summaries, recommendations, table labels, or narrative text. "
f"Keep only ticker symbols, company names, dates, and raw numeric values unchanged when needed."
)
def get_output_language() -> str:
    """Return the configured output language, defaulting to "English".

    Whitespace-only or missing configuration values also fall back to English.
    """
    from tradingagents.dataflows.config import get_config

    configured = str(get_config().get("output_language", "English")).strip()
    return configured if configured else "English"
def rewrite_in_output_language(llm, content: str, *, content_type: str = "report") -> str:
    """Rewrite already-generated content into the configured output language.

    Reasoning prompts can stay English-centric; this pass makes the persisted,
    user-facing text consistently localized. Empty content and an English
    target are returned unchanged, as is any empty/non-string LLM reply.
    """
    if not content:
        return content

    target_language = get_output_language()
    if target_language.lower() == "english":
        return content

    editor_instructions = (
        "You are a financial editor rewriting existing analysis for end users. "
        f"Rewrite the user's {content_type} entirely in {target_language}. "
        "Requirements: preserve the original meaning, preserve markdown structure, preserve tables, preserve ticker symbols, preserve dates, preserve numbers, and preserve factual details. "
        "Translate all headings, labels, bullet text, narrative prose, recommendations, quoted headlines, and English source titles so the output reads naturally and consistently in the target language. "
        "Do not leave English article titles or English section names in the output unless they are unavoidable proper nouns or acronyms. "
        "Keep only unavoidable Latin-script proper nouns or acronyms such as ticker symbols, company names, product names, RSI, MACD, ATR, EBITDA, and CAPEX. "
        "If the source contains English control phrases or analyst role labels, rewrite them into natural user-facing target-language labels. "
        "Output only the rewritten content."
    )
    reply = llm.invoke([("system", editor_instructions), ("human", content)]).content

    # Fall back to the original text on an empty or non-string reply.
    if not isinstance(reply, str) or not reply.strip():
        return content
    return _normalize_localized_finance_terms(reply, target_language)
def _normalize_localized_finance_terms(content: str, language: str) -> str:
if language.strip().lower() != "korean":
return content
replacements = {
"FINAL TRANSACTION PROPOSAL": "최종 거래 제안",
"**BUY**": "**매수**",
"**HOLD**": "**보유**",
"**SELL**": "**매도**",
"**OVERWEIGHT**": "**비중 확대**",
"**UNDERWEIGHT**": "**비중 축소**",
}
normalized = content
for source, target in replacements.items():
normalized = normalized.replace(source, target)
regex_replacements = (
(r"\bBuy\b", "매수"),
(r"\bHold\b", "보유"),
(r"\bSell\b", "매도"),
(r"\bOverweight\b", "비중 확대"),
(r"\bUnderweight\b", "비중 축소"),
(r"\bBUY\b", "매수"),
(r"\bHOLD\b", "보유"),
(r"\bSELL\b", "매도"),
(r"\bOVERWEIGHT\b", "비중 확대"),
(r"\bUNDERWEIGHT\b", "비중 축소"),
)
for pattern, replacement in regex_replacements:
normalized = re.sub(pattern, replacement, normalized)
return normalized
def build_instrument_context(ticker: str) -> str:

View File

@ -1,12 +1,46 @@
"""yfinance-based news data fetching functions."""
import yfinance as yf
from datetime import datetime
from datetime import datetime, timezone
from dateutil.relativedelta import relativedelta
import yfinance as yf
from .stockstats_utils import yf_retry
_TICKER_NEWS_FETCH_COUNTS = (20, 50, 100)
_MAX_FILTERED_TICKER_ARTICLES = 25
def _parse_pub_date(raw_value) -> datetime | None:
"""Normalize yfinance pub date values into a timezone-aware datetime."""
if raw_value in (None, ""):
return None
if isinstance(raw_value, datetime):
return raw_value
if isinstance(raw_value, (int, float)):
try:
return datetime.fromtimestamp(raw_value, tz=timezone.utc)
except (OverflowError, OSError, ValueError):
return None
if isinstance(raw_value, str):
normalized = raw_value.strip()
if not normalized:
return None
try:
return datetime.fromisoformat(normalized.replace("Z", "+00:00"))
except ValueError:
try:
return datetime.fromtimestamp(float(normalized), tz=timezone.utc)
except (OverflowError, OSError, ValueError):
return None
return None
def _extract_article_data(article: dict) -> dict:
"""Extract article data from yfinance news format (handles nested 'content' structure)."""
# Handle nested content structure
@ -22,13 +56,7 @@ def _extract_article_data(article: dict) -> dict:
link = url_obj.get("url", "")
# Get publish date
pub_date_str = content.get("pubDate", "")
pub_date = None
if pub_date_str:
try:
pub_date = datetime.fromisoformat(pub_date_str.replace("Z", "+00:00"))
except (ValueError, AttributeError):
pass
pub_date = _parse_pub_date(content.get("pubDate", ""))
return {
"title": title,
@ -44,10 +72,79 @@ def _extract_article_data(article: dict) -> dict:
"summary": article.get("summary", ""),
"publisher": article.get("publisher", "Unknown"),
"link": article.get("link", ""),
"pub_date": None,
"pub_date": _parse_pub_date(article.get("providerPublishTime")),
}
def _article_identity(article: dict) -> str:
"""Return a stable identity key for deduplicating news articles."""
link = article.get("link", "").strip()
if link:
return link
title = article.get("title", "").strip()
publisher = article.get("publisher", "").strip()
pub_date = article.get("pub_date")
stamp = pub_date.isoformat() if isinstance(pub_date, datetime) else ""
return f"{publisher}::{title}::{stamp}"
def _collect_ticker_news(
    ticker: str,
    start_dt: datetime,
) -> tuple[list[dict], datetime | None, datetime | None]:
    """Fetch increasingly larger ticker feeds until the requested window is covered.

    Args:
        ticker: Symbol handed straight to ``yf.Ticker``.
        start_dt: Earliest publish date still of interest. Assumed naive —
            the stop check strips tzinfo from article dates before comparing.
            TODO(review): confirm callers always pass a naive datetime.

    Returns:
        ``(articles, oldest_pub_date, newest_pub_date)`` where ``articles`` is
        deduplicated and sorted newest-first, and the two datetimes bound the
        publish dates actually observed (``None`` when no article had a date).
    """
    collected: list[dict] = []
    seen: set[str] = set()
    oldest_pub_date = None
    newest_pub_date = None
    for count in _TICKER_NEWS_FETCH_COUNTS:
        # Bind `count` as a default so the retry helper re-invokes the fetch
        # with the batch size from this iteration, not the loop's final value.
        news = yf_retry(lambda batch_size=count: yf.Ticker(ticker).get_news(count=batch_size))
        if not news:
            continue
        for article in news:
            data = _extract_article_data(article)
            identity = _article_identity(data)
            if identity in seen:
                continue
            seen.add(identity)
            collected.append(data)
            pub_date = data.get("pub_date")
            if pub_date:
                # Track the coverage window for later diagnostics.
                if newest_pub_date is None or pub_date > newest_pub_date:
                    newest_pub_date = pub_date
                if oldest_pub_date is None or pub_date < oldest_pub_date:
                    oldest_pub_date = pub_date
        # Stop escalating once the feed reaches back to the requested start...
        if oldest_pub_date and oldest_pub_date.replace(tzinfo=None) <= start_dt:
            break
        # ...or when the feed returned fewer items than requested, meaning a
        # larger request cannot surface anything new.
        if len(news) < count:
            break
    # Newest first; undated articles sink to the end via the -inf sentinel.
    collected.sort(
        key=lambda article: article["pub_date"].timestamp() if article.get("pub_date") else float("-inf"),
        reverse=True,
    )
    return collected, oldest_pub_date, newest_pub_date
def _format_coverage_note(oldest_pub_date: datetime | None, newest_pub_date: datetime | None) -> str:
"""Describe the yfinance coverage window when no article matches the requested range."""
if oldest_pub_date and newest_pub_date:
return (
"; the current yfinance ticker feed only covered "
f"{oldest_pub_date.strftime('%Y-%m-%d')} to {newest_pub_date.strftime('%Y-%m-%d')} at query time"
)
if oldest_pub_date:
return f"; the current yfinance ticker feed only reached back to {oldest_pub_date.strftime('%Y-%m-%d')}"
if newest_pub_date:
return f"; the current yfinance ticker feed only returned articles up to {newest_pub_date.strftime('%Y-%m-%d')}"
return ""
def get_news_yfinance(
ticker: str,
start_date: str,
@ -65,38 +162,40 @@ def get_news_yfinance(
Formatted string containing news articles
"""
try:
stock = yf.Ticker(ticker)
news = yf_retry(lambda: stock.get_news(count=20))
if not news:
return f"No news found for {ticker}"
# Parse date range for filtering
start_dt = datetime.strptime(start_date, "%Y-%m-%d")
end_dt = datetime.strptime(end_date, "%Y-%m-%d")
articles, oldest_pub_date, newest_pub_date = _collect_ticker_news(ticker, start_dt)
if not articles:
return f"No news found for {ticker}"
news_str = ""
filtered_count = 0
for article in news:
data = _extract_article_data(article)
for data in articles:
# Filter by date if publish time is available
if data["pub_date"]:
pub_date_naive = data["pub_date"].replace(tzinfo=None)
if not (start_dt <= pub_date_naive <= end_dt + relativedelta(days=1)):
continue
news_str += f"### {data['title']} (source: {data['publisher']})\n"
date_prefix = ""
if data["pub_date"]:
date_prefix = f"[{data['pub_date'].strftime('%Y-%m-%d')}] "
news_str += f"### {date_prefix}{data['title']} (source: {data['publisher']})\n"
if data["summary"]:
news_str += f"{data['summary']}\n"
if data["link"]:
news_str += f"Link: {data['link']}\n"
news_str += "\n"
filtered_count += 1
if filtered_count >= _MAX_FILTERED_TICKER_ARTICLES:
break
if filtered_count == 0:
return f"No news found for {ticker} between {start_date} and {end_date}"
coverage_note = _format_coverage_note(oldest_pub_date, newest_pub_date)
return f"No news found for {ticker} between {start_date} and {end_date}{coverage_note}"
return f"## {ticker} News, from {start_date} to {end_date}:\n\n{news_str}"

View File

@ -1,4 +1,5 @@
import os
from pathlib import Path
DEFAULT_CONFIG = {
"project_dir": os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
@ -16,6 +17,14 @@ DEFAULT_CONFIG = {
"google_thinking_level": None, # "high", "minimal", etc.
"openai_reasoning_effort": None, # "medium", "high", "low"
"anthropic_effort": None, # "high", "medium", "low"
"codex_binary": os.getenv("CODEX_BINARY"),
"codex_reasoning_effort": "medium",
"codex_summary": "none",
"codex_personality": "none",
"codex_workspace_dir": str(Path.home() / ".codex" / "tradingagents-workspace"),
"codex_request_timeout": 120.0,
"codex_max_retries": 2,
"codex_cleanup_threads": True,
# Output language for analyst reports and final decision
# Internal agent debate stays in English for reasoning quality
"output_language": "English",

View File

@ -16,13 +16,17 @@ class Propagator:
self.max_recur_limit = max_recur_limit
def create_initial_state(
self, company_name: str, trade_date: str
self,
company_name: str,
trade_date: str,
analysis_date: str | None = None,
) -> Dict[str, Any]:
"""Create the initial state for the agent graph."""
return {
"messages": [("human", company_name)],
"company_of_interest": company_name,
"trade_date": str(trade_date),
"analysis_date": str(analysis_date or trade_date),
"investment_debate_state": InvestDebateState(
{
"bull_history": "",

View File

@ -30,7 +30,9 @@ from tradingagents.agents.utils.agent_utils import (
get_income_statement,
get_news,
get_insider_transactions,
get_global_news
get_global_news,
get_output_language,
rewrite_in_output_language,
)
from .conditional_logic import ConditionalLogic
@ -152,6 +154,15 @@ class TradingAgentsGraph:
effort = self.config.get("anthropic_effort")
if effort:
kwargs["effort"] = effort
elif provider == "codex":
kwargs["codex_binary"] = self.config.get("codex_binary")
kwargs["codex_reasoning_effort"] = self.config.get("codex_reasoning_effort")
kwargs["codex_summary"] = self.config.get("codex_summary")
kwargs["codex_personality"] = self.config.get("codex_personality")
kwargs["codex_workspace_dir"] = self.config.get("codex_workspace_dir")
kwargs["codex_request_timeout"] = self.config.get("codex_request_timeout")
kwargs["codex_max_retries"] = self.config.get("codex_max_retries")
kwargs["codex_cleanup_threads"] = self.config.get("codex_cleanup_threads")
return kwargs
@ -191,14 +202,14 @@ class TradingAgentsGraph:
),
}
def propagate(self, company_name, trade_date):
def propagate(self, company_name, trade_date, analysis_date=None):
"""Run the trading agents graph for a company on a specific date."""
self.ticker = company_name
# Initialize state
init_agent_state = self.propagator.create_initial_state(
company_name, trade_date
company_name, trade_date, analysis_date=analysis_date
)
args = self.propagator.get_graph_args()
@ -217,6 +228,9 @@ class TradingAgentsGraph:
# Standard mode without tracing
final_state = self.graph.invoke(init_agent_state, **args)
signal = self.process_signal(final_state["final_trade_decision"])
final_state = self._localize_final_state(final_state)
# Store current state for reflection
self.curr_state = final_state
@ -224,13 +238,14 @@ class TradingAgentsGraph:
self._log_state(trade_date, final_state)
# Return decision and processed signal
return final_state, self.process_signal(final_state["final_trade_decision"])
return final_state, signal
def _log_state(self, trade_date, final_state):
"""Log the final state to a JSON file."""
self.log_states_dict[str(trade_date)] = {
"company_of_interest": final_state["company_of_interest"],
"trade_date": final_state["trade_date"],
"analysis_date": final_state.get("analysis_date", final_state["trade_date"]),
"market_report": final_state["market_report"],
"sentiment_report": final_state["sentiment_report"],
"news_report": final_state["news_report"],
@ -287,3 +302,61 @@ class TradingAgentsGraph:
def process_signal(self, full_signal):
"""Process a signal to extract the core decision."""
return self.signal_processor.process_signal(full_signal)
def _localize_final_state(self, final_state: Dict[str, Any]) -> Dict[str, Any]:
    """Rewrite persisted user-facing outputs into the configured output language.

    Returns a shallow copy of ``final_state`` with each user-facing report and
    debate-transcript field passed through ``rewrite_in_output_language`` via
    the quick-thinking LLM; the input dict is not mutated. When the configured
    language is English the state is returned unchanged.
    """
    language = get_output_language()
    if language.lower() == "english":
        return final_state
    localized = dict(final_state)
    # Top-level report/decision fields shown to the user.
    for field_name, content_type in (
        ("market_report", "market analyst report"),
        ("sentiment_report", "social sentiment report"),
        ("news_report", "news analyst report"),
        ("fundamentals_report", "fundamentals analyst report"),
        ("investment_plan", "research manager investment plan"),
        ("trader_investment_plan", "trader plan"),
        ("final_trade_decision", "portfolio manager final decision"),
    ):
        localized[field_name] = rewrite_in_output_language(
            self.quick_thinking_llm,
            localized.get(field_name, ""),
            content_type=content_type,
        )
    # Nested investment-debate fields (copied so the original dict is untouched).
    investment_debate = dict(localized.get("investment_debate_state") or {})
    for field_name, content_type in (
        ("bull_history", "bull researcher debate history"),
        ("bear_history", "bear researcher debate history"),
        ("history", "investment debate transcript"),
        ("current_response", "investment debate latest response"),
        ("judge_decision", "research manager decision"),
    ):
        investment_debate[field_name] = rewrite_in_output_language(
            self.quick_thinking_llm,
            investment_debate.get(field_name, ""),
            content_type=content_type,
        )
    localized["investment_debate_state"] = investment_debate
    # Nested risk-debate fields, same treatment.
    risk_debate = dict(localized.get("risk_debate_state") or {})
    for field_name, content_type in (
        ("aggressive_history", "aggressive risk analyst debate history"),
        ("conservative_history", "conservative risk analyst debate history"),
        ("neutral_history", "neutral risk analyst debate history"),
        ("history", "risk debate transcript"),
        ("current_aggressive_response", "aggressive risk analyst latest response"),
        ("current_conservative_response", "conservative risk analyst latest response"),
        ("current_neutral_response", "neutral risk analyst latest response"),
        ("judge_decision", "portfolio manager decision"),
    ):
        risk_debate[field_name] = rewrite_in_output_language(
            self.quick_thinking_llm,
            risk_debate.get(field_name, ""),
            content_type=content_type,
        )
    localized["risk_debate_state"] = risk_debate
    return localized

View File

@ -1,4 +1,10 @@
from .base_client import BaseLLMClient
from .factory import create_llm_client
def create_llm_client(*args, **kwargs):
    """Proxy that forwards to the factory's ``create_llm_client``.

    The import is deferred to call time so that importing this package does
    not immediately import the factory module and its dependencies.
    """
    from .factory import create_llm_client as _factory_create

    return _factory_create(*args, **kwargs)
__all__ = ["BaseLLMClient", "create_llm_client"]

View File

@ -0,0 +1,337 @@
from __future__ import annotations
import json
import queue
import subprocess
import threading
import uuid
from collections import deque
from dataclasses import dataclass
from pathlib import Path
from typing import Any
from .codex_binary import codex_binary_error_message, resolve_codex_binary
class CodexAppServerError(RuntimeError):
    """Raised when the Codex app-server request cycle fails."""


class CodexAppServerAuthError(CodexAppServerError):
    """Raised when Codex login is missing or unusable."""


class CodexAppServerBinaryError(CodexAppServerError):
    """Raised when the Codex binary cannot be started."""


class CodexStructuredOutputError(CodexAppServerError):
    """Raised when Codex does not honor the requested structured output."""


@dataclass(slots=True)
class CodexInvocationResult:
    """Outcome of a single Codex turn."""

    # Assistant message text returned by the turn.
    final_text: str
    # Raw JSON-RPC notifications observed while the turn ran.
    notifications: list[dict[str, Any]]
class CodexAppServerSession:
    """Minimal JSON-RPC client for `codex app-server` over stdio JSONL.

    One JSON object per line is written to the subprocess's stdin and read
    from its stdout by a background reader thread; stderr is retained in a
    rolling buffer for diagnostics. `invoke` serializes whole request cycles
    behind `_request_lock`.
    """

    def __init__(
        self,
        *,
        codex_binary: str | None,
        request_timeout: float,
        workspace_dir: str,
        cleanup_threads: bool,
        client_name: str = "tradingagents_codex",
        client_title: str = "TradingAgents Codex Provider",
        client_version: str = "0.2.3",
    ) -> None:
        self.codex_binary = codex_binary
        self.request_timeout = request_timeout
        # Expand `~` so the subprocess cwd is a concrete path.
        self.workspace_dir = str(Path(workspace_dir).expanduser())
        self.cleanup_threads = cleanup_threads
        self.client_name = client_name
        self.client_title = client_title
        self.client_version = client_version
        self._proc: subprocess.Popen[str] | None = None
        # Parsed stdout messages; `None` is the reader thread's EOF sentinel.
        self._stdout_queue: queue.Queue[dict[str, Any] | None] = queue.Queue()
        # Messages consumed while waiting for something else, replayed later.
        self._pending: deque[dict[str, Any]] = deque()
        # Rolling tail of stderr for error messages.
        self._stderr_lines: deque[str] = deque(maxlen=200)
        self._lock = threading.RLock()
        self._request_lock = threading.RLock()
        self._reader_thread: threading.Thread | None = None
        self._stderr_thread: threading.Thread | None = None

    def start(self) -> None:
        """Spawn the app-server subprocess and run the initialize handshake.

        Idempotent: returns immediately when the process is already running.
        Raises CodexAppServerBinaryError when no usable binary is found or
        the process cannot be spawned.
        """
        with self._lock:
            if self._proc is not None:
                return
            Path(self.workspace_dir).mkdir(parents=True, exist_ok=True)
            binary = resolve_codex_binary(self.codex_binary)
            if not binary:
                raise CodexAppServerBinaryError(codex_binary_error_message(self.codex_binary))
            self.codex_binary = binary
            try:
                self._proc = subprocess.Popen(
                    [binary, "app-server", "--listen", "stdio://"],
                    stdin=subprocess.PIPE,
                    stdout=subprocess.PIPE,
                    stderr=subprocess.PIPE,
                    text=True,
                    encoding="utf-8",
                    cwd=self.workspace_dir,
                    bufsize=1,  # line-buffered: one JSONL message per line
                )
            except OSError as exc:
                raise CodexAppServerBinaryError(
                    f"Failed to start Codex app-server with binary '{binary}': {exc}"
                ) from exc
            self._start_reader_threads()
            self._initialize()

    def close(self) -> None:
        """Tear down the subprocess; safe to call when already closed."""
        with self._lock:
            proc = self._proc
            self._proc = None
        if proc is None:
            return
        try:
            # Closing stdin signals the server to shut down cleanly.
            if proc.stdin:
                proc.stdin.close()
        except OSError:
            pass
        try:
            proc.terminate()
            proc.wait(timeout=2)
        except Exception:
            # Escalate when it did not exit in time.
            proc.kill()

    def account_read(self) -> dict[str, Any]:
        """Return the `account/read` result (login/account state)."""
        return self.request("account/read", {"refreshToken": False})

    def model_list(self, *, include_hidden: bool = True) -> dict[str, Any]:
        """Return the `model/list` result, including hidden models by default."""
        return self.request("model/list", {"includeHidden": include_hidden})

    def invoke(
        self,
        *,
        prompt: str,
        model: str,
        output_schema: dict[str, Any],
        reasoning_effort: str | None,
        summary: str | None,
        personality: str | None,
    ) -> CodexInvocationResult:
        """Run one prompt as an ephemeral, read-only Codex thread/turn.

        Serialized behind `_request_lock`. Returns the turn's assistant text
        plus all notifications observed; when `cleanup_threads` is set, the
        thread is unsubscribed afterwards (best-effort).
        """
        with self._request_lock:
            self.start()
            thread_id = None
            try:
                thread = self.request(
                    "thread/start",
                    {
                        "approvalPolicy": "never",
                        "cwd": self.workspace_dir,
                        "ephemeral": True,
                        "model": model,
                        "personality": personality,
                        "sandbox": "read-only",
                        "serviceName": "tradingagents_codex",
                    },
                )
                thread_id = thread["thread"]["id"]
                started = self.request(
                    "turn/start",
                    {
                        "threadId": thread_id,
                        "input": [{"type": "text", "text": prompt}],
                        "model": model,
                        "effort": reasoning_effort,
                        "summary": summary,
                        "outputSchema": output_schema,
                    },
                )
                turn_id = started["turn"]["id"]
                final_text, notifications = self._collect_turn(turn_id)
                return CodexInvocationResult(final_text=final_text, notifications=notifications)
            finally:
                if thread_id and self.cleanup_threads:
                    try:
                        self.request("thread/unsubscribe", {"threadId": thread_id})
                    except CodexAppServerError:
                        pass

    def request(self, method: str, params: dict[str, Any] | None = None) -> dict[str, Any]:
        """Send one JSON-RPC request and block until its response arrives.

        Messages for other ids that arrive while waiting are deferred and
        replayed once the response is seen; server->client requests are
        acknowledged inline. Raises CodexAppServerError for an error response
        or a non-object result.
        """
        request_id = str(uuid.uuid4())
        self._write({"id": request_id, "method": method, "params": params or {}})
        deferred: list[dict[str, Any]] = []
        while True:
            message = self._next_message(self.request_timeout)
            if message.get("id") == request_id:
                # Replay unrelated messages before surfacing the outcome.
                self._restore_deferred(deferred)
                if "error" in message:
                    error = message["error"] or {}
                    code = error.get("code")
                    text = error.get("message", "unknown Codex app-server error")
                    raise CodexAppServerError(
                        f"{method} failed ({code}): {text}. stderr_tail={self._stderr_tail()}"
                    )
                result = message.get("result")
                if not isinstance(result, dict):
                    raise CodexAppServerError(f"{method} returned a non-object result: {result!r}")
                return result
            if "method" in message and "id" in message:
                # Server-initiated request: ack so the server keeps going.
                self._handle_server_request(message)
                continue
            deferred.append(message)

    def _initialize(self) -> None:
        """Run the `initialize` handshake, then send the `initialized` notification."""
        response = self.request(
            "initialize",
            {
                "clientInfo": {
                    "name": self.client_name,
                    "title": self.client_title,
                    "version": self.client_version,
                }
            },
        )
        # userAgent presence is used as a sanity check for a proper handshake.
        if not response.get("userAgent"):
            raise CodexAppServerError("Codex initialize response did not include userAgent.")
        self._write({"method": "initialized", "params": {}})

    def _collect_turn(self, turn_id: str) -> tuple[str, list[dict[str, Any]]]:
        """Drain notifications for `turn_id` until `turn/completed`.

        Collects agentMessage texts, preferring those flagged
        `phase == "final_answer"`; other agent messages are kept as fallback.
        Raises CodexAppServerError when the turn reports `failed`, and
        CodexStructuredOutputError when the turn ends with no assistant text.
        """
        notifications: list[dict[str, Any]] = []
        final_messages: list[str] = []
        fallback_messages: list[str] = []
        while True:
            message = self._next_message(self.request_timeout)
            if "method" in message and "id" in message:
                # Server->client request: acknowledge, do not record.
                self._handle_server_request(message)
                continue
            if "method" not in message:
                # A response for someone else (e.g. a concurrent request);
                # stash it for later consumption.
                self._pending.append(message)
                continue
            method = message["method"]
            params = message.get("params", {})
            notifications.append(message)
            if (
                method == "item/completed"
                and isinstance(params, dict)
                and params.get("turnId") == turn_id
            ):
                item = params.get("item", {})
                if isinstance(item, dict) and item.get("type") == "agentMessage":
                    text = str(item.get("text", ""))
                    if item.get("phase") == "final_answer":
                        final_messages.append(text)
                    else:
                        fallback_messages.append(text)
                continue
            if method == "turn/completed" and isinstance(params, dict):
                turn = params.get("turn", {})
                if isinstance(turn, dict) and turn.get("id") == turn_id:
                    status = turn.get("status")
                    if status == "failed":
                        error = turn.get("error", {})
                        message_text = error.get("message") if isinstance(error, dict) else None
                        raise CodexAppServerError(
                            message_text or f"Codex turn {turn_id} failed without an error message."
                        )
                    break
        if final_messages:
            return final_messages[-1], notifications
        if fallback_messages:
            return fallback_messages[-1], notifications
        raise CodexStructuredOutputError("Codex turn completed without an assistant message.")

    def _handle_server_request(self, message: dict[str, Any]) -> None:
        """Acknowledge a server->client request with an empty result (best-effort)."""
        try:
            self._write({"id": message["id"], "result": {}})
        except Exception:
            pass

    def _write(self, payload: dict[str, Any]) -> None:
        """Serialize one JSON message to the subprocess stdin as a single line."""
        if self._proc is None or self._proc.stdin is None:
            raise CodexAppServerError("Codex app-server is not running.")
        try:
            self._proc.stdin.write(json.dumps(payload) + "\n")
            self._proc.stdin.flush()
        except OSError as exc:
            raise CodexAppServerError(
                f"Failed to write to Codex app-server: {exc}. stderr_tail={self._stderr_tail()}"
            ) from exc

    def _next_message(self, timeout: float) -> dict[str, Any]:
        """Return the next message, preferring previously deferred ones.

        Raises CodexAppServerError on timeout or when the reader thread
        signals EOF (the `None` sentinel).
        """
        if self._pending:
            return self._pending.popleft()
        try:
            message = self._stdout_queue.get(timeout=timeout)
        except queue.Empty as exc:
            raise CodexAppServerError(
                f"Timed out waiting for Codex app-server after {timeout}s. stderr_tail={self._stderr_tail()}"
            ) from exc
        if message is None:
            raise CodexAppServerError(
                f"Codex app-server closed unexpectedly. stderr_tail={self._stderr_tail()}"
            )
        return message

    def _start_reader_threads(self) -> None:
        """Start daemon threads that pump stdout JSONL and stderr lines."""
        assert self._proc is not None
        assert self._proc.stdout is not None
        assert self._proc.stderr is not None

        def _read_stdout() -> None:
            # Parse each stdout line as JSON and enqueue dict payloads;
            # malformed lines are recorded in the stderr tail for diagnostics.
            stdout = self._proc.stdout
            assert stdout is not None
            for line in stdout:
                line = line.strip()
                if not line:
                    continue
                try:
                    payload = json.loads(line)
                except json.JSONDecodeError:
                    self._stderr_lines.append(f"invalid_json_stdout={line}")
                    continue
                if isinstance(payload, dict):
                    self._stdout_queue.put(payload)
            # EOF sentinel: wakes any waiter in _next_message.
            self._stdout_queue.put(None)

        def _read_stderr() -> None:
            stderr = self._proc.stderr
            assert stderr is not None
            for line in stderr:
                self._stderr_lines.append(line.rstrip())

        self._reader_thread = threading.Thread(target=_read_stdout, daemon=True)
        self._stderr_thread = threading.Thread(target=_read_stderr, daemon=True)
        self._reader_thread.start()
        self._stderr_thread.start()

    def _stderr_tail(self) -> str:
        """Return the last captured stderr lines joined for error messages."""
        return "\n".join(list(self._stderr_lines)[-40:])

    def _restore_deferred(self, deferred: list[dict[str, Any]]) -> None:
        """Push deferred messages back to the front, preserving arrival order."""
        for message in reversed(deferred):
            self._pending.appendleft(message)

View File

@ -0,0 +1,113 @@
from __future__ import annotations
import os
import shutil
import subprocess
from pathlib import Path
def resolve_codex_binary(codex_binary: str | None) -> str | None:
    """Locate a Codex executable, preferring explicit configuration.

    Resolution order: the `codex_binary` argument, then the `CODEX_BINARY`
    environment variable (each accepted immediately when usable), then
    `codex` on PATH and known Windows install locations (first usable wins).
    When nothing is usable, an explicitly-configured candidate is returned
    as-is, then the first existing discovered file, then ``None``.
    """
    explicit = [
        _normalize_explicit_binary(codex_binary),
        _normalize_explicit_binary(os.getenv("CODEX_BINARY")),
    ]
    for candidate in explicit:
        if candidate and _is_usable_codex_binary(candidate):
            return candidate

    discovered: list[str] = []
    from_path = shutil.which("codex")
    if from_path:
        discovered.append(from_path)
    discovered.extend(str(path) for path in _windows_codex_candidates())

    first_existing = None
    for candidate in _dedupe_candidates(discovered):
        if not Path(candidate).is_file():
            continue
        if first_existing is None:
            first_existing = candidate
        if _is_usable_codex_binary(candidate):
            return candidate

    # Nothing passed the usability probe: fall back to best-effort choices.
    for candidate in explicit:
        if candidate:
            return candidate
    return first_existing
def codex_binary_error_message(codex_binary: str | None) -> str:
    """Compose an actionable error message for a missing Codex executable.

    Mentions the requested name (argument, then `CODEX_BINARY`, then the
    default `codex`) and, when a known Windows install location actually
    exists, appends it as a hint.
    """
    requested = codex_binary or os.getenv("CODEX_BINARY") or "codex"
    message = (
        f"Could not find Codex binary '{requested}'. Install Codex, ensure it is on PATH, "
        "set the `CODEX_BINARY` environment variable, or configure `codex_binary` with the full executable path."
    )
    hint = next((str(path) for path in _windows_codex_candidates() if path.is_file()), None)
    if hint:
        message += f" Detected candidate: {hint}"
    return message
def _normalize_explicit_binary(value: str | None) -> str | None:
if not value:
return None
expanded = str(Path(value).expanduser())
has_separator = any(sep and sep in expanded for sep in (os.path.sep, os.path.altsep))
if has_separator:
return expanded if Path(expanded).is_file() else None
found = shutil.which(expanded)
return found or None
def _windows_codex_candidates() -> list[Path]:
if os.name != "nt":
return []
home = Path.home()
candidates = sorted(
home.glob(r".vscode/extensions/openai.chatgpt-*/bin/windows-x86_64/codex.exe"),
key=lambda path: path.stat().st_mtime if path.exists() else 0,
reverse=True,
)
candidates.extend(
[
home / ".codex" / ".sandbox-bin" / "codex.exe",
home / ".codex" / "bin" / "codex.exe",
home / "AppData" / "Local" / "Programs" / "Codex" / "codex.exe",
]
)
return candidates
def _dedupe_candidates(candidates: list[str]) -> list[str]:
unique = []
seen = set()
for candidate in candidates:
normalized = os.path.normcase(os.path.normpath(candidate))
if normalized in seen:
continue
seen.add(normalized)
unique.append(candidate)
return unique
def _is_usable_codex_binary(binary: str) -> bool:
if os.name != "nt":
return True
try:
completed = subprocess.run(
[binary, "--version"],
capture_output=True,
text=True,
timeout=5,
check=False,
)
except (OSError, subprocess.SubprocessError):
return False
return completed.returncode == 0

View File

@ -0,0 +1,407 @@
from __future__ import annotations
import json
import threading
import uuid
from typing import Any, Callable, Sequence
from pydantic import ConfigDict, Field, PrivateAttr
from langchain_core.language_models.chat_models import BaseChatModel
from langchain_core.messages import AIMessage, BaseMessage
from langchain_core.outputs import ChatGeneration, ChatResult
from .codex_app_server import CodexAppServerSession, CodexStructuredOutputError
from .codex_message_codec import (
format_messages_for_codex,
normalize_input_messages,
strip_json_fence,
)
from .codex_preflight import run_codex_preflight
from .codex_schema import (
build_plain_response_schema,
build_tool_response_schema,
normalize_tools_for_codex,
)
class CodexChatModel(BaseChatModel):
"""LangChain chat model that talks to `codex app-server` over stdio."""
model: str
codex_binary: str | None = None
codex_reasoning_effort: str | None = None
codex_summary: str | None = None
codex_personality: str | None = None
codex_workspace_dir: str
codex_request_timeout: float = 120.0
codex_max_retries: int = 2
codex_cleanup_threads: bool = True
session_factory: Callable[..., CodexAppServerSession] | None = Field(
default=None, exclude=True, repr=False
)
preflight_runner: Callable[..., Any] | None = Field(
default=None, exclude=True, repr=False
)
model_config = ConfigDict(arbitrary_types_allowed=True)
_session: CodexAppServerSession | None = PrivateAttr(default=None)
_session_lock: threading.Lock = PrivateAttr(default_factory=threading.Lock)
_preflight_lock: threading.Lock = PrivateAttr(default_factory=threading.Lock)
_preflight_done: bool = PrivateAttr(default=False)
@property
def _llm_type(self) -> str:
return "codex"
@property
def _identifying_params(self) -> dict[str, Any]:
return {
"model": self.model,
"codex_binary": self.codex_binary,
"codex_reasoning_effort": self.codex_reasoning_effort,
"codex_summary": self.codex_summary,
"codex_personality": self.codex_personality,
}
def preflight(self) -> None:
with self._preflight_lock:
if self._preflight_done:
return
runner = self.preflight_runner or run_codex_preflight
runner(
codex_binary=self.codex_binary,
model=self.model,
request_timeout=self.codex_request_timeout,
workspace_dir=self.codex_workspace_dir,
cleanup_threads=self.codex_cleanup_threads,
session_factory=self.session_factory or CodexAppServerSession,
)
self._preflight_done = True
def bind_tools(
self,
tools: Sequence[dict[str, Any] | type | Callable | Any],
*,
tool_choice: str | bool | dict[str, Any] | None = None,
**kwargs: Any,
):
normalized_tools = normalize_tools_for_codex(tools)
return self.bind(tools=normalized_tools, tool_choice=tool_choice, **kwargs)
def close(self) -> None:
with self._session_lock:
if self._session is not None:
self._session.close()
self._session = None
def _generate(
self,
messages: list[BaseMessage],
stop: list[str] | None = None,
run_manager=None,
**kwargs: Any,
) -> ChatResult:
self.preflight()
normalized_messages = normalize_input_messages(messages)
tools = kwargs.get("tools") or []
tool_choice = kwargs.get("tool_choice")
tool_binding = self._resolve_tool_binding(tools, tool_choice)
tools = tool_binding["tools"]
effective_tool_choice = tool_binding["tool_choice"]
output_schema = tool_binding["output_schema"]
tool_arguments_as_json_string = tool_binding["tool_arguments_as_json_string"]
raw_response: str | None = None
last_error: Exception | None = None
for attempt in range(self.codex_max_retries + 1):
retry_message = None
if attempt:
previous_error = str(last_error) if last_error is not None else "unknown schema mismatch"
retry_message = (
"The previous response did not satisfy TradingAgents validation: "
f"{previous_error}. Return only valid JSON that exactly matches the requested "
"schema and tool argument requirements."
)
prompt = format_messages_for_codex(
normalized_messages,
tool_names=[tool["function"]["name"] for tool in tools],
tool_schemas=tools,
tool_choice=effective_tool_choice,
tool_arguments_as_json_string=tool_arguments_as_json_string,
retry_message=retry_message,
)
result = self._session_or_create().invoke(
prompt=prompt,
model=self.model,
output_schema=output_schema,
reasoning_effort=self.codex_reasoning_effort,
summary=self.codex_summary,
personality=self.codex_personality,
)
raw_response = result.final_text
if run_manager is not None:
for notification in result.notifications:
if notification.get("method") != "item/agentMessage/delta":
continue
params = notification.get("params", {})
if isinstance(params, dict):
delta = params.get("delta")
if isinstance(delta, str) and delta:
run_manager.on_llm_new_token(delta)
try:
ai_message = (
self._parse_tool_response(
raw_response,
tools,
tool_arguments_as_json_string=tool_arguments_as_json_string,
)
if tools
else self._parse_plain_response(raw_response)
)
return ChatResult(generations=[ChatGeneration(message=ai_message)])
except (json.JSONDecodeError, CodexStructuredOutputError, ValueError) as exc:
last_error = exc
continue
raise CodexStructuredOutputError(
"Codex returned malformed structured output after "
f"{self.codex_max_retries + 1} attempt(s): {last_error}. "
f"Last response: {raw_response!r}"
)
def _parse_plain_response(self, raw_response: str) -> AIMessage:
payload = json.loads(strip_json_fence(raw_response))
if not isinstance(payload, dict) or not isinstance(payload.get("answer"), str):
raise CodexStructuredOutputError(
f"Expected plain response JSON with string `answer`, got: {payload!r}"
)
return AIMessage(content=payload["answer"])
def _parse_tool_response(
self,
raw_response: str,
tools: Sequence[dict[str, Any]],
*,
tool_arguments_as_json_string: bool,
) -> AIMessage:
payload = json.loads(strip_json_fence(raw_response))
if not isinstance(payload, dict):
raise CodexStructuredOutputError(f"Expected JSON object, got: {payload!r}")
mode = payload.get("mode")
content = payload.get("content", "")
if not isinstance(content, str):
raise CodexStructuredOutputError("Structured response `content` must be a string.")
if mode == "final":
tool_calls = payload.get("tool_calls", [])
if tool_calls not in ([], None):
raise CodexStructuredOutputError(
f"`mode=final` must not include tool calls, got: {tool_calls!r}"
)
return AIMessage(content=content)
if mode != "tool_calls":
raise CodexStructuredOutputError(f"Unknown structured response mode: {mode!r}")
raw_tool_calls = payload.get("tool_calls")
if not isinstance(raw_tool_calls, list) or not raw_tool_calls:
raise CodexStructuredOutputError("`mode=tool_calls` requires a non-empty tool_calls array.")
tool_calls: list[dict[str, Any]] = []
tool_parameters = {
tool.get("function", {}).get("name"): tool.get("function", {}).get("parameters", {})
for tool in tools
}
for item in raw_tool_calls:
if not isinstance(item, dict):
raise CodexStructuredOutputError(f"Tool call entries must be objects, got: {item!r}")
name = item.get("name")
arguments = self._extract_tool_arguments(
item,
tool_arguments_as_json_string=tool_arguments_as_json_string,
)
if not isinstance(name, str) or not isinstance(arguments, dict):
raise CodexStructuredOutputError(
f"Tool call entries must include string name and object arguments, got: {item!r}"
)
if name not in tool_parameters:
raise CodexStructuredOutputError(
f"Tool call name '{name}' is not in the bound tool set."
)
self._validate_tool_arguments(name, arguments, tool_parameters[name])
tool_calls.append(
{
"name": name,
"args": arguments,
"id": f"call_{uuid.uuid4().hex}",
}
)
return AIMessage(content=content, tool_calls=tool_calls)
def _extract_tool_arguments(
self,
item: dict[str, Any],
*,
tool_arguments_as_json_string: bool,
) -> dict[str, Any]:
if tool_arguments_as_json_string:
raw_arguments = item.get("arguments_json")
if not isinstance(raw_arguments, str):
raise CodexStructuredOutputError(
f"Tool call entries must include string arguments_json, got: {item!r}"
)
try:
parsed = json.loads(raw_arguments)
except json.JSONDecodeError as exc:
raise CodexStructuredOutputError(
f"Tool call arguments_json must contain valid JSON, got: {raw_arguments!r}"
) from exc
if not isinstance(parsed, dict):
raise CodexStructuredOutputError(
f"Tool call arguments_json must decode to an object, got: {parsed!r}"
)
return parsed
arguments = item.get("arguments")
if not isinstance(arguments, dict):
raise CodexStructuredOutputError(
f"Tool call entries must include object arguments, got: {item!r}"
)
return arguments
def _validate_tool_arguments(
self,
tool_name: str,
arguments: dict[str, Any],
schema: dict[str, Any] | None,
) -> None:
if not isinstance(schema, dict):
return
properties = schema.get("properties")
if properties is not None and not isinstance(properties, dict):
raise CodexStructuredOutputError(
f"Tool schema for '{tool_name}' has invalid properties metadata."
)
required = schema.get("required") or []
if isinstance(required, list):
missing = [name for name in required if name not in arguments]
if missing:
raise CodexStructuredOutputError(
f"Tool call '{tool_name}' is missing required arguments: {', '.join(missing)}"
)
if properties and schema.get("additionalProperties") is False:
unexpected = [name for name in arguments if name not in properties]
if unexpected:
raise CodexStructuredOutputError(
f"Tool call '{tool_name}' included unexpected arguments: {', '.join(unexpected)}"
)
def _session_or_create(self) -> CodexAppServerSession:
with self._session_lock:
if self._session is None:
factory = self.session_factory or CodexAppServerSession
self._session = factory(
codex_binary=self.codex_binary,
request_timeout=self.codex_request_timeout,
workspace_dir=self.codex_workspace_dir,
cleanup_threads=self.codex_cleanup_threads,
)
self._session.start()
return self._session
    def _resolve_tool_binding(
        self,
        tools: Sequence[dict[str, Any]],
        tool_choice: Any,
    ) -> dict[str, Any]:
        """Map LangChain-style tools/tool_choice onto a Codex output contract.

        Returns a dict with the effective ``tools`` list, a normalized
        ``tool_choice``, the JSON ``output_schema`` Codex must satisfy, and
        ``tool_arguments_as_json_string`` (True when more than one tool is
        bound, so arguments travel as JSON-encoded strings).

        Raises:
            CodexStructuredOutputError: for unsupported ``tool_choice`` values
                or a named tool that is not in the bound tool set.
        """
        tool_list = list(tools)
        if not tool_list:
            # No tools bound: force a plain-answer schema.
            return {
                "tools": [],
                "tool_choice": None,
                "output_schema": build_plain_response_schema(),
                "tool_arguments_as_json_string": False,
            }
        if tool_choice in (None, "auto"):
            # Model may either answer directly or request tool calls.
            return {
                "tools": tool_list,
                "tool_choice": None if tool_choice is None else "auto",
                "output_schema": build_tool_response_schema(tool_list, allow_final=True),
                "tool_arguments_as_json_string": len(tool_list) > 1,
            }
        if tool_choice in (False, "none"):
            # Tools explicitly disabled for this turn.
            return {
                "tools": [],
                "tool_choice": "none",
                "output_schema": build_plain_response_schema(),
                "tool_arguments_as_json_string": False,
            }
        if tool_choice in (True, "any", "required"):
            # Model must emit at least one tool call (allow_final=False).
            normalized_choice = "required" if tool_choice in (True, "required") else "any"
            return {
                "tools": tool_list,
                "tool_choice": normalized_choice,
                "output_schema": build_tool_response_schema(tool_list, allow_final=False),
                "tool_arguments_as_json_string": len(tool_list) > 1,
            }
        # Anything else must name a specific tool to force.
        selected_tool_name = self._extract_named_tool_choice(tool_choice)
        if selected_tool_name is None:
            raise CodexStructuredOutputError(
                f"Unsupported Codex tool_choice value: {tool_choice!r}"
            )
        selected_tools = [
            tool
            for tool in tool_list
            if tool.get("function", {}).get("name") == selected_tool_name
        ]
        if not selected_tools:
            available = ", ".join(
                tool.get("function", {}).get("name", "<unknown>")
                for tool in tool_list
            )
            raise CodexStructuredOutputError(
                f"Requested tool_choice '{selected_tool_name}' is not in the bound tool set. "
                f"Available tools: {available}"
            )
        return {
            "tools": selected_tools,
            "tool_choice": selected_tool_name,
            "output_schema": build_tool_response_schema(selected_tools, allow_final=False),
            "tool_arguments_as_json_string": False,
        }
def _extract_named_tool_choice(self, tool_choice: Any) -> str | None:
if isinstance(tool_choice, str):
return tool_choice
if not isinstance(tool_choice, dict):
return None
function = tool_choice.get("function")
if isinstance(function, dict):
name = function.get("name")
if isinstance(name, str) and name:
return name
name = tool_choice.get("name")
if isinstance(name, str) and name:
return name
return None

View File

@ -0,0 +1,40 @@
from __future__ import annotations
from pathlib import Path
from typing import Any, Optional
from .base_client import BaseLLMClient
from .codex_chat_model import CodexChatModel
from .validators import validate_model
class CodexClient(BaseLLMClient):
    """Client wrapper for the local Codex app-server provider."""
    def __init__(self, model: str, base_url: Optional[str] = None, **kwargs):
        """Store the model name plus Codex-specific keyword options.

        base_url is accepted for interface parity with other providers;
        Codex runs locally via the app-server binary.
        """
        super().__init__(model, base_url, **kwargs)
    def get_llm(self) -> Any:
        """Build a CodexChatModel from self.kwargs and run its preflight checks."""
        self.warn_if_unknown_model()
        llm = CodexChatModel(
            model=self.model,
            codex_binary=self.kwargs.get("codex_binary"),
            codex_reasoning_effort=self.kwargs.get("codex_reasoning_effort"),
            codex_summary=self.kwargs.get("codex_summary"),
            codex_personality=self.kwargs.get("codex_personality"),
            # Default workspace lives under the user's Codex home directory.
            codex_workspace_dir=self.kwargs.get(
                "codex_workspace_dir",
                str(Path.home() / ".codex" / "tradingagents-workspace"),
            ),
            codex_request_timeout=self.kwargs.get("codex_request_timeout", 120.0),
            codex_max_retries=self.kwargs.get("codex_max_retries", 2),
            codex_cleanup_threads=self.kwargs.get("codex_cleanup_threads", True),
            session_factory=self.kwargs.get("session_factory"),
            preflight_runner=self.kwargs.get("preflight_runner"),
            callbacks=self.kwargs.get("callbacks"),
        )
        # Fail fast if the Codex binary, auth, or model is unavailable.
        llm.preflight()
        return llm
    def validate_model(self) -> bool:
        """Check the model name against the registered Codex model list."""
        return validate_model("codex", self.model)

View File

@ -0,0 +1,236 @@
import json
from typing import Any, Iterable, Mapping, Sequence
from langchain_core.messages import AIMessage, BaseMessage, HumanMessage, SystemMessage, ToolMessage
# Subclasses ValueError so generic input-validation handlers also catch it.
class CodexMessageCodecError(ValueError):
    """Raised when TradingAgents inputs cannot be normalized for Codex."""
def normalize_input_messages(
    value: str | Sequence[BaseMessage | Mapping[str, Any]],
) -> list[BaseMessage]:
    """Normalize TradingAgents model inputs into LangChain messages.

    A bare string becomes a single HumanMessage. Sequence items may be
    BaseMessage instances (kept as-is) or role/content mappings (converted
    via _message_from_dict).

    Raises:
        CodexMessageCodecError: for unsupported item types or roles.
    """
    if isinstance(value, str):
        return [HumanMessage(content=value)]
    normalized: list[BaseMessage] = []
    for item in value:
        if isinstance(item, BaseMessage):
            normalized.append(item)
            continue
        if not isinstance(item, Mapping):
            raise CodexMessageCodecError(
                f"Unsupported message input type: {type(item).__name__}"
            )
        normalized.append(_message_from_dict(item))
    return normalized
def format_messages_for_codex(
    messages: Sequence[BaseMessage],
    *,
    tool_names: Iterable[str] = (),
    tool_schemas: Sequence[Mapping[str, Any]] = (),
    tool_choice: str | None = None,
    tool_arguments_as_json_string: bool = False,
    retry_message: str | None = None,
) -> str:
    """Render a chat transcript into a single text prompt for Codex.

    Builds an instruction preamble (tool availability, tool_choice policy,
    argument-schema hints, optional retry notice), then appends the
    transcript with each message rendered by _format_message. Blocks are
    joined with blank lines.
    """
    tool_list = list(tool_names)
    lines = [
        "You are answering on behalf of TradingAgents.",
        "The conversation transcript is provided below.",
        "Treat tool outputs as authoritative execution results from the host application.",
    ]
    if tool_list:
        lines.append(
            "If external data is still needed, respond with tool calls using only these tools: "
            + ", ".join(tool_list)
            + "."
        )
    else:
        lines.append("No host tools are available for this turn.")
    # Translate the tool_choice policy into an explicit instruction; a
    # specific tool name falls into the third branch.
    if tool_choice == "none":
        lines.append("Do not request tool calls for this turn.")
    elif tool_choice in {"any", "required"}:
        lines.append("You must respond with one or more tool calls for this turn.")
    elif tool_choice and tool_choice != "auto":
        lines.append(f"You must call the tool named `{tool_choice}` for this turn.")
    elif tool_choice == "auto":
        lines.append("Use tool calls only if they are necessary to answer correctly.")
    if tool_arguments_as_json_string:
        lines.append(
            "When returning tool calls, encode each tool argument object as a JSON string in `arguments_json`."
        )
    schema_lines = _format_tool_schema_lines(tool_schemas)
    if schema_lines:
        lines.append("Tool argument requirements:")
        lines.extend(schema_lines)
    lines.append("Respond only with JSON that matches the requested output schema.")
    if retry_message:
        lines.append(retry_message)
    transcript: list[str] = []
    for message in messages:
        transcript.append(_format_message(message))
    return "\n\n".join(lines + ["Conversation transcript:", *transcript])
def strip_json_fence(text: str) -> str:
    """Remove a Markdown code fence wrapping a JSON payload, if present.

    Handles both ``` and ```json fences. Unlike a naive split, this also
    strips an opening fence whose closing ``` is missing (a common
    truncation failure mode) instead of returning the fence untouched,
    which would make downstream ``json.loads`` fail.

    Text without a leading fence is returned stripped of outer whitespace.
    """
    stripped = text.strip()
    if not stripped.startswith("```"):
        return stripped
    # split("```") yields "" before the opening fence, then the fenced body;
    # a closing fence adds a third element but is not required.
    parts = stripped.split("```")
    if len(parts) >= 2:
        candidate = parts[1]
        body = candidate.lstrip()
        # Drop an optional "json" language tag on the fence line.
        if body.startswith("json"):
            candidate = body[4:]
        return candidate.strip()
    return stripped
def _message_from_dict(message: Mapping[str, Any]) -> BaseMessage:
    """Convert a role/content mapping into the matching LangChain message.

    Supported roles: system, user, tool (requires a tool_call_id), and
    assistant (tool_calls are normalized).

    Raises:
        CodexMessageCodecError: for unknown roles or a tool message
            without a tool_call_id.
    """
    role = str(message.get("role", "")).lower()
    content = _content_to_text(message.get("content", ""))
    if role == "system":
        return SystemMessage(content=content)
    if role == "user":
        return HumanMessage(content=content)
    if role == "tool":
        # Accept both snake_case and camelCase id keys.
        tool_call_id = str(message.get("tool_call_id") or message.get("toolCallId") or "")
        if not tool_call_id:
            raise CodexMessageCodecError("Tool messages require tool_call_id.")
        return ToolMessage(content=content, tool_call_id=tool_call_id)
    if role == "assistant":
        raw_tool_calls = message.get("tool_calls") or message.get("toolCalls") or []
        tool_calls = _normalize_tool_calls(raw_tool_calls)
        return AIMessage(content=content, tool_calls=tool_calls)
    raise CodexMessageCodecError(f"Unsupported message role: {role!r}")
def _normalize_tool_calls(raw_tool_calls: Any) -> list[dict[str, Any]]:
    """Normalize assistant tool calls to LangChain's {name, args, id} shape.

    Accepts both OpenAI-style entries (``{"function": {...}}`` with
    string-or-object arguments) and already-flat entries (``{"name", "args",
    "id"}``).

    Raises:
        CodexMessageCodecError: for non-sequence input, non-mapping items,
            or arguments that are not (or do not decode to) an object.
    """
    normalized: list[dict[str, Any]] = []
    if not raw_tool_calls:
        return normalized
    if not isinstance(raw_tool_calls, Sequence):
        raise CodexMessageCodecError("assistant.tool_calls must be a sequence")
    for item in raw_tool_calls:
        if not isinstance(item, Mapping):
            raise CodexMessageCodecError("assistant.tool_calls items must be objects")
        if "function" in item:
            # OpenAI-style entry: arguments may be a JSON string or an object.
            function = item.get("function")
            if not isinstance(function, Mapping):
                raise CodexMessageCodecError("assistant.tool_calls.function must be an object")
            raw_args = function.get("arguments", {})
            if isinstance(raw_args, str):
                try:
                    args = json.loads(raw_args)
                except json.JSONDecodeError as exc:
                    raise CodexMessageCodecError(
                        f"assistant tool arguments must be valid JSON: {raw_args!r}"
                    ) from exc
            else:
                args = raw_args
            if not isinstance(args, Mapping):
                raise CodexMessageCodecError("assistant tool arguments must decode to an object")
            normalized.append(
                {
                    "name": str(function.get("name", "")),
                    "args": dict(args),
                    "id": str(item.get("id") or ""),
                }
            )
            continue
        # Flat LangChain-style entry.
        args = item.get("args", {})
        if not isinstance(args, Mapping):
            raise CodexMessageCodecError("assistant tool args must be an object")
        normalized.append(
            {
                "name": str(item.get("name", "")),
                "args": dict(args),
                "id": str(item.get("id") or ""),
            }
        )
    return normalized
def _format_message(message: BaseMessage) -> str:
    """Render one message as a bracketed transcript block for the prompt."""
    # "HumanMessage" -> "Human", "AIMessage" -> "AI", etc.
    role = type(message).__name__.replace("Message", "") or "Message"
    body = _content_to_text(message.content)
    if isinstance(message, AIMessage) and message.tool_calls:
        # Include prior tool calls so Codex can see what it already requested.
        tool_call_json = json.dumps(
            [
                {
                    "id": tool_call.get("id"),
                    "name": tool_call.get("name"),
                    "args": tool_call.get("args", {}),
                }
                for tool_call in message.tool_calls
            ],
            ensure_ascii=False,
            indent=2,
            sort_keys=True,
        )
        return f"[{role}]\n{body}\nTool calls:\n{tool_call_json}".strip()
    if isinstance(message, ToolMessage):
        # Tool results are tagged with the id of the call they answer.
        return f"[Tool:{message.tool_call_id}]\n{body}".strip()
    return f"[{role}]\n{body}".strip()
def _content_to_text(content: Any) -> str:
if isinstance(content, str):
return content
if isinstance(content, list):
parts: list[str] = []
for item in content:
if isinstance(item, str):
parts.append(item)
elif isinstance(item, Mapping):
text = item.get("text")
if isinstance(text, str):
parts.append(text)
else:
parts.append(json.dumps(dict(item), ensure_ascii=False))
else:
parts.append(str(item))
return "\n".join(part for part in parts if part)
if content is None:
return ""
return str(content)
def _format_tool_schema_lines(tool_schemas: Sequence[Mapping[str, Any]]) -> list[str]:
lines: list[str] = []
for tool_schema in tool_schemas:
function = tool_schema.get("function")
if not isinstance(function, Mapping):
continue
name = function.get("name")
parameters = function.get("parameters") or {}
if not isinstance(name, str) or not isinstance(parameters, Mapping):
continue
required = parameters.get("required") or []
properties = parameters.get("properties") or {}
summary = {
"required": required if isinstance(required, list) else [],
"properties": properties if isinstance(properties, Mapping) else {},
}
lines.append(
f"- {name}: {json.dumps(summary, ensure_ascii=False, sort_keys=True)}"
)
return lines

View File

@ -0,0 +1,72 @@
from __future__ import annotations
from dataclasses import dataclass
from typing import Callable
from .codex_app_server import (
CodexAppServerAuthError,
CodexAppServerBinaryError,
CodexAppServerSession,
)
from .codex_binary import codex_binary_error_message, resolve_codex_binary
@dataclass(slots=True)
class CodexPreflightResult:
    """Outcome of a successful Codex preflight check."""
    # Account payload returned by the app-server's account read call.
    account: dict
    # Model identifiers reported by model/list.
    models: list[str]
def run_codex_preflight(
    *,
    codex_binary: str | None,
    model: str,
    request_timeout: float,
    workspace_dir: str,
    cleanup_threads: bool,
    session_factory: Callable[..., CodexAppServerSession] = CodexAppServerSession,
) -> CodexPreflightResult:
    """Verify the Codex binary, authentication, and requested model up front.

    Starts a short-lived app-server session, reads the account, and checks
    that ``model`` appears in ``model/list``. The session is always closed.

    Raises:
        CodexAppServerBinaryError: when the binary cannot be resolved or the
            model is not offered.
        CodexAppServerAuthError: when no authenticated account is available.
    """
    binary = resolve_codex_binary(codex_binary)
    if not binary:
        raise CodexAppServerBinaryError(codex_binary_error_message(codex_binary))
    session = session_factory(
        codex_binary=binary,
        request_timeout=request_timeout,
        workspace_dir=workspace_dir,
        cleanup_threads=cleanup_threads,
    )
    try:
        session.start()
        account_payload = session.account_read()
        account = account_payload.get("account")
        if not account:
            raise CodexAppServerAuthError(
                "Codex authentication is not available for TradingAgents. "
                "Run `codex login` or `codex login --device-auth`, then retry."
            )
        models_payload = session.model_list(include_hidden=True)
        models = _collect_model_names(models_payload)
        if model not in models:
            # Show only the first few models to keep the error readable.
            preview = ", ".join(models[:8]) if models else "no models reported"
            raise CodexAppServerBinaryError(
                f"Codex model '{model}' is not available from `model/list`. Available models: {preview}"
            )
        return CodexPreflightResult(account=account, models=models)
    finally:
        session.close()
def _collect_model_names(payload: dict) -> list[str]:
names: list[str] = []
for entry in payload.get("data", []) or []:
if not isinstance(entry, dict):
continue
for key in ("model", "id"):
value = entry.get(key)
if isinstance(value, str) and value not in names:
names.append(value)
return names

View File

@ -0,0 +1,118 @@
from __future__ import annotations
from typing import Any, Callable, Sequence
from langchain_core.tools import BaseTool
from langchain_core.utils.function_calling import convert_to_openai_tool
def normalize_tools_for_codex(
    tools: Sequence[dict[str, Any] | type | Callable | BaseTool],
) -> list[dict[str, Any]]:
    """Normalize LangChain tool definitions into OpenAI-style schemas.

    Each tool is converted with strict=True so the resulting JSON schema
    is suitable for structured-output validation.
    """
    normalized: list[dict[str, Any]] = []
    for tool in tools:
        normalized.append(convert_to_openai_tool(tool, strict=True))
    return normalized
def build_plain_response_schema() -> dict[str, Any]:
    """JSON schema for a plain (no tool call) Codex reply: {"answer": str}."""
    schema: dict[str, Any] = {"type": "object"}
    schema["properties"] = {"answer": {"type": "string"}}
    schema["required"] = ["answer"]
    schema["additionalProperties"] = False
    return schema
def build_tool_response_schema(
    tool_schemas: Sequence[dict[str, Any]],
    *,
    allow_final: bool = True,
) -> dict[str, Any]:
    """JSON schema for a Codex reply that may (or must) contain tool calls.

    When ``allow_final`` is False the reply mode is pinned to "tool_calls"
    and at least one call is required; otherwise the model may also answer
    with mode "final" and an empty tool_calls array.
    """
    item_schema = _tool_items_schema(tool_schemas)
    if allow_final:
        mode_schema: dict[str, Any] = {
            "type": "string",
            "enum": ["final", "tool_calls"],
        }
        calls_schema: dict[str, Any] = {
            "type": "array",
            "items": item_schema,
        }
    else:
        # Tool calls are mandatory: pin the mode and require >= 1 call.
        mode_schema = {"const": "tool_calls", "type": "string"}
        calls_schema = {
            "type": "array",
            "minItems": 1,
            "items": item_schema,
        }
    return {
        "type": "object",
        "properties": {
            "mode": mode_schema,
            "content": {"type": "string"},
            "tool_calls": calls_schema,
        },
        "required": ["mode", "content", "tool_calls"],
        "additionalProperties": False,
    }
def _tool_items_schema(tool_schemas: Sequence[dict[str, Any]]) -> dict[str, Any]:
if len(tool_schemas) == 1:
return _tool_call_variant(tool_schemas[0])
tool_names = [
tool_schema.get("function", {}).get("name")
for tool_schema in tool_schemas
if tool_schema.get("function", {}).get("name")
]
argument_properties: dict[str, Any] = {}
for tool_schema in tool_schemas:
parameters = tool_schema.get("function", {}).get("parameters") or {}
properties = parameters.get("properties") or {}
if not isinstance(properties, dict):
continue
for name, schema in properties.items():
if name not in argument_properties:
argument_properties[name] = schema
return {
"type": "object",
"properties": {
"name": {
"type": "string",
"enum": tool_names,
},
"arguments_json": {
"type": "string",
},
},
"required": ["name", "arguments_json"],
"additionalProperties": False,
}
def _tool_call_variant(tool_schema: dict[str, Any]) -> dict[str, Any]:
function = tool_schema.get("function", {})
parameters = function.get("parameters") or {"type": "object", "properties": {}}
return {
"type": "object",
"properties": {
"name": {
"const": function["name"],
"type": "string",
},
"arguments": parameters,
},
"required": ["name", "arguments"],
"additionalProperties": False,
}

View File

@ -1,9 +1,6 @@
from typing import Optional
from .base_client import BaseLLMClient
from .openai_client import OpenAIClient
from .anthropic_client import AnthropicClient
from .google_client import GoogleClient
def create_llm_client(
@ -15,7 +12,7 @@ def create_llm_client(
"""Create an LLM client for the specified provider.
Args:
provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter)
provider: LLM provider (openai, anthropic, google, xai, ollama, openrouter, codex)
model: Model name/identifier
base_url: Optional base URL for API endpoint
**kwargs: Additional provider-specific arguments
@ -35,15 +32,28 @@ def create_llm_client(
provider_lower = provider.lower()
if provider_lower in ("openai", "ollama", "openrouter"):
from .openai_client import OpenAIClient
return OpenAIClient(model, base_url, provider=provider_lower, **kwargs)
if provider_lower == "xai":
from .openai_client import OpenAIClient
return OpenAIClient(model, base_url, provider="xai", **kwargs)
if provider_lower == "anthropic":
from .anthropic_client import AnthropicClient
return AnthropicClient(model, base_url, **kwargs)
if provider_lower == "google":
from .google_client import GoogleClient
return GoogleClient(model, base_url, **kwargs)
if provider_lower == "codex":
from .codex_client import CodexClient
return CodexClient(model, base_url, **kwargs)
raise ValueError(f"Unsupported LLM provider: {provider}")

View File

@ -23,6 +23,20 @@ MODEL_OPTIONS: ProviderModeOptions = {
("GPT-5.4 Pro - Most capable, expensive ($30/$180 per 1M tokens)", "gpt-5.4-pro"),
],
},
"codex": {
"quick": [
("GPT-5.4 Mini - Local Codex session, fast tool use", "gpt-5.4-mini"),
("GPT-5.4 Nano - Lowest-cost Codex model", "gpt-5.4-nano"),
("GPT-5.4 - Frontier Codex model", "gpt-5.4"),
("GPT-4.1 - Strong non-reasoning fallback", "gpt-4.1"),
],
"deep": [
("GPT-5.4 - Frontier Codex model", "gpt-5.4"),
("GPT-5.2 - Strong Codex reasoning", "gpt-5.2"),
("GPT-5.4 Mini - Faster Codex alternative", "gpt-5.4-mini"),
("GPT-5.4 Pro - Highest capability Codex model", "gpt-5.4-pro"),
],
},
"anthropic": {
"quick": [
("Claude Sonnet 4.6 - Best speed and intelligence balance", "claude-sonnet-4-6"),

View File

@ -15,7 +15,8 @@ def validate_model(provider: str, model: str) -> bool:
For ollama, openrouter - any model is accepted.
"""
provider_lower = provider.lower()
provider_lower = provider.lower().strip()
model_name = model.strip()
if provider_lower in ("ollama", "openrouter"):
return True
@ -23,4 +24,4 @@ def validate_model(provider: str, model: str) -> bool:
if provider_lower not in VALID_MODELS:
return True
return model in VALID_MODELS[provider_lower]
return model_name in VALID_MODELS[provider_lower]

183
tradingagents/reporting.py Normal file
View File

@ -0,0 +1,183 @@
from __future__ import annotations
import datetime as dt
from pathlib import Path
from typing import Any, Mapping
def save_report_bundle(
    final_state: Mapping[str, Any],
    ticker: str,
    save_path: Path,
    *,
    generated_at: dt.datetime | None = None,
    language: str = "English",
) -> Path:
    """Persist a complete TradingAgents report bundle to disk.

    Writes per-section Markdown files (1_analysts .. 5_portfolio, each
    directory created only when it has content) plus an aggregated
    complete_report.md under ``save_path``, localized via _labels_for.

    Args:
        final_state: Graph output mapping holding the report fields.
        ticker: Symbol used in the report title.
        save_path: Destination directory (created if missing).
        generated_at: Timestamp for the header; defaults to now.
        language: Label language ("Korean" or English default).

    Returns:
        Path to the aggregated complete_report.md.
    """
    generated_at = generated_at or dt.datetime.now()
    save_path = Path(save_path)
    save_path.mkdir(parents=True, exist_ok=True)
    labels = _labels_for(language)
    analysis_date = _coerce_text(final_state.get("analysis_date"))
    trade_date = _coerce_text(final_state.get("trade_date"))
    sections: list[str] = []
    # I. Analyst team reports.
    analysts_dir = save_path / "1_analysts"
    analyst_parts: list[tuple[str, str]] = []
    for file_name, title, key in (
        ("market.md", labels["market_analyst"], "market_report"),
        ("sentiment.md", labels["social_analyst"], "sentiment_report"),
        ("news.md", labels["news_analyst"], "news_report"),
        ("fundamentals.md", labels["fundamentals_analyst"], "fundamentals_report"),
    ):
        content = _coerce_text(final_state.get(key))
        if not content:
            continue
        analysts_dir.mkdir(exist_ok=True)
        _write_text(analysts_dir / file_name, content)
        analyst_parts.append((title, content))
    if analyst_parts:
        sections.append(
            f"## {labels['section_analysts']}\n\n"
            + "\n\n".join(f"### {title}\n{content}" for title, content in analyst_parts)
        )
    # II. Research team bull/bear debate and manager decision.
    debate = final_state.get("investment_debate_state") or {}
    research_dir = save_path / "2_research"
    research_parts: list[tuple[str, str]] = []
    for file_name, title, key in (
        ("bull.md", labels["bull_researcher"], "bull_history"),
        ("bear.md", labels["bear_researcher"], "bear_history"),
        ("manager.md", labels["research_manager"], "judge_decision"),
    ):
        content = _coerce_text(debate.get(key))
        if not content:
            continue
        research_dir.mkdir(exist_ok=True)
        _write_text(research_dir / file_name, content)
        research_parts.append((title, content))
    if research_parts:
        sections.append(
            f"## {labels['section_research']}\n\n"
            + "\n\n".join(f"### {title}\n{content}" for title, content in research_parts)
        )
    # III. Trading team plan.
    trader_plan = _coerce_text(final_state.get("trader_investment_plan"))
    if trader_plan:
        trading_dir = save_path / "3_trading"
        trading_dir.mkdir(exist_ok=True)
        _write_text(trading_dir / "trader.md", trader_plan)
        sections.append(
            f"## {labels['section_trading']}\n\n### {labels['trader']}\n{trader_plan}"
        )
    # IV. Risk management team debate.
    risk = final_state.get("risk_debate_state") or {}
    risk_dir = save_path / "4_risk"
    risk_parts: list[tuple[str, str]] = []
    for file_name, title, key in (
        ("aggressive.md", labels["aggressive_analyst"], "aggressive_history"),
        ("conservative.md", labels["conservative_analyst"], "conservative_history"),
        ("neutral.md", labels["neutral_analyst"], "neutral_history"),
    ):
        content = _coerce_text(risk.get(key))
        if not content:
            continue
        risk_dir.mkdir(exist_ok=True)
        _write_text(risk_dir / file_name, content)
        risk_parts.append((title, content))
    if risk_parts:
        sections.append(
            f"## {labels['section_risk']}\n\n"
            + "\n\n".join(f"### {title}\n{content}" for title, content in risk_parts)
        )
    # V. Portfolio manager's final decision (from the risk debate judge).
    portfolio_decision = _coerce_text(risk.get("judge_decision"))
    if portfolio_decision:
        portfolio_dir = save_path / "5_portfolio"
        portfolio_dir.mkdir(exist_ok=True)
        _write_text(portfolio_dir / "decision.md", portfolio_decision)
        sections.append(
            f"## {labels['section_portfolio']}\n\n"
            f"### {labels['portfolio_manager']}\n{portfolio_decision}"
        )
    # Assemble the aggregated report header and body.
    metadata_lines = [f"{labels['generated_at']}: {generated_at.strftime('%Y-%m-%d %H:%M:%S')}"]
    if analysis_date:
        metadata_lines.append(f"{labels['analysis_date']}: {analysis_date}")
    if trade_date:
        metadata_lines.append(f"{labels['trade_date']}: {trade_date}")
    header = f"# {labels['report_title']}: {ticker}\n\n" + "\n".join(metadata_lines) + "\n\n"
    complete_report = save_path / "complete_report.md"
    _write_text(complete_report, header + "\n\n".join(sections))
    return complete_report
def _coerce_text(value: Any) -> str:
if value is None:
return ""
if isinstance(value, str):
return value
if isinstance(value, list):
return "\n".join(str(item) for item in value)
return str(value)
def _write_text(path: Path, content: str) -> None:
path.write_text(content, encoding="utf-8")
def _labels_for(language: str) -> dict[str, str]:
    """Return section/role labels for the report, localized by language.

    Only "korean" (case/whitespace-insensitive) selects the Korean table;
    every other value falls back to English.
    """
    if str(language).strip().lower() == "korean":
        return {
            "report_title": "트레이딩 분석 리포트",
            "generated_at": "생성 시각",
            "analysis_date": "분석 기준일",
            "trade_date": "시장 데이터 기준일",
            "section_analysts": "I. 애널리스트 팀 리포트",
            "section_research": "II. 리서치 팀 판단",
            "section_trading": "III. 트레이딩 팀 계획",
            "section_risk": "IV. 리스크 관리 팀 판단",
            "section_portfolio": "V. 포트폴리오 매니저 최종 판단",
            "market_analyst": "시장 애널리스트",
            "social_analyst": "소셜 심리 애널리스트",
            "news_analyst": "뉴스 애널리스트",
            "fundamentals_analyst": "펀더멘털 애널리스트",
            "bull_researcher": "강세 리서처",
            "bear_researcher": "약세 리서처",
            "research_manager": "리서치 매니저",
            "trader": "트레이더",
            "aggressive_analyst": "공격적 리스크 애널리스트",
            "conservative_analyst": "보수적 리스크 애널리스트",
            "neutral_analyst": "중립 리스크 애널리스트",
            "portfolio_manager": "포트폴리오 매니저",
        }
    return {
        "report_title": "Trading Analysis Report",
        "generated_at": "Generated",
        "analysis_date": "Analysis date",
        "trade_date": "Market data date",
        "section_analysts": "I. Analyst Team Reports",
        "section_research": "II. Research Team Decision",
        "section_trading": "III. Trading Team Plan",
        "section_risk": "IV. Risk Management Team Decision",
        "section_portfolio": "V. Portfolio Manager Decision",
        "market_analyst": "Market Analyst",
        "social_analyst": "Social Analyst",
        "news_analyst": "News Analyst",
        "fundamentals_analyst": "Fundamentals Analyst",
        "bull_researcher": "Bull Researcher",
        "bear_researcher": "Bear Researcher",
        "research_manager": "Research Manager",
        "trader": "Trader",
        "aggressive_analyst": "Aggressive Analyst",
        "conservative_analyst": "Conservative Analyst",
        "neutral_analyst": "Neutral Analyst",
        "portfolio_manager": "Portfolio Manager",
    }

View File

@ -0,0 +1,11 @@
from .config import ScheduledAnalysisConfig, load_scheduled_config
from .runner import execute_scheduled_run, main
from .site import build_site
# Public surface of the scheduled-analysis package (sorted alphabetically).
__all__ = [
    "ScheduledAnalysisConfig",
    "build_site",
    "execute_scheduled_run",
    "load_scheduled_config",
    "main",
]

View File

@ -0,0 +1,5 @@
from .runner import main
# Allow `python -m <package>` execution; propagate main()'s exit code.
if __name__ == "__main__":
    raise SystemExit(main())

View File

@ -0,0 +1,220 @@
from __future__ import annotations
import os
import tomllib
from dataclasses import dataclass, field, replace
from pathlib import Path
from typing import Iterable
from zoneinfo import ZoneInfo
from cli.utils import normalize_ticker_symbol
# Every analyst role the graph supports, in canonical order.
ALL_ANALYSTS = ("market", "social", "news", "fundamentals")
# Accepted values for [run].trade_date_mode in the TOML config.
VALID_TRADE_DATE_MODES = {"latest_available", "today", "previous_business_day", "explicit"}
@dataclass(frozen=True)
class RunSettings:
    """Per-run analysis options parsed from the [run] config table."""
    # Ticker symbols to analyze (normalized, de-duplicated, order preserved).
    tickers: list[str]
    # Analyst roles to enable; defaults to all of ALL_ANALYSTS.
    analysts: list[str] = field(default_factory=lambda: list(ALL_ANALYSTS))
    output_language: str = "Korean"
    # One of VALID_TRADE_DATE_MODES; "explicit" uses explicit_trade_date.
    trade_date_mode: str = "latest_available"
    explicit_trade_date: str | None = None
    # IANA timezone name used for run timestamps.
    timezone: str = "Asia/Seoul"
    max_debate_rounds: int = 1
    max_risk_discuss_rounds: int = 1
    latest_market_data_lookback_days: int = 14
    # When True, one failed ticker does not abort the whole run.
    continue_on_ticker_error: bool = True
@dataclass(frozen=True)
class LLMSettings:
    """LLM provider options parsed from the [llm] config table."""
    provider: str = "codex"
    # Model for deep-thinking agents vs. quick-turnaround agents.
    deep_model: str = "gpt-5.4"
    quick_model: str = "gpt-5.4"
    codex_reasoning_effort: str = "medium"
    codex_summary: str = "none"
    codex_personality: str = "none"
    # Per-request timeout in seconds.
    codex_request_timeout: float = 180.0
    codex_max_retries: int = 2
    codex_cleanup_threads: bool = True
    # None means "use the provider's default workspace/binary".
    codex_workspace_dir: str | None = None
    codex_binary: str | None = None
@dataclass(frozen=True)
class StorageSettings:
    """Filesystem locations parsed from the [storage] config table."""
    # Where archived run bundles are stored.
    archive_dir: Path
    # Where the generated static site is written.
    site_dir: Path
@dataclass(frozen=True)
class SiteSettings:
    """Static-site presentation options parsed from the [site] config table."""
    title: str = "TradingAgents Daily Reports"
    subtitle: str = "Automated multi-agent market analysis powered by Codex"
    # Cap on how many recent runs the homepage lists.
    max_runs_on_homepage: int = 30
@dataclass(frozen=True)
class ScheduledAnalysisConfig:
    """Fully-parsed scheduled-analysis configuration (see load_scheduled_config)."""
    run: RunSettings
    llm: LLMSettings
    storage: StorageSettings
    site: SiteSettings
    # Resolved path of the TOML file this config was loaded from.
    config_path: Path
def load_scheduled_config(path: str | Path) -> ScheduledAnalysisConfig:
    """Load and validate a scheduled-analysis TOML config file.

    Args:
        path: Path to the TOML file; relative storage paths are resolved
            against its parent directory.

    Returns:
        A fully-populated ScheduledAnalysisConfig with defaults applied.

    Raises:
        ValueError: for missing tickers, unknown analysts, an unsupported
            trade_date_mode, or a malformed explicit trade date.
    """
    config_path = Path(path).resolve()
    with config_path.open("rb") as handle:
        raw = tomllib.load(handle)
    run_raw = raw.get("run") or {}
    llm_raw = raw.get("llm") or {}
    storage_raw = raw.get("storage") or {}
    site_raw = raw.get("site") or {}
    tickers = _normalize_tickers(run_raw.get("tickers") or [])
    if not tickers:
        raise ValueError("Scheduled analysis config must declare at least one ticker in [run].tickers.")
    analysts = _normalize_analysts(run_raw.get("analysts") or list(ALL_ANALYSTS))
    trade_date_mode = str(run_raw.get("trade_date_mode", "latest_available")).strip().lower()
    explicit_trade_date = None
    # A top-level trade_date forces explicit mode; otherwise explicit mode
    # reads its date from explicit_trade_date.
    if run_raw.get("trade_date"):
        trade_date_mode = "explicit"
        explicit_trade_date = _validate_trade_date(str(run_raw["trade_date"]))
    elif trade_date_mode == "explicit":
        explicit_trade_date = _validate_trade_date(str(run_raw.get("explicit_trade_date", "")).strip())
    if trade_date_mode not in VALID_TRADE_DATE_MODES:
        raise ValueError(
            f"Unsupported trade_date_mode '{trade_date_mode}'. "
            f"Expected one of: {', '.join(sorted(VALID_TRADE_DATE_MODES))}."
        )
    timezone_name = str(run_raw.get("timezone", "Asia/Seoul")).strip()
    # Instantiating ZoneInfo validates the timezone name eagerly.
    ZoneInfo(timezone_name)
    base_dir = config_path.parent
    archive_dir = _resolve_path(storage_raw.get("archive_dir", ".tradingagents-scheduled/archive"), base_dir)
    site_dir = _resolve_path(storage_raw.get("site_dir", "site"), base_dir)
    return ScheduledAnalysisConfig(
        run=RunSettings(
            tickers=tickers,
            analysts=analysts,
            output_language=str(run_raw.get("output_language", "Korean")).strip() or "Korean",
            trade_date_mode=trade_date_mode,
            explicit_trade_date=explicit_trade_date,
            timezone=timezone_name,
            max_debate_rounds=int(run_raw.get("max_debate_rounds", 1)),
            max_risk_discuss_rounds=int(run_raw.get("max_risk_discuss_rounds", 1)),
            latest_market_data_lookback_days=int(run_raw.get("latest_market_data_lookback_days", 14)),
            continue_on_ticker_error=bool(run_raw.get("continue_on_ticker_error", True)),
        ),
        llm=LLMSettings(
            provider=str(llm_raw.get("provider", "codex")).strip().lower() or "codex",
            deep_model=str(llm_raw.get("deep_model", "gpt-5.4")).strip() or "gpt-5.4",
            quick_model=str(llm_raw.get("quick_model", "gpt-5.4")).strip() or "gpt-5.4",
            codex_reasoning_effort=str(llm_raw.get("codex_reasoning_effort", "medium")).strip() or "medium",
            codex_summary=str(llm_raw.get("codex_summary", "none")).strip() or "none",
            codex_personality=str(llm_raw.get("codex_personality", "none")).strip() or "none",
            codex_request_timeout=float(llm_raw.get("codex_request_timeout", 180.0)),
            codex_max_retries=int(llm_raw.get("codex_max_retries", 2)),
            codex_cleanup_threads=bool(llm_raw.get("codex_cleanup_threads", True)),
            codex_workspace_dir=_optional_string(llm_raw.get("codex_workspace_dir")),
            codex_binary=_optional_string(llm_raw.get("codex_binary")),
        ),
        storage=StorageSettings(
            archive_dir=archive_dir,
            site_dir=site_dir,
        ),
        site=SiteSettings(
            title=str(site_raw.get("title", "TradingAgents Daily Reports")).strip() or "TradingAgents Daily Reports",
            subtitle=str(
                site_raw.get(
                    "subtitle",
                    "Automated multi-agent market analysis powered by Codex",
                )
            ).strip()
            or "Automated multi-agent market analysis powered by Codex",
            max_runs_on_homepage=int(site_raw.get("max_runs_on_homepage", 30)),
        ),
        config_path=config_path,
    )
def with_overrides(
    config: ScheduledAnalysisConfig,
    *,
    archive_dir: str | Path | None = None,
    site_dir: str | Path | None = None,
    tickers: Iterable[str] | None = None,
    trade_date: str | None = None,
) -> ScheduledAnalysisConfig:
    """Return a copy of ``config`` with CLI/workflow overrides applied.

    None (or empty) overrides leave the corresponding setting untouched;
    a trade_date override also forces trade_date_mode to "explicit".
    """
    run = config.run
    storage = config.storage
    if tickers is not None:
        run = replace(run, tickers=_normalize_tickers(tickers))
    if trade_date:
        run = replace(run, trade_date_mode="explicit", explicit_trade_date=_validate_trade_date(trade_date))
    if archive_dir:
        storage = replace(storage, archive_dir=Path(archive_dir).expanduser().resolve())
    if site_dir:
        storage = replace(storage, site_dir=Path(site_dir).expanduser().resolve())
    return replace(config, run=run, storage=storage)
def _normalize_tickers(values: Iterable[str]) -> list[str]:
    """Normalize ticker symbols, dropping empties and duplicates.

    Order of first occurrence is preserved; each raw value goes through
    normalize_ticker_symbol before de-duplication.
    """
    seen: set[str] = set()
    result: list[str] = []
    for raw in values:
        symbol = normalize_ticker_symbol(str(raw))
        if symbol and symbol not in seen:
            seen.add(symbol)
            result.append(symbol)
    return result
def _normalize_analysts(values: Iterable[str]) -> list[str]:
    """Validate and de-duplicate analyst names (case-insensitive).

    Returns the names in first-occurrence order; an empty result falls
    back to the full ALL_ANALYSTS list.

    Raises:
        ValueError: for any name outside ALL_ANALYSTS.
    """
    seen: set[str] = set()
    ordered: list[str] = []
    for raw in values:
        analyst = str(raw).strip().lower()
        if analyst not in ALL_ANALYSTS:
            raise ValueError(
                f"Unsupported analyst '{analyst}'. Expected only: {', '.join(ALL_ANALYSTS)}."
            )
        if analyst not in seen:
            seen.add(analyst)
            ordered.append(analyst)
    return ordered or list(ALL_ANALYSTS)
def _resolve_path(value: str | os.PathLike[str], base_dir: Path) -> Path:
expanded = os.path.expanduser(os.path.expandvars(str(value)))
path = Path(expanded)
if not path.is_absolute():
path = (base_dir / path).resolve()
return path
def _optional_string(value: object) -> str | None:
if value is None:
return None
text = str(value).strip()
return text or None
def _validate_trade_date(value: str) -> str:
text = value.strip()
if len(text) != 10 or text[4] != "-" or text[7] != "-":
raise ValueError(f"Invalid trade date '{value}'. Expected YYYY-MM-DD.")
return text

View File

@ -0,0 +1,362 @@
from __future__ import annotations
import argparse
import json
import traceback
from datetime import date, datetime, timedelta
from pathlib import Path
from time import perf_counter
from typing import Any
from zoneinfo import ZoneInfo
import yfinance as yf
from cli.stats_handler import StatsCallbackHandler
from tradingagents.default_config import DEFAULT_CONFIG
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.reporting import save_report_bundle
from .config import ScheduledAnalysisConfig, load_scheduled_config, with_overrides
from .site import build_site
def main(argv: list[str] | None = None) -> int:
    """CLI entry point: run the scheduled analysis, or only rebuild the site.

    Returns 0 on success; 1 only when --strict is set and at least one
    ticker failed.
    """
    arg_parser = argparse.ArgumentParser(
        description="Run a non-interactive scheduled TradingAgents analysis and build a static report site."
    )
    arg_parser.add_argument("--config", default="config/scheduled_analysis.toml", help="Path to scheduled analysis TOML config.")
    arg_parser.add_argument("--archive-dir", help="Override archive directory for run history.")
    arg_parser.add_argument("--site-dir", help="Override generated site output directory.")
    arg_parser.add_argument("--tickers", help="Comma-separated ticker override.")
    arg_parser.add_argument("--trade-date", help="Optional YYYY-MM-DD override for all tickers.")
    arg_parser.add_argument("--site-only", action="store_true", help="Only rebuild the static site from archived runs.")
    arg_parser.add_argument("--strict", action="store_true", help="Return a non-zero exit code if any ticker fails.")
    arg_parser.add_argument("--label", default="github-actions", help="Run label for archived metadata.")
    opts = arg_parser.parse_args(argv)

    cfg = with_overrides(
        load_scheduled_config(opts.config),
        archive_dir=opts.archive_dir,
        site_dir=opts.site_dir,
        tickers=_parse_ticker_override(opts.tickers),
        trade_date=opts.trade_date,
    )

    if opts.site_only:
        manifests = build_site(cfg.storage.archive_dir, cfg.storage.site_dir, cfg.site)
        print(
            f"Rebuilt static site at {cfg.storage.site_dir} from {len(manifests)} archived run(s)."
        )
        return 0

    manifest = execute_scheduled_run(cfg, run_label=opts.label)
    print(
        f"Completed run {manifest['run_id']} with status {manifest['status']} "
        f"({manifest['summary']['successful_tickers']} success / {manifest['summary']['failed_tickers']} failed)."
    )
    if opts.strict and manifest["summary"]["failed_tickers"]:
        return 1
    return 0
def execute_scheduled_run(
    config: ScheduledAnalysisConfig,
    *,
    run_label: str = "manual",
) -> dict[str, Any]:
    """Run every configured ticker, archive the results, and rebuild the site.

    Creates an archive directory keyed by timestamp + label, runs each ticker
    through the analysis graph, writes run.json / latest-run.json manifests,
    and regenerates the static site. Returns the run manifest.
    """
    tz = ZoneInfo(config.run.timezone)
    started_at = datetime.now(tz)
    run_id = _build_run_id(started_at, run_label)
    run_dir = config.storage.archive_dir / "runs" / started_at.strftime("%Y") / run_id
    run_dir.mkdir(parents=True, exist_ok=True)
    engine_results_dir = run_dir / "engine-results"

    summaries: list[dict[str, Any]] = []
    for ticker in config.run.tickers:
        summary = _run_single_ticker(
            config=config,
            ticker=ticker,
            run_dir=run_dir,
            engine_results_dir=engine_results_dir,
        )
        summaries.append(summary)
        # Stop at the first failure unless the config asks to keep going.
        if summary["status"] != "success" and not config.run.continue_on_ticker_error:
            break

    finished_at = datetime.now(tz)
    failed = sum(1 for item in summaries if item["status"] != "success")
    succeeded = len(summaries) - failed
    if not failed:
        status = "success"
    elif succeeded:
        status = "partial_failure"
    else:
        status = "failed"

    manifest = {
        "version": 1,
        "run_id": run_id,
        "label": run_label,
        "status": status,
        "started_at": started_at.isoformat(),
        "finished_at": finished_at.isoformat(),
        "timezone": config.run.timezone,
        "settings": _settings_snapshot(config),
        "summary": {
            "total_tickers": len(summaries),
            "successful_tickers": succeeded,
            "failed_tickers": failed,
        },
        "tickers": summaries,
    }
    _write_json(run_dir / "run.json", manifest)
    # latest-run.json lets external tooling find the newest manifest cheaply.
    _write_json(config.storage.archive_dir / "latest-run.json", manifest)
    build_site(config.storage.archive_dir, config.storage.site_dir, config.site)
    return manifest
def resolve_trade_date(
    ticker: str,
    config: ScheduledAnalysisConfig,
) -> str:
    """Resolve the ISO trade date for ticker per config.run.trade_date_mode.

    Modes: 'explicit' (use the configured date when present), 'today',
    'previous_business_day'. Any other mode falls back to the most recent
    daily bar reported by yfinance within the configured lookback window.
    """
    mode = config.run.trade_date_mode
    if mode == "explicit" and config.run.explicit_trade_date:
        return config.run.explicit_trade_date

    today = datetime.now(ZoneInfo(config.run.timezone)).date()
    if mode == "today":
        return today.isoformat()
    if mode == "previous_business_day":
        return _previous_business_day(today).isoformat()

    # Fallback: ask yfinance for the latest available daily bar.
    bars = yf.Ticker(ticker).history(
        period=f"{config.run.latest_market_data_lookback_days}d",
        interval="1d",
        auto_adjust=False,
    )
    if bars.empty:
        raise RuntimeError(
            f"Could not resolve the latest available trade date for {ticker}; yfinance returned no rows."
        )
    idx = bars.index[-1]
    # pandas Timestamps expose to_pydatetime(); other index values pass through.
    value = getattr(idx, "to_pydatetime", lambda: idx)()
    resolved = value.date() if hasattr(value, "date") else value
    if not isinstance(resolved, date):
        raise RuntimeError(f"Unexpected trade date index value for {ticker}: {idx!r}")
    return resolved.isoformat()
def _run_single_ticker(
    *,
    config: ScheduledAnalysisConfig,
    ticker: str,
    run_dir: Path,
    engine_results_dir: Path,
) -> dict[str, Any]:
    """Run the full analysis graph for one ticker and archive its artifacts.

    Writes per-ticker files under run_dir/tickers/<ticker>: the report
    bundle, final_state.json, analysis.json, and (when present) a copy of
    the engine's full-state log. Never raises: failures are captured in
    error.json and reported as a summary dict with status "failed".
    """
    ticker_dir = run_dir / "tickers" / ticker
    ticker_dir.mkdir(parents=True, exist_ok=True)
    ticker_started = datetime.now(ZoneInfo(config.run.timezone))
    timer_start = perf_counter()
    # The analysis date is "today" in the configured timezone; the trade
    # date resolved below may be an earlier market day.
    analysis_date = ticker_started.date().isoformat()
    try:
        trade_date = resolve_trade_date(ticker, config)
        stats_handler = StatsCallbackHandler()
        graph = TradingAgentsGraph(
            config.run.analysts,
            debug=False,
            config=_graph_config(config, engine_results_dir),
            callbacks=[stats_handler],
        )
        final_state, decision = graph.propagate(
            ticker,
            trade_date,
            analysis_date=analysis_date,
        )
        report_dir = ticker_dir / "report"
        report_file = save_report_bundle(
            final_state,
            ticker,
            report_dir,
            generated_at=ticker_started,
            language=config.run.output_language,
        )
        final_state_path = ticker_dir / "final_state.json"
        _write_json(final_state_path, _serialize_final_state(final_state))
        # The engine writes its own full-state log under engine-results;
        # mirror it next to this ticker's artifacts when it exists.
        graph_log = (
            engine_results_dir
            / ticker
            / "TradingAgentsStrategy_logs"
            / f"full_states_log_{trade_date}.json"
        )
        copied_graph_log = None
        if graph_log.exists():
            copied_graph_log = ticker_dir / graph_log.name
            copied_graph_log.write_text(graph_log.read_text(encoding="utf-8"), encoding="utf-8")
        metrics = stats_handler.get_stats()
        # analysis.json: the durable per-ticker record archived with the run.
        analysis_payload = {
            "ticker": ticker,
            "status": "success",
            "trade_date": trade_date,
            "analysis_date": analysis_date,
            "decision": str(decision),
            "started_at": ticker_started.isoformat(),
            "finished_at": datetime.now(ZoneInfo(config.run.timezone)).isoformat(),
            "duration_seconds": round(perf_counter() - timer_start, 2),
            "metrics": metrics,
            "provider": config.llm.provider,
            "models": {
                "quick_model": config.llm.quick_model,
                "deep_model": config.llm.deep_model,
            },
        }
        analysis_path = ticker_dir / "analysis.json"
        _write_json(analysis_path, analysis_payload)
        # The returned summary is embedded in run.json; artifact paths are
        # relative to the run directory so the archive stays relocatable.
        return {
            "ticker": ticker,
            "status": "success",
            "trade_date": trade_date,
            "analysis_date": analysis_date,
            "decision": str(decision),
            "started_at": ticker_started.isoformat(),
            "finished_at": analysis_payload["finished_at"],
            "duration_seconds": analysis_payload["duration_seconds"],
            "metrics": metrics,
            "artifacts": {
                "analysis_json": _relative_to_run(run_dir, analysis_path),
                "report_markdown": _relative_to_run(run_dir, report_file),
                "final_state_json": _relative_to_run(run_dir, final_state_path),
                "graph_log_json": _relative_to_run(run_dir, copied_graph_log) if copied_graph_log else None,
            },
        }
    except Exception as exc:
        # Any failure (date resolution, graph run, archiving) lands here so
        # one bad ticker cannot abort the whole scheduled run by raising.
        error_payload = {
            "ticker": ticker,
            "status": "failed",
            "analysis_date": analysis_date,
            "error": str(exc),
            "traceback": traceback.format_exc(),
            "started_at": ticker_started.isoformat(),
            "finished_at": datetime.now(ZoneInfo(config.run.timezone)).isoformat(),
            "duration_seconds": round(perf_counter() - timer_start, 2),
        }
        error_path = ticker_dir / "error.json"
        _write_json(error_path, error_payload)
        return {
            "ticker": ticker,
            "status": "failed",
            "analysis_date": analysis_date,
            "trade_date": None,
            "decision": None,
            "error": str(exc),
            "started_at": error_payload["started_at"],
            "finished_at": error_payload["finished_at"],
            "duration_seconds": error_payload["duration_seconds"],
            # Zeroed metrics keep the summary schema uniform across statuses.
            "metrics": {"llm_calls": 0, "tool_calls": 0, "tokens_in": 0, "tokens_out": 0},
            "artifacts": {
                "error_json": _relative_to_run(run_dir, error_path),
            },
        }
def _graph_config(config: ScheduledAnalysisConfig, engine_results_dir: Path) -> dict[str, Any]:
    """Build a TradingAgentsGraph config dict from the scheduled-run settings."""
    llm = config.llm
    run = config.run
    cfg = DEFAULT_CONFIG.copy()
    cfg.update(
        results_dir=str(engine_results_dir),
        llm_provider=llm.provider,
        quick_think_llm=llm.quick_model,
        deep_think_llm=llm.deep_model,
        max_debate_rounds=run.max_debate_rounds,
        max_risk_discuss_rounds=run.max_risk_discuss_rounds,
        output_language=run.output_language,
        codex_reasoning_effort=llm.codex_reasoning_effort,
        codex_summary=llm.codex_summary,
        codex_personality=llm.codex_personality,
        codex_request_timeout=llm.codex_request_timeout,
        codex_max_retries=llm.codex_max_retries,
        codex_cleanup_threads=llm.codex_cleanup_threads,
    )
    # Optional overrides: only set when configured, so engine defaults apply.
    if llm.codex_workspace_dir:
        cfg["codex_workspace_dir"] = llm.codex_workspace_dir
    if llm.codex_binary:
        cfg["codex_binary"] = llm.codex_binary
    return cfg
def _serialize_final_state(final_state: dict[str, Any]) -> dict[str, Any]:
investment_debate = final_state.get("investment_debate_state") or {}
risk_debate = final_state.get("risk_debate_state") or {}
return {
"company_of_interest": final_state.get("company_of_interest"),
"trade_date": final_state.get("trade_date"),
"analysis_date": final_state.get("analysis_date"),
"market_report": final_state.get("market_report"),
"sentiment_report": final_state.get("sentiment_report"),
"news_report": final_state.get("news_report"),
"fundamentals_report": final_state.get("fundamentals_report"),
"investment_debate_state": {
"bull_history": investment_debate.get("bull_history", ""),
"bear_history": investment_debate.get("bear_history", ""),
"history": investment_debate.get("history", ""),
"current_response": investment_debate.get("current_response", ""),
"judge_decision": investment_debate.get("judge_decision", ""),
},
"trader_investment_plan": final_state.get("trader_investment_plan", ""),
"investment_plan": final_state.get("investment_plan", ""),
"risk_debate_state": {
"aggressive_history": risk_debate.get("aggressive_history", ""),
"conservative_history": risk_debate.get("conservative_history", ""),
"neutral_history": risk_debate.get("neutral_history", ""),
"history": risk_debate.get("history", ""),
"judge_decision": risk_debate.get("judge_decision", ""),
},
"final_trade_decision": final_state.get("final_trade_decision", ""),
}
def _settings_snapshot(config: ScheduledAnalysisConfig) -> dict[str, Any]:
return {
"provider": config.llm.provider,
"quick_model": config.llm.quick_model,
"deep_model": config.llm.deep_model,
"codex_reasoning_effort": config.llm.codex_reasoning_effort,
"output_language": config.run.output_language,
"analysts": list(config.run.analysts),
"trade_date_mode": config.run.trade_date_mode,
"max_debate_rounds": config.run.max_debate_rounds,
"max_risk_discuss_rounds": config.run.max_risk_discuss_rounds,
}
def _build_run_id(started_at: datetime, run_label: str) -> str:
clean_label = "".join(ch if ch.isalnum() or ch in ("-", "_") else "-" for ch in run_label.strip()) or "run"
return f"{started_at.strftime('%Y%m%dT%H%M%S')}_{clean_label}"
def _parse_ticker_override(value: str | None) -> list[str] | None:
if not value:
return None
return [item.strip() for item in value.split(",") if item.strip()]
def _previous_business_day(current: date) -> date:
candidate = current - timedelta(days=1)
while candidate.weekday() >= 5:
candidate -= timedelta(days=1)
return candidate
def _relative_to_run(run_dir: Path, path: Path | None) -> str | None:
if path is None:
return None
return path.relative_to(run_dir).as_posix()
def _write_json(path: Path, payload: dict[str, Any]) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(json.dumps(payload, indent=2, ensure_ascii=False), encoding="utf-8")

View File

@ -0,0 +1,486 @@
from __future__ import annotations
import html
import json
import shutil
from datetime import datetime
from pathlib import Path
from typing import Any
from .config import SiteSettings
# markdown-it-py is an optional dependency; without it, reports fall back
# to escaped <pre> rendering (see _render_markdown).
try:
    from markdown_it import MarkdownIt
except ImportError:  # pragma: no cover
    MarkdownIt = None
# Shared renderer: CommonMark plus tables/strikethrough, with raw HTML
# disabled so report markdown cannot inject markup. None when unavailable.
_MARKDOWN = (
    MarkdownIt("commonmark", {"html": False, "linkify": True}).enable(["table", "strikethrough"])
    if MarkdownIt
    else None
)
def build_site(archive_dir: Path, site_dir: Path, settings: SiteSettings) -> list[dict[str, Any]]:
    """Regenerate the static report site from archived run manifests.

    The site directory is rebuilt from scratch on every call so stale pages
    never linger. Returns the manifests (newest first) that were rendered.
    """
    archive_dir = Path(archive_dir)
    site_dir = Path(site_dir)
    manifests = _load_run_manifests(archive_dir)

    if site_dir.exists():
        shutil.rmtree(site_dir)
    (site_dir / "assets").mkdir(parents=True, exist_ok=True)
    _write_text(site_dir / "assets" / "style.css", _STYLE_CSS)

    for manifest in manifests:
        run_dir = Path(manifest["_run_dir"])
        run_pages = site_dir / "runs" / manifest["run_id"]
        _copy_artifacts(site_dir, run_dir, manifest)
        _write_text(run_pages / "index.html", _render_run_page(manifest, settings))
        for ticker_summary in manifest.get("tickers", []):
            ticker_page = run_pages / f"{ticker_summary['ticker']}.html"
            _write_text(ticker_page, _render_ticker_page(manifest, ticker_summary, settings))

    _write_text(site_dir / "index.html", _render_index_page(manifests, settings))
    # feed.json exposes all manifests for machine consumers, minus the
    # internal _run_dir bookkeeping key.
    feed = {
        "generated_at": datetime.now().isoformat(),
        "runs": [
            {key: value for key, value in manifest.items() if key != "_run_dir"}
            for manifest in manifests
        ],
    }
    _write_json(site_dir / "feed.json", feed)
    return manifests
def _load_run_manifests(archive_dir: Path) -> list[dict[str, Any]]:
manifests: list[dict[str, Any]] = []
runs_root = archive_dir / "runs"
if not runs_root.exists():
return manifests
for path in runs_root.rglob("run.json"):
payload = json.loads(path.read_text(encoding="utf-8"))
payload["_run_dir"] = str(path.parent)
manifests.append(payload)
manifests.sort(key=lambda item: item.get("started_at", ""), reverse=True)
return manifests
def _copy_artifacts(site_dir: Path, run_dir: Path, manifest: dict[str, Any]) -> None:
for ticker_summary in manifest.get("tickers", []):
download_dir = site_dir / "downloads" / manifest["run_id"] / ticker_summary["ticker"]
download_dir.mkdir(parents=True, exist_ok=True)
for relative_path in (ticker_summary.get("artifacts") or {}).values():
if not relative_path:
continue
source = run_dir / relative_path
if source.is_file():
shutil.copy2(source, download_dir / source.name)
def _render_index_page(manifests: list[dict[str, Any]], settings: SiteSettings) -> str:
    """Render the homepage HTML.

    Shows a hero section for the most recent run (or a placeholder when no
    runs exist yet) and a card grid capped at settings.max_runs_on_homepage.
    """
    latest = manifests[0] if manifests else None
    # Hero: latest-run summary card, or an empty-state variant.
    latest_html = (
        f"""
<section class="hero">
  <div>
    <p class="eyebrow">Latest automated run</p>
    <h1>{_escape(settings.title)}</h1>
    <p class="subtitle">{_escape(settings.subtitle)}</p>
  </div>
  <div class="hero-card">
    <div class="status {latest['status']}">{_escape(latest['status'].replace('_', ' '))}</div>
    <p><strong>Run ID</strong><span>{_escape(latest['run_id'])}</span></p>
    <p><strong>Started</strong><span>{_escape(latest['started_at'])}</span></p>
    <p><strong>Tickers</strong><span>{latest['summary']['total_tickers']}</span></p>
    <p><strong>Success</strong><span>{latest['summary']['successful_tickers']}</span></p>
    <p><strong>Failed</strong><span>{latest['summary']['failed_tickers']}</span></p>
    <a class="button" href="runs/{_escape(latest['run_id'])}/index.html">Open latest run</a>
  </div>
</section>
"""
        if latest
        else f"""
<section class="hero">
  <div>
    <p class="eyebrow">Waiting for first run</p>
    <h1>{_escape(settings.title)}</h1>
    <p class="subtitle">{_escape(settings.subtitle)}</p>
  </div>
  <div class="hero-card">
    <div class="status pending">no data yet</div>
    <p>The scheduled workflow has not produced an archived run yet.</p>
  </div>
</section>
"""
    )
    # Recent-run cards, newest first (manifests are pre-sorted).
    cards = []
    for manifest in manifests[: settings.max_runs_on_homepage]:
        cards.append(
            f"""
<article class="run-card">
  <div class="run-card-header">
    <a href="runs/{_escape(manifest['run_id'])}/index.html">{_escape(manifest['run_id'])}</a>
    <span class="status {manifest['status']}">{_escape(manifest['status'].replace('_', ' '))}</span>
  </div>
  <p>{_escape(manifest['started_at'])}</p>
  <p>{manifest['summary']['successful_tickers']} succeeded, {manifest['summary']['failed_tickers']} failed</p>
  <p>{_escape(manifest['settings']['provider'])} / {_escape(manifest['settings']['deep_model'])}</p>
</article>
"""
        )
    body = latest_html + f"""
<section class="section">
  <div class="section-head">
    <h2>Recent runs</h2>
    <p>{len(manifests)} archived run(s)</p>
  </div>
  <div class="run-grid">
    {''.join(cards) if cards else '<p class="empty">No archived runs were found.</p>'}
  </div>
</section>
"""
    # Homepage lives at the site root, so asset links need no prefix.
    return _page_template(settings.title, body, prefix="")
def _render_run_page(manifest: dict[str, Any], settings: SiteSettings) -> str:
    """Render a single run's detail page: run metadata plus one card per ticker."""
    ticker_cards = []
    for ticker_summary in manifest.get("tickers", []):
        # Failed tickers show their error text in place of a decision.
        ticker_cards.append(
            f"""
<article class="ticker-card">
  <div class="ticker-card-header">
    <a href="{_escape(ticker_summary['ticker'])}.html">{_escape(ticker_summary['ticker'])}</a>
    <span class="status {ticker_summary['status']}">{_escape(ticker_summary['status'])}</span>
  </div>
  <p><strong>Analysis date</strong><span>{_escape(ticker_summary.get('analysis_date') or '-')}</span></p>
  <p><strong>Trade date</strong><span>{_escape(ticker_summary.get('trade_date') or '-')}</span></p>
  <p><strong>Duration</strong><span>{ticker_summary.get('duration_seconds', 0):.1f}s</span></p>
  <p><strong>Decision</strong><span>{_escape(ticker_summary.get('decision') or ticker_summary.get('error') or '-')}</span></p>
</article>
"""
        )
    body = f"""
<nav class="breadcrumbs"><a href="../../index.html">Home</a></nav>
<section class="hero compact">
  <div>
    <p class="eyebrow">Run detail</p>
    <h1>{_escape(manifest['run_id'])}</h1>
    <p class="subtitle">{_escape(manifest['started_at'])}</p>
  </div>
  <div class="hero-card">
    <div class="status {manifest['status']}">{_escape(manifest['status'].replace('_', ' '))}</div>
    <p><strong>Provider</strong><span>{_escape(manifest['settings']['provider'])}</span></p>
    <p><strong>Deep model</strong><span>{_escape(manifest['settings']['deep_model'])}</span></p>
    <p><strong>Quick model</strong><span>{_escape(manifest['settings']['quick_model'])}</span></p>
    <p><strong>Language</strong><span>{_escape(manifest['settings']['output_language'])}</span></p>
  </div>
</section>
<section class="section">
  <div class="section-head">
    <h2>Tickers</h2>
    <p>{manifest['summary']['successful_tickers']} success / {manifest['summary']['failed_tickers']} failed</p>
  </div>
  <div class="ticker-grid">
    {''.join(ticker_cards)}
  </div>
</section>
"""
    # Run pages live two levels below the site root (runs/<run_id>/).
    return _page_template(f"{manifest['run_id']} | {settings.title}", body, prefix="../../")
def _render_ticker_page(
    manifest: dict[str, Any],
    ticker_summary: dict[str, Any],
    settings: SiteSettings,
) -> str:
    """Render one ticker's report page: stats, artifact downloads, failure
    details (when failed), and the rendered markdown report."""
    run_dir = Path(manifest["_run_dir"])
    # Rendered report body; falls back to a placeholder when the run
    # produced no report markdown (e.g. a failed ticker).
    report_html = "<p class='empty'>No report markdown was generated for this ticker.</p>"
    report_relative = (ticker_summary.get("artifacts") or {}).get("report_markdown")
    if report_relative:
        report_path = run_dir / report_relative
        if report_path.exists():
            report_html = _render_markdown(report_path.read_text(encoding="utf-8"))
    # Download pills link into the copies made by _copy_artifacts.
    download_links = []
    for relative_path in (ticker_summary.get("artifacts") or {}).values():
        if not relative_path:
            continue
        artifact_name = Path(relative_path).name
        download_links.append(
            f"<a class='pill' href='../../downloads/{_escape(manifest['run_id'])}/{_escape(ticker_summary['ticker'])}/{_escape(artifact_name)}'>{_escape(artifact_name)}</a>"
        )
    # Failure section only appears for non-success tickers.
    failure_html = ""
    if ticker_summary["status"] != "success":
        failure_html = (
            "<section class='section'>"
            "<div class='section-head'><h2>Failure</h2></div>"
            f"<pre class='error-block'>{_escape(ticker_summary.get('error') or 'Unknown error')}</pre>"
            "</section>"
        )
    body = f"""
<nav class="breadcrumbs">
  <a href="../../index.html">Home</a>
  <a href="index.html">{_escape(manifest['run_id'])}</a>
</nav>
<section class="hero compact">
  <div>
    <p class="eyebrow">Ticker report</p>
    <h1>{_escape(ticker_summary['ticker'])}</h1>
    <p class="subtitle">Analysis {_escape(ticker_summary.get('analysis_date') or '-')} / Market {_escape(ticker_summary.get('trade_date') or '-')} / {_escape(ticker_summary['status'])}</p>
  </div>
  <div class="hero-card">
    <div class="status {ticker_summary['status']}">{_escape(ticker_summary['status'])}</div>
    <p><strong>Analysis date</strong><span>{_escape(ticker_summary.get('analysis_date') or '-')}</span></p>
    <p><strong>Trade date</strong><span>{_escape(ticker_summary.get('trade_date') or '-')}</span></p>
    <p><strong>Decision</strong><span>{_escape(ticker_summary.get('decision') or '-')}</span></p>
    <p><strong>Duration</strong><span>{ticker_summary.get('duration_seconds', 0):.1f}s</span></p>
    <p><strong>LLM calls</strong><span>{ticker_summary.get('metrics', {}).get('llm_calls', 0)}</span></p>
    <p><strong>Tool calls</strong><span>{ticker_summary.get('metrics', {}).get('tool_calls', 0)}</span></p>
  </div>
</section>
<section class="section">
  <div class="section-head">
    <h2>Artifacts</h2>
  </div>
  <div class="pill-row">
    {''.join(download_links) if download_links else "<span class='empty'>No downloadable artifacts</span>"}
  </div>
</section>
{failure_html}
<section class="section prose">
  <div class="section-head">
    <h2>Rendered report</h2>
  </div>
  {report_html}
</section>
"""
    # Ticker pages live at runs/<run_id>/<ticker>.html, two levels deep.
    return _page_template(
        f"{ticker_summary['ticker']} | {settings.title}",
        body,
        prefix="../../",
    )
def _page_template(title: str, body: str, *, prefix: str) -> str:
    """Wrap body in the shared HTML shell.

    prefix is the relative path back to the site root ("" for the homepage,
    "../../" for run/ticker pages) used to locate the stylesheet.
    """
    return f"""<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8" />
<meta name="viewport" content="width=device-width, initial-scale=1" />
<title>{_escape(title)}</title>
<link rel="stylesheet" href="{prefix}assets/style.css" />
</head>
<body>
<main class="shell">
{body}
</main>
</body>
</html>
"""
def _render_markdown(content: str) -> str:
    """Render markdown to HTML; fall back to an escaped <pre> block when
    markdown-it-py is not installed."""
    if _MARKDOWN is not None:
        return _MARKDOWN.render(content)
    return f"<pre>{_escape(content)}</pre>"
def _write_text(path: Path, content: str) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(content, encoding="utf-8")
def _write_json(path: Path, payload: dict[str, Any]) -> None:
path.parent.mkdir(parents=True, exist_ok=True)
path.write_text(json.dumps(payload, indent=2, ensure_ascii=False), encoding="utf-8")
def _escape(value: object) -> str:
return html.escape(str(value))
_STYLE_CSS = """
:root {
--bg: #f4efe7;
--paper: rgba(255, 255, 255, 0.84);
--ink: #132238;
--muted: #5d6c7d;
--line: rgba(19, 34, 56, 0.12);
--accent: #0f7c82;
--success: #1f7a4d;
--warning: #c46a1c;
--danger: #b23b3b;
--shadow: 0 18px 45px rgba(17, 34, 51, 0.12);
}
* { box-sizing: border-box; }
body {
margin: 0;
color: var(--ink);
font-family: Aptos, "Segoe UI", "Noto Sans KR", sans-serif;
background:
radial-gradient(circle at top right, rgba(15, 124, 130, 0.16), transparent 34%),
radial-gradient(circle at top left, rgba(196, 106, 28, 0.16), transparent 28%),
linear-gradient(180deg, #f8f3eb 0%, #eef4f5 100%);
}
a { color: inherit; }
.shell {
width: min(1180px, calc(100% - 32px));
margin: 0 auto;
padding: 24px 0 56px;
}
.hero {
display: grid;
grid-template-columns: minmax(0, 1.7fr) minmax(280px, 0.9fr);
gap: 20px;
padding: 28px;
border: 1px solid var(--line);
border-radius: 28px;
background: linear-gradient(135deg, rgba(255,255,255,0.9), rgba(248,251,252,0.9));
box-shadow: var(--shadow);
}
.hero h1, .section h2 {
margin: 0;
font-family: Georgia, "Times New Roman", serif;
letter-spacing: -0.03em;
}
.hero h1 {
font-size: clamp(2.1rem, 4vw, 3.4rem);
line-height: 0.95;
}
.subtitle, .section-head p, .hero-card p, .run-card p, .ticker-card p, .breadcrumbs, .empty {
color: var(--muted);
}
.eyebrow {
margin: 0 0 14px;
text-transform: uppercase;
letter-spacing: 0.16em;
font-size: 0.78rem;
color: var(--accent);
}
.hero-card, .run-card, .ticker-card, .section, .error-block, .prose pre {
border: 1px solid var(--line);
border-radius: 22px;
background: var(--paper);
box-shadow: var(--shadow);
}
.hero-card, .run-card, .ticker-card, .section { padding: 18px 20px; }
.hero-card p, .ticker-card p {
display: flex;
justify-content: space-between;
gap: 12px;
margin: 10px 0;
}
.status {
display: inline-flex;
align-items: center;
padding: 8px 12px;
border-radius: 999px;
font-size: 0.82rem;
font-weight: 700;
text-transform: uppercase;
letter-spacing: 0.06em;
margin-bottom: 12px;
}
.status.success { background: rgba(31, 122, 77, 0.12); color: var(--success); }
.status.partial_failure, .status.pending { background: rgba(196, 106, 28, 0.14); color: var(--warning); }
.status.failed { background: rgba(178, 59, 59, 0.12); color: var(--danger); }
.button, .pill {
display: inline-flex;
align-items: center;
text-decoration: none;
border-radius: 999px;
padding: 10px 16px;
font-weight: 600;
border: 1px solid rgba(15, 124, 130, 0.22);
background: rgba(15, 124, 130, 0.12);
}
.section { margin-top: 20px; }
.section-head, .run-card-header, .ticker-card-header {
display: flex;
justify-content: space-between;
gap: 16px;
align-items: baseline;
}
.run-grid, .ticker-grid {
display: grid;
grid-template-columns: repeat(auto-fit, minmax(240px, 1fr));
gap: 16px;
}
.breadcrumbs {
display: flex;
gap: 12px;
margin: 0 0 12px;
}
.breadcrumbs a::after {
content: "/";
margin-left: 12px;
opacity: 0.4;
}
.breadcrumbs a:last-child::after { display: none; }
.pill-row {
display: flex;
flex-wrap: wrap;
gap: 10px;
}
.prose { line-height: 1.65; }
.prose h1, .prose h2, .prose h3 { font-family: Georgia, "Times New Roman", serif; }
.prose pre, .error-block {
padding: 16px;
overflow: auto;
white-space: pre-wrap;
font-family: Consolas, "Courier New", monospace;
}
.prose table {
width: 100%;
border-collapse: collapse;
}
.prose th, .prose td {
border: 1px solid var(--line);
padding: 10px;
text-align: left;
}
@media (max-width: 840px) {
.hero { grid-template-columns: 1fr; }
.shell { width: min(100% - 20px, 1180px); }
}
"""