migrate off Railway, update Dockerfile and LLM clients
This commit is contained in:
parent
ae18643103
commit
2e37bc117d
|
|
@ -218,3 +218,4 @@ __marimo__/
|
|||
# Cache
|
||||
**/data_cache/
|
||||
eval_results/
|
||||
.env.railway
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ WORKDIR /app
|
|||
|
||||
# System deps for building Python packages
|
||||
RUN apt-get update && apt-get install -y --no-install-recommends \
|
||||
gcc g++ && \
|
||||
gcc g++ curl && \
|
||||
rm -rf /var/lib/apt/lists/*
|
||||
|
||||
COPY requirements.txt .
|
||||
|
|
|
|||
24
app.py
24
app.py
|
|
@ -1,5 +1,9 @@
|
|||
"""FastAPI SSE backend for the structured equity ranking engine."""
|
||||
|
||||
from pathlib import Path
|
||||
from dotenv import load_dotenv
|
||||
load_dotenv(Path(__file__).parent / ".env")
|
||||
|
||||
import logging
|
||||
import os
|
||||
import re
|
||||
|
|
@ -466,3 +470,23 @@ async def stream_analysis(analysis_id: str, last_event: int = 0):
|
|||
@app.get("/health")
async def health():
    """Liveness probe: reports the service and its ranking engine as up."""
    payload = {"status": "ok", "engine": "structured_pipeline"}
    return payload
|
||||
|
||||
|
||||
# Recorded once at import time so /api/status can report a real uptime figure.
_START_TIME = time.time()


@app.get("/api/status")
async def get_status():
    """Structured pipeline status — no auth required.

    Returns a snapshot of the running service: engine name, number of
    in-flight analyses, a per-analysis created/done summary, the worker
    PID, and seconds elapsed since this module was imported.
    """
    return {
        "service": "structured-pipeline",
        "engine": "TradingAgents",
        "active_analyses": len(analyses),
        "analyses": {k: {"created": v["created"], "done": v["done"]} for k, v in analyses.items()},
        # `os` is imported at module top; no need for __import__("os").
        "pid": os.getpid(),
        # BUG FIX: the original computed time.time() - os.getpid(), which
        # subtracts a process id from wall-clock time and is not an uptime.
        # Report seconds since module import instead.
        "uptime": time.time() - _START_TIME,
    }
|
||||
|
||||
|
||||
@app.get("/api/health")
async def api_health():
    """Health probe under the /api prefix (mirrors /health)."""
    result = {"status": "ok"}
    result["service"] = "structured-pipeline"
    return result
|
||||
|
|
|
|||
6
main.py
6
main.py
|
|
@ -1,6 +1,8 @@
|
|||
from tradingagents.graph.trading_graph import TradingAgentsGraph
|
||||
from tradingagents.default_config import DEFAULT_CONFIG
|
||||
|
||||
import os
|
||||
|
||||
from dotenv import load_dotenv
|
||||
|
||||
# Load environment variables from .env file
|
||||
|
|
@ -8,8 +10,8 @@ load_dotenv()
|
|||
|
||||
# Create a custom config
|
||||
config = DEFAULT_CONFIG.copy()
|
||||
config["deep_think_llm"] = "gpt-5-mini" # Use a different model
|
||||
config["quick_think_llm"] = "gpt-5-mini" # Use a different model
|
||||
config["deep_think_llm"] = os.environ.get("DEEP_THINK_LLM", "gpt-5-mini")
|
||||
config["quick_think_llm"] = os.environ.get("QUICK_THINK_LLM", "gpt-5-mini")
|
||||
config["max_debate_rounds"] = 1 # Increase debate rounds
|
||||
|
||||
# Configure data vendors (default uses yfinance, no extra API keys needed)
|
||||
|
|
|
|||
|
|
@ -1,8 +0,0 @@
|
|||
[build]
|
||||
builder = "DOCKERFILE"
|
||||
dockerfilePath = "Dockerfile"
|
||||
|
||||
[deploy]
|
||||
healthcheckPath = "/health"
|
||||
restartPolicyType = "ON_FAILURE"
|
||||
restartPolicyMaxRetries = 3
|
||||
|
|
@ -60,6 +60,9 @@ class OpenAIClient(BaseLLMClient):
|
|||
llm_kwargs["api_key"] = "ollama" # Ollama doesn't require auth
|
||||
elif self.base_url:
|
||||
llm_kwargs["base_url"] = self.base_url
|
||||
api_key = os.environ.get("OPENAI_API_KEY")
|
||||
if api_key:
|
||||
llm_kwargs["api_key"] = api_key
|
||||
|
||||
for key in ("timeout", "max_retries", "reasoning_effort", "api_key", "callbacks"):
|
||||
if key in self.kwargs:
|
||||
|
|
|
|||
Loading…
Reference in New Issue