# TradingAgents — .env.example
# Copy this file to .env and fill in the values you need.

# ─── Cloud Providers ──────────────────────────────────────────────────────────
OPENAI_API_KEY=
GOOGLE_API_KEY=
ANTHROPIC_API_KEY=
XAI_API_KEY=
OPENROUTER_API_KEY=
# ─── Local LLM via llama.cpp ──────────────────────────────────────────────────
# 1. Start llama-server:
# llama-server --model ~/models/my-model.gguf --port 8081 --host 0.0.0.0
# 2. Copy this file to .env and uncomment + fill in the values below
# 3. Find your model ID with: curl http://localhost:8081/v1/models
# LLM_PROVIDER=llamacpp
# BACKEND_URL=http://localhost:8081/v1
# DEEP_THINK_LLM=your-model-name-here
# QUICK_THINK_LLM=your-model-name-here
# ─── Any OpenAI-compatible local server (LM Studio, vLLM, etc.) ───────────────
# LLM_PROVIDER=openai
# BACKEND_URL=http://localhost:1234/v1
# OPENAI_API_KEY=dummy
# Also set DEEP_THINK_LLM / QUICK_THINK_LLM to your server's model IDs,
# as shown in the llama.cpp section above.