update README.md

This commit is contained in:
kimheesu 2025-07-01 11:11:09 +09:00
parent a679d61724
commit bd07db13ae
4 changed files with 16 additions and 19 deletions

3
.gitignore vendored
View File

@@ -6,4 +6,5 @@ src/
eval_results/
eval_data/
*.egg-info/
results/
results/
.env

View File

@@ -119,9 +119,10 @@ You will also need the FinnHub API for financial data. All of our code is implem
export FINNHUB_API_KEY=$YOUR_FINNHUB_API_KEY
```
You will need the OpenAI API for all the agents.
You will need the OpenAI API or GEMINI API for all the agents.
```bash
export OPENAI_API_KEY=$YOUR_OPENAI_API_KEY
export GEMINI_API_KEY=$YOUR_GEMINI_API_KEY
```
### CLI Usage

15
main.py
View File

@@ -1,17 +1,14 @@
from tradingagents.graph.trading_graph import TradingAgentsGraph
from tradingagents.default_config import DEFAULT_CONFIG
from dotenv import load_dotenv
import os
load_dotenv()
# Create a custom config
config = DEFAULT_CONFIG.copy()
config["llm_provider"] = os.getenv("LLM_PROVIDER", "openai") # Use a different model
config["backend_url"] = os.getenv("BACKEND_URL", "https://api.openai.com/v1") # Use a different backend
config["deep_think_llm"] = os.getenv("DEEP_THINK_LLM", "o4-mini") # Use a different model
config["quick_think_llm"] = os.getenv("QUICK_THINK_LLM", "gpt-4o-mini") # Use a different model
config["max_debate_rounds"] = int(os.getenv("MAX_DEBATE_ROUNDS", 1)) # Increase debate rounds
config["online_tools"] = bool(os.getenv("ONLINE_TOOLS", "True")) # Increase debate rounds
config["llm_provider"] = "google" # Use a different model
config["backend_url"] = "https://generativelanguage.googleapis.com/v1" # Use a different backend
config["deep_think_llm"] = "gemini-2.5-pro" # Use a different model
config["quick_think_llm"] = "gemini-2.5-flash-lite-preview-06-17" # Use a different model
config["max_debate_rounds"] = 1 # Increase debate rounds
config["online_tools"] = True # Increase debate rounds
# Initialize with custom config
ta = TradingAgentsGraph(debug=True, config=config)

View File

@@ -1,6 +1,4 @@
import os
from dotenv import load_dotenv
load_dotenv()
DEFAULT_CONFIG = {
"project_dir": os.path.abspath(os.path.join(os.path.dirname(__file__), ".")),
@@ -11,14 +9,14 @@ DEFAULT_CONFIG = {
"dataflows/data_cache",
),
# LLM settings
"llm_provider": os.getenv("LLM_PROVIDER", "openai"),
"deep_think_llm": os.getenv("DEEP_THINK_LLM", "o4-mini"),
"quick_think_llm": os.getenv("QUICK_THINK_LLM", "gpt-4o-mini"),
"backend_url": os.getenv("BACKEND_URL", "https://api.openai.com/v1"),
"llm_provider": "openai",
"deep_think_llm": "o4-mini",
"quick_think_llm": "gpt-4o-mini",
"backend_url": "https://api.openai.com/v1",
# Debate and discussion settings
"max_debate_rounds": int(os.getenv("MAX_DEBATE_ROUNDS", 1)),
"max_debate_rounds": 1,
"max_risk_discuss_rounds": 1,
"max_recur_limit": 100,
# Tool settings
"online_tools": bool(os.getenv("ONLINE_TOOLS", "True")),
"online_tools": True,
}