diff --git a/.gitignore b/.gitignore index 4ebf99e3..c7327cdd 100644 --- a/.gitignore +++ b/.gitignore @@ -7,3 +7,5 @@ eval_results/ eval_data/ *.egg-info/ .env +*.log +results \ No newline at end of file diff --git a/cli/utils.py b/cli/utils.py index 7b9682a6..a3758700 100644 --- a/cli/utils.py +++ b/cli/utils.py @@ -213,7 +213,7 @@ def select_deep_thinking_agent(provider) -> str: ], "ollama": [ ("llama3.1 local", "llama3.1"), - ("qwen3", "qwen3"), + ("qwen3 local", "qwen3"), ] } diff --git a/main.py b/main.py index 6c8ae3d9..18b62dbf 100644 --- a/main.py +++ b/main.py @@ -1,12 +1,15 @@ from tradingagents.graph.trading_graph import TradingAgentsGraph from tradingagents.default_config import DEFAULT_CONFIG +from dotenv import load_dotenv +# Load environment variables +load_dotenv() # Create a custom config config = DEFAULT_CONFIG.copy() -config["llm_provider"] = "google" # Use a different model -config["backend_url"] = "https://generativelanguage.googleapis.com/v1" # Use a different backend -config["deep_think_llm"] = "gemini-2.0-flash" # Use a different model -config["quick_think_llm"] = "gemini-2.0-flash" # Use a different model +config["llm_provider"] = "ollama" # Use a different provider +config["backend_url"] = "http://localhost:11434/v1" # Use a different backend +config["deep_think_llm"] = "llama3.2" # Use a different model +config["quick_think_llm"] = "llama3.2" # Use a different model config["max_debate_rounds"] = 1 # Increase debate rounds config["online_tools"] = True # Increase debate rounds @@ -14,7 +17,7 @@ config["online_tools"] = True # Increase debate rounds ta = TradingAgentsGraph(debug=True, config=config) # forward propagate -_, decision = ta.propagate("NVDA", "2024-05-10") +_, decision = ta.propagate("SPY", "2024-07-09") print(decision) # Memorize mistakes and reflect diff --git a/requirements.txt b/requirements.txt index a6154cd2..f8914b85 100644 --- a/requirements.txt +++ b/requirements.txt @@ -24,3 +24,4 @@ rich questionary langchain_anthropic 
langchain-google-genai +langchain-ollama \ No newline at end of file diff --git a/tradingagents/default_config.py b/tradingagents/default_config.py index 089e9c24..921f25d4 100644 --- a/tradingagents/default_config.py +++ b/tradingagents/default_config.py @@ -9,10 +9,10 @@ DEFAULT_CONFIG = { "dataflows/data_cache", ), # LLM settings - "llm_provider": "openai", - "deep_think_llm": "o4-mini", - "quick_think_llm": "gpt-4o-mini", - "backend_url": "https://api.openai.com/v1", + "llm_provider": "ollama", + "deep_think_llm": "llama3.2", + "quick_think_llm": "llama3.2", + "backend_url": "http://localhost:11434/v1", # Debate and discussion settings "max_debate_rounds": 1, "max_risk_discuss_rounds": 1, diff --git a/tradingagents/graph/trading_graph.py b/tradingagents/graph/trading_graph.py index 80a29e53..e7adf5ad 100644 --- a/tradingagents/graph/trading_graph.py +++ b/tradingagents/graph/trading_graph.py @@ -9,6 +9,7 @@ from typing import Dict, Any, Tuple, List, Optional from langchain_openai import ChatOpenAI from langchain_anthropic import ChatAnthropic from langchain_google_genai import ChatGoogleGenerativeAI +from langchain_ollama import ChatOllama from langgraph.prebuilt import ToolNode @@ -58,9 +59,12 @@ class TradingAgentsGraph: ) # Initialize LLMs - if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "ollama" or self.config["llm_provider"] == "openrouter": + if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "openrouter": self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["backend_url"]) self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], base_url=self.config["backend_url"]) + elif self.config["llm_provider"].lower() == "ollama": + self.deep_thinking_llm = ChatOllama(model=self.config["deep_think_llm"]) # NOTE(review): config["backend_url"] is ignored here — ChatOllama defaults to http://localhost:11434; pass base_url explicitly if a non-default Ollama host is intended + self.quick_thinking_llm = ChatOllama(model=self.config["quick_think_llm"]) # NOTE(review): same — backend_url is not applied to ChatOllama elif self.config["llm_provider"].lower() == "anthropic": 
self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"]) self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])