ollama support
parent a438acdbbd
commit ff1296a860

.gitignore

@@ -7,3 +7,5 @@ eval_results/
 eval_data/
 *.egg-info/
+.env
+*.log
 results

cli/utils.py

@@ -213,7 +213,7 @@ def select_deep_thinking_agent(provider) -> str:
     ],
     "ollama": [
         ("llama3.1 local", "llama3.1"),
-        ("qwen3", "qwen3"),
+        ("qwen3 local", "qwen3"),
     ]
 }

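Each entry above is a (display label, model name) pair. A rough sketch of how such pairs are typically surfaced through questionary; the exact wiring inside select_deep_thinking_agent may differ, and OLLAMA_MODELS is a hypothetical name:

import questionary

# Hypothetical stand-in for the "ollama" entry in the options dict above.
OLLAMA_MODELS = [
    ("llama3.1 local", "llama3.1"),
    ("qwen3 local", "qwen3"),
]

# Each (label, value) pair becomes one selectable choice; .ask() returns the value.
answer = questionary.select(
    "Select your deep-thinking LLM:",
    choices=[questionary.Choice(label, value=model) for label, model in OLLAMA_MODELS],
).ask()
print(answer)  # e.g. "qwen3"
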
main.py

@@ -1,12 +1,15 @@
 from tradingagents.graph.trading_graph import TradingAgentsGraph
 from tradingagents.default_config import DEFAULT_CONFIG
+from dotenv import load_dotenv
+
+# Load environment variables
+load_dotenv()
 
 # Create a custom config
 config = DEFAULT_CONFIG.copy()
-config["llm_provider"] = "google"  # Use a different model
-config["backend_url"] = "https://generativelanguage.googleapis.com/v1"  # Use a different backend
-config["deep_think_llm"] = "gemini-2.0-flash"  # Use a different model
-config["quick_think_llm"] = "gemini-2.0-flash"  # Use a different model
+config["llm_provider"] = "ollama"  # Use a local provider
+config["backend_url"] = "http://localhost:11434/v1"  # Ollama's OpenAI-compatible endpoint
+config["deep_think_llm"] = "llama3.2"  # Local deep-thinking model
+config["quick_think_llm"] = "llama3.2"  # Local quick-thinking model
 config["max_debate_rounds"] = 1  # Increase debate rounds
 config["online_tools"] = True  # Use online tools

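main.py now assumes an Ollama daemon listening on localhost:11434. A hypothetical preflight check (not part of this commit) that fails fast when the server or model is missing:

import urllib.request

# Assumption: Ollama serves an OpenAI-compatible API under /v1 on port 11434.
try:
    with urllib.request.urlopen("http://localhost:11434/v1/models", timeout=2) as resp:
        print("Ollama reachable, HTTP", resp.status)
except OSError as exc:
    raise SystemExit(
        "Ollama not reachable on localhost:11434; start it with `ollama serve` "
        "and fetch the model with `ollama pull llama3.2`"
    ) from exc
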
@@ -14,7 +17,7 @@ config["online_tools"] = True  # Use online tools
 ta = TradingAgentsGraph(debug=True, config=config)
 
 # forward propagate
-_, decision = ta.propagate("NVDA", "2024-05-10")
+_, decision = ta.propagate("SPY", "2024-07-09")
 print(decision)
 
 # Memorize mistakes and reflect

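propagate returns the final agent state together with the trading decision; the underscore discards the state. A small sketch, assuming only the call signature used above, for sweeping several tickers on the same date:

# Sketch: reuse one graph instance across tickers.
for ticker in ["SPY", "NVDA", "AAPL"]:
    _, decision = ta.propagate(ticker, "2024-07-09")
    print(ticker, "->", decision)
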
requirements.txt

@@ -24,3 +24,4 @@ rich
 questionary
 langchain_anthropic
 langchain-google-genai
+langchain-ollama

tradingagents/default_config.py

@@ -9,10 +9,10 @@ DEFAULT_CONFIG = {
         "dataflows/data_cache",
     ),
     # LLM settings
-    "llm_provider": "openai",
-    "deep_think_llm": "o4-mini",
-    "quick_think_llm": "gpt-4o-mini",
-    "backend_url": "https://api.openai.com/v1",
+    "llm_provider": "ollama",
+    "deep_think_llm": "llama3.2",
+    "quick_think_llm": "llama3.2",
+    "backend_url": "http://localhost:11434/v1",
     # Debate and discussion settings
     "max_debate_rounds": 1,
     "max_risk_discuss_rounds": 1,

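With the defaults flipped to Ollama, any caller that still wants the hosted OpenAI models must override the copied config explicitly, mirroring the pattern in main.py. A minimal sketch, restoring the values this hunk removes:

from tradingagents.default_config import DEFAULT_CONFIG

# Opt back into hosted OpenAI now that the shipped defaults point at Ollama.
config = DEFAULT_CONFIG.copy()
config["llm_provider"] = "openai"
config["backend_url"] = "https://api.openai.com/v1"
config["deep_think_llm"] = "o4-mini"
config["quick_think_llm"] = "gpt-4o-mini"
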
tradingagents/graph/trading_graph.py

@@ -9,6 +9,7 @@ from typing import Dict, Any, Tuple, List, Optional
 from langchain_openai import ChatOpenAI
 from langchain_anthropic import ChatAnthropic
 from langchain_google_genai import ChatGoogleGenerativeAI
+from langchain_ollama import ChatOllama
 
 from langgraph.prebuilt import ToolNode

@@ -58,9 +59,12 @@ class TradingAgentsGraph:
         )
 
         # Initialize LLMs
-        if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "ollama" or self.config["llm_provider"] == "openrouter":
+        if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "openrouter":
             self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
             self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
+        elif self.config["llm_provider"].lower() == "ollama":
+            self.deep_thinking_llm = ChatOllama(model=self.config["deep_think_llm"])
+            self.quick_thinking_llm = ChatOllama(model=self.config["quick_think_llm"])
         elif self.config["llm_provider"].lower() == "anthropic":
            self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
            self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])

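Note that the new ollama branch ignores config["backend_url"]: ChatOllama defaults to the local daemon and speaks Ollama's native API at the server root, not the /v1 OpenAI-compatible route. A sketch, an assumption rather than part of this commit, for pointing it at a remote host ("gpu-box" is a placeholder hostname):

from langchain_ollama import ChatOllama

# base_url takes the bare server root, without the /v1 suffix ChatOpenAI uses.
llm = ChatOllama(model="llama3.2", base_url="http://gpu-box:11434")  # hypothetical host
print(llm.invoke("Reply with OK").content)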