Add support for DeepSeek official API

This commit is contained in:
Yuchen Zhang 2025-06-19 20:03:47 +08:00
parent 7eaf4d995f
commit 1d22ddae58
3 changed files with 11 additions and 0 deletions

View File

@ -151,6 +151,9 @@ def select_shallow_thinking_agent(provider) -> str:
],
"ollama": [
("llama3.2 local", "llama3.2"),
],
"deepseek": [
("DeepSeek V3 - a 685B-parameter, mixture-of-experts model", "deepseek-chat")
]
}
@ -212,6 +215,10 @@ def select_deep_thinking_agent(provider) -> str:
],
"ollama": [
("qwen3", "qwen3"),
],
"deepseek": [
("DeepSeek V3 - a 685B-parameter, mixture-of-experts model", "deepseek-chat"),
("DeepSeek-R1 - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek-reasoner"),
]
}

View File

@ -22,3 +22,4 @@ redis
chainlit
rich
questionary
langchain_anthropic

View File

@ -67,6 +67,9 @@ class TradingAgentsGraph:
elif self.config["llm_provider"].lower() == "google":
self.deep_thinking_llm = ChatGoogleGenerativeAI(model=self.config["deep_think_llm"])
self.quick_thinking_llm = ChatGoogleGenerativeAI(model=self.config["quick_think_llm"])
elif self.config["llm_provider"].lower() == 'deepseek':
self.deep_thinking_llm = ChatDeepSeek(model=self.config["deep_think_llm"])
self.quick_thinking_llm = ChatDeepSeek(model=self.config["quick_think_llm"])
else:
raise ValueError(f"Unsupported LLM provider: {self.config['llm_provider']}")