feat: support Qwen model
This commit is contained in:
parent
623cf677c1
commit
606713face
12
cli/utils.py
12
cli/utils.py
|
|
@@ -133,6 +133,11 @@ def select_shallow_thinking_agent(provider) -> str:
|
|||
("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
|
||||
("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
|
||||
],
|
||||
"qwen": [
|
||||
("Qwen-Turbo - Fast speed and low cost, suitable for simple tasks", "qwen-turbo"),
|
||||
("Qwen-Plus - Balanced combination of performance and speed, ideal for moderately complex tasks", "qwen-plus"),
|
||||
("Qwen-Max - For complex and multi-step tasks", "qwen-max"),
|
||||
],
|
||||
"anthropic": [
|
||||
("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"),
|
||||
("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"),
|
||||
|
|
@@ -193,6 +198,12 @@ def select_deep_thinking_agent(provider) -> str:
|
|||
("o3 - Full advanced reasoning model", "o3"),
|
||||
("o1 - Premier reasoning and problem-solving model", "o1"),
|
||||
],
|
||||
"qwen": [
|
||||
("QwQ - Reasoning model. Have reached the level of DeepSeek-R1", "qwq-plus"),
|
||||
("Qwen-Turbo - Fast speed and low cost, suitable for simple tasks", "qwen-turbo"),
|
||||
("Qwen-Plus - Balanced combination of performance and speed, ideal for moderately complex tasks", "qwen-plus"),
|
||||
("Qwen-Max - For complex and multi-step tasks", "qwen-max"),
|
||||
],
|
||||
"anthropic": [
|
||||
("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"),
|
||||
("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"),
|
||||
|
|
@@ -242,6 +253,7 @@ def select_llm_provider() -> tuple[str, str]:
|
|||
# Define OpenAI api options with their corresponding endpoints
|
||||
BASE_URLS = [
|
||||
("OpenAI", "https://api.openai.com/v1"),
|
||||
("Qwen", "https://dashscope.aliyuncs.com/compatible-mode/v1"),
|
||||
("Anthropic", "https://api.anthropic.com/"),
|
||||
("Google", "https://generativelanguage.googleapis.com/v1"),
|
||||
("Openrouter", "https://openrouter.ai/api/v1"),
|
||||
|
|
|
|||
|
|
@@ -1,3 +1,4 @@
|
|||
import os
|
||||
import chromadb
|
||||
from chromadb.config import Settings
|
||||
from openai import OpenAI
|
||||
|
|
@@ -7,6 +8,12 @@ class FinancialSituationMemory:
|
|||
def __init__(self, name, config):
|
||||
if config["backend_url"] == "http://localhost:11434/v1":
|
||||
self.embedding = "nomic-embed-text"
|
||||
elif config["llm_provider"] == "qwen":
|
||||
self.embedding = "text-embedding-v2"
|
||||
self.client = OpenAI(
|
||||
base_url=config["backend_url"],
|
||||
api_key=os.getenv("DASHSCOPE_API_KEY")
|
||||
)
|
||||
else:
|
||||
self.embedding = "text-embedding-3-small"
|
||||
self.client = OpenAI()
|
||||
|
|
|
|||
|
|
@@ -704,10 +704,13 @@ def get_YFin_data(
|
|||
|
||||
def get_stock_news_openai(ticker, curr_date):
|
||||
config = get_config()
|
||||
client = OpenAI()
|
||||
client = OpenAI(
|
||||
base_url=config["backend_url"],
|
||||
api_key=os.getenv(config["api_key_env_name"])
|
||||
)
|
||||
|
||||
response = client.responses.create(
|
||||
model="gpt-4.1-mini",
|
||||
model=config["quick_think_llm"],
|
||||
input=[
|
||||
{
|
||||
"role": "system",
|
||||
|
|
@@ -739,10 +742,13 @@ def get_stock_news_openai(ticker, curr_date):
|
|||
|
||||
def get_global_news_openai(curr_date):
|
||||
config = get_config()
|
||||
client = OpenAI()
|
||||
client = OpenAI(
|
||||
base_url=config["backend_url"],
|
||||
api_key=os.getenv(config["api_key_env_name"])
|
||||
)
|
||||
|
||||
response = client.responses.create(
|
||||
model="gpt-4.1-mini",
|
||||
model=config["quick_think_llm"],
|
||||
input=[
|
||||
{
|
||||
"role": "system",
|
||||
|
|
@@ -774,10 +780,13 @@ def get_global_news_openai(curr_date):
|
|||
|
||||
def get_fundamentals_openai(ticker, curr_date):
|
||||
config = get_config()
|
||||
client = OpenAI()
|
||||
client = OpenAI(
|
||||
base_url=config["backend_url"],
|
||||
api_key=os.getenv(config["api_key_env_name"])
|
||||
)
|
||||
|
||||
response = client.responses.create(
|
||||
model="gpt-4.1-mini",
|
||||
model=config["quick_think_llm"],
|
||||
input=[
|
||||
{
|
||||
"role": "system",
|
||||
|
|
|
|||
|
|
@@ -8,10 +8,11 @@ DEFAULT_CONFIG = {
|
|||
"dataflows/data_cache",
|
||||
),
|
||||
# LLM settings
|
||||
"llm_provider": "openai",
|
||||
"deep_think_llm": "o4-mini",
|
||||
"quick_think_llm": "gpt-4o-mini",
|
||||
"backend_url": "https://api.openai.com/v1",
|
||||
"llm_provider": "qwen",
|
||||
"deep_think_llm": "qwen-plus",
|
||||
"quick_think_llm": "qwen-turbo",
|
||||
"backend_url": "https://dashscope.aliyuncs.com/compatible-mode/v1",
|
||||
"api_key_env_name": "DASHSCOPE_API_KEY",
|
||||
# Debate and discussion settings
|
||||
"max_debate_rounds": 1,
|
||||
"max_risk_discuss_rounds": 1,
|
||||
|
|
|
|||
|
|
@@ -61,6 +61,9 @@ class TradingAgentsGraph:
|
|||
if self.config["llm_provider"].lower() == "openai" or self.config["llm_provider"] == "ollama" or self.config["llm_provider"] == "openrouter":
|
||||
self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
|
||||
self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
|
||||
elif self.config["llm_provider"].lower() == "qwen":
|
||||
self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"], base_url=self.config["backend_url"], api_key=os.getenv("DASHSCOPE_API_KEY"))
|
||||
self.quick_thinking_llm = ChatOpenAI(model=self.config["quick_think_llm"], base_url=self.config["backend_url"], api_key=os.getenv("DASHSCOPE_API_KEY"))
|
||||
elif self.config["llm_provider"].lower() == "anthropic":
|
||||
self.deep_thinking_llm = ChatAnthropic(model=self.config["deep_think_llm"], base_url=self.config["backend_url"])
|
||||
self.quick_thinking_llm = ChatAnthropic(model=self.config["quick_think_llm"], base_url=self.config["backend_url"])
|
||||
|
|
|
|||
Loading…
Reference in New Issue