From 9f2ea6bb5b1f7d198a8a2dbc749ed6cd967b4bb4 Mon Sep 17 00:00:00 2001
From: bjorn
Date: Tue, 10 Jun 2025 17:42:21 +0800
Subject: [PATCH] chore: use free models

Signed-off-by: bjorn
---
 .gitignore                           |  2 ++
 Makefile                             | 23 +++++++++++++++++++++++
 cli/utils.py                         |  2 ++
 tradingagents/agents/utils/memory.py | 13 +++++++------
 tradingagents/default_config.py      |  5 +++--
 tradingagents/graph/trading_graph.py |  3 ++-
 6 files changed, 39 insertions(+), 9 deletions(-)
 create mode 100644 Makefile

diff --git a/.gitignore b/.gitignore
index 8313619e..b0f6d6f7 100644
--- a/.gitignore
+++ b/.gitignore
@@ -6,3 +6,5 @@ src/
 eval_results/
 eval_data/
 *.egg-info/
+.env
+.venv
\ No newline at end of file
diff --git a/Makefile b/Makefile
new file mode 100644
index 00000000..17228dd9
--- /dev/null
+++ b/Makefile
@@ -0,0 +1,23 @@
+.PHONY: setup
+
+setup:
+	@if [ ! -d .venv ]; then \
+		echo "Creating virtual environment..."; \
+		python3 -m venv .venv; \
+		echo "Downloading uv binary..."; \
+		curl -sL https://github.com/astral-sh/uv/releases/download/0.7.12/uv-aarch64-apple-darwin.tar.gz -o .venv/bin/uv.tar.gz; \
+		tar -xzf .venv/bin/uv.tar.gz -C .venv/bin; \
+		mv .venv/bin/uv-aarch64-apple-darwin/uv .venv/bin/uv; \
+		chmod +x .venv/bin/uv; \
+		rm .venv/bin/uv.tar.gz; \
+		rm -rf .venv/bin/uv-aarch64-apple-darwin; \
+	fi
+
+	@echo "Installing dependencies with uv..."
+	@.venv/bin/uv pip install -r requirements.txt
+	@echo "Please enter your API keys:"
+	@bash -c 'read -s -p "FINNHUB_API_KEY: " FINNHUB_KEY; echo; \
+		read -s -p "OPENAI_API_KEY: " OPENAI_KEY; echo; \
+		echo "export FINNHUB_API_KEY=$$FINNHUB_KEY" > .env; \
+		echo "export OPENAI_API_KEY=$$OPENAI_KEY" >> .env; \
+		echo "API keys saved to .env file. Run '\''source .env'\'' to load them."'
diff --git a/cli/utils.py b/cli/utils.py
index c3865253..92bacb10 100644
--- a/cli/utils.py
+++ b/cli/utils.py
@@ -131,6 +131,7 @@ def select_shallow_thinking_agent() -> str:
         ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
         ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
         ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
+        ("DeepSeek V3 - Free chat model via OpenRouter", "deepseek/deepseek-chat-v3-0324:free"),
     ]
 
     choice = questionary.select(
@@ -170,6 +171,7 @@ def select_deep_thinking_agent() -> str:
         ("o3-mini - Advanced reasoning model (lightweight)", "o3-mini"),
         ("o3 - Full advanced reasoning model", "o3"),
         ("o1 - Premier reasoning and problem-solving model", "o1"),
+        ("DeepSeek V3 - Free chat model via OpenRouter", "deepseek/deepseek-chat-v3-0324:free"),
     ]
 
     choice = questionary.select(
diff --git a/tradingagents/agents/utils/memory.py b/tradingagents/agents/utils/memory.py
index a1934bd8..6338ec98 100644
--- a/tradingagents/agents/utils/memory.py
+++ b/tradingagents/agents/utils/memory.py
@@ -1,21 +1,22 @@
 import chromadb
 from chromadb.config import Settings
-from openai import OpenAI
 import numpy as np
+from sentence_transformers import SentenceTransformer
 
 
 class FinancialSituationMemory:
     def __init__(self, name):
-        self.client = OpenAI()
+        self.embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
         self.chroma_client = chromadb.Client(Settings(allow_reset=True))
         self.situation_collection = self.chroma_client.create_collection(name=name)
 
     def get_embedding(self, text):
         """Get OpenAI embedding for a text"""
-        response = self.client.embeddings.create(
-            model="text-embedding-ada-002", input=text
-        )
-        return response.data[0].embedding
+        # response = self.client.embeddings.create(
+        #     model="text-embedding-ada-002", input=text
+        # )
+        # return response.data[0].embedding
+        return self.embedding_model.encode(text, convert_to_tensor=False)
 
     def add_situations(self, situations_and_advice):
         """Add financial situations and their corresponding advice. Parameter is a list of tuples (situation, rec)"""
diff --git a/tradingagents/default_config.py b/tradingagents/default_config.py
index 5bb2548c..7b5e86ae 100644
--- a/tradingagents/default_config.py
+++ b/tradingagents/default_config.py
@@ -8,8 +8,9 @@ DEFAULT_CONFIG = {
         "dataflows/data_cache",
     ),
     # LLM settings
-    "deep_think_llm": "o4-mini",
-    "quick_think_llm": "gpt-4o-mini",
+    "base_url": "https://openrouter.ai/api/v1",
+    "deep_think_llm": "deepseek/deepseek-r1:free",
+    "quick_think_llm": "deepseek/deepseek-r1:free",
     # Debate and discussion settings
     "max_debate_rounds": 1,
     "max_risk_discuss_rounds": 1,
diff --git a/tradingagents/graph/trading_graph.py b/tradingagents/graph/trading_graph.py
index bbd45071..159abb56 100644
--- a/tradingagents/graph/trading_graph.py
+++ b/tradingagents/graph/trading_graph.py
@@ -55,8 +55,9 @@ class TradingAgentsGraph:
         )
 
         # Initialize LLMs
-        self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"])
+        self.deep_thinking_llm = ChatOpenAI(base_url=self.config["base_url"], model=self.config["deep_think_llm"])
         self.quick_thinking_llm = ChatOpenAI(
+            base_url=self.config["base_url"],
             model=self.config["quick_think_llm"], temperature=0.1
         )
         self.toolkit = Toolkit(config=self.config)