chore: update to use free models

Signed-off-by: bjorn <bjornjee95@gmail.com>
This commit is contained in:
bjorn 2025-06-10 18:11:01 +08:00
parent a879868396
commit 2c29702ace
6 changed files with 16 additions and 9 deletions

0
Makefile Normal file
View File

View File

@ -131,6 +131,7 @@ def select_shallow_thinking_agent() -> str:
("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
("Deepseek R1","deepseek/deepseek-chat-v3-0324:free"),
]
choice = questionary.select(
@ -170,6 +171,7 @@ def select_deep_thinking_agent() -> str:
("o3-mini - Advanced reasoning model (lightweight)", "o3-mini"),
("o3 - Full advanced reasoning model", "o3"),
("o1 - Premier reasoning and problem-solving model", "o1"),
("Deepseek R1","deepseek/deepseek-chat-v3-0324:free"),
]
choice = questionary.select(

View File

@ -22,3 +22,4 @@ redis
chainlit
rich
questionary
sentence-transformers

View File

@ -2,21 +2,23 @@ import chromadb
from chromadb.config import Settings
from openai import OpenAI
import numpy as np
from sentence_transformers import SentenceTransformer
class FinancialSituationMemory:
def __init__(self, name):
    """Create a memory store named *name* backed by an in-process Chroma collection."""
    # OpenAI client is retained even though embeddings are now computed locally
    # (see get_embedding) — NOTE(review): confirm self.client is still used
    # elsewhere after the switch to SentenceTransformer; otherwise it can go.
    self.client = OpenAI()
    # Local embedding model replaces the OpenAI embeddings API (no key / no cost).
    self.embedding_model = SentenceTransformer("all-MiniLM-L6-v2")
    # allow_reset=True permits wiping the in-process Chroma store between runs.
    self.chroma_client = chromadb.Client(Settings(allow_reset=True))
    # create_collection raises if the name already exists — TODO confirm callers
    # always pass unique names (get_or_create_collection would be more forgiving).
    self.situation_collection = self.chroma_client.create_collection(name=name)
def get_embedding(self, text):
"""Get OpenAI embedding for a text"""
response = self.client.embeddings.create(
model="text-embedding-ada-002", input=text
)
return response.data[0].embedding
# response = self.client.embeddings.create(
# model="text-embedding-ada-002", input=text
# )
# return response.data[0].embedding
return self.embedding_model.encode(text, convert_to_tensor=False)
def add_situations(self, situations_and_advice):
"""Add financial situations and their corresponding advice. Parameter is a list of tuples (situation, rec)"""

View File

@ -8,8 +8,9 @@ DEFAULT_CONFIG = {
"dataflows/data_cache",
),
# LLM settings
"deep_think_llm": "o4-mini",
"quick_think_llm": "gpt-4o-mini",
"base_url": "https://openrouter.ai/api/v1",
"deep_think_llm": "deepseek/deepseek-r1:free",
"quick_think_llm": "deepseek/deepseek-r1:free",
# Debate and discussion settings
"max_debate_rounds": 1,
"max_risk_discuss_rounds": 1,

View File

@ -55,8 +55,9 @@ class TradingAgentsGraph:
)
# Initialize LLMs
self.deep_thinking_llm = ChatOpenAI(model=self.config["deep_think_llm"])
self.deep_thinking_llm = ChatOpenAI(base_url=self.config['base_url'],model=self.config["deep_think_llm"])
self.quick_thinking_llm = ChatOpenAI(
base_url=self.config['base_url'],
model=self.config["quick_think_llm"], temperature=0.1
)
self.toolkit = Toolkit(config=self.config)