LLM updates
This commit is contained in:
parent
94558f2227
commit
d3f219a1a4
@@ -0,0 +1,3 @@
{
  "kiroAgent.configureMCP": "Disabled"
}
@@ -0,0 +1,22 @@
# Repository Guidelines

## Project Structure & Module Organization
Core package lives under `tradingagents/` with agent roles in `tradingagents/agents`, data acquisition in `tradingagents/dataflows`, and LangGraph orchestration in `tradingagents/graph`. CLI entry points sit in `cli/`; run `python -m cli.main` for the interactive workflow, and see `main.py` for a scripted example. Reference material (images, CLI captures) belongs in `assets/`, while experiment output should land in `results/`. Avoid committing local virtual environments; `tradingagents/python=3.13/` is legacy and should stay untracked. Update shared configs through `tradingagents/default_config.py` and keep per-developer overrides outside the repo.
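For local experiments, a common pattern is to copy `DEFAULT_CONFIG` and override keys in your own script rather than editing the shared file. A minimal sketch, assuming the `TradingAgentsGraph` entry point matches the scripted example in `main.py` (the model names and the ticker/date are illustrative):

```python
from tradingagents.default_config import DEFAULT_CONFIG
from tradingagents.graph.trading_graph import TradingAgentsGraph

# Copy the shared config, then apply per-developer overrides locally.
config = DEFAULT_CONFIG.copy()
config["deep_think_llm"] = "glm-4.7-reap-50-mixed-3-4-bits"
config["quick_think_llm"] = "qwen/qwen3-vl-30b"

ta = TradingAgentsGraph(debug=True, config=config)
_, decision = ta.propagate("NVDA", "2024-05-10")  # illustrative ticker and date
print(decision)
```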

## Build, Test, and Development Commands
- `python -m venv .venv && source .venv/bin/activate`: create and activate a clean Python ≥3.10 environment.
- `pip install -r requirements.txt && pip install -e .`: install runtime dependencies and link the package for local development.
- `python -m cli.main`: launch the console UI to step through ticker selection and agent runs.
- `python main.py`: execute a minimal graph run using `DEFAULT_CONFIG` for smoke-testing pipeline changes.

## Coding Style & Naming Conventions
Follow standard PEP 8: 4-space indentation, lowercase `snake_case` functions, and `CamelCase` classes. Preserve typing hints where present (`Dict[str, Any]` patterns are common) and keep docstrings focused on side effects and arguments. Prefer module-level constants for configuration keys rather than magic strings scattered across functions.
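As an illustration only (the helper and key names below are hypothetical, not from the codebase), a dataflow function following these conventions might look like:

```python
import json
from pathlib import Path
from typing import Any, Dict

# Module-level constant instead of a magic string scattered across functions.
CACHE_DIR_KEY = "data_cache_dir"


def load_cached_quotes(config: Dict[str, Any], ticker: str) -> Dict[str, Any]:
    """Read cached quote data for `ticker`.

    Side effects: reads one JSON file under `config[CACHE_DIR_KEY]`;
    raises FileNotFoundError if the ticker has no cached entry.
    """
    cache_path = Path(config[CACHE_DIR_KEY]) / f"{ticker.upper()}.json"
    return json.loads(cache_path.read_text(encoding="utf-8"))
```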

## Testing Guidelines
`test.py` currently exercises the Yahoo Finance dataflow; extend it or add new scripts under `tests/` when introducing features, keeping names aligned with the module under test (`test_<module>.py`). When you add high-impact functionality, capture CLI or graph outputs in the PR description and include deterministic test cases where possible.
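A sketch of such a script (the file name and assertion are illustrative; like `test.py`, it exercises the Yahoo Finance path and needs network access):

```python
# tests/test_yfin_dataflow.py: named after the dataflow under test (hypothetical).
import pytest

yf = pytest.importorskip("yfinance")  # skip cleanly when the dependency is missing


def test_download_fixed_window_is_nonempty():
    # Pin the ticker and date range so repeated runs stay deterministic.
    frame = yf.download("AAPL", start="2024-01-02", end="2024-01-10", progress=False)
    assert not frame.empty
```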

## Commit & Pull Request Guidelines
Recent history uses short, present-tense summaries (“Latest changes applied”, “utf8 encoding fixed…”). Mirror that tone, lead with the primary change, and keep messages under ~70 characters when feasible. PRs should include a concise summary, configuration considerations, and before/after evidence (logs, screenshots) plus linked issue IDs where applicable.

## Security & Configuration Tips
Load API keys through the environment (`FINNHUB_API_KEY`, `OPENAI_API_KEY`) and never hard-code real secrets; scrub `DEFAULT_CONFIG` before publishing. Regenerate cached data under `tradingagents/dataflows/data_cache` rather than checking large blobs into git. If you script new agents, document required permissions and expected rate limits in the PR to help reviewers assess operational impact.
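A minimal sketch of the environment-driven pattern (the fail-fast check is an illustration, not existing repo behavior):

```python
import os

# Pull secrets from the environment; never hard-code them in DEFAULT_CONFIG.
FINNHUB_API_KEY = os.environ.get("FINNHUB_API_KEY")
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")

if not (FINNHUB_API_KEY and OPENAI_API_KEY):
    raise RuntimeError("Set FINNHUB_API_KEY and OPENAI_API_KEY before running.")
```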
Binary file not shown.
@@ -0,0 +1,6 @@
{
  "name": "TradingAgents",
  "lockfileVersion": 3,
  "requires": true,
  "packages": {}
}
@@ -13,9 +13,9 @@ class FinancialSituationMemory:
         if config["backend_url"] == "http://localhost:11434/v1":
             self.embedding = "nomic-embed-text"
         elif config["backend_url"] == "http://192.168.0.20:1234/v1":
-            self.embedding = "text-embedding-nomic-embed-text-v1.5"
+            self.embedding = "text-embedding-nomic-embed-text-v2-moe"
         else:
-            self.embedding = "ttext-embedding-nomic-embed-text-v1.5"
+            self.embedding = "text-embedding-nomic-embed-text-v2-moe"
 
         self.client = OpenAI(base_url=config["backend_url"])
         self.chroma_client = chromadb.Client(Settings(allow_reset=True))
@@ -22,8 +22,8 @@ DEFAULT_CONFIG = {
 
     # Default LLM is set to local LMStudio instance
     "llm_provider": "lmstudio",
-    "deep_think_llm": "qwen/qwen3-4b-thinking-2507",
-    "quick_think_llm": "openai/gpt-oss-20b",
+    "deep_think_llm": "glm-4.7-reap-50-mixed-3-4-bits",
+    "quick_think_llm": "qwen/qwen3-vl-30b",
     "backend_url": "http://192.168.0.20/v1",
     "api_key": "blablabla",
 
@@ -16,8 +16,8 @@ DEFAULT_CONFIG = {
 
     # Default LLM is set to local LMStudio instance
     "llm_provider": "lmstudio",
-    "deep_think_llm": "qwen/qwen3-4b-thinking-2507",
-    "quick_think_llm": "openai/gpt-oss-20b",
+    "deep_think_llm": "glm-4.7-reap-50-mixed-3-4-bits",
+    "quick_think_llm": "qwen/qwen3-vl-30b",
     "backend_url": "http://192.168.0.20/v1",
     "api_key": "blablabla",
 
@@ -5,12 +5,12 @@ from openai import OpenAI
 
 class FinancialSituationMemory:
     def __init__(self, name, config):
-        if config["backend_url"] == "http://192.168.0.20:11434/v1":
-            self.embedding = "nomic-embed-text"
+        if config["backend_url"] == "http://192.168.0.20:1234/v1":
+            self.embedding = "text-embedding-nomic-embed-text-v2-moe"
         elif config["backend_url"] == "http://192.168.0.20:1234/v1":
-            self.embedding = "text-embedding-nomic-embed-text-v1.5"
+            self.embedding = "text-embedding-nomic-embed-text-v2-moe"
         else:
-            self.embedding = "ttext-embedding-nomic-embed-text-v1.5"
+            self.embedding = "text-embedding-nomic-embed-text-v2-moe"
 
         self.client = OpenAI(base_url=config["backend_url"], api_key=config["api_key"])
         self.chroma_client = chromadb.Client(Settings(allow_reset=True))
@ -0,0 +1,291 @@
|
|||
import questionary
|
||||
from typing import List, Optional, Tuple, Dict
|
||||
|
||||
from cli.models import AnalystType
|
||||
|
||||
ANALYST_ORDER = [
|
||||
("Market Analyst", AnalystType.MARKET),
|
||||
("Social Media Analyst", AnalystType.SOCIAL),
|
||||
("News Analyst", AnalystType.NEWS),
|
||||
("Fundamentals Analyst", AnalystType.FUNDAMENTALS),
|
||||
]
|
||||
|
||||
|
||||
def get_ticker() -> str:
|
||||
"""Prompt the user to enter a ticker symbol."""
|
||||
ticker = questionary.text(
|
||||
"Enter the ticker symbol to analyze:",
|
||||
validate=lambda x: len(x.strip()) > 0 or "Please enter a valid ticker symbol.",
|
||||
style=questionary.Style(
|
||||
[
|
||||
("text", "fg:green"),
|
||||
("highlighted", "noinherit"),
|
||||
]
|
||||
),
|
||||
).ask()
|
||||
|
||||
if not ticker:
|
||||
console.print("\n[red]No ticker symbol provided. Exiting...[/red]")
|
||||
exit(1)
|
||||
|
||||
return ticker.strip().upper()
|
||||
|
||||
|
||||
def get_analysis_date() -> str:
    """Prompt the user to enter a date in YYYY-MM-DD format."""
    import re
    from datetime import datetime

    def validate_date(date_str: str) -> bool:
        if not re.match(r"^\d{4}-\d{2}-\d{2}$", date_str):
            return False
        try:
            datetime.strptime(date_str, "%Y-%m-%d")
            return True
        except ValueError:
            return False

    date = questionary.text(
        "Enter the analysis date (YYYY-MM-DD):",
        validate=lambda x: validate_date(x.strip())
        or "Please enter a valid date in YYYY-MM-DD format.",
        style=questionary.Style(
            [
                ("text", "fg:green"),
                ("highlighted", "noinherit"),
            ]
        ),
    ).ask()

    if not date:
        console.print("\n[red]No date provided. Exiting...[/red]")
        exit(1)

    return date.strip()


def select_analysts() -> List[AnalystType]:
    """Select analysts using an interactive checkbox."""
    choices = questionary.checkbox(
        "Select Your [Analysts Team]:",
        choices=[
            questionary.Choice(display, value=value) for display, value in ANALYST_ORDER
        ],
        instruction="\n- Press Space to select/unselect analysts\n- Press 'a' to select/unselect all\n- Press Enter when done",
        validate=lambda x: len(x) > 0 or "You must select at least one analyst.",
        style=questionary.Style(
            [
                ("checkbox-selected", "fg:green"),
                ("selected", "fg:green noinherit"),
                ("highlighted", "noinherit"),
                ("pointer", "noinherit"),
            ]
        ),
    ).ask()

    if not choices:
        console.print("\n[red]No analysts selected. Exiting...[/red]")
        exit(1)

    return choices


def select_research_depth() -> int:
    """Select research depth using an interactive selection."""

    # Define research depth options with their corresponding values
    DEPTH_OPTIONS = [
        ("Shallow - Quick research, few debate and strategy discussion rounds", 1),
        ("Medium - Middle ground, moderate debate rounds and strategy discussion", 3),
        ("Deep - Comprehensive research, in-depth debate and strategy discussion", 5),
    ]

    choice = questionary.select(
        "Select Your [Research Depth]:",
        choices=[
            questionary.Choice(display, value=value) for display, value in DEPTH_OPTIONS
        ],
        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
        style=questionary.Style(
            [
                ("selected", "fg:yellow noinherit"),
                ("highlighted", "fg:yellow noinherit"),
                ("pointer", "fg:yellow noinherit"),
            ]
        ),
    ).ask()

    if choice is None:
        console.print("\n[red]No research depth selected. Exiting...[/red]")
        exit(1)

    return choice


def select_shallow_thinking_agent(provider) -> str:
    """Select shallow thinking llm engine using an interactive selection."""

    # Define shallow thinking llm engine options with their corresponding model names
    SHALLOW_AGENT_OPTIONS = {
        "openai": [
            ("GPT-4o-mini - Fast and efficient for quick tasks", "gpt-4o-mini"),
            ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
            ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
            ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
        ],
        "anthropic": [
            ("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"),
            ("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"),
            ("Claude Sonnet 3.7 - Exceptional hybrid reasoning and agentic capabilities", "claude-3-7-sonnet-latest"),
            ("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"),
            ("CCR", "openai/gpt-oss-20b"),
        ],
        "google": [
            ("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"),
            ("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
            ("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
        ],
        "openrouter": [
            ("Meta: Llama 4 Scout", "meta-llama/llama-4-scout:free"),
            ("Meta: Llama 3.3 8B Instruct - A lightweight and ultra-fast variant of Llama 3.3 70B", "meta-llama/llama-3.3-8b-instruct:free"),
            ("google/gemini-2.0-flash-exp:free - Gemini Flash 2.0 offers a significantly faster time to first token", "google/gemini-2.0-flash-exp:free"),
        ],
        "ollama": [
            ("llama3.1 local", "llama3.1"),
            ("llama3.2 local", "llama3.2"),
        ],
        "lmstudio": [
            ("LMStudio Qwen 4b Thinking", "qwen/qwen3-4b-thinking-2507"),
            ("LMStudio GLM", "glm-4.5-air-mlx"),
            ("LMStudio OSS 120b", "openai/gpt-oss-120b"),
            ("LMStudio Kimi", "kimi-dev-72b-dwq"),
        ],
    }

    choice = questionary.select(
        "Select Your [Quick-Thinking LLM Engine]:",
        choices=[
            questionary.Choice(display, value=value)
            for display, value in SHALLOW_AGENT_OPTIONS[provider.lower()]
        ],
        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
        style=questionary.Style(
            [
                ("selected", "fg:magenta noinherit"),
                ("highlighted", "fg:magenta noinherit"),
                ("pointer", "fg:magenta noinherit"),
            ]
        ),
    ).ask()

    if choice is None:
        console.print(
            "\n[red]No shallow thinking llm engine selected. Exiting...[/red]"
        )
        exit(1)

    return choice


def select_deep_thinking_agent(provider) -> str:
    """Select deep thinking llm engine using an interactive selection."""

    # Define deep thinking llm engine options with their corresponding model names
    DEEP_AGENT_OPTIONS = {
        "openai": [
            ("GPT-4.1-nano - Ultra-lightweight model for basic operations", "gpt-4.1-nano"),
            ("GPT-4.1-mini - Compact model with good performance", "gpt-4.1-mini"),
            ("GPT-4o - Standard model with solid capabilities", "gpt-4o"),
            ("o4-mini - Specialized reasoning model (compact)", "o4-mini"),
            ("o3-mini - Advanced reasoning model (lightweight)", "o3-mini"),
            ("o3 - Full advanced reasoning model", "o3"),
            ("o1 - Premier reasoning and problem-solving model", "o1"),
        ],
        "anthropic": [
            ("Claude Haiku 3.5 - Fast inference and standard capabilities", "claude-3-5-haiku-latest"),
            ("Claude Sonnet 3.5 - Highly capable standard model", "claude-3-5-sonnet-latest"),
            ("Claude Sonnet 3.7 - Exceptional hybrid reasoning and agentic capabilities", "claude-3-7-sonnet-latest"),
            ("Claude Sonnet 4 - High performance and excellent reasoning", "claude-sonnet-4-0"),
            ("Claude Opus 4 - Most powerful Anthropic model", "claude-opus-4-0"),
        ],
        "google": [
            ("Gemini 2.0 Flash-Lite - Cost efficiency and low latency", "gemini-2.0-flash-lite"),
            ("Gemini 2.0 Flash - Next generation features, speed, and thinking", "gemini-2.0-flash"),
            ("Gemini 2.5 Flash - Adaptive thinking, cost efficiency", "gemini-2.5-flash-preview-05-20"),
            ("Gemini 2.5 Pro", "gemini-2.5-pro-preview-06-05"),
        ],
        "openrouter": [
            ("DeepSeek V3 - a 685B-parameter, mixture-of-experts model", "deepseek/deepseek-chat-v3-0324:free"),
            ("Deepseek - latest iteration of the flagship chat model family from the DeepSeek team.", "deepseek/deepseek-chat-v3-0324:free"),
        ],
        "ollama": [
            ("llama3.1 local", "llama3.1"),
            ("qwen3", "qwen3"),
        ],
        "lmstudio": [
            ("LMStudio Qwen 4b Thinking", "qwen/qwen3-4b-thinking-2507"),
            ("LMStudio GLM", "glm-4.5-air-mlx"),
            ("LMStudio OSS 20b", "openai/gpt-oss-20b"),
            ("LMStudio Kimi", "kimi-dev-72b-dwq"),
        ],
    }

    choice = questionary.select(
        "Select Your [Deep-Thinking LLM Engine]:",
        choices=[
            questionary.Choice(display, value=value)
            for display, value in DEEP_AGENT_OPTIONS[provider.lower()]
        ],
        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
        style=questionary.Style(
            [
                ("selected", "fg:magenta noinherit"),
                ("highlighted", "fg:magenta noinherit"),
                ("pointer", "fg:magenta noinherit"),
            ]
        ),
    ).ask()

    if choice is None:
        console.print("\n[red]No deep thinking llm engine selected. Exiting...[/red]")
        exit(1)

    return choice


def select_llm_provider() -> tuple[str, str]:
    """Select the LLM provider and its API endpoint using an interactive selection."""
    # Define the provider options with their corresponding endpoints
    BASE_URLS = [
        ("LMStudio", "http://192.168.0.20:1234/v1"),
        ("OpenAI Local", "http://192.168.0.20:1234/v1"),
        ("OpenAI", "https://api.openai.com/v1"),
        ("Anthropic", "https://api.anthropic.com/"),
        ("Google", "https://generativelanguage.googleapis.com/v1"),
        ("Openrouter", "https://openrouter.ai/api/v1"),
        ("Ollama", "http://localhost:11434/v1"),
    ]

    choice = questionary.select(
        "Select your LLM Provider:",
        choices=[
            questionary.Choice(display, value=(display, value))
            for display, value in BASE_URLS
        ],
        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
        style=questionary.Style(
            [
                ("selected", "fg:magenta noinherit"),
                ("highlighted", "fg:magenta noinherit"),
                ("pointer", "fg:magenta noinherit"),
            ]
        ),
    ).ask()

    if choice is None:
        console.print("\n[red]No LLM provider selected. Exiting...[/red]")
        exit(1)

    display_name, url = choice
    print(f"You selected: {display_name}\tURL: {url}")

    return display_name, url