fix: Add MiniMax support, fix Windows Unicode errors, and correct pandas import bugs

This commit is contained in:
ElMoorish 2026-04-05 01:57:53 +01:00
parent 4641c03340
commit 35d2a7d995
7 changed files with 157 additions and 293 deletions

226
.gitignore vendored
View File

@@ -1,219 +1,9 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[codz]
*$py.class
# C extensions
*.so
# Distribution / packaging
.Python
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
wheels/
share/python-wheels/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
htmlcov/
.tox/
.nox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover
*.py.cover
.hypothesis/
.pytest_cache/
cover/
# Translations
*.mo
*.pot
# Django stuff:
*.log
local_settings.py
db.sqlite3
db.sqlite3-journal
# Flask stuff:
instance/
.webassets-cache
# Scrapy stuff:
.scrapy
# Sphinx documentation
docs/_build/
# PyBuilder
.pybuilder/
target/
# Jupyter Notebook
.ipynb_checkpoints
# IPython
profile_default/
ipython_config.py
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:
# .python-version
# pipenv
# According to pypa/pipenv#598, it is recommended to include Pipfile.lock in version control.
# However, in case of collaboration, if having platform-specific dependencies or dependencies
# having no cross-platform support, pipenv may install dependencies that don't work, or not
# install all needed dependencies.
# Pipfile.lock
# UV
# Similar to Pipfile.lock, it is generally recommended to include uv.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# uv.lock
# poetry
# Similar to Pipfile.lock, it is generally recommended to include poetry.lock in version control.
# This is especially recommended for binary packages to ensure reproducibility, and is more
# commonly ignored for libraries.
# https://python-poetry.org/docs/basic-usage/#commit-your-poetrylock-file-to-version-control
# poetry.lock
# poetry.toml
# pdm
# Similar to Pipfile.lock, it is generally recommended to include pdm.lock in version control.
# pdm recommends including project-wide configuration in pdm.toml, but excluding .pdm-python.
# https://pdm-project.org/en/latest/usage/project/#working-with-version-control
# pdm.lock
# pdm.toml
.pdm-python
.pdm-build/
# pixi
# Similar to Pipfile.lock, it is generally recommended to include pixi.lock in version control.
# pixi.lock
# Pixi creates a virtual environment in the .pixi directory, just like venv module creates one
# in the .venv directory. It is recommended not to include this directory in version control.
.pixi
# PEP 582; used by e.g. github.com/David-OConnor/pyflow and github.com/pdm-project/pdm
__pypackages__/
# Celery stuff
celerybeat-schedule
celerybeat.pid
# Redis
*.rdb
*.aof
*.pid
# RabbitMQ
mnesia/
rabbitmq/
rabbitmq-data/
# ActiveMQ
activemq-data/
# SageMath parsed files
*.sage.py
# Environments
.env
.envrc
.venv
env/
venv/
ENV/
env.bak/
venv.bak/
# Spyder project settings
.spyderproject
.spyproject
# Rope project settings
.ropeproject
# mkdocs documentation
/site
# mypy
.mypy_cache/
.dmypy.json
dmypy.json
# Pyre type checker
.pyre/
# pytype static type analyzer
.pytype/
# Cython debug symbols
cython_debug/
# PyCharm
# JetBrains specific template is maintained in a separate JetBrains.gitignore that can
# be found at https://github.com/github/gitignore/blob/main/Global/JetBrains.gitignore
# and can be added to the global gitignore or merged into this file. For a more nuclear
# option (not recommended) you can uncomment the following to ignore the entire idea folder.
# .idea/
# Abstra
# Abstra is an AI-powered process automation framework.
# Ignore directories containing user credentials, local state, and settings.
# Learn more at https://abstra.io/docs
.abstra/
# Visual Studio Code
# Visual Studio Code specific template is maintained in a separate VisualStudioCode.gitignore
# that can be found at https://github.com/github/gitignore/blob/main/Global/VisualStudioCode.gitignore
# and can be added to the global gitignore or merged into this file. However, if you prefer,
# you could uncomment the following to ignore the entire vscode folder
# .vscode/
# Ruff stuff:
.ruff_cache/
# PyPI configuration file
.pypirc
# Marimo
marimo/_static/
marimo/_lsp/
__marimo__/
# Streamlit
.streamlit/secrets.toml
# Cache
**/data_cache/
.env
__pycache__/
*.egg-info/
reports/
results/
build/
dist/
.DS_Store

View File

@@ -645,19 +645,19 @@ def save_report_to_disk(final_state, ticker: str, save_path: Path):
analyst_parts = []
if final_state.get("market_report"):
analysts_dir.mkdir(exist_ok=True)
(analysts_dir / "market.md").write_text(final_state["market_report"])
(analysts_dir / "market.md").write_text(final_state["market_report"], encoding="utf-8")
analyst_parts.append(("Market Analyst", final_state["market_report"]))
if final_state.get("sentiment_report"):
analysts_dir.mkdir(exist_ok=True)
(analysts_dir / "sentiment.md").write_text(final_state["sentiment_report"])
(analysts_dir / "sentiment.md").write_text(final_state["sentiment_report"], encoding="utf-8")
analyst_parts.append(("Social Analyst", final_state["sentiment_report"]))
if final_state.get("news_report"):
analysts_dir.mkdir(exist_ok=True)
(analysts_dir / "news.md").write_text(final_state["news_report"])
(analysts_dir / "news.md").write_text(final_state["news_report"], encoding="utf-8")
analyst_parts.append(("News Analyst", final_state["news_report"]))
if final_state.get("fundamentals_report"):
analysts_dir.mkdir(exist_ok=True)
(analysts_dir / "fundamentals.md").write_text(final_state["fundamentals_report"])
(analysts_dir / "fundamentals.md").write_text(final_state["fundamentals_report"], encoding="utf-8")
analyst_parts.append(("Fundamentals Analyst", final_state["fundamentals_report"]))
if analyst_parts:
content = "\n\n".join(f"### {name}\n{text}" for name, text in analyst_parts)
@@ -670,15 +670,15 @@ def save_report_to_disk(final_state, ticker: str, save_path: Path):
research_parts = []
if debate.get("bull_history"):
research_dir.mkdir(exist_ok=True)
(research_dir / "bull.md").write_text(debate["bull_history"])
(research_dir / "bull.md").write_text(debate["bull_history"], encoding="utf-8")
research_parts.append(("Bull Researcher", debate["bull_history"]))
if debate.get("bear_history"):
research_dir.mkdir(exist_ok=True)
(research_dir / "bear.md").write_text(debate["bear_history"])
(research_dir / "bear.md").write_text(debate["bear_history"], encoding="utf-8")
research_parts.append(("Bear Researcher", debate["bear_history"]))
if debate.get("judge_decision"):
research_dir.mkdir(exist_ok=True)
(research_dir / "manager.md").write_text(debate["judge_decision"])
(research_dir / "manager.md").write_text(debate["judge_decision"], encoding="utf-8")
research_parts.append(("Research Manager", debate["judge_decision"]))
if research_parts:
content = "\n\n".join(f"### {name}\n{text}" for name, text in research_parts)
@@ -688,7 +688,7 @@ def save_report_to_disk(final_state, ticker: str, save_path: Path):
if final_state.get("trader_investment_plan"):
trading_dir = save_path / "3_trading"
trading_dir.mkdir(exist_ok=True)
(trading_dir / "trader.md").write_text(final_state["trader_investment_plan"])
(trading_dir / "trader.md").write_text(final_state["trader_investment_plan"], encoding="utf-8")
sections.append(f"## III. Trading Team Plan\n\n### Trader\n{final_state['trader_investment_plan']}")
# 4. Risk Management
@@ -698,15 +698,15 @@ def save_report_to_disk(final_state, ticker: str, save_path: Path):
risk_parts = []
if risk.get("aggressive_history"):
risk_dir.mkdir(exist_ok=True)
(risk_dir / "aggressive.md").write_text(risk["aggressive_history"])
(risk_dir / "aggressive.md").write_text(risk["aggressive_history"], encoding="utf-8")
risk_parts.append(("Aggressive Analyst", risk["aggressive_history"]))
if risk.get("conservative_history"):
risk_dir.mkdir(exist_ok=True)
(risk_dir / "conservative.md").write_text(risk["conservative_history"])
(risk_dir / "conservative.md").write_text(risk["conservative_history"], encoding="utf-8")
risk_parts.append(("Conservative Analyst", risk["conservative_history"]))
if risk.get("neutral_history"):
risk_dir.mkdir(exist_ok=True)
(risk_dir / "neutral.md").write_text(risk["neutral_history"])
(risk_dir / "neutral.md").write_text(risk["neutral_history"], encoding="utf-8")
risk_parts.append(("Neutral Analyst", risk["neutral_history"]))
if risk_parts:
content = "\n\n".join(f"### {name}\n{text}" for name, text in risk_parts)
@@ -716,12 +716,12 @@ def save_report_to_disk(final_state, ticker: str, save_path: Path):
if risk.get("judge_decision"):
portfolio_dir = save_path / "5_portfolio"
portfolio_dir.mkdir(exist_ok=True)
(portfolio_dir / "decision.md").write_text(risk["judge_decision"])
(portfolio_dir / "decision.md").write_text(risk["judge_decision"], encoding="utf-8")
sections.append(f"## V. Portfolio Manager Decision\n\n### Portfolio Manager\n{risk['judge_decision']}")
# Write consolidated report
header = f"# Trading Analysis Report: {ticker}\n\nGenerated: {datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S')}\n\n"
(save_path / "complete_report.md").write_text(header + "\n\n".join(sections))
(save_path / "complete_report.md").write_text(header + "\n\n".join(sections), encoding="utf-8")
return save_path / "complete_report.md"
@@ -979,7 +979,7 @@ def run_analysis():
func(*args, **kwargs)
timestamp, message_type, content = obj.messages[-1]
content = content.replace("\n", " ") # Replace newlines with spaces
with open(log_file, "a") as f:
with open(log_file, "a", encoding="utf-8") as f:
f.write(f"{timestamp} [{message_type}] {content}\n")
return wrapper
@@ -990,7 +990,7 @@ def run_analysis():
func(*args, **kwargs)
timestamp, tool_name, args = obj.tool_calls[-1]
args_str = ", ".join(f"{k}={v}" for k, v in args.items())
with open(log_file, "a") as f:
with open(log_file, "a", encoding="utf-8") as f:
f.write(f"{timestamp} [Tool Call] {tool_name}({args_str})\n")
return wrapper
@@ -1004,7 +1004,7 @@ def run_analysis():
if content:
file_name = f"{section_name}.md"
text = "\n".join(str(item) for item in content) if isinstance(content, list) else content
with open(report_dir / file_name, "w") as f:
with open(report_dir / file_name, "w", encoding="utf-8") as f:
f.write(text)
return wrapper

View File

@@ -187,23 +187,29 @@ def select_deep_thinking_agent(provider) -> str:
return choice
def select_llm_provider() -> tuple[str, str]:
"""Select the OpenAI api url using interactive selection."""
# Define OpenAI api options with their corresponding endpoints
BASE_URLS = [
("OpenAI", "https://api.openai.com/v1"),
("Google", "https://generativelanguage.googleapis.com/v1"),
("Anthropic", "https://api.anthropic.com/"),
("xAI", "https://api.x.ai/v1"),
("Openrouter", "https://openrouter.ai/api/v1"),
("Ollama", "http://localhost:11434/v1"),
("OpenAI", "openai", "https://api.openai.com/v1"),
("Google", "google", "https://generativelanguage.googleapis.com/v1"),
("Anthropic", "anthropic", "https://api.anthropic.com/"),
("xAI", "xai", "https://api.x.ai/v1"),
("Openrouter", "openrouter", "https://openrouter.ai/api/v1"),
("Ollama", "ollama", "http://localhost:11434/v1"),
(
"Kilo Gateway (MiniMax M2.5 Free)",
"minimax",
"https://api.kilo.ai/api/gateway",
),
]
choice = questionary.select(
"Select your LLM Provider:",
choices=[
questionary.Choice(display, value=(display, value))
for display, value in BASE_URLS
questionary.Choice(display, value=(provider, url))
for display, provider, url in BASE_URLS
],
instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
style=questionary.Style(
@@ -214,15 +220,15 @@ def select_llm_provider() -> tuple[str, str]:
]
),
).ask()
if choice is None:
console.print("\n[red]no OpenAI backend selected. Exiting...[/red]")
exit(1)
display_name, url = choice
print(f"You selected: {display_name}\tURL: {url}")
return display_name, url
provider_name, url = choice
print(f"You selected: {provider_name}\tURL: {url}")
return provider_name, url
def ask_openai_reasoning_effort() -> str:
@@ -235,11 +241,13 @@ def ask_openai_reasoning_effort() -> str:
return questionary.select(
"Select Reasoning Effort:",
choices=choices,
style=questionary.Style([
("selected", "fg:cyan noinherit"),
("highlighted", "fg:cyan noinherit"),
("pointer", "fg:cyan noinherit"),
]),
style=questionary.Style(
[
("selected", "fg:cyan noinherit"),
("highlighted", "fg:cyan noinherit"),
("pointer", "fg:cyan noinherit"),
]
),
).ask()
@@ -255,11 +263,13 @@ def ask_anthropic_effort() -> str | None:
questionary.Choice("Medium (balanced)", "medium"),
questionary.Choice("Low (faster, cheaper)", "low"),
],
style=questionary.Style([
("selected", "fg:cyan noinherit"),
("highlighted", "fg:cyan noinherit"),
("pointer", "fg:cyan noinherit"),
]),
style=questionary.Style(
[
("selected", "fg:cyan noinherit"),
("highlighted", "fg:cyan noinherit"),
("pointer", "fg:cyan noinherit"),
]
),
).ask()
@@ -275,11 +285,13 @@ def ask_gemini_thinking_config() -> str | None:
questionary.Choice("Enable Thinking (recommended)", "high"),
questionary.Choice("Minimal/Disable Thinking", "minimal"),
],
style=questionary.Style([
("selected", "fg:green noinherit"),
("highlighted", "fg:green noinherit"),
("pointer", "fg:green noinherit"),
]),
style=questionary.Style(
[
("selected", "fg:green noinherit"),
("highlighted", "fg:green noinherit"),
("pointer", "fg:green noinherit"),
]
),
).ask()
@@ -301,17 +313,24 @@ def ask_output_language() -> str:
questionary.Choice("Russian (Русский)", "Russian"),
questionary.Choice("Custom language", "custom"),
],
style=questionary.Style([
("selected", "fg:yellow noinherit"),
("highlighted", "fg:yellow noinherit"),
("pointer", "fg:yellow noinherit"),
]),
style=questionary.Style(
[
("selected", "fg:yellow noinherit"),
("highlighted", "fg:yellow noinherit"),
("pointer", "fg:yellow noinherit"),
]
),
).ask()
if choice == "custom":
return questionary.text(
"Enter language name (e.g. Turkish, Vietnamese, Thai, Indonesian):",
validate=lambda x: len(x.strip()) > 0 or "Please enter a language name.",
).ask().strip()
return (
questionary.text(
"Enter language name (e.g. Turkish, Vietnamese, Thai, Indonesian):",
validate=lambda x: len(x.strip()) > 0
or "Please enter a language name.",
)
.ask()
.strip()
)
return choice

View File

@@ -2,6 +2,7 @@ from typing import Annotated
from datetime import datetime
from dateutil.relativedelta import relativedelta
import yfinance as yf
import pandas as pd
import os
from .stockstats_utils import StockstatsUtils, _clean_dataframe, yf_retry, load_ohlcv, filter_financials_by_date

View File

@@ -34,7 +34,7 @@ def create_llm_client(
"""
provider_lower = provider.lower()
if provider_lower in ("openai", "ollama", "openrouter"):
if provider_lower in ("openai", "ollama", "openrouter", "minimax"):
return OpenAIClient(model, base_url, provider=provider_lower, **kwargs)
if provider_lower == "xai":

View File

@@ -20,19 +20,31 @@ MODEL_OPTIONS: ProviderModeOptions = {
("GPT-5.4 - Latest frontier, 1M context", "gpt-5.4"),
("GPT-5.2 - Strong reasoning, cost-effective", "gpt-5.2"),
("GPT-5.4 Mini - Fast, strong coding and tool use", "gpt-5.4-mini"),
("GPT-5.4 Pro - Most capable, expensive ($30/$180 per 1M tokens)", "gpt-5.4-pro"),
(
"GPT-5.4 Pro - Most capable, expensive ($30/$180 per 1M tokens)",
"gpt-5.4-pro",
),
],
},
"anthropic": {
"quick": [
("Claude Sonnet 4.6 - Best speed and intelligence balance", "claude-sonnet-4-6"),
(
"Claude Sonnet 4.6 - Best speed and intelligence balance",
"claude-sonnet-4-6",
),
("Claude Haiku 4.5 - Fast, near-instant responses", "claude-haiku-4-5"),
("Claude Sonnet 4.5 - Agents and coding", "claude-sonnet-4-5"),
],
"deep": [
("Claude Opus 4.6 - Most intelligent, agents and coding", "claude-opus-4-6"),
(
"Claude Opus 4.6 - Most intelligent, agents and coding",
"claude-opus-4-6",
),
("Claude Opus 4.5 - Premium, max intelligence", "claude-opus-4-5"),
("Claude Sonnet 4.6 - Best speed and intelligence balance", "claude-sonnet-4-6"),
(
"Claude Sonnet 4.6 - Best speed and intelligence balance",
"claude-sonnet-4-6",
),
("Claude Sonnet 4.5 - Agents and coding", "claude-sonnet-4-5"),
],
},
@@ -40,11 +52,17 @@ MODEL_OPTIONS: ProviderModeOptions = {
"quick": [
("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"),
("Gemini 2.5 Flash - Balanced, stable", "gemini-2.5-flash"),
("Gemini 3.1 Flash Lite - Most cost-efficient", "gemini-3.1-flash-lite-preview"),
(
"Gemini 3.1 Flash Lite - Most cost-efficient",
"gemini-3.1-flash-lite-preview",
),
("Gemini 2.5 Flash Lite - Fast, low-cost", "gemini-2.5-flash-lite"),
],
"deep": [
("Gemini 3.1 Pro - Reasoning-first, complex workflows", "gemini-3.1-pro-preview"),
(
"Gemini 3.1 Pro - Reasoning-first, complex workflows",
"gemini-3.1-pro-preview",
),
("Gemini 3 Flash - Next-gen fast", "gemini-3-flash-preview"),
("Gemini 2.5 Pro - Stable pro model", "gemini-2.5-pro"),
("Gemini 2.5 Flash - Balanced, stable", "gemini-2.5-flash"),
@@ -52,25 +70,46 @@
},
"xai": {
"quick": [
("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"),
("Grok 4 Fast (Non-Reasoning) - Speed optimized", "grok-4-fast-non-reasoning"),
("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"),
(
"Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx",
"grok-4-1-fast-non-reasoning",
),
(
"Grok 4 Fast (Non-Reasoning) - Speed optimized",
"grok-4-fast-non-reasoning",
),
(
"Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx",
"grok-4-1-fast-reasoning",
),
],
"deep": [
("Grok 4 - Flagship model", "grok-4-0709"),
("Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx", "grok-4-1-fast-reasoning"),
(
"Grok 4.1 Fast (Reasoning) - High-performance, 2M ctx",
"grok-4-1-fast-reasoning",
),
("Grok 4 Fast (Reasoning) - High-performance", "grok-4-fast-reasoning"),
("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"),
(
"Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx",
"grok-4-1-fast-non-reasoning",
),
],
},
"openrouter": {
"quick": [
("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"),
(
"NVIDIA Nemotron 3 Nano 30B (free)",
"nvidia/nemotron-3-nano-30b-a3b:free",
),
("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"),
],
"deep": [
("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"),
("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"),
(
"NVIDIA Nemotron 3 Nano 30B (free)",
"nvidia/nemotron-3-nano-30b-a3b:free",
),
],
},
"ollama": {
@@ -85,6 +124,18 @@ MODEL_OPTIONS: ProviderModeOptions = {
("Qwen3:latest (8B, local)", "qwen3:latest"),
],
},
"minimax": {
"quick": [
("Kilo Auto Free (Limited Free Inference)", "kilo-auto/free"),
("Kilo Auto Balanced (Affordable Inference)", "kilo-auto/balanced"),
("MiniMax M2.7 (via Kilo Gateway)", "minimax/minimax-m2.7"),
],
"deep": [
("MiniMax M2.7 (via Kilo Gateway)", "minimax/minimax-m2.7"),
("Kilo Auto Balanced (Affordable Inference)", "kilo-auto/balanced"),
("Kilo Auto Free (Limited Free Inference)", "kilo-auto/free"),
],
},
}
@@ -97,11 +148,7 @@ def get_known_models() -> Dict[str, List[str]]:
"""Build known model names from the shared CLI catalog."""
return {
provider: sorted(
{
value
for options in mode_options.values()
for _, value in options
}
{value for options in mode_options.values() for _, value in options}
)
for provider, mode_options in MODEL_OPTIONS.items()
}

View File

@@ -18,10 +18,16 @@ class NormalizedChatOpenAI(ChatOpenAI):
def invoke(self, input, config=None, **kwargs):
return normalize_content(super().invoke(input, config, **kwargs))
# Kwargs forwarded from user config to ChatOpenAI
_PASSTHROUGH_KWARGS = (
"timeout", "max_retries", "reasoning_effort",
"api_key", "callbacks", "http_client", "http_async_client",
"timeout",
"max_retries",
"reasoning_effort",
"api_key",
"callbacks",
"http_client",
"http_async_client",
)
# Provider base URLs and API key env vars
@@ -29,6 +35,7 @@ _PROVIDER_CONFIG = {
"xai": ("https://api.x.ai/v1", "XAI_API_KEY"),
"openrouter": ("https://openrouter.ai/api/v1", "OPENROUTER_API_KEY"),
"ollama": ("http://localhost:11434/v1", None),
"minimax": ("https://api.kilo.ai/api/gateway", "KILO_API_KEY"),
}