diff --git a/cli/utils.py b/cli/utils.py
index 15c4a056..e071ce06 100644
--- a/cli/utils.py
+++ b/cli/utils.py
@@ -134,9 +134,56 @@ def select_research_depth() -> int:
     return choice
 
 
+def _fetch_openrouter_models() -> List[Tuple[str, str]]:
+    """Fetch available models from the OpenRouter API as (display name, model id) pairs."""
+    # Imported lazily so the CLI still loads when `requests` is absent or offline.
+    import requests
+    try:
+        resp = requests.get("https://openrouter.ai/api/v1/models", timeout=10)
+        resp.raise_for_status()
+        models = resp.json().get("data", [])
+        # Fall back to the id when a model has no human-readable name.
+        return [(m.get("name") or m["id"], m["id"]) for m in models]
+    except Exception as e:
+        # Best-effort: network/API failure degrades to the "custom id" prompt.
+        console.print(f"\n[yellow]Could not fetch OpenRouter models: {e}[/yellow]")
+        return []
+
+
+def select_openrouter_model() -> str:
+    """Select an OpenRouter model from the newest available, or enter a custom ID.
+
+    Returns the chosen OpenRouter model id (e.g. "google/gemma-4-26b-a4b-it").
+    Exits the process if the user cancels the custom-id prompt.
+    """
+    models = _fetch_openrouter_models()
+
+    # Offer the first five models the API returns, plus a free-form escape hatch.
+    choices = [questionary.Choice(name, value=mid) for name, mid in models[:5]]
+    choices.append(questionary.Choice("Custom model ID", value="custom"))
+
+    choice = questionary.select(
+        "Select OpenRouter Model (latest available):",
+        choices=choices,
+        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
+        style=questionary.Style([
+            ("selected", "fg:magenta noinherit"),
+            ("highlighted", "fg:magenta noinherit"),
+            ("pointer", "fg:magenta noinherit"),
+        ]),
+    ).ask()
+
+    # A cancelled select (None) also falls through to manual entry.
+    if choice is None or choice == "custom":
+        custom = questionary.text(
+            "Enter OpenRouter model ID (e.g. google/gemma-4-26b-a4b-it):",
+            validate=lambda x: len(x.strip()) > 0 or "Please enter a model ID.",
+        ).ask()
+        # .ask() returns None on Ctrl-C/EOF; calling .strip() on it would crash.
+        if custom is None:
+            console.print("\n[red]No model selected. Exiting...[/red]")
+            exit(1)
+        return custom.strip()
+
+    return choice
+
+
 def select_shallow_thinking_agent(provider) -> str:
     """Select shallow thinking llm engine using an interactive selection."""
+    # OpenRouter has no static catalog; pick from the live model list instead.
+    if provider.lower() == "openrouter":
+        return select_openrouter_model()
+
     choice = questionary.select(
         "Select Your [Quick-Thinking LLM Engine]:",
         choices=[
@@ -165,6 +212,9 @@ def select_shallow_thinking_agent(provider) -> str:
 
 def select_deep_thinking_agent(provider) -> str:
     """Select deep thinking llm engine using an interactive selection."""
+    # OpenRouter has no static catalog; pick from the live model list instead.
+    if provider.lower() == "openrouter":
+        return select_openrouter_model()
+
     choice = questionary.select(
         "Select Your [Deep-Thinking LLM Engine]:",
         choices=[
diff --git a/tradingagents/llm_clients/model_catalog.py b/tradingagents/llm_clients/model_catalog.py
index 91e1659c..fd91c66d 100644
--- a/tradingagents/llm_clients/model_catalog.py
+++ b/tradingagents/llm_clients/model_catalog.py
@@ -63,16 +63,8 @@ MODEL_OPTIONS: ProviderModeOptions = {
             ("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"),
         ],
     },
-    "openrouter": {
-        "quick": [
-            ("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"),
-            ("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"),
-        ],
-        "deep": [
-            ("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"),
-            ("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"),
-        ],
-    },
+    # OpenRouter models are fetched dynamically at CLI runtime.
+    # No static entries needed; any model ID is accepted by the validator.
     "ollama": {
         "quick": [
             ("Qwen3:latest (8B, local)", "qwen3:latest"),