feat: dynamic OpenRouter model selection with search (#482, #337)

This commit is contained in:
Yijia-Xiao 2026-04-04 07:56:44 +00:00
parent bdb9c29d44
commit 4f965bf46a
No known key found for this signature in database
2 changed files with 48 additions and 10 deletions

View File

@ -134,9 +134,52 @@ def select_research_depth() -> int:
return choice return choice
def _fetch_openrouter_models() -> List[Tuple[str, str]]:
    """Fetch available models from the OpenRouter API, newest first.

    Returns:
        A list of ``(display_name, model_id)`` tuples sorted by each model's
        ``created`` unix timestamp (newest first), or an empty list when the
        request fails for any reason (network error, HTTP error, bad payload).
    """
    # Local import keeps CLI startup cheap and avoids a hard dependency at
    # module import time.
    import requests
    try:
        resp = requests.get("https://openrouter.ai/api/v1/models", timeout=10)
        resp.raise_for_status()
        models = resp.json().get("data", [])
        # The API does not guarantee ordering; sort by `created` descending so
        # callers taking a prefix (e.g. models[:5]) truly get the newest ones.
        models.sort(key=lambda m: m.get("created", 0), reverse=True)
        return [(m.get("name") or m["id"], m["id"]) for m in models]
    except Exception as e:
        # Best-effort: the caller falls back to a custom-model-ID prompt.
        console.print(f"\n[yellow]Could not fetch OpenRouter models: {e}[/yellow]")
        return []
def select_openrouter_model() -> str:
    """Select an OpenRouter model from the newest available, or enter a custom ID.

    Presents the five newest models fetched from the OpenRouter API plus a
    "Custom model ID" escape hatch. If the fetch failed, only the custom
    option is shown.

    Returns:
        The chosen OpenRouter model ID. May be an empty string if the user
        cancels the custom-ID prompt (e.g. Ctrl-C), instead of crashing.
    """
    models = _fetch_openrouter_models()
    choices = [questionary.Choice(name, value=mid) for name, mid in models[:5]]
    choices.append(questionary.Choice("Custom model ID", value="custom"))
    choice = questionary.select(
        "Select OpenRouter Model (latest available):",
        choices=choices,
        instruction="\n- Use arrow keys to navigate\n- Press Enter to select",
        style=questionary.Style([
            ("selected", "fg:magenta noinherit"),
            ("highlighted", "fg:magenta noinherit"),
            ("pointer", "fg:magenta noinherit"),
        ]),
    ).ask()
    if choice is None or choice == "custom":
        custom = questionary.text(
            "Enter OpenRouter model ID (e.g. google/gemma-4-26b-a4b-it):",
            validate=lambda x: len(x.strip()) > 0 or "Please enter a model ID.",
        ).ask()
        # questionary's .ask() returns None when the prompt is cancelled;
        # calling .strip() on it directly would raise AttributeError.
        return (custom or "").strip()
    return choice
def select_shallow_thinking_agent(provider) -> str: def select_shallow_thinking_agent(provider) -> str:
"""Select shallow thinking llm engine using an interactive selection.""" """Select shallow thinking llm engine using an interactive selection."""
if provider.lower() == "openrouter":
return select_openrouter_model()
choice = questionary.select( choice = questionary.select(
"Select Your [Quick-Thinking LLM Engine]:", "Select Your [Quick-Thinking LLM Engine]:",
choices=[ choices=[
@ -165,6 +208,9 @@ def select_shallow_thinking_agent(provider) -> str:
def select_deep_thinking_agent(provider) -> str: def select_deep_thinking_agent(provider) -> str:
"""Select deep thinking llm engine using an interactive selection.""" """Select deep thinking llm engine using an interactive selection."""
if provider.lower() == "openrouter":
return select_openrouter_model()
choice = questionary.select( choice = questionary.select(
"Select Your [Deep-Thinking LLM Engine]:", "Select Your [Deep-Thinking LLM Engine]:",
choices=[ choices=[

View File

@ -63,16 +63,8 @@ MODEL_OPTIONS: ProviderModeOptions = {
("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"), ("Grok 4.1 Fast (Non-Reasoning) - Speed optimized, 2M ctx", "grok-4-1-fast-non-reasoning"),
], ],
}, },
"openrouter": { # OpenRouter models are fetched dynamically at CLI runtime.
"quick": [ # No static entries needed; any model ID is accepted by the validator.
("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"),
("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"),
],
"deep": [
("Z.AI GLM 4.5 Air (free)", "z-ai/glm-4.5-air:free"),
("NVIDIA Nemotron 3 Nano 30B (free)", "nvidia/nemotron-3-nano-30b-a3b:free"),
],
},
"ollama": { "ollama": {
"quick": [ "quick": [
("Qwen3:latest (8B, local)", "qwen3:latest"), ("Qwen3:latest (8B, local)", "qwen3:latest"),