feat(llm): add LM Studio provider support and refactor client factory

- Add LM Studio as a new provider option in config.json
- Introduce LLM_PROVIDER_TYPES configuration for provider-to-client mapping
- Refactor factory.py to use centralized provider type configuration
- Add results and reports directories to .gitignore

The refactor centralizes provider configuration, making it easier to add new providers in the future without modifying the factory logic. LM Studio support enables local model hosting integration.
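As a quick illustration of the new provider path, selecting LM Studio now goes through the same factory call as every other provider. A minimal sketch, assuming the factory module's import path (not shown in this diff) and LM Studio's usual local endpoint on port 1234:

# Sketch only: the import path and base_url are assumptions, not part of this commit.
from llm.factory import create_llm_client   # placeholder import path

client = create_llm_client(
    provider="lmstudio",
    model="openai/gpt-oss-20b",            # one of the models added to config.json below
    base_url="http://localhost:1234/v1",   # LM Studio's default OpenAI-compatible server (assumed)
)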
Author: Maytekin
Date:   2026-03-24 21:54:46 +00:00
Parent: 4c5f0d8ae4
Commit: 244a986e83
3 changed files with 66 additions and 6 deletions

.gitignore (+4)

@@ -82,6 +82,10 @@ target/
profile_default/
ipython_config.py
# Results
results
reports
# pyenv
# For a library or package, you might want to ignore these files since the code is
# intended to run in multiple environments; otherwise, check them in:

config.json

@@ -39,6 +39,10 @@
      ["Qwen3:latest (8B, local)", "qwen3:latest"],
      ["GPT-OSS:latest (20B, local)", "gpt-oss:latest"],
      ["GLM-4.7-Flash:latest (30B, local)", "glm-4.7-flash:latest"]
    ],
    "lmstudio": [
      ["qwen3.5-35b-a3b", "qwen/qwen3.5-35b-a3b"],
      ["openai/gpt-oss-20b", "openai/gpt-oss-20b"]
    ]
  },
  "DEEP_AGENT_OPTIONS": {
@@ -74,11 +78,24 @@
      ["GLM-4.7-Flash:latest (30B, local)", "glm-4.7-flash:latest"],
      ["GPT-OSS:latest (20B, local)", "gpt-oss:latest"],
      ["Qwen3:latest (8B, local)", "qwen3:latest"]
    ],
    "lmstudio": [
      ["qwen3.5-35b-a3b", "qwen/qwen3.5-35b-a3b"],
      ["openai/gpt-oss-20b", "openai/gpt-oss-20b"]
    ]
  },
  "DEFAULT_LLM_SETTINGS": {
    "llm_provider": "openai",
    "deep_think_llm": "gpt-5.2",
    "quick_think_llm": "gpt-5-mini"
  },
  "LLM_PROVIDER_TYPES": {
    "openai": "openai",
    "anthropic": "anthropic",
    "google": "google",
    "xai": "openai",
    "openrouter": "openai",
    "ollama": "openai",
    "lmstudio": "openai"
  }
}
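Mapping "lmstudio" to the "openai" client type works because LM Studio serves an OpenAI-compatible API. As a sanity check outside the factory, something like the following should talk to a local LM Studio instance; the port and the placeholder API key are assumptions (LM Studio normally ignores the key):

# Hedged sketch using the stock openai client against LM Studio's local server.
from openai import OpenAI

client = OpenAI(base_url="http://localhost:1234/v1", api_key="lm-studio")  # key value is arbitrary
resp = client.chat.completions.create(
    model="openai/gpt-oss-20b",   # one of the entries added above
    messages=[{"role": "user", "content": "Reply with 'ok'."}],
)
print(resp.choices[0].message.content)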

factory.py

@@ -1,3 +1,5 @@
import json
from pathlib import Path
from typing import Optional
from .base_client import BaseLLMClient
@@ -5,6 +7,45 @@ from .openai_client import OpenAIClient
from .anthropic_client import AnthropicClient
from .google_client import GoogleClient
CONFIG_PATH = Path(__file__).resolve().parents[2] / "config.json"


def _load_config() -> dict:
    # Read and parse config.json, converting I/O and parse failures into RuntimeError.
    try:
        with CONFIG_PATH.open("r", encoding="utf-8") as config_file:
            config = json.load(config_file)
    except FileNotFoundError as exc:
        raise RuntimeError(f"Config file not found: {CONFIG_PATH}") from exc
    except json.JSONDecodeError as exc:
        raise RuntimeError(f"Invalid JSON in config file: {CONFIG_PATH}") from exc
    except OSError as exc:
        raise RuntimeError(f"Unable to read config file: {CONFIG_PATH}") from exc
    if not isinstance(config, dict):
        raise RuntimeError(f"Invalid config format in file: {CONFIG_PATH}")
    return config


def _load_provider_types() -> dict[str, str]:
    # Normalize the provider-name -> client-type mapping to lower case.
    provider_types = _load_config().get("LLM_PROVIDER_TYPES")
    if not isinstance(provider_types, dict):
        raise RuntimeError(
            f"Invalid or missing 'LLM_PROVIDER_TYPES' in config file: {CONFIG_PATH}"
        )
    return {
        str(name).lower(): str(client_type).lower()
        for name, client_type in provider_types.items()
    }


# Lazily populated cache so config.json is read at most once per process.
_PROVIDER_TYPES: dict[str, str] | None = None


def _get_provider_types() -> dict[str, str]:
    global _PROVIDER_TYPES
    if _PROVIDER_TYPES is None:
        _PROVIDER_TYPES = _load_provider_types()
    return _PROVIDER_TYPES


def create_llm_client(
    provider: str,
@@ -33,17 +74,15 @@ def create_llm_client(
        ValueError: If provider is not supported
    """
    provider_lower = provider.lower()
    provider_type = _get_provider_types().get(provider_lower)
    if provider_lower in ("openai", "ollama", "openrouter"):
    if provider_type == "openai":
        return OpenAIClient(model, base_url, provider=provider_lower, **kwargs)
    if provider_lower == "xai":
        return OpenAIClient(model, base_url, provider="xai", **kwargs)
    if provider_lower == "anthropic":
    if provider_type == "anthropic":
        return AnthropicClient(model, base_url, **kwargs)
    if provider_lower == "google":
    if provider_type == "google":
        return GoogleClient(model, base_url, **kwargs)
    raise ValueError(f"Unsupported LLM provider: {provider}")
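
One consequence of the caching helper above is that the provider mapping is read from config.json at most once per process. A test that edits the config (or a long-running process that should pick up changes) can drop the cache explicitly; the import path below is a placeholder for wherever factory.py lives:

# Sketch: force the next lookup to re-read LLM_PROVIDER_TYPES from config.json.
import llm.factory as factory   # placeholder import path

factory._PROVIDER_TYPES = None
assert factory._get_provider_types()["lmstudio"] == "openai"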