fix: address Gemini review - centralize llamacpp base_url logic

- openai_client.py: don't use self.base_url for llamacpp (it would
  otherwise inherit the OpenAI default); read BACKEND_URL then LLAMACPP_BASE_URL directly
- default_config.py: remove redundant LLAMACPP_BASE_URL fallback from
  backend_url, keep only generic BACKEND_URL env var
This commit is contained in:
OpenClaw Assistant 2026-03-21 10:33:03 +01:00
parent 3e509bfa32
commit 01bd138f84
2 changed files with 2 additions and 2 deletions

View File

@ -17,7 +17,7 @@ DEFAULT_CONFIG = {
"llm_provider": os.environ.get("LLM_PROVIDER", "openai"),
"deep_think_llm": os.environ.get("DEEP_THINK_LLM", "gpt-5.2"),
"quick_think_llm": os.environ.get("QUICK_THINK_LLM", "gpt-5-mini"),
"backend_url": os.environ.get("BACKEND_URL", os.environ.get("LLAMACPP_BASE_URL", "https://api.openai.com/v1")),
"backend_url": os.environ.get("BACKEND_URL", "https://api.openai.com/v1"),
# Provider-specific thinking configuration
"google_thinking_level": None, # "high", "minimal", etc.
"openai_reasoning_effort": None, # "medium", "high", "low"

View File

@ -57,7 +57,7 @@ class OpenAIClient(BaseLLMClient):
llm_kwargs["base_url"] = "http://localhost:11434/v1"
llm_kwargs["api_key"] = "ollama" # Ollama doesn't require auth
elif self.provider == "llamacpp":
base_url = self.base_url or os.environ.get("LLAMACPP_BASE_URL", "http://localhost:8080/v1")
base_url = os.environ.get("BACKEND_URL") or os.environ.get("LLAMACPP_BASE_URL", "http://localhost:8080/v1")
llm_kwargs["base_url"] = base_url
llm_kwargs["api_key"] = "no-key-needed" # llama-server doesn't require auth
elif self.base_url: