diff --git a/tradingagents/default_config.py b/tradingagents/default_config.py
index f553ff8e..9c900100 100644
--- a/tradingagents/default_config.py
+++ b/tradingagents/default_config.py
@@ -17,7 +17,7 @@ DEFAULT_CONFIG = {
     "llm_provider": os.environ.get("LLM_PROVIDER", "openai"),
     "deep_think_llm": os.environ.get("DEEP_THINK_LLM", "gpt-5.2"),
     "quick_think_llm": os.environ.get("QUICK_THINK_LLM", "gpt-5-mini"),
-    "backend_url": os.environ.get("BACKEND_URL", os.environ.get("LLAMACPP_BASE_URL", "https://api.openai.com/v1")),
+    "backend_url": os.environ.get("BACKEND_URL", "https://api.openai.com/v1"),
     # Provider-specific thinking configuration
     "google_thinking_level": None,  # "high", "minimal", etc.
     "openai_reasoning_effort": None,  # "medium", "high", "low"
diff --git a/tradingagents/llm_clients/openai_client.py b/tradingagents/llm_clients/openai_client.py
index da16f3a9..f7e74434 100644
--- a/tradingagents/llm_clients/openai_client.py
+++ b/tradingagents/llm_clients/openai_client.py
@@ -57,7 +57,7 @@ class OpenAIClient(BaseLLMClient):
             llm_kwargs["base_url"] = "http://localhost:11434/v1"
             llm_kwargs["api_key"] = "ollama"  # Ollama doesn't require auth
         elif self.provider == "llamacpp":
-            base_url = self.base_url or os.environ.get("LLAMACPP_BASE_URL", "http://localhost:8080/v1")
+            base_url = os.environ.get("BACKEND_URL") or os.environ.get("LLAMACPP_BASE_URL", "http://localhost:8080/v1")
             llm_kwargs["base_url"] = base_url
             llm_kwargs["api_key"] = "no-key-needed"  # llama-server doesn't require auth
         elif self.base_url: