added gemini support

This commit is contained in:
swj.premkumar 2026-01-11 07:33:58 -06:00
parent 0f46729f09
commit 5ea8a67684
3 changed files with 91 additions and 4 deletions

View File

@ -3,10 +3,26 @@
# 0. Check & Start Claude Proxy # 0. Check & Start Claude Proxy
# Check if port 10909 is open (Proxy running) using pure bash TCP check # Check if port 10909 is open (Proxy running) using pure bash TCP check
if ! (echo > /dev/tcp/localhost/10909) 2>/dev/null; then if ! (echo > /dev/tcp/localhost/10909) 2>/dev/null; then
echo "🔌 Starting Claude Proxy..." echo "🔌 Claude Proxy not detected on port 10909"
/home/prem/git/antigravity-claude-proxy/startProxy.sh & echo "Select Proxy Provider:"
# Wait a moment for it to initialize echo "1) gemini (default)"
sleep 2 echo "2) anthropic"
read -p "Choice [1]: " choice
case $choice in
2) PROXY_TYPE="anthropic" ;;
*) PROXY_TYPE="gemini" ;;
esac
echo "🔌 Starting Claude Proxy ($PROXY_TYPE)..."
/home/prem/git/antigravity-claude-proxy/startProxy.sh "$PROXY_TYPE" &
# Wait a moment for it to initialize with a progress bar
echo -n "⏳ Initializing proxy: ["
for i in {1..20}; do
echo -n "■"
sleep 0.1
done
echo "] 100% Ready!"
else else
echo "✅ Claude Proxy already running on port 10909" echo "✅ Claude Proxy already running on port 10909"
fi fi

69
test_google_api.py Normal file
View File

@ -0,0 +1,69 @@
#!/usr/bin/env python3
"""Test script to verify Google API connectivity and model availability."""
import os
from dotenv import load_dotenv
from langchain_openai import ChatOpenAI
# Load environment variables
load_dotenv()
def test_google_api():
    """Probe Gemini model availability through the local Claude proxy.

    Iterates over a list of candidate Gemini model names, creates a chat
    client pointed at the proxy's OpenAI-compatible endpoint, sends one
    trivial prompt per model, and prints success or a categorized failure
    (404 / 401-403 / 429 / timeout) for each. Never raises: every model
    is probed best-effort and errors are reported, not propagated.
    """
    # Local proxy started by startProxy.sh; exposes an OpenAI-compatible
    # API on this port (see the port-10909 check in the launcher script).
    proxy_url = "http://localhost:10909"
    print(f"🔧 Using proxy: {proxy_url}")

    # Candidate model names to probe, oldest to newest.
    test_models = [
        "gemini-1.5-pro",
        "gemini-1.5-flash",
        "gemini-2.0-flash-exp",
        "gemini-2.5-flash",
        "gemini-3-flash-preview",
        "gemini-3-pro-preview",
    ]

    for model_name in test_models:
        print(f"\n{'='*60}")
        print(f"Testing model: {model_name}")
        print(f"{'='*60}")
        try:
            print(f"🔧 Initializing {model_name}...")
            # BUG FIX: the original called ChatGoogleGenerativeAI, which is
            # never imported (only ChatOpenAI is) and would NameError on the
            # first iteration; it also never used proxy_url, so requests
            # bypassed the proxy entirely. Use the imported ChatOpenAI
            # client and route it through the proxy via base_url.
            llm = ChatOpenAI(
                model=model_name,
                base_url=proxy_url,
                max_retries=3,
                request_timeout=30,
            )
            print(f"📤 Sending test query...")
            response = llm.invoke("Say 'Hello, I am working!' in exactly 5 words.")
            print(f"✅ SUCCESS!")
            print(f"📥 Response: {response.content}")
        except Exception as e:
            # Best-effort probe: report the failure class and move on to
            # the next model rather than aborting the whole sweep.
            print(f"❌ FAILED: {type(e).__name__}")
            print(f"   Error: {str(e)[:200]}")
            # Map common HTTP status substrings to human-readable hints.
            msg = str(e)
            if "404" in msg:
                print(f"   → Model '{model_name}' not found or not available")
            elif "403" in msg or "401" in msg:
                print(f"   → Authentication error - check API key permissions")
            elif "429" in msg:
                print(f"   → Rate limit exceeded")
            elif "timeout" in msg.lower():
                print(f"   → Request timed out")
if __name__ == "__main__":
print("🚀 Google API Test Script")
print("="*60)
test_google_api()
print("\n" + "="*60)
print("✅ Test complete!")

View File

@ -81,12 +81,14 @@ class TradingAgentsGraph:
elif self.config["llm_provider"].lower() == "google": elif self.config["llm_provider"].lower() == "google":
self.deep_thinking_llm = ChatGoogleGenerativeAI( self.deep_thinking_llm = ChatGoogleGenerativeAI(
model=self.config["deep_think_llm"], model=self.config["deep_think_llm"],
base_url=self.config["backend_url"],
max_retries=10, max_retries=10,
request_timeout=60 request_timeout=60
) )
self.quick_thinking_llm = ChatGoogleGenerativeAI( self.quick_thinking_llm = ChatGoogleGenerativeAI(
model=self.config["quick_think_llm"], model=self.config["quick_think_llm"],
max_retries=10, max_retries=10,
base_url=self.config["backend_url"],
request_timeout=60 request_timeout=60
) )
else: else: