Added Gemini support
This commit is contained in:
parent
0f46729f09
commit
5ea8a67684
|
|
@ -3,10 +3,26 @@
|
|||
# 0. Check & Start Claude Proxy
# Check if port 10909 is open (Proxy running) using pure bash TCP check.
# /dev/tcp is a bash built-in pseudo-device: the redirection fails (non-zero)
# when nothing is listening, so no external tools (nc/curl) are needed.
if ! (echo > /dev/tcp/localhost/10909) 2>/dev/null; then
    echo "🔌 Claude Proxy not detected on port 10909"

    # Let the user pick which backend the proxy should target.
    # Empty input (just Enter) falls through to the default: gemini.
    echo "Select Proxy Provider:"
    echo "1) gemini (default)"
    echo "2) anthropic"
    read -p "Choice [1]: " choice
    case $choice in
        2) PROXY_TYPE="anthropic" ;;
        *) PROXY_TYPE="gemini" ;;
    esac

    echo "🔌 Starting Claude Proxy ($PROXY_TYPE)..."
    # Launch in the background; the provider is passed as the first argument.
    /home/prem/git/antigravity-claude-proxy/startProxy.sh "$PROXY_TYPE" &

    # Wait a moment for it to initialize with a progress bar
    # (20 ticks x 0.1s = 2 seconds total — purely cosmetic delay,
    # no actual readiness check is performed here).
    echo -n "⏳ Initializing proxy: ["
    for i in {1..20}; do
        echo -n "■"
        sleep 0.1
    done
    echo "] 100% Ready!"
else
    echo "✅ Claude Proxy already running on port 10909"
fi
|
||||
|
|
|
|||
|
|
@ -0,0 +1,69 @@
|
|||
#!/usr/bin/env python3
"""Test script to verify Google API connectivity and model availability."""

import os

from dotenv import load_dotenv
from langchain_google_genai import ChatGoogleGenerativeAI

# Load environment variables (API keys etc. read by the client library).
load_dotenv()


def test_google_api():
    """Test Google API with different models via the local proxy.

    Iterates over a list of candidate Gemini model names, sends one trivial
    prompt to each through the Claude proxy on localhost:10909, and prints a
    per-model success/failure report.  Never raises: every error is caught
    and classified (not-found / auth / rate-limit / timeout) so the operator
    sees results for all models.
    """
    # Route all requests through the local proxy started by the shell helper.
    proxy_url = "http://localhost:10909"

    print(f"🔧 Using proxy: {proxy_url}")

    # Candidate models; failures are expected for models the proxy or the
    # account does not expose — that is exactly what this script probes.
    test_models = [
        "gemini-1.5-pro",
        "gemini-1.5-flash",
        "gemini-2.0-flash-exp",
        "gemini-2.5-flash",
        "gemini-3-flash-preview",
        "gemini-3-pro-preview",
    ]

    for model_name in test_models:
        print(f"\n{'='*60}")
        print(f"Testing model: {model_name}")
        print(f"{'='*60}")

        try:
            # Initialize client.
            # BUG FIX: the original imported ChatOpenAI but instantiated
            # ChatGoogleGenerativeAI (NameError at runtime), and never
            # pointed the client at proxy_url.  Import the correct class
            # and pass the proxy as base_url, matching how the main
            # TradingAgentsGraph constructs its Google LLMs.
            print(f"🔧 Initializing {model_name}...")
            llm = ChatGoogleGenerativeAI(
                model=model_name,
                base_url=proxy_url,
                max_retries=3,
                request_timeout=30,
            )

            # Test simple query — any non-exception response counts as success.
            print("📤 Sending test query...")
            response = llm.invoke("Say 'Hello, I am working!' in exactly 5 words.")

            print("✅ SUCCESS!")
            print(f"📥 Response: {response.content}")

        except Exception as e:
            # Broad catch is deliberate: this is a diagnostic script that
            # must report every model rather than abort on the first failure.
            print(f"❌ FAILED: {type(e).__name__}")
            print(f"   Error: {str(e)[:200]}")

            # Classify common HTTP-style failures by substring; hoist str(e)
            # so it is computed once.
            err = str(e)
            if "404" in err:
                print(f"   → Model '{model_name}' not found or not available")
            elif "403" in err or "401" in err:
                print("   → Authentication error - check API key permissions")
            elif "429" in err:
                print("   → Rate limit exceeded")
            elif "timeout" in err.lower():
                print("   → Request timed out")


if __name__ == "__main__":
    print("🚀 Google API Test Script")
    print("="*60)
    test_google_api()
    print("\n" + "="*60)
    print("✅ Test complete!")
||||
|
|
@ -81,12 +81,14 @@ class TradingAgentsGraph:
|
|||
elif self.config["llm_provider"].lower() == "google":
|
||||
self.deep_thinking_llm = ChatGoogleGenerativeAI(
|
||||
model=self.config["deep_think_llm"],
|
||||
base_url=self.config["backend_url"],
|
||||
max_retries=10,
|
||||
request_timeout=60
|
||||
)
|
||||
self.quick_thinking_llm = ChatGoogleGenerativeAI(
|
||||
model=self.config["quick_think_llm"],
|
||||
max_retries=10,
|
||||
base_url=self.config["backend_url"],
|
||||
request_timeout=60
|
||||
)
|
||||
else:
|
||||
|
|
|
|||
Loading…
Reference in New Issue