feat(hypotheses): switch LLM analysis from Anthropic to Gemini
Uses google-genai SDK with gemini-2.5-flash-lite — same model already used by the discovery pipeline, so no new secret needed (GOOGLE_API_KEY). Removed ANTHROPIC_API_KEY from hypothesis-runner.yml. Co-Authored-By: Claude Sonnet 4.6 <noreply@anthropic.com>
This commit is contained in:
parent
3dbcb3fa5b
commit
43fb186d0e
|
|
@@ -49,7 +49,6 @@ jobs:
|
||||||
GH_TOKEN: ${{ secrets.GH_TOKEN }}
|
GH_TOKEN: ${{ secrets.GH_TOKEN }}
|
||||||
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
|
GOOGLE_API_KEY: ${{ secrets.GOOGLE_API_KEY }}
|
||||||
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
OPENAI_API_KEY: ${{ secrets.OPENAI_API_KEY }}
|
||||||
ANTHROPIC_API_KEY: ${{ secrets.ANTHROPIC_API_KEY }}
|
|
||||||
FINNHUB_API_KEY: ${{ secrets.FINNHUB_API_KEY }}
|
FINNHUB_API_KEY: ${{ secrets.FINNHUB_API_KEY }}
|
||||||
ALPHA_VANTAGE_API_KEY: ${{ secrets.ALPHA_VANTAGE_API_KEY }}
|
ALPHA_VANTAGE_API_KEY: ${{ secrets.ALPHA_VANTAGE_API_KEY }}
|
||||||
FMP_API_KEY: ${{ secrets.FMP_API_KEY }}
|
FMP_API_KEY: ${{ secrets.FMP_API_KEY }}
|
||||||
|
|
|
||||||
|
|
@@ -175,22 +175,22 @@ def run_hypothesis(hyp: dict) -> bool:
|
||||||
|
|
||||||
def llm_analysis(hyp: dict, conclusion: dict, scanner_domain: str) -> Optional[str]:
|
def llm_analysis(hyp: dict, conclusion: dict, scanner_domain: str) -> Optional[str]:
|
||||||
"""
|
"""
|
||||||
Ask Claude to interpret the experiment results and provide richer context.
|
Ask Gemini to interpret the experiment results and provide richer context.
|
||||||
|
|
||||||
Returns a markdown string to embed in the PR comment, or None if the API
|
Returns a markdown string to embed in the PR comment, or None if the API
|
||||||
call fails or ANTHROPIC_API_KEY is not set.
|
call fails or GOOGLE_API_KEY is not set.
|
||||||
|
|
||||||
The LLM does NOT override the programmatic decision — it adds nuance:
|
The LLM does NOT override the programmatic decision — it adds nuance:
|
||||||
sample-size caveats, market-condition context, follow-up hypotheses.
|
sample-size caveats, market-condition context, follow-up hypotheses.
|
||||||
"""
|
"""
|
||||||
api_key = os.environ.get("ANTHROPIC_API_KEY")
|
api_key = os.environ.get("GOOGLE_API_KEY")
|
||||||
if not api_key:
|
if not api_key:
|
||||||
return None
|
return None
|
||||||
|
|
||||||
try:
|
try:
|
||||||
import anthropic
|
from google import genai
|
||||||
except ImportError:
|
except ImportError:
|
||||||
print(" anthropic SDK not installed, skipping LLM analysis", flush=True)
|
print(" google-genai SDK not installed, skipping LLM analysis", flush=True)
|
||||||
return None
|
return None
|
||||||
|
|
||||||
hyp_metrics = conclusion["hypothesis"]
|
hyp_metrics = conclusion["hypothesis"]
|
||||||
|
|
@@ -230,13 +230,12 @@ Provide a concise analysis (3–5 sentences) covering:
|
||||||
Be direct. Do not restate the numbers — interpret them. Do not recommend merging or closing the PR."""
|
Be direct. Do not restate the numbers — interpret them. Do not recommend merging or closing the PR."""
|
||||||
|
|
||||||
try:
|
try:
|
||||||
client = anthropic.Anthropic(api_key=api_key)
|
client = genai.Client(api_key=api_key)
|
||||||
message = client.messages.create(
|
response = client.models.generate_content(
|
||||||
model="claude-haiku-4-5-20251001",
|
model="gemini-2.5-flash-lite",
|
||||||
max_tokens=512,
|
contents=prompt,
|
||||||
messages=[{"role": "user", "content": prompt}],
|
|
||||||
)
|
)
|
||||||
return message.content[0].text.strip()
|
return response.text.strip()
|
||||||
except Exception as e:
|
except Exception as e:
|
||||||
print(f" LLM analysis failed: {e}", flush=True)
|
print(f" LLM analysis failed: {e}", flush=True)
|
||||||
return None
|
return None
|
||||||
|
|
|
||||||
Loading…
Reference in New Issue