fix: Gemini 3 models (Flash, 3.1 Pro, 3.1 Flash Lite) fail with `400 InvalidArgument` during multi-turn function calling
This commit is contained in:
DanielTobi0 2026-03-31 20:50:29 +01:00
parent 4641c03340
commit 7196ac468a
2 changed files with 37 additions and 2 deletions

View File

@ -13,7 +13,7 @@ dependencies = [
"backtrader>=1.9.78.123", "backtrader>=1.9.78.123",
"langchain-anthropic>=0.3.15", "langchain-anthropic>=0.3.15",
"langchain-experimental>=0.3.4", "langchain-experimental>=0.3.4",
"langchain-google-genai>=2.1.5", "langchain-google-genai>=2.1.12",
"langchain-openai>=0.3.23", "langchain-openai>=0.3.23",
"langgraph>=0.4.8", "langgraph>=0.4.8",
"pandas>=2.3.0", "pandas>=2.3.0",

View File

@ -1,18 +1,53 @@
from typing import Any, Optional from typing import Any, Optional, List
from langchain_core.messages import BaseMessage
from langchain_google_genai import ChatGoogleGenerativeAI from langchain_google_genai import ChatGoogleGenerativeAI
from .base_client import BaseLLMClient, normalize_content from .base_client import BaseLLMClient, normalize_content
from .validators import validate_model from .validators import validate_model
# Dummy value sanctioned by Google to skip thought_signature validation.
# See https://ai.google.dev/gemini-api/docs/thought-signatures#faqs
_SKIP_THOUGHT_SIG = b"skip_thought_signature_validator"
def _inject_thought_signatures(request: Any) -> Any:
"""Add dummy thought_signature to function-call parts in Gemini 3 requests.
langchain-google-genai <=2.x does not preserve thought_signature fields
returned by the API, causing 400 errors on the next turn. Google's FAQ
allows a well-known dummy value to bypass server-side validation.
"""
for content in request.contents:
if content.role != "model":
continue
first_fc = True
for part in content.parts:
if part.function_call.name: # has a function call
if first_fc:
part.thought_signature = _SKIP_THOUGHT_SIG
first_fc = False
return request
class NormalizedChatGoogleGenerativeAI(ChatGoogleGenerativeAI):
    """ChatGoogleGenerativeAI with normalized content output.

    Gemini 3 models return content as a list of typed blocks; this subclass
    normalizes it to a string for consistent downstream handling. It also
    injects dummy thought signatures so Gemini 3 multi-turn function calling
    does not fail server-side signature validation.
    """

    def _prepare_request(
        self,
        messages: List[BaseMessage],
        **kwargs: Any,
    ) -> Any:
        """Build the request via the parent, then patch Gemini 3 requests."""
        prepared = super()._prepare_request(messages, **kwargs)
        model_name = (self.model or "").lower()
        if "gemini-3" in model_name:
            _inject_thought_signatures(prepared)
        return prepared

    def invoke(self, input, config=None, **kwargs):
        """Invoke the parent model, then flatten block-list content to str."""
        raw = super().invoke(input, config, **kwargs)
        return normalize_content(raw)