Preserve thinking block signatures for multi-turn conversations
All checks were successful
Build Nanobot OAuth / build (push) Successful in 1m59s
The Anthropic API returns a signature field in thinking blocks that must be replayed in subsequent turns. Store full thinking blocks (including signatures) instead of just the text content.

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
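For context, a thinking block as the Anthropic Messages API returns it carries an opaque signature alongside the thinking text, roughly like this (illustrative values; not part of this diff):

    {
        "type": "thinking",
        "thinking": "The user wants the weather, so I should call the tool...",
        "signature": "EqQBCgIYAhIM..."
    }

Keeping only the thinking string drops the signature, so the block can no longer be replayed as-is on the next turn; this commit stores and replays the whole block instead.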
@@ -90,12 +90,12 @@ class AnthropicOAuthProvider(LLMProvider):
             if role == "assistant" and msg.get("tool_calls"):
                 # Convert OpenAI tool_calls to Anthropic content blocks
                 content_blocks: list[dict[str, Any]] = []
-                # Preserve thinking block if present
-                if msg.get("reasoning_content"):
-                    content_blocks.append({
-                        "type": "thinking",
-                        "thinking": msg["reasoning_content"],
-                    })
+                # Preserve thinking blocks (list=raw API blocks with signatures, str=legacy)
+                rc = msg.get("reasoning_content")
+                if isinstance(rc, list):
+                    content_blocks.extend(rc)
+                elif isinstance(rc, str) and rc:
+                    content_blocks.append({"type": "thinking", "thinking": rc})
                 text = msg.get("content")
                 if text:
                     content_blocks.append({"type": "text", "text": text})
@@ -118,9 +118,11 @@ class AnthropicOAuthProvider(LLMProvider):
 
             if role == "assistant" and msg.get("reasoning_content"):
                 # Plain assistant message with thinking (no tool calls)
-                content_blocks = [
-                    {"type": "thinking", "thinking": msg["reasoning_content"]},
-                ]
+                rc = msg["reasoning_content"]
+                if isinstance(rc, list):
+                    content_blocks = list(rc)
+                else:
+                    content_blocks = [{"type": "thinking", "thinking": rc}]
                 text = msg.get("content")
                 if text:
                     content_blocks.append({"type": "text", "text": text})
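For illustration (not part of the diff), a stored assistant message that these two hunks now convert might look like the following; the exact message-dict layout used elsewhere in the codebase is assumed here:

    msg = {
        "role": "assistant",
        "content": "I'll check the weather.",
        "reasoning_content": [
            {"type": "thinking", "thinking": "Need the weather tool...", "signature": "EqQBCg..."},
        ],
        "tool_calls": [...],  # OpenAI-style tool calls (conversion not shown in this excerpt)
    }

With a list value, the raw blocks are extended into content_blocks untouched, signature included; a plain string still falls back to a bare {"type": "thinking"} block.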
@@ -275,12 +277,13 @@ class AnthropicOAuthProvider(LLMProvider):
         content_blocks = response.get("content", [])
 
         text_content = ""
-        thinking_content = ""
+        thinking_blocks: list[dict[str, Any]] = []
         tool_calls = []
 
         for block in content_blocks:
             if block.get("type") == "thinking":
-                thinking_content += block.get("thinking", "")
+                # Preserve full block including signature for multi-turn replay
+                thinking_blocks.append(block)
             elif block.get("type") == "text":
                 text_content += block.get("text", "")
             elif block.get("type") == "tool_use":
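For reference, the response["content"] list this loop walks contains typed blocks along these lines (values illustrative):

    [
        {"type": "thinking", "thinking": "...", "signature": "EqQBCg..."},
        {"type": "text", "text": "Here's what I found."},
        {"type": "tool_use", "id": "toolu_01...", "name": "get_weather", "input": {"city": "Berlin"}},
    ]

Thinking blocks are now appended whole rather than concatenated into a string, so the signature survives the parse.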
@@ -306,7 +309,7 @@ class AnthropicOAuthProvider(LLMProvider):
             tool_calls=tool_calls,
             finish_reason=response.get("stop_reason", "end_turn"),
             usage=usage,
-            reasoning_content=thinking_content or None,
+            reasoning_content=thinking_blocks or None,
         )
 
     def get_default_model(self) -> str:
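Pieced together from the hunks above, the multi-turn flow is: the parse loop collects the raw thinking blocks, this constructor change hands them out via reasoning_content, and the message-conversion code at the top replays them on the next request. A rough sketch, assuming the caller simply copies the field back onto the stored message (that code is not part of this diff):

    # response side: raw blocks, signatures included
    blocks = llm_response.reasoning_content
    # e.g. [{"type": "thinking", "thinking": "...", "signature": "EqQBCg..."}]

    # request side: the same list is extended straight into content_blocks,
    # so the signature reaches the API unchanged on the following turn
    msg["reasoning_content"] = blocks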
@@ -20,7 +20,7 @@ class LLMResponse:
     tool_calls: list[ToolCallRequest] = field(default_factory=list)
     finish_reason: str = "stop"
     usage: dict[str, int] = field(default_factory=dict)
-    reasoning_content: str | None = None  # Kimi, DeepSeek-R1 etc.
+    reasoning_content: Any = None  # str for Kimi/DeepSeek-R1; list[dict] for Anthropic thinking blocks
 
     @property
     def has_tool_calls(self) -> bool:
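With the widened annotation, reasoning_content can hold either shape (illustrative values):

    reasoning_content = "The user asked about..."              # Kimi / DeepSeek-R1 style string
    reasoning_content = [{"type": "thinking", "thinking": "...",
                          "signature": "EqQBCg..."}]            # Anthropic raw thinking blocks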