From f71b3b3fea4da87e828cfae58b74869ab2f1e520 Mon Sep 17 00:00:00 2001
From: wylab
Date: Fri, 13 Feb 2026 15:51:15 +0100
Subject: [PATCH] Preserve thinking block signatures for multi-turn
 conversations

The Anthropic API returns a signature field in thinking blocks that must
be replayed in subsequent turns. Store full thinking blocks (including
signatures) instead of just the text content.

Co-Authored-By: Claude Opus 4.6
---
 nanobot/providers/anthropic_oauth.py | 27 +++++++++++++++------------
 nanobot/providers/base.py            |  2 +-
 2 files changed, 16 insertions(+), 13 deletions(-)

diff --git a/nanobot/providers/anthropic_oauth.py b/nanobot/providers/anthropic_oauth.py
index 8b6bf7e..c468e62 100644
--- a/nanobot/providers/anthropic_oauth.py
+++ b/nanobot/providers/anthropic_oauth.py
@@ -90,12 +90,12 @@ class AnthropicOAuthProvider(LLMProvider):
             if role == "assistant" and msg.get("tool_calls"):
                 # Convert OpenAI tool_calls to Anthropic content blocks
                 content_blocks: list[dict[str, Any]] = []
-                # Preserve thinking block if present
-                if msg.get("reasoning_content"):
-                    content_blocks.append({
-                        "type": "thinking",
-                        "thinking": msg["reasoning_content"],
-                    })
+                # Preserve thinking blocks (list=raw API blocks with signatures, str=legacy)
+                rc = msg.get("reasoning_content")
+                if isinstance(rc, list):
+                    content_blocks.extend(rc)
+                elif isinstance(rc, str) and rc:
+                    content_blocks.append({"type": "thinking", "thinking": rc})
                 text = msg.get("content")
                 if text:
                     content_blocks.append({"type": "text", "text": text})
@@ -118,9 +118,11 @@ class AnthropicOAuthProvider(LLMProvider):
 
             if role == "assistant" and msg.get("reasoning_content"):
                 # Plain assistant message with thinking (no tool calls)
-                content_blocks = [
-                    {"type": "thinking", "thinking": msg["reasoning_content"]},
-                ]
+                rc = msg["reasoning_content"]
+                if isinstance(rc, list):
+                    content_blocks = list(rc)
+                else:
+                    content_blocks = [{"type": "thinking", "thinking": rc}]
                 text = msg.get("content")
                 if text:
                     content_blocks.append({"type": "text", "text": text})
@@ -275,12 +277,13 @@ class AnthropicOAuthProvider(LLMProvider):
         content_blocks = response.get("content", [])
 
         text_content = ""
-        thinking_content = ""
+        thinking_blocks: list[dict[str, Any]] = []
         tool_calls = []
 
         for block in content_blocks:
             if block.get("type") == "thinking":
-                thinking_content += block.get("thinking", "")
+                # Preserve full block including signature for multi-turn replay
+                thinking_blocks.append(block)
             elif block.get("type") == "text":
                 text_content += block.get("text", "")
             elif block.get("type") == "tool_use":
@@ -306,7 +309,7 @@ class AnthropicOAuthProvider(LLMProvider):
             tool_calls=tool_calls,
             finish_reason=response.get("stop_reason", "end_turn"),
             usage=usage,
-            reasoning_content=thinking_content or None,
+            reasoning_content=thinking_blocks or None,
         )
 
     def get_default_model(self) -> str:
diff --git a/nanobot/providers/base.py b/nanobot/providers/base.py
index c69c38b..5085292 100644
--- a/nanobot/providers/base.py
+++ b/nanobot/providers/base.py
@@ -20,7 +20,7 @@ class LLMResponse:
     tool_calls: list[ToolCallRequest] = field(default_factory=list)
     finish_reason: str = "stop"
     usage: dict[str, int] = field(default_factory=dict)
-    reasoning_content: str | None = None  # Kimi, DeepSeek-R1 etc.
+    reasoning_content: Any = None  # str for Kimi/DeepSeek-R1; list[dict] for Anthropic thinking blocks
 
     @property
     def has_tool_calls(self) -> bool:
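
Reviewer note (not part of the patch): a minimal, self-contained sketch of the replay path this change enables, assuming the message dict shape used in the hunks above. The function name build_assistant_blocks, the stored_turn dict, and the signature value are illustrative placeholders, not code from the repository.

from typing import Any


def build_assistant_blocks(msg: dict[str, Any]) -> list[dict[str, Any]]:
    """Rebuild Anthropic content blocks for a replayed assistant turn.

    Mirrors the patched logic: reasoning_content is either a list of raw
    thinking blocks (each still carrying its "signature") or a legacy str.
    """
    blocks: list[dict[str, Any]] = []
    rc = msg.get("reasoning_content")
    if isinstance(rc, list):
        blocks.extend(rc)  # raw blocks replayed verbatim, signatures intact
    elif isinstance(rc, str) and rc:
        blocks.append({"type": "thinking", "thinking": rc})
    if msg.get("content"):
        blocks.append({"type": "text", "text": msg["content"]})
    return blocks


# Hypothetical stored turn; the signature value is a placeholder.
stored_turn = {
    "reasoning_content": [
        {"type": "thinking", "thinking": "User asked about the weather.",
         "signature": "EqQBCk..."},
    ],
    "content": "Checking the forecast now.",
}
print(build_assistant_blocks(stored_turn))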