feat: add AiHubMix provider support and refactor provider matching
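
Adds AiHubMix (an OpenAI-compatible API gateway) as a first-class provider: a new `aihubmix` entry in `ProvidersConfig`, README and `status` coverage, and an optional `extra_headers` field so the AiHubMix `APP-Code` header can be sent. Provider lookup is consolidated into `Config.get_provider()` (keyword match with gateway-first fallback), CLI provider construction into a shared `_make_provider()` helper, and tool-call/message logging now emits readable previews (`ensure_ascii=False`, truncated content).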
@@ -16,7 +16,7 @@

 ⚡️ Delivers core agent functionality in just **~4,000** lines of code — **99% smaller** than Clawdbot's 430k+ lines.

-📏 Real-time line count: **3,431 lines** (run `bash core_agent_lines.sh` to verify anytime)
+📏 Real-time line count: **3,422 lines** (run `bash core_agent_lines.sh` to verify anytime)

 ## 📢 News

@@ -352,6 +352,7 @@ Config file: `~/.nanobot/config.json`
 | `deepseek` | LLM (DeepSeek direct) | [platform.deepseek.com](https://platform.deepseek.com) |
 | `groq` | LLM + **Voice transcription** (Whisper) | [console.groq.com](https://console.groq.com) |
 | `gemini` | LLM (Gemini direct) | [aistudio.google.com](https://aistudio.google.com) |
+| `aihubmix` | LLM (API gateway, access to all models) | [aihubmix.com](https://aihubmix.com) |
 | `dashscope` | LLM (Qwen) | [dashscope.console.aliyun.com](https://dashscope.console.aliyun.com) |

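A minimal sketch of the matching `~/.nanobot/config.json` entry for the new provider. The JSON key casing (`apiKey`, `extraHeaders`) is an assumption inferred from the `providers.openrouter.apiKey` path mentioned later in this diff; the values are placeholders:

```json
{
  "providers": {
    "aihubmix": {
      "apiKey": "sk-your-aihubmix-key",
      "extraHeaders": { "APP-Code": "your-app-code" }
    }
  }
}
```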
@@ -155,7 +155,8 @@ class AgentLoop:
         if msg.channel == "system":
             return await self._process_system_message(msg)

-        logger.info(f"Processing message from {msg.channel}:{msg.sender_id}")
+        preview = msg.content[:80] + "..." if len(msg.content) > 80 else msg.content
+        logger.info(f"Processing message from {msg.channel}:{msg.sender_id}: {preview}")

         # Get or create session
         session = self.sessions.get_or_create(msg.session_key)
@@ -216,8 +217,8 @@ class AgentLoop:

             # Execute tools
             for tool_call in response.tool_calls:
-                args_str = json.dumps(tool_call.arguments)
-                logger.debug(f"Executing tool: {tool_call.name} with arguments: {args_str}")
+                args_str = json.dumps(tool_call.arguments, ensure_ascii=False)
+                logger.info(f"Tool call: {tool_call.name}({args_str[:200]})")
                 result = await self.tools.execute(tool_call.name, tool_call.arguments)
                 messages = self.context.add_tool_result(
                     messages, tool_call.id, tool_call.name, result
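The switch to `ensure_ascii=False` matters for readable logs: by default `json.dumps` escapes every non-ASCII character. A standalone illustration (not part of the diff):

```python
import json

args = {"path": "notes/北京.md"}
print(json.dumps(args))                      # {"path": "notes/\u5317\u4eac.md"}
print(json.dumps(args, ensure_ascii=False))  # {"path": "notes/北京.md"}
```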
@@ -230,6 +231,10 @@ class AgentLoop:
             if final_content is None:
                 final_content = "I've completed processing but have no response to give."

+            # Log response preview
+            preview = final_content[:120] + "..." if len(final_content) > 120 else final_content
+            logger.info(f"Response to {msg.channel}:{msg.sender_id}: {preview}")
+
             # Save to session
             session.add_message("user", msg.content)
             session.add_message("assistant", final_content)
@@ -315,8 +320,8 @@ class AgentLoop:
             )

             for tool_call in response.tool_calls:
-                args_str = json.dumps(tool_call.arguments)
-                logger.debug(f"Executing tool: {tool_call.name} with arguments: {args_str}")
+                args_str = json.dumps(tool_call.arguments, ensure_ascii=False)
+                logger.info(f"Tool call: {tool_call.name}({args_str[:200]})")
                 result = await self.tools.execute(tool_call.name, tool_call.arguments)
                 messages = self.context.add_tool_result(
                     messages, tool_call.id, tool_call.name, result
@@ -147,6 +147,23 @@ This file stores important information that should persist across sessions.
     console.print("  [dim]Created memory/MEMORY.md[/dim]")


+def _make_provider(config):
+    """Create LiteLLMProvider from config. Exits if no API key found."""
+    from nanobot.providers.litellm_provider import LiteLLMProvider
+
+    p = config.get_provider()
+    model = config.agents.defaults.model
+    if not (p and p.api_key) and not model.startswith("bedrock/"):
+        console.print("[red]Error: No API key configured.[/red]")
+        console.print("Set one in ~/.nanobot/config.json under providers section")
+        raise typer.Exit(1)
+    return LiteLLMProvider(
+        api_key=p.api_key if p else None,
+        api_base=config.get_api_base(),
+        default_model=model,
+        extra_headers=p.extra_headers if p else None,
+    )
+
+
 # ============================================================================
 # Gateway / Server
 # ============================================================================
@@ -160,7 +177,6 @@ def gateway(
     """Start the nanobot gateway."""
     from nanobot.config.loader import load_config, get_data_dir
    from nanobot.bus.queue import MessageBus
-    from nanobot.providers.litellm_provider import LiteLLMProvider
     from nanobot.agent.loop import AgentLoop
     from nanobot.channels.manager import ChannelManager
     from nanobot.cron.service import CronService
@@ -174,26 +190,8 @@ def gateway(
     console.print(f"{__logo__} Starting nanobot gateway on port {port}...")

     config = load_config()

     # Create components
     bus = MessageBus()
-
-    # Create provider (supports OpenRouter, Anthropic, OpenAI, Bedrock)
-    api_key = config.get_api_key()
-    api_base = config.get_api_base()
-    model = config.agents.defaults.model
-    is_bedrock = model.startswith("bedrock/")
-
-    if not api_key and not is_bedrock:
-        console.print("[red]Error: No API key configured.[/red]")
-        console.print("Set one in ~/.nanobot/config.json under providers.openrouter.apiKey")
-        raise typer.Exit(1)
-
-    provider = LiteLLMProvider(
-        api_key=api_key,
-        api_base=api_base,
-        default_model=config.agents.defaults.model
-    )
+    provider = _make_provider(config)

     # Create cron service first (callback set after agent creation)
     cron_store_path = get_data_dir() / "cron" / "jobs.json"
@@ -290,26 +288,12 @@ def agent(
     """Interact with the agent directly."""
     from nanobot.config.loader import load_config
     from nanobot.bus.queue import MessageBus
-    from nanobot.providers.litellm_provider import LiteLLMProvider
     from nanobot.agent.loop import AgentLoop

     config = load_config()

-    api_key = config.get_api_key()
-    api_base = config.get_api_base()
-    model = config.agents.defaults.model
-    is_bedrock = model.startswith("bedrock/")
-
-    if not api_key and not is_bedrock:
-        console.print("[red]Error: No API key configured.[/red]")
-        raise typer.Exit(1)
-
     bus = MessageBus()
-    provider = LiteLLMProvider(
-        api_key=api_key,
-        api_base=api_base,
-        default_model=config.agents.defaults.model
-    )
+    provider = _make_provider(config)

     agent_loop = AgentLoop(
         bus=bus,
@@ -657,12 +641,14 @@ def status():
     has_gemini = bool(config.providers.gemini.api_key)
     has_zhipu = bool(config.providers.zhipu.api_key)
     has_vllm = bool(config.providers.vllm.api_base)
+    has_aihubmix = bool(config.providers.aihubmix.api_key)

     console.print(f"OpenRouter API: {'[green]✓[/green]' if has_openrouter else '[dim]not set[/dim]'}")
     console.print(f"Anthropic API: {'[green]✓[/green]' if has_anthropic else '[dim]not set[/dim]'}")
     console.print(f"OpenAI API: {'[green]✓[/green]' if has_openai else '[dim]not set[/dim]'}")
     console.print(f"Gemini API: {'[green]✓[/green]' if has_gemini else '[dim]not set[/dim]'}")
     console.print(f"Zhipu AI API: {'[green]✓[/green]' if has_zhipu else '[dim]not set[/dim]'}")
+    console.print(f"AiHubMix API: {'[green]✓[/green]' if has_aihubmix else '[dim]not set[/dim]'}")
     vllm_status = f"[green]✓ {config.providers.vllm.api_base}[/green]" if has_vllm else "[dim]not set[/dim]"
     console.print(f"vLLM/Local: {vllm_status}")

@@ -65,6 +65,7 @@ class ProviderConfig(BaseModel):
     """LLM provider configuration."""
     api_key: str = ""
     api_base: str | None = None
+    extra_headers: dict[str, str] | None = None  # Custom headers (e.g. APP-Code for AiHubMix)


 class ProvidersConfig(BaseModel):
@@ -79,6 +80,7 @@ class ProvidersConfig(BaseModel):
     vllm: ProviderConfig = Field(default_factory=ProviderConfig)
     gemini: ProviderConfig = Field(default_factory=ProviderConfig)
     moonshot: ProviderConfig = Field(default_factory=ProviderConfig)
+    aihubmix: ProviderConfig = Field(default_factory=ProviderConfig)  # AiHubMix API gateway


 class GatewayConfig(BaseModel):
@@ -123,60 +125,44 @@ class Config(BaseSettings):
         """Get expanded workspace path."""
         return Path(self.agents.defaults.workspace).expanduser()

-    def _match_provider(self, model: str | None = None) -> ProviderConfig | None:
-        """Match a provider based on model name."""
+    # Default base URLs for API gateways
+    _GATEWAY_DEFAULTS = {"openrouter": "https://openrouter.ai/api/v1", "aihubmix": "https://aihubmix.com/v1"}
+
+    def get_provider(self, model: str | None = None) -> ProviderConfig | None:
+        """Get matched provider config (api_key, api_base, extra_headers). Falls back to first available."""
         model = (model or self.agents.defaults.model).lower()
-        # Map of keywords to provider configs
-        providers = {
-            "openrouter": self.providers.openrouter,
-            "deepseek": self.providers.deepseek,
-            "anthropic": self.providers.anthropic,
-            "claude": self.providers.anthropic,
-            "openai": self.providers.openai,
-            "gpt": self.providers.openai,
-            "gemini": self.providers.gemini,
-            "zhipu": self.providers.zhipu,
-            "glm": self.providers.zhipu,
-            "zai": self.providers.zhipu,
-            "dashscope": self.providers.dashscope,
-            "qwen": self.providers.dashscope,
-            "groq": self.providers.groq,
-            "moonshot": self.providers.moonshot,
-            "kimi": self.providers.moonshot,
-            "vllm": self.providers.vllm,
+        p = self.providers
+        # Keyword → provider mapping (order matters: gateways first)
+        keyword_map = {
+            "aihubmix": p.aihubmix, "openrouter": p.openrouter,
+            "deepseek": p.deepseek, "anthropic": p.anthropic, "claude": p.anthropic,
+            "openai": p.openai, "gpt": p.openai, "gemini": p.gemini,
+            "zhipu": p.zhipu, "glm": p.zhipu, "zai": p.zhipu,
+            "dashscope": p.dashscope, "qwen": p.dashscope,
+            "groq": p.groq, "moonshot": p.moonshot, "kimi": p.moonshot, "vllm": p.vllm,
         }
-        for keyword, provider in providers.items():
-            if keyword in model and provider.api_key:
+        for kw, provider in keyword_map.items():
+            if kw in model and provider.api_key:
                 return provider
-        return None
+        # Fallback: gateways first (can serve any model), then specific providers
+        all_providers = [p.openrouter, p.aihubmix, p.anthropic, p.openai, p.deepseek,
+                         p.gemini, p.zhipu, p.dashscope, p.moonshot, p.vllm, p.groq]
+        return next((pr for pr in all_providers if pr.api_key), None)

     def get_api_key(self, model: str | None = None) -> str | None:
-        """Get API key for the given model (or default model). Falls back to first available key."""
-        # Try matching by model name first
-        matched = self._match_provider(model)
-        if matched:
-            return matched.api_key
-        # Fallback: return first available key
-        for provider in [
-            self.providers.openrouter, self.providers.deepseek,
-            self.providers.anthropic, self.providers.openai,
-            self.providers.gemini, self.providers.zhipu,
-            self.providers.dashscope, self.providers.moonshot,
-            self.providers.vllm, self.providers.groq,
-        ]:
-            if provider.api_key:
-                return provider.api_key
-        return None
+        """Get API key for the given model. Falls back to first available key."""
+        p = self.get_provider(model)
+        return p.api_key if p else None

     def get_api_base(self, model: str | None = None) -> str | None:
-        """Get API base URL based on model name."""
-        model = (model or self.agents.defaults.model).lower()
-        if "openrouter" in model:
-            return self.providers.openrouter.api_base or "https://openrouter.ai/api/v1"
-        if any(k in model for k in ("zhipu", "glm", "zai")):
-            return self.providers.zhipu.api_base
-        if "vllm" in model:
-            return self.providers.vllm.api_base
+        """Get API base URL for the given model. Applies default URLs for known gateways."""
+        p = self.get_provider(model)
+        if p and p.api_base:
+            return p.api_base
+        # Default URLs for known gateways (openrouter, aihubmix)
+        for name, url in self._GATEWAY_DEFAULTS.items():
+            if p == getattr(self.providers, name):
+                return url
         return None

     class Config:
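A self-contained sketch of the two-step resolution `get_provider()` now performs: keyword match against the model name (only providers that actually hold a key can win), then a gateway-first fallback. The tiny `Prov` class and the three-entry registry are illustrative stand-ins, not nanobot's real types:

```python
from dataclasses import dataclass

@dataclass
class Prov:
    api_key: str = ""

# gateways listed first so they win the fallback
registry = {"aihubmix": Prov("ak-1"), "openai": Prov("sk-2"), "zhipu": Prov("")}

def get_provider(model: str) -> Prov | None:
    model = model.lower()
    # step 1: keyword match, skipping providers without a key
    for kw, prov in registry.items():
        if kw in model and prov.api_key:
            return prov
    # step 2: fall back to the first configured provider, gateways first
    return next((p for p in registry.values() if p.api_key), None)

assert get_provider("openai/gpt-4o") is registry["openai"]   # keyword hit
assert get_provider("zhipu/glm-4") is registry["aihubmix"]   # no zhipu key -> gateway fallback
```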
@@ -21,10 +21,12 @@ class LiteLLMProvider(LLMProvider):
         self,
         api_key: str | None = None,
         api_base: str | None = None,
-        default_model: str = "anthropic/claude-opus-4-5"
+        default_model: str = "anthropic/claude-opus-4-5",
+        extra_headers: dict[str, str] | None = None,
     ):
         super().__init__(api_key, api_base)
         self.default_model = default_model
+        self.extra_headers = extra_headers or {}

         # Detect OpenRouter by api_key prefix or explicit api_base
         self.is_openrouter = (
@@ -32,14 +34,20 @@ class LiteLLMProvider(LLMProvider):
             (api_base and "openrouter" in api_base)
         )

+        # Detect AiHubMix by api_base
+        self.is_aihubmix = bool(api_base and "aihubmix" in api_base)
+
         # Track if using custom endpoint (vLLM, etc.)
-        self.is_vllm = bool(api_base) and not self.is_openrouter
+        self.is_vllm = bool(api_base) and not self.is_openrouter and not self.is_aihubmix

         # Configure LiteLLM based on provider
         if api_key:
             if self.is_openrouter:
                 # OpenRouter mode - set key
                 os.environ["OPENROUTER_API_KEY"] = api_key
+            elif self.is_aihubmix:
+                # AiHubMix gateway - OpenAI-compatible
+                os.environ["OPENAI_API_KEY"] = api_key
             elif self.is_vllm:
                 # vLLM/custom endpoint - uses OpenAI-compatible API
                 os.environ["HOSTED_VLLM_API_KEY"] = api_key
@@ -91,41 +99,26 @@ class LiteLLMProvider(LLMProvider):
         """
         model = model or self.default_model

-        # For OpenRouter, prefix model name if not already prefixed
+        # Auto-prefix model names for known providers
+        # (keywords, target_prefix, skip_if_starts_with)
+        _prefix_rules = [
+            (("glm", "zhipu"), "zai", ("zhipu/", "zai/", "openrouter/", "hosted_vllm/")),
+            (("qwen", "dashscope"), "dashscope", ("dashscope/", "openrouter/")),
+            (("moonshot", "kimi"), "moonshot", ("moonshot/", "openrouter/")),
+            (("gemini",), "gemini", ("gemini/",)),
+        ]
+        model_lower = model.lower()
+        for keywords, prefix, skip in _prefix_rules:
+            if any(kw in model_lower for kw in keywords) and not any(model.startswith(s) for s in skip):
+                model = f"{prefix}/{model}"
+                break
+
+        # Gateway/endpoint-specific prefixes (detected by api_base/api_key, not model name)
         if self.is_openrouter and not model.startswith("openrouter/"):
             model = f"openrouter/{model}"
-
-        # For Zhipu/Z.ai, ensure prefix is present
-        # Handle cases like "glm-4.7-flash" -> "zai/glm-4.7-flash"
-        if ("glm" in model.lower() or "zhipu" in model.lower()) and not (
-            model.startswith("zhipu/") or
-            model.startswith("zai/") or
-            model.startswith("openrouter/") or
-            model.startswith("hosted_vllm/")
-        ):
-            model = f"zai/{model}"
-
-        # For DashScope/Qwen, ensure dashscope/ prefix
-        if ("qwen" in model.lower() or "dashscope" in model.lower()) and not (
-            model.startswith("dashscope/") or
-            model.startswith("openrouter/")
-        ):
-            model = f"dashscope/{model}"
-
-        # For Moonshot/Kimi, ensure moonshot/ prefix (before vLLM check)
-        if ("moonshot" in model.lower() or "kimi" in model.lower()) and not (
-            model.startswith("moonshot/") or model.startswith("openrouter/")
-        ):
-            model = f"moonshot/{model}"
-
-        # For Gemini, ensure gemini/ prefix if not already present
-        if "gemini" in model.lower() and not model.startswith("gemini/"):
-            model = f"gemini/{model}"
-
-        # For vLLM, use hosted_vllm/ prefix per LiteLLM docs
-        # Convert openai/ prefix to hosted_vllm/ if user specified it
-        if self.is_vllm:
+        elif self.is_aihubmix:
+            model = f"openai/{model.split('/')[-1]}"
+        elif self.is_vllm:
             model = f"hosted_vllm/{model}"

         # kimi-k2.5 only supports temperature=1.0
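A minimal re-implementation of the keyword rules above, runnable on its own, to make the rewrites concrete (the rule table is copied from the diff; the function name is ours):

```python
_prefix_rules = [
    (("glm", "zhipu"), "zai", ("zhipu/", "zai/", "openrouter/", "hosted_vllm/")),
    (("qwen", "dashscope"), "dashscope", ("dashscope/", "openrouter/")),
    (("moonshot", "kimi"), "moonshot", ("moonshot/", "openrouter/")),
    (("gemini",), "gemini", ("gemini/",)),
]

def apply_prefix(model: str) -> str:
    """First matching rule wins; already-prefixed names pass through."""
    model_lower = model.lower()
    for keywords, prefix, skip in _prefix_rules:
        if any(kw in model_lower for kw in keywords) and not any(model.startswith(s) for s in skip):
            return f"{prefix}/{model}"
    return model

assert apply_prefix("glm-4.7-flash") == "zai/glm-4.7-flash"
assert apply_prefix("kimi-k2.5") == "moonshot/kimi-k2.5"
assert apply_prefix("zai/glm-4.7-flash") == "zai/glm-4.7-flash"  # skip list respected
```

After these rules, the gateway branches still apply: with AiHubMix detected, `anthropic/claude-opus-4-5` becomes `openai/claude-opus-4-5`, since the gateway speaks the OpenAI protocol.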
@@ -143,6 +136,10 @@ class LiteLLMProvider(LLMProvider):
         if self.api_base:
             kwargs["api_base"] = self.api_base

+        # Pass extra headers (e.g. APP-Code for AiHubMix)
+        if self.extra_headers:
+            kwargs["extra_headers"] = self.extra_headers
+
         if tools:
             kwargs["tools"] = tools
             kwargs["tool_choice"] = "auto"
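For context, a hedged sketch of the LiteLLM call these kwargs feed into; the model name, api_base, and `APP-Code` value are placeholders, and this block is not code from the repo:

```python
import litellm

response = litellm.completion(
    model="openai/claude-opus-4-5",               # AiHubMix is OpenAI-compatible
    api_base="https://aihubmix.com/v1",
    extra_headers={"APP-Code": "your-app-code"},  # forwarded as HTTP headers
    messages=[{"role": "user", "content": "ping"}],
    # api key read from OPENAI_API_KEY, as set in __init__ above
)
print(response.choices[0].message.content)
```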