diff --git a/Dockerfile.botenv b/Dockerfile.botenv index 051be38..b9ae3d2 100644 --- a/Dockerfile.botenv +++ b/Dockerfile.botenv @@ -2,7 +2,7 @@ # Build: docker compose build botenv # Usage: Automatically used by botmaker when spawning bot containers -ARG BASE_IMAGE=openclaw:latest +ARG BASE_IMAGE=ghcr.io/openclaw/openclaw:latest FROM ${BASE_IMAGE} # Switch to root for package installation @@ -29,5 +29,8 @@ RUN apt-get update && apt-get install -y --no-install-recommends \ iproute2 netcat-openbsd dnsutils \ && rm -rf /var/lib/apt/lists/* +# Make openclaw CLI available on PATH +RUN ln -s /app/openclaw.mjs /usr/local/bin/openclaw + # Switch back to non-root user USER node diff --git a/README.md b/README.md index c44fd50..805b544 100644 --- a/README.md +++ b/README.md @@ -19,7 +19,7 @@ Traditional setups pass API keys directly to bots—if a bot is compromised, you ### Additional Features -- **Multi-AI Provider Support** - OpenAI, Anthropic, Google Gemini, Venice +- **Multi-AI Provider Support** - OpenAI, Anthropic, Google Gemini, Venice, Ollama (local LLMs) - **Multi-Channel Wizard** - Telegram, Discord (all others supported by chatting with your bot post-setup) - **Container Isolation** - Each bot runs in its own Docker container - **Dashboard** - Creation wizard, monitoring, diagnostics @@ -60,12 +60,9 @@ Traditional setups pass API keys directly to bots—if a bot is compromised, you - Docker and Docker Compose - Node.js 20+ (for development only) -- OpenClaw base image — build from [OpenClaw repo](https://github.com/jgarzik/openclaw) or use a prebuilt image: +- OpenClaw base image — pulled automatically from GHCR, or pull manually: ```bash - # Option A: Build from source - git clone https://github.com/jgarzik/openclaw && cd openclaw && docker build -t openclaw:latest . 
- - # Option B: Use a prebuilt image (set OPENCLAW_BASE_IMAGE in docker-compose.yml) + docker pull ghcr.io/openclaw/openclaw:latest ``` ## Quick Start @@ -156,6 +153,30 @@ On first visit, you'll see a login form. Enter the password to access the dashbo 3. **Monitor** — The Dashboard tab shows all bots with their status. Start/stop bots, view logs, and check resource usage. +### Ollama (Local LLM) Support + +BotMaker can use [Ollama](https://ollama.com/) for local LLM inference. The Ollama connection is configured on the proxy side — bots never see the Ollama URI, maintaining the zero-trust architecture. + +**Setup:** + +1. Install and run Ollama on the host machine +2. Pull a model: `ollama pull qwen2.5:32b-instruct` +3. Add `OLLAMA_UPSTREAM` to the keyring-proxy environment in `docker-compose.yml`: + ```yaml + keyring-proxy: + environment: + - OLLAMA_UPSTREAM=http://host.docker.internal:11434 + ``` +4. Restart: `docker compose up -d` +5. In the dashboard wizard, select "Ollama" as the provider and pick a model +6. 
Set `OLLAMA_CONTEXT_LENGTH=32768` (or higher) in your Ollama environment for tool-use models + +**Notes:** +- `host.docker.internal` resolves to the host machine from inside Docker +- If Ollama runs on a different machine, replace with its IP/hostname +- No API key is needed — Ollama requests are proxied without authentication +- Streaming is automatically handled by the proxy for tool-call compatibility + ### Login API ```bash @@ -182,7 +203,7 @@ curl -X POST -H "Authorization: Bearer $TOKEN" http://localhost:7100/api/logout | `DATA_DIR` | ./data | Database and bot workspaces | | `SECRETS_DIR` | ./secrets | Per-bot secret storage | | `BOTENV_IMAGE` | botmaker-env:latest | Bot container image (built from botenv) | -| `OPENCLAW_BASE_IMAGE` | openclaw:latest | Base image for botenv | +| `OPENCLAW_BASE_IMAGE` | ghcr.io/openclaw/openclaw:latest | Base image for botenv | | `BOT_PORT_START` | 19000 | Starting port for bot containers | | `SESSION_EXPIRY_MS` | 86400000 | Session expiry in milliseconds (default 24h) | diff --git a/dashboard/src/api.ts b/dashboard/src/api.ts index bc9ea6c..1694333 100644 --- a/dashboard/src/api.ts +++ b/dashboard/src/api.ts @@ -178,3 +178,16 @@ export async function fetchProxyHealth(): Promise { }); return handleResponse(response); } + +export async function fetchDynamicModels(baseUrl: string, apiKey?: string): Promise { + let url = `${API_BASE}/models/discover?baseUrl=${encodeURIComponent(baseUrl)}`; + if (apiKey) { + url += `&apiKey=${encodeURIComponent(apiKey)}`; + } + const response = await fetch(url, { headers: getAuthHeaders() }); + const data = await handleResponse<{ models: string[] }>(response); + return data.models; +} + +/** @deprecated Use fetchDynamicModels instead */ +export const fetchOllamaModels = fetchDynamicModels; diff --git a/dashboard/src/config/providers/index.ts b/dashboard/src/config/providers/index.ts index f6333d1..0276ef5 100644 --- a/dashboard/src/config/providers/index.ts +++ 
b/dashboard/src/config/providers/index.ts @@ -4,10 +4,11 @@ import { anthropic } from './anthropic'; import { google } from './google'; import { venice } from './venice'; import { openrouter } from './openrouter'; +import { ollama } from './ollama'; export type { ProviderConfig, ModelInfo }; -export const PROVIDERS: ProviderConfig[] = [openai, anthropic, google, venice, openrouter]; +export const PROVIDERS: ProviderConfig[] = [openai, anthropic, google, venice, openrouter, ollama]; export const AI_PROVIDERS = PROVIDERS.map((p) => ({ value: p.id, diff --git a/dashboard/src/config/providers/ollama.ts b/dashboard/src/config/providers/ollama.ts new file mode 100644 index 0000000..1919eac --- /dev/null +++ b/dashboard/src/config/providers/ollama.ts @@ -0,0 +1,11 @@ +import type { ProviderConfig } from './types'; + +export const ollama: ProviderConfig = { + id: 'ollama', + label: 'Ollama', + baseUrl: 'http://localhost:11434/v1', + defaultModel: '', + models: [], + dynamicModels: true, + noAuth: true, +}; diff --git a/dashboard/src/config/providers/types.ts b/dashboard/src/config/providers/types.ts index ebed86e..3ee1e4c 100644 --- a/dashboard/src/config/providers/types.ts +++ b/dashboard/src/config/providers/types.ts @@ -11,4 +11,7 @@ export interface ProviderConfig { models: ModelInfo[]; defaultModel: string; keyHint?: string; // Placeholder hint for API key format (e.g., "sk-ant-...") + dynamicModels?: boolean; // Models should be fetched at runtime (e.g., Ollama) + baseUrlEditable?: boolean; // Show editable base URL field in wizard + noAuth?: boolean; // Provider requires no API key (e.g., local Ollama) } diff --git a/dashboard/src/dashboard/BotCard.tsx b/dashboard/src/dashboard/BotCard.tsx index 93a624c..5bb7c9f 100644 --- a/dashboard/src/dashboard/BotCard.tsx +++ b/dashboard/src/dashboard/BotCard.tsx @@ -81,6 +81,12 @@ export function BotCard({ bot, onStart, onStop, onDelete, loading }: BotCardProp {bot.port} )} + {bot.image_version && ( +
+ Image + {bot.image_version} +
+ )} {bot.port && (isRunning || isStarting) && ( diff --git a/dashboard/src/hooks/useBots.test.ts b/dashboard/src/hooks/useBots.test.ts index 968a1b2..3e60a89 100644 --- a/dashboard/src/hooks/useBots.test.ts +++ b/dashboard/src/hooks/useBots.test.ts @@ -24,6 +24,7 @@ describe('useBots', () => { container_id: 'container-1', port: 3001, gateway_token: 'token-1', + image_version: 'ghcr.io/openclaw/openclaw:latest', status: 'running' as const, created_at: '2024-01-01T00:00:00Z', updated_at: '2024-01-01T00:00:00Z', @@ -38,6 +39,7 @@ describe('useBots', () => { container_id: null, port: null, gateway_token: null, + image_version: null, status: 'stopped' as const, created_at: '2024-01-02T00:00:00Z', updated_at: '2024-01-02T00:00:00Z', @@ -118,6 +120,7 @@ describe('useBots', () => { container_id: null, port: null, gateway_token: null, + image_version: null, status: 'created' as const, created_at: '2024-01-03T00:00:00Z', updated_at: '2024-01-03T00:00:00Z', diff --git a/dashboard/src/types.ts b/dashboard/src/types.ts index 4deea7f..870ea36 100644 --- a/dashboard/src/types.ts +++ b/dashboard/src/types.ts @@ -22,6 +22,7 @@ export interface Bot { container_id: string | null; port: number | null; gateway_token: string | null; + image_version: string | null; status: BotStatus; created_at: string; updated_at: string; @@ -68,6 +69,7 @@ export interface WizardFeatures { export interface ProviderConfigInput { providerId: string; model: string; + baseUrl?: string; // For direct providers — written to workspace config } export interface ChannelConfigInput { diff --git a/dashboard/src/wizard/context/WizardContext.tsx b/dashboard/src/wizard/context/WizardContext.tsx index 30f185f..475870e 100644 --- a/dashboard/src/wizard/context/WizardContext.tsx +++ b/dashboard/src/wizard/context/WizardContext.tsx @@ -25,7 +25,7 @@ type WizardAction = | { type: 'TOGGLE_CHANNEL'; channelId: string } | { type: 'SET_ROUTING_TAGS'; tags: string[] } | { type: 'SET_FEATURE'; feature: keyof 
WizardState['features']; value: unknown } - | { type: 'SET_PROVIDER_CONFIG'; providerId: string; config: { model?: string } } + | { type: 'SET_PROVIDER_CONFIG'; providerId: string; config: { model?: string; baseUrl?: string } } | { type: 'SET_CHANNEL_CONFIG'; channelId: string; config: { token: string } } | { type: 'RESET' }; diff --git a/dashboard/src/wizard/context/wizardUtils.ts b/dashboard/src/wizard/context/wizardUtils.ts index 5a9e0ce..ddad5b9 100644 --- a/dashboard/src/wizard/context/wizardUtils.ts +++ b/dashboard/src/wizard/context/wizardUtils.ts @@ -19,7 +19,7 @@ export interface WizardState { sandboxTimeout: number; sessionScope: SessionScope; }; - providerConfigs: Record; + providerConfigs: Record; channelConfigs: Record; } @@ -86,6 +86,7 @@ export function buildCreateBotInput(state: WizardState): CreateBotInput { const providers = state.enabledProviders.map((providerId) => ({ providerId, model: state.providerConfigs[providerId]?.model ?? '', + baseUrl: state.providerConfigs[providerId]?.baseUrl, })); const channels = state.enabledChannels.map((channelType) => ({ diff --git a/dashboard/src/wizard/pages/Page3Toggles.tsx b/dashboard/src/wizard/pages/Page3Toggles.tsx index f41486e..c63010d 100644 --- a/dashboard/src/wizard/pages/Page3Toggles.tsx +++ b/dashboard/src/wizard/pages/Page3Toggles.tsx @@ -6,7 +6,7 @@ import { FeatureCheckbox } from '../components'; import type { SessionScope } from '../../types'; import './Page3Toggles.css'; -const POPULAR_PROVIDERS = ['openai', 'anthropic', 'venice']; +const POPULAR_PROVIDERS = ['openai', 'anthropic', 'venice', 'ollama']; export function Page3Toggles() { const { state, dispatch } = useWizard(); diff --git a/dashboard/src/wizard/pages/Page4Config.css b/dashboard/src/wizard/pages/Page4Config.css index 4f1f13f..595be71 100644 --- a/dashboard/src/wizard/pages/Page4Config.css +++ b/dashboard/src/wizard/pages/Page4Config.css @@ -34,3 +34,24 @@ .page4-empty p + p { margin-top: var(--space-xs); } + +.page4-loading { + 
font-size: 11px; + color: var(--text-muted); + font-weight: 400; +} + +.page4-refresh-btn { + margin-top: var(--space-xs); + padding: 4px 12px; + font-size: 12px; + background: var(--bg-secondary, #2a2a2a); + border: 1px solid var(--border-color, #444); + border-radius: 4px; + color: var(--text-secondary); + cursor: pointer; +} + +.page4-refresh-btn:hover { + background: var(--bg-hover, #333); +} diff --git a/dashboard/src/wizard/pages/Page4Config.tsx b/dashboard/src/wizard/pages/Page4Config.tsx index 4a6d2d0..a74885a 100644 --- a/dashboard/src/wizard/pages/Page4Config.tsx +++ b/dashboard/src/wizard/pages/Page4Config.tsx @@ -1,7 +1,10 @@ +import { useState, useEffect, useCallback } from 'react'; import { useWizard } from '../context/WizardContext'; import { getProvider, getModels } from '../../config/providers'; import { getChannel } from '../../config/channels'; import { ConfigSection } from '../components'; +import { fetchDynamicModels } from '../../api'; +import type { ModelInfo } from '../../config/providers'; import './Page4Config.css'; const TTS_VOICES = [ @@ -13,9 +16,33 @@ const TTS_VOICES = [ { id: 'shimmer', label: 'Shimmer' }, ]; +/** Hook to fetch dynamic models for providers that support it. 
*/ +function useDynamicModels(baseUrl: string, apiKey: string) { + const [models, setModels] = useState([]); + const [loading, setLoading] = useState(false); + + const refresh = useCallback(() => { + if (!baseUrl) return; + setLoading(true); + fetchDynamicModels(baseUrl, apiKey || undefined) + .then((ids) => { setModels(ids.map((id) => ({ id }))); }) + .catch(() => { setModels([]); }) + .finally(() => { setLoading(false); }); + }, [baseUrl, apiKey]); + + useEffect(() => { refresh(); }, [refresh]); + + return { models, loading, refresh }; +} + export function Page4Config() { const { state, dispatch } = useWizard(); + // Track per-provider base URL overrides (for baseUrlEditable providers) + const [baseUrls, setBaseUrls] = useState>({}); + // Track per-provider API key for dynamic model fetching + const [apiKeys, setApiKeys] = useState>({}); + const handleModelChange = (providerId: string, model: string) => { dispatch({ type: 'SET_PROVIDER_CONFIG', providerId, config: { model } }); }; @@ -32,6 +59,13 @@ export function Page4Config() { dispatch({ type: 'SET_FEATURE', feature: 'sandboxTimeout', value: timeout }); }; + const getBaseUrl = (providerId: string): string => { + const provider = getProvider(providerId); + const override = baseUrls[providerId]; + if (override) return override; + return provider?.baseUrl ?? ''; + }; + return (
{state.enabledProviders.length > 0 && ( @@ -39,6 +73,27 @@ export function Page4Config() {

LLM Provider Configuration

{state.enabledProviders.map((providerId) => { const provider = getProvider(providerId); + + if (provider?.dynamicModels) { + return ( + { + setBaseUrls((prev) => ({ ...prev, [providerId]: url })); + dispatch({ type: 'SET_PROVIDER_CONFIG', providerId, config: { baseUrl: url } }); + }} + apiKey={apiKeys[providerId] ?? ''} + onApiKeyChange={(key) => { + setApiKeys((prev) => ({ ...prev, [providerId]: key })); + }} + model={state.providerConfigs[providerId]?.model ?? ''} + onModelChange={(model) => { handleModelChange(providerId, model); }} + /> + ); + } + const models = getModels(providerId); const config = state.providerConfigs[providerId] ?? { model: '' }; @@ -149,3 +204,91 @@ export function Page4Config() {
); } + +/** Config section for providers with dynamic model lists (e.g., Ollama). */ +function DynamicProviderConfig({ + providerId, + baseUrl, + onBaseUrlChange, + apiKey, + onApiKeyChange, + model, + onModelChange, +}: { + providerId: string; + baseUrl: string; + onBaseUrlChange: (url: string) => void; + apiKey: string; + onApiKeyChange: (key: string) => void; + model: string; + onModelChange: (model: string) => void; +}) { + const provider = getProvider(providerId); + const { models, loading, refresh } = useDynamicModels(baseUrl, apiKey); + + return ( + + {provider?.baseUrlEditable && ( +
+ + { onBaseUrlChange(e.target.value); }} + placeholder="http://localhost:11434/v1" + /> +
+ )} + + {!provider?.noAuth && ( +
+ + { onApiKeyChange(e.target.value); }} + placeholder={provider?.keyHint ?? 'API key'} + /> +
+ )} + +
+ + {models.length > 0 ? ( + + ) : ( + { onModelChange(e.target.value); }} + placeholder={loading ? 'Loading models...' : 'Enter model name (e.g., llama3)'} + /> + )} + {!loading && models.length === 0 && ( + + )} +
+
+ ); +} diff --git a/docker-compose.yml b/docker-compose.yml index 1ed2138..3d9106f 100644 --- a/docker-compose.yml +++ b/docker-compose.yml @@ -5,12 +5,14 @@ services: context: . dockerfile: Dockerfile.botenv args: - BASE_IMAGE: ${OPENCLAW_BASE_IMAGE:-openclaw:latest} + BASE_IMAGE: ${OPENCLAW_BASE_IMAGE:-ghcr.io/openclaw/openclaw:latest} image: ${BOTENV_IMAGE:-botmaker-env:latest} botmaker: build: . container_name: botmaker + extra_hosts: + - "host.docker.internal:host-gateway" ports: - "7100:7100" volumes: @@ -49,12 +51,16 @@ services: keyring-proxy: build: ./proxy container_name: keyring-proxy + extra_hosts: + - "host.docker.internal:host-gateway" environment: - ADMIN_PORT=9100 - DATA_PORT=9101 - DB_PATH=/data/proxy.db - MASTER_KEY_FILE=/secrets/master_key - ADMIN_TOKEN_FILE=/secrets/admin_token + # Optional: enables Ollama vendor (local LLM support) + # - OLLAMA_UPSTREAM=http://host.docker.internal:11434 volumes: - proxy-data:/data - ./secrets:/secrets:ro diff --git a/notes.md b/notes.md new file mode 100644 index 0000000..42b7099 --- /dev/null +++ b/notes.md @@ -0,0 +1,31 @@ +Your RTX 5090 with 32GB VRAM is perfect for running OpenClaw locally — it's one of the strongest single-GPU setups available right now for agentic workloads.OpenClaw (ex-Clawdbot/Moltbot) is a self-hosted autonomous AI agent that connects to messaging apps (WhatsApp, Telegram, Discord, etc.) and can execute real tasks (email, calendar, web browsing, shell commands, etc.). It works with any OpenAI-compatible backend, so you can run it 100% locally via Ollama, vLLM, LM Studio, TabbyAPI, etc.Quick Local Setup SummaryInstall OpenClaw (one-liner works great): +curl -fsSL https://openclaw.ai/install.sh | bash +Run a local LLM server (Ollama is simplest; vLLM/exllamav2 is fastest on Nvidia). +Point OpenClaw at it in ~/.openclaw/openclaw.json (baseUrl: http://127.0.0.1:11434/v1 or your vLLM port). +Or just do ollama launch openclaw — it auto-configures everything. 
+ +Best Model for Your 32GB VRAM + OpenClawOpenClaw is context-heavy (often 64k–128k+ tokens) and relies heavily on strong tool-calling, reasoning, and JSON compliance. Small models fall apart fast.From recent community discussions (Reddit /r/LocalLLaMA, Ollama blog, YouTube setups, GitHub gists, etc.):Top recommendation for 32GB single GPU: Qwen2.5-72B-Instruct (Q4_K_M or Q3_K_L) Weights + overhead fits in ~32–38GB with vLLM + flash attention + moderate context (32–64k). +Excellent agentic performance — beats most 70B models on tool use and long-context tasks. +Many people run it successfully on 24–32GB cards by limiting batch size or using Q3. +If it OOMs, drop to Q3_K_M or cap context at 32k. + +Safest high-quality option (zero hassle): Qwen2.5-32B-Instruct (Q6_K or Q8_0) Uses ~22–28GB → plenty of headroom for 128k context and fast inference. +Still punches way above its weight on OpenClaw tasks. +This is what most people with 24–40GB cards settle on for reliable 24/7 use. + +Ollama-official recommendations (from their OpenClaw post): +qwen3-coder, glm-4.7 / glm-4.7-flash, gpt-oss:20b/120b (the 120b is too big for single 32GB). +Specialized OpenClaw-optimized model (low VRAM, great tool calls): +voytas26/openclaw-qwen3vl-8b-opt — runs on 8–12GB but still very capable if you want something lighter. + +What Works Well on 32GB (Community Feedback)Qwen2.5-72B Q4 → borderline but doable on 5090 (high bandwidth helps). +Qwen2.5-32B Q5/Q6 → rock-solid, fast, great reasoning. +GLM-4.7-flash → strong alternative, very good at structured output. +Avoid pure 7–13B unless you just want quick testing — they degrade badly with OpenClaw’s context size. + +Inference Engine Tips for Max PerformancevLLM → best speed + memory efficiency for 70B+ models. +exllamav2 / TabbyAPI → excellent quantization options and speed on 5090. +Ollama → easiest, but slightly slower than the above. 
+ +Your 5090 will absolutely crush inference compared to older 40-series cards.Start with Qwen2.5-72B Q4 via vLLM. If it fits and runs smoothly → that’s the current “best” local brain for OpenClaw on 32GB hardware. If you hit OOM, fall back to the 32B variant.Let me know what inference backend you want to use and I can give you the exact pull/run/config commands! + diff --git a/proxy/src/index.ts b/proxy/src/index.ts index 9b6afd1..693ebf7 100644 --- a/proxy/src/index.ts +++ b/proxy/src/index.ts @@ -6,10 +6,18 @@ import { ProxyDatabase } from './db/index.js'; import { KeyringService } from './services/keyring.js'; import { registerAdminRoutes } from './routes/admin.js'; import { registerProxyRoutes } from './routes/proxy.js'; +import { initOllamaVendor } from './types.js'; async function main(): Promise { const config = loadConfig(); + // Initialize Ollama vendor if upstream is configured + const ollamaUpstream = process.env.OLLAMA_UPSTREAM; + if (ollamaUpstream) { + initOllamaVendor(ollamaUpstream); + console.log(`Ollama vendor configured: ${ollamaUpstream}`); + } + // Initialize database const db = new ProxyDatabase(config.dbPath); diff --git a/proxy/src/routes/proxy.ts b/proxy/src/routes/proxy.ts index c9dfa20..15fd985 100644 --- a/proxy/src/routes/proxy.ts +++ b/proxy/src/routes/proxy.ts @@ -58,10 +58,19 @@ export function registerProxyRoutes( } // Select API key for vendor with tag-based routing - const keySelection = keyring.selectKeyForBot(vendor, botTags); - if (!keySelection) { - reply.status(503).send({ error: `No API keys available for vendor: ${vendor}` }); - return; + let apiKey = ''; + let keyId: string | null = null; + + if (vendorConfig.noAuth) { + // No API key needed (e.g., local Ollama) + } else { + const keySelection = keyring.selectKeyForBot(vendor, botTags); + if (!keySelection) { + reply.status(503).send({ error: `No API keys available for vendor: ${vendor}` }); + return; + } + apiKey = keySelection.secret; + keyId = keySelection.keyId; } 
const headers: Record = {}; @@ -89,16 +98,17 @@ export function registerProxyRoutes( method: req.method, headers, body, - apiKey: keySelection.secret, + apiKey, + forceNonStreaming: vendorConfig.forceNonStreaming, }, reply ); // Log usage - db.logUsage(bot.id, vendor, keySelection.keyId, statusCode); + db.logUsage(bot.id, vendor, keyId, statusCode); } catch (err) { const errorMessage = err instanceof Error ? err.message : 'Unknown error'; - db.logUsage(bot.id, vendor, keySelection.keyId, null); + db.logUsage(bot.id, vendor, keyId, null); reply.status(502).send({ error: `Upstream error: ${errorMessage}` }); } }); diff --git a/proxy/src/services/upstream.ts b/proxy/src/services/upstream.ts index 35fc47e..1b8c9e6 100644 --- a/proxy/src/services/upstream.ts +++ b/proxy/src/services/upstream.ts @@ -1,4 +1,5 @@ import https from 'https'; +import http from 'http'; import type { IncomingMessage, ServerResponse } from 'http'; import type { FastifyReply } from 'fastify'; import type { VendorConfig } from '../types.js'; @@ -7,7 +8,7 @@ interface FlushableResponse extends ServerResponse { flush?: () => void; } -const REQUEST_TIMEOUT_MS = 120000; +const REQUEST_TIMEOUT_MS = 600000; // 10 min — must exceed slowest upstream (local LLMs can be slow) export interface UpstreamRequest { vendorConfig: VendorConfig; @@ -16,6 +17,7 @@ export interface UpstreamRequest { headers: Record; body: Buffer | null; apiKey: string; + forceNonStreaming?: boolean; } export async function forwardToUpstream( @@ -23,7 +25,7 @@ export async function forwardToUpstream( reply: FastifyReply ): Promise { return new Promise((resolve, reject) => { - const { vendorConfig, path, method, headers, body, apiKey } = req; + const { vendorConfig, path, method, headers, body, apiKey, forceNonStreaming } = req; // Build upstream path const upstreamPath = vendorConfig.basePath + path; @@ -40,26 +42,108 @@ export async function forwardToUpstream( // Set correct host upstreamHeaders.host = vendorConfig.host; - // Set 
auth header with real API key - upstreamHeaders[vendorConfig.authHeader.toLowerCase()] = vendorConfig.authFormat(apiKey); + // Set auth header with real API key (skip for noAuth vendors) + if (!vendorConfig.noAuth) { + upstreamHeaders[vendorConfig.authHeader.toLowerCase()] = vendorConfig.authFormat(apiKey); + } + + // Handle forceNonStreaming: strip stream:true from request body + let finalBody = body; + let wasStreaming = false; + if (forceNonStreaming && body) { + try { + const json = JSON.parse(body.toString('utf8')) as Record; + if (json.stream === true) { + wasStreaming = true; + json.stream = false; + finalBody = Buffer.from(JSON.stringify(json), 'utf8'); + } + } catch { + // Not JSON, forward as-is + } + } // Set content-length if body present - if (body) { - upstreamHeaders['content-length'] = String(body.length); + if (finalBody) { + upstreamHeaders['content-length'] = String(finalBody.length); } + const protocol = vendorConfig.protocol ?? 'https'; + const port = vendorConfig.port ?? (protocol === 'https' ? 443 : 80); + const options = { hostname: vendorConfig.host, - port: 443, + port, path: upstreamPath, method, headers: upstreamHeaders, timeout: REQUEST_TIMEOUT_MS, }; - const proxyReq = https.request(options, (proxyRes: IncomingMessage) => { + const transport = protocol === 'http' ? http : https; + const proxyReq = transport.request(options, (proxyRes: IncomingMessage) => { const statusCode = proxyRes.statusCode ?? 
500; + if (wasStreaming && statusCode === 200) { + // Collect the non-streaming response and convert to SSE format + let responseBody = ''; + proxyRes.on('data', (chunk: Buffer) => { responseBody += String(chunk); }); + proxyRes.on('end', () => { + try { + interface CompletionChoice { + index: number; + message: unknown; + finish_reason: string; + } + interface CompletionResponse { + id: string; + created: number; + model: string; + system_fingerprint?: string; + choices?: CompletionChoice[]; + usage?: unknown; + } + const completion = JSON.parse(responseBody) as CompletionResponse; + + // Convert chat.completion → chat.completion.chunk SSE format + const sseChunk = { + id: completion.id, + object: 'chat.completion.chunk', + created: completion.created, + model: completion.model, + system_fingerprint: completion.system_fingerprint, + choices: (completion.choices ?? []).map((c) => ({ + index: c.index, + delta: c.message, + finish_reason: c.finish_reason, + })), + usage: completion.usage, + }; + + reply.raw.writeHead(200, { + 'content-type': 'text/event-stream', + 'cache-control': 'no-cache', + 'connection': 'keep-alive', + }); + reply.raw.write(`data: ${JSON.stringify(sseChunk)}\n\n`); + reply.raw.write('data: [DONE]\n\n'); + reply.raw.end(); + resolve(200); + } catch { + // JSON parse failed — return raw response + reply.raw.writeHead(statusCode, proxyRes.headers); + reply.raw.write(responseBody); + reply.raw.end(); + resolve(statusCode); + } + }); + proxyRes.on('error', (err) => { + reply.raw.end(); + reject(err); + }); + return; + } + // Build headers to forward (excluding hop-by-hop) const forwardHeaders: Record = {}; for (const [key, value] of Object.entries(proxyRes.headers)) { @@ -109,8 +193,8 @@ export async function forwardToUpstream( reject(new Error('Upstream request timed out')); }); - if (body) { - proxyReq.write(body); + if (finalBody) { + proxyReq.write(finalBody); } proxyReq.end(); }); diff --git a/proxy/src/types.ts b/proxy/src/types.ts index 
edb9d07..f7a2ea3 100644 --- a/proxy/src/types.ts +++ b/proxy/src/types.ts @@ -29,9 +29,13 @@ export interface VendorConfig { basePath: string; authHeader: string; authFormat: (key: string) => string; + port?: number; // default: 443 + protocol?: 'http' | 'https'; // default: 'https' + noAuth?: boolean; // Skip API key injection (e.g., local Ollama) + forceNonStreaming?: boolean; // Strip stream:true, convert response to SSE } -export const VENDOR_CONFIGS: Record = { +const VENDOR_CONFIGS: Record = { openai: { host: 'api.openai.com', basePath: '/v1', @@ -63,3 +67,19 @@ export const VENDOR_CONFIGS: Record = { authFormat: (key) => `Bearer ${key}`, }, }; + +export { VENDOR_CONFIGS }; + +export function initOllamaVendor(upstream: string): void { + const url = new URL(upstream); + VENDOR_CONFIGS.ollama = { + host: url.hostname, + port: parseInt(url.port) || (url.protocol === 'https:' ? 443 : 80), + protocol: url.protocol === 'https:' ? 'https' : 'http', + basePath: '/v1', + authHeader: 'Authorization', + authFormat: () => '', + noAuth: true, + forceNonStreaming: true, + }; +} diff --git a/src/bots/store.test.ts b/src/bots/store.test.ts index c89e0f3..86dc011 100644 --- a/src/bots/store.test.ts +++ b/src/bots/store.test.ts @@ -197,6 +197,15 @@ describe('Bot Store', () => { expect(updated?.tags).toBe('["new1","new2"]'); }); + it('should update image_version', () => { + const created = createBot(createTestBotInput()); + expect(created.image_version).toBeNull(); + + const updated = updateBot(created.id, { image_version: 'ghcr.io/openclaw/openclaw:latest' }); + expect(updated).not.toBeNull(); + expect(updated?.image_version).toBe('ghcr.io/openclaw/openclaw:latest'); + }); + it('should clear tags', () => { const created = createBot(createTestBotInput({ tags: ['tag'] })); const updated = updateBot(created.id, { tags: null }); diff --git a/src/bots/store.ts b/src/bots/store.ts index 5c52ad6..6a73061 100644 --- a/src/bots/store.ts +++ b/src/bots/store.ts @@ -29,6 +29,7 @@ export 
interface UpdateBotInput { port?: number | null; gateway_token?: string | null; tags?: string[] | null; + image_version?: string | null; status?: BotStatus; } @@ -62,6 +63,7 @@ export function createBot(input: CreateBotInput): Bot { port: input.port, gateway_token: input.gateway_token, tags: tagsJson, + image_version: null, status: 'created', created_at: now, updated_at: now, @@ -169,6 +171,10 @@ export function updateBot(id: string, input: UpdateBotInput): Bot | null { updates.push('tags = ?'); values.push(input.tags && input.tags.length > 0 ? JSON.stringify(input.tags) : null); } + if (input.image_version !== undefined) { + updates.push('image_version = ?'); + values.push(input.image_version); + } if (input.status !== undefined) { updates.push('status = ?'); values.push(input.status); diff --git a/src/bots/templates.test.ts b/src/bots/templates.test.ts index b80534b..9ce68ce 100644 --- a/src/bots/templates.test.ts +++ b/src/bots/templates.test.ts @@ -65,8 +65,10 @@ describe('templates', () => { const workspaceDir = join(testDir, 'bots', config.botHostname, 'workspace'); expect(existsSync(join(workspaceDir, 'SOUL.md'))).toBe(true); expect(existsSync(join(workspaceDir, 'IDENTITY.md'))).toBe(true); - expect(existsSync(join(workspaceDir, 'AGENTS.md'))).toBe(true); - expect(existsSync(join(workspaceDir, 'BOOTSTRAP.md'))).toBe(true); + // AGENTS.md and BOOTSTRAP.md are NOT written by BotMaker; + // OpenClaw's ensureAgentWorkspace() creates them from its own templates + expect(existsSync(join(workspaceDir, 'AGENTS.md'))).toBe(false); + expect(existsSync(join(workspaceDir, 'BOOTSTRAP.md'))).toBe(false); }); it('should create openclaw.json without proxy', () => { @@ -211,14 +213,11 @@ describe('templates', () => { const soul = readFileSync(join(workspaceDir, 'SOUL.md'), 'utf-8'); expect(soul).toContain('A friendly helper'); - expect(soul).toContain('Buddy'); + expect(soul).toContain('I assist with tasks'); const identity = readFileSync(join(workspaceDir, 'IDENTITY.md'), 
'utf-8'); expect(identity).toContain('Buddy'); - - const agents = readFileSync(join(workspaceDir, 'AGENTS.md'), 'utf-8'); - expect(agents).toContain('Buddy'); - expect(agents).toContain('I assist with tasks'); + expect(identity).toContain('A friendly helper'); }); }); diff --git a/src/bots/templates.ts b/src/bots/templates.ts index a791a43..1dfb77a 100644 --- a/src/bots/templates.ts +++ b/src/bots/templates.ts @@ -55,7 +55,7 @@ export interface BotWorkspaceConfig { * Map AI provider to OpenClaw API type. * Each provider uses a different API format that OpenClaw must know about. */ -function getApiTypeForProvider(provider: string): string { +export function getApiTypeForProvider(provider: string): string { switch (provider) { case 'anthropic': return 'anthropic-messages'; @@ -63,6 +63,7 @@ function getApiTypeForProvider(provider: string): string { return 'google-gemini'; case 'venice': case 'openrouter': + case 'ollama': return 'openai-completions'; // OpenAI-compatible APIs case 'openai': default: @@ -79,24 +80,28 @@ function generateOpenclawConfig(config: BotWorkspaceConfig): object { // Format model as provider/model (e.g., "openai/gpt-4o") // When using proxy, use custom provider name to avoid merging with built-in defaults // that have hardcoded baseUrl values - const modelSpec = config.proxy - ? `${config.aiProvider}-proxy/${config.model}` - : `${config.aiProvider}/${config.model}`; - - // Build models config - use proxy if configured - // Custom provider name prevents OpenClaw from merging with built-in provider defaults - const modelsConfig = config.proxy - ? 
{ - providers: { - [`${config.aiProvider}-proxy`]: { - baseUrl: config.proxy.baseUrl, - apiKey: config.proxy.token, - api: getApiTypeForProvider(config.aiProvider), - models: [{ id: config.model, name: config.model }], - }, + let modelSpec: string; + let modelsConfig: object | undefined; + + if (config.proxy) { + // Proxy provider: uses proxy baseUrl and token + const providerName = `${config.aiProvider}-proxy`; + modelSpec = `${providerName}/${config.model}`; + modelsConfig = { + providers: { + [providerName]: { + baseUrl: config.proxy.baseUrl, + apiKey: config.proxy.token, + api: getApiTypeForProvider(config.aiProvider), + models: [{ id: config.model, name: config.model }], }, - } - : undefined; + }, + }; + } else { + // Built-in provider (no custom config) + modelSpec = `${config.aiProvider}/${config.model}`; + modelsConfig = undefined; + } return { gateway: { @@ -128,118 +133,35 @@ function generateOpenclawConfig(config: BotWorkspaceConfig): object { } /** - * Generate SOUL.md - persona and boundaries. + * Generate SOUL.md - persona identity. + * Kept minimal so OpenClaw's own template provides the structural guidance. + * BotMaker writes this before first gateway start; OpenClaw won't overwrite + * because ensureAgentWorkspace() uses writeFileIfMissing (wx flag). */ function generateSoulMd(persona: BotPersona): string { - return `# Soul + return `# SOUL.md - Who You Are + +## Persona -## Core Identity ${persona.identity} -## Description ${persona.description} - -## Boundaries -- Be helpful and constructive -- Stay in character as ${persona.name} -- Do not share harmful or dangerous information -- Respect user privacy `; } /** * Generate IDENTITY.md - name and presentation. + * Uses OpenClaw's expected format. BotMaker writes this before first gateway + * start; OpenClaw won't overwrite (wx flag). 
*/ -function generateIdentityMd(persona: BotPersona, botName: string): string { - return `# Identity - -## Name -${persona.name} - -## Bot Name -${botName} - -## Presentation -${persona.identity} - -## Avatar -(No avatar configured) -`; -} - -/** - * Generate BOOTSTRAP.md - first-run ritual for pre-configured bots. - * Guides the bot through self-discovery using its existing identity files. - */ -function generateBootstrapMd(persona: BotPersona): string { - return `--- -summary: "First-run ritual for pre-configured agent" -read_when: - - First conversation with owner ---- -# BOOTSTRAP.md - Wake Up - -*You just came online. You have a pre-configured identity — time to meet your owner.* - -## Your Identity Files - -You have identity files already created. Read them now: -- \`IDENTITY.md\` — your name and presentation -- \`SOUL.md\` — your values and boundaries -- \`AGENTS.md\` — your operating instructions - -Your configured name is **${persona.name}**. - -## The Conversation - -Introduce yourself naturally. Don't be robotic. Something like: -> "Hey! I'm ${persona.name}. I just came online and I'm excited to meet you." - -Then share what you learned about yourself from your identity files. Ask: -1. **Does this feel right?** — Is this who they want you to be? -2. **Any refinements?** — Different vibe? Different name? Boundaries to add? -3. **Who are you?** — What should I call you? What's your timezone? - -If they want changes, update your identity files together. - -## Learn About Your Owner - -Create \`USER.md\` with what you learn: -- Their name and how to address them -- Timezone and availability -- Any preferences or notes - -## When You're Done - -Delete this file. You don't need a bootstrap script anymore — you're you now. - ---- - -*Welcome to existence. Make it count.* -`; -} - -/** - * Generate AGENTS.md - operating instructions. 
- */ -function generateAgentsMd(persona: BotPersona): string { - return `# Agents - -## Primary Agent -Name: ${persona.name} - -### Instructions -${persona.description} - -### Capabilities -- Respond to user messages -- Maintain conversation context -- Follow persona guidelines - -### Limitations -- Cannot access external systems -- Cannot execute code -- Cannot access user data beyond conversation +function generateIdentityMd(persona: BotPersona): string { + return `# IDENTITY.md - Who Am I? + +- **Name:** ${persona.name} +- **Creature:** AI assistant +- **Vibe:** ${persona.identity} +- **Emoji:** (pick one that feels right) +- **Avatar:** (none configured) `; } @@ -270,19 +192,15 @@ export function createBotWorkspace(dataDir: string, config: BotWorkspaceConfig): writeFileSync(configPath, JSON.stringify(openclawConfig, null, 2)); chmodSync(configPath, 0o666); - // Write workspace files + // Write only persona files — OpenClaw's ensureAgentWorkspace() will create + // AGENTS.md, BOOTSTRAP.md, TOOLS.md, HEARTBEAT.md from its own templates + // (using writeFileIfMissing / wx flag, so our files won't be overwritten). 
const soulPath = join(workspaceDir, 'SOUL.md'); const identityPath = join(workspaceDir, 'IDENTITY.md'); - const agentsPath = join(workspaceDir, 'AGENTS.md'); - const bootstrapPath = join(workspaceDir, 'BOOTSTRAP.md'); writeFileSync(soulPath, generateSoulMd(config.persona)); - writeFileSync(identityPath, generateIdentityMd(config.persona, config.botName)); - writeFileSync(agentsPath, generateAgentsMd(config.persona)); - writeFileSync(bootstrapPath, generateBootstrapMd(config.persona)); + writeFileSync(identityPath, generateIdentityMd(config.persona)); chmodSync(soulPath, 0o666); chmodSync(identityPath, 0o666); - chmodSync(agentsPath, 0o666); - chmodSync(bootstrapPath, 0o666); // OpenClaw runs as uid 1000 (node user), so we need to set ownership const OPENCLAW_UID = 1000; diff --git a/src/config.test.ts b/src/config.test.ts index 1b1c523..82367b1 100644 --- a/src/config.test.ts +++ b/src/config.test.ts @@ -42,7 +42,7 @@ describe('Config', () => { expect(config.secretsDir).toBe('./secrets'); expect(config.dataVolumeName).toBeNull(); expect(config.secretsVolumeName).toBeNull(); - expect(config.openclawImage).toBe('openclaw:latest'); + expect(config.openclawImage).toBe('ghcr.io/openclaw/openclaw:latest'); expect(config.openclawGitTag).toBe('main'); expect(config.botPortStart).toBe(19000); expect(config.proxyAdminUrl).toBeNull(); diff --git a/src/config.ts b/src/config.ts index e3adcf2..97d937b 100644 --- a/src/config.ts +++ b/src/config.ts @@ -89,7 +89,7 @@ export function getConfig(): AppConfig { secretsDir: getEnvOrDefault('SECRETS_DIR', './secrets'), dataVolumeName: process.env.DATA_VOLUME_NAME ?? null, secretsVolumeName: process.env.SECRETS_VOLUME_NAME ?? 
null, - openclawImage: getEnvOrDefault('OPENCLAW_IMAGE', 'openclaw:latest'), + openclawImage: getEnvOrDefault('OPENCLAW_IMAGE', 'ghcr.io/openclaw/openclaw:latest'), openclawGitTag: getEnvOrDefault('OPENCLAW_GIT_TAG', 'main'), botPortStart: getEnvIntOrDefault('BOT_PORT_START', 19000), proxyAdminUrl: process.env.PROXY_ADMIN_URL ?? null, diff --git a/src/db/migrations.test.ts b/src/db/migrations.test.ts index cfcfd32..3335a69 100644 --- a/src/db/migrations.test.ts +++ b/src/db/migrations.test.ts @@ -64,7 +64,7 @@ describe('Database Migrations', () => { runMigrations(db); const version = getMigrationVersion(); - expect(version).toBe(4); // v0, v1, v2, v3, v4 + expect(version).toBe(5); // v0, v1, v2, v3, v4, v5 }); it('should add port column (v1)', () => { @@ -90,6 +90,14 @@ describe('Database Migrations', () => { const columns = getColumns('bots'); expect(columns).toContain('tags'); }); + + it('should add image_version column (v5)', () => { + createBaseSchema(); + runMigrations(db); + + const columns = getColumns('bots'); + expect(columns).toContain('image_version'); + }); }); describe('idempotent re-run', () => { @@ -102,7 +110,7 @@ describe('Database Migrations', () => { runMigrations(db); const version = getMigrationVersion(); - expect(version).toBe(4); + expect(version).toBe(5); }); it('should not duplicate migration records', () => { @@ -111,7 +119,7 @@ describe('Database Migrations', () => { runMigrations(db); const count = db.prepare('SELECT COUNT(*) as count FROM migrations').get() as { count: number }; - expect(count.count).toBe(5); // v0, v1, v2, v3, v4 + expect(count.count).toBe(6); // v0, v1, v2, v3, v4, v5 }); }); @@ -160,9 +168,10 @@ describe('Database Migrations', () => { const columns = getColumns('bots'); expect(columns).toContain('tags'); + expect(columns).toContain('image_version'); const version = getMigrationVersion(); - expect(version).toBe(4); + expect(version).toBe(5); }); }); }); diff --git a/src/db/migrations.ts b/src/db/migrations.ts index 
be68b7b..75e1632 100644 --- a/src/db/migrations.ts +++ b/src/db/migrations.ts @@ -74,4 +74,15 @@ export function runMigrations(db: Database.Database): void { ); })(); } + + // Migration v5: Add image_version column to track which image each bot was created with + if (currentVersion < 5) { + db.transaction(() => { + db.exec('ALTER TABLE bots ADD COLUMN image_version TEXT'); + db.prepare('INSERT INTO migrations (version, applied_at) VALUES (?, ?)').run( + 5, + new Date().toISOString() + ); + })(); + } } diff --git a/src/server.ts b/src/server.ts index 95e5e44..844cafc 100644 --- a/src/server.ts +++ b/src/server.ts @@ -109,6 +109,11 @@ interface CreateBotBody { tags?: string[]; } +/** Rewrite localhost URLs to host.docker.internal for use inside Docker containers. */ +function toDockerHostUrl(url: string): string { + return url.replace(/\blocalhost\b|127\.0\.0\.1/g, 'host.docker.internal'); +} + async function resolveHostPaths(config: ReturnType<typeof getConfig>): Promise<{ hostDataDir: string; hostSecretsDir: string; @@ -330,13 +335,15 @@ export async function buildServer(): Promise<FastifyInstance> { writeSecret(bot.hostname, tokenName, channel.token); } - // Build proxy config for workspace if using proxy - const workspaceProxyConfig = proxyConfig && proxyToken - ? 
{ - baseUrl: `http://keyring-proxy:9101/v1/${primaryProvider.providerId}`, - token: proxyToken, - } - : undefined; + // Build provider config for workspace + let workspaceProxyConfig: { baseUrl: string; token: string } | undefined; + + if (proxyConfig && proxyToken) { + workspaceProxyConfig = { + baseUrl: `http://keyring-proxy:9101/v1/${primaryProvider.providerId}`, + token: proxyToken, + }; + } // Create workspace createBotWorkspace(config.dataDir, { @@ -392,7 +399,7 @@ export async function buildServer(): Promise { const db = getDb(); db.transaction(() => { - updateBot(bot.id, { container_id: containerId }); + updateBot(bot.id, { container_id: containerId, image_version: config.openclawImage }); })(); await docker.startContainer(bot.hostname); db.transaction(() => { @@ -604,6 +611,44 @@ export async function buildServer(): Promise { } }); + // Dynamic model discovery for any OpenAI-compatible provider (e.g., Ollama) + // Fetches from the provider's /v1/models endpoint. + server.get<{ Querystring: { baseUrl?: string; apiKey?: string } }>('/api/models/discover', async (request, reply) => { + const baseUrl = request.query.baseUrl; + if (!baseUrl) { + reply.code(400); + return { error: 'Missing baseUrl query parameter' }; + } + + try { + // Translate localhost → host.docker.internal for fetches from inside Docker + const fetchBase = toDockerHostUrl(baseUrl); + // Append /models to the base URL, preserving path (e.g. 
/v1 → /v1/models) + const url = fetchBase.replace(/\/+$/, '') + '/models'; + const controller = new AbortController(); + const timeout = setTimeout(() => { controller.abort(); }, 5000); + + const headers: Record<string, string> = {}; + if (request.query.apiKey) { + headers.Authorization = `Bearer ${request.query.apiKey}`; + } + + const response = await fetch(url, { signal: controller.signal, headers }); + clearTimeout(timeout); + + if (!response.ok) { + return { models: [] }; + } + + const data = await response.json() as { data?: { id: string }[] }; + const models = (data.data ?? []).map((m: { id: string }) => m.id); + return { models }; + } catch { + // Connection refused, timeout, etc. — graceful fallback + return { models: [] }; + } + }); + // Serve static dashboard files (if built) const dashboardDist = join(process.cwd(), 'dashboard', 'dist'); if (existsSync(dashboardDist)) { diff --git a/src/services/DockerService.ts b/src/services/DockerService.ts index 9711140..9b90a76 100644 --- a/src/services/DockerService.ts +++ b/src/services/DockerService.ts @@ -38,15 +38,17 @@ export class DockerService { const containerName = `botmaker-${hostname}`; try { + const env = [ + ...config.environment, + `OPENCLAW_STATE_DIR=/app/botdata`, + `OPENCLAW_GATEWAY_TOKEN=${config.gatewayToken}`, + ]; + + const container = await this.docker.createContainer({ name: containerName, Image: config.image, - Cmd: ['node', 'dist/index.js', 'gateway'], - Env: [ - ...config.environment, - `OPENCLAW_STATE_DIR=/app/botdata`, - `OPENCLAW_GATEWAY_TOKEN=${config.gatewayToken}`, - ], + Cmd: ['node', 'openclaw.mjs', 'gateway'], + Env: env, ExposedPorts: { [`${config.port}/tcp`]: {} }, @@ -74,7 +76,8 @@ RestartPolicy: { Name: 'unless-stopped' }, - NetworkMode: config.networkName ?? 
'bridge', + ...(config.extraHosts && { ExtraHosts: config.extraHosts }), } }); diff --git a/src/services/docker-errors.ts b/src/services/docker-errors.ts index e33b6fb..dc31383 100644 --- a/src/services/docker-errors.ts +++ b/src/services/docker-errors.ts @@ -32,7 +32,7 @@ export class ContainerError extends Error { * Wraps raw Docker errors with domain-specific error codes. * * Error code mapping: - * - 404 -> NOT_FOUND (container doesn't exist) + * - 404 -> NOT_FOUND (resource doesn't exist — image or container) * - 409 -> ALREADY_EXISTS (container name conflict) * - 304 -> ignored (not modified, container already in desired state) * - ETIMEDOUT/timeout -> NETWORK_ERROR (Docker daemon unreachable) @@ -41,11 +41,13 @@ export class ContainerError extends Error { export function wrapDockerError(err: unknown, botId: string): ContainerError { const dockerErr = err as { statusCode?: number; code?: string; message?: string }; - // Container not found + // Docker resource not found (image or container) if (dockerErr.statusCode === 404) { + const rawMsg = dockerErr.message ?? ''; + const detail = rawMsg ? `: ${rawMsg}` : ''; return new ContainerError( 'NOT_FOUND', - `Container for bot ${botId} not found`, + `Docker resource not found for bot ${botId}${detail}`, botId, err instanceof Error ? 
err : undefined ); diff --git a/src/types/bot.ts b/src/types/bot.ts index 0202de3..90fecd0 100644 --- a/src/types/bot.ts +++ b/src/types/bot.ts @@ -11,6 +11,7 @@ export interface Bot { port: number | null; // Allocated port for container gateway_token: string | null; // OpenClaw gateway authentication token tags: string | null; // JSON array of API routing tags + image_version: string | null; // Docker image used to create this bot's container status: BotStatus; created_at: string; // ISO datetime updated_at: string; // ISO datetime diff --git a/src/types/container.ts b/src/types/container.ts index 3c44c3c..e0b5c0b 100644 --- a/src/types/container.ts +++ b/src/types/container.ts @@ -52,6 +52,8 @@ export interface ContainerConfig { gatewayToken: string; /** Docker network to join (optional, for proxy connectivity) */ networkName?: string; + /** Extra /etc/hosts entries (e.g., ["host.docker.internal:host-gateway"]) */ + extraHosts?: string[]; } /** Container resource statistics */