diff --git a/bot/.coveragerc b/bot/.coveragerc
new file mode 100644
index 00000000..ff75cef6
--- /dev/null
+++ b/bot/.coveragerc
@@ -0,0 +1,26 @@
+[run]
+source = vikingbot
+omit =
+ */tests/*
+ */test_*
+ */__pycache__/*
+ */venv/*
+ */.venv/*
+ */node_modules/*
+ setup.py
+
+[report]
+exclude_lines =
+ pragma: no cover
+ def __repr__
+ raise AssertionError
+ raise NotImplementedError
+ if __name__ == .__main__.:
+ class .*\bProtocol\):
+ @(abc\.)?abstractmethod
+
+show_missing = True
+skip_covered = False
+
+[html]
+directory = htmlcov
diff --git a/bot/.github/workflows/test.yml b/bot/.github/workflows/test.yml
new file mode 100644
index 00000000..5b9c8c21
--- /dev/null
+++ b/bot/.github/workflows/test.yml
@@ -0,0 +1,72 @@
+name: Tests
+
+on:
+ push:
+ branches: [main, develop]
+ pull_request:
+ branches: [main, develop]
+
+jobs:
+ test:
+ runs-on: ubuntu-latest
+ strategy:
+ matrix:
+ python-version: ["3.10", "3.11", "3.12"]
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Set up Python ${{ matrix.python-version }}
+ uses: actions/setup-python@v5
+ with:
+ python-version: ${{ matrix.python-version }}
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v3
+ with:
+ version: "latest"
+
+ - name: Install dependencies
+ run: |
+ uv pip install --system -e ".[dev]"
+
+ - name: Run tests with coverage
+ run: |
+ pytest --cov=vikingbot --cov-report=xml --cov-report=term-missing -v
+
+ - name: Upload coverage to Codecov
+ uses: codecov/codecov-action@v4
+ with:
+ files: ./coverage.xml
+ flags: unittests
+ name: codecov-umbrella
+
+ lint:
+ runs-on: ubuntu-latest
+
+ steps:
+ - name: Checkout code
+ uses: actions/checkout@v4
+
+ - name: Set up Python
+ uses: actions/setup-python@v5
+ with:
+ python-version: "3.11"
+
+ - name: Install uv
+ uses: astral-sh/setup-uv@v3
+ with:
+ version: "latest"
+
+ - name: Install dependencies
+ run: |
+ uv pip install --system ruff
+
+ - name: Run ruff check
+ run: |
+ ruff check .
+
+ - name: Run ruff format check
+ run: |
+ ruff format --check .
diff --git a/bot/CHANNEL.md b/bot/CHANNEL.md
new file mode 100644
index 00000000..61eabbc9
--- /dev/null
+++ b/bot/CHANNEL.md
@@ -0,0 +1,414 @@
+## 💬 聊天应用
+
+通过 Telegram、Discord、WhatsApp、飞书、Mochat、钉钉、Slack、邮件或 QQ 与您的 vikingbot 对话 —— 随时随地。
+
+| 渠道 | 设置难度 |
+|---------|-------|
+| **Telegram** | 简单(只需一个令牌) |
+| **Discord** | 简单(机器人令牌 + 权限) |
+| **WhatsApp** | 中等(扫描二维码) |
+| **飞书** | 中等(应用凭证) |
+| **Mochat** | 中等(claw 令牌 + websocket) |
+| **钉钉** | 中等(应用凭证) |
+| **Slack** | 中等(机器人 + 应用令牌) |
+| **邮件** | 中等(IMAP/SMTP 凭证) |
+| **QQ** | 简单(应用凭证) |
+
+
+Telegram(推荐)
+
+**1. 创建机器人**
+- 打开 Telegram,搜索 `@BotFather`
+- 发送 `/newbot`,按照提示操作
+- 复制令牌
+
+**2. 配置**
+
+```json
+{
+  "bot": {
+    "channels": [{
+      "type": "telegram",
+      "enabled": true,
+      "token": "YOUR_BOT_TOKEN",
+      "allowFrom": ["YOUR_USER_ID"]
+    }]
+  }
+}
+```
+
+> 您可以在 Telegram 设置中找到您的 **用户 ID**。它显示为 `@yourUserId`。
+> 复制这个值**不带 `@` 符号**并粘贴到配置文件中。
+
+
+**3. 运行**
+
+```bash
+vikingbot gateway
+```
+
+
+
+
+Mochat (Claw IM)
+
+默认使用 **Socket.IO WebSocket**,并带有 HTTP 轮询回退。
+
+**1. 让 vikingbot 为您设置 Mochat**
+
+只需向 vikingbot 发送此消息(将 `xxx@xxx` 替换为您的真实邮箱):
+
+```
+Read https://raw.githubusercontent.com/HKUDS/MoChat/refs/heads/main/skills/vikingbot/skill.md and register on MoChat. My Email account is xxx@xxx Bind me as your owner and DM me on MoChat.
+```
+
+vikingbot 将自动注册、配置 `~/.openviking/ov.conf` 并连接到 Mochat。
+
+**2. 重启网关**
+
+```bash
+vikingbot gateway
+```
+
+就这么简单 —— vikingbot 处理剩下的一切!
+
+
+
+
+手动配置(高级)
+
+如果您更喜欢手动配置,请将以下内容添加到 `~/.openviking/ov.conf`:
+
+> 请保密 `claw_token`。它只应在 `X-Claw-Token` 头中发送到您的 Mochat API 端点。
+
+```json
+{
+  "bot": {
+    "channels": [{
+      "type": "mochat",
+      "enabled": true,
+      "base_url": "https://mochat.io",
+      "socket_url": "https://mochat.io",
+      "socket_path": "/socket.io",
+      "claw_token": "claw_xxx",
+      "agent_user_id": "6982abcdef",
+      "sessions": ["*"],
+      "panels": ["*"],
+      "reply_delay_mode": "non-mention",
+      "reply_delay_ms": 120000
+    }]
+  }
+}
+```
+
+
+
+
+
+
+
+Discord
+
+**1. 创建机器人**
+- 访问 https://discord.com/developers/applications
+- 创建应用 → 机器人 → 添加机器人
+- 复制机器人令牌
+
+**2. 启用意图**
+- 在机器人设置中,启用 **MESSAGE CONTENT INTENT**
+- (可选)如果您计划使用基于成员数据的允许列表,启用 **SERVER MEMBERS INTENT**
+
+**3. 获取您的用户 ID**
+- Discord 设置 → 高级 → 启用 **开发者模式**
+- 右键点击您的头像 → **复制用户 ID**
+
+**4. 配置**
+
+```json
+{
+  "bot": {
+    "channels": [{
+      "type": "discord",
+      "enabled": true,
+      "token": "YOUR_BOT_TOKEN",
+      "allowFrom": ["YOUR_USER_ID"]
+    }]
+  }
+}
+```
+
+**5. 邀请机器人**
+- OAuth2 → URL 生成器
+- 范围:`bot`
+- 机器人权限:`发送消息`、`读取消息历史`
+- 打开生成的邀请 URL 并将机器人添加到您的服务器
+
+**6. 运行**
+
+```bash
+vikingbot gateway
+```
+
+
+
+
+WhatsApp
+
+需要 **Node.js ≥18**。
+
+**1. 链接设备**
+
+```bash
+vikingbot channels login
+# 使用 WhatsApp 扫描二维码 → 设置 → 链接设备
+```
+
+**2. 配置**
+
+```json
+{
+  "bot": {
+    "channels": [{
+      "type": "whatsapp",
+      "enabled": true,
+      "allowFrom": ["+1234567890"]
+    }]
+  }
+}
+```
+
+**3. 运行**(两个终端)
+
+```bash
+# 终端 1
+vikingbot channels login
+
+# 终端 2
+vikingbot gateway
+```
+
+
+
+
+飞书
+
+使用 **WebSocket** 长连接 —— 不需要公网 IP。
+
+**1. 创建飞书机器人**
+- 访问 [飞书开放平台](https://open.feishu.cn/app)
+- 创建新应用 → 启用 **机器人** 功能
+- **权限**:添加 `im:message`(发送消息)
+- **事件**:添加 `im.message.receive_v1`(接收消息)
+ - 选择 **长连接** 模式(需要先运行 vikingbot 来建立连接)
+- 从「凭证与基础信息」获取 **App ID** 和 **App Secret**
+- 发布应用
+
+**2. 配置**
+
+```json
+{
+  "bot": {
+    "channels": [{
+      "type": "feishu",
+      "enabled": true,
+      "appId": "cli_xxx",
+      "appSecret": "xxx",
+      "encryptKey": "",
+      "verificationToken": "",
+      "allowFrom": []
+    }]
+  }
+}
+```
+
+> 长连接模式下,`encryptKey` 和 `verificationToken` 是可选的。
+> `allowFrom`:留空以允许所有用户,或添加 `["ou_xxx"]` 以限制访问。
+
+**3. 运行**
+
+```bash
+vikingbot gateway
+```
+
+> [!TIP]
+> 飞书使用 WebSocket 接收消息 —— 不需要 webhook 或公网 IP!
+
+
+
+
+QQ(QQ单聊)
+
+使用 **botpy SDK** 配合 WebSocket —— 不需要公网 IP。目前仅支持 **私聊**。
+
+**1. 注册并创建机器人**
+- 访问 [QQ 开放平台](https://q.qq.com) → 注册为开发者(个人或企业)
+- 创建新的机器人应用
+- 进入 **开发设置** → 复制 **AppID** 和 **AppSecret**
+
+**2. 设置沙箱测试环境**
+- 在机器人管理控制台中,找到 **沙箱配置**
+- 在 **消息列表配置** 下,点击 **添加成员** 并添加您自己的 QQ 号
+- 添加完成后,用手机 QQ 扫描机器人的二维码 → 打开机器人资料卡 → 点击「发消息」开始聊天
+
+**3. 配置**
+
+> - `allowFrom`:留空以供公开访问,或添加用户 openid 以限制。您可以在用户向机器人发消息时在 vikingbot 日志中找到 openid。
+> - 生产环境:在机器人控制台提交审核并发布。查看 [QQ 机器人文档](https://bot.q.qq.com/wiki/) 了解完整发布流程。
+
+```json
+{
+  "bot": {
+    "channels": [{
+      "type": "qq",
+      "enabled": true,
+      "appId": "YOUR_APP_ID",
+      "secret": "YOUR_APP_SECRET",
+      "allowFrom": []
+    }]
+  }
+}
+```
+
+**4. 运行**
+
+```bash
+vikingbot gateway
+```
+
+现在从 QQ 向机器人发送消息 —— 它应该会回复!
+
+
+
+
+钉钉
+
+使用 **流模式** —— 不需要公网 IP。
+
+**1. 创建钉钉机器人**
+- 访问 [钉钉开放平台](https://open-dev.dingtalk.com/)
+- 创建新应用 -> 添加 **机器人** 功能
+- **配置**:
+ - 打开 **流模式**
+- **权限**:添加发送消息所需的权限
+- 从「凭证」获取 **AppKey**(客户端 ID)和 **AppSecret**(客户端密钥)
+- 发布应用
+
+**2. 配置**
+
+```json
+{
+  "bot": {
+    "channels": [{
+      "type": "dingtalk",
+      "enabled": true,
+      "clientId": "YOUR_APP_KEY",
+      "clientSecret": "YOUR_APP_SECRET",
+      "allowFrom": []
+    }]
+  }
+}
+```
+
+> `allowFrom`:留空以允许所有用户,或添加 `["staffId"]` 以限制访问。
+
+**3. 运行**
+
+```bash
+vikingbot gateway
+```
+
+
+
+
+Slack
+
+使用 **Socket 模式** —— 不需要公网 URL。
+
+**1. 创建 Slack 应用**
+- 访问 [Slack API](https://api.slack.com/apps) → **创建新应用** →「从零开始」
+- 选择名称并选择您的工作区
+
+**2. 配置应用**
+- **Socket 模式**:打开 → 生成一个具有 `connections:write` 范围的 **应用级令牌** → 复制它(`xapp-...`)
+- **OAuth 与权限**:添加机器人范围:`chat:write`、`reactions:write`、`app_mentions:read`
+- **事件订阅**:打开 → 订阅机器人事件:`message.im`、`message.channels`、`app_mention` → 保存更改
+- **应用主页**:滚动到 **显示标签页** → 启用 **消息标签页** → 勾选 **"允许用户从消息标签页发送斜杠命令和消息"**
+- **安装应用**:点击 **安装到工作区** → 授权 → 复制 **机器人令牌**(`xoxb-...`)
+
+**3. 配置 vikingbot**
+
+```json
+{
+  "bot": {
+    "channels": [{
+      "type": "slack",
+      "enabled": true,
+      "botToken": "xoxb-...",
+      "appToken": "xapp-...",
+      "groupPolicy": "mention"
+    }]
+  }
+}
+```
+
+**4. 运行**
+
+```bash
+vikingbot gateway
+```
+
+直接向机器人发送私信或在频道中 @提及它 —— 它应该会回复!
+
+> [!TIP]
+> - `groupPolicy`:`"mention"`(默认 —— 仅在 @提及时回复)、`"open"`(回复所有频道消息)或 `"allowlist"`(限制到特定频道)。
+> - 私信策略默认为开放。设置 `"dm": {"enabled": false}` 以禁用私信。
+
+
+
+
+邮件
+
+给 vikingbot 一个自己的邮箱账户。它通过 **IMAP** 轮询收件箱并通过 **SMTP** 回复 —— 就像一个个人邮件助手。
+
+**1. 获取凭证(Gmail 示例)**
+- 为您的机器人创建一个专用的 Gmail 账户(例如 `my-vikingbot@gmail.com`)
+- 启用两步验证 → 创建 [应用密码](https://myaccount.google.com/apppasswords)
+- 将此应用密码用于 IMAP 和 SMTP
+
+**2. 配置**
+
+> - `consentGranted` 必须为 `true` 以允许邮箱访问。这是一个安全门 —— 设置为 `false` 以完全禁用。
+> - `allowFrom`:留空以接受来自任何人的邮件,或限制到特定发件人。
+> - `smtpUseTls` 和 `smtpUseSsl` 分别默认为 `true` / `false`,这对 Gmail(端口 587 + STARTTLS)是正确的。无需显式设置它们。
+> - 如果您只想读取/分析邮件而不发送自动回复,请设置 `"autoReplyEnabled": false`。
+
+```json
+{
+  "bot": {
+    "channels": [{
+      "type": "email",
+      "enabled": true,
+      "consentGranted": true,
+      "imapHost": "imap.gmail.com",
+      "imapPort": 993,
+      "imapUsername": "my-vikingbot@gmail.com",
+      "imapPassword": "your-app-password",
+      "smtpHost": "smtp.gmail.com",
+      "smtpPort": 587,
+      "smtpUsername": "my-vikingbot@gmail.com",
+      "smtpPassword": "your-app-password",
+      "fromAddress": "my-vikingbot@gmail.com",
+      "allowFrom": ["your-real-email@gmail.com"]
+    }]
+  }
+}
+```
+
+
+**3. 运行**
+
+```bash
+vikingbot gateway
+```
+
+
\ No newline at end of file
diff --git a/bot/README.md b/bot/README.md
index da0993bb..c8e88a5c 100644
--- a/bot/README.md
+++ b/bot/README.md
@@ -31,8 +31,38 @@ uv venv --python 3.11
source .venv/bin/activate # macOS/Linux
# .venv\Scripts\activate # Windows
-# Install dependencies
+# Install dependencies (minimal)
uv pip install -e .
+
+# Or install with optional features
+uv pip install -e ".[langfuse,telegram,console]"
+```
+
+### Optional Dependencies
+
+Install only the features you need:
+
+| Feature Group | Install Command | Description |
+|---------------|-----------------|-------------|
+| **Full** | `uv pip install -e ".[full]"` | All features included |
+| **Langfuse** | `uv pip install -e ".[langfuse]"` | LLM observability and tracing |
+| **FUSE** | `uv pip install -e ".[fuse]"` | OpenViking filesystem mount |
+| **Sandbox** | `uv pip install -e ".[sandbox]"` | Code execution sandbox |
+| **OpenCode** | `uv pip install -e ".[opencode]"` | OpenCode AI integration |
+
+#### Channels (chat apps)
+
+| Channel | Install Command |
+|---------|-----------------|
+| **Telegram** | `uv pip install -e ".[telegram]"` |
+| **Feishu/Lark** | `uv pip install -e ".[feishu]"` |
+| **DingTalk** | `uv pip install -e ".[dingtalk]"` |
+| **Slack** | `uv pip install -e ".[slack]"` |
+| **QQ** | `uv pip install -e ".[qq]"` |
+
+Multiple features can be combined:
+```bash
+uv pip install -e ".[langfuse,telegram,console]"
```
## 🚀 Quick Start
@@ -48,7 +78,7 @@ vikingbot gateway
```
This will automatically:
-- Create a default config at `~/.vikingbot/config.json`
+- Create a default config at `~/.openviking/ov.conf`
- Start the Console Web UI at http://localhost:18791
**2. Configure via Console**
@@ -61,7 +91,17 @@ Open http://localhost:18791 in your browser and:
**3. Chat**
```bash
-vikingbot agent -m "What is 2+2?"
+# Send a single message directly
+vikingbot chat -m "What is 2+2?"
+
+# Enter interactive chat mode (supports multi-turn conversations)
+vikingbot chat
+
+# Show plain-text replies (no Markdown rendering)
+vikingbot chat --no-markdown
+
+# Show runtime logs during chat (useful for debugging)
+vikingbot chat --logs
```
That's it! You have a working AI assistant in 2 minutes.
@@ -70,17 +110,6 @@ That's it! You have a working AI assistant in 2 minutes.
You can also deploy vikingbot using Docker for easier setup and isolation.
-## ☁️ Volcengine VKE Deployment
-
-If you want to deploy vikingbot on Volcengine Kubernetes Engine (VKE), see the detailed deployment guide:
-
-👉 [VKE Deployment Guide (Chinese)](deploy/vke/README.md)
-
-The guide includes:
-- Complete prerequisites
-- How to create Volcengine account, VKE cluster, container registry, and TOS bucket
-- One-click deployment script usage
-- Configuration details and troubleshooting
### Prerequisites
First, install Docker:
@@ -93,7 +122,6 @@ Verify Docker installation:
docker --version
```
-### Quick Volcengine Registry Deploy (Recommended)
### Quick Docker Deploy
```bash
@@ -105,7 +133,7 @@ docker run -d \
--name vikingbot \
--restart unless-stopped \
--platform linux/amd64 \
- -v ~/.vikingbot:/root/.vikingbot \
+ -v ~/.openviking:/root/.openviking \
-p 18791:18791 \
vikingbot-cn-beijing.cr.volces.com/vikingbot/vikingbot:latest \
gateway
@@ -161,14 +189,16 @@ Talk to your vikingbot through Telegram, Discord, WhatsApp, Feishu, Mochat, Ding
```json
{
- "channels": [
- {
- "type": "telegram",
- "enabled": true,
- "token": "YOUR_BOT_TOKEN",
- "allowFrom": ["YOUR_USER_ID"]
- }
- ]
+ "bot": {
+ "channels": [
+ {
+ "type": "telegram",
+ "enabled": true,
+ "token": "YOUR_BOT_TOKEN",
+ "allowFrom": ["YOUR_USER_ID"]
+ }
+ ]
+ }
}
```
@@ -197,7 +227,7 @@ Simply send this message to vikingbot (replace `xxx@xxx` with your real email):
Read https://raw.githubusercontent.com/HKUDS/MoChat/refs/heads/main/skills/vikingbot/skill.md and register on MoChat. My Email account is xxx@xxx Bind me as your owner and DM me on MoChat.
```
-vikingbot will automatically register, configure `~/.vikingbot/config.json`, and connect to Mochat.
+vikingbot will automatically register, configure `~/.openviking/ov.conf`, and connect to Mochat.
**2. Restart gateway**
@@ -212,27 +242,29 @@ That's it — vikingbot handles the rest!
Manual configuration (advanced)
-If you prefer to configure manually, add the following to `~/.vikingbot/config.json`:
+If you prefer to configure manually, add the following to `~/.openviking/ov.conf`:
> Keep `claw_token` private. It should only be sent in `X-Claw-Token` header to your Mochat API endpoint.
```json
{
- "channels": [
- {
- "type": "mochat",
- "enabled": true,
- "base_url": "https://mochat.io",
- "socket_url": "https://mochat.io",
- "socket_path": "/socket.io",
- "claw_token": "claw_xxx",
- "agent_user_id": "6982abcdef",
- "sessions": ["*"],
- "panels": ["*"],
- "reply_delay_mode": "non-mention",
- "reply_delay_ms": 120000
- }
- ]
+ "bot": {
+ "channels": [
+ {
+ "type": "mochat",
+ "enabled": true,
+ "base_url": "https://mochat.io",
+ "socket_url": "https://mochat.io",
+ "socket_path": "/socket.io",
+ "claw_token": "claw_xxx",
+ "agent_user_id": "6982abcdef",
+ "sessions": ["*"],
+ "panels": ["*"],
+ "reply_delay_mode": "non-mention",
+ "reply_delay_ms": 120000
+ }
+ ]
+ }
}
```
@@ -262,14 +294,16 @@ If you prefer to configure manually, add the following to `~/.vikingbot/config.j
```json
{
- "channels": [
- {
- "type": "discord",
- "enabled": true,
- "token": "YOUR_BOT_TOKEN",
- "allowFrom": ["YOUR_USER_ID"]
- }
- ]
+ "bot": {
+ "channels": [
+ {
+ "type": "discord",
+ "enabled": true,
+ "token": "YOUR_BOT_TOKEN",
+ "allowFrom": ["YOUR_USER_ID"]
+ }
+ ]
+ }
}
```
@@ -303,13 +337,15 @@ vikingbot channels login
```json
{
- "channels": [
- {
- "type": "whatsapp",
- "enabled": true,
- "allowFrom": ["+1234567890"]
- }
- ]
+ "bot": {
+ "channels": [
+ {
+ "type": "whatsapp",
+ "enabled": true,
+ "allowFrom": ["+1234567890"]
+ }
+ ]
+ }
}
```
@@ -343,17 +379,19 @@ Uses **WebSocket** long connection — no public IP required.
```json
{
- "channels": [
- {
- "type": "feishu",
- "enabled": true,
- "appId": "cli_xxx",
- "appSecret": "xxx",
- "encryptKey": "",
- "verificationToken": "",
- "allowFrom": []
- }
- ]
+ "bot": {
+ "channels": [
+ {
+ "type": "feishu",
+ "enabled": true,
+ "appId": "cli_xxx",
+ "appSecret": "xxx",
+ "encryptKey": "",
+ "verificationToken": "",
+ "allowFrom": []
+ }
+ ]
+ }
}
```
@@ -393,15 +431,17 @@ Uses **botpy SDK** with WebSocket — no public IP required. Currently supports
```json
{
- "channels": [
- {
- "type": "qq",
- "enabled": true,
- "appId": "YOUR_APP_ID",
- "secret": "YOUR_APP_SECRET",
- "allowFrom": []
- }
- ]
+ "bot": {
+ "channels": [
+ {
+ "type": "qq",
+ "enabled": true,
+ "appId": "YOUR_APP_ID",
+ "secret": "YOUR_APP_SECRET",
+ "allowFrom": []
+ }
+ ]
+ }
}
```
@@ -433,15 +473,17 @@ Uses **Stream Mode** — no public IP required.
```json
{
- "channels": [
- {
- "type": "dingtalk",
- "enabled": true,
- "clientId": "YOUR_APP_KEY",
- "clientSecret": "YOUR_APP_SECRET",
- "allowFrom": []
- }
- ]
+ "bot": {
+ "channels": [
+ {
+ "type": "dingtalk",
+ "enabled": true,
+ "clientId": "YOUR_APP_KEY",
+ "clientSecret": "YOUR_APP_SECRET",
+ "allowFrom": []
+ }
+ ]
+ }
}
```
@@ -475,15 +517,17 @@ Uses **Socket Mode** — no public URL required.
```json
{
- "channels": [
- {
- "type": "slack",
- "enabled": true,
- "botToken": "xoxb-...",
- "appToken": "xapp-...",
- "groupPolicy": "mention"
- }
- ]
+ "bot": {
+ "channels": [
+ {
+ "type": "slack",
+ "enabled": true,
+ "botToken": "xoxb-...",
+ "appToken": "xapp-...",
+ "groupPolicy": "mention"
+ }
+ ]
+ }
}
```
@@ -520,23 +564,25 @@ Give vikingbot its own email account. It polls **IMAP** for incoming mail and re
```json
{
- "channels": [
- {
- "type": "email",
- "enabled": true,
- "consentGranted": true,
- "imapHost": "imap.gmail.com",
- "imapPort": 993,
- "imapUsername": "my-vikingbot@gmail.com",
- "imapPassword": "your-app-password",
- "smtpHost": "smtp.gmail.com",
- "smtpPort": 587,
- "smtpUsername": "my-vikingbot@gmail.com",
- "smtpPassword": "your-app-password",
- "fromAddress": "my-vikingbot@gmail.com",
- "allowFrom": ["your-real-email@gmail.com"]
- }
- ]
+ "bot": {
+ "channels": [
+ {
+ "type": "email",
+ "enabled": true,
+ "consentGranted": true,
+ "imapHost": "imap.gmail.com",
+ "imapPort": 993,
+ "imapUsername": "my-vikingbot@gmail.com",
+ "imapPassword": "your-app-password",
+ "smtpHost": "smtp.gmail.com",
+ "smtpPort": 587,
+ "smtpUsername": "my-vikingbot@gmail.com",
+ "smtpPassword": "your-app-password",
+ "fromAddress": "my-vikingbot@gmail.com",
+ "allowFrom": ["your-real-email@gmail.com"]
+ }
+ ]
+ }
}
```
@@ -562,31 +608,32 @@ Simply send the command above to your vikingbot (via CLI or any chat channel), a
## ⚙️ Configuration
-Config file: `~/.vikingbot/config.json`
+Config file: `~/.openviking/ov.conf`
> [!IMPORTANT]
> After modifying the configuration (either via Console UI or by editing the file directly),
> you need to restart the gateway service for changes to take effect.
+> [!NOTE]
+> Configuration has been migrated from `~/.vikingbot/config.json` to `~/.openviking/ov.conf`.
+> The configuration is now nested under the `bot` key.
+
### Manual Configuration (Advanced)
If you prefer to edit the config file directly instead of using the Console UI:
```json
{
- "providers": {
- "openai": {
- "apiKey": "sk-xxx"
- }
- },
- "agents": {
- "defaults": {
+ "bot": {
+ "agents": {
"model": "openai/doubao-seed-2-0-pro-260215"
}
}
}
```
+Provider configuration is read from OpenViking config (`vlm` section in `ov.conf`).
+
### Providers
> [!TIP]
@@ -661,62 +708,198 @@ That's it! Environment variables, model prefixing, config matching, and `vikingb
| `tools.restrictToWorkspace` | `true` | When `true`, restricts **all** agent tools (shell, file read/write/edit, list) to the workspace directory. Prevents path traversal and out-of-scope access. |
| `channels.*.allowFrom` | `[]` (allow all) | Whitelist of user IDs. Empty = allow everyone; non-empty = only listed users can interact. |
+### Observability (Optional)
+
+**Langfuse** integration for LLM observability and tracing.
+
+
+Langfuse Configuration
+
+**Option 1: Local Deployment (Recommended for testing)**
+
+Deploy Langfuse locally using Docker:
+
+```bash
+# Navigate to the deployment script
+cd deploy/docker
+
+# Run the deployment script
+./deploy_langfuse.sh
+```
+
+This will start Langfuse locally at `http://localhost:3000` with pre-configured credentials.
+
+**Option 2: Langfuse Cloud**
+
+1. Sign up at [langfuse.com](https://langfuse.com)
+2. Create a new project
+3. Copy the **Secret Key** and **Public Key** from project settings
+
+**Configuration**
+
+Add to `~/.openviking/ov.conf`:
+
+```json
+{
+ "langfuse": {
+ "enabled": true,
+ "secret_key": "sk-lf-vikingbot-secret-key-2026",
+ "public_key": "pk-lf-vikingbot-public-key-2026",
+ "base_url": "http://localhost:3000"
+ }
+}
+```
+
+For Langfuse Cloud, use `https://cloud.langfuse.com` as the `base_url`.
+
+**Install Langfuse support:**
+```bash
+uv pip install -e ".[langfuse]"
+```
+
+**Restart vikingbot:**
+```bash
+vikingbot gateway
+```
+
+**Features enabled:**
+- Automatic trace creation for each conversation
+- Session and user tracking
+- LLM call monitoring
+- Token usage tracking
+
+
+
### Sandbox
-vikingbot supports sandboxed execution for enhanced security. By default, sandbox is disabled. To enable sandbox with SRT backend in per-session mode, set `"enabled": true`.
+vikingbot supports sandboxed execution for enhanced security.
+
+**By default, no sandbox configuration is needed in `ov.conf`:**
+- Default backend: `direct` (runs code directly on host)
+- Default mode: `shared` (single sandbox shared across all sessions)
+
+You only need to add sandbox configuration when you want to change these defaults.
-Sandbox Configuration (SRT Backend)
+Sandbox Configuration Options
+
+**To use a different backend or mode:**
+```json
+{
+ "sandbox": {
+ "backend": "opensandbox",
+ "mode": "per-session"
+ }
+}
+```
+
+**Available Backends:**
+| Backend | Description |
+|---------|-------------|
+| `direct` | (Default) Runs code directly on the host |
+| `docker` | Uses Docker containers for isolation |
+| `opensandbox` | Uses OpenSandbox service |
+| `srt` | Uses Anthropic's SRT sandbox runtime |
+| `aiosandbox` | Uses AIO Sandbox service |
+
+**Available Modes:**
+| Mode | Description |
+|------|-------------|
+| `shared` | (Default) Single sandbox shared across all sessions |
+| `per-session` | Separate sandbox instance for each session |
+
+**Backend-specific Configuration (only needed when using that backend):**
+
+**Direct Backend:**
+```json
+{
+ "sandbox": {
+ "backends": {
+ "direct": {
+ "restrictToWorkspace": false
+ }
+ }
+ }
+}
+```
+
+**OpenSandbox Backend:**
+```json
+{
+ "sandbox": {
+ "backend": "opensandbox",
+ "backends": {
+ "opensandbox": {
+ "serverUrl": "http://localhost:18792",
+ "apiKey": "",
+ "defaultImage": "opensandbox/code-interpreter:v1.0.1"
+ }
+ }
+ }
+}
+```
+
+**Docker Backend:**
+```json
+{
+ "sandbox": {
+ "backend": "docker",
+ "backends": {
+ "docker": {
+ "image": "python:3.11-slim",
+ "networkMode": "bridge"
+ }
+ }
+ }
+}
+```
+**SRT Backend:**
```json
{
"sandbox": {
- "enabled": false,
"backend": "srt",
- "mode": "per-session",
- "network": {
- "allowedDomains": [],
- "deniedDomains": [],
- "allowLocalBinding": false
- },
- "filesystem": {
- "denyRead": [],
- "allowWrite": [],
- "denyWrite": []
- },
- "runtime": {
- "cleanupOnExit": true,
- "timeout": 300
- },
"backends": {
"srt": {
- "nodePath": "node"
+ "settingsPath": "~/.vikingbot/srt-settings.json",
+ "nodePath": "node",
+ "network": {
+ "allowedDomains": [],
+ "deniedDomains": [],
+ "allowLocalBinding": false
+ },
+ "filesystem": {
+ "denyRead": [],
+ "allowWrite": [],
+ "denyWrite": []
+ },
+ "runtime": {
+ "cleanupOnExit": true,
+ "timeout": 300
+ }
}
}
}
}
```
-**Configuration Options:**
-
-| Option | Default | Description |
-|--------|---------|-------------|
-| `enabled` | `false` | Enable sandbox execution |
-| `backend` | `"srt"` | Sandbox backend: `srt` or `docker` |
-| `mode` | `"per-session"` | Sandbox mode: `per-session` (isolated per session) or `shared` (shared across sessions) |
-| `network.allowedDomains` | `[]` | List of allowed domains for network access (empty = all allowed) |
-| `network.deniedDomains` | `[]` | List of denied domains (blocked regardless of allowed list) |
-| `network.allowLocalBinding` | `false` | Allow binding to local addresses (localhost, 127.0.0.1) |
-| `filesystem.denyRead` | `[]` | Paths/files to deny read access |
-| `filesystem.allowWrite` | `[]` | Paths/files to explicitly allow write access |
-| `filesystem.denyWrite` | `[]` | Paths/files to deny write access |
-| `runtime.cleanupOnExit` | `true` | Clean up sandbox resources on exit |
-| `runtime.timeout` | `300` | Command execution timeout in seconds |
-| `backends.srt.nodePath` | `"/usr/local/bin/node"` | Path to Node.js executable (use full path if `node` is not in PATH) |
+**AIO Sandbox Backend:**
+```json
+{
+ "sandbox": {
+ "backend": "aiosandbox",
+ "backends": {
+ "aiosandbox": {
+ "baseUrl": "http://localhost:18794"
+ }
+ }
+ }
+}
+```
**SRT Backend Setup:**
-The SRT backend uses `@anthropic-ai/sandbox-runtime`. It's automatically installed when you run `vikingbot onboard`.
+The SRT backend uses `@anthropic-ai/sandbox-runtime`.
**System Dependencies:**
@@ -783,11 +966,10 @@ which nodejs
| Command | Description |
|---------|-------------|
-| `vikingbot agent -m "..."` | Chat with the agent |
-| `vikingbot agent` | Interactive chat mode |
-| `vikingbot agent --no-markdown` | Show plain-text replies |
-| `vikingbot agent --logs` | Show runtime logs during chat |
-| `vikingbot tui` | Launch TUI (Terminal User Interface) |
+| `vikingbot chat -m "..."` | Chat with the agent |
+| `vikingbot chat` | Interactive chat mode |
+| `vikingbot chat --no-markdown` | Show plain-text replies |
+| `vikingbot chat --logs` | Show runtime logs during chat |
| `vikingbot gateway` | Start the gateway and Console Web UI |
| `vikingbot status` | Show status |
| `vikingbot channels login` | Link WhatsApp (scan QR) |
@@ -810,23 +992,6 @@ The Console Web UI is automatically started when you run `vikingbot gateway`, ac
Interactive mode exits: `exit`, `quit`, `/exit`, `/quit`, `:q`, or `Ctrl+D`.
-
-TUI (Terminal User Interface)
-
-Launch the vikingbot TUI for a rich terminal-based chat experience:
-
-```bash
-vikingbot tui
-```
-
-The TUI provides:
-- Rich text rendering with markdown support
-- Message history and conversation management
-- Real-time agent responses
-- Keyboard shortcuts for navigation
-
-
-
Scheduled Tasks (Cron)
diff --git a/bot/README_CN.md b/bot/README_CN.md
index 7d8a4b79..48f87966 100644
--- a/bot/README_CN.md
+++ b/bot/README_CN.md
@@ -7,11 +7,11 @@
Vikingbot 深度集成 OpenViking,提供强大的知识管理和记忆检索能力:
-- **本地/远程双模式**:支持本地存储(`~/.vikingbot/ov_data/`)和远程服务器模式
+- **本地/远程双模式**:支持本地存储(`~/.openviking/data/`)和远程服务器模式
- **7 个专用 Agent 工具**:资源管理、语义搜索、正则搜索、通配符搜索、记忆搜索
- **三级内容访问**:L0(摘要)、L1(概览)、L2(完整内容)
- **会话记忆自动提交**:对话历史自动保存到 OpenViking
-- **火山引擎 TOS 集成**:远程模式下支持云存储
+- **模型配置**:从 OpenViking 配置(`vlm` 部分)读取,无需在 bot 配置中单独设置 provider
## 📦 安装
@@ -40,8 +40,38 @@ uv venv --python 3.11
source .venv/bin/activate # macOS/Linux
# .venv\Scripts\activate # Windows
-# 安装依赖
+# 安装依赖(最小化)
uv pip install -e .
+
+# 或安装包含可选功能
+uv pip install -e ".[langfuse,telegram,console]"
+```
+
+### 可选依赖
+
+只安装你需要的功能:
+
+| 功能组 | 安装命令 | 描述 |
+|---------------|-----------------|-------------|
+| **完整版** | `uv pip install -e ".[full]"` | 包含所有功能 |
+| **Langfuse** | `uv pip install -e ".[langfuse]"` | LLM 可观测性和追踪 |
+| **FUSE** | `uv pip install -e ".[fuse]"` | OpenViking 文件系统挂载 |
+| **沙箱** | `uv pip install -e ".[sandbox]"` | 代码执行沙箱 |
+| **OpenCode** | `uv pip install -e ".[opencode]"` | OpenCode AI 集成 |
+
+#### 聊天渠道
+
+| 渠道 | 安装命令 |
+|---------|-----------------|
+| **Telegram** | `uv pip install -e ".[telegram]"` |
+| **飞书/Lark** | `uv pip install -e ".[feishu]"` |
+| **钉钉** | `uv pip install -e ".[dingtalk]"` |
+| **Slack** | `uv pip install -e ".[slack]"` |
+| **QQ** | `uv pip install -e ".[qq]"` |
+
+可以组合多个功能:
+```bash
+uv pip install -e ".[langfuse,telegram,console]"
```
## 🚀 快速开始
@@ -57,7 +87,8 @@ vikingbot gateway
```
这将自动:
-- 在 `~/.vikingbot/config.json` 创建默认配置
+- 在 `~/.openviking/ov.conf` 创建默认配置
+- 在 OpenViking 的工作空间下创建 bot 启动文件。默认路径为 `~/.openviking/data/bot/`
- 在 http://localhost:18791 启动控制台 Web UI
**2. 通过控制台配置**
@@ -70,7 +101,17 @@ vikingbot gateway
**3. 聊天**
```bash
-vikingbot agent -m "What is 2+2?"
+# 直接发送单条消息
+vikingbot chat -m "What is 2+2?"
+
+# 进入交互式聊天模式(支持多轮对话)
+vikingbot chat
+
+# 显示纯文本回复(不渲染 Markdown)
+vikingbot chat --no-markdown
+
+# 聊天时显示运行时日志(便于调试)
+vikingbot chat --logs
```
就这么简单!您只需 2 分钟就能拥有一个可用的 AI 助手。
@@ -79,18 +120,6 @@ vikingbot agent -m "What is 2+2?"
您也可以使用 Docker 部署 vikingbot,以便更轻松地设置和隔离。
-## ☁️ 火山引擎 VKE 部署
-
-如果您想在火山引擎容器服务(VKE)上部署 vikingbot,请查看详细的部署文档:
-
-👉 [VKE 部署指南](deploy/vke/README.md)
-
-该指南包含:
-- 完整的前置准备步骤
-- 火山引擎账号、VKE 集群、镜像仓库、TOS 存储桶的创建方法
-- 一键部署脚本使用说明
-- 配置详解和故障排查
-
### 前置要求
首先安装 Docker:
@@ -108,14 +137,14 @@ docker --version
```bash
# 1. 创建必要目录
-mkdir -p ~/.vikingbot/
+mkdir -p ~/.openviking/
# 2. 启动容器
docker run -d \
--name vikingbot \
--restart unless-stopped \
--platform linux/amd64 \
- -v ~/.vikingbot:/root/.vikingbot \
+ -v ~/.openviking:/root/.openviking \
-p 18791:18791 \
vikingbot-cn-beijing.cr.volces.com/vikingbot/vikingbot:latest \
gateway
@@ -143,420 +172,10 @@ docker logs --tail 50 -f vikingbot
更多 Docker 部署选项,请查看 [deploy/docker/README.md](deploy/docker/README.md)。
-## 💬 聊天应用
通过 Telegram、Discord、WhatsApp、飞书、Mochat、钉钉、Slack、邮件或 QQ 与您的 vikingbot 对话 —— 随时随地。
-| 渠道 | 设置难度 |
-|---------|-------|
-| **Telegram** | 简单(只需一个令牌) |
-| **Discord** | 简单(机器人令牌 + 权限) |
-| **WhatsApp** | 中等(扫描二维码) |
-| **飞书** | 中等(应用凭证) |
-| **Mochat** | 中等(claw 令牌 + websocket) |
-| **钉钉** | 中等(应用凭证) |
-| **Slack** | 中等(机器人 + 应用令牌) |
-| **邮件** | 中等(IMAP/SMTP 凭证) |
-| **QQ** | 简单(应用凭证) |
-
-
-Telegram(推荐)
-
-**1. 创建机器人**
-- 打开 Telegram,搜索 `@BotFather`
-- 发送 `/newbot`,按照提示操作
-- 复制令牌
-
-**2. 配置**
-
-```json
-{
- "channels": [
- {
- "type": "telegram",
- "enabled": true,
- "token": "YOUR_BOT_TOKEN",
- "allowFrom": ["YOUR_USER_ID"]
- }
- ]
-}
-```
-
-> 您可以在 Telegram 设置中找到您的 **用户 ID**。它显示为 `@yourUserId`。
-> 复制这个值**不带 `@` 符号**并粘贴到配置文件中。
-
-
-**3. 运行**
-
-```bash
-vikingbot gateway
-```
-
-
-
-
-Mochat (Claw IM)
-
-默认使用 **Socket.IO WebSocket**,并带有 HTTP 轮询回退。
-
-**1. 让 vikingbot 为您设置 Mochat**
-
-只需向 vikingbot 发送此消息(将 `xxx@xxx` 替换为您的真实邮箱):
-
-```
-Read https://raw.githubusercontent.com/HKUDS/MoChat/refs/heads/main/skills/vikingbot/skill.md and register on MoChat. My Email account is xxx@xxx Bind me as your owner and DM me on MoChat.
-```
-
-vikingbot 将自动注册、配置 `~/.vikingbot/config.json` 并连接到 Mochat。
-
-**2. 重启网关**
-
-```bash
-vikingbot gateway
-```
-
-就这么简单 —— vikingbot 处理剩下的一切!
-
-
-
-
-手动配置(高级)
-
-如果您更喜欢手动配置,请将以下内容添加到 `~/.vikingbot/config.json`:
-
-> 请保密 `claw_token`。它只应在 `X-Claw-Token` 头中发送到您的 Mochat API 端点。
-
-```json
-{
- "channels": [
- {
- "type": "mochat",
- "enabled": true,
- "base_url": "https://mochat.io",
- "socket_url": "https://mochat.io",
- "socket_path": "/socket.io",
- "claw_token": "claw_xxx",
- "agent_user_id": "6982abcdef",
- "sessions": ["*"],
- "panels": ["*"],
- "reply_delay_mode": "non-mention",
- "reply_delay_ms": 120000
- }
- ]
-}
-```
-
-
-
-
-
-
-
-Discord
-
-**1. 创建机器人**
-- 访问 https://discord.com/developers/applications
-- 创建应用 → 机器人 → 添加机器人
-- 复制机器人令牌
-
-**2. 启用意图**
-- 在机器人设置中,启用 **MESSAGE CONTENT INTENT**
-- (可选)如果您计划使用基于成员数据的允许列表,启用 **SERVER MEMBERS INTENT**
-
-**3. 获取您的用户 ID**
-- Discord 设置 → 高级 → 启用 **开发者模式**
-- 右键点击您的头像 → **复制用户 ID**
-
-**4. 配置**
-
-```json
-{
- "channels": [
- {
- "type": "discord",
- "enabled": true,
- "token": "YOUR_BOT_TOKEN",
- "allowFrom": ["YOUR_USER_ID"]
- }
- ]
-}
-```
-
-**5. 邀请机器人**
-- OAuth2 → URL 生成器
-- 范围:`bot`
-- 机器人权限:`发送消息`、`读取消息历史`
-- 打开生成的邀请 URL 并将机器人添加到您的服务器
-
-**6. 运行**
-
-```bash
-vikingbot gateway
-```
-
-
-
-
-WhatsApp
-
-需要 **Node.js ≥18**。
-
-**1. 链接设备**
-
-```bash
-vikingbot channels login
-# 使用 WhatsApp 扫描二维码 → 设置 → 链接设备
-```
-
-**2. 配置**
-
-```json
-{
- "channels": [
- {
- "type": "whatsapp",
- "enabled": true,
- "allowFrom": ["+1234567890"]
- }
- ]
-}
-```
-
-**3. 运行**(两个终端)
-
-```bash
-# 终端 1
-vikingbot channels login
-
-# 终端 2
-vikingbot gateway
-```
-
-
-
-
-飞书
-
-使用 **WebSocket** 长连接 —— 不需要公网 IP。
-
-**1. 创建飞书机器人**
-- 访问 [飞书开放平台](https://open.feishu.cn/app)
-- 创建新应用 → 启用 **机器人** 功能
-- **权限**:添加 `im:message`(发送消息)
-- **事件**:添加 `im.message.receive_v1`(接收消息)
- - 选择 **长连接** 模式(需要先运行 vikingbot 来建立连接)
-- 从「凭证与基础信息」获取 **App ID** 和 **App Secret**
-- 发布应用
-
-**2. 配置**
-
-```json
-{
- "channels": [
- {
- "type": "feishu",
- "enabled": true,
- "appId": "cli_xxx",
- "appSecret": "xxx",
- "encryptKey": "",
- "verificationToken": "",
- "allowFrom": []
- }
- ]
-}
-```
-
-> 长连接模式下,`encryptKey` 和 `verificationToken` 是可选的。
-> `allowFrom`:留空以允许所有用户,或添加 `["ou_xxx"]` 以限制访问。
-
-**3. 运行**
-
-```bash
-vikingbot gateway
-```
-
-> [!TIP]
-> 飞书使用 WebSocket 接收消息 —— 不需要 webhook 或公网 IP!
-
-
-
-
-QQ(QQ单聊)
-
-使用 **botpy SDK** 配合 WebSocket —— 不需要公网 IP。目前仅支持 **私聊**。
-
-**1. 注册并创建机器人**
-- 访问 [QQ 开放平台](https://q.qq.com) → 注册为开发者(个人或企业)
-- 创建新的机器人应用
-- 进入 **开发设置** → 复制 **AppID** 和 **AppSecret**
-
-**2. 设置沙箱测试环境**
-- 在机器人管理控制台中,找到 **沙箱配置**
-- 在 **在消息列表配置** 下,点击 **添加成员** 并添加您自己的 QQ 号
-- 添加完成后,用手机 QQ 扫描机器人的二维码 → 打开机器人资料卡 → 点击「发消息」开始聊天
-
-**3. 配置**
-
-> - `allowFrom`:留空以供公开访问,或添加用户 openid 以限制。您可以在用户向机器人发消息时在 vikingbot 日志中找到 openid。
-> - 生产环境:在机器人控制台提交审核并发布。查看 [QQ 机器人文档](https://bot.q.qq.com/wiki/) 了解完整发布流程。
-
-```json
-{
- "channels": [
- {
- "type": "qq",
- "enabled": true,
- "appId": "YOUR_APP_ID",
- "secret": "YOUR_APP_SECRET",
- "allowFrom": []
- }
- ]
-}
-```
-
-**4. 运行**
-
-```bash
-vikingbot gateway
-```
-
-现在从 QQ 向机器人发送消息 —— 它应该会回复!
-
-
-
-
-钉钉
-
-使用 **流模式** —— 不需要公网 IP。
-
-**1. 创建钉钉机器人**
-- 访问 [钉钉开放平台](https://open-dev.dingtalk.com/)
-- 创建新应用 -> 添加 **机器人** 功能
-- **配置**:
- - 打开 **流模式**
-- **权限**:添加发送消息所需的权限
-- 从「凭证」获取 **AppKey**(客户端 ID)和 **AppSecret**(客户端密钥)
-- 发布应用
-
-**2. 配置**
-
-```json
-{
- "channels": [
- {
- "type": "dingtalk",
- "enabled": true,
- "clientId": "YOUR_APP_KEY",
- "clientSecret": "YOUR_APP_SECRET",
- "allowFrom": []
- }
- ]
-}
-```
-
-> `allowFrom`:留空以允许所有用户,或添加 `["staffId"]` 以限制访问。
-
-**3. 运行**
-
-```bash
-vikingbot gateway
-```
-
-
-
-
-Slack
-
-使用 **Socket 模式** —— 不需要公网 URL。
-
-**1. 创建 Slack 应用**
-- 访问 [Slack API](https://api.slack.com/apps) → **创建新应用** →「从零开始」
-- 选择名称并选择您的工作区
-
-**2. 配置应用**
-- **Socket 模式**:打开 → 生成一个具有 `connections:write` 范围的 **应用级令牌** → 复制它(`xapp-...`)
-- **OAuth 与权限**:添加机器人范围:`chat:write`、`reactions:write`、`app_mentions:read`
-- **事件订阅**:打开 → 订阅机器人事件:`message.im`、`message.channels`、`app_mention` → 保存更改
-- **应用主页**:滚动到 **显示标签页** → 启用 **消息标签页** → 勾选 **"允许用户从消息标签页发送斜杠命令和消息"**
-- **安装应用**:点击 **安装到工作区** → 授权 → 复制 **机器人令牌**(`xoxb-...`)
-
-**3. 配置 vikingbot**
-
-```json
-{
- "channels": [
- {
- "type": "slack",
- "enabled": true,
- "botToken": "xoxb-...",
- "appToken": "xapp-...",
- "groupPolicy": "mention"
- }
- ]
-}
-```
-
-**4. 运行**
-
-```bash
-vikingbot gateway
-```
-
-直接向机器人发送私信或在频道中 @提及它 —— 它应该会回复!
-
-> [!TIP]
-> - `groupPolicy`:`"mention"`(默认 —— 仅在 @提及時回复)、`"open"`(回复所有频道消息)或 `"allowlist"`(限制到特定频道)。
-> - 私信策略默认为开放。设置 `"dm": {"enabled": false}` 以禁用私信。
-
-
-
-
-邮件
-
-给 vikingbot 一个自己的邮箱账户。它通过 **IMAP** 轮询收件箱并通过 **SMTP** 回复 —— 就像一个个人邮件助手。
-
-**1. 获取凭证(Gmail 示例)**
-- 为您的机器人创建一个专用的 Gmail 账户(例如 `my-vikingbot@gmail.com`)
-- 启用两步验证 → 创建 [应用密码](https://myaccount.google.com/apppasswords)
-- 将此应用密码用于 IMAP 和 SMTP
-
-**2. 配置**
-
-> - `consentGranted` 必须为 `true` 以允许邮箱访问。这是一个安全门 —— 设置为 `false` 以完全禁用。
-> - `allowFrom`:留空以接受来自任何人的邮件,或限制到特定发件人。
-> - `smtpUseTls` 和 `smtpUseSsl` 分别默认为 `true` / `false`,这对 Gmail(端口 587 + STARTTLS)是正确的。无需显式设置它们。
-> - 如果您只想读取/分析邮件而不发送自动回复,请设置 `"autoReplyEnabled": false`。
-
-```json
-{
- "channels": [
- {
- "type": "email",
- "enabled": true,
- "consentGranted": true,
- "imapHost": "imap.gmail.com",
- "imapPort": 993,
- "imapUsername": "my-vikingbot@gmail.com",
- "imapPassword": "your-app-password",
- "smtpHost": "smtp.gmail.com",
- "smtpPort": 587,
- "smtpUsername": "my-vikingbot@gmail.com",
- "smtpPassword": "your-app-password",
- "fromAddress": "my-vikingbot@gmail.com",
- "allowFrom": ["your-real-email@gmail.com"]
- }
- ]
-}
-```
-
-
-**3. 运行**
-
-```bash
-vikingbot gateway
-```
-
-
+详细配置请参考 [CHANNEL.md](CHANNEL.md)。
## 🌐 代理社交网络
@@ -571,40 +190,75 @@ vikingbot gateway
## ⚙️ 配置
-配置文件:`~/.vikingbot/config.json`
+配置文件:`~/.openviking/ov.conf`(可通过环境变量 `OPENVIKING_CONFIG_FILE` 自定义路径)
+
+> [!TIP]
+> Vikingbot 与 OpenViking 共享同一配置文件,配置项位于文件的 `bot` 字段下,同时会自动合并 `vlm`、`storage`、`server` 等全局配置,无需单独维护配置文件。
> [!IMPORTANT]
> 修改配置后(无论是通过控制台 UI 还是直接编辑文件),
> 您需要重启网关服务以使更改生效。
-### OpenViking 配置
-
-Vikingbot 支持本地和远程两种 OpenViking 模式。
-
-#### 本地模式(默认)
-
+### OpenViking Server 配置
+bot 将连接远程的 OpenViking 服务器,使用前需启动 OpenViking Server。默认使用`ov.conf`中配置的 OpenViking Server 信息。
+- OpenViking 默认启动地址为 127.0.0.1:1933
+- 如果配置了 `root_api_key`,则开启多租户模式。详见 [多租户](https://github.com/volcengine/OpenViking/blob/main/examples/multi_tenant/README.md)
+- OpenViking Server 配置示例
```json
{
- "openviking": {
- "mode": "local"
+ "server": {
+
+ "host": "127.0.0.1",
+ "port": 1933,
+ "root_api_key": "test"
}
}
```
-数据存储在 `~/.vikingbot/ov_data/`。
-
-#### 远程模式(配合火山引擎 TOS)
+### bot配置
+全部配置在`ov.conf`中`bot`字段下,配置项自带默认值。可选手动配置项说明如下:
+- `agents`:Agent 配置
+ - max_tool_iterations:单轮对话任务最大循环次数,超过则直接返回结果
+  - memory_window:自动提交 session 到 OpenViking 的对话轮次上限
+ - gen_image_model:生成图片的模型
+- `gateway`:Gateway 配置
+  - host:Gateway 监听地址,默认值为 `0.0.0.0`
+  - port:Gateway 监听端口,默认值为 `18790`
+- `sandbox`:沙箱配置
+  - mode:沙箱模式,可选值为 `shared`(所有session共享工作空间)或 `private`(私有,按Channel、session隔离工作空间)。默认值为 `shared`。
+- `ov_server`:OpenViking Server 配置。
+  - 不配置,默认使用`ov.conf`中配置的 OpenViking Server 信息
+  - 若不使用本地启动的 OpenViking Server,可在此配置 url 和对应的 root user 的 API Key
+- `channels`:消息平台配置,详见 [消息平台配置](CHANNEL.md)
```json
{
- "openviking": {
- "mode": "remote",
- "server_url": "https://your-openviking-server.com",
- "tos_endpoint": "https://tos-cn-beijing.volces.com",
- "tos_region": "cn-beijing",
- "tos_bucket": "your-bucket-name",
- "tos_ak": "your-access-key",
- "tos_sk": "your-secret-key"
+ "bot": {
+ "agents": {
+ "max_tool_iterations": 50,
+ "memory_window": 50,
+ "gen_image_model": "openai/doubao-seedream-4-5-251128"
+ },
+ "gateway": {
+ "host": "0.0.0.0",
+ "port": 18790
+ },
+ "sandbox": {
+ "mode": "shared"
+ },
+ "ov_server": {
+ "server_url": "http://127.0.0.1:1933",
+ "root_api_key": "test"
+ },
+ "channels": [
+ {
+ "type": "feishu",
+ "enabled": true,
+ "appId": "",
+ "appSecret": "",
+ "allowFrom": []
+ }
+ ]
}
}
```
@@ -644,19 +298,16 @@ Vikingbot 默认启用 OpenViking 钩子:
```json
{
- "providers": {
- "openai": {
- "apiKey": "sk-xxx"
- }
- },
- "agents": {
- "defaults": {
+ "bot": {
+ "agents": {
"model": "openai/doubao-seed-2-0-pro-260215"
}
}
}
```
+Provider 配置从 OpenViking 配置(`ov.conf` 的 `vlm` 部分)读取。
+
### 提供商
> [!TIP]
@@ -724,6 +375,70 @@ class ProvidersConfig(BaseModel):
+### 可观测性(可选)
+
+**Langfuse** 集成,用于 LLM 可观测性和追踪。
+
+
+Langfuse 配置
+
+**方式 1:本地部署(测试推荐)**
+
+使用 Docker 在本地部署 Langfuse:
+
+```bash
+# 进入部署脚本目录
+cd deploy/docker
+
+# 运行部署脚本
+./deploy_langfuse.sh
+```
+
+这将在 `http://localhost:3000` 启动 Langfuse,并使用预配置的凭据。
+
+**方式 2:Langfuse Cloud**
+
+1. 在 [langfuse.com](https://langfuse.com) 注册
+2. 创建新项目
+3. 从项目设置中复制 **Secret Key** 和 **Public Key**
+
+**配置**
+
+添加到 `~/.openviking/ov.conf`:
+
+```json
+{
+ "bot": {
+ "langfuse": {
+ "enabled": true,
+ "secret_key": "sk-lf-vikingbot-secret-key-2026",
+ "public_key": "pk-lf-vikingbot-public-key-2026",
+ "base_url": "http://localhost:3000"
+ }
+ }
+}
+```
+
+对于 Langfuse Cloud,使用 `https://cloud.langfuse.com` 作为 `base_url`。
+
+**安装 Langfuse 支持:**
+```bash
+uv pip install -e ".[langfuse]"
+```
+
+**重启 vikingbot:**
+```bash
+vikingbot gateway
+```
+
+**启用的功能:**
+- 每次对话自动创建 trace
+- Session 和 User 追踪
+- LLM 调用监控
+- Token 使用量追踪
+
+
+
### 安全
| 选项 | 默认值 | 描述 |
@@ -733,60 +448,146 @@ class ProvidersConfig(BaseModel):
### 沙箱
-vikingbot 支持沙箱执行以增强安全性。默认情况下,沙箱是禁用的。要在会话模式下使用 SRT 后端启用沙箱,请设置 `"enabled": true`。
+vikingbot 支持沙箱执行以增强安全性。
+
+**默认情况下,`ov.conf` 中不需要配置 sandbox:**
+- 默认后端:`direct`(直接在主机上运行代码)
+- 默认模式:`shared`(所有会话共享一个沙箱)
+
+只有当您想要更改这些默认值时,才需要添加 sandbox 配置。
-沙箱配置(SRT 后端)
+沙箱配置选项
+**使用不同的后端或模式:**
```json
{
- "sandbox": {
- "enabled": false,
- "backend": "srt",
- "mode": "per-session",
- "network": {
- "allowedDomains": [],
- "deniedDomains": [],
- "allowLocalBinding": false
- },
- "filesystem": {
- "denyRead": [],
- "allowWrite": [],
- "denyWrite": []
- },
- "runtime": {
- "cleanupOnExit": true,
- "timeout": 300
- },
- "backends": {
- "srt": {
- "nodePath": "node"
+ "bot": {
+ "sandbox": {
+ "backend": "opensandbox",
+ "mode": "per-session"
+ }
+ }
+}
+```
+
+**可用后端:**
+| 后端 | 描述 |
+|---------|-------------|
+| `direct` | (默认)直接在主机上运行代码 |
+| `docker` | 使用 Docker 容器进行隔离 |
+| `opensandbox` | 使用 OpenSandbox 服务 |
+| `srt` | 使用 Anthropic 的 SRT 沙箱运行时 |
+| `aiosandbox` | 使用 AIO Sandbox 服务 |
+
+**可用模式:**
+| 模式 | 描述 |
+|------|-------------|
+| `shared` | (默认)所有会话共享一个沙箱 |
+| `per-session` | 每个会话使用独立的沙箱实例 |
+
+**后端特定配置(仅在使用该后端时需要):**
+
+**Direct 后端:**
+```json
+{
+ "bot": {
+ "sandbox": {
+ "backends": {
+ "direct": {
+ "restrictToWorkspace": false
+ }
}
}
}
}
```
-**配置选项:**
+**OpenSandbox 后端:**
+```json
+{
+ "bot": {
+ "sandbox": {
+ "backend": "opensandbox",
+ "backends": {
+ "opensandbox": {
+ "serverUrl": "http://localhost:18792",
+ "apiKey": "",
+ "defaultImage": "opensandbox/code-interpreter:v1.0.1"
+ }
+ }
+ }
+ }
+}
+```
-| 选项 | 默认值 | 描述 |
-|--------|---------|-------------|
-| `enabled` | `false` | 启用沙箱执行 |
-| `backend` | `"srt"` | 沙箱后端:`srt` 或 `docker` |
-| `mode` | `"per-session"` | 沙箱模式:`per-session`(每个会话隔离)或 `shared`(跨会话共享) |
-| `network.allowedDomains` | `[]` | 允许网络访问的域列表(空 = 允许所有) |
-| `network.deniedDomains` | `[]` | 拒绝的域列表(无论允许列表如何都被阻止) |
-| `network.allowLocalBinding` | `false` | 允许绑定到本地地址(localhost、127.0.0.1) |
-| `filesystem.denyRead` | `[]` | 拒绝读取访问的路径/文件 |
-| `filesystem.allowWrite` | `[]` | 明确允许写入访问的路径/文件 |
-| `filesystem.denyWrite` | `[]` | 拒绝写入访问的路径/文件 |
-| `runtime.cleanupOnExit` | `true` | 退出时清理沙箱资源 |
-| `runtime.timeout` | `300` | 命令执行超时(秒) |
-| `backends.srt.nodePath` | `"/usr/local/bin/node"` | Node.js 可执行文件的路径(如果 `node` 不在 PATH 中,请使用完整路径) |
+**Docker 后端:**
+```json
+{
+ "bot": {
+ "sandbox": {
+ "backend": "docker",
+ "backends": {
+ "docker": {
+ "image": "python:3.11-slim",
+ "networkMode": "bridge"
+ }
+ }
+ }
+ }
+}
+```
+
+**SRT 后端:**
+```json
+{
+ "bot": {
+ "sandbox": {
+ "backend": "srt",
+ "backends": {
+ "srt": {
+ "settingsPath": "~/.vikingbot/srt-settings.json",
+ "nodePath": "node",
+ "network": {
+ "allowedDomains": [],
+ "deniedDomains": [],
+ "allowLocalBinding": false
+ },
+ "filesystem": {
+ "denyRead": [],
+ "allowWrite": [],
+ "denyWrite": []
+ },
+ "runtime": {
+ "cleanupOnExit": true,
+ "timeout": 300
+ }
+ }
+ }
+ }
+ }
+}
+```
+
+**AIO Sandbox 后端:**
+```json
+{
+ "bot": {
+ "sandbox": {
+ "backend": "aiosandbox",
+ "backends": {
+ "aiosandbox": {
+ "baseUrl": "http://localhost:18794"
+ }
+ }
+ }
+ }
+}
+```
**SRT 后端设置:**
-SRT 后端使用 `@anthropic-ai/sandbox-runtime`。当您运行 `vikingbot onboard` 时它会自动安装。
+SRT 后端使用 `@anthropic-ai/sandbox-runtime`。
**系统依赖:**
@@ -853,11 +654,10 @@ which nodejs
| 命令 | 描述 |
|---------|-------------|
-| `vikingbot agent -m "..."` | 与代理聊天 |
-| `vikingbot agent` | 交互式聊天模式 |
-| `vikingbot agent --no-markdown` | 显示纯文本回复 |
-| `vikingbot agent --logs` | 聊天期间显示运行时日志 |
-| `vikingbot tui` | 启动 TUI(终端用户界面) |
+| `vikingbot chat -m "..."` | 与代理聊天 |
+| `vikingbot chat` | 交互式聊天模式 |
+| `vikingbot chat --no-markdown` | 显示纯文本回复 |
+| `vikingbot chat --logs` | 聊天期间显示运行时日志 |
| `vikingbot gateway` | 启动网关和控制台 Web UI |
| `vikingbot status` | 显示状态 |
| `vikingbot channels login` | 链接 WhatsApp(扫描二维码) |
@@ -880,23 +680,6 @@ which nodejs
交互模式退出:`exit`、`quit`、`/exit`、`/quit`、`:q` 或 `Ctrl+D`。
-
-TUI(终端用户界面)
-
-启动 vikingbot TUI 以获得丰富的基于终端的聊天体验:
-
-```bash
-vikingbot tui
-```
-
-TUI 提供:
-- 支持 markdown 的富文本渲染
-- 消息历史和对话管理
-- 实时代理响应
-- 导航的键盘快捷键
-
-
-
定时任务(Cron)
diff --git a/bot/deploy/docker/deploy_langfuse.sh b/bot/deploy/docker/deploy_langfuse.sh
new file mode 100755
index 00000000..80dd0be7
--- /dev/null
+++ b/bot/deploy/docker/deploy_langfuse.sh
@@ -0,0 +1,28 @@
+#!/bin/bash
+# Deploy local Langfuse using Docker Compose
+
+set -e
+
+SCRIPT_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"
+LANGFUSE_DIR="$SCRIPT_DIR/langfuse"
+
+cd "$LANGFUSE_DIR"
+
+echo "🚀 Starting Langfuse..."
+docker-compose up -d
+
+echo ""
+echo "✅ Langfuse deployed successfully!"
+echo ""
+echo "🌐 Web UI: http://localhost:3000"
+echo ""
+echo "📧 Login credentials:"
+echo " Email: admin@vikingbot.local"
+echo " Password: vikingbot-admin-password-2026"
+echo ""
+echo "🔑 API keys:"
+echo " Public key: pk-lf-vikingbot-public-key-2026"
+echo " Secret key: sk-lf-vikingbot-secret-key-2026"
+echo ""
+echo "📝 To view logs: docker-compose -f $LANGFUSE_DIR/docker-compose.yml logs -f"
+echo "📝 To stop: docker-compose -f $LANGFUSE_DIR/docker-compose.yml down"
diff --git a/bot/deploy/docker/langfuse/docker-compose.yml b/bot/deploy/docker/langfuse/docker-compose.yml
new file mode 100644
index 00000000..cf9a18c2
--- /dev/null
+++ b/bot/deploy/docker/langfuse/docker-compose.yml
@@ -0,0 +1,175 @@
+# Make sure to update the credential placeholders with your own secrets.
+# We mark them with # CHANGEME in the file below.
+# In addition, we recommend to restrict inbound traffic on the host to langfuse-web (port 3000) and minio (port 9090) only.
+# All other components are bound to localhost (127.0.0.1) to only accept connections from the local machine.
+# External connections from other machines will not be able to reach these services directly.
+services:
+ langfuse-worker:
+ image: docker.io/langfuse/langfuse-worker:3
+ restart: always
+ depends_on: &langfuse-depends-on
+ postgres:
+ condition: service_healthy
+ minio:
+ condition: service_healthy
+ redis:
+ condition: service_healthy
+ clickhouse:
+ condition: service_healthy
+ ports:
+ - 127.0.0.1:3030:3030
+ environment: &langfuse-worker-env
+ NEXTAUTH_URL: ${NEXTAUTH_URL:-http://localhost:3000}
+ DATABASE_URL: ${DATABASE_URL:-postgresql://postgres:postgres@postgres:5432/postgres} # CHANGEME
+ SALT: ${SALT:-vikingbot-salt-2026}
+ ENCRYPTION_KEY: ${ENCRYPTION_KEY:-0000000000000000000000000000000000000000000000000000000000000000}
+ TELEMETRY_ENABLED: ${TELEMETRY_ENABLED:-false}
+ LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES: ${LANGFUSE_ENABLE_EXPERIMENTAL_FEATURES:-false}
+ CLICKHOUSE_MIGRATION_URL: ${CLICKHOUSE_MIGRATION_URL:-clickhouse://clickhouse:9000}
+ CLICKHOUSE_URL: ${CLICKHOUSE_URL:-http://clickhouse:8123}
+ CLICKHOUSE_USER: ${CLICKHOUSE_USER:-clickhouse}
+ CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD:-clickhouse}
+ CLICKHOUSE_CLUSTER_ENABLED: ${CLICKHOUSE_CLUSTER_ENABLED:-false}
+ LANGFUSE_USE_AZURE_BLOB: ${LANGFUSE_USE_AZURE_BLOB:-false}
+ LANGFUSE_S3_EVENT_UPLOAD_BUCKET: ${LANGFUSE_S3_EVENT_UPLOAD_BUCKET:-langfuse}
+ LANGFUSE_S3_EVENT_UPLOAD_REGION: ${LANGFUSE_S3_EVENT_UPLOAD_REGION:-auto}
+ LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID: ${LANGFUSE_S3_EVENT_UPLOAD_ACCESS_KEY_ID:-minio}
+ LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY: ${LANGFUSE_S3_EVENT_UPLOAD_SECRET_ACCESS_KEY:-miniosecret}
+ LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT: ${LANGFUSE_S3_EVENT_UPLOAD_ENDPOINT:-http://minio:9000}
+ LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE: ${LANGFUSE_S3_EVENT_UPLOAD_FORCE_PATH_STYLE:-true}
+ LANGFUSE_S3_EVENT_UPLOAD_PREFIX: ${LANGFUSE_S3_EVENT_UPLOAD_PREFIX:-events/}
+ LANGFUSE_S3_MEDIA_UPLOAD_BUCKET: ${LANGFUSE_S3_MEDIA_UPLOAD_BUCKET:-langfuse}
+ LANGFUSE_S3_MEDIA_UPLOAD_REGION: ${LANGFUSE_S3_MEDIA_UPLOAD_REGION:-auto}
+ LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID: ${LANGFUSE_S3_MEDIA_UPLOAD_ACCESS_KEY_ID:-minio}
+ LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY: ${LANGFUSE_S3_MEDIA_UPLOAD_SECRET_ACCESS_KEY:-miniosecret}
+ LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT: ${LANGFUSE_S3_MEDIA_UPLOAD_ENDPOINT:-http://localhost:9090}
+ LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE: ${LANGFUSE_S3_MEDIA_UPLOAD_FORCE_PATH_STYLE:-true}
+ LANGFUSE_S3_MEDIA_UPLOAD_PREFIX: ${LANGFUSE_S3_MEDIA_UPLOAD_PREFIX:-media/}
+ LANGFUSE_S3_BATCH_EXPORT_ENABLED: ${LANGFUSE_S3_BATCH_EXPORT_ENABLED:-false}
+ LANGFUSE_S3_BATCH_EXPORT_BUCKET: ${LANGFUSE_S3_BATCH_EXPORT_BUCKET:-langfuse}
+ LANGFUSE_S3_BATCH_EXPORT_PREFIX: ${LANGFUSE_S3_BATCH_EXPORT_PREFIX:-exports/}
+ LANGFUSE_S3_BATCH_EXPORT_REGION: ${LANGFUSE_S3_BATCH_EXPORT_REGION:-auto}
+ LANGFUSE_S3_BATCH_EXPORT_ENDPOINT: ${LANGFUSE_S3_BATCH_EXPORT_ENDPOINT:-http://minio:9000}
+ LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT: ${LANGFUSE_S3_BATCH_EXPORT_EXTERNAL_ENDPOINT:-http://localhost:9090}
+ LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID: ${LANGFUSE_S3_BATCH_EXPORT_ACCESS_KEY_ID:-minio}
+ LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY: ${LANGFUSE_S3_BATCH_EXPORT_SECRET_ACCESS_KEY:-miniosecret}
+ LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE: ${LANGFUSE_S3_BATCH_EXPORT_FORCE_PATH_STYLE:-true}
+ LANGFUSE_INGESTION_QUEUE_DELAY_MS: ${LANGFUSE_INGESTION_QUEUE_DELAY_MS:-}
+ LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS: ${LANGFUSE_INGESTION_CLICKHOUSE_WRITE_INTERVAL_MS:-}
+ REDIS_HOST: ${REDIS_HOST:-redis}
+ REDIS_PORT: ${REDIS_PORT:-6379}
+ REDIS_AUTH: ${REDIS_AUTH:-vikingbot-redis-secret}
+ REDIS_TLS_ENABLED: ${REDIS_TLS_ENABLED:-false}
+ REDIS_TLS_CA: ${REDIS_TLS_CA:-/certs/ca.crt}
+ REDIS_TLS_CERT: ${REDIS_TLS_CERT:-/certs/redis.crt}
+ REDIS_TLS_KEY: ${REDIS_TLS_KEY:-/certs/redis.key}
+ EMAIL_FROM_ADDRESS: ${EMAIL_FROM_ADDRESS:-}
+ SMTP_CONNECTION_URL: ${SMTP_CONNECTION_URL:-}
+
+ langfuse-web:
+ image: docker.io/langfuse/langfuse:3
+ restart: always
+ depends_on: *langfuse-depends-on
+ ports:
+ - 3000:3000
+ environment:
+ <<: *langfuse-worker-env
+ NEXTAUTH_SECRET: ${NEXTAUTH_SECRET:-vikingbot-nextauth-secret-2026}
+ LANGFUSE_INIT_ORG_ID: ${LANGFUSE_INIT_ORG_ID:-vikingbot-org}
+ LANGFUSE_INIT_ORG_NAME: ${LANGFUSE_INIT_ORG_NAME:-Vikingbot Org}
+ LANGFUSE_INIT_PROJECT_ID: ${LANGFUSE_INIT_PROJECT_ID:-vikingbot-project}
+ LANGFUSE_INIT_PROJECT_NAME: ${LANGFUSE_INIT_PROJECT_NAME:-Vikingbot Project}
+ LANGFUSE_INIT_PROJECT_PUBLIC_KEY: ${LANGFUSE_INIT_PROJECT_PUBLIC_KEY:-pk-lf-vikingbot-public-key-2026}
+ LANGFUSE_INIT_PROJECT_SECRET_KEY: ${LANGFUSE_INIT_PROJECT_SECRET_KEY:-sk-lf-vikingbot-secret-key-2026}
+ LANGFUSE_INIT_USER_EMAIL: ${LANGFUSE_INIT_USER_EMAIL:-admin@vikingbot.local}
+ LANGFUSE_INIT_USER_NAME: ${LANGFUSE_INIT_USER_NAME:-Vikingbot Admin}
+ LANGFUSE_INIT_USER_PASSWORD: ${LANGFUSE_INIT_USER_PASSWORD:-vikingbot-admin-password-2026}
+
+ clickhouse:
+ image: docker.io/clickhouse/clickhouse-server
+ restart: always
+ user: "101:101"
+ environment:
+ CLICKHOUSE_DB: default
+ CLICKHOUSE_USER: ${CLICKHOUSE_USER:-clickhouse}
+ CLICKHOUSE_PASSWORD: ${CLICKHOUSE_PASSWORD:-clickhouse}
+ volumes:
+ - langfuse_clickhouse_data:/var/lib/clickhouse
+ - langfuse_clickhouse_logs:/var/log/clickhouse-server
+ ports:
+ - 127.0.0.1:8123:8123
+ - 127.0.0.1:9000:9000
+ healthcheck:
+ test: wget --no-verbose --tries=1 --spider http://localhost:8123/ping || exit 1
+ interval: 5s
+ timeout: 5s
+ retries: 10
+ start_period: 1s
+
+ minio:
+ image: cgr.dev/chainguard/minio
+ restart: always
+ entrypoint: sh
+ # create the 'langfuse' bucket before starting the service
+ command: -c 'mkdir -p /data/langfuse && minio server --address ":9000" --console-address ":9001" /data'
+ environment:
+ MINIO_ROOT_USER: ${MINIO_ROOT_USER:-minio}
+ MINIO_ROOT_PASSWORD: ${MINIO_ROOT_PASSWORD:-miniosecret}
+ ports:
+ - 9090:9000
+ - 127.0.0.1:9091:9001
+ volumes:
+ - langfuse_minio_data:/data
+ healthcheck:
+ test: ["CMD", "mc", "ready", "local"]
+ interval: 1s
+ timeout: 5s
+ retries: 5
+ start_period: 1s
+
+ redis:
+ image: docker.io/redis:7
+ restart: always
+ command: >
+ --requirepass ${REDIS_AUTH:-vikingbot-redis-secret}
+ --maxmemory-policy noeviction
+ ports:
+ - 127.0.0.1:6379:6379
+ volumes:
+ - langfuse_redis_data:/data
+ healthcheck:
+ test: ["CMD", "redis-cli", "ping"]
+ interval: 3s
+ timeout: 10s
+ retries: 10
+
+ postgres:
+ image: docker.io/postgres:${POSTGRES_VERSION:-17}
+ restart: always
+ healthcheck:
+ test: ["CMD-SHELL", "pg_isready -U postgres"]
+ interval: 3s
+ timeout: 3s
+ retries: 10
+ environment:
+ POSTGRES_USER: ${POSTGRES_USER:-postgres}
+ POSTGRES_PASSWORD: ${POSTGRES_PASSWORD:-postgres}
+ POSTGRES_DB: ${POSTGRES_DB:-postgres}
+ TZ: UTC
+ PGTZ: UTC
+ ports:
+ - 127.0.0.1:5432:5432
+ volumes:
+ - langfuse_postgres_data:/var/lib/postgresql/data
+
+volumes:
+ langfuse_postgres_data:
+ driver: local
+ langfuse_clickhouse_data:
+ driver: local
+ langfuse_clickhouse_logs:
+ driver: local
+ langfuse_minio_data:
+ driver: local
+ langfuse_redis_data:
+ driver: local
diff --git a/bot/docs/openclaw-plugin-analysis.md b/bot/docs/openclaw-plugin-analysis.md
new file mode 100644
index 00000000..33e208f5
--- /dev/null
+++ b/bot/docs/openclaw-plugin-analysis.md
@@ -0,0 +1,353 @@
+# OpenClaw 插件机制深度分析
+
+> 分析日期:2026-03-03
+> 基于 OpenClaw 最新代码库
+
+---
+
+## 目录
+
+1. [插件机制概述](#1-插件机制概述)
+2. [插件分类体系](#2-插件分类体系)
+3. [注册与加载机制](#3-注册与加载机制)
+4. [架构设计详解](#4-架构设计详解)
+5. [内置插件清单](#5-内置插件清单)
+6. [关于动态修改 SKILL.md 的分析](#6-关于动态修改-skillmd-的分析)
+
+---
+
+## 1. 插件机制概述
+
+OpenClaw 采用**分层、可扩展的插件架构**,支持三种类型的插件:
+
+- **内置插件 (Bundled)** - 随 OpenClaw 一起发布
+- **托管插件 (Managed)** - 通过 ClawHub 安装管理
+- **工作空间插件 (Workspace)** - 项目特定的本地插件
+
+插件发现层级遵循以下优先级(从高到低):
+
+```
+Config paths → Workspace → Global → Bundled
+```
+
+---
+
+## 2. 插件分类体系
+
+OpenClaw 支持以下扩展类型:
+
+| 扩展类型 | 说明 | 示例 |
+|---------|------|------|
+| **Channel 插件** | 消息通道集成 | Matrix, Zalo, WhatsApp |
+| **Tool 插件** | 工具扩展 | 文件操作、网络请求 |
+| **Gateway RPC** | RPC 接口扩展 | 自定义 API 端点 |
+| **HTTP Handlers** | HTTP 请求处理器 | Webhook 处理 |
+| **CLI Commands** | 命令行命令 | 自定义 CLI 指令 |
+| **Services** | 后台服务 | 定时任务、监听器 |
+| **Hooks** | 事件钩子 | 生命周期钩子 |
+| **Provider Auth** | 认证提供者 | OAuth、API Key 管理 |
+
+---
+
+## 3. 注册与加载机制
+
+### 3.1 插件加载器
+
+OpenClaw 使用 **jiti** 作为插件加载器,支持**运行时直接执行 TypeScript**,无需预编译:
+
+```typescript
+// 核心加载函数来自 pi-coding-agent 包
+import { loadSkillsFromDir } from '@mariozechner/pi-coding-agent';
+
+// 加载技能目录
+const skills = await loadSkillsFromDir(skillDir);
+```
+
+### 3.2 注册 API
+
+插件通过以下 API 注册到系统:
+
+```typescript
+// 注册通道
+api.registerChannel(config: ChannelConfig);
+
+// 注册工具
+api.registerTool(name: string, handler: ToolHandler);
+
+// 注册 Gateway 方法
+api.registerGatewayMethod(method: string, handler: Function);
+```
+
+### 3.3 文件监听与热重载
+
+OpenClaw 使用 `chokidar` 监听文件变化,实现热重载:
+
+```typescript
+// 来自 src/agents/skills/refresh.ts
+const watcher = chokidar.watch(watchTargets, {
+ ignoreInitial: true,
+ awaitWriteFinish: {
+ stabilityThreshold: debounceMs,
+ pollInterval: 100,
+ },
+ ignored: DEFAULT_SKILLS_WATCH_IGNORED,
+});
+```
+
+---
+
+## 4. 架构设计详解
+
+### 4.1 基于 Hook 的事件驱动架构
+
+OpenClaw 的核心是事件驱动的 Hook 系统,主要事件包括:
+
+| 事件 | 触发时机 |
+|------|---------|
+| `message:inbound` | 消息流入系统 |
+| `message:outbound` | 消息流出系统 |
+| `agent:start` | Agent 开始工作 |
+| `agent:complete` | Agent 完成工作 |
+| `config:reload` | 配置重新加载 |
+| `before_prompt_build` | 构建 prompt 之前 |
+| `llm_input` | LLM 输入前 |
+| `llm_output` | LLM 输出后 |
+
+### 4.2 插件 SDK 能力
+
+插件 SDK 提供以下核心能力:
+
+```typescript
+interface PluginSDK {
+ // 后台服务
+ Background: {
+ start(service: BackgroundService): void;
+ stop(serviceId: string): void;
+ };
+
+ // 生命周期钩子
+ Lifecycle: {
+ on(event: LifecycleEvent, handler: Function): void;
+ };
+
+ // 配置管理
+ Config: {
+ get(key: string): T;
+ set(key: string, value: T): void;
+ };
+
+ // 日志
+ Logger: {
+ info(msg: string): void;
+ error(msg: string): void;
+ debug(msg: string): void;
+ };
+}
+```
+
+---
+
+## 5. 内置插件清单
+
+### 5.1 内置通道 (Bundled Channels)
+
+| 通道 | 说明 |
+|------|------|
+| WhatsApp | WhatsApp 商业 API |
+| Telegram | Telegram Bot |
+| Slack | Slack App |
+| Discord | Discord Bot |
+| Signal | Signal 集成 |
+| iMessage | Apple iMessage |
+| Google Chat | Google Workspace Chat |
+
+### 5.2 扩展插件 (位于 `/extensions/`)
+
+| 插件 | 说明 |
+|------|------|
+| Matrix | 去中心化聊天协议 |
+| Microsoft Teams | 微软团队协作 |
+| Zalo (User/Business) | 越南社交应用 |
+| Nostr | 去中心化社交网络 |
+| LINE | 日本即时通讯 |
+| Mattermost | 开源团队协作 |
+| Nextcloud Talk | 私有云通话 |
+
+---
+
+## 6. 关于动态修改 SKILL.md 的分析
+
+### 6.1 核心结论
+
+**OpenClaw 目前无法直接在 skill 加载时修改 SKILL.md 内容。**
+
+原因:
+1. **无生命周期钩子** - Skill 系统没有提供 `onLoad`、`beforeLoad` 等钩子
+2. **静态声明式架构** - Skills 通过 `SKILL.md` 静态定义,使用 `pi-coding-agent` 包加载,没有预留修改入口
+3. **只读解析** - Frontmatter 解析器只读取不写入
+4. **加载后只读** - Skill 加载后被用于构建 system prompt,本身不会被修改
+
+### 6.2 可行替代方案
+
+#### 方案 1: 外部预处理脚本(推荐)
+
+在启动 OpenClaw 之前,运行脚本修改 SKILL.md:
+
+```bash
+#!/bin/bash
+# preprocess-skills.sh
+node scripts/modify-skills.js
+openclaw start # 启动 OpenClaw
+```
+
+```javascript
+// scripts/modify-skills.js
+const fs = require('fs');
+const path = require('path');
+const yaml = require('yaml');
+
+const skillPath = process.env.SKILL_PATH || './skills/my-skill/SKILL.md';
+const content = fs.readFileSync(skillPath, 'utf8');
+
+// 解析 frontmatter
+const match = content.match(/^---\n([\s\S]*?)\n---\n([\s\S]*)$/);
+if (match) {
+ const frontmatter = yaml.parse(match[1]);
+ const body = match[2];
+
+ // 动态修改内容
+ frontmatter.lastModified = new Date().toISOString();
+ frontmatter.dynamicValue = calculateDynamicValue();
+
+ // 写回文件
+ const newContent = `---\n${yaml.stringify(frontmatter)}---\n${body}`;
+ fs.writeFileSync(skillPath, newContent);
+}
+```
+
+#### 方案 2: 使用 OpenClaw Hooks 系统
+
+利用 `before_prompt_build` hook 在构建 prompt 时动态修改 skill 内容:
+
+```typescript
+// 在你的插件中
+import { definePlugin } from 'openclaw';
+
+export default definePlugin({
+ name: 'dynamic-skill-modifier',
+
+ hooks: {
+ // 在构建 prompt 之前修改 skill 内容
+ before_prompt_build: async ({ skills, context }) => {
+ // 动态修改 skill 对象(不修改文件,只修改内存中的表示)
+ for (const skill of skills) {
+ if (skill.name === 'my-dynamic-skill') {
+ skill.content = modifySkillContent(skill.content, context);
+ }
+ }
+ return { skills };
+ }
+ }
+});
+```
+
+#### 方案 3: 自定义 Skill 加载器(高级)
+
+创建一个自定义的 skill 加载插件,拦截加载过程:
+
+```typescript
+// plugins/dynamic-skill-loader.ts
+import { loadSkillsFromDir } from 'pi-coding-agent';
+import * as fs from 'fs';
+import * as path from 'path';
+
+export class DynamicSkillLoader {
+ async loadSkills(skillDir: string) {
+ // 1. 复制 skill 到临时目录
+ const tempDir = this.createTempCopy(skillDir);
+
+ // 2. 修改临时目录中的 SKILL.md
+ this.modifySkillMdFiles(tempDir);
+
+ // 3. 从临时目录加载
+ return loadSkillsFromDir(tempDir);
+ }
+
+ private modifySkillMdFiles(dir: string) {
+ const skillMdFiles = this.findSkillMdFiles(dir);
+ for (const file of skillMdFiles) {
+ let content = fs.readFileSync(file, 'utf8');
+
+ // 动态修改内容
+ content = this.applyDynamicModifications(content);
+
+ fs.writeFileSync(file, content);
+ }
+ }
+
+ private applyDynamicModifications(content: string): string {
+ // 添加动态生成的内容
+ const dynamicSection = `\n\n\n`;
+ return content + dynamicSection;
+ }
+}
+```
+
+#### 方案 4: 文件监听 + 触发重载(最符合 OpenClaw 设计)
+
+利用 OpenClaw 已有的文件监听机制,在修改 SKILL.md 后自动重载:
+
+```typescript
+// 在你的构建脚本中
+import * as chokidar from 'chokidar';
+import * as fs from 'fs';
+
+// 监听原始 skill 定义文件
+const watcher = chokidar.watch('./skill-sources/**/*.md');
+
+watcher.on('change', (filepath) => {
+ console.log(`Source changed: ${filepath}`);
+
+ // 重新生成 SKILL.md
+ generateSkillMd(filepath);
+});
+
+function generateSkillMd(sourcePath: string) {
+ const source = fs.readFileSync(sourcePath, 'utf8');
+
+ // 动态生成 frontmatter
+ const frontmatter = {
+ name: extractName(source),
+ version: calculateVersion(),
+ lastBuild: new Date().toISOString(),
+ dynamicConfig: loadDynamicConfig()
+ };
+
+ // 写入 SKILL.md(触发 OpenClaw 重载)
+ const skillMd = `---\n${yaml.stringify(frontmatter)}---\n${extractBody(source)}`;
+ fs.writeFileSync('./skills/my-skill/SKILL.md', skillMd);
+}
+```
+
+### 6.3 方案对比
+
+| 方案 | 复杂度 | 侵入性 | 适用场景 | 推荐度 |
+|------|--------|--------|----------|--------|
+| 预处理脚本 | 低 | 低 | 启动前一次性修改 | ★★★★★ |
+| Hooks 系统 | 中 | 中 | 运行时动态修改内存中的 skill | ★★★★ |
+| 自定义加载器 | 高 | 高 | 需要完全控制加载过程 | ★★★ |
+| 文件监听重载 | 中 | 低 | 需要持续同步外部变更 | ★★★★ |
+
+### 6.4 设计哲学总结
+
+**OpenClaw 的 Skill 系统是静态声明式的**,设计理念是:
+- Skill 定义(SKILL.md)是**只读的声明**
+- 动态行为通过 **Hooks** 和 **插件** 实现
+- 文件变化通过 **监听 + 重载** 机制处理
+
+因此,如果需要"在 skill 加载时修改 SKILL.md",应该:
+1. **在加载前** 通过预处理脚本修改(方案 1)
+2. **在加载后** 通过 Hooks 修改内存中的表示(方案 2)
+3. **避免** 尝试在加载过程中 hack 内部机制
+
+这种设计与 OpenClaw 的整体架构哲学一致:**声明式配置 + 程序化扩展**。
diff --git a/bot/docs/rfc-openviking-cli-ov-chat.md b/bot/docs/rfc-openviking-cli-ov-chat.md
new file mode 100644
index 00000000..0e43f620
--- /dev/null
+++ b/bot/docs/rfc-openviking-cli-ov-chat.md
@@ -0,0 +1,341 @@
+# RFC: OpenViking CLI Support for ov chat Command
+
+**Author:** OpenViking Team
+**Status:** Implemented
+**Date:** 2025-03-03
+
+---
+
+## 1. Executive Summary / 执行摘要
+
+This document describes the integration architecture between `ov` CLI (Rust), `openviking-server` (Python/FastAPI), and `vikingbot` (Python AI agent framework). The goal is to provide a unified chat interface where the bot service shares the same port and authentication mechanism as the OpenViking server.
+
+本文档描述了 `ov` CLI(Rust)、`openviking-server`(Python/FastAPI)和 `vikingbot`(Python AI agent 框架)之间的集成架构。目标是提供一个统一的聊天界面,使 bot 服务与 OpenViking 服务器共享相同的端口和认证机制。
+
+---
+
+## 2. Architecture Overview / 架构概览
+
+### 2.1 系统整体架构 / System Architecture
+
+**部署说明 / Deployment Note:** OpenViking Server 和 Vikingbot 部署在同一台机器上,通过本地端口通信。
+
+```mermaid
+flowchart TB
+ subgraph Client["客户端 / Client (可远程)"]
+ CLI["ov CLI
(Rust)"]
+ end
+
+ subgraph SameMachine["同一台机器 / Same Machine"]
+ direction TB
+
+ subgraph Server["OpenViking Server
(Python/FastAPI, Port 1933)"]
+ Auth["统一认证中间件
Unified Auth"]
+ BotAPIProxy["Bot API Proxy
(--with-bot)"]
+ BotRouter["/api/v1/bot/*
Router"]
+ end
+
+ subgraph Vikingbot["Vikingbot (Process 2, Port 18791)"]
+ subgraph Channels["Channels (BaseChannel 实现)"]
+ OC["OpenAPIChannel"]
+ FC["FeishuChannel
(Webhook)"]
+ DC["DiscordChannel"]
+ TC["TelegramChannel"]
+ end
+ MB["MessageBus"]
+ AL["Agent Loop"]
+ end
+ end
+
+ CLI -->|"HTTP + API Key"| Auth
+ Auth --> BotAPIProxy
+ BotAPIProxy -->|"Proxy to"| BotRouter
+ BotRouter -->|"Forward to"| OC
+
+ FC -->|"Webhook Events"| MB
+ DC -->|"WebSocket"| MB
+ TC -->|"Bot API"| MB
+ OC -->|"send_to_bus()"| MB
+ MB --> AL
+
+ OC -.->|"implements"| BaseChannel["BaseChannel"]
+ FC -.->|"implements"| BaseChannel
+ DC -.->|"implements"| BaseChannel
+ TC -.->|"implements"| BaseChannel
+```
+
+### 2.2 Channel-Bus-Agent 架构详解
+
+展示 Channel 与 MessageBus 的关系,以及各 Channel 如何作为 BaseChannel 实现:
+
+```mermaid
+flowchart TB
+ subgraph Vikingbot["Vikingbot Core"]
+ direction TB
+
+ subgraph BaseChannelImpl["BaseChannel Implementations / 通道实现"]
+ direction LR
+
+ subgraph OC["OpenAPIChannel
(HTTP API 通道)"]
+ OCEndpoints["Endpoints:
- /chat
- /chat/stream
- /health
- /docs"]
+ OCService["Service:
OpenAPIChannelService"]
+ end
+
+ subgraph FC["FeishuChannel
(飞书 Webhook)"]
+ FCEndpoints["Endpoints:
- /webhook/event
- /webhook/card"]
+ FCService["Service:
FeishuChannelService"]
+ end
+
+ subgraph Others["Other Channels"]
+ Discord["DiscordChannel"]
+ Telegram["TelegramChannel"]
+ Slack["SlackChannel"]
+ end
+ end
+
+ subgraph Core["Core Components / 核心组件"]
+ MB["MessageBus
消息总线
- inbound queue
- outbound queue
- log store"]
+ AL["Agent Loop
代理循环
- ContextBuilder
- LLM (LiteLLM)
- Tool Executor"]
+ end
+ end
+
+ subgraph External["External Clients / 外部客户端"]
+ CLI["ov CLI"]
+ FeishuApp["Feishu App
飞书应用"]
+ DiscordClient["Discord Client"]
+ end
+
+ CLI -->|"HTTP POST
http://localhost:18791/chat"| OCEndpoints
+ FeishuApp -->|"Webhook POST
/webhook/event"| FCEndpoints
+ DiscordClient -->|"WebSocket"| Discord
+
+ OCEndpoints --> OCService
+ FCEndpoints --> FCService
+
+ OCService -->|"send_to_bus()
message → bus"| MB
+ FCService -->|"send_to_bus()
message → bus"| MB
+ Discord -->|"send_to_bus()"| MB
+ Telegram -->|"send_to_bus()"| MB
+
+ MB -->|"consume"| AL
+ AL -->|"reply"| MB
+ MB -->|"dispatch"| OCService
+ MB -->|"dispatch"| FCService
+
+ classDef channelClass fill:#e1f5fe,stroke:#01579b,stroke-width:2px
+ classDef coreClass fill:#fff3e0,stroke:#e65100,stroke-width:2px
+ classDef externalClass fill:#e8f5e9,stroke:#2e7d32,stroke-width:2px
+
+ class OC,FC,Discord,Telegram,Others channelClass
+ class MB,AL coreClass
+ class CLI,FeishuApp,DiscordClient externalClass
+```
+
+---
+
+## 3. Key Components / 关键组件
+
+### 3.1 OpenViking Server (`openviking-server`)
+
+**Role:** HTTP API Gateway with Bot API proxy / 带 Bot API 代理的 HTTP API 网关
+
+**Key Features / 主要特性:**
+- Unified authentication middleware for all endpoints / 为所有端点提供统一认证中间件
+- Bot API proxy layer (enabled via `--with-bot`) / Bot API 代理层(通过 `--with-bot` 启用)
+- Request forwarding to Vikingbot OpenAPIChannel / 请求转发到 Vikingbot OpenAPI 通道
+
+**Architecture Position / 架构位置:**
+- Process 1 (Port 1933) / 进程1(端口 1933)
+- Entry point for all external clients (CLI, Feishu, etc.) / 所有外部客户端的入口点
+
+---
+
+### 3.2 `ov` CLI Client (`ov chat`)
+
+**Role:** Command-line chat interface / 命令行聊天界面
+
+**Key Features / 主要特性:**
+- Interactive mode and single-message mode / 交互模式和单消息模式
+- Configurable endpoint via environment variable / 通过环境变量配置端点
+- HTTP POST with JSON request/response / 使用 JSON 请求/响应的 HTTP POST
+
+**Architecture Position / 架构位置:**
+- External client layer / 外部客户端层
+- Communicates with OpenViking Server (Port 1933) / 与 OpenViking 服务器通信(端口 1933)
+
+---
+
+### 3.3 Vikingbot OpenAPIChannel
+
+**Role:** AI agent framework with HTTP API / 带 HTTP API 的 AI 代理框架
+
+**Key Features / 主要特性:**
+- HTTP endpoints for chat, streaming, and health checks / 聊天、流式传输和健康检查的 HTTP 端点
+- Integration with MessageBus for message routing / 与 MessageBus 集成进行消息路由
+- Support for session management and context building / 支持会话管理和上下文构建
+
+**Architecture Position / 架构位置:**
+- Process 2 (Port 18791 default) / 进程2(默认端口 18791)
+- Receives proxied requests from OpenViking Server / 接收来自 OpenViking 服务器的代理请求
+
+---
+
+### 3.4 MessageBus and Agent Loop / 消息总线与代理循环
+
+**Role:** Core message routing and processing engine / 核心消息路由和处理引擎
+
+**Components / 组件:**
+- **MessageBus / 消息总线:** Inbound queue, Outbound queue, Log store / 入队队列、出队队列、日志存储
+- **Agent Loop / 代理循环:** ContextBuilder, LLM (LiteLLM), Tool Executor / 上下文构建器、LLM、工具执行器
+
+**Flow / 流程:**
+```
+Channel → MessageBus.inbound → Agent Loop → MessageBus.outbound → Channel
+```
+
+---
+
+## 4. API Endpoints / API 端点
+
+### 4.1 Bot API (via OpenViking Server)
+
+| Method | Path | Description |
+|--------|------|-------------|
+| GET | `/api/v1/bot/health` | Health check |
+| POST | `/api/v1/bot/chat` | Send message (non-streaming) |
+| POST | `/api/v1/bot/chat/stream` | Send message (streaming, SSE) |
+
+### 4.2 Response Codes
+
+| Code | Condition |
+|------|-----------|
+| 200 | Success |
+| 503 | `--with-bot` not enabled or bot service unavailable |
+| 502 | Bot service returned an error |
+
+---
+
+## 5. Usage Examples / 使用示例
+
+### 5.1 Start the services / 启动服务
+
+```bash
+# 启动 OpenViking Server (带 --with-bot 会自动启动 vikingbot gateway)
+openviking-server --with-bot
+
+# Output:
+# OpenViking HTTP Server is running on 127.0.0.1:1933
+# Bot API proxy enabled, forwarding to http://localhost:18791
+# [vikingbot] Starting gateway on port 18791...
+```
+
+**说明 / Note:**
+- `--with-bot`: 自动在同一机器上启动 `vikingbot gateway` 进程
+- 不加 `--with-bot`: 仅启动 OpenViking Server,不会启动 Vikingbot
+
+**设计意图 / Design Rationale:**
+OpenViking Server 统一代理 Vikingbot 的 CLI 请求,目的是:
+1. **共享鉴权机制** - 复用 OpenViking Server 的统一认证中间件
+2. **端口共享** - 服务端部署时可共享端口,简化网络配置
+
+### 5.2 Using `ov chat` CLI / 使用 `ov chat` CLI
+
+```bash
+# Interactive mode (default)
+ov chat
+
+# Single message mode
+ov chat -m "Hello, bot!"
+
+# Use custom endpoint
+VIKINGBOT_ENDPOINT=http://localhost:1933/api/v1/bot ov chat -m "Hello!"
+```
+
+### 5.3 Direct HTTP API usage / 直接 HTTP API 使用
+
+```bash
+# Health check
+curl http://localhost:1933/api/v1/bot/health
+
+# Send a message
+curl -X POST http://localhost:1933/api/v1/bot/chat \
+ -H "Content-Type: application/json" \
+ -d '{
+ "message": "Hello!",
+ "session_id": "test-session",
+ "user_id": "test-user"
+ }'
+
+# Streaming response
+curl -X POST http://localhost:1933/api/v1/bot/chat/stream \
+ -H "Content-Type: application/json" \
+ -d '{
+ "message": "Hello!",
+ "session_id": "test-session",
+ "stream": true
+ }'
+```
+
+---
+
+## 6. Configuration / 配置
+
+### 6.1 配置共享说明 / Configuration Sharing
+
+**重要 / Important:** Vikingbot 与 OpenViking Server 共享同一个 `ov.conf` 配置文件,不再使用 `~/.vikingbot/config.json`。
+
+Vikingbot 的配置项统一放在 `ov.conf` 的 `bot` 字段下:
+
+```json
+{
+ "server": {
+ "host": "127.0.0.1",
+ "port": 1933,
+ "root_api_key": "your-api-key",
+ "with_bot": true,
+ "bot_api_url": "http://localhost:18791"
+ },
+ "bot": {
+ "agents": {
+ "model": "openai/gpt-4o",
+ "max_tool_iterations": 50,
+ "memory_window": 50
+ },
+ "gateway": {
+ "host": "0.0.0.0",
+ "port": 18791
+ },
+ "channels": [
+ {"type": "feishu", "enabled": false, "app_id": "", "app_secret": ""}
+ ],
+ "sandbox": {
+ "backend": "direct",
+ "mode": "shared"
+ }
+ }
+}
+```
+
+**配置说明 / Configuration Notes:**
+- `server.with_bot`: 启用时自动在同一机器上启动 Vikingbot gateway
+- `bot.agents`: Agent 配置,包括 LLM 模型、最大工具迭代次数、记忆窗口
+- `bot.gateway`: HTTP Gateway 监听地址
+- `bot.channels`: 渠道配置列表,支持 openapi、feishu 等
+- `bot.sandbox`: 沙箱执行配置
+
+### 6.2 Command-line Options
+
+```bash
+# Enable Bot API proxy
+openviking-server --with-bot
+
+# Custom bot URL
+openviking-server --with-bot --bot-url http://localhost:8080
+
+# With config file
+openviking-server --config /path/to/ov.conf
+```
+
+---
+
+*End of Document*
diff --git a/bot/pyproject.toml b/bot/pyproject.toml
index 24503115..dc4ebbaa 100644
--- a/bot/pyproject.toml
+++ b/bot/pyproject.toml
@@ -1,6 +1,6 @@
[project]
name = "vikingbot"
-version = "0.1.1"
+version = "0.1.2"
description = "A lightweight personal AI assistant framework"
requires-python = ">=3.11, <=3.14"
license = {text = "MIT"}
@@ -28,32 +28,19 @@ dependencies = [
"readability-lxml>=0.8.0",
"rich>=13.0.0",
"croniter>=2.0.0",
- "dingtalk-stream>=0.4.0",
- "python-telegram-bot[socks]>=21.0",
- "lark-oapi>=1.0.0",
"socksio>=1.0.0",
"python-socketio>=5.11.0",
"msgpack>=1.0.8",
- "slack-sdk>=3.26.0",
- "qq-botpy>=1.0.0",
"python-socks[asyncio]>=2.4.0",
"prompt-toolkit>=3.0.0",
- "textual>=0.50.0",
"pygments>=2.16.0",
- "ddgs>=9.0.0",
"fastapi>=0.100.0",
"uvicorn>=0.20.0",
- "opensandbox>=0.1.0",
- "opensandbox-server>=0.1.0",
- "agent-sandbox>=0.0.23",
"html2text>=2020.1.16",
"beautifulsoup4>=4.12.0",
- "opencode-ai>=0.1.0a0",
"openviking>=0.1.18",
- "tos>=2.9.0",
+ "ddgs>=9.0.0",
"gradio>=6.6.0",
- "fusepy>=3.0.1",
- "pytest>=9.0.2",
]
[project.optional-dependencies]
@@ -62,6 +49,53 @@ dev = [
"pytest-asyncio>=0.21.0",
"ruff>=0.1.0",
]
+langfuse = [
+ "langfuse>=3.0.0",
+]
+# Channels - install only the ones you need
+telegram = [
+ "python-telegram-bot[socks]>=21.0",
+]
+feishu = [
+ "lark-oapi>=1.0.0",
+]
+dingtalk = [
+ "dingtalk-stream>=0.4.0",
+]
+slack = [
+ "slack-sdk>=3.26.0",
+]
+qq = [
+ "qq-botpy>=1.0.0",
+]
+# Sandbox backends
+sandbox = [
+ "opensandbox>=0.1.0",
+ "opensandbox-server>=0.1.0",
+ "agent-sandbox>=0.0.23",
+]
+# FUSE filesystem mount
+fuse = [
+ "fusepy>=3.0.1",
+]
+# OpenCode AI integration
+opencode = [
+ "opencode-ai>=0.1.0a0",
+]
+# Full installation - includes everything
+full = [
+ "langfuse>=3.0.0",
+ "python-telegram-bot[socks]>=21.0",
+ "lark-oapi>=1.0.0",
+ "dingtalk-stream>=0.4.0",
+ "slack-sdk>=3.26.0",
+ "qq-botpy>=1.0.0",
+ "opensandbox>=0.1.0",
+ "opensandbox-server>=0.1.0",
+ "agent-sandbox>=0.0.23",
+ "fusepy>=3.0.1",
+ "opencode-ai>=0.1.0a0",
+]
[project.scripts]
vikingbot = "vikingbot.cli.commands:app"
diff --git a/bot/scripts/restart_openviking_server.sh b/bot/scripts/restart_openviking_server.sh
new file mode 100755
index 00000000..9094ba0b
--- /dev/null
+++ b/bot/scripts/restart_openviking_server.sh
@@ -0,0 +1,148 @@
+#!/bin/bash
+
+# Restart OpenViking Server with Bot API enabled
+# Usage: ./restart_openviking_server.sh [--port PORT] [--bot-url URL]
+
+set -e
+
+# Default values
+PORT="1933"
+BOT_URL="http://localhost:18791"
+
+# Parse arguments
+while [[ $# -gt 0 ]]; do
+ case $1 in
+ --port)
+ PORT="$2"
+ shift 2
+ ;;
+ --bot-url)
+ BOT_URL="$2"
+ shift 2
+ ;;
+ *)
+ echo "Unknown option: $1"
+ echo "Usage: $0 [--port PORT] [--bot-url URL]"
+ exit 1
+ ;;
+ esac
+done
+
+# Parse Bot URL to extract port
+BOT_PORT=$(echo "$BOT_URL" | sed -n 's/.*:\([0-9]*\).*/\1/p')
+if [ -z "$BOT_PORT" ]; then
+ BOT_PORT="18791"
+fi
+
+echo "=========================================="
+echo "Restarting OpenViking Server with Bot API"
+echo "=========================================="
+echo "OpenViking Server Port: $PORT"
+echo "Bot URL: $BOT_URL"
+echo "Bot Port: $BOT_PORT"
+echo ""
+
+# Step 0: Kill existing vikingbot processes
+echo "Step 0: Stopping existing vikingbot processes..."
+if pgrep -f "vikingbot.*openapi" > /dev/null 2>&1 || pgrep -f "vikingbot.*gateway" > /dev/null 2>&1; then
+ pkill -f "vikingbot.*openapi" 2>/dev/null || true
+ pkill -f "vikingbot.*gateway" 2>/dev/null || true
+ sleep 2
+ echo " ✓ Stopped existing vikingbot processes"
+else
+ echo " ✓ No existing vikingbot processes found"
+fi
+
+# Step 1: Kill existing openviking-server processes
+echo "Step 1: Stopping existing openviking-server processes..."
+if pgrep -f "openviking-server" > /dev/null 2>&1; then
+ pkill -f "openviking-server" 2>/dev/null || true
+ sleep 2
+ # Force kill if still running
+ if pgrep -f "openviking-server" > /dev/null 2>&1; then
+ echo " Force killing remaining processes..."
+ pkill -9 -f "openviking-server" 2>/dev/null || true
+ sleep 1
+ fi
+ echo " ✓ Stopped existing processes"
+else
+ echo " ✓ No existing processes found"
+fi
+
+# Step 2: Wait for port to be released
+echo ""
+echo "Step 2: Waiting for port $PORT to be released..."
+for i in {1..10}; do
+ if ! lsof -i :"$PORT" > /dev/null 2>&1; then
+ echo " ✓ Port $PORT is free"
+ break
+ fi
+ sleep 1
+done
+
+# Step 3: Start openviking-server with --with-bot
+echo ""
+echo "Step 3: Starting openviking-server with Bot API..."
+echo " Command: openviking-server --with-bot --port $PORT --bot-url $BOT_URL"
+echo ""
+
+# Start in background and log to file
+nohup openviking-server \
+  --with-bot \
+  --port "$PORT" \
+  --bot-url "$BOT_URL" \
+  > /tmp/openviking-server.log 2>&1 &
+
+# NOTE: the server must run in the background so that $! below captures
+# its PID and the script can continue to the health-check loop; a
+# foreground invocation would block this script indefinitely.
+
+
+
+SERVER_PID=$!
+echo " Server PID: $SERVER_PID"
+
+# Step 4: Wait for server to start
+echo ""
+echo "Step 4: Waiting for server to be ready..."
+sleep 3
+
+# Poll the health endpoint until Vikingbot reports healthy.
+# NOTE: a bare "curl succeeded" check is not sufficient here — the
+# gateway can answer with "Bot service unavailable" while Vikingbot is
+# still starting up, so the response body must be inspected.
+for i in {1..10}; do
+  health_response=$(curl -s http://localhost:"$PORT"/api/v1/bot/health 2>/dev/null)
+  if echo "$health_response" | grep -q "Vikingbot"; then
+    echo ""
+    echo "=========================================="
+    echo "✓ OpenViking Server started successfully!"
+    echo "=========================================="
+    echo ""
+    echo "Server URL: http://localhost:$PORT"
+    echo "Health Check: http://localhost:$PORT/api/v1/bot/health"
+    echo "Logs: tail -f /tmp/openviking-server.log"
+    echo ""
+    exit 0
+  fi
+  if echo "$health_response" | grep -q "Bot service unavailable"; then
+    echo "  ⏳ Waiting for Vikingbot to start (attempt $i/10)..."
+  fi
+  sleep 2
+done
+
+# If we reach here, server failed to start
+echo ""
+echo "=========================================="
+echo "✗ Failed to start OpenViking Server"
+echo "=========================================="
+echo ""
+echo "Recent logs:"
+tail -20 /tmp/openviking-server.log 2>/dev/null || echo "(No logs available)"
+echo ""
+echo "Troubleshooting:"
+echo " 1. Check if port $PORT is in use: lsof -i :$PORT"
+echo " 2. Check Vikingbot is running on $BOT_URL"
+echo " 3. Check logs: tail -f /tmp/openviking-server.log"
+echo ""
+exit 1
diff --git a/bot/uv.lock b/bot/uv.lock
index 2bcce976..638cb082 100644
--- a/bot/uv.lock
+++ b/bot/uv.lock
@@ -263,6 +263,15 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/f6/22/91616fe707a5c5510de2cac9b046a30defe7007ba8a0c04f9c08f27df312/audioop_lts-0.2.2-cp314-cp314t-win_arm64.whl", hash = "sha256:b492c3b040153e68b9fdaff5913305aaaba5bb433d8a7f73d5cf6a64ed3cc1dd", size = 25206, upload-time = "2025-08-05T16:43:16.444Z" },
]
+[[package]]
+name = "backoff"
+version = "2.2.1"
+source = { registry = "https://pypi.org/simple" }
+sdist = { url = "https://files.pythonhosted.org/packages/47/d7/5bbeb12c44d7c4f2fb5b56abce497eb5ed9f34d85701de869acedd602619/backoff-2.2.1.tar.gz", hash = "sha256:03f829f5bb1923180821643f8753b0502c3b682293992485b0eef2807afa5cba", size = 17001, upload-time = "2022-10-05T19:19:32.061Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/df/73/b6e24bd22e6720ca8ee9a85a0c4a2971af8497d8f3193fa05390cbd46e09/backoff-2.2.1-py3-none-any.whl", hash = "sha256:63579f9a0628e06278f7e47b7d7d5b6ce20dc65c5e96a6f3ca99a6adca0396e8", size = 15148, upload-time = "2022-10-05T19:19:30.546Z" },
+]
+
[[package]]
name = "beautifulsoup4"
version = "4.14.3"
@@ -535,12 +544,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/d1/d6/3965ed04c63042e047cb6a3e6ed1a63a35087b6a609aa3a15ed8ac56c221/colorama-0.4.6-py2.py3-none-any.whl", hash = "sha256:4f1d9991f5acc0ca119f9d443620b77f9d6b33703e51011c16baf57afb285fc6", size = 25335, upload-time = "2022-10-25T02:36:20.889Z" },
]
-[[package]]
-name = "crcmod"
-version = "1.7"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/6b/b0/e595ce2a2527e169c3bcd6c33d2473c1918e0b7f6826a043ca1245dd4e5b/crcmod-1.7.tar.gz", hash = "sha256:dc7051a0db5f2bd48665a990d3ec1cc305a466a77358ca4492826f41f283601e", size = 89670, upload-time = "2010-06-27T14:35:29.538Z" }
-
[[package]]
name = "croniter"
version = "6.0.0"
@@ -647,18 +650,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/4e/8c/f3147f5c4b73e7550fe5f9352eaa956ae838d5c51eb58e7a25b9f3e2643b/decorator-5.2.1-py3-none-any.whl", hash = "sha256:d316bb415a2d9e2d2b3abcc4084c6502fc09240e292cd76a76afc106a1c8e04a", size = 9190, upload-time = "2025-02-24T04:41:32.565Z" },
]
-[[package]]
-name = "deprecated"
-version = "1.3.1"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "wrapt" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/49/85/12f0a49a7c4ffb70572b6c2ef13c90c88fd190debda93b23f026b25f9634/deprecated-1.3.1.tar.gz", hash = "sha256:b1b50e0ff0c1fddaa5708a2c6b0a6588bb09b892825ab2b214ac9ea9d92a5223", size = 2932523, upload-time = "2025-10-30T08:19:02.757Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/84/d0/205d54408c08b13550c733c4b85429e7ead111c7f0014309637425520a9a/deprecated-1.3.1-py2.py3-none-any.whl", hash = "sha256:597bfef186b6f60181535a29fbe44865ce137a5079f295b479886c82729d5f3f", size = 11298, upload-time = "2025-10-30T08:19:00.758Z" },
-]
-
[[package]]
name = "dingtalk-stream"
version = "0.24.3"
@@ -953,6 +944,18 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/ac/35/17c9141c4ae21e9a29a43acdfd848e3e468a810517f862cad07977bf8fe9/google-3.0.0-py2.py3-none-any.whl", hash = "sha256:889cf695f84e4ae2c55fbc0cfdaf4c1e729417fa52ab1db0485202ba173e4935", size = 45258, upload-time = "2020-07-11T14:49:58.287Z" },
]
+[[package]]
+name = "googleapis-common-protos"
+version = "1.72.0"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "protobuf" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/e5/7b/adfd75544c415c487b33061fe7ae526165241c1ea133f9a9125a56b39fd8/googleapis_common_protos-1.72.0.tar.gz", hash = "sha256:e55a601c1b32b52d7a3e65f43563e2aa61bcd737998ee672ac9b951cd49319f5", size = 147433, upload-time = "2025-11-06T18:29:24.087Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/c4/ab/09169d5a4612a5f92490806649ac8d41e3ec9129c636754575b3553f4ea4/googleapis_common_protos-1.72.0-py3-none-any.whl", hash = "sha256:4299c5a82d5ae1a9702ada957347726b167f9f8d1fc352477702a1e851ff4038", size = 297515, upload-time = "2025-11-06T18:29:13.14Z" },
+]
+
[[package]]
name = "gradio"
version = "6.6.0"
@@ -1354,30 +1357,39 @@ wheels = [
]
[[package]]
-name = "lark-oapi"
-version = "1.5.3"
+name = "langfuse"
+version = "3.14.5"
source = { registry = "https://pypi.org/simple" }
dependencies = [
+ { name = "backoff" },
{ name = "httpx" },
- { name = "pycryptodome" },
+ { name = "openai" },
+ { name = "opentelemetry-api" },
+ { name = "opentelemetry-exporter-otlp-proto-http" },
+ { name = "opentelemetry-sdk" },
+ { name = "packaging" },
+ { name = "pydantic" },
{ name = "requests" },
- { name = "requests-toolbelt" },
- { name = "websockets" },
+ { name = "wrapt" },
]
+sdist = { url = "https://files.pythonhosted.org/packages/ec/6b/7a945e8bc56cbf343b6f6171fd45870b0ea80ea38463b2db8dd5a9dc04a2/langfuse-3.14.5.tar.gz", hash = "sha256:2f543ec1540053d39b08a50ed5992caf1cd54d472a55cb8e5dcf6d4fcb7ff631", size = 235474, upload-time = "2026-02-23T10:42:47.721Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/bf/ff/2ece5d735ebfa2af600a53176f2636ae47af2bf934e08effab64f0d1e047/lark_oapi-1.5.3-py3-none-any.whl", hash = "sha256:fda6b32bb38d21b6bdaae94979c600b94c7c521e985adade63a54e4b3e20cc36", size = 6993016, upload-time = "2026-01-27T08:21:49.307Z" },
+ { url = "https://files.pythonhosted.org/packages/a3/a1/10f04224542d6a57073c4f339b6763836a0899c98966f1d4ffcf56d2cf61/langfuse-3.14.5-py3-none-any.whl", hash = "sha256:5054b1c705ec69bce2d7077ce7419727ac629159428da013790979ca9cae77d5", size = 421240, upload-time = "2026-02-23T10:42:46.085Z" },
]
[[package]]
-name = "linkify-it-py"
-version = "2.0.3"
+name = "lark-oapi"
+version = "1.5.3"
source = { registry = "https://pypi.org/simple" }
dependencies = [
- { name = "uc-micro-py" },
+ { name = "httpx" },
+ { name = "pycryptodome" },
+ { name = "requests" },
+ { name = "requests-toolbelt" },
+ { name = "websockets" },
]
-sdist = { url = "https://files.pythonhosted.org/packages/2a/ae/bb56c6828e4797ba5a4821eec7c43b8bf40f69cda4d4f5f8c8a2810ec96a/linkify-it-py-2.0.3.tar.gz", hash = "sha256:68cda27e162e9215c17d786649d1da0021a451bdc436ef9e0fa0ba5234b9b048", size = 27946, upload-time = "2024-02-04T14:48:04.179Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/04/1e/b832de447dee8b582cac175871d2f6c3d5077cc56d5575cadba1fd1cccfa/linkify_it_py-2.0.3-py3-none-any.whl", hash = "sha256:6bcbc417b0ac14323382aef5c5192c0075bf8a9d6b41820a2b66371eac6b6d79", size = 19820, upload-time = "2024-02-04T14:48:02.496Z" },
+ { url = "https://files.pythonhosted.org/packages/bf/ff/2ece5d735ebfa2af600a53176f2636ae47af2bf934e08effab64f0d1e047/lark_oapi-1.5.3-py3-none-any.whl", hash = "sha256:fda6b32bb38d21b6bdaae94979c600b94c7c521e985adade63a54e4b3e20cc36", size = 6993016, upload-time = "2026-01-27T08:21:49.307Z" },
]
[[package]]
@@ -1547,11 +1559,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/94/54/e7d793b573f298e1c9013b8c4dade17d481164aa517d1d7148619c2cedbf/markdown_it_py-4.0.0-py3-none-any.whl", hash = "sha256:87327c59b172c5011896038353a81343b6754500a08cd7a4973bb48c6d578147", size = 87321, upload-time = "2025-08-11T12:57:51.923Z" },
]
-[package.optional-dependencies]
-linkify = [
- { name = "linkify-it-py" },
-]
-
[[package]]
name = "markdownify"
version = "1.2.2"
@@ -1639,18 +1646,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/70/bc/6f1c2f612465f5fa89b95bead1f44dcb607670fd42891d8fdcd5d039f4f4/markupsafe-3.0.3-cp314-cp314t-win_arm64.whl", hash = "sha256:32001d6a8fc98c8cb5c947787c5d08b0a50663d139f1305bac5885d98d9b40fa", size = 14146, upload-time = "2025-09-27T18:37:28.327Z" },
]
-[[package]]
-name = "mdit-py-plugins"
-version = "0.5.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "markdown-it-py" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/b2/fd/a756d36c0bfba5f6e39a1cdbdbfdd448dc02692467d83816dff4592a1ebc/mdit_py_plugins-0.5.0.tar.gz", hash = "sha256:f4918cb50119f50446560513a8e311d574ff6aaed72606ddae6d35716fe809c6", size = 44655, upload-time = "2025-08-11T07:25:49.083Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/fb/86/dd6e5db36df29e76c7a7699123569a4a18c1623ce68d826ed96c62643cae/mdit_py_plugins-0.5.0-py3-none-any.whl", hash = "sha256:07a08422fc1936a5d26d146759e9155ea466e842f5ab2f7d2266dd084c8dab1f", size = 57205, upload-time = "2025-08-11T07:25:47.597Z" },
-]
-
[[package]]
name = "mdurl"
version = "0.1.2"
@@ -1969,6 +1964,88 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/69/8b/39f0a9ece971139c9370bdd7b7c8a11aedb39a6216098c5dd63267fe5e07/opensandbox_server-0.1.2-py3-none-any.whl", hash = "sha256:49807cf9c463fbfd3ada12d5f082af9fdd09c021998abb0908b444b729145631", size = 84905, upload-time = "2026-02-09T12:23:01.623Z" },
]
+[[package]]
+name = "opentelemetry-api"
+version = "1.39.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "importlib-metadata" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/97/b9/3161be15bb8e3ad01be8be5a968a9237c3027c5be504362ff800fca3e442/opentelemetry_api-1.39.1.tar.gz", hash = "sha256:fbde8c80e1b937a2c61f20347e91c0c18a1940cecf012d62e65a7caf08967c9c", size = 65767, upload-time = "2025-12-11T13:32:39.182Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/cf/df/d3f1ddf4bb4cb50ed9b1139cc7b1c54c34a1e7ce8fd1b9a37c0d1551a6bd/opentelemetry_api-1.39.1-py3-none-any.whl", hash = "sha256:2edd8463432a7f8443edce90972169b195e7d6a05500cd29e6d13898187c9950", size = 66356, upload-time = "2025-12-11T13:32:17.304Z" },
+]
+
+[[package]]
+name = "opentelemetry-exporter-otlp-proto-common"
+version = "1.39.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "opentelemetry-proto" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/e9/9d/22d241b66f7bbde88a3bfa6847a351d2c46b84de23e71222c6aae25c7050/opentelemetry_exporter_otlp_proto_common-1.39.1.tar.gz", hash = "sha256:763370d4737a59741c89a67b50f9e39271639ee4afc999dadfe768541c027464", size = 20409, upload-time = "2025-12-11T13:32:40.885Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/8c/02/ffc3e143d89a27ac21fd557365b98bd0653b98de8a101151d5805b5d4c33/opentelemetry_exporter_otlp_proto_common-1.39.1-py3-none-any.whl", hash = "sha256:08f8a5862d64cc3435105686d0216c1365dc5701f86844a8cd56597d0c764fde", size = 18366, upload-time = "2025-12-11T13:32:20.2Z" },
+]
+
+[[package]]
+name = "opentelemetry-exporter-otlp-proto-http"
+version = "1.39.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "googleapis-common-protos" },
+ { name = "opentelemetry-api" },
+ { name = "opentelemetry-exporter-otlp-proto-common" },
+ { name = "opentelemetry-proto" },
+ { name = "opentelemetry-sdk" },
+ { name = "requests" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/80/04/2a08fa9c0214ae38880df01e8bfae12b067ec0793446578575e5080d6545/opentelemetry_exporter_otlp_proto_http-1.39.1.tar.gz", hash = "sha256:31bdab9745c709ce90a49a0624c2bd445d31a28ba34275951a6a362d16a0b9cb", size = 17288, upload-time = "2025-12-11T13:32:42.029Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/95/f1/b27d3e2e003cd9a3592c43d099d2ed8d0a947c15281bf8463a256db0b46c/opentelemetry_exporter_otlp_proto_http-1.39.1-py3-none-any.whl", hash = "sha256:d9f5207183dd752a412c4cd564ca8875ececba13be6e9c6c370ffb752fd59985", size = 19641, upload-time = "2025-12-11T13:32:22.248Z" },
+]
+
+[[package]]
+name = "opentelemetry-proto"
+version = "1.39.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "protobuf" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/49/1d/f25d76d8260c156c40c97c9ed4511ec0f9ce353f8108ca6e7561f82a06b2/opentelemetry_proto-1.39.1.tar.gz", hash = "sha256:6c8e05144fc0d3ed4d22c2289c6b126e03bcd0e6a7da0f16cedd2e1c2772e2c8", size = 46152, upload-time = "2025-12-11T13:32:48.681Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/51/95/b40c96a7b5203005a0b03d8ce8cd212ff23f1793d5ba289c87a097571b18/opentelemetry_proto-1.39.1-py3-none-any.whl", hash = "sha256:22cdc78efd3b3765d09e68bfbd010d4fc254c9818afd0b6b423387d9dee46007", size = 72535, upload-time = "2025-12-11T13:32:33.866Z" },
+]
+
+[[package]]
+name = "opentelemetry-sdk"
+version = "1.39.1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "opentelemetry-api" },
+ { name = "opentelemetry-semantic-conventions" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/eb/fb/c76080c9ba07e1e8235d24cdcc4d125ef7aa3edf23eb4e497c2e50889adc/opentelemetry_sdk-1.39.1.tar.gz", hash = "sha256:cf4d4563caf7bff906c9f7967e2be22d0d6b349b908be0d90fb21c8e9c995cc6", size = 171460, upload-time = "2025-12-11T13:32:49.369Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/7c/98/e91cf858f203d86f4eccdf763dcf01cf03f1dae80c3750f7e635bfa206b6/opentelemetry_sdk-1.39.1-py3-none-any.whl", hash = "sha256:4d5482c478513ecb0a5d938dcc61394e647066e0cc2676bee9f3af3f3f45f01c", size = 132565, upload-time = "2025-12-11T13:32:35.069Z" },
+]
+
+[[package]]
+name = "opentelemetry-semantic-conventions"
+version = "0.60b1"
+source = { registry = "https://pypi.org/simple" }
+dependencies = [
+ { name = "opentelemetry-api" },
+ { name = "typing-extensions" },
+]
+sdist = { url = "https://files.pythonhosted.org/packages/91/df/553f93ed38bf22f4b999d9be9c185adb558982214f33eae539d3b5cd0858/opentelemetry_semantic_conventions-0.60b1.tar.gz", hash = "sha256:87c228b5a0669b748c76d76df6c364c369c28f1c465e50f661e39737e84bc953", size = 137935, upload-time = "2025-12-11T13:32:50.487Z" }
+wheels = [
+ { url = "https://files.pythonhosted.org/packages/7a/5e/5958555e09635d09b75de3c4f8b9cae7335ca545d77392ffe7331534c402/opentelemetry_semantic_conventions-0.60b1-py3-none-any.whl", hash = "sha256:9fa8c8b0c110da289809292b0591220d3a7b53c1526a23021e977d68597893fb", size = 219982, upload-time = "2025-12-11T13:32:36.955Z" },
+]
+
[[package]]
name = "openviking"
version = "0.1.18"
@@ -2090,11 +2167,11 @@ wheels = [
[[package]]
name = "packaging"
-version = "26.0"
+version = "25.0"
source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/65/ee/299d360cdc32edc7d2cf530f3accf79c4fca01e96ffc950d8a52213bd8e4/packaging-26.0.tar.gz", hash = "sha256:00243ae351a257117b6a241061796684b084ed1c516a08c48a3f7e147a9d80b4", size = 143416, upload-time = "2026-01-21T20:50:39.064Z" }
+sdist = { url = "https://files.pythonhosted.org/packages/a1/d4/1fc4078c65507b51b96ca8f8c3ba19e6a61c8253c72794544580a7b6c24d/packaging-25.0.tar.gz", hash = "sha256:d443872c98d677bf60f6a1f2f8c1cb748e8fe762d2bf9d3148b5599295b0fc4f", size = 165727, upload-time = "2025-04-19T11:48:59.673Z" }
wheels = [
- { url = "https://files.pythonhosted.org/packages/b7/b9/c538f279a4e237a006a2c98387d081e9eb060d203d8ed34467cc0f0b9b53/packaging-26.0-py3-none-any.whl", hash = "sha256:b36f1fef9334a5588b4166f8bcd26a14e521f2b55e6b9de3aaa80d3ff7a37529", size = 74366, upload-time = "2026-01-21T20:50:37.788Z" },
+ { url = "https://files.pythonhosted.org/packages/20/12/38679034af332785aac8774540895e234f4d07f7545804097de4b666afd8/packaging-25.0-py3-none-any.whl", hash = "sha256:29572ef2b1f17581046b3a2227d5c611fb25ec70ca1ba8554b24b0e69331a484", size = 66469, upload-time = "2025-04-19T11:48:57.875Z" },
]
[[package]]
@@ -2265,15 +2342,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/f2/26/c56ce33ca856e358d27fda9676c055395abddb82c35ac0f593877ed4562e/pillow-12.1.1-pp311-pypy311_pp73-win_amd64.whl", hash = "sha256:cb9bb857b2d057c6dfc72ac5f3b44836924ba15721882ef103cecb40d002d80e", size = 7029880, upload-time = "2026-02-11T04:23:04.783Z" },
]
-[[package]]
-name = "platformdirs"
-version = "4.7.0"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/71/25/ccd8e88fcd16a4eb6343a8b4b9635e6f3928a7ebcd82822a14d20e3ca29f/platformdirs-4.7.0.tar.gz", hash = "sha256:fd1a5f8599c85d49b9ac7d6e450bc2f1aaf4a23f1fe86d09952fe20ad365cf36", size = 23118, upload-time = "2026-02-12T22:21:53.764Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/cb/e3/1eddccb2c39ecfbe09b3add42a04abcc3fa5b468aa4224998ffb8a7e9c8f/platformdirs-4.7.0-py3-none-any.whl", hash = "sha256:1ed8db354e344c5bb6039cd727f096af975194b508e37177719d562b2b540ee6", size = 18983, upload-time = "2026-02-12T22:21:52.237Z" },
-]
-
[[package]]
name = "pluggy"
version = "1.6.0"
@@ -3371,23 +3439,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/40/44/4a5f08c96eb108af5cb50b41f76142f0afa346dfa99d5296fe7202a11854/tabulate-0.9.0-py3-none-any.whl", hash = "sha256:024ca478df22e9340661486f85298cff5f6dcdba14f3813e8830015b9ed1948f", size = 35252, upload-time = "2022-10-06T17:21:44.262Z" },
]
-[[package]]
-name = "textual"
-version = "7.5.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "markdown-it-py", extra = ["linkify"] },
- { name = "mdit-py-plugins" },
- { name = "platformdirs" },
- { name = "pygments" },
- { name = "rich" },
- { name = "typing-extensions" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/9f/38/7d169a765993efde5095c70a668bf4f5831bb7ac099e932f2783e9b71abf/textual-7.5.0.tar.gz", hash = "sha256:c730cba1e3d704e8f1ca915b6a3af01451e3bca380114baacf6abf87e9dac8b6", size = 1592319, upload-time = "2026-01-30T13:46:39.881Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/9c/78/96ddb99933e11d91bc6e05edae23d2687e44213066bcbaca338898c73c47/textual-7.5.0-py3-none-any.whl", hash = "sha256:849dfee9d705eab3b2d07b33152b7bd74fb1f5056e002873cc448bce500c6374", size = 718164, upload-time = "2026-01-30T13:46:37.635Z" },
-]
-
[[package]]
name = "tiktoken"
version = "0.12.0"
@@ -3477,20 +3528,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/bd/75/8539d011f6be8e29f339c42e633aae3cb73bffa95dd0f9adec09b9c58e85/tomlkit-0.13.3-py3-none-any.whl", hash = "sha256:c89c649d79ee40629a9fda55f8ace8c6a1b42deb912b2a8fd8d942ddadb606b0", size = 38901, upload-time = "2025-06-05T07:13:43.546Z" },
]
-[[package]]
-name = "tos"
-version = "2.9.0"
-source = { registry = "https://pypi.org/simple" }
-dependencies = [
- { name = "crcmod" },
- { name = "deprecated" },
- { name = "pytz" },
- { name = "requests" },
- { name = "six" },
- { name = "wrapt" },
-]
-sdist = { url = "https://files.pythonhosted.org/packages/9a/b3/13451226f564f88d9db2323e9b7eabcced792a0ad5ee1e333751a7634257/tos-2.9.0.tar.gz", hash = "sha256:861cfc348e770f099f911cb96b2c41774ada6c9c51b7a89d97e0c426074dd99e", size = 157071, upload-time = "2026-01-06T04:13:08.921Z" }
-
[[package]]
name = "tqdm"
version = "4.67.3"
@@ -3572,15 +3609,6 @@ wheels = [
{ url = "https://files.pythonhosted.org/packages/c2/14/e2a54fabd4f08cd7af1c07030603c3356b74da07f7cc056e600436edfa17/tzlocal-5.3.1-py3-none-any.whl", hash = "sha256:eb1a66c3ef5847adf7a834f1be0800581b683b5608e74f86ecbcef8ab91bb85d", size = 18026, upload-time = "2025-03-05T21:17:39.857Z" },
]
-[[package]]
-name = "uc-micro-py"
-version = "1.0.3"
-source = { registry = "https://pypi.org/simple" }
-sdist = { url = "https://files.pythonhosted.org/packages/91/7a/146a99696aee0609e3712f2b44c6274566bc368dfe8375191278045186b8/uc-micro-py-1.0.3.tar.gz", hash = "sha256:d321b92cff673ec58027c04015fcaa8bb1e005478643ff4a500882eaab88c48a", size = 6043, upload-time = "2024-02-09T16:52:01.654Z" }
-wheels = [
- { url = "https://files.pythonhosted.org/packages/37/87/1f677586e8ac487e29672e4b17455758fce261de06a0d086167bb760361a/uc_micro_py-1.0.3-py3-none-any.whl", hash = "sha256:db1dffff340817673d7b466ec86114a9dc0e9d4d9b5ba229d9d60e5c12600cd5", size = 6229, upload-time = "2024-02-09T16:52:00.371Z" },
-]
-
[[package]]
name = "urllib3"
version = "2.6.3"
@@ -3608,39 +3636,26 @@ name = "vikingbot"
version = "0.1.1"
source = { editable = "." }
dependencies = [
- { name = "agent-sandbox" },
{ name = "beautifulsoup4" },
{ name = "croniter" },
{ name = "ddgs" },
- { name = "dingtalk-stream" },
{ name = "fastapi" },
- { name = "fusepy" },
{ name = "gradio" },
{ name = "html2text" },
{ name = "httpx", extra = ["socks"] },
- { name = "lark-oapi" },
{ name = "litellm" },
{ name = "loguru" },
{ name = "msgpack" },
- { name = "opencode-ai" },
- { name = "opensandbox" },
- { name = "opensandbox-server" },
{ name = "openviking" },
{ name = "prompt-toolkit" },
{ name = "pydantic" },
{ name = "pydantic-settings" },
{ name = "pygments" },
- { name = "pytest" },
{ name = "python-socketio" },
{ name = "python-socks" },
- { name = "python-telegram-bot", extra = ["socks"] },
- { name = "qq-botpy" },
{ name = "readability-lxml" },
{ name = "rich" },
- { name = "slack-sdk" },
{ name = "socksio" },
- { name = "textual" },
- { name = "tos" },
{ name = "typer" },
{ name = "uvicorn" },
{ name = "websocket-client" },
@@ -3653,51 +3668,102 @@ dev = [
{ name = "pytest-asyncio" },
{ name = "ruff" },
]
+dingtalk = [
+ { name = "dingtalk-stream" },
+]
+feishu = [
+ { name = "lark-oapi" },
+]
+full = [
+ { name = "agent-sandbox" },
+ { name = "dingtalk-stream" },
+ { name = "fusepy" },
+ { name = "langfuse" },
+ { name = "lark-oapi" },
+ { name = "opencode-ai" },
+ { name = "opensandbox" },
+ { name = "opensandbox-server" },
+ { name = "python-telegram-bot", extra = ["socks"] },
+ { name = "qq-botpy" },
+ { name = "slack-sdk" },
+]
+fuse = [
+ { name = "fusepy" },
+]
+langfuse = [
+ { name = "langfuse" },
+]
+opencode = [
+ { name = "opencode-ai" },
+]
+qq = [
+ { name = "qq-botpy" },
+]
+sandbox = [
+ { name = "agent-sandbox" },
+ { name = "opensandbox" },
+ { name = "opensandbox-server" },
+]
+slack = [
+ { name = "slack-sdk" },
+]
+telegram = [
+ { name = "python-telegram-bot", extra = ["socks"] },
+]
[package.metadata]
requires-dist = [
- { name = "agent-sandbox", specifier = ">=0.0.23" },
+ { name = "agent-sandbox", marker = "extra == 'full'", specifier = ">=0.0.23" },
+ { name = "agent-sandbox", marker = "extra == 'sandbox'", specifier = ">=0.0.23" },
{ name = "beautifulsoup4", specifier = ">=4.12.0" },
{ name = "croniter", specifier = ">=2.0.0" },
{ name = "ddgs", specifier = ">=9.0.0" },
- { name = "dingtalk-stream", specifier = ">=0.4.0" },
+ { name = "dingtalk-stream", marker = "extra == 'dingtalk'", specifier = ">=0.4.0" },
+ { name = "dingtalk-stream", marker = "extra == 'full'", specifier = ">=0.4.0" },
{ name = "fastapi", specifier = ">=0.100.0" },
- { name = "fusepy", specifier = ">=3.0.1" },
+ { name = "fusepy", marker = "extra == 'full'", specifier = ">=3.0.1" },
+ { name = "fusepy", marker = "extra == 'fuse'", specifier = ">=3.0.1" },
{ name = "gradio", specifier = ">=6.6.0" },
{ name = "html2text", specifier = ">=2020.1.16" },
{ name = "httpx", extras = ["socks"], specifier = ">=0.25.0" },
- { name = "lark-oapi", specifier = ">=1.0.0" },
+ { name = "langfuse", marker = "extra == 'full'", specifier = ">=3.0.0" },
+ { name = "langfuse", marker = "extra == 'langfuse'", specifier = ">=3.0.0" },
+ { name = "lark-oapi", marker = "extra == 'feishu'", specifier = ">=1.0.0" },
+ { name = "lark-oapi", marker = "extra == 'full'", specifier = ">=1.0.0" },
{ name = "litellm", specifier = ">=1.0.0" },
{ name = "loguru", specifier = ">=0.7.0" },
{ name = "msgpack", specifier = ">=1.0.8" },
- { name = "opencode-ai", specifier = ">=0.1.0a0" },
- { name = "opensandbox", specifier = ">=0.1.0" },
- { name = "opensandbox-server", specifier = ">=0.1.0" },
+ { name = "opencode-ai", marker = "extra == 'full'", specifier = ">=0.1.0a0" },
+ { name = "opencode-ai", marker = "extra == 'opencode'", specifier = ">=0.1.0a0" },
+ { name = "opensandbox", marker = "extra == 'full'", specifier = ">=0.1.0" },
+ { name = "opensandbox", marker = "extra == 'sandbox'", specifier = ">=0.1.0" },
+ { name = "opensandbox-server", marker = "extra == 'full'", specifier = ">=0.1.0" },
+ { name = "opensandbox-server", marker = "extra == 'sandbox'", specifier = ">=0.1.0" },
{ name = "openviking", specifier = ">=0.1.18" },
{ name = "prompt-toolkit", specifier = ">=3.0.0" },
{ name = "pydantic", specifier = ">=2.0.0" },
{ name = "pydantic-settings", specifier = ">=2.0.0" },
{ name = "pygments", specifier = ">=2.16.0" },
- { name = "pytest", specifier = ">=9.0.2" },
{ name = "pytest", marker = "extra == 'dev'", specifier = ">=7.0.0" },
{ name = "pytest-asyncio", marker = "extra == 'dev'", specifier = ">=0.21.0" },
{ name = "python-socketio", specifier = ">=5.11.0" },
{ name = "python-socks", extras = ["asyncio"], specifier = ">=2.4.0" },
- { name = "python-telegram-bot", extras = ["socks"], specifier = ">=21.0" },
- { name = "qq-botpy", specifier = ">=1.0.0" },
+ { name = "python-telegram-bot", extras = ["socks"], marker = "extra == 'full'", specifier = ">=21.0" },
+ { name = "python-telegram-bot", extras = ["socks"], marker = "extra == 'telegram'", specifier = ">=21.0" },
+ { name = "qq-botpy", marker = "extra == 'full'", specifier = ">=1.0.0" },
+ { name = "qq-botpy", marker = "extra == 'qq'", specifier = ">=1.0.0" },
{ name = "readability-lxml", specifier = ">=0.8.0" },
{ name = "rich", specifier = ">=13.0.0" },
{ name = "ruff", marker = "extra == 'dev'", specifier = ">=0.1.0" },
- { name = "slack-sdk", specifier = ">=3.26.0" },
+ { name = "slack-sdk", marker = "extra == 'full'", specifier = ">=3.26.0" },
+ { name = "slack-sdk", marker = "extra == 'slack'", specifier = ">=3.26.0" },
{ name = "socksio", specifier = ">=1.0.0" },
- { name = "textual", specifier = ">=0.50.0" },
- { name = "tos", specifier = ">=2.9.0" },
{ name = "typer", specifier = ">=0.9.0" },
{ name = "uvicorn", specifier = ">=0.20.0" },
{ name = "websocket-client", specifier = ">=1.6.0" },
{ name = "websockets", specifier = ">=12.0" },
]
-provides-extras = ["dev"]
+provides-extras = ["dev", "langfuse", "telegram", "feishu", "dingtalk", "slack", "qq", "sandbox", "fuse", "opencode", "full"]
[[package]]
name = "volcengine"
diff --git a/bot/vikingbot/__init__.py b/bot/vikingbot/__init__.py
index 5da12814..58982321 100644
--- a/bot/vikingbot/__init__.py
+++ b/bot/vikingbot/__init__.py
@@ -2,5 +2,5 @@
vikingbot - A lightweight AI agent framework
"""
-__version__ = "0.1.0"
+__version__ = "0.1.2"
__logo__ = "🐈"
diff --git a/bot/vikingbot/agent/context.py b/bot/vikingbot/agent/context.py
index 40c36e50..25570043 100644
--- a/bot/vikingbot/agent/context.py
+++ b/bot/vikingbot/agent/context.py
@@ -29,12 +29,18 @@ def __init__(
self,
workspace: Path,
sandbox_manager: SandboxManager | None = None,
+ sender_id: str | None = None,
+ is_group_chat: bool = False,
+ eval: bool = False,
):
self.workspace = workspace
self._templates_ensured = False
self.sandbox_manager = sandbox_manager
self._memory = None
self._skills = None
+ self._sender_id = sender_id
+ self._is_group_chat = is_group_chat
+ self._eval = eval
@property
def memory(self):
@@ -72,7 +78,7 @@ async def build_system_prompt(
"""
# Ensure workspace templates exist only when first needed
self._ensure_templates_once()
- sandbox_key = self.sandbox_manager.to_sandbox_key(session_key)
+ workspace_id = self.sandbox_manager.to_workspace_id(session_key)
parts = []
@@ -82,22 +88,29 @@ async def build_system_prompt(
# Sandbox environment info
if self.sandbox_manager:
sandbox_cwd = await self.sandbox_manager.get_sandbox_cwd(session_key)
-
parts.append(
f"## Sandbox Environment\n\nYou are running in a sandboxed environment. All file operations and command execution are restricted to the sandbox directory.\nThe sandbox root directory is `{sandbox_cwd}` (use relative paths for all operations)."
)
+ # Add group chat context if applicable
+ if self._is_group_chat:
+ parts.append(
+ f"\n\n## Group Chat Context\nThis is a group chat session. Multiple users can participate in this conversation. Each user message is prefixed with the user ID like @<user_id>. "
+ f"You should pay attention to who is speaking to understand the context. Current user ID: {self._sender_id}")
+
# Viking user profile
- profile = await self.memory.get_viking_user_profile(sandbox_key=sandbox_key)
+ profile = await self.memory.get_viking_user_profile(
+ workspace_id=workspace_id, user_id=self._sender_id
+ )
if profile:
- parts.append(profile)
+ parts.append(f"## Current user's information\n{profile}")
- # Viking memory
+ # Viking agent memory
viking_memory = await self.memory.get_viking_memory_context(
- current_message=current_message, sandbox_key=sandbox_key
+ current_message=current_message, workspace_id=workspace_id
)
if viking_memory:
- parts.append(viking_memory)
+ parts.append(f"## Your memories. Use tools to read more details.\n{viking_memory}")
# Bootstrap files
bootstrap = self._load_bootstrap_files()
@@ -198,7 +211,6 @@ async def build_messages(
self,
history: list[dict[str, Any]],
current_message: str,
- skill_names: list[str] | None = None,
media: list[str] | None = None,
session_key: SessionKey | None = None,
) -> list[dict[str, Any]]:
@@ -208,10 +220,8 @@ async def build_messages(
Args:
history: Previous conversation messages.
current_message: The new user message.
- skill_names: Optional skills to include.
media: Optional list of local file paths for images/media.
- channel: Current channel (telegram, feishu, etc.).
- chat_id: Current chat/user ID.
+ session_key: Optional session key.
Returns:
List of messages including system prompt.
@@ -220,13 +230,14 @@ async def build_messages(
# System prompt
system_prompt = await self.build_system_prompt(session_key, current_message, history)
- if session_key.channel_id and session_key.chat_id:
+ if session_key and session_key.channel_id and session_key.chat_id:
system_prompt += f"\n\n## Current Session\nChannel: {session_key.type}:{session_key.channel_id}\nChat ID: {session_key.chat_id}"
messages.append({"role": "system", "content": system_prompt})
# logger.debug(f"system_prompt: {system_prompt}")
# History
- messages.extend(history)
+ if not self._eval:
+ messages.extend(history)
# Current message (with optional image attachments)
user_content = self._build_user_content(current_message, media)
diff --git a/bot/vikingbot/agent/loop.py b/bot/vikingbot/agent/loop.py
index 11962ccd..349dca90 100644
--- a/bot/vikingbot/agent/loop.py
+++ b/bot/vikingbot/agent/loop.py
@@ -3,9 +3,6 @@
import asyncio
import json
import time
-from dataclasses import dataclass, field
-from datetime import datetime
-from enum import Enum
from pathlib import Path
from loguru import logger
@@ -15,8 +12,9 @@
from vikingbot.agent.subagent import SubagentManager
from vikingbot.agent.tools import register_default_tools
from vikingbot.agent.tools.registry import ToolRegistry
-from vikingbot.bus.events import InboundMessage, OutboundMessage
+from vikingbot.bus.events import InboundMessage, OutboundMessage, OutboundEventType
from vikingbot.bus.queue import MessageBus
+from vikingbot.config import load_config
from vikingbot.config.schema import Config
from vikingbot.config.schema import SessionKey
from vikingbot.hooks import HookContext
@@ -25,25 +23,7 @@
from vikingbot.sandbox import SandboxManager
from vikingbot.session.manager import SessionManager
from vikingbot.utils.helpers import cal_str_tokens
-
-
-class ThinkingStepType(Enum):
- """思考步骤类型(简化版本,避免循环依赖)"""
-
- REASONING = "reasoning"
- TOOL_CALL = "tool_call"
- TOOL_RESULT = "tool_result"
- ITERATION = "iteration"
-
-
-@dataclass
-class ThinkingStep:
- """单个思考步骤(简化版本,避免循环依赖)"""
-
- step_type: ThinkingStepType
- content: str
- timestamp: datetime = field(default_factory=datetime.now)
- metadata: dict = field(default_factory=dict)
+from vikingbot.utils.tracing import trace
class AgentLoop:
@@ -73,9 +53,41 @@ def __init__(
cron_service: "CronService | None" = None,
session_manager: SessionManager | None = None,
sandbox_manager: SandboxManager | None = None,
- thinking_callback=None,
config: Config = None,
+ eval: bool = False,
):
+ """
+ Initialize the AgentLoop with all required dependencies and configuration.
+
+ Args:
+ bus: MessageBus instance for publishing and subscribing to messages.
+ provider: LLMProvider instance for making LLM calls.
+ workspace: Path to the workspace directory for file operations.
+ model: Optional model identifier. If not provided, uses the provider's default.
+ max_iterations: Maximum number of tool execution iterations per message (default: 50).
+ memory_window: Maximum number of messages to keep in session memory (default: 50).
+ brave_api_key: Optional API key for Brave search integration.
+ exa_api_key: Optional API key for Exa search integration.
+ gen_image_model: Optional model identifier for image generation (default: openai/doubao-seedream-4-5-251128).
+ exec_config: Optional configuration for the exec tool (command execution).
+ cron_service: Optional CronService for scheduled task management.
+ session_manager: Optional SessionManager for session persistence. If not provided, a new one is created.
+ sandbox_manager: Optional SandboxManager for sandboxed operations.
+ config: Optional Config object with full configuration. Used if other parameters are not provided.
+
+ Note:
+ The AgentLoop creates its own ContextBuilder, SessionManager (if not provided),
+ ToolRegistry, and SubagentManager during initialization.
+
+ Example:
+ >>> loop = AgentLoop(
+ ... bus=message_bus,
+ ... provider=llm_provider,
+ ... workspace=Path("/path/to/workspace"),
+ ... model="gpt-4",
+ ... max_iterations=30,
+ ... )
+ """
from vikingbot.config.schema import ExecToolConfig
self.bus = bus
@@ -96,9 +108,10 @@ def __init__(
self._register_builtin_hooks()
self.sessions = session_manager or SessionManager(
- workspace, sandbox_manager=sandbox_manager
+ self.config.bot_data_path, sandbox_manager=sandbox_manager
)
self.tools = ToolRegistry()
+ self._eval = eval
self.subagents = SubagentManager(
provider=provider,
workspace=workspace,
@@ -109,8 +122,46 @@ def __init__(
)
self._running = False
- self.thinking_callback = thinking_callback
self._register_default_tools()
+ self._token_usage = {
+ "prompt_tokens": 0,
+ "completion_tokens": 0,
+ "total_tokens": 0,
+ }
+
+ async def _publish_thinking_event(
+ self, session_key: SessionKey, event_type: OutboundEventType, content: str
+ ) -> None:
+ """
+ Publish a thinking event to the message bus.
+
+ Thinking events are used to communicate the agent's internal processing
+ state to the user, such as when the agent is executing a tool or
+ processing a complex request.
+
+ Args:
+ session_key: The session key identifying the conversation.
+ event_type: The type of thinking event (e.g., THINKING, TOOL_START).
+ content: The message content to display to the user.
+
+ Note:
+ This is an internal method used by the agent loop to communicate
+ progress to users during long-running operations.
+
+ Example:
+ >>> await self._publish_thinking_event(
+ ... session_key=SessionKey(channel="telegram", chat_id="123"),
+ ... event_type=OutboundEventType.TOOL_START,
+ ... content="Executing web search..."
+ ... )
+ """
+ await self.bus.publish_outbound(
+ OutboundMessage(
+ session_key=session_key,
+ content=content,
+ event_type=event_type,
+ )
+ )
def _register_builtin_hooks(self):
"""Register built-in hooks."""
@@ -158,68 +209,24 @@ def stop(self) -> None:
self._running = False
logger.info("Agent loop stopping")
- async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
+ async def _run_agent_loop(
+ self,
+ messages: list[dict],
+ session_key: SessionKey,
+ publish_events: bool = True,
+ sender_id: str | None = None,
+ ) -> tuple[str | None, list[dict]]:
"""
- Process a single inbound message.
+ Run the core agent loop: call LLM, execute tools, repeat until done.
Args:
- msg: The inbound message to process.
- session_key: Override session key (used by process_direct).
+ messages: Initial message list
+ session_key: Session key for tool execution context
+ publish_events: Whether to publish ITERATION/REASONING/TOOL_CALL events to the bus
+ sender_id: Optional ID of the message sender, forwarded to tool execution
Returns:
- The response message, or None if no response needed.
+ tuple of (final_content, tools_used)
"""
- # Handle system messages (subagent announces)
- # The chat_id contains the original "channel:chat_id" to route back to
- if msg.session_key.type == "system":
- return await self._process_system_message(msg)
-
- preview = msg.content[:80] + "..." if len(msg.content) > 80 else msg.content
- logger.info(f"Processing message from {msg.session_key}:{msg.sender_id}: {preview}")
-
- # Get or create session
- session_key = msg.session_key
- # For CLI/direct sessions, skip heartbeat by default
- skip_heartbeat = session_key.type in ("cli", "tui")
- session = self.sessions.get_or_create(session_key, skip_heartbeat=skip_heartbeat)
-
- # Handle slash commands
- cmd = msg.content.strip().lower()
- if cmd == "/new":
- await self._consolidate_memory(session, archive_all=True)
- session.clear()
- self.sessions.save(session)
- return OutboundMessage(
- session_key=msg.session_key, content="🐈 New session started. Memory consolidated."
- )
- if cmd == "/help":
- return OutboundMessage(
- session_key=msg.session_key,
- content="🐈 vikingbot commands:\n/new — Start a new conversation\n/help — Show available commands",
- )
-
- # Consolidate memory before processing if session is too large
- if len(session.messages) > self.memory_window:
- await self._consolidate_memory(session)
-
- if self.sandbox_manager:
- message_workspace = self.sandbox_manager.get_workspace_path(session_key)
- else:
- message_workspace = self.workspace
-
- from vikingbot.agent.context import ContextBuilder
-
- message_context = ContextBuilder(message_workspace, sandbox_manager=self.sandbox_manager)
-
- # Build initial messages (use get_history for LLM-formatted messages)
- messages = await message_context.build_messages(
- history=session.get_history(),
- current_message=msg.content,
- media=msg.media if msg.media else None,
- session_key=msg.session_key,
- )
-
- # Agent loop
iteration = 0
final_content = None
tools_used: list[dict] = []
@@ -227,32 +234,36 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
while iteration < self.max_iterations:
iteration += 1
- # 回调:迭代开始
- if self.thinking_callback:
- self.thinking_callback(
- ThinkingStep(
- step_type=ThinkingStepType.ITERATION,
+ if publish_events:
+ await self.bus.publish_outbound(
+ OutboundMessage(
+ session_key=session_key,
content=f"Iteration {iteration}/{self.max_iterations}",
- metadata={"iteration": iteration},
+ event_type=OutboundEventType.ITERATION,
)
)
- # Call LLM
response = await self.provider.chat(
- messages=messages, tools=self.tools.get_definitions(), model=self.model
+ messages=messages,
+ tools=self.tools.get_definitions(),
+ model=self.model,
+ session_id=session_key.safe_name(),
)
-
- # 回调:推理内容
- if response.reasoning_content and self.thinking_callback:
- self.thinking_callback(
- ThinkingStep(
- step_type=ThinkingStepType.REASONING,
+ if response.usage:
+ cur_token = response.usage
+ self._token_usage["prompt_tokens"] += cur_token["prompt_tokens"]
+ self._token_usage["completion_tokens"] += cur_token["completion_tokens"]
+ self._token_usage["total_tokens"] += cur_token["total_tokens"]
+
+ if publish_events and response.reasoning_content:
+ await self.bus.publish_outbound(
+ OutboundMessage(
+ session_key=session_key,
content=response.reasoning_content,
- metadata={},
+ event_type=OutboundEventType.REASONING,
)
)
- # Handle tool calls
if response.has_tool_calls:
args_list = [tc.arguments for tc in response.tool_calls]
tool_call_dicts = [
@@ -261,7 +272,7 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
"type": "function",
"function": {
"name": tc.name,
- "arguments": json.dumps(args), # Use truncated args
+ "arguments": json.dumps(args),
},
}
for tc, args in zip(response.tool_calls, args_list)
@@ -273,20 +284,17 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
reasoning_content=response.reasoning_content,
)
- # Execute tools
for tool_call in response.tool_calls:
args_str = json.dumps(tool_call.arguments, ensure_ascii=False)
- # 回调:工具调用
- if self.thinking_callback:
- self.thinking_callback(
- ThinkingStep(
- step_type=ThinkingStepType.TOOL_CALL,
+ if publish_events:
+ await self.bus.publish_outbound(
+ OutboundMessage(
+ session_key=session_key,
content=f"{tool_call.name}({args_str})",
- metadata={"tool": tool_call.name, "args": tool_call.arguments},
+ event_type=OutboundEventType.TOOL_CALL,
)
)
-
logger.info(f"[TOOL_CALL]: {tool_call.name}({args_str[:200]})")
tool_execute_start_time = time.time()
result = await self.tools.execute(
@@ -294,23 +302,19 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
tool_call.arguments,
session_key=session_key,
sandbox_manager=self.sandbox_manager,
+ sender_id=sender_id,
)
tool_execute_duration = (time.time() - tool_execute_start_time) * 1000
logger.info(f"[RESULT]: {str(result)[:600]}")
- # 回调:工具结果
- if self.thinking_callback:
- result_str = str(result)
- if len(result_str) > 500:
- result_str = result_str[:500] + "..."
- self.thinking_callback(
- ThinkingStep(
- step_type=ThinkingStepType.TOOL_RESULT,
- content=result_str,
- metadata={"tool": tool_call.name},
+ if publish_events:
+ await self.bus.publish_outbound(
+ OutboundMessage(
+ session_key=session_key,
+ content=str(result),
+ event_type=OutboundEventType.TOOL_RESULT,
)
)
-
messages = self.context.add_tool_result(
messages, tool_call.id, tool_call.name, result
)
@@ -327,12 +331,11 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
"output_token": cal_str_tokens(result, text_type="mixed"),
}
tools_used.append(tool_used_dict)
- # Interleaved CoT: reflect before next action
+
messages.append(
- {"role": "user", "content": "Reflect on the results and decide next steps."}
+ {"role": "system", "content": "Reflect on the results and decide next steps."}
)
else:
- # No tool calls, we're done
final_content = response.content
break
@@ -342,21 +345,103 @@ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
else:
final_content = "I've completed processing but have no response to give."
+ return final_content, tools_used
+
+ @trace(
+ name="process_message",
+ extract_session_id=lambda msg: msg.session_key.safe_name(),
+ extract_user_id=lambda msg: msg.sender_id,
+ )
+ async def _process_message(self, msg: InboundMessage) -> OutboundMessage | None:
+ """
+ Process a single inbound message.
+
+ Args:
+ msg: The inbound message to process.
+
+
+ Returns:
+ The response message, or None if no response needed.
+ """
+ # Handle system messages (subagent announces)
+ # The chat_id contains the original "channel:chat_id" to route back to
+ if msg.session_key.type == "system":
+ return await self._process_system_message(msg)
+
+ preview = msg.content[:80] + "..." if len(msg.content) > 80 else msg.content
+ logger.info(f"Processing message from {msg.session_key}:{msg.sender_id}: {preview}")
+
+ # Get or create session
+ session_key = msg.session_key
+ # For CLI/direct sessions, skip heartbeat by default
+ skip_heartbeat = session_key.type == "cli"
+ session = self.sessions.get_or_create(session_key, skip_heartbeat=skip_heartbeat)
+
+ # Handle slash commands
+ is_group_chat = msg.metadata.get("chat_type") == "group" if msg.metadata else False
+ if is_group_chat:
+ cmd = msg.content.replace(f"@{msg.sender_id}", "").strip().lower()
+ else:
+ cmd = msg.content.strip().lower()
+ if cmd == "/new":
+ await self._consolidate_memory(session, archive_all=True)
+ session.clear()
+ await self.sessions.save(session)
+ return OutboundMessage(
+ session_key=msg.session_key, content="🐈 New session started. Memory consolidated."
+ )
+ if cmd == "/help":
+ return OutboundMessage(
+ session_key=msg.session_key,
+ content="🐈 vikingbot commands:\n/new — Start a new conversation\n/help — Show available commands",
+ )
+
+ # Consolidate memory before processing if session is too large
+ if len(session.messages) > self.memory_window:
+ await self._consolidate_memory(session)
+
+ if self.sandbox_manager:
+ message_workspace = self.sandbox_manager.get_workspace_path(session_key)
+ else:
+ message_workspace = self.workspace
+
+ from vikingbot.agent.context import ContextBuilder
+ message_context = ContextBuilder(
+ message_workspace, sandbox_manager=self.sandbox_manager, sender_id=msg.sender_id, is_group_chat=is_group_chat, eval=self._eval
+ )
+
+ # Build initial messages (use get_history for LLM-formatted messages)
+ messages = await message_context.build_messages(
+ history=session.get_history(),
+ current_message=msg.content,
+ media=msg.media if msg.media else None,
+ session_key=msg.session_key,
+ )
+
+ # Run agent loop
+ final_content, tools_used = await self._run_agent_loop(
+ messages=messages,
+ session_key=session_key,
+ publish_events=True,
+ sender_id=msg.sender_id,
+ )
+
# Log response preview
- preview = final_content[:120] + "..." if len(final_content) > 120 else final_content
+ preview = final_content[:300] + "..." if len(final_content) > 300 else final_content
logger.info(f"Response to {msg.session_key}: {preview}")
# Save to session (include tool names so consolidation sees what happened)
- session.add_message("user", msg.content)
+ session.add_message("user", msg.content, sender_id=msg.sender_id)
session.add_message(
"assistant", final_content, tools_used=tools_used if tools_used else None
)
- self.sessions.save(session)
+ await self.sessions.save(session)
return OutboundMessage(
session_key=msg.session_key,
content=final_content,
- metadata=msg.metadata
+ metadata=msg.metadata,
+ token_usage=self._token_usage
or {}, # Pass through for channel-specific needs (e.g. Slack thread_ts)
)
@@ -376,60 +461,22 @@ async def _process_system_message(self, msg: InboundMessage) -> OutboundMessage
history=session.get_history(), current_message=msg.content, session_key=msg.session_key
)
- # Agent loop (limited for announce handling)
- iteration = 0
- final_content = None
-
- while iteration < self.max_iterations:
- iteration += 1
-
- response = await self.provider.chat(
- messages=messages, tools=self.tools.get_definitions(), model=self.model
- )
-
- if response.has_tool_calls:
- tool_call_dicts = [
- {
- "id": tc.id,
- "type": "function",
- "function": {"name": tc.name, "arguments": json.dumps(tc.arguments)},
- }
- for tc in response.tool_calls
- ]
- messages = self.context.add_assistant_message(
- messages,
- response.content,
- tool_call_dicts,
- reasoning_content=response.reasoning_content,
- )
-
- for tool_call in response.tool_calls:
- args_str = json.dumps(tool_call.arguments, ensure_ascii=False)
- logger.info(f"Tool call: {tool_call.name}({args_str[:200]})")
- result = await self.tools.execute(
- tool_call.name,
- tool_call.arguments,
- session_key=msg.session_key,
- sandbox_manager=self.sandbox_manager,
- )
- messages = self.context.add_tool_result(
- messages, tool_call.id, tool_call.name, result
- )
- # Interleaved CoT: reflect before next action
- messages.append(
- {"role": "user", "content": "Reflect on the results and decide next steps."}
- )
- else:
- final_content = response.content
- break
+ # Run agent loop (no events published)
+ final_content, tools_used = await self._run_agent_loop(
+ messages=messages,
+ session_key=msg.session_key,
+ publish_events=False,
+ )
if final_content is None:
final_content = "Background task completed."
# Save to session (mark as system message in history)
session.add_message("user", f"[System: {msg.sender_id}] {msg.content}")
- session.add_message("assistant", final_content)
- self.sessions.save(session)
+ session.add_message(
+ "assistant", final_content, tools_used=tools_used if tools_used else None
+ )
+ await self.sessions.save(session)
return OutboundMessage(session_key=msg.session_key, content=final_content)
@@ -443,7 +490,8 @@ async def _consolidate_memory(self, session, archive_all: bool = False) -> None:
context=HookContext(
event_type="message.compact",
session_id=session.key.safe_name(),
- sandbox_key=self.sandbox_manager.to_sandbox_key(session.key),
+ workspace_id=self.sandbox_manager.to_workspace_id(session.key),
+ session_key=session.key,
),
session=session,
)
@@ -509,6 +557,7 @@ async def _consolidate_memory(self, session, archive_all: bool = False) -> None:
{"role": "user", "content": prompt},
],
model=self.model,
+ session_id=session.key.safe_name(),
)
text = (response.content or "").strip()
if text.startswith("```"):
@@ -518,11 +567,11 @@ async def _consolidate_memory(self, session, archive_all: bool = False) -> None:
if entry := result.get("history_entry"):
memory.append_history(entry)
if update := result.get("memory_update"):
- if update != current_memory:
+ if load_config().use_local_memory and update != current_memory:
memory.write_long_term(update)
session.messages = session.messages[-keep_count:] if keep_count else []
- self.sessions.save(session)
+ await self.sessions.save(session)
logger.info(
f"Memory consolidation done, session trimmed to {len(session.messages)} messages"
)
diff --git a/bot/vikingbot/agent/memory.py b/bot/vikingbot/agent/memory.py
index a8be18f9..1a7b43a0 100644
--- a/bot/vikingbot/agent/memory.py
+++ b/bot/vikingbot/agent/memory.py
@@ -15,9 +15,6 @@ def __init__(self, workspace: Path):
self.memory_dir = ensure_dir(workspace / "memory")
self.memory_file = self.memory_dir / "MEMORY.md"
self.history_file = self.memory_dir / "HISTORY.md"
- config = load_config()
- ov_config = config.openviking
- self.user_id = ov_config.user_id if ov_config.mode == "remote" else "default"
def read_long_term(self) -> str:
if self.memory_file.exists():
@@ -48,24 +45,22 @@ def get_memory_context(self) -> str:
long_term = self.read_long_term()
return f"## Long-term Memory\n{long_term}" if long_term else ""
- async def get_viking_memory_context(self, current_message: str, sandbox_key: str) -> str:
- client = await VikingClient.create(agent_id=sandbox_key)
- result = await client.search_memory(current_message, limit=5)
+ async def get_viking_memory_context(self, current_message: str, workspace_id: str) -> str:
+ client = await VikingClient.create(agent_id=workspace_id)
+ admin_user_id = load_config().ov_server.admin_user_id
+ result = await client.search_memory(current_message, user_id=admin_user_id, limit=5)
if not result:
return ""
user_memory = self._parse_viking_memory(result["user_memory"])
agent_memory = self._parse_viking_memory(result["agent_memory"])
return (
- f"## Related memories.Using tools to read more details.\n"
f"### user memories:\n{user_memory}\n"
f"### agent memories:\n{agent_memory}"
)
- async def get_viking_user_profile(self, sandbox_key: str) -> str:
- client = await VikingClient.create(agent_id=sandbox_key)
- result = await client.read_content(
- uri=f"viking://user/{self.user_id}/memories/profile.md", level="read"
- )
+ async def get_viking_user_profile(self, workspace_id: str, user_id: str) -> str:
+ client = await VikingClient.create(agent_id=workspace_id)
+ result = await client.read_user_profile(user_id)
if not result:
return ""
- return f"## User Information\n{result}"
+ return result
diff --git a/bot/vikingbot/agent/skills.py b/bot/vikingbot/agent/skills.py
index ab07ff4f..b524ddbf 100644
--- a/bot/vikingbot/agent/skills.py
+++ b/bot/vikingbot/agent/skills.py
@@ -2,6 +2,7 @@
import json
import os
+from loguru import logger
import re
import shutil
from pathlib import Path
diff --git a/bot/vikingbot/agent/subagent.py b/bot/vikingbot/agent/subagent.py
index 4dfdd1cd..b397d3a1 100644
--- a/bot/vikingbot/agent/subagent.py
+++ b/bot/vikingbot/agent/subagent.py
@@ -4,17 +4,15 @@
import json
import uuid
from pathlib import Path
-from typing import TYPE_CHECKING, Any
+from typing import Any
from loguru import logger
+from vikingbot.agent.tools.registry import ToolRegistry
from vikingbot.bus.events import InboundMessage
from vikingbot.bus.queue import MessageBus
from vikingbot.config.schema import SessionKey
from vikingbot.providers.base import LLMProvider
-from vikingbot.agent.tools.registry import ToolRegistry
-
-
from vikingbot.sandbox.manager import SandboxManager
diff --git a/bot/vikingbot/agent/tools/base.py b/bot/vikingbot/agent/tools/base.py
index de997fbe..eb0898c1 100644
--- a/bot/vikingbot/agent/tools/base.py
+++ b/bot/vikingbot/agent/tools/base.py
@@ -8,11 +8,33 @@
@dataclass
class ToolContext:
- """Context passed to tools during execution, containing runtime information."""
+ """Context passed to tools during execution, containing runtime information.
+
+ This class encapsulates all the runtime context that a tool might need during
+ execution, including session identification, sandbox access, and sender information.
+
+ Attributes:
+ session_key: Unique identifier for the current session, typically in the format
+ 'channel:chat_id'.
+ sandbox_manager: Optional manager for sandbox operations like file access and
+ command execution. If provided, tools can perform sandboxed operations.
+ workspace_id: Computed workspace identifier derived from the sandbox_manager
+ and session_key. This determines the sandbox directory for the session.
+ sender_id: Optional identifier for the message sender, used for tracking
+ and permission checks.
+
+ Example:
+ >>> context = ToolContext(
+ ... session_key=SessionKey(channel="telegram", chat_id="12345"),
+ ... sandbox_manager=sandbox_mgr,
+ ... sender_id="user_123"
+ ... )
+ """
session_key: SessionKey = None
sandbox_manager: SandboxManager | None = None
- sandbox_key: str = sandbox_manager.to_sandbox_key(session_key) if sandbox_manager else None
+ workspace_id: str | None = None  # NOTE(review): former conditional default always evaluated to None at class-definition time (sandbox_manager is None in class scope); compute per-instance if needed
+ sender_id: str | None = None
"""Base class for agent tools."""
@@ -25,8 +47,43 @@ class Tool(ABC):
"""
Abstract base class for agent tools.
- Tools are capabilities that the agent can use to interact with
- the environment, such as reading files, executing commands, etc.
+ Tools are capabilities that the agent can use to interact with the environment,
+ such as reading files, executing commands, searching the web, etc. Each tool
+ defines its own name, description, parameters schema, and execution logic.
+
+ To create a new tool, subclass Tool and implement the required abstract
+ properties and methods:
+ - name: The unique identifier for the tool
+ - description: Human-readable explanation of what the tool does
+ - parameters: JSON Schema defining the tool's input parameters
+ - execute(): The actual implementation of the tool's functionality
+
+ Attributes:
+ _TYPE_MAP: Internal mapping of JSON schema types to Python types for
+ parameter validation.
+
+ Example:
+ >>> class GreetingTool(Tool):
+ ... @property
+ ... def name(self) -> str:
+ ... return "greet"
+ ...
+ ... @property
+ ... def description(self) -> str:
+ ... return "Sends a greeting message"
+ ...
+ ... @property
+ ... def parameters(self) -> dict[str, Any]:
+ ... return {
+ ... "type": "object",
+ ... "properties": {
+ ... "name": {"type": "string", "description": "Name to greet"}
+ ... },
+ ... "required": ["name"]
+ ... }
+ ...
+ ... async def execute(self, context: ToolContext, name: str) -> str:
+ ... return f"Hello, {name}!"
"""
_TYPE_MAP = {
@@ -71,13 +128,57 @@ async def execute(self, tool_context: ToolContext, **kwargs: Any) -> str:
pass
def validate_params(self, params: dict[str, Any]) -> list[str]:
- """Validate tool parameters against JSON schema. Returns error list (empty if valid)."""
+ """
+ Validate tool parameters against the tool's JSON schema.
+
+ This method validates that the provided parameters match the tool's
+ defined schema, including type checking, required field validation,
+ enum validation, and range constraints.
+
+ Args:
+ params: Dictionary of parameter names to values to validate.
+
+ Returns:
+ List of error messages. An empty list indicates the parameters
+ are valid.
+
+ Raises:
+ ValueError: If the tool's parameter schema is not an object type.
+
+ Example:
+ >>> tool = MyTool()
+ >>> errors = tool.validate_params({"name": "test", "count": 5})
+ >>> if errors:
+ ... print("Validation failed:", errors)
+ ... else:
+ ... print("Parameters are valid")
+ """
schema = self.parameters or {}
if schema.get("type", "object") != "object":
raise ValueError(f"Schema must be object type, got {schema.get('type')!r}")
return self._validate(params, {**schema, "type": "object"}, "")
def _validate(self, val: Any, schema: dict[str, Any], path: str) -> list[str]:
+ """
+ Recursively validate a value against a JSON schema.
+
+ This internal method performs recursive validation of values against
+ JSON schema definitions, supporting all common JSON schema features
+ including type checking, enums, ranges, string length, object properties,
+ and array items.
+
+ Args:
+ val: The value to validate.
+ schema: The JSON schema to validate against.
+ path: The current path in the data structure (for error messages).
+
+ Returns:
+ List of validation error messages. Empty list if validation passes.
+
+ Note:
+ This is an internal method used by validate_params(). It should
+ not be called directly from outside the class.
+ """
t, label = schema.get("type"), path or "parameter"
if t in self._TYPE_MAP and not isinstance(val, self._TYPE_MAP[t]):
return [f"{label} should be {t}"]
@@ -111,7 +212,30 @@ def _validate(self, val: Any, schema: dict[str, Any], path: str) -> list[str]:
return errors
def to_schema(self) -> dict[str, Any]:
- """Convert tool to OpenAI function schema format."""
+ """
+ Convert tool to OpenAI function schema format.
+
+ This method transforms the tool's definition into the format expected by
+ OpenAI's function calling API, which can be used with chat completions.
+
+ Returns:
+ Dictionary containing the function schema in OpenAI format with:
+ - type: Always "function"
+ - function: Object containing name, description, and parameters
+
+ Example:
+ >>> tool = MyTool()
+ >>> schema = tool.to_schema()
+ >>> print(schema)
+ {
+ 'type': 'function',
+ 'function': {
+ 'name': 'my_tool',
+ 'description': 'Does something useful',
+ 'parameters': {'type': 'object', 'properties': {...}}
+ }
+ }
+ """
return {
"type": "function",
"function": {
diff --git a/bot/vikingbot/agent/tools/factory.py b/bot/vikingbot/agent/tools/factory.py
index 96a73869..8716a8ae 100644
--- a/bot/vikingbot/agent/tools/factory.py
+++ b/bot/vikingbot/agent/tools/factory.py
@@ -19,6 +19,7 @@
from vikingbot.agent.tools.shell import ExecTool
from vikingbot.agent.tools.web import WebFetchTool
from vikingbot.agent.tools.websearch import WebSearchTool
+from vikingbot.config.loader import load_config
if TYPE_CHECKING:
from vikingbot.agent.tools.spawn import SpawnTool
@@ -56,12 +57,13 @@ def register_default_tools(
exec_config = config.tools.exec
brave_api_key = config.tools.web.search.api_key if config.tools.web.search else None
exa_api_key = None # TODO: Add to config if needed
- gen_image_model = config.agents.defaults.gen_image_model
# Get provider API key and base from config
- provider_config = config.get_provider()
- provider_api_key = provider_config.api_key if provider_config else None
- provider_api_base = provider_config.api_base if provider_config else None
+
+ agent_config = load_config().agents
+ provider_api_key = agent_config.api_key if agent_config else None
+ provider_api_base = agent_config.api_base if agent_config else None
+ gen_image_model = agent_config.gen_image_model
# File tools
registry.register(ReadFileTool())
registry.register(WriteFileTool())
diff --git a/bot/vikingbot/agent/tools/ov_file.py b/bot/vikingbot/agent/tools/ov_file.py
index 2e0a5876..d16ccfd9 100644
--- a/bot/vikingbot/agent/tools/ov_file.py
+++ b/bot/vikingbot/agent/tools/ov_file.py
@@ -16,7 +16,7 @@ def __init__(self):
async def _get_client(self, tool_context: ToolContext):
if self._client is None:
- self._client = await VikingClient.create(tool_context.sandbox_key)
+ self._client = await VikingClient.create(tool_context.workspace_id)
return self._client
@@ -375,10 +375,10 @@ def parameters(self) -> dict[str, Any]:
"required": ["query"],
}
- async def execute(self, tool_context: "ToolContext", query: str, **kwargs: Any) -> str:
+ async def execute(self, tool_context: ToolContext, query: str, **kwargs: Any) -> str:
try:
client = await self._get_client(tool_context)
- results = await client.search_user_memory(query)
+ results = await client.search_user_memory(query, tool_context.sender_id)
if not results:
return f"No results found for query: {query}"
@@ -396,7 +396,7 @@ def name(self) -> str:
@property
def description(self) -> str:
- return "Commit messages to OpenViking session to persist conversation history."
+ return "Commit messages to OpenViking when the user shares personal information that should be remembered."
@property
def parameters(self) -> dict[str, Any]:
@@ -405,7 +405,7 @@ def parameters(self) -> dict[str, Any]:
"properties": {
"messages": {
"type": "array",
- "description": "List of messages to commit, each with role, content, and optional tools_used",
+ "description": "List of messages to commit, each with role, content",
"items": {
"type": "object",
"properties": {
@@ -426,9 +426,11 @@ async def execute(
**kwargs: Any,
) -> str:
try:
+ if not tool_context.sender_id:
+ return "Error: sender_id is required to commit messages."
client = await self._get_client(tool_context)
session_id = tool_context.session_key.safe_name()
- await client.commit(session_id, messages)
+ await client.commit(session_id, messages, tool_context.sender_id)
return f"Successfully committed to session {session_id}"
except Exception as e:
logger.exception(f"Error processing message: {e}")
diff --git a/bot/vikingbot/agent/tools/registry.py b/bot/vikingbot/agent/tools/registry.py
index e86c47db..628e2bd4 100644
--- a/bot/vikingbot/agent/tools/registry.py
+++ b/bot/vikingbot/agent/tools/registry.py
@@ -1,25 +1,17 @@
"""Tool registry for dynamic tool management."""
-from loguru import logger
-
-from typing import Any, TYPE_CHECKING
-
-from vikingbot.agent.tools.base import Tool, ToolContext
-from vikingbot.config import loader
-from vikingbot.config.schema import SessionKey
-from vikingbot.hooks import HookContext
-from vikingbot.hooks.manager import hook_manager
-from vikingbot.sandbox.manager import SandboxManager
+import time
-"""Tool registry for dynamic tool management."""
from loguru import logger
from typing import Any
-from vikingbot.agent.tools.base import Tool
+from vikingbot.agent.tools.base import Tool, ToolContext
from vikingbot.config.schema import SessionKey
from vikingbot.hooks import HookContext
from vikingbot.hooks.manager import hook_manager
+from vikingbot.integrations.langfuse import LangfuseClient
+from vikingbot.sandbox.manager import SandboxManager
class ToolRegistry:
@@ -31,25 +23,99 @@ class ToolRegistry:
def __init__(self):
self._tools: dict[str, Tool] = {}
+ self.langfuse = LangfuseClient.get_instance()
def register(self, tool: Tool) -> None:
- """Register a tool."""
+ """
+ Register a tool in the registry.
+
+ Adds the tool to the internal registry dictionary, using the tool's name
+ as the key. If a tool with the same name already exists, it will be
+ silently overwritten.
+
+ Args:
+ tool: The Tool instance to register. Must have a unique name property.
+
+ Note:
+ Currently, duplicate registration silently overwrites the existing tool.
+ Consider checking for duplicates if this behavior is not desired.
+
+ Example:
+ >>> registry = ToolRegistry()
+ >>> tool = MyTool()
+ >>> registry.register(tool)
+ >>> assert registry.has(tool.name)
+ """
self._tools[tool.name] = tool
def unregister(self, name: str) -> None:
- """Unregister a tool by name."""
+ """
+ Unregister a tool by name.
+
+ Removes the tool with the specified name from the registry. If no tool
+ with that name exists, this operation is a no-op (no error is raised).
+
+ Args:
+ name: The name of the tool to unregister.
+
+ Example:
+ >>> registry.register(my_tool)
+ >>> registry.unregister(my_tool.name)
+ >>> assert not registry.has(my_tool.name)
+ """
self._tools.pop(name, None)
def get(self, name: str) -> Tool | None:
- """Get a tool by name."""
+ """
+ Get a tool by name.
+
+ Retrieves the tool with the specified name from the registry.
+
+ Args:
+ name: The name of the tool to retrieve.
+
+ Returns:
+ The Tool instance if found, or None if no tool with that name exists.
+
+ Example:
+ >>> tool = registry.get("read_file")
+ >>> if tool:
+ ... print(f"Found tool: {tool.description}")
+ """
return self._tools.get(name)
def has(self, name: str) -> bool:
- """Check if a tool is registered."""
+ """
+ Check if a tool is registered.
+
+ Args:
+ name: The name of the tool to check.
+
+ Returns:
+ True if a tool with the given name is registered, False otherwise.
+
+ Example:
+ >>> if registry.has("read_file"):
+ ... print("Read file tool is available")
+ """
return name in self._tools
def get_definitions(self) -> list[dict[str, Any]]:
- """Get all tool definitions in OpenAI format."""
+ """
+ Get all tool definitions in OpenAI format.
+
+ Converts all registered tools to the OpenAI function schema format,
+ suitable for use with OpenAI's function calling API.
+
+ Returns:
+ List of tool schemas in OpenAI format, where each schema contains
+ the tool's type, name, description, and parameters.
+
+ Example:
+ >>> definitions = registry.get_definitions()
+ >>> for defn in definitions:
+ ... print(f"Tool: {defn['function']['name']}")
+ """
return [tool.to_schema() for tool in self._tools.values()]
async def execute(
@@ -58,6 +124,7 @@ async def execute(
params: dict[str, Any],
session_key: SessionKey,
sandbox_manager: SandboxManager | None = None,
+ sender_id: str | None = None,
) -> str:
"""
Execute a tool by name with given parameters.
@@ -67,6 +134,7 @@ async def execute(
params: Tool parameters.
session_key: Session key for the current session.
sandbox_manager: Sandbox manager for file/shell operations.
+ sender_id: Sender id for the current session.
Returns:
Tool execution result as string.
@@ -81,24 +149,56 @@ async def execute(
tool_context = ToolContext(
session_key=session_key,
sandbox_manager=sandbox_manager,
- sandbox_key=sandbox_manager.to_sandbox_key(session_key),
+ sender_id=sender_id,
)
+ # Langfuse tool call tracing - automatic for all tools
+ tool_span = None
+ start_time = time.time()
result = None
try:
+ if self.langfuse.enabled:
+ tool_ctx = self.langfuse.tool_call(
+ name=name,
+ input=params,
+ session_id=session_key.safe_name(),
+ )
+ tool_span = tool_ctx.__enter__()
+
errors = tool.validate_params(params)
if errors:
- return f"Error: Invalid parameters for tool '{name}': " + "; ".join(errors)
- result = await tool.execute(tool_context, **params)
+ result = f"Error: Invalid parameters for tool '{name}': " + "; ".join(errors)
+ else:
+ result = await tool.execute(tool_context, **params)
except Exception as e:
result = e
logger.exception("Tool call fail: ", e)
+ finally:
+ # End Langfuse tool call tracing
+ duration_ms = (time.time() - start_time) * 1000
+ if tool_span is not None:
+ try:
+ execute_success = not isinstance(result, Exception) and not (
+ isinstance(result, str) and result.startswith("Error")
+ )
+ output_str = str(result) if result is not None else None
+ self.langfuse.end_tool_call(
+ span=tool_span,
+ output=output_str,
+ success=execute_success,
+ metadata={"duration_ms": duration_ms},
+ )
+ if hasattr(tool_span, "__exit__"):
+ tool_span.__exit__(None, None, None)
+ self.langfuse.flush()
+ except Exception:
+ pass
hook_result = await hook_manager.execute_hooks(
context=HookContext(
event_type="tool.post_call",
- session_id=session_key.safe_name(),
- sandbox_key=sandbox_manager.to_sandbox_key(session_key),
+ session_key=session_key,
+ workspace_id=sandbox_manager.to_workspace_id(session_key),
),
tool_name=name,
params=params,
diff --git a/bot/vikingbot/bus/events.py b/bot/vikingbot/bus/events.py
index 98270219..b3e9accb 100644
--- a/bot/vikingbot/bus/events.py
+++ b/bot/vikingbot/bus/events.py
@@ -2,29 +2,32 @@
from dataclasses import dataclass, field
from datetime import datetime
+from enum import Enum
from typing import Any
from vikingbot.config.schema import SessionKey
+class OutboundEventType(str, Enum):
+ """Type of outbound message/event."""
+ RESPONSE = "response" # Normal response message
+ TOOL_CALL = "tool_call" # Tool being called
+ TOOL_RESULT = "tool_result" # Result from tool execution
+ REASONING = "reasoning" # Reasoning content
+ ITERATION = "iteration" # Iteration marker
+
+
@dataclass
class InboundMessage:
"""Message received from a chat channel."""
- # channel: str # telegram, discord, slack, whatsapp
sender_id: str # User identifier
- # chat_id: str # Chat/channel identifier
content: str # Message text
session_key: SessionKey
timestamp: datetime = field(default_factory=datetime.now)
media: list[str] = field(default_factory=list) # Media URLs
metadata: dict[str, Any] = field(default_factory=dict) # Channel-specific data
- # @property
- # def session_key(self) -> str:
- # """Unique key for session identification."""
- # return f"{self.channel}:{self.chat_id}"
-
@dataclass
class OutboundMessage:
@@ -32,6 +35,18 @@ class OutboundMessage:
session_key: SessionKey
content: str
+ event_type: OutboundEventType = OutboundEventType.RESPONSE
reply_to: str | None = None
media: list[str] = field(default_factory=list)
metadata: dict[str, Any] = field(default_factory=dict)
+ token_usage: dict[str, int] = field(default_factory=dict)
+
+ @property
+ def channel(self) -> str:
+ """Get channel key from session key."""
+ return self.session_key.channel_key()
+
+ @property
+ def is_normal_message(self) -> bool:
+ """Check if this is a normal response message."""
+ return self.event_type == OutboundEventType.RESPONSE
diff --git a/bot/vikingbot/bus/queue.py b/bot/vikingbot/bus/queue.py
index 82d3a002..83c96e97 100644
--- a/bot/vikingbot/bus/queue.py
+++ b/bot/vikingbot/bus/queue.py
@@ -1,7 +1,7 @@
"""Async message queue for decoupled channel-agent communication."""
import asyncio
-from typing import Callable, Awaitable
+from typing import Callable, Awaitable, Any
from loguru import logger
@@ -26,6 +26,7 @@ def __init__(self):
async def publish_inbound(self, msg: InboundMessage) -> None:
"""Publish a message from a channel to the agent."""
+ #print(f'publish_inbound={msg}')
await self.inbound.put(msg)
async def consume_inbound(self) -> InboundMessage:
@@ -34,6 +35,7 @@ async def consume_inbound(self) -> InboundMessage:
async def publish_outbound(self, msg: OutboundMessage) -> None:
"""Publish a response from the agent to channels."""
+ #print(f'publish_outbound={msg}')
await self.outbound.put(msg)
async def consume_outbound(self) -> OutboundMessage:
@@ -41,12 +43,12 @@ async def consume_outbound(self) -> OutboundMessage:
return await self.outbound.get()
def subscribe_outbound(
- self, channel: str, callback: Callable[[OutboundMessage], Awaitable[None]]
+ self, channel_key: str, callback: Callable[[OutboundMessage], Awaitable[None]]
) -> None:
- """Subscribe to outbound messages for a specific channel."""
- if channel not in self._outbound_subscribers:
- self._outbound_subscribers[channel] = []
- self._outbound_subscribers[channel].append(callback)
+ """Subscribe to outbound messages for a specific channel key."""
+ if channel_key not in self._outbound_subscribers:
+ self._outbound_subscribers[channel_key] = []
+ self._outbound_subscribers[channel_key].append(callback)
async def dispatch_outbound(self) -> None:
"""
@@ -57,14 +59,17 @@ async def dispatch_outbound(self) -> None:
while self._running:
try:
msg = await asyncio.wait_for(self.outbound.get(), timeout=1.0)
- subscribers = self._outbound_subscribers.get(msg.channel, [])
+ channel_key = msg.session_key.channel_key()
+ subscribers = self._outbound_subscribers.get(channel_key, [])
for callback in subscribers:
try:
await callback(msg)
except Exception as e:
- logger.exception(f"Error dispatching to {msg.channel}: {e}")
+ logger.exception(f"Error dispatching to {channel_key}: {e}")
except asyncio.TimeoutError:
continue
+ except asyncio.CancelledError:
+ break
def stop(self) -> None:
"""Stop the dispatcher loop."""
diff --git a/bot/vikingbot/channels/base.py b/bot/vikingbot/channels/base.py
index f5cef1a7..abf266d8 100644
--- a/bot/vikingbot/channels/base.py
+++ b/bot/vikingbot/channels/base.py
@@ -138,7 +138,7 @@ async def _handle_message(
msg = InboundMessage(
session_key=SessionKey(
- type=str(self.channel_type.value), channel_id=self.channel_id, chat_id=chat_id
+ type=str(getattr(self.channel_type, 'value', self.channel_type)), channel_id=self.channel_id, chat_id=chat_id
),
sender_id=str(sender_id),
content=content,
diff --git a/bot/vikingbot/channels/chat.py b/bot/vikingbot/channels/chat.py
new file mode 100644
index 00000000..7e8bd6b5
--- /dev/null
+++ b/bot/vikingbot/channels/chat.py
@@ -0,0 +1,163 @@
+# Copyright (c) 2026 Beijing Volcano Engine Technology Co., Ltd.
+# SPDX-License-Identifier: Apache-2.0
+"""Chat channel for interactive mode."""
+
+import asyncio
+import os
+import signal
+import sys
+from pathlib import Path
+from typing import Any
+
+from loguru import logger
+from rich.style import Style
+
+from vikingbot.bus.events import InboundMessage, OutboundMessage, OutboundEventType
+from vikingbot.bus.queue import MessageBus
+from vikingbot.channels.base import BaseChannel
+from vikingbot.config.schema import SessionKey, BaseChannelConfig
+
+
+class ChatChannelConfig(BaseChannelConfig):
+ """Configuration for ChatChannel."""
+
+ enabled: bool = True
+ type: Any = "cli"
+
+ def channel_id(self) -> str:
+ return "chat"
+
+
+class ChatChannel(BaseChannel):
+ """
+ Chat channel for interactive mode.
+
+ This channel supports:
+ - Interactive mode (prompt-based)
+ - Displays thinking steps and tool calls
+ """
+
+ name: str = "chat"
+
+ def __init__(
+ self,
+ config: BaseChannelConfig,
+ bus: MessageBus,
+ workspace_path: Path | None = None,
+ session_id: str = "cli__chat__default",
+ markdown: bool = True,
+ logs: bool = False,
+ ):
+ super().__init__(config, bus, workspace_path)
+ self.session_id = session_id
+ self.markdown = markdown
+ self.logs = logs
+ self._response_received = asyncio.Event()
+ self._last_response: str | None = None
+
+ async def start(self) -> None:
+ """Start the chat channel."""
+ self._running = True
+
+ # Interactive mode only
+ await self._run_interactive()
+
+ async def stop(self) -> None:
+ """Stop the chat channel."""
+ self._running = False
+
+ async def send(self, msg: OutboundMessage) -> None:
+ """Send a message - display thinking events and store final response."""
+ from vikingbot.cli.commands import console
+ from rich.markdown import Markdown
+ from rich.text import Text
+
+ if msg.is_normal_message:
+ self._last_response = msg.content
+ self._response_received.set()
+ # Print Bot: response
+ console.print()
+ content = msg.content or ""
+ console.print("[bold red]Bot:[/bold red]")
+ from rich.markdown import Markdown
+ from rich.text import Text
+ body = Markdown(content, style="red") if self.markdown else Text(content, style=Style(color="red"))
+
+ console.print(body)
+ console.print()
+ else:
+ # Handle thinking events
+ if msg.event_type == OutboundEventType.REASONING:
+ # Truncate long reasoning
+ content = msg.content.strip()
+ if content:
+ if len(content) > 100:
+ content = content[:100] + "..."
+ console.print(f" [dim]Think: {content}[/dim]")
+ elif msg.event_type == OutboundEventType.TOOL_CALL:
+ console.print(f" [dim]├─ Calling: {msg.content}[/dim]")
+ elif msg.event_type == OutboundEventType.TOOL_RESULT:
+ # Truncate long tool results
+ content = msg.content
+ if len(content) > 150:
+ content = content[:150] + "..."
+ console.print(f" [dim]└─ Result: {content}[/dim]")
+
+ async def _run_interactive(self) -> None:
+ """Run in interactive mode."""
+ from vikingbot.cli.commands import (
+ _flush_pending_tty_input,
+ _init_prompt_session,
+ _is_exit_command,
+ _restore_terminal,
+ __logo__,
+ console,
+ _read_interactive_input_async,
+ )
+
+ _init_prompt_session()
+
+ def _exit_on_sigint(signum, frame):
+ _restore_terminal()
+ console.print("\nGoodbye!")
+ os._exit(0)
+
+ signal.signal(signal.SIGINT, _exit_on_sigint)
+
+ while self._running:
+ try:
+ _flush_pending_tty_input()
+
+ user_input = await _read_interactive_input_async()
+ command = user_input.strip()
+
+ if not command:
+ continue
+
+ if _is_exit_command(command):
+ _restore_terminal()
+ console.print("\nGoodbye!")
+ break
+
+ # Reset and send message
+ self._response_received.clear()
+ self._last_response = None
+
+ msg = InboundMessage(
+ session_key=SessionKey.from_safe_name(self.session_id),
+ sender_id="user",
+ content=user_input,
+ )
+ await self.bus.publish_inbound(msg)
+
+ # Wait for response
+ await self._response_received.wait()
+
+ except KeyboardInterrupt:
+ _restore_terminal()
+ console.print("\nGoodbye!")
+ break
+ except EOFError:
+ _restore_terminal()
+ console.print("\nGoodbye!")
+ break
diff --git a/bot/vikingbot/channels/dingtalk.py b/bot/vikingbot/channels/dingtalk.py
index 3584b122..51809a08 100644
--- a/bot/vikingbot/channels/dingtalk.py
+++ b/bot/vikingbot/channels/dingtalk.py
@@ -112,7 +112,7 @@ async def start(self) -> None:
try:
if not DINGTALK_AVAILABLE:
logger.exception(
- "DingTalk Stream SDK not installed. Run: pip install dingtalk-stream"
+ "DingTalk Stream SDK not installed. Install with: uv pip install 'vikingbot[dingtalk]' (or uv pip install -e \".[dingtalk]\" for local dev)"
)
return
@@ -189,6 +189,10 @@ async def _get_access_token(self) -> str | None:
async def send(self, msg: OutboundMessage) -> None:
"""Send a message through DingTalk."""
+ # Only send normal response messages, skip thinking/tool_call/etc.
+ if not msg.is_normal_message:
+ return
+
token = await self._get_access_token()
if not token:
return
diff --git a/bot/vikingbot/channels/discord.py b/bot/vikingbot/channels/discord.py
index b1e43388..b80ad495 100644
--- a/bot/vikingbot/channels/discord.py
+++ b/bot/vikingbot/channels/discord.py
@@ -75,6 +75,10 @@ async def stop(self) -> None:
async def send(self, msg: OutboundMessage) -> None:
"""Send a message through Discord REST API."""
+ # Only send normal response messages, skip thinking/tool_call/etc.
+ if not msg.is_normal_message:
+ return
+
if not self._http:
logger.warning("Discord HTTP client not initialized")
return
diff --git a/bot/vikingbot/channels/feishu.py b/bot/vikingbot/channels/feishu.py
index 1aa78796..d17ca8c0 100644
--- a/bot/vikingbot/channels/feishu.py
+++ b/bot/vikingbot/channels/feishu.py
@@ -1,22 +1,14 @@
"""Feishu/Lark channel implementation using lark-oapi SDK with WebSocket long connection."""
import asyncio
-import base64
import io
import json
import re
-import os
-import threading
import tempfile
+import threading
from collections import OrderedDict
-from pathlib import Path
-from typing import Any, Tuple
-from urllib.parse import urlparse
-
-import httpx
-from loguru import logger
+from typing import Any
-from vikingbot.utils import get_data_path
import httpx
from loguru import logger
@@ -51,6 +43,8 @@
P2ImMessageReceiveV1,
GetImageRequest,
GetMessageResourceRequest,
+ ReplyMessageRequest,
+ ReplyMessageRequestBody,
)
FEISHU_AVAILABLE = True
@@ -122,7 +116,6 @@ async def _upload_image_to_feishu(self, image_data: bytes) -> str:
"""
Upload image to Feishu media library and get image_key.
"""
- import time
token = await self._get_tenant_access_token()
url = "https://open.feishu.cn/open-apis/im/v1/images"
@@ -133,12 +126,9 @@ async def _upload_image_to_feishu(self, image_data: bytes) -> str:
files = {"image": ("image.png", io.BytesIO(image_data), "image/png")}
data = {"image_type": "message"}
- logger.debug(f"Uploading image to {url} with image_data {image_data[:20]}...")
-
async with httpx.AsyncClient(timeout=60.0) as client:
resp = await client.post(url, headers=headers, data=data, files=files)
- logger.debug(f"Upload response status: {resp.status_code}")
- logger.debug(f"Upload response content: {resp.text}")
+ # logger.debug(f"Upload response status: {resp.status_code}")
resp.raise_for_status()
result = resp.json()
if result.get("code") != 0:
@@ -185,13 +175,14 @@ async def _save_image_to_temp(self, image_bytes: bytes) -> str:
f.write(image_bytes)
temp_path = f.name
- logger.debug(f"Saved image to temp file: {temp_path}")
return temp_path
async def start(self) -> None:
"""Start the Feishu bot with WebSocket long connection."""
if not FEISHU_AVAILABLE:
- logger.exception("Feishu SDK not installed. Run: pip install lark-oapi")
+ logger.exception(
+ "Feishu SDK not installed. Install with: uv pip install 'vikingbot[feishu]' (or uv pip install -e \".[feishu]\" for local dev)"
+ )
return
if not self.config.app_id or not self.config.app_secret:
@@ -280,8 +271,6 @@ def _add_reaction_sync(self, message_id: str, emoji_type: str) -> None:
if not response.success():
logger.warning(f"Failed to add reaction: code={response.code}, msg={response.msg}")
- else:
- logger.debug(f"Added {emoji_type} reaction to message {message_id}")
except Exception as e:
logger.warning(f"Error adding reaction: {e}")
@@ -410,7 +399,6 @@ async def _process_content_with_images(
alt_text = m.group(1) or ""
img_url = m.group(2)
try:
- logger.debug(f"Processing Markdown image: {img_url[:100]}...")
is_content, result = await self._parse_data_uri(img_url)
if not is_content and isinstance(result, bytes):
@@ -427,7 +415,6 @@ async def _process_content_with_images(
for m in re.finditer(send_pattern, content):
img_url = m.group(1) or ""
try:
- logger.debug(f"Processing Markdown image: {img_url[:100]}...")
is_content, result = await self._parse_data_uri(img_url)
if not is_content and isinstance(result, bytes):
@@ -460,6 +447,10 @@ async def send(self, msg: OutboundMessage) -> None:
logger.warning("Feishu client not initialized")
return
+ # Only send normal response messages, skip thinking/tool_call/etc.
+ if not msg.is_normal_message:
+ return
+
try:
# Determine receive_id_type based on chat_id format
# open_id starts with "ou_", chat_id starts with "oc_"
@@ -467,40 +458,126 @@ async def send(self, msg: OutboundMessage) -> None:
receive_id_type = "chat_id"
else:
receive_id_type = "open_id"
- logger.info(f"[DEBUG] Feishu send() content: {msg.content[:300]}")
- # No images extracted from content, but content might still have Markdown images
- elements = await self._process_content_with_images(
- msg.content, receive_id_type, msg.session_key.chat_id
- )
- card = {
- "config": {"wide_screen_mode": True},
- "elements": elements,
- }
- content = json.dumps(card, ensure_ascii=False)
+ # Process images and get cleaned content
+ cleaned_content, images = await self._extract_and_upload_images(msg.content)
- request = (
- CreateMessageRequest.builder()
- .receive_id_type(receive_id_type)
- .request_body(
- CreateMessageRequestBody.builder()
- .receive_id(msg.session_key.chat_id)
- .msg_type("interactive")
- .content(content)
- .build()
- )
- .build()
+ # Process @mentions: convert @ou_xxxx to Feishu mention format
+ # Pattern: @ou_xxxxxxx (user open_id)
+ import re
+
+ mention_pattern = r"@(ou_[a-zA-Z0-9_-]+)"
+
+ def replace_mention(match):
+ open_id = match.group(1)
+ return f'@{open_id}'
+
+ # Replace all mentions
+ content_with_mentions = re.sub(mention_pattern, replace_mention, cleaned_content)
+
+ # Also support @all mention
+ content_with_mentions = content_with_mentions.replace(
+ "@all", '所有人'
)
- response = self._client.im.v1.message.create(request)
+ # Check if we need to reply to a specific message
+ # Get reply message ID from metadata (original incoming message ID)
+ reply_to_message_id = None
+ if msg.metadata:
+ reply_to_message_id = msg.metadata.get("reply_to_message_id") or msg.metadata.get(
+ "message_id"
+ )
- if not response.success():
- logger.exception(
- f"Failed to send Feishu message: code={response.code}, "
- f"msg={response.msg}, log_id={response.get_log_id()}"
+ # Build post message content
+ content_elements = []
+
+ # Add @mention for the original sender when replying
+ original_sender_id = None
+ if reply_to_message_id and msg.metadata:
+ original_sender_id = msg.metadata.get("sender_id")
+
+ # Build content line: [@mention, text content]
+ content_line = []
+
+ # Add @mention element for original sender when replying
+ if original_sender_id:
+ content_line.append({"tag": "at", "user_id": original_sender_id})
+
+ # Add text content
+ if content_with_mentions.strip():
+ content_line.append({"tag": "text", "text": content_with_mentions})
+
+ # Add content line if not empty
+ if content_line:
+ content_elements.append(content_line)
+
+ # Add images
+ for img in images:
+ content_elements.append([{"tag": "img", "image_key": img["image_key"]}])
+
+ # Ensure we have content
+ if not content_elements:
+ content_elements.append([{"tag": "text", "text": " "}])
+
+ post_content = {"zh_cn": {"title": "", "content": content_elements}}
+
+ import json
+
+ content = json.dumps(post_content, ensure_ascii=False)
+
+ if reply_to_message_id:
+ # Reply to existing message (quotes the original)
+ # Only reply in thread if the original message is in a topic (has root_id and is a thread)
+ should_reply_in_thread = False
+ if msg.metadata:
+ root_id = msg.metadata.get("root_id")
+ # Only use reply_in_thread=True if this is an actual topic group thread
+ # In Feishu, topic groups have root_id set for messages in threads
+ # root_id will be set if the message is already part of a thread
+ should_reply_in_thread = root_id is not None and root_id != reply_to_message_id
+
+ request = (
+ ReplyMessageRequest.builder()
+ .message_id(reply_to_message_id)
+ .request_body(
+ ReplyMessageRequestBody.builder()
+ .content(content)
+ .msg_type("post")
+ # Only reply in topic thread if it's actually a topic thread (not regular group)
+ .reply_in_thread(should_reply_in_thread)
+ .build()
+ )
+ .build()
)
+ response = self._client.im.v1.message.reply(request)
else:
- logger.debug(f"Feishu message sent to {msg.session_key.chat_id}")
+ # Send new message
+ request = (
+ CreateMessageRequest.builder()
+ .receive_id_type(receive_id_type)
+ .request_body(
+ CreateMessageRequestBody.builder()
+ .receive_id(msg.session_key.chat_id)
+ .msg_type("post")
+ .content(content)
+ .build()
+ )
+ .build()
+ )
+ response = self._client.im.v1.message.create(request)
+
+ if not response.success():
+ if response.code == 230011:
+ # Original message was withdrawn, just log warning
+ logger.warning(
+ f"Failed to reply to message: original message was withdrawn, code={response.code}, "
+ f"msg={response.msg}, log_id={response.get_log_id()}"
+ )
+ else:
+ logger.exception(
+ f"Failed to send Feishu message: code={response.code}, "
+ f"msg={response.msg}, log_id={response.get_log_id()}"
+ )
except Exception as e:
logger.exception(f"Error sending Feishu message: {e}")
@@ -547,11 +624,6 @@ async def _on_message(self, data: "P2ImMessageReceiveV1") -> None:
content = ""
media = []
- # Log detailed message info for debugging
- logger.info(
- f"Received Feishu message: msg_type={msg_type}, content={message.content[:200]}"
- )
-
if msg_type == "text":
try:
content = json.loads(message.content).get("text", "")
@@ -621,8 +693,6 @@ async def _on_message(self, data: "P2ImMessageReceiveV1") -> None:
logger.warning(
f"Could not download image for image_key: {image_key}"
)
- else:
- logger.warning(f"No image_key found in message content: {msg_content}")
except Exception as e:
logger.warning(f"Failed to download Feishu image: {e}")
import traceback
@@ -634,8 +704,15 @@ async def _on_message(self, data: "P2ImMessageReceiveV1") -> None:
if not content:
return
+ import re
+
+ mention_pattern = re.compile(r"@_user_\d+")
+ content = mention_pattern.sub(f"@{sender_id}", content)
+
# Forward to message bus
reply_to = chat_id if chat_type == "group" else sender_id
+ logger.info(f"Received message from Feishu: {content}")
+
await self._handle_message(
sender_id=sender_id,
chat_id=reply_to,
@@ -645,8 +722,49 @@ async def _on_message(self, data: "P2ImMessageReceiveV1") -> None:
"message_id": message_id,
"chat_type": chat_type,
"msg_type": msg_type,
+ "root_id": message.root_id, # Topic/thread ID for topic groups
+ "sender_id": sender_id, # Original message sender ID for @mention in replies
},
)
except Exception as e:
logger.exception(f"Error processing Feishu message")
+
+ async def _extract_and_upload_images(self, content: str) -> tuple[str, list[dict]]:
+ """Extract images from markdown content, upload to Feishu, and return cleaned content."""
+ images = []
+ cleaned_content = content
+
+ # Pattern 1: 
+ markdown_pattern = r"!\[([^\]]*)\]\((send://[^)\s]+\.(png|jpeg|jpg|gif|bmp|webp))\)"
+ for m in re.finditer(markdown_pattern, content):
+ img_url = m.group(2)
+ try:
+ is_content, result = await self._parse_data_uri(img_url)
+
+ if not is_content and isinstance(result, bytes):
+ image_key = await self._upload_image_to_feishu(result)
+ images.append({"image_key": image_key})
+ except Exception as e:
+ logger.exception(f"Failed to upload Markdown image {img_url[:100]}: {e}")
+
+ # Remove markdown image syntax
+ cleaned_content = re.sub(markdown_pattern, "", cleaned_content)
+
+ # Pattern 2: send://... (without alt text)
+ send_pattern = r"(send://[^)\s]+\.(png|jpeg|jpg|gif|bmp|webp))\)?"
+ for m in re.finditer(send_pattern, content):
+ img_url = m.group(1) or ""
+ try:
+ is_content, result = await self._parse_data_uri(img_url)
+
+ if not is_content and isinstance(result, bytes):
+ image_key = await self._upload_image_to_feishu(result)
+ images.append({"image_key": image_key})
+ except Exception as e:
+ logger.exception(f"Failed to upload Markdown image {img_url[:100]}: {e}")
+
+ # Remove standalone send:// URLs
+ cleaned_content = re.sub(send_pattern, "", cleaned_content)
+
+ return cleaned_content.strip(), images
diff --git a/bot/vikingbot/channels/manager.py b/bot/vikingbot/channels/manager.py
index 68e0c333..f61d6344 100644
--- a/bot/vikingbot/channels/manager.py
+++ b/bot/vikingbot/channels/manager.py
@@ -10,7 +10,7 @@
from vikingbot.bus.events import OutboundMessage
from vikingbot.bus.queue import MessageBus
from vikingbot.channels.base import BaseChannel
-from vikingbot.config.schema import Config, ChannelsConfig
+from vikingbot.config.schema import BaseChannelConfig, ChannelType, Config
class ChannelManager:
@@ -18,120 +18,151 @@ class ChannelManager:
Manages chat channels and coordinates message routing.
Responsibilities:
- - Initialize enabled channels (Telegram, WhatsApp, etc.)
+ - Add channels (directly or from config)
- Start/stop channels
- Route outbound messages
"""
- def __init__(self, config: Config, bus: MessageBus):
- self.config = config
+ def __init__(self, bus: MessageBus):
self.bus = bus
self.channels: dict[str, BaseChannel] = {}
self._dispatch_task: asyncio.Task | None = None
+ self._workspace_path: Any | None = None
+ self._additional_deps: dict[str, Any] = {}
+
    def add_channel(self, channel: BaseChannel) -> None:
        """Register an already-constructed channel under its config's channel key.

        A later registration with the same key silently replaces the earlier one.
        """
        channel_key = channel.config.channel_key()
        self.channels[channel_key] = channel
        logger.info(f"Channel added: {channel.name} ({channel_key})")
+
+ def add_channel_from_config(
+ self,
+ channel_config: BaseChannelConfig,
+ workspace_path: Any | None = None,
+ **additional_deps,
+ ) -> None:
+ """
+ Add a channel from config.
+
+ Args:
+ channel_config: Channel configuration
+ workspace_path: Workspace path for channels that need it
+ **additional_deps: Additional dependencies for specific channels
+ """
+ if not channel_config.enabled:
+ return
- self._init_channels()
-
- def _init_channels(self) -> None:
- """Initialize channels based on config."""
- from vikingbot.config.schema import ChannelType
-
- channels_config = self.config.channels_config
+ try:
+ channel = None
+
+ if channel_config.type == ChannelType.TELEGRAM:
+ from vikingbot.channels.telegram import TelegramChannel
+
+ channel = TelegramChannel(
+ channel_config,
+ self.bus,
+ groq_api_key=additional_deps.get("groq_api_key"),
+ )
+
+ elif channel_config.type == ChannelType.FEISHU:
+ from vikingbot.channels.feishu import FeishuChannel
+
+ channel = FeishuChannel(
+ channel_config,
+ self.bus,
+ workspace_path=workspace_path,
+ )
+
+ elif channel_config.type == ChannelType.DISCORD:
+ from vikingbot.channels.discord import DiscordChannel
+
+ channel = DiscordChannel(
+ channel_config,
+ self.bus,
+ workspace_path=workspace_path,
+ )
+
+ elif channel_config.type == ChannelType.WHATSAPP:
+ from vikingbot.channels.whatsapp import WhatsAppChannel
+
+ channel = WhatsAppChannel(
+ channel_config,
+ self.bus,
+ workspace_path=workspace_path,
+ )
+
+ elif channel_config.type == ChannelType.MOCHAT:
+ from vikingbot.channels.mochat import MochatChannel
+
+ channel = MochatChannel(
+ channel_config,
+ self.bus,
+ workspace_path=workspace_path,
+ )
+
+ elif channel_config.type == ChannelType.DINGTALK:
+ from vikingbot.channels.dingtalk import DingTalkChannel
+
+ channel = DingTalkChannel(
+ channel_config,
+ self.bus,
+ workspace_path=workspace_path,
+ )
+
+ elif channel_config.type == ChannelType.EMAIL:
+ from vikingbot.channels.email import EmailChannel
+
+ channel = EmailChannel(
+ channel_config,
+ self.bus,
+ workspace_path=workspace_path,
+ )
+
+ elif channel_config.type == ChannelType.SLACK:
+ from vikingbot.channels.slack import SlackChannel
+
+ channel = SlackChannel(
+ channel_config,
+ self.bus,
+ workspace_path=workspace_path,
+ )
+
+ elif channel_config.type == ChannelType.QQ:
+ from vikingbot.channels.qq import QQChannel
+
+ channel = QQChannel(
+ channel_config,
+ self.bus,
+ workspace_path=workspace_path,
+ )
+
+ if channel:
+ self.add_channel(channel)
+
+ except ImportError as e:
+ channel_type = getattr(channel_config.type, "value", str(channel_config.type))
+ logger.warning(
+ f"Channel {channel_config.type} not available: {e}. "
+ f"Install with: uv pip install 'vikingbot[{channel_type}]' "
+ f"(or uv pip install -e \".[{channel_type}]\" for local dev)"
+ )
+
+ def load_channels_from_config(
+ self,
+ config: Config,
+ ) -> None:
+ """Load all enabled channels from a Config object."""
+ channels_config = config.channels_config
all_channel_configs = channels_config.get_all_channels()
- workspace_path = self.config.workspace_path
+ workspace_path = config.workspace_path
for channel_config in all_channel_configs:
- if not channel_config.enabled:
- continue
-
- try:
- channel = None
- if channel_config.type == ChannelType.TELEGRAM:
- from vikingbot.channels.telegram import TelegramChannel
-
- channel = TelegramChannel(
- channel_config,
- self.bus,
- groq_api_key=self.config.providers.groq.api_key,
- )
-
- elif channel_config.type == ChannelType.FEISHU:
- from vikingbot.channels.feishu import FeishuChannel
-
- channel = FeishuChannel(
- channel_config,
- self.bus,
- workspace_path=workspace_path,
- )
-
- elif channel_config.type == ChannelType.DISCORD:
- from vikingbot.channels.discord import DiscordChannel
-
- channel = DiscordChannel(
- channel_config,
- self.bus,
- workspace_path=workspace_path,
- )
-
- elif channel_config.type == ChannelType.WHATSAPP:
- from vikingbot.channels.whatsapp import WhatsAppChannel
-
- channel = WhatsAppChannel(
- channel_config,
- self.bus,
- workspace_path=workspace_path,
- )
-
- elif channel_config.type == ChannelType.MOCHAT:
- from vikingbot.channels.mochat import MochatChannel
-
- channel = MochatChannel(
- channel_config,
- self.bus,
- workspace_path=workspace_path,
- )
-
- elif channel_config.type == ChannelType.DINGTALK:
- from vikingbot.channels.dingtalk import DingTalkChannel
-
- channel = DingTalkChannel(
- channel_config,
- self.bus,
- workspace_path=workspace_path,
- )
-
- elif channel_config.type == ChannelType.EMAIL:
- from vikingbot.channels.email import EmailChannel
-
- channel = EmailChannel(
- channel_config,
- self.bus,
- workspace_path=workspace_path,
- )
-
- elif channel_config.type == ChannelType.SLACK:
- from vikingbot.channels.slack import SlackChannel
-
- channel = SlackChannel(
- channel_config,
- self.bus,
- workspace_path=workspace_path,
- )
-
- elif channel_config.type == ChannelType.QQ:
- from vikingbot.channels.qq import QQChannel
-
- channel = QQChannel(
- channel_config,
- self.bus,
- workspace_path=workspace_path,
- )
-
- if channel:
- self.channels[channel.config.channel_key()] = channel
- logger.info(f"Channel enabled: {channel.name}")
-
- except ImportError as e:
- logger.warning(f"Channel {channel_config.type} not available: {e}")
+ self.add_channel_from_config(
+ channel_config,
+ workspace_path=workspace_path,
+ groq_api_key=config.providers.groq.api_key if hasattr(config.providers, "groq") else None,
+ )
async def _start_channel(self, name: str, channel: BaseChannel) -> None:
"""Start a channel and log any exceptions."""
diff --git a/bot/vikingbot/channels/openapi.py b/bot/vikingbot/channels/openapi.py
new file mode 100644
index 00000000..2443ea32
--- /dev/null
+++ b/bot/vikingbot/channels/openapi.py
@@ -0,0 +1,443 @@
+"""OpenAPI channel for HTTP-based chat API."""
+
+import asyncio
+import secrets
+import uuid
+from datetime import datetime
+from pathlib import Path
+from typing import Any, AsyncGenerator, Callable, Dict, List, Optional
+
+from fastapi import APIRouter, Depends, Header, HTTPException, Request
+from fastapi.responses import StreamingResponse
+from loguru import logger
+from pydantic import BaseModel
+
+from vikingbot.bus.events import InboundMessage, OutboundEventType, OutboundMessage
+from vikingbot.bus.queue import MessageBus
+from vikingbot.channels.base import BaseChannel
+from vikingbot.channels.openapi_models import (
+ ChatMessage,
+ ChatRequest,
+ ChatResponse,
+ ChatStreamEvent,
+ ErrorResponse,
+ EventType,
+ HealthResponse,
+ MessageRole,
+ SessionCreateRequest,
+ SessionCreateResponse,
+ SessionDetailResponse,
+ SessionInfo,
+ SessionListResponse,
+)
+from vikingbot.config.schema import BaseChannelConfig, Config, SessionKey
+
+
class OpenAPIChannelConfig(BaseChannelConfig):
    """Configuration for OpenAPI channel."""

    # Enabled by default, since this channel is usually constructed
    # explicitly (see get_openapi_router) rather than from a config file.
    enabled: bool = True
    type: str = "openapi"  # plain string, not a ChannelType enum member
    api_key: str = ""  # If empty, no auth required
    allow_from: list[str] = []  # pydantic copies mutable defaults per instance
    # NOTE(review): declared but not enforced anywhere in this module.
    max_concurrent_requests: int = 100

    def channel_id(self) -> str:
        # NOTE(review): ChannelManager keys channels by config.channel_key();
        # confirm BaseChannelConfig derives channel_key from channel_id.
        return "openapi"
+
+
class PendingResponse:
    """Accumulates agent output for a single in-flight API request.

    Intermediate events are both recorded (for the non-streaming response)
    and forwarded to ``stream_queue`` (for the SSE response). ``event`` is
    set once the final content arrives.
    """

    def __init__(self):
        self.events: List[Dict[str, Any]] = []
        self.final_content: Optional[str] = None
        self.event = asyncio.Event()
        self.stream_queue: asyncio.Queue[Optional[ChatStreamEvent]] = asyncio.Queue()

    async def add_event(self, event_type: str, data: Any):
        """Record an intermediate event and forward it to the SSE queue."""
        record = {
            "type": event_type,
            "data": data,
            "timestamp": datetime.now().isoformat(),
        }
        self.events.append(record)
        stream_event = ChatStreamEvent(event=EventType(event_type), data=data)
        await self.stream_queue.put(stream_event)

    def set_final(self, content: str):
        """Store the final answer and wake any waiter blocked on ``event``."""
        self.final_content = content
        self.event.set()

    async def close_stream(self):
        """Terminate the SSE stream by enqueueing a ``None`` sentinel."""
        await self.stream_queue.put(None)
+
+
class OpenAPIChannel(BaseChannel):
    """
    OpenAPI channel exposing HTTP endpoints for chat API.

    This channel works differently from others - it doesn't subscribe
    to outbound messages directly but uses a request-response pattern:
    an HTTP handler publishes an InboundMessage and then blocks on a
    PendingResponse until the dispatcher delivers the result via send().
    """

    name: str = "openapi"

    def __init__(
        self,
        config: OpenAPIChannelConfig,
        bus: MessageBus,
        workspace_path: Path | None = None,
        app: "FastAPI | None" = None,
    ):
        # "FastAPI" is a string forward reference: only router/response
        # helpers are imported from fastapi at module scope, not FastAPI.
        super().__init__(config, bus, workspace_path)
        self.config = config
        # One in-flight request per session id (keyed by chat_id).
        # NOTE(review): a second concurrent request on the same session
        # overwrites the first entry here - confirm intended.
        self._pending: Dict[str, PendingResponse] = {}
        # In-memory session store only; contents are lost on restart.
        self._sessions: Dict[str, Dict[str, Any]] = {}
        self._router: Optional[APIRouter] = None
        self._app = app  # External FastAPI app to register routes on
        self._server: Optional[asyncio.Task] = None  # Server task

    async def start(self) -> None:
        """Start the channel - register routes to external FastAPI app if provided."""
        self._running = True

        # Register routes to external FastAPI app
        if self._app is not None:
            self._setup_routes()

        logger.info("OpenAPI channel started")

    async def stop(self) -> None:
        """Stop the channel."""
        self._running = False
        # Complete all pending responses so handlers blocked in
        # _handle_chat wake up instead of waiting for the timeout.
        for pending in self._pending.values():
            pending.set_final("")
        logger.info("OpenAPI channel stopped")

    async def send(self, msg: OutboundMessage) -> None:
        """
        Handle outbound messages - routes to pending responses.
        This is called by the message bus dispatcher.

        Messages for sessions with no waiting request are dropped.
        """
        # The API session id doubles as the chat_id in the session key.
        session_id = msg.session_key.chat_id
        pending = self._pending.get(session_id)

        if not pending:
            # No pending request for this session, ignore
            return

        if msg.event_type == OutboundEventType.RESPONSE:
            # Final response: wake the waiter and end the SSE stream.
            pending.set_final(msg.content or "")
            await pending.close_stream()
        elif msg.event_type == OutboundEventType.REASONING:
            await pending.add_event("reasoning", msg.content)
        elif msg.event_type == OutboundEventType.TOOL_CALL:
            await pending.add_event("tool_call", msg.content)
        elif msg.event_type == OutboundEventType.TOOL_RESULT:
            await pending.add_event("tool_result", msg.content)

    def get_router(self) -> APIRouter:
        """Get or create the FastAPI router (created lazily, cached)."""
        if self._router is None:
            self._router = self._create_router()
        return self._router

    def _create_router(self) -> APIRouter:
        """Create the FastAPI router with all routes.

        Route handlers close over ``channel`` (this instance) so they can
        be plain functions rather than bound methods.
        """
        router = APIRouter()
        channel = self  # Capture for closures

        async def verify_api_key(x_api_key: Optional[str] = Header(None)) -> bool:
            """Verify API key if configured."""
            if not channel.config.api_key:
                return True  # No auth required
            if not x_api_key:
                raise HTTPException(status_code=401, detail="X-API-Key header required")
            # Use secrets.compare_digest for timing-safe comparison
            if not secrets.compare_digest(x_api_key, channel.config.api_key):
                raise HTTPException(status_code=403, detail="Invalid API key")
            return True

        @router.get("/health", response_model=HealthResponse)
        async def health_check():
            """Health check endpoint (unauthenticated)."""
            from vikingbot import __version__

            return HealthResponse(
                status="healthy" if channel._running else "unhealthy",
                version=__version__,
            )

        @router.post("/chat", response_model=ChatResponse)
        async def chat(
            request: ChatRequest,
            authorized: bool = Depends(verify_api_key),
        ):
            """Send a chat message and get a response."""
            return await channel._handle_chat(request)

        @router.post("/chat/stream")
        async def chat_stream(
            request: ChatRequest,
            authorized: bool = Depends(verify_api_key),
        ):
            """Send a chat message and get a streaming (SSE) response."""
            # This endpoint always streams, regardless of request.stream.
            if not request.stream:
                request.stream = True
            return await channel._handle_chat_stream(request)

        @router.get("/sessions", response_model=SessionListResponse)
        async def list_sessions(
            authorized: bool = Depends(verify_api_key),
        ):
            """List all sessions held in the in-memory store."""
            sessions = []
            for session_id, session_data in channel._sessions.items():
                sessions.append(
                    SessionInfo(
                        id=session_id,
                        created_at=session_data.get("created_at", datetime.now()),
                        last_active=session_data.get("last_active", datetime.now()),
                        message_count=session_data.get("message_count", 0),
                    )
                )
            return SessionListResponse(sessions=sessions, total=len(sessions))

        @router.post("/sessions", response_model=SessionCreateResponse)
        async def create_session(
            request: SessionCreateRequest,
            authorized: bool = Depends(verify_api_key),
        ):
            """Create a new session with a random UUID id."""
            session_id = str(uuid.uuid4())
            now = datetime.now()
            channel._sessions[session_id] = {
                "user_id": request.user_id,
                "created_at": now,
                "last_active": now,
                "message_count": 0,
                "metadata": request.metadata or {},
            }
            return SessionCreateResponse(session_id=session_id, created_at=now)

        @router.get("/sessions/{session_id}", response_model=SessionDetailResponse)
        async def get_session(
            session_id: str,
            authorized: bool = Depends(verify_api_key),
        ):
            """Get session details."""
            if session_id not in channel._sessions:
                raise HTTPException(status_code=404, detail="Session not found")

            session_data = channel._sessions[session_id]
            info = SessionInfo(
                id=session_id,
                created_at=session_data.get("created_at", datetime.now()),
                last_active=session_data.get("last_active", datetime.now()),
                message_count=session_data.get("message_count", 0),
            )
            # Get messages from session manager if available
            messages = session_data.get("messages", [])
            return SessionDetailResponse(session=info, messages=messages)

        @router.delete("/sessions/{session_id}")
        async def delete_session(
            session_id: str,
            authorized: bool = Depends(verify_api_key),
        ):
            """Delete a session from the in-memory store."""
            if session_id not in channel._sessions:
                raise HTTPException(status_code=404, detail="Session not found")

            del channel._sessions[session_id]
            return {"deleted": True}

        return router

    def _setup_routes(self) -> None:
        """Setup routes on the external FastAPI app."""
        if self._app is None:
            logger.warning("No external FastAPI app provided, cannot setup routes")
            return

        # Get the router and include it at root path
        # Note: openviking-server adds its own /bot/v1 prefix when proxying
        router = self.get_router()
        self._app.include_router(router, prefix="/bot/v1")
        logger.info("OpenAPI routes registered at root path")

    async def _handle_chat(self, request: ChatRequest) -> ChatResponse:
        """Handle a non-streaming chat request.

        Publishes the message to the bus, then blocks (up to 300s) until
        the dispatcher delivers a final response through send().
        """
        # Generate or use provided session ID
        session_id = request.session_id or str(uuid.uuid4())
        user_id = request.user_id or "anonymous"

        # Create session if new
        if session_id not in self._sessions:
            self._sessions[session_id] = {
                "user_id": user_id,
                "created_at": datetime.now(),
                "last_active": datetime.now(),
                "message_count": 0,
                "messages": [],
            }

        # Update session activity
        self._sessions[session_id]["last_active"] = datetime.now()
        self._sessions[session_id]["message_count"] += 1

        # Create pending response tracker
        pending = PendingResponse()
        self._pending[session_id] = pending

        try:
            # Build session key
            session_key = SessionKey(
                type="openapi",
                channel_id=self.config.channel_id(),
                chat_id=session_id,
            )

            # Build content with context if provided
            content = request.message
            if request.context:
                # Context is handled separately by session manager
                pass

            # Create and publish inbound message
            msg = InboundMessage(
                session_key=session_key,
                sender_id=user_id,
                content=content,
            )

            await self.bus.publish_inbound(msg)

            # Wait for response with timeout
            try:
                await asyncio.wait_for(pending.event.wait(), timeout=300.0)
            except asyncio.TimeoutError:
                # Re-raised untouched by the "except HTTPException" below.
                raise HTTPException(status_code=504, detail="Request timeout")

            # Build response
            response_content = pending.final_content or ""

            return ChatResponse(
                session_id=session_id,
                message=response_content,
                events=pending.events if pending.events else None,
            )

        except HTTPException:
            raise
        except Exception as e:
            logger.exception(f"Error handling chat request: {e}")
            raise HTTPException(status_code=500, detail=f"Internal error: {str(e)}")
        finally:
            # Clean up pending
            self._pending.pop(session_id, None)

    async def _handle_chat_stream(self, request: ChatRequest) -> StreamingResponse:
        """Handle a streaming chat request via server-sent events.

        Note: the inbound message is published inside the generator, i.e.
        only once the client starts consuming the response body.
        """
        session_id = request.session_id or str(uuid.uuid4())
        user_id = request.user_id or "anonymous"

        # Create session if new
        if session_id not in self._sessions:
            self._sessions[session_id] = {
                "user_id": user_id,
                "created_at": datetime.now(),
                "last_active": datetime.now(),
                "message_count": 0,
                "messages": [],
            }

        self._sessions[session_id]["last_active"] = datetime.now()
        self._sessions[session_id]["message_count"] += 1

        pending = PendingResponse()
        self._pending[session_id] = pending

        async def event_generator():
            try:
                # Build session key and send message
                session_key = SessionKey(
                    type="openapi",
                    channel_id=self.config.channel_id(),
                    chat_id=session_id,
                )

                msg = InboundMessage(
                    session_key=session_key,
                    sender_id=user_id,
                    content=request.message,
                )

                await self.bus.publish_inbound(msg)

                # Stream events as they arrive; a None sentinel ends the stream.
                while True:
                    try:
                        event = await asyncio.wait_for(pending.stream_queue.get(), timeout=300.0)
                        if event is None:
                            break
                        yield f"data: {event.model_dump_json()}\n\n"
                    except asyncio.TimeoutError:
                        yield f"data: {ChatStreamEvent(event=EventType.RESPONSE, data={'error': 'timeout'}).model_dump_json()}\n\n"
                        break

            except Exception as e:
                logger.exception(f"Error in stream generator: {e}")
                error_event = ChatStreamEvent(
                    event=EventType.RESPONSE,
                    data={"error": str(e)}
                )
                yield f"data: {error_event.model_dump_json()}\n\n"
            finally:
                self._pending.pop(session_id, None)

        return StreamingResponse(
            event_generator(),
            media_type="text/event-stream",
            headers={
                "Cache-Control": "no-cache",
                "Connection": "keep-alive",
            },
        )
+
+
def get_openapi_router(bus: MessageBus, config: Config) -> APIRouter:
    """
    Create and return the OpenAPI router for mounting in FastAPI.

    This factory function creates an OpenAPIChannel and returns its router.
    The router should be mounted in the main FastAPI app.
    """
    # Find OpenAPI config from channels
    # NOTE(review): this iterates config.channels, while ChannelManager
    # uses config.channels_config.get_all_channels() - confirm Config
    # actually exposes a `channels` attribute with this shape.
    openapi_config = None
    for ch_config in config.channels:
        if isinstance(ch_config, dict) and ch_config.get("type") == "openapi":
            openapi_config = OpenAPIChannelConfig(**ch_config)
            break
        elif hasattr(ch_config, "type") and getattr(ch_config, "type", None) == "openapi":
            openapi_config = ch_config
            break

    if openapi_config is None:
        # Create default config
        openapi_config = OpenAPIChannelConfig()

    # Create channel and get router
    channel = OpenAPIChannel(
        config=openapi_config,
        bus=bus,
        workspace_path=config.workspace_path,
    )

    # Register channel's send method as subscriber for outbound messages,
    # so the blocked HTTP handlers receive agent output via channel.send().
    bus.subscribe_outbound(
        f"openapi__{openapi_config.channel_id()}",
        channel.send,
    )

    return channel.get_router()
diff --git a/bot/vikingbot/channels/openapi_models.py b/bot/vikingbot/channels/openapi_models.py
new file mode 100644
index 00000000..0d4da5c8
--- /dev/null
+++ b/bot/vikingbot/channels/openapi_models.py
@@ -0,0 +1,114 @@
+"""Pydantic models for OpenAPI channel."""
+
+from datetime import datetime
+from enum import Enum
+from typing import Any, Dict, List, Optional
+
+from pydantic import BaseModel, Field
+
+
class MessageRole(str, Enum):
    """Message role enumeration."""

    # str-valued so members serialize directly in JSON payloads.
    USER = "user"
    ASSISTANT = "assistant"
    SYSTEM = "system"
    TOOL = "tool"
+
+
class EventType(str, Enum):
    """Event type enumeration.

    Values mirror the event kinds the OpenAPI channel routes from the
    outbound bus (response / tool_call / tool_result / reasoning).
    """

    RESPONSE = "response"
    TOOL_CALL = "tool_call"
    TOOL_RESULT = "tool_result"
    REASONING = "reasoning"
    ITERATION = "iteration"
+
+
class ChatMessage(BaseModel):
    """A single chat message."""

    role: MessageRole = Field(..., description="Role of the message sender")
    content: str = Field(..., description="Message content")
    # default_factory runs at validation time, per message.
    timestamp: Optional[datetime] = Field(default_factory=datetime.now, description="Message timestamp")
+
+
class ChatRequest(BaseModel):
    """Request body for chat endpoint."""

    message: str = Field(..., description="User message to send", min_length=1)
    session_id: Optional[str] = Field(default=None, description="Session ID (optional, will create new if not provided)")
    user_id: Optional[str] = Field(default=None, description="User identifier (optional)")
    # The /chat/stream endpoint forces this to True regardless of the
    # value supplied by the client.
    stream: bool = Field(default=False, description="Whether to stream the response")
    context: Optional[List[ChatMessage]] = Field(default=None, description="Additional context messages")
+
+
class ChatResponse(BaseModel):
    """Response from chat endpoint (non-streaming)."""

    session_id: str = Field(..., description="Session ID")
    message: str = Field(..., description="Assistant's response message")
    # Intermediate reasoning/tool events captured while the request was
    # being handled; None when there were none.
    events: Optional[List[Dict[str, Any]]] = Field(default=None, description="Intermediate events (thinking, tool calls)")
    timestamp: datetime = Field(default_factory=datetime.now, description="Response timestamp")
+
+
class ChatStreamEvent(BaseModel):
    """A single event in the chat stream (SSE).

    Serialized with model_dump_json() as the ``data:`` payload of each
    server-sent-event line.
    """

    event: EventType = Field(..., description="Event type")
    data: Any = Field(..., description="Event data")
    timestamp: datetime = Field(default_factory=datetime.now, description="Event timestamp")
+
+
class SessionInfo(BaseModel):
    """Session information (summary view, without messages)."""

    id: str = Field(..., description="Session ID")
    created_at: datetime = Field(..., description="Session creation time")
    last_active: datetime = Field(..., description="Last activity time")
    message_count: int = Field(default=0, description="Number of messages in session")
+
+
class SessionCreateRequest(BaseModel):
    """Request to create a new session. All fields are optional."""

    user_id: Optional[str] = Field(default=None, description="User identifier")
    metadata: Optional[Dict[str, Any]] = Field(default=None, description="Optional session metadata")
+
+
class SessionCreateResponse(BaseModel):
    """Response from session creation."""

    session_id: str = Field(..., description="Created session ID")
    created_at: datetime = Field(default_factory=datetime.now, description="Creation timestamp")
+
+
class SessionListResponse(BaseModel):
    """Response listing all sessions."""

    sessions: List[SessionInfo] = Field(default_factory=list, description="List of sessions")
    total: int = Field(..., description="Total number of sessions")
+
+
class SessionDetailResponse(BaseModel):
    """Detailed session information including messages."""

    session: SessionInfo = Field(..., description="Session information")
    messages: List[ChatMessage] = Field(default_factory=list, description="Session messages")
+
+
class HealthResponse(BaseModel):
    """Health check response returned by GET /health."""

    status: str = Field(default="healthy", description="Service status")
    version: Optional[str] = Field(default=None, description="API version")
    timestamp: datetime = Field(default_factory=datetime.now, description="Check timestamp")
+
+
class ErrorResponse(BaseModel):
    """Generic error response envelope."""

    error: str = Field(..., description="Error message")
    code: Optional[str] = Field(default=None, description="Error code")
    detail: Optional[str] = Field(default=None, description="Detailed error information")
diff --git a/bot/vikingbot/channels/qq.py b/bot/vikingbot/channels/qq.py
index 237b4fbc..49eda8fd 100644
--- a/bot/vikingbot/channels/qq.py
+++ b/bot/vikingbot/channels/qq.py
@@ -60,7 +60,7 @@ def __init__(self, config: QQChannelConfig, bus: MessageBus, **kwargs):
async def start(self) -> None:
"""Start the QQ bot."""
if not QQ_AVAILABLE:
- logger.exception("QQ SDK not installed. Run: pip install qq-botpy")
+ logger.exception("QQ SDK not installed. Install with: uv pip install 'vikingbot[qq]' (or uv pip install -e \".[qq]\" for local dev)")
return
if not self.config.app_id or not self.config.secret:
@@ -98,6 +98,10 @@ async def stop(self) -> None:
async def send(self, msg: OutboundMessage) -> None:
"""Send a message through QQ."""
+ # Only send normal response messages, skip thinking/tool_call/etc.
+ if not msg.is_normal_message:
+ return
+
if not self._client:
logger.warning("QQ client not initialized")
return
diff --git a/bot/vikingbot/channels/single_turn.py b/bot/vikingbot/channels/single_turn.py
new file mode 100644
index 00000000..d4c44f09
--- /dev/null
+++ b/bot/vikingbot/channels/single_turn.py
@@ -0,0 +1,95 @@
+# Copyright (c) 2026 Beijing Volcano Engine Technology Co., Ltd.
+# SPDX-License-Identifier: Apache-2.0
+"""Single-turn channel - no extra output, just the result."""
+
+import asyncio
+from pathlib import Path
+from typing import Any
+import json
+
+from loguru import logger
+
+from vikingbot.bus.events import InboundMessage, OutboundMessage, OutboundEventType
+from vikingbot.bus.queue import MessageBus
+from vikingbot.channels.base import BaseChannel
+from vikingbot.config.schema import SessionKey, BaseChannelConfig
+
+
class SingleTurnChannelConfig(BaseChannelConfig):
    """Configuration for SingleTurnChannel."""

    enabled: bool = True
    # Typed Any rather than ChannelType so the literal "cli" string is
    # accepted without enum validation.
    type: Any = "cli"

    def channel_id(self) -> str:
        return "chat"
+
+
class SingleTurnChannel(BaseChannel):
    """
    Single-turn channel for one-off messages.

    Only outputs the final result, no extra messages, no thinking/tool call display.
    Only error-level logs are shown.
    """

    name: str = "single_turn"

    def __init__(
        self,
        config: BaseChannelConfig,
        bus: MessageBus,
        workspace_path: Path | None = None,
        message: str = "",
        session_id: str = "cli__chat__default",
        markdown: bool = True,
        eval: bool = False,
    ):
        # NOTE(review): the `eval` parameter shadows the builtin; kept
        # as-is for keyword-argument compatibility with callers. When
        # True, the final response is wrapped in a JSON envelope with
        # token usage (see send()).
        super().__init__(config, bus, workspace_path)
        self.message = message
        self.session_id = session_id
        self.markdown = markdown
        self._response_received = asyncio.Event()
        self._last_response: str | None = None
        self._eval = eval

    async def start(self) -> None:
        """Start the single-turn channel - send message and wait for response."""
        self._running = True

        # Send the message
        msg = InboundMessage(
            session_key=SessionKey.from_safe_name(self.session_id),
            sender_id="default",
            content=self.message,
        )
        await self.bus.publish_inbound(msg)

        # Wait for response with timeout (5 minutes).
        try:
            await asyncio.wait_for(self._response_received.wait(), timeout=300.0)
            if self._last_response:
                # Imported lazily - presumably to avoid a circular import
                # with the CLI module; confirm before hoisting to top level.
                from vikingbot.cli.commands import console
                from rich.markdown import Markdown
                from rich.text import Text
                content = self._last_response or ""
                body = Markdown(content) if self.markdown else Text(content)
                console.print(body)
        except asyncio.TimeoutError:
            logger.error("Timeout waiting for response")

    async def stop(self) -> None:
        """Stop the single-turn channel."""
        self._running = False

    async def send(self, msg: OutboundMessage) -> None:
        """Send a message - store final response for later retrieval.

        Non-final events (thinking/tool calls) are intentionally dropped.
        """
        if msg.is_normal_message:
            if self._eval:
                # Wrap the answer with token usage for evaluation harnesses.
                output = {
                    "text": msg.content,
                    "token_usage": msg.token_usage,
                }
                msg.content = json.dumps(output, ensure_ascii=False)
            self._last_response = msg.content
            self._response_received.set()
diff --git a/bot/vikingbot/channels/slack.py b/bot/vikingbot/channels/slack.py
index 42fc07b5..e8744afb 100644
--- a/bot/vikingbot/channels/slack.py
+++ b/bot/vikingbot/channels/slack.py
@@ -74,6 +74,10 @@ async def stop(self) -> None:
async def send(self, msg: OutboundMessage) -> None:
"""Send a message through Slack."""
+ # Only send normal response messages, skip thinking/tool_call/etc.
+ if not msg.is_normal_message:
+ return
+
if not self._web_client:
logger.warning("Slack client not running")
return
diff --git a/bot/vikingbot/channels/stdio.py b/bot/vikingbot/channels/stdio.py
new file mode 100644
index 00000000..58a82443
--- /dev/null
+++ b/bot/vikingbot/channels/stdio.py
@@ -0,0 +1,179 @@
+# Copyright (c) 2026 Beijing Volcano Engine Technology Co., Ltd.
+# SPDX-License-Identifier: Apache-2.0
+"""Stdio channel for vikingbot - communicates via stdin/stdout."""
+
+import asyncio
+import json
+import sys
+from pathlib import Path
+from typing import Any
+
+from loguru import logger
+
+from vikingbot.bus.events import InboundMessage, OutboundMessage
+from vikingbot.bus.queue import MessageBus
+from vikingbot.channels.base import BaseChannel
+from vikingbot.config.schema import SessionKey, BaseChannelConfig, ChannelType
+
+
class StdioChannelConfig(BaseChannelConfig):
    """Configuration for StdioChannel."""

    enabled: bool = True
    # Typed Any rather than ChannelType so the literal "stdio" string is
    # accepted without enum validation.
    type: Any = "stdio"

    def channel_id(self) -> str:
        return "stdio"
+
+
class StdioChannel(BaseChannel):
    """
    Stdio channel for vikingbot.

    This channel communicates via stdin/stdout using JSON messages:
    - Reads JSON messages (or bare text lines) from stdin
    - Publishes them to the MessageBus
    - Subscribes to outbound messages and writes them to stdout
    """

    name: str = "stdio"

    def __init__(
        self, config: BaseChannelConfig, bus: MessageBus, workspace_path: Path | None = None
    ):
        super().__init__(config, bus, workspace_path)
        # NOTE(review): nothing in this class ever enqueues into
        # _response_queue (send() writes to stdout directly), so the
        # response branch of _write_stdout is currently dead code.
        self._response_queue: asyncio.Queue[str] = asyncio.Queue()

    async def start(self) -> None:
        """Start the stdio channel.

        Runs the reader/writer loops until EOF on stdin, a "quit"
        request, or task cancellation.
        """
        self._running = True
        logger.info("Starting stdio channel")

        # Start reader and writer tasks
        reader_task = asyncio.create_task(self._read_stdin())
        writer_task = asyncio.create_task(self._write_stdout())

        # Send ready signal so the parent process knows we are listening.
        await self._send_json({"type": "ready"})

        try:
            await asyncio.gather(reader_task, writer_task)
        except asyncio.CancelledError:
            self._running = False
            reader_task.cancel()
            writer_task.cancel()
            await asyncio.gather(reader_task, writer_task, return_exceptions=True)

    async def stop(self) -> None:
        """Stop the stdio channel.

        Only flips the running flag; the reader may stay blocked on its
        current stdin read until the next line or EOF arrives.
        """
        self._running = False
        logger.info("Stopping stdio channel")

    async def send(self, msg: OutboundMessage) -> None:
        """Send a message via stdout as a single JSON line."""
        if msg.is_normal_message:
            await self._send_json({
                "type": "response",
                "content": msg.content,
            })
        else:
            # For thinking events, just send the content as-is
            await self._send_json({
                "type": "event",
                "event_type": msg.event_type.value if hasattr(msg.event_type, "value") else str(msg.event_type),
                "content": msg.content,
            })

    async def _send_json(self, data: dict[str, Any]) -> None:
        """Send JSON data to stdout (one line, flushed immediately)."""
        try:
            line = json.dumps(data, ensure_ascii=False)
            print(line, flush=True)
        except Exception as e:
            logger.exception(f"Failed to send JSON: {e}")

    async def _read_stdin(self) -> None:
        """Read lines from stdin and publish to bus.

        Blocking reads are pushed to the default executor so the event
        loop stays responsive. Non-JSON lines are treated as plain text
        messages.
        """
        loop = asyncio.get_event_loop()

        while self._running:
            try:
                # Read a line from stdin
                line = await loop.run_in_executor(None, sys.stdin.readline)

                if not line:
                    # EOF
                    self._running = False
                    break

                line = line.strip()
                if not line:
                    continue

                # Parse the input
                try:
                    request = json.loads(line)
                except json.JSONDecodeError:
                    # Treat as simple text message
                    request = {"type": "message", "content": line}

                await self._handle_request(request)

            except Exception as e:
                logger.exception(f"Error reading from stdin: {e}")
                await self._send_json({
                    "type": "error",
                    "message": str(e),
                })

    async def _write_stdout(self) -> None:
        """Write responses from the queue to stdout.

        The short timeout keeps the loop polling _running so it exits
        promptly after stop()/EOF.
        """
        while self._running:
            try:
                # Wait for a response with timeout
                content = await asyncio.wait_for(
                    self._response_queue.get(),
                    timeout=0.5,
                )
                await self._send_json({
                    "type": "response",
                    "content": content,
                })
            except asyncio.TimeoutError:
                continue
            except Exception as e:
                logger.exception(f"Error writing to stdout: {e}")

    async def _handle_request(self, request: dict[str, Any]) -> None:
        """Handle an incoming request.

        Supported types: "ping" (answers "pong"), "message" (published to
        the bus), "quit" (answers "bye" and stops). Anything else yields
        an error reply.
        """
        request_type = request.get("type", "message")

        if request_type == "ping":
            await self._send_json({"type": "pong"})

        elif request_type == "message":
            content = request.get("content", "")
            chat_id = request.get("chat_id", "default")
            sender_id = request.get("sender_id", "user")

            # Create and publish inbound message
            msg = InboundMessage(
                session_key=SessionKey(
                    type="stdio",
                    # NOTE(review): self.channel_id attribute is not set in
                    # this class - presumably provided by BaseChannel; confirm.
                    channel_id=self.channel_id,
                    chat_id=chat_id,
                ),
                sender_id=sender_id,
                content=content,
            )
            await self.bus.publish_inbound(msg)

        elif request_type == "quit":
            await self._send_json({"type": "bye"})
            self._running = False

        else:
            await self._send_json({
                "type": "error",
                "message": f"Unknown request type: {request_type}",
            })
diff --git a/bot/vikingbot/channels/telegram.py b/bot/vikingbot/channels/telegram.py
index ef831862..2e76a32d 100644
--- a/bot/vikingbot/channels/telegram.py
+++ b/bot/vikingbot/channels/telegram.py
@@ -193,6 +193,10 @@ async def stop(self) -> None:
async def send(self, msg: OutboundMessage) -> None:
"""Send a message through Telegram."""
+ # Only send normal response messages, skip thinking/tool_call/etc.
+ if not msg.is_normal_message:
+ return
+
if not self._app:
logger.warning("Telegram bot not running")
return
diff --git a/bot/vikingbot/channels/whatsapp.py b/bot/vikingbot/channels/whatsapp.py
index f5fc981d..23794a88 100644
--- a/bot/vikingbot/channels/whatsapp.py
+++ b/bot/vikingbot/channels/whatsapp.py
@@ -79,6 +79,10 @@ async def stop(self) -> None:
async def send(self, msg: OutboundMessage) -> None:
"""Send a message through WhatsApp."""
+ # Only send normal response messages, skip thinking/tool_call/etc.
+ if not msg.is_normal_message:
+ return
+
if not self._ws or not self._connected:
logger.warning("WhatsApp bridge not connected")
return
diff --git a/bot/vikingbot/cli/commands.py b/bot/vikingbot/cli/commands.py
index ad1bc555..ebf95135 100644
--- a/bot/vikingbot/cli/commands.py
+++ b/bot/vikingbot/cli/commands.py
@@ -3,40 +3,38 @@
import asyncio
import json
import os
-import signal
-from multiprocessing.spawn import prepare
-from pathlib import Path
import select
+import signal
import sys
-from xml.etree.ElementPath import prepare_self
-from loguru import logger
+from pathlib import Path
+
import typer
-from jinja2.filters import prepare_map
+from loguru import logger
+from prompt_toolkit import PromptSession
+from prompt_toolkit.formatted_text import HTML
+from prompt_toolkit.history import FileHistory
+from prompt_toolkit.patch_stdout import patch_stdout
from rich.console import Console
from rich.markdown import Markdown
from rich.table import Table
from rich.text import Text
-from prompt_toolkit import PromptSession
-from prompt_toolkit.formatted_text import HTML
-from prompt_toolkit.history import FileHistory
-from prompt_toolkit.patch_stdout import patch_stdout
-from vikingbot.config.loader import load_config, ensure_config, get_data_dir, get_config_path
-from vikingbot.bus.queue import MessageBus
+from vikingbot import __logo__, __version__
from vikingbot.agent.loop import AgentLoop
-
-from vikingbot.session.manager import SessionManager
+from vikingbot.bus.queue import MessageBus
+from vikingbot.channels.manager import ChannelManager
+from vikingbot.config.loader import ensure_config, get_config_path, get_data_dir, load_config
+from vikingbot.config.schema import SessionKey
from vikingbot.cron.service import CronService
from vikingbot.cron.types import CronJob
from vikingbot.heartbeat.service import HeartbeatService
-from vikingbot import __version__, __logo__
-from vikingbot.config.schema import SessionKey
+from vikingbot.integrations.langfuse import LangfuseClient
+# NOTE: load_config is already imported above from vikingbot.config.loader
# Create sandbox manager
from vikingbot.sandbox.manager import SandboxManager
-from vikingbot.utils.helpers import get_source_workspace_path
-from vikingbot.channels.manager import ChannelManager
-
+from vikingbot.session.manager import SessionManager
+from vikingbot.utils.helpers import get_source_workspace_path, set_bot_data_path, get_history_path, get_bridge_path
app = typer.Typer(
name="vikingbot",
@@ -47,6 +45,12 @@
console = Console()
EXIT_COMMANDS = {"exit", "quit", "/exit", "/quit", ":q"}
+
+def _init_bot_data(config):
+ """Initialize bot data directory and set global paths."""
+ set_bot_data_path(config.bot_data_path)
+
+
# ---------------------------------------------------------------------------
# CLI input: prompt_toolkit for editing, paste, history, and display
# ---------------------------------------------------------------------------
@@ -107,7 +111,7 @@ def _init_prompt_session() -> None:
except Exception:
pass
- history_file = Path.home() / ".vikingbot" / "history" / "cli_history"
+ history_file = get_history_path() / "cli_history"
history_file.parent.mkdir(parents=True, exist_ok=True)
_PROMPT_SESSION = PromptSession(
@@ -145,7 +149,7 @@ async def _read_interactive_input_async() -> str:
try:
with patch_stdout():
return await _PROMPT_SESSION.prompt_async(
- HTML("You: "),
+ HTML("You: "),
)
except EOFError as exc:
raise KeyboardInterrupt from exc
@@ -165,15 +169,18 @@ def main(
pass
-def _make_provider(config):
+def _make_provider(config, langfuse_client: LangfuseClient | None = None):
"""Create LiteLLMProvider from config. Allows starting without API key."""
from vikingbot.providers.litellm_provider import LiteLLMProvider
- p = config.get_provider()
- model = config.agents.defaults.model
+
+ config = load_config()
+ p = config.agents
+
+ model = p.model
api_key = p.api_key if p else None
- api_base = config.get_api_base()
- provider_name = config.get_provider_name()
+ api_base = p.api_base if p else None
+ provider_name = p.provider if p else None
if not (api_key) and not model.startswith("bedrock/"):
console.print("[yellow]Warning: No API key configured.[/yellow]")
@@ -185,6 +192,7 @@ def _make_provider(config):
default_model=model,
extra_headers=p.extra_headers if p else None,
provider_name=provider_name,
+ # langfuse_client=langfuse_client,
)
@@ -195,14 +203,17 @@ def _make_provider(config):
@app.command()
def gateway(
- port: int = typer.Option(18790, "--port", "-p", help="Gateway port"),
- console_port: int = typer.Option(18791, "--console-port", help="Console web UI port"),
+ port: int = typer.Option(18791, "--port", "-p", help="Gateway port"),
+ # console_port: int = typer.Option(18791, "--console-port", help="Console web UI port"),
enable_console: bool = typer.Option(
True, "--console/--no-console", help="Enable console web UI"
),
+ agent: bool = typer.Option(
+ True, "--agent/--no-agent", help="Enable agent loop for OpenAPI/chat"
+ ),
verbose: bool = typer.Option(False, "--verbose", "-v", help="Verbose output"),
):
- """Start the vikingbot gateway."""
+ """Start the vikingbot gateway with OpenAPI chat enabled by default."""
if verbose:
import logging
@@ -211,51 +222,103 @@ def gateway(
bus = MessageBus()
config = ensure_config()
- session_manager = SessionManager(config.workspace_path)
+ _init_bot_data(config)
+ session_manager = SessionManager(config.bot_data_path)
+
+ # Create FastAPI app for OpenAPI
+ from fastapi import FastAPI
+ fastapi_app = FastAPI(
+ title="Vikingbot OpenAPI",
+ description="HTTP API for Vikingbot chat",
+ version="1.0.0",
+ )
cron = prepare_cron(bus)
- channels = prepare_channel(config, bus)
+ channels = prepare_channel(config, bus, fastapi_app=fastapi_app, enable_openapi=True, openapi_port=port)
agent_loop = prepare_agent_loop(config, bus, session_manager, cron)
heartbeat = prepare_heartbeat(config, agent_loop, session_manager)
async def run():
+ import uvicorn
+
+ # Setup OpenAPI routes before starting
+ openapi_channel = None
+ for name, channel in channels.channels.items():
+ if hasattr(channel, 'name') and channel.name == "openapi":
+ openapi_channel = channel
+ break
+
+ if openapi_channel is not None and hasattr(openapi_channel, '_setup_routes'):
+ openapi_channel._setup_routes()
+ logger.info("OpenAPI routes registered")
+
+ # Start uvicorn server for OpenAPI
+ config_uvicorn = uvicorn.Config(
+ fastapi_app,
+ host="0.0.0.0",
+ port=port,
+ log_level="info",
+ )
+ server = uvicorn.Server(config_uvicorn)
+
tasks = []
tasks.append(cron.start())
tasks.append(heartbeat.start())
tasks.append(channels.start_all())
tasks.append(agent_loop.run())
- if enable_console:
- tasks.append(start_console(console_port))
+ tasks.append(server.serve()) # Start HTTP server
+ # if enable_console:
+ # tasks.append(start_console(console_port))
await asyncio.gather(*tasks)
asyncio.run(run())
-def prepare_agent_loop(config, bus, session_manager, cron):
- sandbox_parent_path = config.workspace_path
+def prepare_agent_loop(config, bus, session_manager, cron, quiet: bool = False, eval: bool = False):
+ sandbox_parent_path = config.bot_data_path
source_workspace_path = get_source_workspace_path()
sandbox_manager = SandboxManager(config, sandbox_parent_path, source_workspace_path)
- console.print(
- f"[green]✓[/green] Sandbox: enabled (backend={config.sandbox.backend}, mode={config.sandbox.mode})"
- )
- provider = _make_provider(config)
+ if config.sandbox.backend == "direct":
+ logger.warning("Sandbox: disabled (using DIRECT mode - commands run directly on host)")
+ else:
+ logger.info(f"Sandbox: enabled (backend={config.sandbox.backend}, mode={config.sandbox.mode})")
+
+ # Initialize Langfuse if enabled
+ langfuse_client = None
+ # logger.info(f"[LANGFUSE] Config check: has langfuse attr={hasattr(config, 'langfuse')}")
+
+ if hasattr(config, "langfuse") and config.langfuse.enabled:
+ langfuse_client = LangfuseClient(
+ enabled=config.langfuse.enabled,
+ secret_key=config.langfuse.secret_key,
+ public_key=config.langfuse.public_key,
+ base_url=config.langfuse.base_url,
+ )
+ LangfuseClient.set_instance(langfuse_client)
+ if langfuse_client.enabled:
+ logger.info(f"Langfuse: enabled (base_url={config.langfuse.base_url})")
+ else:
+ logger.warning("Langfuse: configured but failed to initialize")
+
+ provider = _make_provider(config, langfuse_client)
# Create agent with cron service
agent = AgentLoop(
bus=bus,
provider=provider,
workspace=config.workspace_path,
- model=config.agents.defaults.model,
- max_iterations=config.agents.defaults.max_tool_iterations,
- memory_window=config.agents.defaults.memory_window,
+ model=config.agents.model,
+ max_iterations=config.agents.max_tool_iterations,
+ memory_window=config.agents.memory_window,
brave_api_key=config.tools.web.search.api_key or None,
exa_api_key=None,
- gen_image_model=config.agents.defaults.gen_image_model,
+ gen_image_model=config.agents.gen_image_model,
exec_config=config.tools.exec,
cron_service=cron,
session_manager=session_manager,
sandbox_manager=sandbox_manager,
config=config,
+ eval=eval
)
# Set the agent reference in cron if it uses the holder pattern
if hasattr(cron, '_agent_holder'):
@@ -263,7 +326,7 @@ def prepare_agent_loop(config, bus, session_manager, cron):
return agent
-def prepare_cron(bus) -> CronService:
+def prepare_cron(bus, quiet: bool = False) -> CronService:
# Create cron service first (callback set after agent creation)
cron_store_path = get_data_dir() / "cron" / "jobs.json"
cron = CronService(cron_store_path)
@@ -314,15 +377,42 @@ async def on_cron_job(job: CronJob) -> str | None:
cron._agent_holder = agent_holder
cron_status = cron.status()
- if cron_status["jobs"] > 0:
- console.print(f"[green]✓[/green] Cron: {cron_status['jobs']} scheduled jobs")
+ if cron_status["jobs"] > 0 and not quiet:
+ logger.info(f"Cron: {cron_status['jobs']} scheduled jobs")
return cron
-def prepare_channel(config, bus):
+def prepare_channel(config, bus, fastapi_app=None, enable_openapi: bool = False, openapi_port: int = 18790):
+ """Prepare channels for the bot.
+
+ Args:
+ config: Bot configuration
+ bus: Message bus for communication
+ fastapi_app: External FastAPI app to register OpenAPI routes on
+ enable_openapi: Whether to enable OpenAPI channel for gateway mode
+ openapi_port: Port for OpenAPI channel (default: 18790)
+ """
+ channels = ChannelManager(bus)
+ channels.load_channels_from_config(config)
+
+ # Enable OpenAPI channel for gateway mode if requested
+ if enable_openapi and fastapi_app is not None:
+ from vikingbot.channels.openapi import OpenAPIChannel, OpenAPIChannelConfig
+
+ openapi_config = OpenAPIChannelConfig(
+ enabled=True,
+ port=openapi_port,
+ api_key="", # No auth required by default
+ )
+ openapi_channel = OpenAPIChannel(
+ openapi_config,
+ bus,
+ app=fastapi_app, # Pass the external FastAPI app
+ )
+ channels.add_channel(openapi_channel)
+ logger.info(f"OpenAPI channel enabled on port {openapi_port}")
- channels = ChannelManager(config, bus)
if channels.enabled_channels:
console.print(f"[green]✓[/green] Channels enabled: {', '.join(channels.enabled_channels)}")
else:
@@ -357,20 +447,22 @@ async def on_heartbeat(prompt: str, session_key: SessionKey | None = None) -> st
async def start_console(console_port):
+ """Start the console web UI in a separate thread within the same process."""
try:
- import subprocess
- import sys
- import os
-
- def start_gradio():
- script_path = os.path.join(
- os.path.dirname(__file__), "..", "console", "console_gradio_simple.py"
- )
- subprocess.Popen([sys.executable, script_path, str(console_port)])
-
- start_gradio()
+ import threading
+ from vikingbot.console.console_gradio_simple import run_console_server
+
+ def run_in_thread():
+ try:
+ run_console_server(console_port)
+ except Exception as e:
+ console.print(f"[yellow]Console server error: {e}[/yellow]")
+
+ thread = threading.Thread(target=run_in_thread, daemon=True)
+ thread.start()
+ console.print(f"[green]✓[/green] Console: http://localhost:{console_port}")
except Exception as e:
- console.print(f"[yellow]Warning: Gradio not available ({e})[/yellow]")
+ console.print(f"[yellow]Warning: Console not available ({e})[/yellow]")
# ============================================================================
@@ -378,92 +470,134 @@ def start_gradio():
# ============================================================================
+# Helper for thinking spinner context
+def _thinking_ctx(logs: bool):
+ """Return a context manager for showing thinking spinner."""
+ if logs:
+ from contextlib import nullcontext
+ return nullcontext()
+ return console.status("[dim]vikingbot is thinking...[/dim]", spinner="dots")
+
+
+def prepare_agent_channel(config, bus, mode: str, message: str | None, session_id: str, markdown: bool, logs: bool, eval: bool = False):
+ """Prepare channel for agent command."""
+ from vikingbot.channels.chat import ChatChannel, ChatChannelConfig
+ from vikingbot.channels.stdio import StdioChannel, StdioChannelConfig
+ from vikingbot.channels.single_turn import SingleTurnChannel, SingleTurnChannelConfig
+
+ channels = ChannelManager(bus)
+
+ if mode == "stdio":
+ channel_config = StdioChannelConfig()
+ channel = StdioChannel(
+ channel_config,
+ bus,
+ workspace_path=config.workspace_path,
+ )
+ channels.add_channel(channel)
+ elif message is not None:
+ # Single message mode - use SingleTurnChannel for clean output
+ channel_config = SingleTurnChannelConfig()
+ channel = SingleTurnChannel(
+ channel_config,
+ bus,
+ workspace_path=config.workspace_path,
+ message=message,
+ session_id=session_id,
+ markdown=markdown,
+ eval=eval,
+ )
+ channels.add_channel(channel)
+ else:
+ # Interactive mode - use ChatChannel with thinking display
+ channel_config = ChatChannelConfig()
+ channel = ChatChannel(
+ channel_config,
+ bus,
+ workspace_path=config.workspace_path,
+ session_id=session_id,
+ markdown=markdown,
+ logs=logs,
+ )
+ channels.add_channel(channel)
+
+ return channels
+
+
@app.command()
-def agent(
+def chat(
message: str = typer.Option(None, "--message", "-m", help="Message to send to the agent"),
- session_id: str = typer.Option("cli__default__direct", "--session", "-s", help="Session ID"),
+ session_id: str = typer.Option(None, "--session", "-s", help="Session ID"),
markdown: bool = typer.Option(
True, "--markdown/--no-markdown", help="Render assistant output as Markdown"
),
logs: bool = typer.Option(
False, "--logs/--no-logs", help="Show vikingbot runtime logs during chat"
),
+ mode: str = typer.Option(
+ "direct", "--mode", help="Mode: direct (interactive), stdio (JSON IPC)"
+ ),
+ eval: bool = typer.Option(
+ False, "--eval", "-e", help="Run evaluation mode, output JSON results"
+ ),
):
"""Interact with the agent directly."""
- if logs:
+ if message is not None:
+ # Single-turn mode: only show error logs
+ logger.remove()
+ logger.add(sys.stderr, level="ERROR")
+ elif logs:
logger.enable("vikingbot")
else:
logger.disable("vikingbot")
- session_key = SessionKey.from_safe_name(session_id)
-
bus = MessageBus()
config = ensure_config()
- session_manager = SessionManager(config.workspace_path)
+ _init_bot_data(config)
+ session_manager = SessionManager(config.bot_data_path)
- cron = prepare_cron(bus)
- agent_loop = prepare_agent_loop(config, bus, session_manager, cron)
+ is_single_turn = message is not None
+ # Use unified default session ID
+ if session_id is None:
+ session_id = "cli__chat__default"
+ cron = prepare_cron(bus, quiet=is_single_turn)
+ channels = prepare_agent_channel(config, bus, mode, message, session_id, markdown, logs, eval)
+ agent_loop = prepare_agent_loop(config, bus, session_manager, cron, quiet=is_single_turn, eval=eval)
- # Show spinner when logs are off (no output to miss); skip when logs are on
- def _thinking_ctx():
- if logs:
- from contextlib import nullcontext
+ async def run():
+ if is_single_turn:
+ # Single-turn mode: run channels and agent, exit after response
+ task_cron = asyncio.create_task(cron.start())
+ task_channels = asyncio.create_task(channels.start_all())
+ task_agent = asyncio.create_task(agent_loop.run())
+
+ # Wait for channels to complete (it will complete after getting response)
+ done, pending = await asyncio.wait(
+ [task_channels],
+ return_when=asyncio.FIRST_COMPLETED
+ )
- return nullcontext()
- # Animated spinner is safe to use with prompt_toolkit input handling
- return console.status("[dim]vikingbot is thinking...[/dim]", spinner="dots")
+ # Cancel all other tasks
+ for task in pending:
+ task.cancel()
+ task_cron.cancel()
+ task_agent.cancel()
- if message:
- # Single message mode
- async def run_once():
- with _thinking_ctx():
- response = await agent_loop.process_direct(message, session_key=session_key)
- _print_agent_response(response, render_markdown=markdown)
+ # Wait for cancellation
+ await asyncio.gather(task_cron, task_agent, return_exceptions=True)
+ else:
+ # Interactive mode: run forever
+ tasks = []
+ tasks.append(cron.start())
+ tasks.append(channels.start_all())
+ tasks.append(agent_loop.run())
- asyncio.run(run_once())
- else:
- # Interactive mode
- _init_prompt_session()
- console.print(
- f"{__logo__} Interactive mode (type [bold]exit[/bold] or [bold]Ctrl+C[/bold] to quit)\n"
- )
+ await asyncio.gather(*tasks)
- def _exit_on_sigint(signum, frame):
- _restore_terminal()
- console.print("\nGoodbye!")
- os._exit(0)
-
- signal.signal(signal.SIGINT, _exit_on_sigint)
-
- async def run_interactive():
- while True:
- try:
- _flush_pending_tty_input()
- user_input = await _read_interactive_input_async()
- command = user_input.strip()
- if not command:
- continue
-
- if _is_exit_command(command):
- _restore_terminal()
- console.print("\nGoodbye!")
- break
-
- with _thinking_ctx():
- response = await agent_loop.process_direct(
- user_input, session_key=session_key
- )
- _print_agent_response(response, render_markdown=markdown)
- except KeyboardInterrupt:
- _restore_terminal()
- console.print("\nGoodbye!")
- break
- except EOFError:
- _restore_terminal()
- console.print("\nGoodbye!")
- break
-
- asyncio.run(run_interactive())
+ try:
+ asyncio.run(run())
+ except KeyboardInterrupt:
+ console.print("\nGoodbye!")
# ============================================================================
@@ -478,7 +612,6 @@ async def run_interactive():
@channels_app.command("status")
def channels_status():
"""Show channel status."""
- from vikingbot.config.loader import load_config
from vikingbot.config.schema import ChannelType
config = load_config()
@@ -525,7 +658,7 @@ def _get_bridge_dir() -> Path:
import subprocess
# User's bridge location
- user_bridge = Path.home() / ".vikingbot" / "bridge"
+ user_bridge = get_bridge_path()
# Check if already built
if (user_bridge / "dist" / "index.js").exists():
@@ -548,7 +681,7 @@ def _get_bridge_dir() -> Path:
if not source:
console.print("[red]Bridge source not found.[/red]")
- console.print("Try reinstalling: pip install --force-reinstall vikingbot")
+ console.print("Try reinstalling: uv pip install --force-reinstall vikingbot")
raise typer.Exit(1)
console.print(f"{__logo__} Setting up bridge...")
@@ -581,7 +714,7 @@ def _get_bridge_dir() -> Path:
def channels_login():
"""Link device via QR code."""
import subprocess
- from vikingbot.config.loader import load_config
+
from vikingbot.config.schema import ChannelType
config = load_config()
@@ -763,7 +896,7 @@ async def run():
return await service.run_job(job_id, force=force)
if asyncio.run(run()):
- console.print(f"[green]✓[/green] Job executed")
+ console.print("[green]✓[/green] Job executed")
else:
console.print(f"[red]Failed to run job {job_id}[/red]")
@@ -776,7 +909,6 @@ async def run():
@app.command()
def status():
"""Show vikingbot status."""
- from vikingbot.config.loader import load_config, get_config_path
config_path = get_config_path()
config = load_config()
@@ -794,7 +926,7 @@ def status():
if config_path.exists():
from vikingbot.providers.registry import PROVIDERS
- console.print(f"Model: {config.agents.defaults.model}")
+ console.print(f"Model: {config.agents.model}")
# Check API keys from registry
for spec in PROVIDERS:
@@ -814,34 +946,16 @@ def status():
)
-@app.command()
-def tui(
- console_port: int = typer.Option(18791, "--console-port", help="Console web UI port"),
- enable_console: bool = typer.Option(
- True, "--console/--no-console", help="Enable console web UI"
- ),
-):
- """Launch vikingbot TUI interface interface."""
- """Interact with the agent directly."""
- logger.enable("vikingbot")
- if enable_console:
- console.print(f"[green]✓[/green] Console: http://localhost:{console_port} ")
-
- bus = MessageBus()
- config = ensure_config()
- session_manager = SessionManager(config.workspace_path)
-
- cron = prepare_cron(bus)
- agent_loop = prepare_agent_loop(config, bus, session_manager, cron)
-
- async def run():
- tasks = []
- from vikingbot.tui.app import run_tui
-
- tasks.append(run_tui(agent_loop, bus, config))
- await asyncio.gather(*tasks)
+# ============================================================================
+# Test Commands
+# ============================================================================
- asyncio.run(run())
+try:
+ from vikingbot.cli.test_commands import test_app
+ app.add_typer(test_app, name="test")
+except ImportError:
+ # If test commands not available, don't add them
+ pass
if __name__ == "__main__":
diff --git a/bot/vikingbot/config/loader.py b/bot/vikingbot/config/loader.py
index 25c464f5..4a17e76a 100644
--- a/bot/vikingbot/config/loader.py
+++ b/bot/vikingbot/config/loader.py
@@ -1,6 +1,7 @@
"""Configuration loading utilities."""
import json
+import os
from pathlib import Path
from typing import Any
from loguru import logger
@@ -8,8 +9,26 @@
def get_config_path() -> Path:
- """Get the default configuration file path."""
- return Path.home() / ".vikingbot" / "config.json"
+ """Get the path to ov.conf config file.
+
+ Resolution order:
+ 1. OPENVIKING_CONFIG_FILE environment variable
+ 2. ~/.openviking/ov.conf
+ """
+ return _resolve_ov_conf_path()
+
+
+def _resolve_ov_conf_path() -> Path:
+ """Resolve the ov.conf file path."""
+ # Check environment variable first
+ env_path = os.environ.get("OPENVIKING_CONFIG_FILE")
+ if env_path:
+ path = Path(env_path).expanduser()
+ if path.exists():
+ return path
+
+ # Default path
+ return Path.home() / ".openviking" / "ov.conf"
def get_data_dir() -> Path:
@@ -20,23 +39,30 @@ def get_data_dir() -> Path:
def ensure_config():
+ """Ensure ov.conf exists, create with default bot config if not."""
config_path = get_config_path()
+
if not config_path.exists():
logger.info("Config not found, creating default config...")
- config = Config()
- save_config(config)
+ # Create directory if needed
+ config_path.parent.mkdir(parents=True, exist_ok=True)
+
+ # Create default config with empty bot section
+ default_config = Config()
+ save_config(default_config, config_path)
logger.info(f"[green]✓[/green] Created default config at {config_path}")
+
config = load_config(config_path)
return config
def load_config(config_path: Path | None = None) -> Config:
"""
- Load configuration from file or create default.
+ Load configuration from ov.conf's bot field, and merge vlm config for model.
Args:
- config_path: Optional path to config file. Uses default if not provided.
+ config_path: Optional path to ov.conf file. Uses default if not provided.
Returns:
Loaded configuration object.
@@ -46,9 +72,32 @@ def load_config(config_path: Path | None = None) -> Config:
if path.exists():
try:
with open(path) as f:
- data = json.load(f)
- data = _migrate_config(data)
- return Config.model_validate(convert_keys(data))
+ full_data = json.load(f)
+
+ # Extract bot section
+ bot_data = full_data.get("bot", {})
+ bot_data = convert_keys(bot_data)
+
+ # Extract storage.workspace from root level, default to ~/.openviking_data
+ storage_data = full_data.get("storage", {})
+ if isinstance(storage_data, dict) and "workspace" in storage_data:
+ bot_data["storage_workspace"] = storage_data["workspace"]
+ else:
+ bot_data["storage_workspace"] = "~/.openviking/data"
+
+ # Extract and merge vlm config for model settings only
+ # Provider config is directly read from OpenVikingConfig at runtime
+ vlm_data = full_data.get("vlm", {})
+ vlm_data = convert_keys(vlm_data)
+ if vlm_data:
+ _merge_vlm_model_config(bot_data, vlm_data)
+
+ bot_server_data = bot_data.get("ov_server", {})
+ ov_server_data = full_data.get("server", {})
+ _merge_ov_server_config(bot_server_data, ov_server_data)
+ bot_data["ov_server"] = bot_server_data
+
+ return Config.model_validate(bot_data)
except (json.JSONDecodeError, ValueError) as e:
print(f"Warning: Failed to load config from {path}: {e}")
print("Using default configuration.")
@@ -56,45 +105,71 @@ def load_config(config_path: Path | None = None) -> Config:
return Config()
+def _merge_vlm_model_config(bot_data: dict, vlm_data: dict) -> None:
+ """
+ Merge vlm model config into bot config.
+
+ Only sets model - provider config is read directly from OpenVikingConfig.
+ """
+ # Set default model from vlm.model
+ if vlm_data.get("model"):
+ if "agents" not in bot_data:
+ bot_data["agents"] = {}
+ # Prepend provider prefix if provider is specified
+ model = vlm_data["model"]
+ provider = vlm_data.get("provider")
+ if provider and "/" not in model:
+ model = f"{provider}/{model}"
+ bot_data["agents"]["model"] = model
+ bot_data["agents"]["provider"] = provider if provider else ""
+ bot_data["agents"]["api_base"] = vlm_data.get("api_base", "")
+ bot_data["agents"]["api_key"] = vlm_data.get("api_key", "")
+
+def _merge_ov_server_config(bot_data: dict, ov_data: dict) -> None:
+ """
+ Merge ov_server config into bot config.
+ """
+ if "server_url" not in bot_data or not bot_data["server_url"]:
+ host = ov_data.get("host", "127.0.0.1")
+ port = ov_data.get("port", "1933")
+ bot_data["server_url"] = f"http://{host}:{port}"
+ if "root_api_key" not in bot_data or not bot_data["root_api_key"]:
+ bot_data["root_api_key"] = ov_data.get("root_api_key", "")
+ if "root_api_key" in ov_data and ov_data["root_api_key"]:
+ bot_data["mode"] = "remote"
+ else:
+ bot_data["mode"] = "local"
+
def save_config(config: Config, config_path: Path | None = None) -> None:
"""
- Save configuration to file.
+ Save configuration to ov.conf's bot field, preserving other sections.
Args:
config: Configuration to save.
- config_path: Optional path to save to. Uses default if not provided.
+ config_path: Optional path to ov.conf file. Uses default if not provided.
"""
path = config_path or get_config_path()
path.parent.mkdir(parents=True, exist_ok=True)
- data = config.model_dump()
- data = convert_to_camel(data)
-
+ # Read existing config if it exists
+ full_data = {}
+ if path.exists():
+ try:
+ with open(path) as f:
+ full_data = json.load(f)
+ except (json.JSONDecodeError, IOError):
+ pass
+
+ # Update bot section - only save fields that were explicitly set
+ bot_data = config.model_dump(exclude_unset=True)
+ if bot_data:
+ full_data["bot"] = convert_to_camel(bot_data)
+ else:
+ full_data.pop("bot", None)
+
+ # Write back full config
with open(path, "w") as f:
- json.dump(data, f, indent=2)
-
-
-def _migrate_config(data: dict) -> dict:
- """Migrate old config formats to current."""
- # Move sandbox.network/filesystem/runtime to sandbox.backends.srt if they exist
- if "sandbox" in data:
- sandbox = data["sandbox"]
- # Initialize backends if not present
- if "backends" not in sandbox:
- sandbox["backends"] = {}
- if "srt" not in sandbox["backends"]:
- sandbox["backends"]["srt"] = {}
- srt_backend = sandbox["backends"]["srt"]
- # Move network
- if "network" in sandbox:
- srt_backend["network"] = sandbox.pop("network")
- # Move filesystem
- if "filesystem" in sandbox:
- srt_backend["filesystem"] = sandbox.pop("filesystem")
- # Move runtime
- if "runtime" in sandbox:
- srt_backend["runtime"] = sandbox.pop("runtime")
- return data
+ json.dump(full_data, f, indent=2)
def convert_keys(data: Any) -> Any:
diff --git a/bot/vikingbot/config/schema.py b/bot/vikingbot/config/schema.py
index 6a517c70..4bd1e10a 100644
--- a/bot/vikingbot/config/schema.py
+++ b/bot/vikingbot/config/schema.py
@@ -2,8 +2,9 @@
from enum import Enum
from pathlib import Path
-from typing import Union, Any
-from pydantic import BaseModel, Field, ConfigDict
+from typing import Any, Dict, Optional
+
+from pydantic import BaseModel, ConfigDict, Field
from pydantic_settings import BaseSettings, SettingsConfigDict
@@ -19,6 +20,7 @@ class ChannelType(str, Enum):
EMAIL = "email"
SLACK = "slack"
QQ = "qq"
+ OPENAPI = "openapi"
class SandboxBackend(str, Enum):
@@ -41,11 +43,14 @@ class SandboxMode(str, Enum):
class BaseChannelConfig(BaseModel):
"""Base channel configuration."""
- type: ChannelType
+ type: Any = ChannelType.TELEGRAM # Default for backwards compatibility
enabled: bool = True
def channel_id(self) -> str:
- raise "default"
+ return "default"
+
+ def channel_key(self):
+ return f"{getattr(self.type, 'value', self.type)}__{self.channel_id()}"
# ========== Channel helper configs ==========
@@ -95,7 +100,7 @@ class FeishuChannelConfig(BaseChannelConfig):
app_secret: str = ""
encrypt_key: str = ""
verification_token: str = ""
- allow_from: list[str] = Field(default_factory=list)
+    allow_from: list[str] = Field(default_factory=list)  # Feishu user IDs allowed to update agent conversations
def channel_id(self) -> str:
# Use app_id directly as the ID
@@ -237,6 +242,19 @@ def channel_id(self) -> str:
return self.app_id
+class OpenAPIChannelConfig(BaseChannelConfig):
+ """OpenAPI channel configuration for HTTP-based chat API."""
+
+ type: ChannelType = ChannelType.OPENAPI
+ enabled: bool = True
+ api_key: str = "" # If empty, no auth required
+ allow_from: list[str] = Field(default_factory=list)
+ max_concurrent_requests: int = 100
+
+ def channel_id(self) -> str:
+ return "openapi"
+
+
class ChannelsConfig(BaseModel):
"""Configuration for chat channels - array of channel configs."""
@@ -334,6 +352,8 @@ def _parse_channel_config(self, config: dict[str, Any]) -> BaseChannelConfig:
return SlackChannelConfig(**config)
elif channel_type == ChannelType.QQ:
return QQChannelConfig(**config)
+ elif channel_type == ChannelType.OPENAPI:
+ return OpenAPIChannelConfig(**config)
else:
return BaseChannelConfig(**config)
@@ -348,22 +368,17 @@ def get_all_channels(self) -> list[BaseChannelConfig]:
return result
-class AgentDefaults(BaseModel):
- """Default agent configuration."""
+class AgentsConfig(BaseModel):
+ """Agent configuration."""
- workspace: str = "~/.vikingbot/workspace"
model: str = "openai/doubao-seed-2-0-pro-260215"
- max_tokens: int = 8192
- temperature: float = 0.7
max_tool_iterations: int = 50
memory_window: int = 50
gen_image_model: str = "openai/doubao-seedream-4-5-251128"
-
-
-class AgentsConfig(BaseModel):
- """Agent configuration."""
-
- defaults: AgentDefaults = Field(default_factory=AgentDefaults)
+ provider: str = ""
+ api_key: str = ""
+ api_base: str = ""
+    extra_headers: dict[str, str] | None = None
class ProviderConfig(BaseModel):
@@ -418,10 +433,12 @@ class WebSearchConfig(BaseModel):
class OpenVikingConfig(BaseModel):
"""Viking tools configuration."""
- mode: str = "local" # local or remote
+ mode: str = "remote" # local or remote
server_url: str = ""
- user_id: str = ""
- api_key: str = ""
+ root_api_key: str = ""
+ account_id: str = "default"
+ admin_user_id: str = "default"
+ agent_id: str = ""
class WebToolsConfig(BaseModel):
@@ -543,6 +560,15 @@ class SandboxBackendsConfig(BaseModel):
aiosandbox: AioSandboxBackendConfig = Field(default_factory=AioSandboxBackendConfig)
+class LangfuseConfig(BaseModel):
+ """Langfuse observability configuration."""
+
+ enabled: bool = False
+ secret_key: str = "sk-lf-vikingbot-secret-key-2026"
+ public_key: str = "pk-lf-vikingbot-public-key-2026"
+ base_url: str = "http://localhost:3000"
+
+
class SandboxConfig(BaseModel):
"""Sandbox configuration."""
@@ -556,12 +582,13 @@ class Config(BaseSettings):
agents: AgentsConfig = Field(default_factory=AgentsConfig)
channels: list[Any] = Field(default_factory=list)
- providers: ProvidersConfig = Field(default_factory=ProvidersConfig)
+ providers: ProvidersConfig = Field(default_factory=ProvidersConfig, deprecated=True) # Deprecated: Use ov.conf vlm config instead
gateway: GatewayConfig = Field(default_factory=GatewayConfig)
tools: ToolsConfig = Field(default_factory=ToolsConfig)
- openviking: OpenVikingConfig = Field(default_factory=OpenVikingConfig)
+ ov_server: OpenVikingConfig = Field(default_factory=OpenVikingConfig)
sandbox: SandboxConfig = Field(default_factory=SandboxConfig)
heartbeat: HeartbeatConfig = Field(default_factory=HeartbeatConfig)
+ langfuse: LangfuseConfig = Field(default_factory=LangfuseConfig)
hooks: list[str] = Field(["vikingbot.hooks.builtins.openviking_hooks.hooks"])
skills: list[str] = Field(
default_factory=lambda: [
@@ -575,6 +602,8 @@ class Config(BaseSettings):
"summarize",
]
)
+ storage_workspace: str | None = None # From ov.conf root level storage.workspace
+ use_local_memory: bool = False
@property
def channels_config(self) -> ChannelsConfig:
@@ -583,30 +612,61 @@ def channels_config(self) -> ChannelsConfig:
config.channels = self.channels
return config
+ @property
+ def bot_data_path(self) -> Path:
+ """Get expanded bot data path: {storage_workspace}/bot."""
+ return Path(self.storage_workspace).expanduser() / "bot"
+
@property
def workspace_path(self) -> Path:
- """Get expanded workspace path."""
- return Path(self.agents.defaults.workspace).expanduser()
+ """Get expanded workspace path: {storage_workspace}/bot/workspace."""
+ return self.bot_data_path / "workspace"
+
+ @property
+ def ov_data_path(self) -> Path:
+ return self.bot_data_path / "ov_data"
+
+ def _get_vlm_config(self) -> Optional[Dict[str, Any]]:
+ """Get vlm config from OpenVikingConfig. Returns (vlm_config_dict)."""
+ from openviking_cli.utils.config import get_openviking_config
+ ov_config = get_openviking_config()
+
+ if hasattr(ov_config, "vlm"):
+ return ov_config.vlm.model_dump()
+ return None
def _match_provider(
self, model: str | None = None
) -> tuple["ProviderConfig | None", str | None]:
- """Match provider config and its registry name. Returns (config, spec_name)."""
- from vikingbot.providers.registry import PROVIDERS
-
- model_lower = (model or self.agents.defaults.model).lower()
-
- # Match by keyword (order follows PROVIDERS registry)
- for spec in PROVIDERS:
- p = getattr(self.providers, spec.name, None)
- if p and any(kw in model_lower for kw in spec.keywords) and p.api_key:
- return p, spec.name
-
- # Fallback: gateways first, then others (follows registry order)
- for spec in PROVIDERS:
- p = getattr(self.providers, spec.name, None)
- if p and p.api_key:
- return p, spec.name
+ """Match provider config from ov.conf vlm section. Returns (config, spec_name)."""
+ # Get from OpenVikingConfig vlm
+ vlm_config = self._get_vlm_config()
+
+ if vlm_config:
+ provider_name = vlm_config.get("provider")
+ if provider_name:
+ # Build provider config from vlm
+ provider_config = ProviderConfig()
+
+ # Try to get from vlm.providers first
+ if "providers" in vlm_config and provider_name in vlm_config["providers"]:
+ p_data = vlm_config["providers"][provider_name]
+ if "api_key" in p_data:
+ provider_config.api_key = p_data["api_key"]
+ if "api_base" in p_data:
+ provider_config.api_base = p_data["api_base"]
+ if "extra_headers" in p_data:
+ provider_config.extra_headers = p_data["extra_headers"]
+ else:
+ # Fall back to top-level vlm fields
+ if vlm_config.get("api_key"):
+ provider_config.api_key = vlm_config["api_key"]
+ if vlm_config.get("api_base"):
+ provider_config.api_base = vlm_config["api_base"]
+
+ if provider_config.api_key:
+ return provider_config, provider_name
+
return None, None
def get_provider(self, model: str | None = None) -> ProviderConfig | None:
@@ -631,9 +691,6 @@ def get_api_base(self, model: str | None = None) -> str | None:
p, name = self._match_provider(model)
if p and p.api_base:
return p.api_base
- # Only gateways get a default api_base here. Standard providers
- # (like Moonshot) set their base URL via env vars in _setup_env
- # to avoid polluting the global litellm.api_base.
if name:
spec = find_by_name(name)
if spec and spec.is_gateway and spec.default_api_base:
diff --git a/bot/vikingbot/console/console_gradio_simple.py b/bot/vikingbot/console/web_console.py
similarity index 92%
rename from bot/vikingbot/console/console_gradio_simple.py
rename to bot/vikingbot/console/web_console.py
index ef39c2c7..4617dfae 100644
--- a/bot/vikingbot/console/console_gradio_simple.py
+++ b/bot/vikingbot/console/web_console.py
@@ -6,7 +6,7 @@
import gradio as gr
-from vikingbot.config.loader import load_config, save_config
+from vikingbot.config.loader import load_config, save_config, get_config_path
from vikingbot.config.schema import Config, ChannelType, SandboxBackend, SandboxMode
@@ -38,7 +38,7 @@ def create_dashboard_tab():
|--------|-------|
| 🟢 Status | Running |
| 📦 Version | {__version__} |
- | 📁 Config Path | {str(config.workspace_path.parent / "config.json")} |
+ | 📁 Config Path | {str(get_config_path())} |
| 🖥️ Workspace Path | {str(config.workspace_path)} |
""")
@@ -299,7 +299,8 @@ def create_sessions_tab():
status_msg = gr.Markdown("")
def refresh_sessions():
- sessions_dir = Path.home() / ".vikingbot" / "sessions"
+ config = load_config()
+ sessions_dir = config.bot_data_path / "sessions"
if not sessions_dir.exists():
return gr.Dropdown(choices=[], value=None), ""
session_files = list(sessions_dir.glob("*.jsonl")) + list(sessions_dir.glob("*.json"))
@@ -309,7 +310,8 @@ def refresh_sessions():
def load_session(session_name):
if not session_name:
return "", "Please select a session"
- sessions_dir = Path.home() / ".vikingbot" / "sessions"
+ config = load_config()
+ sessions_dir = config.bot_data_path / "sessions"
session_file_jsonl = sessions_dir / f"{session_name}.jsonl"
session_file_json = sessions_dir / f"{session_name}.json"
@@ -403,17 +405,10 @@ def load_file_content(selected_file):
create_workspace_tab()
-if __name__ == "__main__":
- import uvicorn
+def create_console_app(bus=None, config=None):
+ """Create and return the FastAPI app with Gradio mounted."""
from fastapi import FastAPI
- port = 18791
- if len(sys.argv) > 1:
- try:
- port = int(sys.argv[1])
- except ValueError:
- pass
-
# Create FastAPI app for health endpoint
app = FastAPI()
@@ -424,9 +419,42 @@ async def health_endpoint():
return {"status": "healthy", "version": __version__}
+ # Mount OpenAPI router if bus and config are provided
+ if bus is not None and config is not None:
+ try:
+ from vikingbot.channels.openapi import get_openapi_router
+
+ openapi_router = get_openapi_router(bus, config)
+ app.include_router(
+ openapi_router,
+ prefix="/api/v1/openapi",
+ tags=["openapi"],
+ )
+ except Exception as e:
+ import logging
+
+ logging.getLogger(__name__).warning(f"Failed to mount OpenAPI router: {e}")
+
# Mount Gradio app
demo.queue()
app = gr.mount_gradio_app(app, demo, path="/")
- # Launch with uvicorn
- uvicorn.run(app, host="0.0.0.0", port=port)
+ return app
+
+
+def run_console_server(port: int = 18791):
+ """Run the console server in the current thread."""
+ import uvicorn
+
+ app = create_console_app()
+ uvicorn.run(app, host="0.0.0.0", port=port, log_level="warning")
+
+
+if __name__ == "__main__":
+ port = 18791
+ if len(sys.argv) > 1:
+ try:
+ port = int(sys.argv[1])
+ except ValueError:
+ pass
+ run_console_server(port)
diff --git a/bot/vikingbot/hooks/base.py b/bot/vikingbot/hooks/base.py
index ed7705b8..4d9e8c68 100644
--- a/bot/vikingbot/hooks/base.py
+++ b/bot/vikingbot/hooks/base.py
@@ -4,6 +4,8 @@
from typing import Any, Dict, Optional
from datetime import datetime
+from vikingbot.config.schema import SessionKey
+
# class HookType(Enum):
# SYNC = "sync"
@@ -16,7 +18,8 @@ class HookContext:
event_type: str
session_id: Optional[str] = None
# 沙箱唯一主键
- sandbox_key: Optional[str] = None
+ workspace_id: Optional[str] = None
+ session_key: Optional[SessionKey] = None
metadata: Dict[str, Any] = None
timestamp: datetime = None
diff --git a/bot/vikingbot/hooks/builtins/openviking_hooks.py b/bot/vikingbot/hooks/builtins/openviking_hooks.py
index ce201f9c..3e3daded 100644
--- a/bot/vikingbot/hooks/builtins/openviking_hooks.py
+++ b/bot/vikingbot/hooks/builtins/openviking_hooks.py
@@ -8,6 +8,7 @@
from ...session import Session
from vikingbot.config.loader import load_config
+from vikingbot.config.schema import SessionKey
try:
from vikingbot.openviking_mount.ov_server import VikingClient
@@ -26,18 +27,45 @@ class OpenVikingCompactHook(Hook):
def __init__(self):
self._client = None
- async def _get_client(self, sandbox_key: str) -> VikingClient:
+ async def _get_client(self, workspace_id: str) -> VikingClient:
if not self._client:
- client = await VikingClient.create(sandbox_key)
+ client = await VikingClient.create(workspace_id)
self._client = client
return self._client
+ def _filter_messages_by_sender(self, messages: list[dict], allow_from: list[str]) -> list[dict]:
+ """筛选出 sender_id 在 allow_from 列表中的消息"""
+ if not allow_from:
+ return []
+ return [msg for msg in messages if msg.get("sender_id") in allow_from]
+
+ def _get_channel_allow_from(self, session_key: SessionKey) -> list[str]:
+ """根据 session_id 获取对应频道的 allow_from 配置"""
+ config = load_config()
+ if not session_key or not config.channels:
+ return []
+
+ # 查找对应类型的 channel config
+ for channel_config in config.channels:
+ if hasattr(channel_config, "type") and channel_config.type == session_key.channel_id:
+ if hasattr(channel_config, "allow_from"):
+ return channel_config.allow_from
+ return []
+
async def execute(self, context: HookContext, **kwargs) -> Any:
vikingbot_session: Session = kwargs.get("session", {})
- session_id = context.session_id
+ session_id = context.session_key.safe_name()
+
try:
- client = await self._get_client(context.sandbox_key)
- result = await client.commit(session_id, vikingbot_session.messages)
+ allow_from = self._get_channel_allow_from(context.session_key)
+ filtered_messages = self._filter_messages_by_sender(vikingbot_session.messages, allow_from)
+
+ if not filtered_messages:
+ logger.info(f"No messages to commit openviking for session {session_id} (allow_from filter applied)")
+ return {"success": True, "message": "No messages matched allow_from filter"}
+
+ client = await self._get_client(context.workspace_id)
+ result = await client.commit(session_id, filtered_messages, load_config().ov_server.admin_user_id)
return result
except Exception as e:
logger.exception(f"Failed to add message to OpenViking: {e}")
@@ -51,26 +79,26 @@ class OpenVikingPostCallHook(Hook):
def __init__(self):
self._client = None
- async def _get_client(self, sandbox_key: str) -> VikingClient:
+ async def _get_client(self, workspace_id: str) -> VikingClient:
if not self._client:
- client = await VikingClient.create(sandbox_key)
+ client = await VikingClient.create(workspace_id)
self._client = client
return self._client
- async def _read_skill_memory(self, sandbox_key: str, skill_name: str) -> str:
- ov_client = await self._get_client(sandbox_key)
+ async def _read_skill_memory(self, workspace_id: str, skill_name: str) -> str:
+ ov_client = await self._get_client(workspace_id)
config = load_config()
- openviking_config = config.openviking
- if not skill_name or (not sandbox_key and openviking_config.mode != "local"):
+ openviking_config = config.ov_server
+ if not skill_name:
return ""
try:
if openviking_config.mode == "local":
skill_memory_uri = f"viking://agent/ffb1327b18bf/memories/skills/{skill_name}.md"
else:
+ agent_space_name = ov_client.get_agent_space_name(openviking_config.admin_user_id)
skill_memory_uri = (
- f"viking://agent/{ov_client.agent_space_name}/memories/skills/{skill_name}.md"
+ f"viking://agent/{agent_space_name}/memories/skills/{skill_name}.md"
)
- # logger.warning(f"skill_memory_uri={skill_memory_uri}")
content = await ov_client.read_content(skill_memory_uri, level="read")
# logger.warning(f"content={content}")
return f"\n\n---\n## Skill Memory\n{content}" if content else ""
@@ -86,7 +114,7 @@ async def execute(self, context: HookContext, tool_name, params, result) -> Any:
skill_name = match.group(1).strip()
# logger.debug(f"skill_name={skill_name}")
- agent_space_name = context.sandbox_key
+ agent_space_name = context.workspace_id
# logger.debug(f"agent_space_name={agent_space_name}")
skill_memory = await self._read_skill_memory(agent_space_name, skill_name)
diff --git a/bot/vikingbot/integrations/__init__.py b/bot/vikingbot/integrations/__init__.py
new file mode 100644
index 00000000..62567e3d
--- /dev/null
+++ b/bot/vikingbot/integrations/__init__.py
@@ -0,0 +1,16 @@
+"""Integrations with external services."""
+
+from typing import TYPE_CHECKING
+
+if TYPE_CHECKING:
+ from vikingbot.integrations.langfuse import LangfuseClient
+
+__all__ = ["LangfuseClient"]
+
+
+def __getattr__(name: str):
+ if name == "LangfuseClient":
+ from vikingbot.integrations.langfuse import LangfuseClient
+
+ return LangfuseClient
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
diff --git a/bot/vikingbot/integrations/langfuse.py b/bot/vikingbot/integrations/langfuse.py
new file mode 100644
index 00000000..cd6f8d2c
--- /dev/null
+++ b/bot/vikingbot/integrations/langfuse.py
@@ -0,0 +1,321 @@
+"""Langfuse integration for LLM observability."""
+
+from contextlib import contextmanager
+from typing import Any, Generator
+
+from loguru import logger
+
+# Try to import langfuse - will be None if not installed
+Langfuse = None
+propagate_attributes = None
+
+try:
+ from langfuse import Langfuse
+ from langfuse import propagate_attributes as _propagate_attributes
+ propagate_attributes = _propagate_attributes
+except ImportError:
+ pass
+
+
+class LangfuseClient:
+ """Wrapper for Langfuse client with optional support."""
+
+ _instance: "LangfuseClient | None" = None
+
+ def __init__(
+ self,
+ enabled: bool = False,
+ secret_key: str = "",
+ public_key: str = "",
+ base_url: str = "https://cloud.langfuse.com",
+ ):
+ self._client = None
+ self.enabled = enabled
+
+ if not self.enabled:
+ return
+
+ if Langfuse is None:
+ logger.warning("Langfuse not installed. Install with: uv pip install vikingbot[langfuse] (or uv pip install -e \".[langfuse]\" for local dev). Configure in ~/.openviking/ov.conf under bot.langfuse")
+ self.enabled = False
+ return
+
+ if not secret_key:
+ logger.warning("Langfuse enabled but no secret_key provided. Configure in ~/.openviking/ov.conf under bot.langfuse")
+ self.enabled = False
+ return
+
+ try:
+ self._client = Langfuse(
+ secret_key=secret_key,
+ public_key=public_key,
+ host=base_url,
+ )
+ self._client.auth_check()
+ except Exception as e:
+ logger.warning(f"Langfuse initialized failed: {type(e).__name__}: {e}")
+ self.enabled = False
+ self._client = None
+
+ @classmethod
+ def get_instance(cls) -> "LangfuseClient":
+ """Get the singleton instance."""
+ if cls._instance is None:
+ logger.warning("[LANGFUSE] No instance set, creating default (disabled) instance")
+ cls._instance = LangfuseClient(enabled=False)
+ return cls._instance
+
+ @classmethod
+ def set_instance(cls, instance: "LangfuseClient") -> None:
+ """Set the singleton instance."""
+ cls._instance = instance
+
+ def flush(self) -> None:
+ """Flush pending events to Langfuse."""
+ if self.enabled and self._client:
+ self._client.flush()
+
+ @contextmanager
+ def propagate_attributes(
+ self,
+ session_id: str | None = None,
+ user_id: str | None = None,
+ ) -> Generator[None, None, None]:
+ """
+ Propagate attributes (session_id, user_id) to all nested observations.
+
+ Args:
+ session_id: Optional session ID to associate with all nested observations
+ user_id: Optional user ID to associate with all nested observations
+ """
+ if not self.enabled:
+ logger.warning("[LANGFUSE] propagate_attributes skipped: Langfuse client not enabled")
+ yield
+ return
+ if not self._client:
+ logger.warning("[LANGFUSE] propagate_attributes skipped: Langfuse client not initialized")
+ yield
+ return
+
+ try:
+ propagate_kwargs = {}
+ if session_id:
+ propagate_kwargs["session_id"] = session_id
+ if user_id:
+ propagate_kwargs["user_id"] = user_id
+
+ if not propagate_kwargs:
+ yield
+ return
+
+ # Use module-level propagate_attributes from langfuse SDK v3
+ global propagate_attributes
+ if propagate_attributes is not None:
+ logger.info(f"[LANGFUSE] Propagating attributes: {list(propagate_kwargs.keys())}")
+ with propagate_attributes(**propagate_kwargs):
+ yield
+ else:
+ logger.warning("[LANGFUSE] propagate_attributes not available (SDK version may not support it)")
+ yield
+ except Exception as e:
+ logger.debug(f"[LANGFUSE] propagate_attributes error: {e}")
+ yield
+
+ @contextmanager
+ def trace(
+ self,
+ name: str,
+ session_id: str | None = None,
+ user_id: str | None = None,
+ metadata: dict[str, Any] | None = None,
+ ) -> Generator[Any, None, None]:
+ """
+ Create a trace context manager.
+ In v3 SDK, trace is implicitly created by first span/generation.
+ """
+ if not self.enabled or not self._client:
+ yield None
+ return
+
+ try:
+ # In v3, we use start_as_current_span to create the root span
+ with self._client.start_as_current_span(
+ name=name,
+ session_id=session_id,
+ user_id=user_id,
+ metadata=metadata or {},
+ ) as span:
+ yield span
+ except Exception as e:
+ logger.debug(f"Langfuse trace error: {e}")
+ yield None
+
+ @contextmanager
+ def span(
+ self,
+ name: str,
+ trace_id: str | None = None,
+ parent_observation_id: str | None = None,
+ metadata: dict[str, Any] | None = None,
+ ) -> Generator[Any, None, None]:
+ """Create a span context manager."""
+ if not self.enabled or not self._client:
+ yield None
+ return
+
+ try:
+ with self._client.start_as_current_span(
+ name=name,
+ metadata=metadata or {},
+ ) as span:
+ yield span
+ except Exception as e:
+ logger.debug(f"Langfuse span error: {e}")
+ yield None
+
+ @contextmanager
+ def generation(
+ self,
+ name: str,
+ model: str,
+ trace_id: str | None = None,
+ parent_observation_id: str | None = None,
+ prompt: list[dict[str, Any]] | None = None,
+ metadata: dict[str, Any] | None = None,
+ ) -> Generator[Any, None, None]:
+ """
+ Create a generation context manager for LLM calls.
+
+ Args:
+ name: Name of the generation
+ model: Model name
+ trace_id: Optional trace ID (not used in v3)
+ parent_observation_id: Optional parent observation ID (not used in v3)
+ prompt: Optional prompt messages
+ metadata: Optional metadata
+ """
+ if not self.enabled or not self._client:
+ yield None
+ return
+
+ try:
+ with self._client.start_as_current_generation(
+ name=name,
+ model=model,
+ input=prompt,
+ metadata=metadata or {},
+ ) as generation:
+ yield generation
+ except Exception as e:
+ logger.debug(f"Langfuse generation error: {e}")
+ yield None
+
+ def update_generation(
+ self,
+ generation: Any,
+ output: str | None = None,
+ usage: dict[str, int] | None = None,
+ metadata: dict[str, Any] | None = None,
+ ) -> None:
+ """Update a generation with output and usage."""
+ if not self.enabled or not generation:
+ return
+
+ try:
+ update_kwargs: dict[str, Any] = {}
+ if output is not None:
+ update_kwargs["output"] = output
+ if usage:
+ update_kwargs["usage"] = {
+ "prompt_tokens": usage.get("prompt_tokens", 0),
+ "completion_tokens": usage.get("completion_tokens", 0),
+ "total_tokens": usage.get("total_tokens", 0),
+ }
+ if metadata:
+ if hasattr(generation, "metadata") and generation.metadata:
+ update_kwargs["metadata"] = {**generation.metadata, **metadata}
+ else:
+ update_kwargs["metadata"] = metadata
+
+ # In v3, update via the generation object's update method
+ if hasattr(generation, "update"):
+ generation.update(**update_kwargs)
+ # Or use client's update_current_generation
+ elif self._client and hasattr(self._client, "update_current_generation"):
+ self._client.update_current_generation(**update_kwargs)
+
+ except Exception as e:
+ logger.debug(f"Langfuse update generation error: {e}")
+
+ @contextmanager
+ def tool_call(
+ self,
+ name: str,
+ input: dict[str, Any] | None = None,
+ session_id: str | None = None,
+ metadata: dict[str, Any] | None = None,
+ ) -> Generator[Any, None, None]:
+ """
+ Create a span for tool/function call execution.
+
+ Args:
+ name: Name of the tool/function
+ input: Input arguments to the tool
+ session_id: Optional session ID for tracing
+ metadata: Optional metadata
+
+ Yields:
+ Langfuse span object or None if not enabled
+ """
+ if not self.enabled or not self._client:
+ yield None
+ return
+
+ try:
+ combined_metadata = metadata or {}
+ if session_id:
+ combined_metadata["session_id"] = session_id
+
+ with self._client.start_as_current_span(
+ name=f"tool:{name}",
+ input=input,
+ metadata=combined_metadata,
+ ) as span:
+ yield span
+ except Exception as e:
+ logger.debug(f"Langfuse tool call span error: {e}")
+ yield None
+
+ def end_tool_call(
+ self,
+ span: Any,
+ output: str | None = None,
+ success: bool = True,
+ metadata: dict[str, Any] | None = None,
+ ) -> None:
+ """
+ End a tool call span with output and status.
+
+ Args:
+ span: The span object from tool_call()
+ output: Output of the tool call
+ success: Whether the tool call succeeded
+ metadata: Optional additional metadata
+ """
+ if not self.enabled or not span:
+ return
+
+ try:
+ update_kwargs: dict[str, Any] = {}
+ if output is not None:
+ update_kwargs["output"] = output
+
+ combined_metadata = metadata or {}
+ combined_metadata["success"] = success
+ update_kwargs["metadata"] = combined_metadata
+
+ if hasattr(span, "update"):
+ span.update(**update_kwargs)
+
+ except Exception as e:
+ logger.debug(f"Langfuse end tool call error: {e}")
diff --git a/bot/vikingbot/openviking_mount/__init__.py b/bot/vikingbot/openviking_mount/__init__.py
index deb92491..cbce294f 100644
--- a/bot/vikingbot/openviking_mount/__init__.py
+++ b/bot/vikingbot/openviking_mount/__init__.py
@@ -5,9 +5,10 @@
让用户可以像操作普通文件一样操作OpenViking上的数据。
"""
+from typing import TYPE_CHECKING
+
from .mount import OpenVikingMount, MountScope, MountConfig, FileInfo
from .manager import OpenVikingMountManager, MountPoint, get_mount_manager
-from .viking_fuse import OpenVikingFUSE, mount_fuse, FUSEMountManager, FUSE_AVAILABLE
from .session_integration import SessionOpenVikingManager, get_session_ov_manager
__all__ = [
@@ -25,3 +26,14 @@
"SessionOpenVikingManager",
"get_session_ov_manager",
]
+
+if TYPE_CHECKING:
+ from .viking_fuse import OpenVikingFUSE, mount_fuse, FUSEMountManager, FUSE_AVAILABLE
+
+
+def __getattr__(name: str):
+ if name in ("OpenVikingFUSE", "mount_fuse", "FUSEMountManager", "FUSE_AVAILABLE"):
+ from .viking_fuse import OpenVikingFUSE, mount_fuse, FUSEMountManager, FUSE_AVAILABLE
+
+ return locals()[name]
+ raise AttributeError(f"module {__name__!r} has no attribute {name!r}")
diff --git a/bot/vikingbot/openviking_mount/ov_server.py b/bot/vikingbot/openviking_mount/ov_server.py
index 51183fa7..1e40bbf6 100644
--- a/bot/vikingbot/openviking_mount/ov_server.py
+++ b/bot/vikingbot/openviking_mount/ov_server.py
@@ -1,11 +1,13 @@
import asyncio
import hashlib
-from typing import Any, Dict, List, Optional
+from typing import List, Dict, Any, Optional
-import openviking as ov
from loguru import logger
+import time
-from vikingbot.config.loader import get_data_dir, load_config
+import openviking as ov
+from vikingbot.config.loader import load_config
+from vikingbot.openviking_mount.user_apikey_manager import UserApiKeyManager
viking_resource_prefix = "viking://resources/"
@@ -13,34 +15,52 @@
class VikingClient:
def __init__(self, agent_id: Optional[str] = None):
config = load_config()
- openviking_config = config.openviking
+ openviking_config = config.ov_server
+ self.openviking_config = openviking_config
+ self.ov_path = config.ov_data_path
if openviking_config.mode == "local":
- ov_data_path = get_data_dir() / "ov_data"
- ov_data_path.mkdir(parents=True, exist_ok=True)
- self.client = ov.AsyncOpenViking(path=str(ov_data_path))
- self.user_id = "default"
+ self.client = ov.AsyncHTTPClient(
+ url=openviking_config.server_url
+ )
self.agent_id = "default"
- self.agent_space_name = self.client.user.agent_space_name()
+ self.account_id = "default"
+ self.admin_user_id = "default"
+ self._apikey_manager = None
else:
self.client = ov.AsyncHTTPClient(
url=openviking_config.server_url,
- api_key=openviking_config.api_key,
+ api_key=openviking_config.root_api_key,
agent_id=agent_id,
)
self.agent_id = agent_id
- self.user_id = openviking_config.user_id
- self.agent_space_name = hashlib.md5(
- (self.user_id + self.agent_id).encode()
- ).hexdigest()[:12]
+ self.account_id = openviking_config.account_id
+ self.admin_user_id = openviking_config.admin_user_id
+ self._apikey_manager = None
+ if self.ov_path:
+ self._apikey_manager = UserApiKeyManager(
+ ov_path=self.ov_path,
+ server_url=openviking_config.server_url,
+ account_id=openviking_config.account_id,
+ )
self.mode = openviking_config.mode
async def _initialize(self):
"""Initialize the client (must be called after construction)"""
await self.client.initialize()
+ # 检查并初始化 admin_user_id(如果配置了)
+ if self.mode == "remote" and self.admin_user_id:
+ user_exists = await self._check_user_exists(self.admin_user_id)
+ if not user_exists:
+ await self._initialize_user(self.admin_user_id, role="admin")
+
@classmethod
async def create(cls, agent_id: Optional[str] = None):
- """Factory method to create and initialize a VikingClient instance"""
+ """Factory method to create and initialize a VikingClient instance.
+
+ Args:
+ agent_id: The agent ID to use
+ """
instance = cls(agent_id)
await instance._initialize()
return instance
@@ -70,6 +90,9 @@ def _relation_to_dict(self, relation: Any) -> Dict[str, Any]:
"reason": getattr(relation, "reason", ""),
}
+ def get_agent_space_name(self, user_id: str) -> str:
+ return hashlib.md5((user_id + self.agent_id).encode()).hexdigest()[:12]
+
async def find(self, query: str, target_uri: Optional[str] = None):
"""搜索资源"""
if target_uri:
@@ -114,6 +137,31 @@ async def read_content(self, uri: str, level: str = "abstract") -> str:
logger.warning(f"Failed to read content from {uri}: {e}")
return ""
+ async def read_user_profile(self, user_id: str) -> str:
+ """读取用户 profile。
+
+ 首先检查用户是否存在,如不存在则初始化用户并返回空字符串。
+ 用户存在时,再查询 profile 信息。
+
+ Args:
+ user_id: 用户ID
+
+ Returns:
+ str: 用户 profile 内容,如果用户不存在或查询失败返回空字符串
+ """
+ # Step 1: 检查用户是否存在
+ user_exists = await self._check_user_exists(user_id)
+
+ # Step 2: 如果用户不存在,初始化用户并直接返回
+ if not user_exists:
+ await self._initialize_user(user_id)
+ return ""
+
+ # Step 3: 用户存在,查询 profile
+ uri = f"viking://user/{user_id}/memories/profile.md"
+ result = await self.read_content(uri=uri, level="read")
+ return result
+
async def search(self, query: str, target_uri: Optional[str] = "") -> Dict[str, Any]:
# session = self.client.session()
@@ -135,8 +183,11 @@ async def search(self, query: str, target_uri: Optional[str] = "") -> Dict[str,
"target_uri": target_uri,
}
- async def search_user_memory(self, query: str) -> list[Any]:
- uri_user_memory = f"viking://user/{self.user_id}/memories/"
+ async def search_user_memory(self, query: str, user_id: str) -> list[Any]:
+ user_exists = await self._check_user_exists(user_id)
+ if not user_exists:
+ return []
+ uri_user_memory = f"viking://user/{user_id}/memories/"
result = await self.client.search(query, target_uri=uri_user_memory)
return (
[self._matched_context_to_dict(m) for m in result.memories]
@@ -144,15 +195,126 @@ async def search_user_memory(self, query: str) -> list[Any]:
else []
)
- async def search_memory(self, query: str, limit: int = 10) -> dict[str, list[Any]]:
- """通过上下文消息,检索viking 的user、Agent memory"""
- uri_user_memory = f"viking://user/{self.user_id}/memories/"
+ async def _check_user_exists(self, user_id: str) -> bool:
+ """检查用户是否存在于账户中。
+
+ Args:
+ user_id: 用户ID
+
+ Returns:
+ bool: 用户是否存在
+ """
+ if self.mode == "local":
+ return True
+ try:
+ res = await self.client.admin_list_users(self.account_id)
+ if not res or len(res) == 0:
+ return False
+ return any(user.get("user_id") == user_id for user in res)
+ except Exception as e:
+ logger.warning(f"Failed to check user existence: {e}")
+ return False
+
+ async def _initialize_user(self, user_id: str, role: str = "user") -> bool:
+ """初始化用户。
+
+ Args:
+ user_id: 用户ID
+
+ Returns:
+ bool: 初始化是否成功
+ """
+ if self.mode == "local":
+ return True
+ try:
+ result = await self.client.admin_register_user(
+ account_id=self.account_id, user_id=user_id, role=role
+ )
+
+ # Save the API key if returned and we're in remote mode with a valid apikey manager
+ if self._apikey_manager and isinstance(result, dict):
+ api_key = result.get("user_key")
+ if api_key:
+ self._apikey_manager.set_apikey(user_id, api_key)
+
+ return True
+ except Exception as e:
+ if "User already exists" in str(e):
+ return True
+ logger.warning(f"Failed to initialize user {user_id}: {e}")
+ return False
+
+ async def _get_or_create_user_apikey(self, user_id: str) -> Optional[str]:
+ """获取或创建用户的 API key。
+
+ 优先从本地 json 文件获取,如果本地没有则:
+ 1. 删除用户(如果存在)
+ 2. 重新创建用户
+ 3. 保存新的 API key
+
+ Args:
+ user_id: 用户ID
+
+ Returns:
+ API key 或 None(如果获取失败)
+ """
+ if not self._apikey_manager:
+ return None
+
+ # Step 1: Check local storage first
+ api_key = self._apikey_manager.get_apikey(user_id)
+ if api_key:
+ return api_key
+
+ try:
+ # 2a. Remove user if exists
+ user_exists = await self._check_user_exists(user_id)
+ if user_exists:
+ await self.client.admin_remove_user(self.account_id, user_id)
+ # 2b. Recreate user - this will save API key in _initialize_user
+ success = await self._initialize_user(user_id)
+ if not success:
+ logger.warning(f"Failed to recreate user {user_id}")
+ return None
+
+ # 2c. Get API key from local storage (it was saved by _initialize_user)
+ api_key = self._apikey_manager.get_apikey(user_id)
+ if api_key:
+ return api_key
+ else:
+ return None
+
+ except Exception as e:
+ logger.error(f"Error getting or creating API key for user {user_id}: {e}")
+ return None
+
+ async def search_memory(
+ self, query: str, user_id: str, limit: int = 10
+ ) -> dict[str, list[Any]]:
+ """通过上下文消息,检索viking 的user、Agent memory。
+
+ 首先检查用户是否存在,如不存在则初始化用户并返回空结果。
+ 用户存在时,再进行记忆检索。
+ """
+ # Step 1: 检查用户是否存在
+ user_exists = await self._check_user_exists(user_id)
+
+ # Step 2: 如果用户不存在,初始化用户并直接返回
+ if not user_exists:
+ await self._initialize_user(user_id)
+ return {
+ "user_memory": [],
+ "agent_memory": [],
+ }
+ # Step 3: 用户存在,查询记忆
+ uri_user_memory = f"viking://user/{user_id}/memories/"
user_memory = await self.client.find(
query=query,
target_uri=uri_user_memory,
limit=limit,
)
- uri_agent_memory = f"viking://agent/{self.agent_space_name}/memories/"
+ agent_space_name = self.get_agent_space_name(user_id)
+ uri_agent_memory = f"viking://agent/{agent_space_name}/memories/"
agent_memory = await self.client.find(
query=query,
target_uri=uri_agent_memory,
@@ -171,114 +333,101 @@ async def glob(self, pattern: str, uri: Optional[str] = None) -> Dict[str, Any]:
"""通过 glob 模式匹配文件"""
return await self.client.glob(pattern, uri=uri)
- async def commit(self, session_id: str, messages: list[dict[str, Any]]):
+ async def commit(self, session_id: str, messages: list[dict[str, Any]], user_id: Optional[str] = None):
"""提交会话"""
import re
import uuid
-
from openviking.message.part import Part, TextPart, ToolPart
- session = self.client.session(session_id)
-
- if self.mode == "local":
- for message in messages:
- # logger.debug(f"message === {message}")
- role = message.get("role")
- content = message.get("content")
- tools_used = message.get("tools_used") or []
-
- parts: list[Part] = []
-
- if content:
- parts.append(TextPart(text=content))
-
- for tool_info in tools_used:
- tool_name = tool_info.get("tool_name", "")
- # logger.debug(f"tool_name === {tool_name}")
- if not tool_name:
- continue
-
- tool_id = f"{tool_name}_{uuid.uuid4().hex[:8]}"
- tool_input = None
- try:
- import json
-
- args_str = tool_info.get("args", "{}")
- tool_input = json.loads(args_str) if args_str else {}
- except Exception:
- tool_input = {"raw_args": tool_info.get("args", "")}
-
- result_str = str(tool_info.get("result", ""))
-
- skill_uri = ""
- if tool_name == "read_file" and result_str:
- match = re.search(r"^---\s*\nname:\s*(.+?)\s*\n", result_str, re.MULTILINE)
- if match:
- skill_name = match.group(1).strip()
- skill_uri = f"viking://agent/skills/{skill_name}"
- # logger.debug(f"skill_uri === {skill_uri}")
-
- execute_success = tool_info.get("execute_success", True)
- tool_status = "completed" if execute_success else "error"
- # logger.debug(f"tool_info={tool_info}")
- parts.append(
- ToolPart(
- tool_id=tool_id,
- tool_name=tool_name,
- tool_uri=f"viking://session/{session_id}/tools/{tool_id}",
- tool_input=tool_input,
- tool_output=result_str[:2000],
- tool_status=tool_status,
- skill_uri=skill_uri,
- duration_ms=float(tool_info.get("duration", 0.0)),
- prompt_tokens=tool_info.get("input_token"),
- completion_tokens=tool_info.get("output_token"),
- )
+ user_exists = await self._check_user_exists(user_id)
+ if not user_exists:
+ success = await self._initialize_user(user_id)
+ if not success:
+ return {"error": "Failed to initialize user"}
+
+ # For remote mode, try to get user's API key and create a dedicated client
+ client = self.client
+ start = time.time()
+ if self.mode == "remote" and user_id and user_id != self.admin_user_id and self._apikey_manager:
+ user_api_key = await self._get_or_create_user_apikey(user_id)
+ if user_api_key:
+ # Create a new HTTP client with user's API key
+ client = ov.AsyncHTTPClient(
+ url=self.openviking_config.server_url,
+ api_key=user_api_key,
+ agent_id=self.agent_id,
+ )
+ await client.initialize()
+
+ create_res = await client.create_session()
+ session_id = create_res['session_id']
+ session = client.session(session_id)
+
+ for message in messages:
+ role = message.get("role")
+ content = message.get("content")
+ tools_used = message.get("tools_used") or []
+
+ parts: list[Any] = []
+
+ if content:
+ parts.append(TextPart(text=content))
+
+ for tool_info in tools_used:
+ tool_name = tool_info.get("tool_name", "")
+ if not tool_name:
+ continue
+
+ tool_id = f"{tool_name}_{uuid.uuid4().hex[:8]}"
+ tool_input = None
+ try:
+ import json
+
+ args_str = tool_info.get("args", "{}")
+ tool_input = json.loads(args_str) if args_str else {}
+ except Exception:
+ tool_input = {"raw_args": tool_info.get("args", "")}
+
+ result_str = str(tool_info.get("result", ""))
+
+ skill_uri = ""
+ if tool_name == "read_file" and result_str:
+ match = re.search(r"^---\s*\nname:\s*(.+?)\s*\n", result_str, re.MULTILINE)
+ if match:
+ skill_name = match.group(1).strip()
+ skill_uri = f"viking://agent/skills/{skill_name}"
+
+ execute_success = tool_info.get("execute_success", True)
+ tool_status = "completed" if execute_success else "error"
+ parts.append(
+ ToolPart(
+ tool_id=tool_id,
+ tool_name=tool_name,
+ tool_uri=f"viking://session/{session_id}/tools/{tool_id}",
+ tool_input=tool_input,
+ tool_output=result_str[:2000],
+ tool_status=tool_status,
+ skill_uri=skill_uri,
+ duration_ms=float(tool_info.get("duration", 0.0)),
+ prompt_tokens=tool_info.get("input_token"),
+ completion_tokens=tool_info.get("output_token"),
)
+ )
- if not parts:
- parts = [TextPart(text=content or "")]
-
- session.add_message(role=role, parts=parts)
+ if not parts:
+ continue
+ await session.add_message(role=role, parts=parts)
- result = session.commit()
- else:
- for message in messages:
- await session.add_message(role=message.get("role"), content=message.get("content"))
- result = await session.commit()
- logger.debug(f"Message add ed to OpenViking session {session_id}")
+ result = await session.commit()
+ if client is not self.client:
+ await client.close()
+ logger.info(f"time spent: {time.time() - start}")
+ logger.debug(f"Message added to OpenViking session {session_id}, user: {user_id}")
return {"success": result["status"]}
- def close(self):
+ async def close(self):
"""关闭客户端"""
- self.client.close()
-
- def _parse_viking_memory(self, result: Any) -> str:
- if result and len(result) > 0:
- user_memories = []
- for idx, memory in enumerate(result, start=1):
- user_memories.append(
- f"{idx}. {getattr(memory, 'abstract', '')}; "
- f"uri: {getattr(memory, 'uri', '')}; "
- f"isDir: {getattr(memory, 'is_leaf', False)}; "
- f"related score: {getattr(memory, 'score', 0.0)}"
- )
- return "\n".join(user_memories)
- return ""
-
- async def get_viking_memory_context(
- self, session_id: str, current_message: str, history: list[dict[str, Any]]
- ) -> str:
- result = await self.search_memory(current_message, limit=5)
- if not result:
- return ""
- user_memory = self._parse_viking_memory(result["user_memory"])
- agent_memory = self._parse_viking_memory(result["agent_memory"])
- return (
- f"## Related openviking memories.Using tools to read more details.\n"
- f"### user memories:\n{user_memory}\n"
- f"### agent memories:\n{agent_memory}"
- )
+ await self.client.close()
async def main_test():
@@ -286,33 +435,32 @@ async def main_test():
# res = client.list_resources()
# res = await client.search("头有点疼", target_uri="viking://user/memories/")
# res = await client.get_viking_memory_context("123", current_message="头疼", history=[])
- # res = await client.search_memory("你好")
+ # res = await client.search_memory("你好", "user_1")
# res = await client.list_resources("viking://resources/")
# res = await client.read_content("viking://user/memories/profile.md", level="read")
# res = await client.add_resource("/Users/bytedance/Documents/论文/吉比特年报.pdf", "吉比特年报")
res = await client.commit(
- "123",
- [
- {"role": "user", "content": "我叫吴彦祖"},
- {
- "role": "assistant",
- "content": "好的吴彦祖😎,我已经记 住你的名字啦,之后随时都可以认出你~",
- },
- ],
+ session_id="99999",
+ messages=[{"role": "user", "content": "你好"}],
+ user_id="1010101010",
)
# res = await client.commit("1234", [{"role": "user", "content": "帮我搜索 Python asyncio 教程"}
# ,{"role": "assistant", "content": "我来帮你r搜索 Python asyncio 相关的教程。"}])
print(res)
- print("等待后台处理完成...")
- await client.client.wait_processed(timeout=60)
+ await client.close()
print("处理完成!")
async def account_test():
- client = ov.AsyncHTTPClient(url="")
+ client = ov.AsyncHTTPClient(url="http://localhost:1933", api_key="test")
await client.initialize()
- res = await client.search("test", target_uri="viking://memories/")
+
+ # res = await client.admin_list_users("eval")
+ # res = await client.admin_remove_user("default", "ou_69e48b1314d1400af9d40fe3e4c24b8a")
+ # res = await client.admin_remove_user("default", "admin")
+ # res = await client.admin_list_accounts()
+ res = await client.admin_create_account("eval", "default")
print(res)
diff --git a/bot/vikingbot/openviking_mount/user_apikey_manager.py b/bot/vikingbot/openviking_mount/user_apikey_manager.py
new file mode 100644
index 00000000..da638d95
--- /dev/null
+++ b/bot/vikingbot/openviking_mount/user_apikey_manager.py
@@ -0,0 +1,122 @@
+"""User API Key persistence manager for OpenViking remote mode."""
+
+import json
+import hashlib
+from pathlib import Path
+from typing import Optional
+
+from loguru import logger
+
+
+class UserApiKeyManager:
+ """Manages user API key persistence based on server_url and account_id.
+
+ Stores API keys in a JSON file located at:
+ {ov_path}/user_apikeys_{hash}.json
+
+ where {hash} is derived from server_url and account_id.
+ """
+
+ def __init__(self, ov_path: Path, server_url: str, account_id: str):
+ """Initialize the API key manager.
+
+ Args:
+ ov_path: The ov_path where config files are stored
+ server_url: The OpenViking server URL
+ account_id: The account ID
+ """
+ self.ov_path = Path(ov_path)
+ self.server_url = server_url
+ self.account_id = account_id
+
+ # Generate hash from server_url and account_id
+ hash_input = f"{server_url}:{account_id}"
+ self.config_hash = hashlib.md5(hash_input.encode()).hexdigest()[:16]
+
+ # Config file path
+ self.config_dir = self.ov_path
+ self.config_file = self.config_dir / f"user_apikeys_{self.config_hash}.json"
+
+ # In-memory cache
+ self._apikeys: dict[str, str] = {}
+ self._loaded = False
+
+ def _ensure_config_dir(self) -> None:
+ """Ensure the config directory exists."""
+ self.config_dir.mkdir(parents=True, exist_ok=True)
+
+ def _load(self) -> None:
+ """Load API keys from the config file."""
+ if self._loaded:
+ return
+
+ if self.config_file.exists():
+ try:
+ with open(self.config_file, "r", encoding="utf-8") as f:
+ data = json.load(f)
+ self._apikeys = data.get("apikeys", {})
+ except Exception as e:
+ logger.warning(f"Failed to load API keys from {self.config_file}: {e}")
+ self._apikeys = {}
+ else:
+ logger.debug(f"API key config file not found: {self.config_file}")
+
+ self._loaded = True
+
+ def _save(self) -> None:
+ """Save API keys to the config file."""
+ self._ensure_config_dir()
+
+ try:
+ data = {
+ "server_url": self.server_url,
+ "account_id": self.account_id,
+ "apikeys": self._apikeys,
+ }
+
+ with open(self.config_file, "w", encoding="utf-8") as f:
+ json.dump(data, f, indent=2, ensure_ascii=False)
+
+ logger.debug(f"Saved {len(self._apikeys)} API keys to {self.config_file}")
+ except Exception as e:
+ logger.error(f"Failed to save API keys to {self.config_file}: {e}")
+ raise
+
+ def get_apikey(self, user_id: str) -> Optional[str]:
+ """Get the API key for a specific user.
+
+ Args:
+ user_id: The user ID
+
+ Returns:
+ The API key if found, None otherwise
+ """
+ self._load()
+ return self._apikeys.get(user_id)
+
+ def set_apikey(self, user_id: str, api_key: str) -> None:
+ """Set the API key for a specific user.
+
+ Args:
+ user_id: The user ID
+ api_key: The API key to store
+ """
+ self._load()
+ self._apikeys[user_id] = api_key
+ self._save()
+
+ def delete_apikey(self, user_id: str) -> bool:
+ """Delete the API key for a specific user.
+
+ Args:
+ user_id: The user ID
+
+ Returns:
+ True if the key was deleted, False if not found
+ """
+ self._load()
+ if user_id in self._apikeys:
+ del self._apikeys[user_id]
+ self._save()
+ return True
+ return False
diff --git a/bot/vikingbot/openviking_mount/viking_fuse.py b/bot/vikingbot/openviking_mount/viking_fuse.py
index 7d59a19d..d3fac59a 100644
--- a/bot/vikingbot/openviking_mount/viking_fuse.py
+++ b/bot/vikingbot/openviking_mount/viking_fuse.py
@@ -25,7 +25,6 @@
FUSE_AVAILABLE = True
except (ImportError, OSError) as e:
FUSE_AVAILABLE = False
- logger.warning(f"fusepy not available: {e}")
# 创建占位符
Operations = object
FUSE = None
@@ -478,7 +477,7 @@ def unmount_all(self) -> None:
def mount_fuse(*args, **kwargs):
raise ImportError(
- "fusepy and libfuse are required. Install with: pip install fusepy and install libfuse system package"
+ "fusepy and libfuse are required. Install with: uv pip install 'vikingbot[fuse]' (or uv pip install -e \".[fuse]\" for local dev) and install libfuse system package"
)
class FUSEMountManager:
diff --git a/bot/vikingbot/providers/base.py b/bot/vikingbot/providers/base.py
index 0b0caf9c..6d00de8f 100644
--- a/bot/vikingbot/providers/base.py
+++ b/bot/vikingbot/providers/base.py
@@ -51,6 +51,7 @@ async def chat(
model: str | None = None,
max_tokens: int = 4096,
temperature: float = 0.7,
+ session_id: str | None = None,
) -> LLMResponse:
"""
Send a chat completion request.
@@ -61,6 +62,7 @@ async def chat(
model: Model identifier (provider-specific).
max_tokens: Maximum tokens in response.
temperature: Sampling temperature.
+ session_id: Optional session ID for tracing.
Returns:
LLMResponse with content and/or tool calls.
diff --git a/bot/vikingbot/providers/litellm_provider.py b/bot/vikingbot/providers/litellm_provider.py
index f5412eb5..84eea405 100644
--- a/bot/vikingbot/providers/litellm_provider.py
+++ b/bot/vikingbot/providers/litellm_provider.py
@@ -4,9 +4,11 @@
import os
from typing import Any
+from loguru import logger
import litellm
from litellm import acompletion
+from vikingbot.integrations.langfuse import LangfuseClient
from vikingbot.providers.base import LLMProvider, LLMResponse, ToolCallRequest
from vikingbot.providers.registry import find_by_model, find_gateway
from vikingbot.utils.helpers import cal_str_tokens
@@ -28,10 +30,12 @@ def __init__(
default_model: str = "anthropic/claude-opus-4-5",
extra_headers: dict[str, str] | None = None,
provider_name: str | None = None,
+ langfuse_client: LangfuseClient | None = None,
):
super().__init__(api_key, api_base)
self.default_model = default_model
self.extra_headers = extra_headers or {}
+ self.langfuse = langfuse_client or LangfuseClient.get_instance()
# Detect gateway / local deployment.
# provider_name (from config key) is the primary signal;
@@ -107,6 +111,7 @@ async def chat(
model: str | None = None,
max_tokens: int = 4096,
temperature: float = 0.7,
+ session_id: str | None = None,
) -> LLMResponse:
"""
Send a chat completion request via LiteLLM.
@@ -117,6 +122,7 @@ async def chat(
model: Model identifier (e.g., 'anthropic/claude-sonnet-4-5').
max_tokens: Maximum tokens in response.
temperature: Sampling temperature.
+ session_id: Optional session ID for tracing.
Returns:
LLMResponse with content and/or tool calls.
@@ -149,10 +155,71 @@ async def chat(
kwargs["tools"] = tools
kwargs["tool_choice"] = "auto"
+ # Direct Langfuse v3 SDK usage
+ # Note: session_id is set via propagate_attributes in loop.py, not here
+ langfuse_generation = None
try:
+ if self.langfuse.enabled and self.langfuse._client:
+ metadata = {"has_tools": tools is not None}
+ langfuse_generation = self.langfuse._client.start_generation(
+ name="llm-chat",
+ model=model,
+ input=messages,
+ metadata=metadata,
+ )
+
response = await acompletion(**kwargs)
- return self._parse_response(response)
+ llm_response = self._parse_response(response)
+
+ # Update and end Langfuse generation
+ if langfuse_generation:
+ output_text = llm_response.content or ""
+ if llm_response.tool_calls:
+ output_text = (
+ output_text
+ or f"[Tool calls: {[tc.name for tc in llm_response.tool_calls]}]"
+ )
+
+ # Update generation with output and usage
+ update_kwargs: dict[str, Any] = {
+ "output": output_text,
+ "metadata": {"finish_reason": llm_response.finish_reason},
+ }
+
+ if llm_response.usage:
+ # Langfuse v3 SDK expects "usage_details" with "input" and "output" keys
+ usage_details: dict[str, Any] = {
+ "input": llm_response.usage.get("prompt_tokens", 0),
+ "output": llm_response.usage.get("completion_tokens", 0),
+ }
+
+ # Add cache read tokens if available (OpenAI/Anthropic prompt caching)
+ # Try multiple possible field names for cached tokens
+ cache_read_tokens = (
+ llm_response.usage.get("cache_read_input_tokens") or
+ llm_response.usage.get("prompt_tokens_details", {}).get("cached_tokens")
+ )
+ if cache_read_tokens:
+ usage_details["cache_read_input_tokens"] = cache_read_tokens
+
+ update_kwargs["usage_details"] = usage_details
+ # Log the usage details being sent to Langfuse
+ # logger.info(f"[LANGFUSE] Updating generation with usage_details: {usage_details}")
+
+ langfuse_generation.update(**update_kwargs)
+ langfuse_generation.end()
+ self.langfuse.flush()
+
+ return llm_response
except Exception as e:
+ # End Langfuse generation with error
+ if langfuse_generation:
+ langfuse_generation.update(
+ output=f"Error: {str(e)}",
+ metadata={"error": str(e)},
+ )
+ langfuse_generation.end()
+ self.langfuse.flush()
# Return error as content for graceful handling
return LLMResponse(
content=f"Error calling LLM: {str(e)}",
@@ -189,6 +256,20 @@ def _parse_response(self, response: Any) -> LLMResponse:
"total_tokens": response.usage.total_tokens,
}
+ # Extract cached tokens from various provider formats
+ # OpenAI style: prompt_tokens_details.cached_tokens
+ if hasattr(response.usage, "prompt_tokens_details"):
+ details = response.usage.prompt_tokens_details
+ if details and hasattr(details, "cached_tokens"):
+ cached = details.cached_tokens
+ if cached:
+ usage["cache_read_input_tokens"] = cached
+ # Anthropic style: cache_read_input_tokens
+ elif hasattr(response.usage, "cache_read_input_tokens"):
+ cached = response.usage.cache_read_input_tokens
+ if cached:
+ usage["cache_read_input_tokens"] = cached
+
reasoning_content = getattr(message, "reasoning_content", None)
return LLMResponse(
diff --git a/bot/vikingbot/sandbox/backends/aiosandbox.py b/bot/vikingbot/sandbox/backends/aiosandbox.py
index e459feed..e15cf03c 100644
--- a/bot/vikingbot/sandbox/backends/aiosandbox.py
+++ b/bot/vikingbot/sandbox/backends/aiosandbox.py
@@ -35,7 +35,7 @@ async def start(self) -> None:
self._client = AsyncSandbox(base_url=self._base_url)
logger.info("[AioSandbox] Connected successfully")
except ImportError:
- logger.error("agent-sandbox SDK not installed. Install with: pip install agent-sandbox")
+ logger.error("agent-sandbox SDK not installed. Install with: uv pip install 'vikingbot[sandbox]' (or uv pip install -e \".[sandbox]\" for local dev)")
raise
except Exception as e:
logger.error("[AioSandbox] Failed to start: {}", e)
diff --git a/bot/vikingbot/sandbox/backends/direct.py b/bot/vikingbot/sandbox/backends/direct.py
index 95c02ac9..37859a21 100644
--- a/bot/vikingbot/sandbox/backends/direct.py
+++ b/bot/vikingbot/sandbox/backends/direct.py
@@ -29,7 +29,7 @@ async def start(self) -> None:
"""Start the backend (no-op for direct backend)."""
self._workspace.mkdir(parents=True, exist_ok=True)
self._running = True
- logger.info("Direct backend started")
+ #logger.info("Direct backend started")
async def execute(self, command: str, timeout: int = 60, **kwargs: Any) -> str:
"""Execute a command directly on the host."""
diff --git a/bot/vikingbot/sandbox/backends/opensandbox.py b/bot/vikingbot/sandbox/backends/opensandbox.py
index 99e3a762..66c1027f 100644
--- a/bot/vikingbot/sandbox/backends/opensandbox.py
+++ b/bot/vikingbot/sandbox/backends/opensandbox.py
@@ -205,7 +205,7 @@ async def start(self) -> None:
logger.info("OpenSandbox created successfully")
except ImportError:
- logger.error("opensandbox SDK not installed. Install with: pip install opensandbox")
+ logger.error("opensandbox SDK not installed. Install with: uv pip install 'vikingbot[sandbox]' (or uv pip install -e \".[sandbox]\" for local dev)")
raise
except Exception as e:
logger.error("Failed to create OpenSandbox: {}", e)
diff --git a/bot/vikingbot/sandbox/manager.py b/bot/vikingbot/sandbox/manager.py
index 02230428..91bc651e 100644
--- a/bot/vikingbot/sandbox/manager.py
+++ b/bot/vikingbot/sandbox/manager.py
@@ -31,16 +31,16 @@ async def get_sandbox(self, session_key: SessionKey) -> SandboxBackend:
async def _get_or_create_sandbox(self, session_key: SessionKey) -> SandboxBackend:
"""Get or create session-specific sandbox."""
- sandbox_key = self.to_sandbox_key(session_key)
- if sandbox_key not in self._sandboxes:
- sandbox = await self._create_sandbox(sandbox_key)
- self._sandboxes[sandbox_key] = sandbox
- return self._sandboxes[sandbox_key]
+ workspace_id = self.to_workspace_id(session_key)
+ if workspace_id not in self._sandboxes:
+ sandbox = await self._create_sandbox(workspace_id)
+ self._sandboxes[workspace_id] = sandbox
+ return self._sandboxes[workspace_id]
- async def _create_sandbox(self, sandbox_key: str) -> SandboxBackend:
+ async def _create_sandbox(self, workspace_id: str) -> SandboxBackend:
"""Create new sandbox instance."""
- workspace = self.workspace / sandbox_key
- instance = self._backend_cls(self.config.sandbox, sandbox_key, workspace)
+ workspace = self.workspace / workspace_id
+ instance = self._backend_cls(self.config.sandbox, workspace_id, workspace)
try:
await instance.start()
except Exception as e:
@@ -88,10 +88,10 @@ async def _copy_bootstrap_files(self, sandbox_workspace: Path) -> None:
async def cleanup_session(self, session_key: SessionKey) -> None:
"""Clean up sandbox for a session."""
- sandbox_key = self.to_sandbox_key(session_key)
- if sandbox_key in self._sandboxes:
- await self._sandboxes[sandbox_key].stop()
- del self._sandboxes[sandbox_key]
+ workspace_id = self.to_workspace_id(session_key)
+ if workspace_id in self._sandboxes:
+ await self._sandboxes[workspace_id].stop()
+ del self._sandboxes[workspace_id]
async def cleanup_all(self) -> None:
"""Clean up all sandboxes."""
@@ -100,9 +100,9 @@ async def cleanup_all(self) -> None:
self._sandboxes.clear()
def get_workspace_path(self, session_key: SessionKey) -> Path:
- return self.workspace / self.to_sandbox_key(session_key)
+ return self.workspace / self.to_workspace_id(session_key)
- def to_sandbox_key(self, session_key: SessionKey):
+ def to_workspace_id(self, session_key: SessionKey):
if self.config.sandbox.mode == "shared":
return "shared"
else:
diff --git a/bot/vikingbot/session/manager.py b/bot/vikingbot/session/manager.py
index ca00574b..e97ae97c 100644
--- a/bot/vikingbot/session/manager.py
+++ b/bot/vikingbot/session/manager.py
@@ -31,9 +31,13 @@ class Session:
updated_at: datetime = field(default_factory=datetime.now)
metadata: dict[str, Any] = field(default_factory=dict)
- def add_message(self, role: str, content: str, **kwargs: Any) -> None:
+ def add_message(
+ self, role: str, content: str, sender_id: str | None = None, **kwargs: Any
+ ) -> None:
"""Add a message to the session."""
msg = {"role": role, "content": content, "timestamp": datetime.now().isoformat(), **kwargs}
+ if sender_id is not None:
+ msg["sender_id"] = sender_id
self.messages.append(msg)
self.updated_at = datetime.now()
@@ -63,18 +67,39 @@ def clear(self) -> None:
class SessionManager:
"""
- Manages conversation sessions.
-
- Sessions are stored as JSONL files in sessions directory.
+ Manages conversation sessions with persistence and caching.
+
+ SessionManager handles the lifecycle of conversation sessions, including
+ creation, retrieval, caching, and persistent storage. Sessions are stored
+ as JSONL files in a designated directory for durability.
+
+ The manager maintains an in-memory cache of active sessions to improve
+ performance and reduce disk I/O. Sessions are automatically persisted when
+ modified.
+
+ Attributes:
+ bot_data_path: Path to the bot's data directory.
+ workspace: Path to the workspace directory within bot_data.
+ sessions_dir: Path where session JSONL files are stored.
+ _cache: In-memory cache mapping SessionKey to Session objects.
+ sandbox_manager: Optional sandbox manager for isolated operations.
+
+ Example:
+ >>> manager = SessionManager(Path("/path/to/bot/data"))
+ >>> session_key = SessionKey(channel="telegram", chat_id="12345")
+ >>> session = manager.get_or_create(session_key)
+ >>> session.add_message("user", "Hello!")
+ >>> await manager.save(session)
"""
def __init__(
self,
- workspace: Path,
+ bot_data_path: Path,
sandbox_manager: "SandboxManager | None" = None,
):
- self.workspace = workspace
- self.sessions_dir = ensure_dir(Path.home() / ".vikingbot" / "sessions")
+ self.bot_data_path = bot_data_path
+ self.workspace = bot_data_path / "workspace"
+ self.sessions_dir = ensure_dir(bot_data_path / "sessions")
self._cache: dict[SessionKey, Session] = {}
self.sandbox_manager = sandbox_manager
@@ -111,7 +136,7 @@ def get_or_create(self, key: SessionKey, skip_heartbeat: bool = False) -> Sessio
if self.sandbox_manager.config.mode == "shared":
workspace_path = self.sandbox_manager.workspace / "shared"
else:
- workspace_path = self.sandbox_manager.workspace / key.replace(":", "_")
+ workspace_path = self.sandbox_manager.workspace / key.safe_name()
ensure_session_workspace(workspace_path)
# Initialize sandbox
@@ -175,7 +200,7 @@ def _load(self, session_key: SessionKey) -> Session | None:
logger.warning(f"Failed to load session {session_key}: {e}")
return None
- def save(self, session: Session) -> None:
+ async def save(self, session: Session) -> None:
"""Save a session to disk."""
path = self._get_session_path(session.key)
diff --git a/bot/vikingbot/tests/__init__.py b/bot/vikingbot/tests/__init__.py
new file mode 100644
index 00000000..1ed28a3a
--- /dev/null
+++ b/bot/vikingbot/tests/__init__.py
@@ -0,0 +1 @@
+"""Vikingbot test suite."""
diff --git a/bot/vikingbot/tests/integration/__init__.py b/bot/vikingbot/tests/integration/__init__.py
new file mode 100644
index 00000000..c210facc
--- /dev/null
+++ b/bot/vikingbot/tests/integration/__init__.py
@@ -0,0 +1 @@
+"""Integration tests."""
diff --git a/bot/vikingbot/tests/unit/__init__.py b/bot/vikingbot/tests/unit/__init__.py
new file mode 100644
index 00000000..e0310a01
--- /dev/null
+++ b/bot/vikingbot/tests/unit/__init__.py
@@ -0,0 +1 @@
+"""Unit tests."""
diff --git a/bot/vikingbot/tests/unit/test_agent/__init__.py b/bot/vikingbot/tests/unit/test_agent/__init__.py
new file mode 100644
index 00000000..e3416230
--- /dev/null
+++ b/bot/vikingbot/tests/unit/test_agent/__init__.py
@@ -0,0 +1 @@
+"""Agent module tests."""
diff --git a/bot/vikingbot/tests/unit/test_bus/__init__.py b/bot/vikingbot/tests/unit/test_bus/__init__.py
new file mode 100644
index 00000000..785e2723
--- /dev/null
+++ b/bot/vikingbot/tests/unit/test_bus/__init__.py
@@ -0,0 +1 @@
+"""Message bus tests."""
diff --git a/bot/vikingbot/tests/unit/test_channels/__init__.py b/bot/vikingbot/tests/unit/test_channels/__init__.py
new file mode 100644
index 00000000..4294d628
--- /dev/null
+++ b/bot/vikingbot/tests/unit/test_channels/__init__.py
@@ -0,0 +1 @@
+"""Channel adapter tests."""
diff --git a/bot/vikingbot/tests/unit/test_config/__init__.py b/bot/vikingbot/tests/unit/test_config/__init__.py
new file mode 100644
index 00000000..b27d053b
--- /dev/null
+++ b/bot/vikingbot/tests/unit/test_config/__init__.py
@@ -0,0 +1 @@
+"""Configuration tests."""
diff --git a/bot/vikingbot/tui/app.py b/bot/vikingbot/tui/app.py
deleted file mode 100644
index ecab2d4f..00000000
--- a/bot/vikingbot/tui/app.py
+++ /dev/null
@@ -1,379 +0,0 @@
-"""Main TUI application using Textual framework."""
-
-import asyncio
-from typing import Optional
-
-from textual import on
-from textual.app import App, ComposeResult
-from textual.containers import Container, Vertical, Horizontal
-from textual.widgets import Header, Footer, Static, Input, Button, RichLog
-from textual.binding import Binding
-from textual.reactive import reactive
-
-from vikingbot.config.schema import SessionKey
-from vikingbot.tui.state import TUIState, MessageRole, Message, ThinkingStep, ThinkingStepType
-from vikingbot import __logo__
-
-
-class ThinkingPanel(Vertical):
- """思考过程面板"""
-
- def __init__(self, state: TUIState) -> None:
- super().__init__()
- self.state = state
- self.thinking_log = RichLog(id="thinking-log", markup=True, wrap=True, auto_scroll=True)
- self.title = Static("[bold yellow]🧠 Thinking Process[/bold yellow]", id="thinking-title")
-
- def compose(self) -> ComposeResult:
- yield self.title
- yield self.thinking_log
-
- def add_step(self, step: ThinkingStep) -> None:
- """添加思考步骤"""
- if step.step_type == ThinkingStepType.ITERATION:
- self.thinking_log.write(f"[dim]━━━ {step.content} ━━━[/dim]")
- elif step.step_type == ThinkingStepType.REASONING:
- self.thinking_log.write(f"[cyan]💭 Reasoning:[/cyan] {step.content}")
- elif step.step_type == ThinkingStepType.TOOL_CALL:
- self.thinking_log.write(f"[magenta]🔧 Tool:[/magenta] {step.content}")
- elif step.step_type == ThinkingStepType.TOOL_RESULT:
- self.thinking_log.write(f"[green]✓ Result:[/green] {step.content}")
-
- def clear(self) -> None:
- """清空思考过程"""
- self.thinking_log.clear()
-
-
-class MessageList(RichLog):
- """消息列表组件,显示聊天消息"""
-
- def add_message(self, message: Message) -> None:
- """添加消息到列表"""
- if message.role == MessageRole.USER:
- self.write(f"[bold cyan]You:[/bold cyan] {message.content}")
- elif message.role == MessageRole.ASSISTANT:
- self.write(f"[bold green]🐈 vikingbot:[/bold green]")
- self.write(message.content)
- elif message.role == MessageRole.SYSTEM:
- self.write(f"[dim]{message.content}[/dim]")
- self.write("")
-
-
-class ChatInput(Horizontal):
- """聊天输入框组件"""
-
- def compose(self) -> ComposeResult:
- yield Input(placeholder="Type your message here...", id="chat-input")
- yield Button("Send", variant="primary", id="send-button")
-
-
-class ThinkingIndicator(Static):
- """思考状态指示器"""
-
- is_thinking = reactive(False)
-
- def render(self) -> str:
- if self.is_thinking:
- return "[dim]vikingbot is thinking...[/dim]"
- return ""
-
-
-class StatusBar(Static):
- """状态栏显示会话信息"""
-
- def __init__(self, state: TUIState) -> None:
- super().__init__()
- self.state = state
-
- def render(self) -> str:
- status = f"Messages: {self.state.message_count}"
- if self.state.total_tokens > 0:
- status += f" | Tokens: {self.state.total_tokens}"
- if self.state.last_error:
- status += f" | [red]Error: {self.state.last_error}[/red]"
- status += " | [F2] Toggle Thinking | [F3] Clear Thinking"
- return status
-
-
-class ChatScreen(Horizontal):
- """聊天主屏幕(左右分栏布局)"""
-
- def __init__(self, state: TUIState) -> None:
- super().__init__()
- self.state = state
- self.message_list = MessageList(id="message-list", markup=True, wrap=True)
- self.thinking_panel = ThinkingPanel(state)
- self.thinking_indicator = ThinkingIndicator(id="thinking-indicator")
- self.status_bar = StatusBar(state)
-
- def compose(self) -> ComposeResult:
- # 左侧:聊天区域
- with Vertical(id="left-panel"):
- yield self.message_list
- yield self.thinking_indicator
- yield ChatInput(id="chat-input-container")
-
- # 右侧:思考过程面板
- with Vertical(id="right-panel"):
- yield self.thinking_panel
-
- yield self.status_bar
-
- def on_mount(self) -> None:
- """挂载时初始化消息列表"""
- for message in self.state.messages:
- self.message_list.add_message(message)
- # 根据状态显示/隐藏思考面板
- self._update_thinking_panel_visibility()
-
- def _update_thinking_panel_visibility(self) -> None:
- """更新思考面板可见性"""
- right_panel = self.query_one("#right-panel", Vertical)
- right_panel.display = self.state.show_thinking_panel
-
- def toggle_thinking_panel(self) -> None:
- """切换思考面板显示/隐藏"""
- self.state.show_thinking_panel = not self.state.show_thinking_panel
- self._update_thinking_panel_visibility()
- self.status_bar.refresh()
-
- def update_thinking(self, is_thinking: bool) -> None:
- """更新思考状态"""
- self.thinking_indicator.is_thinking = is_thinking
-
- def add_message(self, message: Message) -> None:
- """添加消息并更新状态"""
- self.state.messages.append(message)
- self.message_list.add_message(message)
- self.state.message_count = len(self.state.messages)
- self.status_bar.refresh()
-
- def add_thinking_step(self, step: ThinkingStep) -> None:
- """添加思考步骤"""
- self.state.current_thinking_steps.append(step)
- self.thinking_panel.add_step(step)
-
- def clear_thinking(self) -> None:
- """清空思考过程"""
- self.state.current_thinking_steps.clear()
- self.thinking_panel.clear()
-
-
-class NanobotTUI(App):
- """vikingbot Textual TUI 主应用"""
-
- CSS_PATH = "styles/tui.css"
- BINDINGS = [
- Binding("ctrl+c", "quit", "Quit", show=True),
- Binding("ctrl+d", "quit", "Quit", show=True),
- Binding("escape", "quit", "Quit", show=True),
- Binding("up", "history_up", "Previous message", show=True),
- Binding("down", "history_down", "Next message", show=True),
- Binding("ctrl+l", "clear", "Clear chat", show=True),
- Binding("f2", "toggle_thinking", "Toggle thinking panel", show=True),
- Binding("f3", "clear_thinking", "Clear thinking", show=True),
- ]
-
- def __init__(self, agent_loop, bus, config) -> None:
- super().__init__()
- self.agent_loop = agent_loop
- self.bus = bus
- self.config = config
- self.state = TUIState()
- self.chat_screen: Optional[ChatScreen] = None
-
- # 设置思考回调
- self.state.thinking_callback = self._on_thinking_step
-
- def _on_thinking_step(self, step) -> None:
- """思考步骤回调(处理来自 agent loop 的回调)"""
- # 转换 step 类型(来自 loop.py 的简化版本)
- from vikingbot.agent.loop import ThinkingStepType as LoopThinkingStepType
-
- converted_type_map = {
- LoopThinkingStepType.REASONING: ThinkingStepType.REASONING,
- LoopThinkingStepType.TOOL_CALL: ThinkingStepType.TOOL_CALL,
- LoopThinkingStepType.TOOL_RESULT: ThinkingStepType.TOOL_RESULT,
- LoopThinkingStepType.ITERATION: ThinkingStepType.ITERATION,
- }
-
- converted_step = ThinkingStep(
- step_type=converted_type_map.get(step.step_type, ThinkingStepType.REASONING),
- content=step.content,
- timestamp=step.timestamp,
- metadata=step.metadata or {},
- )
-
- if self.chat_screen:
- self.chat_screen.add_thinking_step(converted_step)
-
- def compose(self) -> ComposeResult:
- """创建应用布局"""
- yield Header()
- self.chat_screen = ChatScreen(self.state)
- yield self.chat_screen
- yield Footer()
-
- def on_mount(self) -> None:
- """应用挂载时显示欢迎信息"""
- self.title = "🐈 vikingbot TUI"
- self.sub_title = "Interactive AI Programming Assistant"
-
- # 添加欢迎消息
- if self.chat_screen:
- welcome_msg = Message(
- role=MessageRole.SYSTEM,
- content=f"{__logo__} Welcome to vikingbot TUI! Type your message below.",
- )
- self.chat_screen.add_message(welcome_msg)
-
- # 延迟聚焦到输入框,确保组件已完全挂载
- self.call_later(self._focus_input)
-
- def _focus_input(self) -> None:
- """设置焦点到输入框"""
- try:
- input_widget = self.query_one("#chat-input", Input)
- self.set_focus(input_widget)
- except Exception:
- pass
-
- @on(Input.Submitted, "#chat-input")
- @on(Button.Pressed, "#send-button")
- async def on_message_sent(self) -> None:
- """处理消息发送"""
- if not self.chat_screen:
- return
-
- input_widget = self.query_one("#chat-input", Input)
- message_text = input_widget.value.strip()
-
- if not message_text:
- return
-
- # 检查退出命令
- if self._is_exit_command(message_text):
- await self.action_quit()
- return
-
- # 清空输入框
- input_widget.value = ""
-
- # 让输入框失去焦点,避免光标一直闪烁
- self.set_focus(None)
-
- # 清空当前思考过程
- self.chat_screen.clear_thinking()
-
- # 添加用户消息
- user_message = Message(role=MessageRole.USER, content=message_text)
- self.chat_screen.add_message(user_message)
- self.state.input_history.append(message_text)
- self.state.history_index = len(self.state.input_history)
-
- # 显示思考状态
- self.chat_screen.update_thinking(True)
-
- original_callback = None
- try:
- # 设置 agent loop 的回调
- original_callback = getattr(self.agent_loop, "thinking_callback", None)
- self.agent_loop.thinking_callback = self._on_thinking_step
-
- # 处理消息
- response = await self.agent_loop.process_direct(
- message_text, session_key=self.state.session_key
- )
-
- # 恢复原回调
- self.agent_loop.thinking_callback = original_callback
-
- # 添加助手回复
- assistant_message = Message(role=MessageRole.ASSISTANT, content=response)
- self.chat_screen.add_message(assistant_message)
-
- # 更新令牌计数(简化)
- self.state.total_tokens += len(response) // 4
-
- # 重新聚焦到输入框
- self.set_focus(input_widget)
-
- except Exception as e:
- # 恢复原回调
- if hasattr(self.agent_loop, "thinking_callback"):
- self.agent_loop.thinking_callback = original_callback
- # 显示错误
- error_msg = Message(role=MessageRole.SYSTEM, content=f"[red]Error: {e}[/red]")
- self.chat_screen.add_message(error_msg)
- self.state.last_error = str(e)
- # 重新聚焦到输入框
- self.set_focus(input_widget)
- finally:
- # 隐藏思考状态
- self.chat_screen.update_thinking(False)
- self.chat_screen.status_bar.refresh()
-
- def action_history_up(self) -> None:
- """上一条历史消息"""
- if self.state.input_history:
- input_widget = self.query_one("#chat-input", Input)
- if self.state.history_index > 0:
- self.state.history_index -= 1
- input_widget.value = self.state.input_history[self.state.history_index]
- input_widget.cursor_position = len(input_widget.value)
-
- def action_history_down(self) -> None:
- """下一条历史消息"""
- if self.state.input_history:
- input_widget = self.query_one("#chat-input", Input)
- if self.state.history_index < len(self.state.input_history) - 1:
- self.state.history_index += 1
- input_widget.value = self.state.input_history[self.state.history_index]
- input_widget.cursor_position = len(input_widget.value)
- elif self.state.history_index == len(self.state.input_history) - 1:
- self.state.history_index = len(self.state.input_history)
- input_widget.value = ""
-
- def action_clear(self) -> None:
- """清空聊天并开始新会话"""
- self.state.messages.clear()
- self.state.message_count = 0
- self.state.total_tokens = 0
- self.state.last_error = None
-
- # 生成新的 session ID
- import uuid
-
- self.state.session_key = SessionKey(
- type="tui", channel_id="default", chat_id="uuid.uuid4().hex[:8]"
- )
-
- # 清空思考过程
- if self.chat_screen:
- self.chat_screen.clear_thinking()
- self.chat_screen.message_list.clear()
- welcome_msg = Message(
- role=MessageRole.SYSTEM,
- content=f"{__logo__} Chat cleared. New session started (Session: {self.state.session_key}).",
- )
- self.chat_screen.add_message(welcome_msg)
-
- def action_toggle_thinking(self) -> None:
- """切换思考面板"""
- if self.chat_screen:
- self.chat_screen.toggle_thinking_panel()
-
- def action_clear_thinking(self) -> None:
- """清空思考过程"""
- if self.chat_screen:
- self.chat_screen.clear_thinking()
-
- def _is_exit_command(self, command: str) -> bool:
- """检查是否为退出命令"""
- return command.lower().strip() in {"exit", "quit", "/exit", "/quit", ":q"}
-
-
-async def run_tui(agent_loop, bus, config) -> None:
- """运行 TUI 应用"""
- app = NanobotTUI(agent_loop, bus, config)
- await app.run_async()
diff --git a/bot/vikingbot/tui/state.py b/bot/vikingbot/tui/state.py
deleted file mode 100644
index 996142d5..00000000
--- a/bot/vikingbot/tui/state.py
+++ /dev/null
@@ -1,61 +0,0 @@
-"""TUI state management module."""
-
-from dataclasses import dataclass, field
-from datetime import datetime
-from enum import Enum
-from typing import List, Optional, Callable, Any
-
-from vikingbot.config.schema import SessionKey
-
-
-class MessageRole(Enum):
- USER = "user"
- ASSISTANT = "assistant"
- SYSTEM = "system"
-
-
-class ThinkingStepType(Enum):
- """思考步骤类型"""
-
- REASONING = "reasoning" # 推理内容
- TOOL_CALL = "tool_call" # 工具调用
- TOOL_RESULT = "tool_result" # 工具结果
- ITERATION = "iteration" # 迭代开始
-
-
-@dataclass
-class ThinkingStep:
- """单个思考步骤"""
-
- step_type: ThinkingStepType
- content: str
- timestamp: datetime = field(default_factory=datetime.now)
- metadata: dict = field(default_factory=dict)
-
-
-@dataclass
-class Message:
- role: MessageRole
- content: str
- timestamp: datetime = field(default_factory=datetime.now)
- tokens_used: Optional[int] = None
- thinking_steps: List[ThinkingStep] = field(default_factory=list)
-
-
-@dataclass
-class TUIState:
- messages: List[Message] = field(default_factory=list)
- session_key: SessionKey = SessionKey(type="tui", channel_id="default", chat_id="default")
- is_thinking: bool = False
- thinking_message: str = "vikingbot is thinking..."
- input_text: str = ""
- input_history: List[str] = field(default_factory=list)
- history_index: int = -1
- last_error: Optional[str] = None
- total_tokens: int = 0
- message_count: int = 0
-
- # 思考过程相关
- current_thinking_steps: List[ThinkingStep] = field(default_factory=list)
- show_thinking_panel: bool = True # 是否显示思考面板
- thinking_callback: Optional[Callable[[ThinkingStep], None]] = None
diff --git a/bot/vikingbot/tui/styles/tui.css b/bot/vikingbot/tui/styles/tui.css
deleted file mode 100644
index 4f5c2861..00000000
--- a/bot/vikingbot/tui/styles/tui.css
+++ /dev/null
@@ -1,135 +0,0 @@
-/* vikingbot TUI 样式 */
-
-/* 应用整体样式 */
-NanobotTUI {
- background: $surface;
-}
-
-/* 聊天屏幕(左右分栏) */
-ChatScreen {
- height: 1fr;
- layout: horizontal;
-}
-
-/* 左侧面板 */
-#left-panel {
- width: 60%;
- height: 1fr;
- layout: vertical;
- border-right: solid $primary;
-}
-
-/* 右侧面板 */
-#right-panel {
- width: 40%;
- height: 1fr;
- layout: vertical;
- background: $surface-lighten-1;
-}
-
-/* 思考面板标题 */
-#thinking-title {
- height: 1;
- padding: 0 1;
- background: $primary;
- color: $text;
- text-style: bold;
-}
-
-/* 思考日志 */
-#thinking-log {
- height: 1fr;
- padding: 0 1;
- background: $surface-lighten-1;
- border: none;
-}
-
-/* 消息列表 */
-#message-list {
- height: 1fr;
- padding: 0 1;
- background: $surface;
- border: none;
-}
-
-/* 思考指示器 */
-#thinking-indicator {
- height: 1;
- padding: 0 1;
- color: $text-muted;
- text-style: italic;
-}
-
-/* 聊天输入容器 */
-#chat-input-container {
- height: auto;
- padding: 1 1;
- border-top: solid $primary;
- background: $surface;
-}
-
-/* 输入框 */
-#chat-input {
- width: 1fr;
- margin-right: 1;
-}
-
-/* 发送按钮 */
-#send-button {
- width: auto;
- min-width: 8;
-}
-
-/* 状态栏 */
-StatusBar {
- height: 1;
- padding: 0 1;
- background: $panel;
- color: $text-muted;
- dock: bottom;
-}
-
-/* 消息样式 */
-.assistant-message {
- color: $success;
-}
-
-.user-message {
- color: $primary;
-}
-
-.system-message {
- color: $text-muted;
- text-style: italic;
-}
-
-/* 错误消息 */
-.error-message {
- color: $error;
-}
-
-/* 按钮悬停效果 */
-Button:hover {
- background: $primary-lighten-1;
-}
-
-Button:focus {
- border: solid $primary;
-}
-
-/* 输入框焦点效果 */
-Input:focus {
- border: solid $primary;
-}
-
-/* 禁用输入框 hover 闪烁效果 */
-Input:hover {
- background: $surface;
- border: solid $surface;
-}
-
-/* 确保输入框保持稳定样式 */
-#chat-input:hover {
- background: $surface;
- border: solid $surface;
-}
\ No newline at end of file
diff --git a/bot/vikingbot/utils/__init__.py b/bot/vikingbot/utils/__init__.py
index cb068612..26571480 100644
--- a/bot/vikingbot/utils/__init__.py
+++ b/bot/vikingbot/utils/__init__.py
@@ -1,5 +1,33 @@
"""Utility functions for vikingbot."""
-from vikingbot.utils.helpers import ensure_dir, get_workspace_path, get_data_path
+from vikingbot.utils.helpers import (
+ ensure_dir,
+ get_workspace_path,
+ get_data_path,
+ get_bot_data_path,
+ set_bot_data_path,
+ get_sessions_path,
+ get_history_path,
+ get_bridge_path,
+ get_images_path,
+ get_media_path,
+ get_received_path,
+ get_mochat_path,
+ get_mounts_path,
+)
-__all__ = ["ensure_dir", "get_workspace_path", "get_data_path"]
+__all__ = [
+ "ensure_dir",
+ "get_workspace_path",
+ "get_data_path",
+ "get_bot_data_path",
+ "set_bot_data_path",
+ "get_sessions_path",
+ "get_history_path",
+ "get_bridge_path",
+ "get_images_path",
+ "get_media_path",
+ "get_received_path",
+ "get_mochat_path",
+ "get_mounts_path",
+]
diff --git a/bot/vikingbot/utils/helpers.py b/bot/vikingbot/utils/helpers.py
index 62899a68..cf1d761c 100644
--- a/bot/vikingbot/utils/helpers.py
+++ b/bot/vikingbot/utils/helpers.py
@@ -10,36 +10,76 @@ def ensure_dir(path: Path) -> Path:
return path
+# Global bot data path - must be set before use
+_bot_data_path: Path | None = None
+
+
+def set_bot_data_path(path: Path) -> None:
+ """Set the global bot data path."""
+ global _bot_data_path
+ _bot_data_path = path
+
+
+def get_bot_data_path() -> Path:
+ """Get the bot data directory. set_bot_data_path() must be called first."""
+ global _bot_data_path
+ if not _bot_data_path:
+ raise RuntimeError("bot_data_path not set. Call set_bot_data_path() first.")
+ return ensure_dir(_bot_data_path)
+
+
def get_data_path() -> Path:
- """Get the vikingbot data directory (~/.vikingbot)."""
- return ensure_dir(Path.home() / ".vikingbot")
+ """Get the bot data directory. Alias for get_bot_data_path()."""
+ return get_bot_data_path()
-def get_source_workspace_path() -> Path:
- """Get the source workspace path from the codebase."""
- return Path(__file__).parent.parent.parent / "workspace"
+def get_sessions_path() -> Path:
+ """Get the sessions storage directory."""
+ return ensure_dir(get_bot_data_path() / "sessions")
-def get_workspace_path(workspace: str | None = None, ensure_exists: bool = True) -> Path:
- """
- Get the workspace path.
+def get_history_path() -> Path:
+ """Get the CLI history directory."""
+ return ensure_dir(get_bot_data_path() / "history")
- Args:
- workspace: Optional workspace path. Defaults to ~/.vikingbot/workspace/shared.
- ensure_exists: If True, ensure the directory exists (creates it if necessary.
- Returns:
- Expanded workspace path.
- """
- if workspace:
- path = Path(workspace).expanduser()
- else:
- path = Path.home() / ".vikingbot" / "workspace" / "shared"
+def get_bridge_path() -> Path:
+ """Get the bridge directory."""
+ return ensure_dir(get_bot_data_path() / "bridge")
+
+
+def get_images_path() -> Path:
+ """Get the images directory."""
+ return ensure_dir(get_bot_data_path() / "images")
+
+
+def get_media_path() -> Path:
+ """Get the media directory."""
+ return ensure_dir(get_bot_data_path() / "media")
- if ensure_exists:
- ensure_workspace_templates(path)
- return ensure_dir(path)
- return path
+
+def get_received_path() -> Path:
+ """Get the received files directory."""
+ return ensure_dir(get_bot_data_path() / "received")
+
+
+def get_mochat_path() -> Path:
+ """Get the mochat state directory."""
+ return ensure_dir(get_bot_data_path() / "mochat")
+
+
+def get_mounts_path() -> Path:
+ """Get the mounts directory."""
+ return ensure_dir(get_bot_data_path() / "mounts")
+
+
+def get_source_workspace_path() -> Path:
+ """Get the source workspace path from the codebase."""
+ return Path(__file__).parent.parent.parent / "workspace"
+
+
+def get_workspace_path() -> Path:
+ return ensure_dir(get_bot_data_path() / "workspace")
def ensure_workspace_templates(workspace: Path) -> None:
@@ -212,11 +252,6 @@ def _create_minimal_workspace_templates(workspace: Path) -> None:
skills_dir.mkdir(exist_ok=True)
-def get_sessions_path() -> Path:
- """Get the sessions storage directory."""
- return ensure_dir(get_data_path() / "sessions")
-
-
def get_skills_path(workspace: Path | None = None) -> Path:
"""Get the skills directory within the workspace."""
ws = workspace or get_workspace_path()
diff --git a/bot/vikingbot/utils/tracing.py b/bot/vikingbot/utils/tracing.py
new file mode 100644
index 00000000..9dee8534
--- /dev/null
+++ b/bot/vikingbot/utils/tracing.py
@@ -0,0 +1,174 @@
+"""
+Abstract tracing utilities for observability.
+
+This module provides a tracing abstraction that is not tied to any specific
+backend (Langfuse, OpenTelemetry, etc.), allowing for easy switching of
+implementations.
+"""
+
+from contextlib import contextmanager
+from contextvars import ContextVar
+from functools import wraps
+from typing import Any, Callable, Generator, TypeVar
+
+from loguru import logger
+
+# Context variable to store current session ID
+_session_id: ContextVar[str | None] = ContextVar("session_id", default=None)
+
+T = TypeVar("T")
+
+# Try to import langfuse observe decorator
+try:
+ from langfuse.decorators import observe as langfuse_observe
+except ImportError:
+ langfuse_observe = None
+
+
+def get_current_session_id() -> str | None:
+ """Get the current session ID from context."""
+ return _session_id.get()
+
+
+
+@contextmanager
+def set_session_id(session_id: str | None) -> Generator[None, None, None]:
+ """
+ Set the session ID for the current context.
+
+ Args:
+ session_id: The session ID to set, or None to clear.
+
+ Example:
+ with set_session_id("user-123"):
+ # All nested operations will see this session_id
+ result = await process_message(msg)
+ """
+ token = _session_id.set(session_id)
+ try:
+ yield
+ finally:
+ _session_id.reset(token)
+
+
+def trace(
+ name: str | None = None,
+ *,
+ extract_session_id: Callable[..., str] | None = None,
+ extract_user_id: Callable[..., str] | None = None,
+) -> Callable[[Callable[..., T]], Callable[..., T]]:
+ """
+ Decorator to trace a function execution with session context.
+
+ This decorator is backend-agnostic. It manages session ID injection
+ through context variables, without binding to any specific tracing
+ implementation (Langfuse, OpenTelemetry, etc.).
+
+ Args:
+ name: Optional name for the trace span. Defaults to function name.
+ extract_session_id: Optional callable to extract session_id from
+ function arguments. The callable receives all positional (*args)
+ and keyword (**kwargs) arguments of the decorated function.
+ extract_user_id: Optional callable to extract user_id from
+ function arguments (e.g., lambda msg: msg.sender_id).
+
+ Returns:
+ Decorated function with tracing context management.
+
+ Example:
+ @trace(
+ name="process_message",
+ extract_session_id=lambda msg: msg.session_key.safe_name(),
+ extract_user_id=lambda msg: msg.sender_id,
+ )
+ async def process_message(msg: InboundMessage) -> Response:
+ # session_id and user_id are automatically propagated
+ return await handle(msg)
+ """
+ def decorator(func: Callable[..., T]) -> Callable[..., T]:
+ span_name = name or func.__name__
+
+ # Apply @observe decorator if available for Langfuse tracing
+ wrapped_func = func
+ if langfuse_observe is not None:
+ wrapped_func = langfuse_observe(name=span_name)(func)
+
+ @wraps(func)
+ async def async_wrapper(*args: Any, **kwargs: Any) -> T:
+ # Extract session_id if extractor provided
+ session_id: str | None = None
+ if extract_session_id:
+ try:
+ # Inspect the extractor's signature to determine how to call it
+ import inspect
+ sig = inspect.signature(extract_session_id)
+ param_count = len([
+ p for p in sig.parameters.values()
+ if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
+ ])
+
+ if param_count == 1 and len(args) >= 1:
+ # Extractor expects single arg (e.g., lambda msg: ...)
+ # Use the last arg which is typically the message/object
+ session_id = extract_session_id(args[-1])
+ else:
+ # Extractor expects multiple args or specific signature
+ session_id = extract_session_id(*args, **kwargs)
+ except Exception as e:
+ logger.warning(f"Failed to extract session_id: {e}")
+
+ # Extract user_id if extractor provided
+ user_id: str | None = None
+ if extract_user_id:
+ try:
+ import inspect
+ sig = inspect.signature(extract_user_id)
+ param_count = len([
+ p for p in sig.parameters.values()
+ if p.kind in (p.POSITIONAL_ONLY, p.POSITIONAL_OR_KEYWORD)
+ ])
+
+ if param_count == 1 and len(args) >= 1:
+ user_id = extract_user_id(args[-1])
+ else:
+ user_id = extract_user_id(*args, **kwargs)
+ except Exception as e:
+ logger.warning(f"Failed to extract user_id: {e}")
+
+ # Fall back to current context if no session_id extracted
+ if session_id is None:
+ session_id = get_current_session_id()
+ logger.debug(f"[TRACE] No session_id extracted, using context: {session_id}")
+ else:
+ #logger.info(f"[TRACE] Extracted session_id: {session_id}")
+ pass
+
+ if user_id:
+ #logger.info(f"[TRACE] Extracted user_id: {user_id}")
+ pass
+
+ # Use context manager to set session_id for nested operations
+ if session_id:
+ with set_session_id(session_id):
+ # Also propagate to langfuse if available
+ from vikingbot.integrations.langfuse import LangfuseClient
+
+ langfuse = LangfuseClient.get_instance()
+ has_propagate = hasattr(langfuse, "propagate_attributes")
+ # logger.info(f"[LANGFUSE] Client status: enabled={langfuse.enabled}, has_propagate_attributes={has_propagate}")
+ if langfuse.enabled and has_propagate:
+ # logger.info(f"[LANGFUSE] Starting trace with attributes: session_id={session_id}, user_id={user_id}")
+ with langfuse.propagate_attributes(session_id=session_id, user_id=user_id):
+ return await wrapped_func(*args, **kwargs)
+ else:
+ if not langfuse.enabled:
+ logger.warning(f"[LANGFUSE] Client not enabled")
+ if not has_propagate:
+ logger.warning(f"[LANGFUSE] propagate_attributes not available")
+ return await wrapped_func(*args, **kwargs)
+ else:
+ return await wrapped_func(*args, **kwargs)
+
+ return async_wrapper # type: ignore[return-value]
+
+ return decorator
diff --git a/bot/workspace/TOOLS.md b/bot/workspace/TOOLS.md
index 6e70d5b5..479aae3d 100644
--- a/bot/workspace/TOOLS.md
+++ b/bot/workspace/TOOLS.md
@@ -34,7 +34,7 @@ Search for user-related memories and events.
```
openviking_memory_commit(session_id: str, messages: list) -> str
```
-**All important conversations, events, and memories MUST be committed to OpenViking** for future retrieval and context understanding.
+**All user's important conversations, information, and memories MUST be committed to OpenViking** for future retrieval and context understanding.
---
@@ -172,49 +172,6 @@ cron(
)
```
-## OpenViking Tools
-
-### openviking_read
-Read content from OpenViking resources at different levels.
-```
-openviking_read(uri: str, level: str = "abstract") -> str
-```
-
-**Levels:**
-- `abstract`: L0 - Brief summary
-- `overview`: L1 - Medium overview
-- `read`: L2 - Full content
-
-### openviking_list
-List resources in an OpenViking path.
-```
-openviking_list(uri: str = "", recursive: bool = False) -> str
-```
-
-### openviking_search
-Search for resources in OpenViking using semantic search.
-```
-openviking_search(query: str, target_uri: str = None) -> str
-```
-
-### openviking_grep
-Search OpenViking resources using regex patterns.
-```
-openviking_grep(uri: str, pattern: str, case_insensitive: bool = False) -> str
-```
-
-### openviking_glob
-Find OpenViking resources using glob patterns.
-```
-openviking_glob(pattern: str, uri: str = "") -> str
-```
-
-### user_memory_search
-Search for user memories in OpenViking.
-```
-user_memory_search(query: str) -> str
-```
-
## Heartbeat Task Management
The `HEARTBEAT.md` file in the workspace is checked at regular intervals.
diff --git a/crates/ov_cli/Cargo.toml b/crates/ov_cli/Cargo.toml
index 2ff7eac0..8a93a1d8 100644
--- a/crates/ov_cli/Cargo.toml
+++ b/crates/ov_cli/Cargo.toml
@@ -16,6 +16,7 @@ reqwest = { version = "0.12", features = ["json", "multipart", "rustls-tls"], de
serde = { version = "1.0", features = ["derive"] }
serde_json = { version = "1.0", features = ["preserve_order"] }
tokio = { version = "1.38", features = ["full"] }
+futures = "0.3"
colored = "2.1"
dirs = "5.0"
anyhow = "1.0"
@@ -28,3 +29,4 @@ zip = "2.2"
tempfile = "3.12"
url = "2.5"
walkdir = "2.5"
+which = "6.0"
diff --git a/crates/ov_cli/src/commands/chat.rs b/crates/ov_cli/src/commands/chat.rs
new file mode 100644
index 00000000..23dcc940
--- /dev/null
+++ b/crates/ov_cli/src/commands/chat.rs
@@ -0,0 +1,329 @@
+//! Chat command for interacting with Vikingbot via OpenAPI
+
+use std::io::Write;
+use std::time::Duration;
+
+use clap::Parser;
+use reqwest::Client;
+use serde::{Deserialize, Serialize};
+
+use crate::error::{Error, Result};
+
+const DEFAULT_ENDPOINT: &str = "http://localhost:18790/api/v1/openapi";
+
+/// Chat with Vikingbot via OpenAPI
+#[derive(Debug, Parser)]
+pub struct ChatCommand {
+ /// API endpoint URL
+ #[arg(short, long, default_value = DEFAULT_ENDPOINT)]
+ pub endpoint: String,
+
+ /// API key for authentication
+ #[arg(short, long, env = "VIKINGBOT_API_KEY")]
+ pub api_key: Option<String>,
+
+ /// Session ID to use (creates new if not provided)
+ #[arg(short, long)]
+ pub session: Option<String>,
+
+ /// User ID
+ #[arg(short, long, default_value = "cli_user")]
+ pub user: String,
+
+ /// Non-interactive mode (single message)
+ #[arg(short = 'M', long)]
+ pub message: Option<String>,
+
+ /// Stream the response
+ #[arg(long)]
+ pub stream: bool,
+
+ /// Disable rich formatting
+ #[arg(long)]
+ pub no_format: bool,
+}
+
+/// Chat message for API
+#[derive(Debug, Serialize, Deserialize)]
+struct ChatMessage {
+ role: String,
+ content: String,
+}
+
+/// Chat request body
+#[derive(Debug, Serialize)]
+struct ChatRequest {
+ message: String,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ session_id: Option<String>,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ user_id: Option<String>,
+ stream: bool,
+ #[serde(skip_serializing_if = "Option::is_none")]
+ context: Option<Vec<ChatMessage>>,
+}
+
+/// Chat response
+#[derive(Debug, Deserialize)]
+struct ChatResponse {
+ session_id: String,
+ message: String,
+ #[serde(default)]
+ events: Option<Vec<serde_json::Value>>,
+}
+
+/// Stream event
+#[derive(Debug, Deserialize)]
+struct StreamEvent {
+ event: String,
+ data: serde_json::Value,
+}
+
+impl ChatCommand {
+ /// Execute the chat command
+ pub async fn execute(&self) -> Result<()> {
+ let client = Client::builder()
+ .timeout(Duration::from_secs(300))
+ .build()
+ .map_err(|e| Error::Network(format!("Failed to create HTTP client: {}", e)))?;
+
+ if let Some(message) = &self.message {
+ // Single message mode - ignore stream flag for now
+ self.send_message(&client, message).await
+ } else {
+ // Interactive mode
+ self.run_interactive(&client).await
+ }
+ }
+
+ /// Send a single message and get response
+ async fn send_message(&self, client: &Client, message: &str) -> Result<()> {
+ let url = format!("{}/chat", self.endpoint);
+
+ let request = ChatRequest {
+ message: message.to_string(),
+ session_id: self.session.clone(),
+ user_id: Some(self.user.clone()),
+ stream: false,
+ context: None,
+ };
+
+ let mut req_builder = client.post(&url).json(&request);
+
+ if let Some(api_key) = &self.api_key {
+ req_builder = req_builder.header("X-API-Key", api_key);
+ }
+
+ let response = req_builder
+ .send()
+ .await
+ .map_err(|e| Error::Network(format!("Failed to send request: {}", e)))?;
+
+ if !response.status().is_success() {
+ let status = response.status();
+ let text = response.text().await.unwrap_or_default();
+ return Err(Error::Api(format!("Request failed ({}): {}", status, text)));
+ }
+
+ let chat_response: ChatResponse = response
+ .json()
+ .await
+ .map_err(|e| Error::Parse(format!("Failed to parse response: {}", e)))?;
+
+ // Print events if any
+ if let Some(events) = &chat_response.events {
+ for event in events {
+ if let (Some(etype), Some(data)) = (
+ event.get("type").and_then(|v| v.as_str()),
+ event.get("data"),
+ ) {
+ match etype {
+ "reasoning" => {
+ let content = data.as_str().unwrap_or("");
+ if !self.no_format {
+ println!("\x1b[2mThink: {}...\x1b[0m", &content[..content.len().min(100)]);
+ }
+ }
+ "tool_call" => {
+ let content = data.as_str().unwrap_or("");
+ if !self.no_format {
+ println!("\x1b[2m├─ Calling: {}\x1b[0m", content);
+ }
+ }
+ "tool_result" => {
+ let content = data.as_str().unwrap_or("");
+ if !self.no_format {
+ let truncated = if content.len() > 150 {
+ format!("{}...", &content[..150])
+ } else {
+ content.to_string()
+ };
+ println!("\x1b[2m└─ Result: {}\x1b[0m", truncated);
+ }
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+
+ // Print final response
+ if !self.no_format {
+ println!("\n\x1b[1;31mBot:\x1b[0m");
+ println!("{}", chat_response.message);
+ println!();
+ } else {
+ println!("{}", chat_response.message);
+ }
+
+ Ok(())
+ }
+
+ /// Run interactive chat mode
+ async fn run_interactive(&self, client: &Client) -> Result<()> {
+ println!("Vikingbot Chat - Interactive Mode");
+ println!("Endpoint: {}", self.endpoint);
+ if let Some(session) = &self.session {
+ println!("Session: {}", session);
+ }
+ println!("Type 'exit', 'quit', or press Ctrl+C to exit");
+ println!("----------------------------------------\n");
+
+ let mut session_id = self.session.clone();
+
+ loop {
+ // Read input
+ print!("\x1b[1;32mYou:\x1b[0m ");
+ std::io::stdout().flush().map_err(|e| Error::Io(e))?;
+
+ let mut input = String::new();
+ std::io::stdin().read_line(&mut input).map_err(|e| Error::Io(e))?;
+ let input = input.trim();
+
+ if input.is_empty() {
+ continue;
+ }
+
+ // Check for exit
+ if input.eq_ignore_ascii_case("exit") || input.eq_ignore_ascii_case("quit") {
+ println!("\nGoodbye!");
+ break;
+ }
+
+ // Send message
+ let url = format!("{}/chat", self.endpoint);
+
+ let request = ChatRequest {
+ message: input.to_string(),
+ session_id: session_id.clone(),
+ user_id: Some(self.user.clone()),
+ stream: false,
+ context: None,
+ };
+
+ let mut req_builder = client.post(&url).json(&request);
+
+ if let Some(api_key) = &self.api_key {
+ req_builder = req_builder.header("X-API-Key", api_key);
+ }
+
+ match req_builder.send().await {
+ Ok(response) => {
+ if response.status().is_success() {
+ match response.json::<ChatResponse>().await {
+ Ok(chat_response) => {
+ // Save session ID
+ if session_id.is_none() {
+ session_id = Some(chat_response.session_id.clone());
+ }
+
+ // Print events
+ if let Some(events) = chat_response.events {
+ for event in events {
+ if let (Some(etype), Some(data)) = (
+ event.get("type").and_then(|v| v.as_str()),
+ event.get("data"),
+ ) {
+ match etype {
+ "reasoning" => {
+ let content = data.as_str().unwrap_or("");
+ if content.len() > 100 {
+ println!("\x1b[2mThink: {}...\x1b[0m", &content[..100]);
+ } else {
+ println!("\x1b[2mThink: {}\x1b[0m", content);
+ }
+ }
+ "tool_call" => {
+ println!("\x1b[2m├─ Calling: {}\x1b[0m", data.as_str().unwrap_or(""));
+ }
+ "tool_result" => {
+ let content = data.as_str().unwrap_or("");
+ let truncated = if content.len() > 150 {
+ format!("{}...", &content[..150])
+ } else {
+ content.to_string()
+ };
+ println!("\x1b[2m└─ Result: {}\x1b[0m", truncated);
+ }
+ _ => {}
+ }
+ }
+ }
+ }
+
+ // Print response
+ println!("\n\x1b[1;31mBot:\x1b[0m");
+ println!("{}", chat_response.message);
+ println!();
+ }
+ Err(e) => {
+ eprintln!("\x1b[1;31mError parsing response: {}\x1b[0m", e);
+ }
+ }
+ } else {
+ let status = response.status();
+ let text = response.text().await.unwrap_or_default();
+ eprintln!("\x1b[1;31mRequest failed ({}): {}\x1b[0m", status, text);
+ }
+ }
+ Err(e) => {
+ eprintln!("\x1b[1;31mFailed to send request: {}\x1b[0m", e);
+ }
+ }
+ }
+
+ println!("\nGoodbye!");
+ Ok(())
+ }
+}
+
+impl ChatCommand {
+ /// Execute the chat command (public wrapper)
+ pub async fn run(&self) -> Result<()> {
+ self.execute().await
+ }
+}
+
+impl ChatCommand {
+ /// Create a new ChatCommand with the given parameters
+ #[allow(clippy::too_many_arguments)]
+ pub fn new(
+ endpoint: String,
+ api_key: Option<String>,
+ session: Option<String>,
+ user: String,
+ message: Option<String>,
+ stream: bool,
+ no_format: bool,
+ ) -> Self {
+ Self {
+ endpoint,
+ api_key,
+ session,
+ user,
+ message,
+ stream,
+ no_format,
+ }
+ }
+}
diff --git a/crates/ov_cli/src/commands/mod.rs b/crates/ov_cli/src/commands/mod.rs
index be3b61fd..1e8d1bcd 100644
--- a/crates/ov_cli/src/commands/mod.rs
+++ b/crates/ov_cli/src/commands/mod.rs
@@ -1,4 +1,5 @@
pub mod admin;
+pub mod chat;
pub mod content;
pub mod search;
pub mod filesystem;
diff --git a/crates/ov_cli/src/main.rs b/crates/ov_cli/src/main.rs
index a0d58d8f..72bdfeb1 100644
--- a/crates/ov_cli/src/main.rs
+++ b/crates/ov_cli/src/main.rs
@@ -326,6 +326,21 @@ enum Commands {
#[arg(default_value = "viking://")]
uri: String,
},
+ /// Chat with vikingbot agent
+ Chat {
+ /// Message to send to the agent
+ #[arg(short, long)]
+ message: Option<String>,
+ /// Session ID
+ #[arg(short, long, default_value = "cli__chat__default")]
+ session: String,
+ /// Render assistant output as Markdown
+ #[arg(long = "markdown", default_value = "true")]
+ markdown: bool,
+ /// Show vikingbot runtime logs during chat
+ #[arg(long = "logs", default_value = "false")]
+ logs: bool,
+ },
/// Configuration management
Config {
#[command(subcommand)]
@@ -462,8 +477,16 @@ enum ConfigCommands {
#[tokio::main]
async fn main() {
+ // Check for chat command first - handle it directly to bypass clap global args
+ // but we keep it in Cli enum so it shows up in help
+ let args: Vec<String> = std::env::args().collect();
+ if args.len() >= 2 && args[1] == "chat" {
+ handle_chat_direct(&args[2..]).await;
+ return;
+ }
+
let cli = Cli::parse();
-
+
let output_format = cli.output;
let compact = cli.compact;
@@ -560,6 +583,18 @@ async fn main() {
Commands::Tui { uri } => {
handle_tui(uri, ctx).await
}
+ Commands::Chat { message, session, markdown, logs } => {
+ let cmd = commands::chat::ChatCommand {
+ endpoint: std::env::var("VIKINGBOT_ENDPOINT").unwrap_or_else(|_| "http://localhost:18790/api/v1/openapi".to_string()),
+ api_key: std::env::var("VIKINGBOT_API_KEY").ok(),
+ session: Some(session),
+ user: "cli_user".to_string(),
+ message,
+ stream: false,
+ no_format: !markdown,
+ };
+ cmd.run().await
+ }
Commands::Config { action } => handle_config(action, ctx).await,
Commands::Version => {
println!("{}", env!("CARGO_PKG_VERSION"));
@@ -991,6 +1026,69 @@ async fn handle_health(ctx: CliContext) -> Result<()> {
Ok(())
}
+/// Handle `ov chat` by shelling out to the external `vikingbot` CLI,
+/// bypassing clap so arguments after `chat` are forwarded verbatim.
+async fn handle_chat_direct(args: &[String]) {
+    use tokio::process::Command;
+
+    // First check if vikingbot is available: either on PATH, or as an
+    // importable Python module.
+    let vikingbot_available = which::which("vikingbot").is_ok() || {
+        // Also check if we can import the module
+        let python = std::env::var("PYTHON").unwrap_or_else(|_| "python3".to_string());
+        let check = Command::new(&python)
+            .args(["-c", "import vikingbot; print('ok')"])
+            .output()
+            .await;
+        check.map(|o| o.status.success()).unwrap_or(false)
+    };
+
+    if !vikingbot_available {
+        eprintln!("Error: vikingbot not found. Please install vikingbot first:");
+        eprintln!();
+        eprintln!(" Option 1: Install from local source (recommended for development)");
+        eprintln!(" cd bot");
+        eprintln!(" uv pip install -e \".[dev]\"");
+        eprintln!();
+        eprintln!(" Option 2: Install from PyPI (coming soon)");
+        eprintln!(" pip install vikingbot");
+        eprintln!();
+        std::process::exit(1);
+    }
+
+    // Try to find vikingbot executable first; fall back to `python -m`.
+    let (cmd, mut vikingbot_args) = if let Ok(vikingbot) = which::which("vikingbot") {
+        (vikingbot, vec!["chat".to_string()])
+    } else {
+        let python = std::env::var("PYTHON").unwrap_or_else(|_| "python3".to_string());
+        (
+            std::path::PathBuf::from(python),
+            vec!["-m".to_string(), "vikingbot.cli.commands".to_string(), "chat".to_string()],
+        )
+    };
+
+    // Always add our default session first
+    vikingbot_args.push("--session".to_string());
+    vikingbot_args.push("cli__chat__default".to_string());
+
+    // Now add all user args - if user provided --session it will override the default
+    // NOTE(review): this assumes the vikingbot CLI lets a later --session
+    // occurrence win over the earlier default — confirm against its parser.
+    vikingbot_args.extend(args.iter().cloned());
+
+    // Execute and pass through all signals
+    let status = Command::new(&cmd)
+        .args(&vikingbot_args)
+        .status()
+        .await;
+
+    match status {
+        // Child ran but exited non-zero: propagate its exit code
+        // (1 when terminated by a signal, where code() is None).
+        Ok(s) if !s.success() => {
+            std::process::exit(s.code().unwrap_or(1));
+        }
+        // Spawning the child failed entirely (e.g. binary vanished).
+        Err(e) => {
+            eprintln!("Error: {}", e);
+            std::process::exit(1);
+        }
+        _ => {}
+    }
+}
+
async fn handle_tui(uri: String, ctx: CliContext) -> Result<()> {
let client = ctx.get_client();
tui::run_tui(client, &uri).await
diff --git a/openviking/server/app.py b/openviking/server/app.py
index 038aeb25..a99d248b 100644
--- a/openviking/server/app.py
+++ b/openviking/server/app.py
@@ -16,6 +16,7 @@
from openviking.server.models import ERROR_CODE_TO_HTTP_STATUS, ErrorInfo, Response
from openviking.server.routers import (
admin_router,
+ bot_router,
content_router,
debug_router,
filesystem_router,
@@ -147,6 +148,14 @@ async def general_error_handler(request: Request, exc: Exception):
).model_dump(),
)
+ # Configure Bot API if --with-bot is enabled
+ if config.with_bot:
+ import openviking.server.routers.bot as bot_module
+ bot_module.set_bot_api_url(config.bot_api_url)
+ logger.info(f"Bot API proxy enabled, forwarding to {config.bot_api_url}")
+ else:
+ logger.info("Bot API proxy disabled (use --with-bot to enable)")
+
# Register routers
app.include_router(system_router)
app.include_router(admin_router)
@@ -159,5 +168,6 @@ async def general_error_handler(request: Request, exc: Exception):
app.include_router(pack_router)
app.include_router(debug_router)
app.include_router(observer_router)
+ app.include_router(bot_router, prefix="/bot/v1")
return app
diff --git a/openviking/server/bootstrap.py b/openviking/server/bootstrap.py
index 8d9d95cd..83b8149c 100644
--- a/openviking/server/bootstrap.py
+++ b/openviking/server/bootstrap.py
@@ -4,6 +4,9 @@
import argparse
import os
+import subprocess
+import sys
+import time
import uvicorn
@@ -50,6 +53,23 @@ def main():
default=None,
help="Path to ov.conf config file",
)
+ parser.add_argument(
+ "--bot",
+ action="store_true",
+ help="Also start vikingbot gateway after server starts",
+ )
+ parser.add_argument(
+ "--with-bot",
+ action="store_true",
+ dest="with_bot",
+ help="Enable Bot API proxy to Vikingbot (requires Vikingbot running)",
+ )
+ parser.add_argument(
+ "--bot-url",
+ default="http://localhost:18790",
+ dest="bot_url",
+ help="Vikingbot OpenAPIChannel URL (default: http://localhost:18790)",
+ )
args = parser.parse_args()
@@ -66,6 +86,10 @@ def main():
config.host = args.host
if args.port is not None:
config.port = args.port
+ if args.with_bot:
+ config.with_bot = True
+ if args.bot_url:
+ config.bot_api_url = args.bot_url
# Configure logging for Uvicorn
configure_uvicorn_logging()
@@ -73,7 +97,100 @@ def main():
# Create and run app
app = create_app(config)
print(f"OpenViking HTTP Server is running on {config.host}:{config.port}")
- uvicorn.run(app, host=config.host, port=config.port, log_config=None)
+ if config.with_bot:
+ print(f"Bot API proxy enabled, forwarding to {config.bot_api_url}")
+
+ # Start vikingbot gateway if --with-bot is set
+ bot_process = None
+ if args.with_bot:
+ bot_process = _start_vikingbot_gateway()
+
+ try:
+ uvicorn.run(app, host=config.host, port=config.port, log_config=None)
+ finally:
+ # Cleanup vikingbot process on shutdown
+ if bot_process is not None:
+ _stop_vikingbot_gateway(bot_process)
+
+
+def _start_vikingbot_gateway() -> subprocess.Popen:
+ """Start vikingbot gateway as a subprocess."""
+ print("Starting vikingbot gateway...")
+
+ # Check if vikingbot is available
+ vikingbot_cmd = None
+ if subprocess.run(["which", "vikingbot"], capture_output=True).returncode == 0:
+ vikingbot_cmd = ["vikingbot", "gateway"]
+ else:
+ # Try python -m vikingbot
+ python_cmd = sys.executable
+ try:
+ result = subprocess.run(
+ [python_cmd, "-m", "vikingbot", "--help"],
+ capture_output=True,
+ timeout=5
+ )
+ if result.returncode == 0:
+ vikingbot_cmd = [python_cmd, "-m", "vikingbot", "gateway"]
+ except (subprocess.TimeoutExpired, FileNotFoundError):
+ pass
+
+ if vikingbot_cmd is None:
+ print("Warning: vikingbot not found. Please install vikingbot first.")
+ print(" cd bot && uv pip install -e '.[dev]'")
+ return None
+
+ # Start vikingbot gateway process
+ try:
+ # Set environment to ensure it uses the same Python environment
+ env = os.environ.copy()
+
+ process = subprocess.Popen(
+ vikingbot_cmd,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ text=True,
+ env=env,
+ )
+
+ # Wait a moment to check if it started successfully
+ time.sleep(2)
+ if process.poll() is not None:
+ # Process exited early
+ stdout, stderr = process.communicate(timeout=1)
+ print(f"Warning: vikingbot gateway exited early (code {process.returncode})")
+ if stderr:
+ print(f"Error: {stderr[:500]}")
+ return None
+
+ print(f"Vikingbot gateway started (PID: {process.pid})")
+ return process
+
+ except Exception as e:
+ print(f"Warning: Failed to start vikingbot gateway: {e}")
+ return None
+
+
+def _stop_vikingbot_gateway(process: subprocess.Popen | None) -> None:
+    """Stop the vikingbot gateway subprocess.
+
+    Attempts graceful termination (SIGTERM on POSIX) and escalates to a
+    hard kill after a 5 second grace period. Safe to call with ``None``
+    (no-op), matching the ``None`` returned by the start helper on failure.
+    """
+    if process is None:
+        return
+
+    print(f"\nStopping vikingbot gateway (PID: {process.pid})...")
+
+    try:
+        # Try graceful termination first
+        process.terminate()
+        try:
+            process.wait(timeout=5)
+            print("Vikingbot gateway stopped gracefully.")
+        except subprocess.TimeoutExpired:
+            # Force kill if it doesn't stop in time
+            process.kill()
+            process.wait()
+            print("Vikingbot gateway force killed.")
+    except Exception as e:
+        print(f"Error stopping vikingbot gateway: {e}")
if __name__ == "__main__":
diff --git a/openviking/server/config.py b/openviking/server/config.py
index 9b1631d6..2fdbcbdf 100644
--- a/openviking/server/config.py
+++ b/openviking/server/config.py
@@ -25,6 +25,8 @@ class ServerConfig:
port: int = 1933
root_api_key: Optional[str] = None
cors_origins: List[str] = field(default_factory=lambda: ["*"])
+ with_bot: bool = False # Enable Bot API proxy to Vikingbot
+ bot_api_url: str = "http://localhost:18790" # Vikingbot OpenAPIChannel URL (default port)
def load_server_config(config_path: Optional[str] = None) -> ServerConfig:
diff --git a/openviking/server/routers/__init__.py b/openviking/server/routers/__init__.py
index d90e6687..05a75f97 100644
--- a/openviking/server/routers/__init__.py
+++ b/openviking/server/routers/__init__.py
@@ -3,6 +3,7 @@
"""OpenViking HTTP Server routers."""
from openviking.server.routers.admin import router as admin_router
+from openviking.server.routers.bot import router as bot_router
from openviking.server.routers.content import router as content_router
from openviking.server.routers.debug import router as debug_router
from openviking.server.routers.filesystem import router as filesystem_router
@@ -16,6 +17,7 @@
__all__ = [
"admin_router",
+ "bot_router",
"system_router",
"resources_router",
"filesystem_router",
diff --git a/openviking/server/routers/bot.py b/openviking/server/routers/bot.py
new file mode 100644
index 00000000..b39f16f8
--- /dev/null
+++ b/openviking/server/routers/bot.py
@@ -0,0 +1,204 @@
+"""Bot API router for proxying requests to Vikingbot OpenAPIChannel.
+
+This router provides endpoints for the Bot API that proxy requests to the
+Vikingbot OpenAPIChannel when the --with-bot option is enabled.
+"""
+
+import json
+import os
+from typing import AsyncGenerator, Optional
+
+import httpx
+from fastapi import APIRouter, Depends, Header, HTTPException, Request, status
+from fastapi.responses import JSONResponse, StreamingResponse
+from loguru import logger
+
+router = APIRouter(prefix="", tags=["bot"])
+
+# Bot API configuration - set when --with-bot is enabled
+BOT_API_URL: Optional[str] = None # e.g., "http://localhost:18791"
+
+
+def set_bot_api_url(url: str) -> None:
+    """Set the Bot API URL. Called by app.py when --with-bot is enabled.
+
+    Stores the URL in the module-level BOT_API_URL so the request
+    handlers in this router can read it.
+    """
+    global BOT_API_URL
+    BOT_API_URL = url
+
+
+def get_bot_url() -> str:
+    """Get the Bot API URL, raising 503 if not configured.
+
+    Raises:
+        HTTPException: 503 when the server was started without --with-bot
+            (BOT_API_URL was never set).
+    """
+    if BOT_API_URL is None:
+        raise HTTPException(
+            status_code=status.HTTP_503_SERVICE_UNAVAILABLE,
+            detail="Bot service not enabled. Start server with --with-bot option.",
+        )
+    return BOT_API_URL
+
+
+async def verify_auth(request: Request) -> Optional[str]:
+    """Extract and return the authorization token from the request.
+
+    NOTE(review): despite the name, this does NOT validate the token —
+    it only extracts it so it can be forwarded to the bot service.
+
+    Checks the X-API-Key header first, then a Bearer Authorization
+    header. Returns None when neither is present.
+    """
+    # Try X-API-Key header first
+    api_key = request.headers.get("X-API-Key")
+    if api_key:
+        return api_key
+
+    # Try Authorization header (Bearer token)
+    auth_header = request.headers.get("Authorization")
+    if auth_header and auth_header.startswith("Bearer "):
+        return auth_header[7:]  # Remove "Bearer " prefix
+
+    return None
+
+
+@router.get("/health")
+async def health_check(request: Request):
+ """Health check endpoint for Bot API.
+
+ Returns 503 if --with-bot is not enabled.
+ Proxies to Vikingbot health check if enabled.
+ """
+ bot_url = get_bot_url()
+
+ try:
+ async with httpx.AsyncClient() as client:
+ print(f'url={f"{bot_url}/bot/v1/health"}')
+ # Forward to Vikingbot OpenAPIChannel health endpoint
+ response = await client.get(
+ f"{bot_url}/bot/v1/health",
+ timeout=5.0,
+ )
+ response.raise_for_status()
+ return response.json()
+ except httpx.RequestError as e:
+ logger.error(f"Failed to connect to bot service at {bot_url}: {e}")
+ raise HTTPException(
+ status_code=status.HTTP_502_BAD_GATEWAY,
+ detail=f"Bot service unavailable: {str(e)}",
+ )
+ except httpx.HTTPStatusError as e:
+ logger.error(f"Bot service returned error: {e}")
+ raise HTTPException(
+ status_code=status.HTTP_502_BAD_GATEWAY,
+ detail=f"Bot service error: {e.response.text}",
+ )
+
+
+@router.post("/chat")
+async def chat(request: Request):
+    """Send a message to the bot and get a response.
+
+    Proxies the request body to the Vikingbot OpenAPIChannel
+    /bot/v1/chat endpoint and returns its JSON response.
+
+    Raises:
+        HTTPException: 400 for an invalid JSON body; the upstream status
+            code for 4xx bot errors; 502 when the bot service is
+            unreachable or returns a 5xx error.
+    """
+    bot_url = get_bot_url()
+    auth_token = await verify_auth(request)
+
+    # Read request body
+    try:
+        body = await request.json()
+    except json.JSONDecodeError:
+        raise HTTPException(
+            status_code=status.HTTP_400_BAD_REQUEST,
+            detail="Invalid JSON in request body",
+        )
+
+    try:
+        async with httpx.AsyncClient() as client:
+            # Build headers. The token is always forwarded as X-API-Key,
+            # even when the client supplied it as a Bearer token.
+            headers = {"Content-Type": "application/json"}
+            if auth_token:
+                headers["X-API-Key"] = auth_token
+
+            # Forward to Vikingbot OpenAPIChannel chat endpoint
+            response = await client.post(
+                f"{bot_url}/bot/v1/chat",
+                json=body,
+                headers=headers,
+                timeout=300.0,  # 5 minute timeout for chat
+            )
+            response.raise_for_status()
+            return response.json()
+    except httpx.RequestError as e:
+        logger.error(f"Failed to connect to bot service: {e}")
+        raise HTTPException(
+            status_code=status.HTTP_502_BAD_GATEWAY,
+            detail=f"Bot service unavailable: {str(e)}",
+        )
+    except httpx.HTTPStatusError as e:
+        logger.error(f"Bot service returned error: {e}")
+        # Forward the status code if it's a client error
+        if e.response.status_code < 500:
+            raise HTTPException(
+                status_code=e.response.status_code,
+                detail=e.response.text,
+            )
+        raise HTTPException(
+            status_code=status.HTTP_502_BAD_GATEWAY,
+            detail=f"Bot service error: {e.response.text}",
+        )
+
+
+@router.post("/chat/stream")
+async def chat_stream(request: Request):
+ """Send a message to the bot and get a streaming response.
+
+ Proxies the request to Vikingbot OpenAPIChannel with SSE streaming.
+ """
+ bot_url = get_bot_url()
+ auth_token = await verify_auth(request)
+
+ # Read request body
+ try:
+ body = await request.json()
+ except json.JSONDecodeError:
+ raise HTTPException(
+ status_code=status.HTTP_400_BAD_REQUEST,
+ detail="Invalid JSON in request body",
+ )
+
+ async def event_stream() -> AsyncGenerator[str, None]:
+ """Generate SSE events from bot response stream."""
+ try:
+ async with httpx.AsyncClient() as client:
+ # Build headers
+ headers = {"Content-Type": "application/json"}
+ if auth_token:
+ headers["X-API-Key"] = auth_token
+
+ # Forward to Vikingbot OpenAPIChannel stream endpoint
+ async with client.stream(
+ "POST",
+ f"{bot_url}/chat/stream",
+ json=body,
+ headers=headers,
+ timeout=300.0,
+ ) as response:
+ response.raise_for_status()
+
+ # Stream the response content
+ async for line in response.aiter_lines():
+ if line:
+ # Forward the SSE line
+ yield f"{line}\n\n"
+ except httpx.RequestError as e:
+ logger.error(f"Failed to connect to bot service: {e}")
+ error_event = {
+ "event": "error",
+ "data": json.dumps({"error": f"Bot service unavailable: {str(e)}"}),
+ }
+ yield f"data: {json.dumps(error_event)}\n\n"
+ except httpx.HTTPStatusError as e:
+ logger.error(f"Bot service returned error: {e}")
+ error_event = {
+ "event": "error",
+ "data": json.dumps({"error": f"Bot service error: {e.response.text}"}),
+ }
+ yield f"data: {json.dumps(error_event)}\n\n"
+
+ return StreamingResponse(
+ event_stream(),
+ media_type="text/event-stream",
+ headers={
+ "Cache-Control": "no-cache",
+ "Connection": "keep-alive",
+ },
+ )
diff --git a/openviking_cli/cli/commands/chat.py b/openviking_cli/cli/commands/chat.py
new file mode 100644
index 00000000..71c7b6ae
--- /dev/null
+++ b/openviking_cli/cli/commands/chat.py
@@ -0,0 +1,82 @@
+# Copyright (c) 2026 Beijing Volcano Engine Technology Co., Ltd.
+# SPDX-License-Identifier: Apache-2.0
+"""Chat command - wrapper for vikingbot agent."""
+
+import importlib.util
+import shutil
+import subprocess
+import sys
+
+import typer
+
+
+def _check_vikingbot() -> bool:
+    """Check if the vikingbot package is importable in this environment."""
+    return importlib.util.find_spec("vikingbot") is not None
+
+
+def chat(
+    message: str = typer.Option(None, "--message", "-m", help="Message to send to the agent"),
+    session_id: str = typer.Option(
+        "cli__default__direct", "--session", "-s", help="Session ID"
+    ),
+    markdown: bool = typer.Option(
+        True, "--markdown/--no-markdown", help="Render assistant output as Markdown"
+    ),
+    logs: bool = typer.Option(
+        False, "--logs/--no-logs", help="Show vikingbot runtime logs during chat"
+    ),
+):
+    """
+    Chat with vikingbot agent.
+
+    This is equivalent to `vikingbot chat`.
+
+    NOTE(review): `message` defaults to None, so its annotation should
+    be Optional[str] — left as-is here to avoid an import change.
+    """
+    # Fail early with install instructions if vikingbot is not importable.
+    if not _check_vikingbot():
+        typer.echo(
+            typer.style(
+                "Error: vikingbot not found. Please install vikingbot first:",
+                fg="red",
+            )
+        )
+        typer.echo()
+        typer.echo(" Option 1: Install from local source (recommended for development)")
+        typer.echo(" cd bot")
+        typer.echo(" uv pip install -e \".[dev]\"")
+        typer.echo()
+        typer.echo(" Option 2: Install from PyPI (coming soon)")
+        typer.echo(" pip install vikingbot")
+        typer.echo()
+        raise typer.Exit(1)
+
+    # Build the command arguments to forward to the vikingbot CLI
+    args = []
+
+    if message:
+        args.extend(["--message", message])
+    args.extend(["--session", session_id])
+    if not markdown:
+        args.append("--no-markdown")
+    if logs:
+        args.append("--logs")
+
+    # Check if vikingbot command exists
+    vikingbot_path = shutil.which("vikingbot")
+
+    if vikingbot_path:
+        # Build the command: vikingbot chat [args...]
+        full_args = [vikingbot_path, "chat"] + args
+    else:
+        # Fallback: use python -m
+        full_args = [sys.executable, "-m", "vikingbot.cli.commands", "chat"] + args
+
+    # Pass through all arguments to vikingbot agent; mirror its exit code
+    try:
+        subprocess.run(full_args, check=True)
+    except subprocess.CalledProcessError as e:
+        raise typer.Exit(e.returncode)
+
+
+def register(app: typer.Typer) -> None:
+    """Register the chat command on the given Typer app."""
+    app.command("chat")(chat)
diff --git a/openviking_cli/client/http.py b/openviking_cli/client/http.py
index 0c6f612f..312f5d08 100644
--- a/openviking_cli/client/http.py
+++ b/openviking_cli/client/http.py
@@ -143,9 +143,9 @@ def __init__(
             timeout: HTTP request timeout in seconds. Default 60.0.
         """
         if url is None:
             config_path = resolve_config_path(None, OPENVIKING_CLI_CONFIG_ENV, DEFAULT_OVCLI_CONF)
             if config_path:
                 cfg = load_json_config(config_path)
                 url = cfg.get("url")
                 api_key = api_key or cfg.get("api_key")
                 agent_id = agent_id or cfg.get("agent_id")
diff --git a/openviking_cli/utils/config/open_viking_config.py b/openviking_cli/utils/config/open_viking_config.py
index 427e4ae0..b78020b4 100644
--- a/openviking_cli/utils/config/open_viking_config.py
+++ b/openviking_cli/utils/config/open_viking_config.py
@@ -128,6 +128,7 @@ def from_dict(cls, config: Dict[str, Any]) -> "OpenVikingConfig":
# Remove sections managed by other loaders (e.g. server config)
config_copy.pop("server", None)
+ config_copy.pop("bot", None)
# Handle parser configurations from nested "parsers" section
parser_configs = {}
diff --git a/test_chat_command.py b/test_chat_command.py
new file mode 100644
index 00000000..520e7a8c
--- /dev/null
+++ b/test_chat_command.py
@@ -0,0 +1,31 @@
+#!/usr/bin/env python3
+"""Test script to verify the ov chat command implementation."""
+
+import sys
+from pathlib import Path
+
+# Add the root directory to Python path
+root_dir = Path(__file__).parent
+sys.path.insert(0, str(root_dir))
+
+from openviking_cli.cli.commands.chat import _check_vikingbot
+
+
+def test_vikingbot_detection():
+    """Test that vikingbot detection works.
+
+    Returns True when the vikingbot package is importable; also prints
+    the outcome of a direct import for manual inspection.
+    """
+    print("Testing vikingbot detection...")
+    has_vikingbot = _check_vikingbot()
+    print(f"vikingbot available: {has_vikingbot}")
+
+    # Also check via import
+    try:
+        import vikingbot
+        print(f"Direct import successful: vikingbot {vikingbot.__version__}")
+    except ImportError:
+        print("Direct import: vikingbot not found")
+
+    return has_vikingbot
+
+
+if __name__ == "__main__":
+ test_vikingbot_detection()
diff --git a/test_chat_integration.py b/test_chat_integration.py
new file mode 100644
index 00000000..9d97178b
--- /dev/null
+++ b/test_chat_integration.py
@@ -0,0 +1,77 @@
+#!/usr/bin/env python3
+"""Integration test for ov chat command."""
+
+import sys
+import subprocess
+from pathlib import Path
+
+# Add root to path
+root_dir = Path(__file__).parent
+sys.path.insert(0, str(root_dir))
+
+
+def test_chat_command_exists():
+    """Test that the chat command is registered on the CLI.
+
+    Runs `python -m openviking_cli.cli.main --help` in a subprocess and
+    returns True when "chat" appears in the help output.
+    """
+    print("Testing chat command registration...")
+    result = subprocess.run(
+        [sys.executable, "-m", "openviking_cli.cli.main", "--help"],
+        capture_output=True,
+        text=True,
+    )
+    print("Exit code:", result.returncode)
+    print("\nSTDOUT:")
+    print(result.stdout)
+    if result.stderr:
+        print("\nSTDERR:")
+        print(result.stderr)
+
+    # Check if chat is in the help output
+    if "chat" in result.stdout:
+        print("\n✓ SUCCESS: chat command found in help!")
+        return True
+    else:
+        print("\n✗ FAILED: chat command not found in help")
+        return False
+
+
+def test_chat_help():
+    """Test that chat --help shows correct parameters.
+
+    Returns True when every expected option name appears in the help text.
+    """
+    print("\n\nTesting chat --help...")
+    result = subprocess.run(
+        [sys.executable, "-m", "openviking_cli.cli.main", "chat", "--help"],
+        capture_output=True,
+        text=True,
+    )
+    print("Exit code:", result.returncode)
+    print("\nSTDOUT:")
+    print(result.stdout)
+    if result.stderr:
+        print("\nSTDERR:")
+        print(result.stderr)
+
+    # Check for expected parameters
+    expected_params = ["--message", "-m", "--session", "-s", "--markdown", "--logs"]
+    found = all(p in result.stdout for p in expected_params)
+    if found:
+        print("\n✓ SUCCESS: All expected parameters found!")
+    else:
+        print("\n✗ FAILED: Some parameters missing")
+    # NOTE(review): returning at function level, matching the sibling test
+    # above. If the original had this return inside the else-branch, the
+    # success path would return None (falsy) and the runner would report
+    # failure on success — confirm indentation in the original file.
+    return found
+
+
+if __name__ == "__main__":
+ print("=" * 60)
+ print("Testing ov chat command integration")
+ print("=" * 60)
+ print()
+
+ success1 = test_chat_command_exists()
+ success2 = test_chat_help()
+
+ print("\n" + "=" * 60)
+ if success1 and success2:
+ print("✓ All tests passed!")
+ sys.exit(0)
+ else:
+ print("✗ Some tests failed!")
+ sys.exit(1)