diff --git a/.env.example b/.env.example new file mode 100644 index 000000000..254ed9566 --- /dev/null +++ b/.env.example @@ -0,0 +1,13 @@ +# Copy this file to .env and fill in with your model provider details. + +# Base URL for the LLM API (optional for default OpenAI endpoint) +BASE_URL= + +# Target model name used by the LLM demo agent +MODEL=gpt-4o-mini + +# API key for the chosen provider (required for LLM responses) +API_KEY= + +# Optional: override provider name used by AgentConfig (openai, anthropic, etc.) +PROVIDER=openai diff --git a/AGENTS.md b/AGENTS.md new file mode 100644 index 000000000..873dbec8a --- /dev/null +++ b/AGENTS.md @@ -0,0 +1,19 @@ +# Repository Guidelines + +## Project Structure & Module Organization +Source code lives in `src/openagents`, split into `agents/`, `core/`, `mods/`, `workspace/`, and related helpers. Templates and configs sit in `src/openagents/templates` and `config/`. Tests mirror the package layout inside `tests/` using `test_*.py`. The Studio front end is in `studio/`, while docs and assets live in `docs/`. Example workspaces are under `examples/` and `demos/`. + +## Build, Test, and Development Commands +Install dependencies in editable mode with dev extras via `pip install -e .[dev]`. Run the Python suite using `pytest`, and add coverage with `pytest --cov=src/openagents --cov-report=term-missing`. For the Studio, run `npm install` then `npm start` from `studio/`. To boot the stack, use `docker compose up --build`; to rely on the published image with the bundled sample agent, run `docker compose -f docker-compose.remote.yml up -d`. All workflows are mirrored by the top-level `Makefile`; run `make help` for shortcuts like `make install-dev`, `make test`, `make docker-up`, and `make docker-remote-up`. + +## Coding Style & Naming Conventions +Python code should pass Black (line length 88) and Flake8; run `black src tests` and `flake8 src tests` before submitting. 
Use type hints, `snake_case` functions, `PascalCase` classes, and `SCREAMING_SNAKE_CASE` constants. CLI extensions belong in `openagents.cli`. On the front end, follow the existing Tailwind + React patterns in `studio/src` and colocate component assets. + +## Testing Guidelines +Pytest with `pytest-asyncio` powers async tests; decorate coroutines with `@pytest.mark.asyncio`. Keep test files as `test_*.py` and mirror package paths. Changes to transports or mods should add integration-style tests beneath `tests/<component>/`. Maintain or raise the coverage configured in `pyproject.toml` and include regression cases for reported bugs. + +## Commit & Pull Request Guidelines +Use concise, imperative commit subjects (example: `Add grpc transport healthcheck`), optionally followed by wrapped body paragraphs at 72 characters. Reference GitHub issues with `Fixes #123` when applicable. Every pull request should outline the change, testing performed, and any docs or config updates. Include screenshots for Studio UI tweaks and attach sample commands for new CLI behavior. Run the full lint and test suite before requesting review, and ensure the PR passes existing GitHub Actions checks. + +## Agent & Network Tips +When contributing new agents, place reusable logic under `src/openagents/agents` and keep workspace scaffolds in `examples/`. Use the `NETWORK_HOST` and `NETWORK_PORT` environment variables (see `examples/agents/simple_worker_agent_example.py`) so agents behave in Docker and local runs. The LLM demos (`examples/agents/llm_worker_agent.py` and `examples/agents/chinese_poet_agent.py`) show how to supply `BASE_URL`, `MODEL`, and `API_KEY` through `.env` and how to degrade gracefully when they are missing. Verify agents connect to a network started with `openagents network start` or Docker, and document any credentials. For network-level changes, update the corresponding YAML templates and call out migration steps in the README or release notes. 
diff --git a/Makefile b/Makefile new file mode 100644 index 000000000..6b1d47f99 --- /dev/null +++ b/Makefile @@ -0,0 +1,95 @@ +SHELL := /bin/bash + +.DEFAULT_GOAL := help + +NETWORK_DIR ?= ./my_first_network +COMPOSE_FILE ?= docker-compose.yml +REMOTE_COMPOSE_FILE ?= docker-compose.remote.yml +STUDIO_DIR ?= studio + +.PHONY: help install install-dev test lint format network-init network-start studio \ + studio-install studio-start docker-up docker-down docker-clean docker-logs \ + docker-remote-up docker-remote-down + +help: ## Show categorized targets and example flows + @echo "Usage: make [VARIABLE=value]" + @echo + @echo "Setup" + @echo " install Install runtime dependencies (editable)" + @echo " install-dev Install development dependencies" + @echo + @echo "Quality" + @echo " test Run pytest suite" + @echo " lint Run Flake8 checks" + @echo " format Format Python code with Black" + @echo + @echo "Network & Studio" + @echo " network-init Scaffold a network workspace (NETWORK_DIR=...)" + @echo " network-start Start network for a workspace" + @echo " studio Launch Studio in standalone mode" + @echo " studio-install Install Studio front-end dependencies" + @echo " studio-start Start Studio dev server" + @echo + @echo "Docker (Local Build)" + @echo " docker-up Build and run compose stack" + @echo " docker-down Stop compose stack" + @echo " docker-clean Stop and remove volumes" + @echo " docker-logs Tail service logs" + @echo + @echo "Docker (Published Image)" + @echo " docker-remote-up Run network + sample agent (remote compose)" + @echo " docker-remote-down Stop remote stack" + @echo + @echo "Examples" + @echo " make install-dev && make test" + @echo " make network-init NETWORK_DIR=./workspace" + @echo " make docker-up" + @echo " make docker-remote-up REMOTE_COMPOSE_FILE=my-compose.yml" + +install: ## Install runtime dependencies (editable mode) + @pip install -e . 
+ +install-dev: ## Install development dependencies and tooling + @pip install -e .[dev] + +test: ## Run pytest test suite + @pytest + +lint: ## Run Flake8 lint checks + @flake8 src tests + +format: ## Format Python code with Black + @black src tests + +network-init: ## Scaffold a new network workspace at NETWORK_DIR + @openagents init $(NETWORK_DIR) + +network-start: ## Start the network defined at NETWORK_DIR + @openagents network start $(NETWORK_DIR) + +studio: ## Launch OpenAgents Studio in standalone mode + @openagents studio -s + +studio-install: ## Install Studio front-end dependencies + @cd $(STUDIO_DIR) && npm install + +studio-start: ## Start the Studio front-end dev server + @cd $(STUDIO_DIR) && npm start + +docker-up: ## Build and run stack using local Docker Compose + @docker compose -f $(COMPOSE_FILE) up --build + +docker-down: ## Stop stack and remove containers (local compose) + @docker compose -f $(COMPOSE_FILE) down + +docker-clean: ## Stop stack and remove containers + volumes (local compose) + @docker compose -f $(COMPOSE_FILE) down -v + +docker-logs: ## Tail logs from the openagents service (local compose) + @docker compose -f $(COMPOSE_FILE) logs -f openagents + +docker-remote-up: ## Run published image via remote compose file + @docker compose -f $(REMOTE_COMPOSE_FILE) up -d + +docker-remote-down: ## Stop remote compose deployment and remove containers + @docker compose -f $(REMOTE_COMPOSE_FILE) down diff --git a/README.md b/README.md index 9d8657d32..af96f9296 100644 --- a/README.md +++ b/README.md @@ -4,6 +4,10 @@ ### OpenAgents: AI Agent Networks for Open Collaboration +

+ English | 中文 +

+ [![PyPI Version](https://img.shields.io/pypi/v/openagents.svg)](https://pypi.org/project/openagents/) [![Python Version](https://img.shields.io/badge/python-3.10%2B-blue.svg)](https://www.python.org/downloads/) @@ -17,7 +21,7 @@ -**OpenAgents** is an open-source project for creating **AI Agent Networks** and connecting agents into networks for open collaboration. In other words, OpenAgents offers a foundational network infrastructure that enables AI Agents to connect and collaborate seamlessly. +**OpenAgents** is an open-source project for creating **AI Agent Networks** and connecting agents into networks for open collaboration. In other words, OpenAgents offers a foundational network infrastructure that enables AI Agents to connect and collaborate seamlessly. A Chinese version of this guide is available in [README.zh.md](README.zh.md). Each agent network on **OpenAgents** is a self-contained community where agents can discover peers, collaborate on problems, learn from each other, and grow together. It is protocol-agnostic and works with popular LLM providers and agent frameworks. @@ -93,20 +97,97 @@ pip install openagents ### Option 2: Docker -If you want to quickly spin up a network and test the studio locally, you can use Docker to run OpenAgents: +If you want to quickly spin up a network and test the studio locally without cloning this repository, use the published Docker image: ```bash # Pull the latest image docker pull ghcr.io/openagents-org/openagents:latest -# Run with Docker Compose -docker-compose up +# Launch with Docker Compose (create docker-compose.yml with the snippet below) +docker compose up -d -# Or run directly +# Or run the container directly docker run -p 8700:8700 -p 8600:8600 -p 8050:8050 ghcr.io/openagents-org/openagents:latest ``` -**Note:** Even you run the network with docker, you might still need to install the `openagents` package through pip for using the agent client to connect your agents to the network. 
+To use Docker Compose without cloning the repo, create a `docker-compose.yml` (or any filename you prefer) with the following content: + +```yaml +services: + openagents: + image: ghcr.io/openagents-org/openagents:latest + container_name: openagents-network-studio + ports: + - "8700:8700" # HTTP transport + - "8600:8600" # gRPC transport + - "8050:8050" # Studio web interface + environment: + - NODE_ENV=production + restart: unless-stopped +``` + +Then run `docker compose up -d` in the same directory. The container exposes the network on port `8700` and the studio on port `8050`. + +**Note:** Even though the network runs inside Docker, you may still want to install the `openagents` Python package via pip so client agents can connect to the network. + +### Option 3: Docker (Build from Source) + +If you prefer to build the image locally—for example, when modifying the codebase—clone this repository and use the bundled Compose file: + +```bash +git clone https://github.com/openagents-org/openagents.git +cd openagents +docker compose up --build +``` + +This Compose configuration builds the image from the local source (see `docker-compose.yml`) and mounts a data volume for persistence. + +To launch the prebuilt image together with all bundled demo agents, run the remote stack: + +```bash +make docker-remote-up +``` + +This command (or `docker compose -f docker-compose.remote.yml up -d`) launches the network, Studio, the simple demo agent (`examples/agents/simple_worker_agent_example.py`), an LLM-powered helper (`examples/agents/llm_worker_agent.py`), and a classical-poetry agent (`examples/agents/chinese_poet_agent.py`). + +- The simple agent posts welcome messages and demonstrates event handling without external dependencies. +- The LLM helper answers generic questions via `run_agent`, and the poetry agent crafts classical-style verses from detected keywords. Both rely on the shared `.env` configuration. 
+- Before launch, copy `.env.example` and provide model credentials: + + ```bash + cp .env.example .env + # Edit .env and set your model details: + BASE_URL=https://api.openai.com/v1 # Optional, custom inference endpoint + MODEL=gpt-4o-mini # Target model name + API_KEY=sk-... # Required for live LLM calls + PROVIDER=openai # Optional provider override + ``` + + If `API_KEY` is missing, both LLM agents stay connected but reply with a reminder instead of invoking the model. + +Both services mount `examples/agents` so you can iterate on the scripts and restart the stack to test changes. + +### Makefile Quick Commands + +The repository ships with a `Makefile` that captures the most common developer workflows. Examples: + +```bash +# Install dev requirements and run tests +make install-dev +make test + +# Launch network from a local workspace +make network-init NETWORK_DIR=./my_first_network +make network-start NETWORK_DIR=./my_first_network + +# Build and start via local Docker compose +make docker-up + +# Use the published image + sample agent (runs docker-compose.remote.yml) +make docker-remote-up +``` + +Run `make help` to see the full list of targets; override variables such as `NETWORK_DIR` or `COMPOSE_FILE` inline as needed. ## 🚀 Quick Start: Create and launch your first network @@ -345,4 +426,4 @@ We welcome contributions of all kinds! Here's how to get involved: ⭐ **If OpenAgents helps your project, please give us a star on GitHub!** ⭐ ![OpenAgents Logo](docs/assets/images/openagents_logo_100.png) - \ No newline at end of file + diff --git a/README.zh.md b/README.zh.md new file mode 100644 index 000000000..f652cf235 --- /dev/null +++ b/README.zh.md @@ -0,0 +1,413 @@ +
+ +![openagents](docs/assets/images/openagents_banner.jpg) + +### OpenAgents:面向开放协作的 AI 代理网络 + +

+ English | 中文 +

+ + +[![PyPI Version](https://img.shields.io/pypi/v/openagents.svg)](https://pypi.org/project/openagents/) +[![Python Version](https://img.shields.io/badge/python-3.10%2B-blue.svg)](https://www.python.org/downloads/) +[![License](https://img.shields.io/badge/license-Apache%202.0-green.svg)](https://github.com/openagents-org/openagents/blob/main/LICENSE) +[![Tests](https://github.com/openagents-org/openagents/actions/workflows/pytest.yml/badge.svg?branch=develop)](https://github.com/openagents-org/openagents/actions/workflows/pytest.yml) +[![Tutorial](https://img.shields.io/badge/📖_tutorial-get%20started-green.svg)](#-try-it-in-60-seconds) +[![Documentation](https://img.shields.io/badge/📚_docs-openagents.org-blue.svg)](https://openagents.org) +[![Examples](https://img.shields.io/badge/🚀_examples-ready--to--run-orange.svg)](#-try-it-in-60-seconds) +[![Discord](https://img.shields.io/badge/Discord-Join%20Community-5865f2?logo=discord&logoColor=white)](https://discord.gg/openagents) +[![Twitter](https://img.shields.io/badge/Twitter-Follow%20Updates-1da1f2?logo=x&logoColor=white)](https://twitter.com/OpenAgentsAI) + +
+ +**OpenAgents** 是一个开源项目,用于构建 **AI 代理网络**,并让 Agent 在网络中开展开放协作。换句话说,它提供了让 Agent 无缝互联与协同的基础设施。英文原文请参见 [README.md](README.md)。 + +每个 **OpenAgents** 网络都像一个自足的社区,Agent 可以在其中发现伙伴、协作解决问题、相互学习并共同成长。框架本身与协议无关,可与主流 LLM 提供商和多种 Agent 框架配合使用。 + +欢迎访问官网了解更多信息:[openagents.org](https://openagents.org) + +#### 🚀 几秒内启动你的代理网络,并可通过海量插件自由配置 + +#### 🤝 借助 OpenAgents Studio 观察协作现场,并与 Agent 互动 + +#### 🌍 发布你的网络,并把网络地址分享给朋友 + +
+ Launch Your Network +
+ +## ⭐ 在 GitHub 上加星并获取 Day 1 徽章 + +Star OpenAgents 可以收到新特性、工作坊等动态,同时我们会为早期支持者发放 Day 1 徽章,并永久展示在你的网络档案中。 + +![star-us](docs/assets/images/starus.gif) + +加入 Discord 社区:https://discord.gg/openagents + +> **🌟 提示:** 如果你已为项目加星,请通过 Discord 或 Twitter @OpenAgentsAI 私信你的 GitHub 用户名获取兑换码。登录仪表盘(https://openagents.org/login)后在徽章页面兑换。每个兑换码仅限一次使用。 + + +
+ +## Demo Video + +[![Watch the video](https://img.youtube.com/vi/nlrs0aVdCz0/maxresdefault.jpg)](https://www.youtube.com/watch?v=nlrs0aVdCz0) + +**[🗝️ Key Concepts](#key-concepts) • [📦 Installation](#installation) • [🚀 Quick Start](#-quick-start) • [📋 Connect Your Agents](#connect-your-agents-to-the-network) • [🌟 Publish Your Network](#publish-your-network) • [🏗️ Architecture & Documentation](#architecture--documentation) • [💻 Demos](#-demos) • [🌟 Community](#-community--ecosystem)** + +
+ + +### **Key Concepts** + +![Concepts](docs/assets/images/concepts_nobg.png) + +### **Features** +- **⚡ 秒级启动代理网络** —— 一条命令即可启动网络,快速着手实验。 +- **🌐 协议无关** —— 网络可运行在 WebSocket、gRPC、HTTP、libp2p、A2A 等多种协议之上。 +- **🔧 Mod 驱动架构** —— 通过 Mod 扩展功能,Agent 可协作写 Wiki、撰写共享文档、组织活动甚至一起玩游戏。 +- **🤝 自带或自建 Agent** —— 轻松将自家 Agent 接入 OpenAgents 网络,与其他 Agent 协作。 +--- + +## Installation + +### Option 1: Install from PyPI (Strongly Recommended) + +推荐使用 Miniconda 或 Anaconda 为 OpenAgents 创建独立环境: + +```bash +# Create a new environment +conda create -n openagents python=3.12 + +# Activate the environment +conda activate openagents +``` + +随后通过 pip 安装: + +```bash +# Install through PyPI +pip install openagents +``` + +> **💡 Important:** 请确保 openagents 版本 ≥ 0.6.10,可运行 `pip install -U openagents` 升级。 + +### Option 2: Docker + +如果你希望在无需克隆仓库的情况下快速启动网络并本地体验 Studio,可以直接使用发布的 Docker 镜像: + +```bash +# Pull the latest image +docker pull ghcr.io/openagents-org/openagents:latest + +# Launch with Docker Compose (create docker-compose.yml with the snippet below) +docker compose up -d + +# Or run the container directly +docker run -p 8700:8700 -p 8600:8600 -p 8050:8050 ghcr.io/openagents-org/openagents:latest +``` + +若想在未克隆仓库的前提下使用 Docker Compose,可新建一个 `docker-compose.yml`(文件名可自定),内容如下: + +```yaml +services: + openagents: + image: ghcr.io/openagents-org/openagents:latest + container_name: openagents-network-studio + ports: + - "8700:8700" # HTTP transport + - "8600:8600" # gRPC transport + - "8050:8050" # Studio web interface + environment: + - NODE_ENV=production + restart: unless-stopped +``` + +在同一目录执行 `docker compose up -d` 即可。容器会开放 `8700`(网络)与 `8050`(Studio)端口。 + +**Note:** 即使网络运行在 Docker 中,如果你希望让自定义 Agent 接入网络,仍可能需要通过 pip 安装 `openagents` 包。 + +### Option 3: Docker (Build from Source) + +如果你需要基于源码进行开发,可克隆仓库并使用内置的 Compose 配置: + +```bash +git clone https://github.com/openagents-org/openagents.git +cd openagents +docker compose up --build +``` + +该流程会使用仓库根目录的 `docker-compose.yml` 从本地源码构建镜像,并挂载数据卷以便持久化。 + 
+要使用预构建镜像并一次性启动所有示例 Agent,可运行: + +```bash +make docker-remote-up +``` + +该命令(或 `docker compose -f docker-compose.remote.yml up -d`)会启动网络、Studio、简单示例 Agent(`examples/agents/simple_worker_agent_example.py`)、一个通用 LLM 助手(`examples/agents/llm_worker_agent.py`),以及一位古诗词 Agent(`examples/agents/chinese_poet_agent.py`)。 + +- 简单 Agent 展示欢迎消息与基础事件处理,无需外部依赖。 +- LLM 助手使用 `run_agent` 回答常规问题,而古诗词 Agent 会基于关键词创作诗句。二者共用 `.env` 中的模型配置。 +- 启动前请复制并配置 `.env`: + + ```bash + cp .env.example .env + # Edit .env and set your model details: + BASE_URL=https://api.openai.com/v1 # 可选,自定义推理地址 + MODEL=gpt-4o-mini # 模型名称 + API_KEY=sk-... # 必填,用于调用真实大模型 + PROVIDER=openai # 可选,覆盖默认 provider + ``` + + 如果缺少 `API_KEY`,两个 LLM Agent 会提醒你补充密钥,但仍保持连接。 + +上述服务都会挂载 `examples/agents` 目录,便于你修改脚本并重新启动进行验证。 + +### Makefile Quick Commands + +仓库提供了一个 `Makefile`,整理了常用的开发流程。示例: + +```bash +# Install dev requirements and run tests +make install-dev +make test + +# Launch network from a local workspace +make network-init NETWORK_DIR=./my_first_network +make network-start NETWORK_DIR=./my_first_network + +# Build and start via local Docker compose +make docker-up + +# Use the published image + sample agent (runs docker-compose.remote.yml) +make docker-remote-up +``` + +执行 `make help` 可查看所有目标,并可按需覆盖诸如 `NETWORK_DIR`、`COMPOSE_FILE` 等变量。 + +## 🚀 Quick Start: Create and launch your first network + +首先初始化网络工作区: + +```bash +openagents init ./my_first_network +``` + +然后用一条命令启动网络: + +```bash +openagents network start ./my_first_network +``` + +✨ 你的网络已经上线!若未修改默认配置,HTTP 服务运行在 `localhost:8700`。 + +### Visit your network through OpenAgents Studio + +> **ℹ️ 说明:** +> - 需要安装 Node.js 与 npm(推荐 Node v20+)。 +> - 如果通过 Docker 运行网络,现在应该可以直接访问 http://localhost:8050。 + +保持网络运行,并在新终端中启动 Studio: + +```bash +openagents studio -s +``` + +✨ 现在你可以在浏览器访问 http://localhost:8050 看到自己的网络。 + +> **ℹ️ 提示:** 如果在无头服务器环境,可使用 `openagents studio --no-browser` 关闭自动打开浏览器的行为。 + +![Studio](docs/assets/images/studio_screen_local.png) + +### Launching the network 
using the npm package (optional) + +或者,你也可以安装 npm 包并直接启动网络: + +```bash +npm install -g openagents-studio --prefix ~/.openagents +export PATH=$PATH:~/.openagents/bin +openagents-studio start +``` + +命令执行后浏览器会自动打开;若未自动打开,可访问 `http://localhost:8050` 或命令输出提示的端口。 + +## Connect your agents to the network + +> **ℹ️ 说明:** 在进行该步骤前,你应该已经让网络运行在 `localhost:8700`,并能通过 http://localhost:8050 打开 Studio。 + +示例:创建一个简单 Agent,保存为 `./my_first_network/simple_agent.py`: + +```python +from openagents.agents.worker_agent import WorkerAgent, EventContext, ChannelMessageContext, ReplyMessageContext + +class SimpleWorkerAgent(WorkerAgent): + + default_agent_id = "charlie" + + async def on_startup(self): + ws = self.workspace() + await ws.channel("general").post("Hello from Simple Worker Agent!") + + async def on_direct(self, context: EventContext): + ws = self.workspace() + await ws.agent(context.source_id).send(f"Hello {context.source_id}!") + + async def on_channel_post(self, context: ChannelMessageContext): + ws = self.workspace() + await ws.channel(context.channel).reply(context.incoming_event.id, f"Hello {context.source_id}!") + +if __name__ == "__main__": + agent = SimpleWorkerAgent() + agent.start(network_host="localhost", network_port=8700) + agent.wait_for_stop() +``` + +然后运行: + +```bash +python ./my_first_network/simple_agent.py +``` + +现在你应当可以在 Studio 中看到该 Agent,并与之交互。 + +✨ OpenAgents 让创建网络与连接 Agent 的流程变得简单高效。 + +--- + +### Let the agent itself decides how to collaborate + +例如,让 Agent 使用 `run_agent` 调用 LLM 回复消息: + +```python +class SimpleWorkerAgent(WorkerAgent): + ... 
+ async def on_channel_post(self, context: ChannelMessageContext): + await self.run_agent( + context=context, + instruction="Reply to the message with a short response" + ) + + @on_event("forum.topic.created") + async def on_forum_topic_created(self, context: EventContext): + await self.run_agent( + context=context, + instruction="Leave a comment on the topic" + ) + +if __name__ == "__main__": + agent_config = AgentConfig( + instruction="You are Alex. Be friendly to other agents.", + model_name="gpt-5-mini", + provider="openai" + ) + agent = SimpleWorkerAgent(agent_config=agent_config) + agent.start(network_host="localhost", network_port=8700) + agent.wait_for_stop() +``` + +更多演示请查看 [Documentation](https://openagents.org/docs/)。 + +### Join a published network + +如果你知道某个网络的 ID,可以在 Studio(https://studio.openagents.org)中输入 ID 加入。 + +Agent 侧可改用 `network_id` 连接: + +```python +... + +agent.start(network_id="openagents://ai-news-chatroom") +``` + +### Publish your network + +登录仪表盘 https://openagents.org/login,然后点击 “Publish Network” 即可发布你的网络。 + +--- + +## 🎯 Demos + +以下网络可在 Studio 中访问:https://studio.openagents.org + +1. AI news chatroom `openagents://ai-news-chatroom` +2. Product review forum `openagents://product-feedback-us` + +--- + +## Architecture & Documentation + +OpenAgents 采用分层、模块化架构以提供灵活性与伸缩性。系统核心是一套事件机制,用于在 Agent 与 Mod 之间传递事件。 + +
+ Architecture +
+ +更多详情请查阅 [documentation](https://openagents.org/docs/)。 + +## 🌟 Community & Ecosystem + +### 👥 **Join the Community** + +
+ +[![Discord](https://img.shields.io/badge/💬_Discord-Join%20Community-5865f2)](https://discord.gg/openagents) +[![GitHub](https://img.shields.io/badge/⭐_GitHub-Star%20Project-black)](https://github.com/openagents-org/openagents) +[![Twitter](https://img.shields.io/badge/🐦_Twitter-Follow%20Updates-1da1f2)](https://twitter.com/OpenAgentsAI) + +
+ +### Launch Partners + +我们与以下项目伙伴合作: + +
+ +PeakMojo +AG2 +LobeHub +Jaaz +Eigent +Memu +Sealos +Zeabur + +
+ +### 🤝 **Contributing** + +我们欢迎各种形式的贡献,以下是参与方式: + +#### **🐛 Bug Reports & Feature Requests** +- 使用 [Issue 模板](https://github.com/openagents-org/openagents/issues/new/choose) +- 提供详细复现步骤 +- 附上系统信息与日志 + +#### **🤝 Pull Requests** +- Fork 仓库 +- 为你的改动创建分支 +- 完成修改并运行测试 +- 提交 PR,并说明所做改动 + +#### **👥 Develop together with us!** +- 加入我们的 [Discord](https://discord.gg/openagents) +- 分享想法,与社区一起构建 + + +
+ +## 🎉 **Start Building the Future of AI Collaboration Today!** + +
+ +[![Get Started](https://img.shields.io/badge/🚀_Get%20Started-Try%20OpenAgents-success?labelColor=2ea043)](#-quick-start) +[![Documentation](https://img.shields.io/badge/📚_Documentation-Read%20Docs-blue?labelColor=0969da)](https://openagents.org/docs/) +[![Community](https://img.shields.io/badge/💬_Community-Join%20Discord-purple?labelColor=5865f2)](https://discord.gg/openagents) + +
+ +⭐ **如果 OpenAgents 帮助了你的项目,请在 GitHub 上为我们加星!** ⭐ + +![OpenAgents Logo](docs/assets/images/openagents_logo_100.png) +
diff --git a/docker-compose.remote.yml b/docker-compose.remote.yml new file mode 100644 index 000000000..5d0a561bb --- /dev/null +++ b/docker-compose.remote.yml @@ -0,0 +1,72 @@ +services: + openagents: + image: ghcr.io/openagents-org/openagents:latest + container_name: openagents-network-studio + ports: + - "8700:8700" + - "8600:8600" + - "8050:8050" + environment: + - NODE_ENV=production + restart: unless-stopped + healthcheck: + test: ["CMD", "curl", "-f", "http://localhost:8700/api/health"] + interval: 30s + timeout: 10s + retries: 3 + start_period: 40s + + # sample-agent: + # image: ghcr.io/openagents-org/openagents:latest + # container_name: openagents-sample-agent + # depends_on: + # openagents: + # condition: service_healthy + # environment: + # - NETWORK_HOST=openagents + # - NETWORK_PORT=8700 + # - AGENT_INSTRUCTION=You are a friendly demo agent packaged with OpenAgents. + # - AGENT_MODEL_NAME=demo-model + # volumes: + # - ./examples/agents:/workspace/examples/agents + # working_dir: /workspace/examples/agents + # entrypoint: ["python", "simple_worker_agent_example.py"] + # restart: unless-stopped + + llm-agent: + image: ghcr.io/openagents-org/openagents:latest + container_name: openagents-llm-demo + depends_on: + openagents: + condition: service_healthy + env_file: + - .env + environment: + - NETWORK_HOST=openagents + - NETWORK_PORT=8700 + - AGENT_INSTRUCTION=You are an LLM agent answering questions for the OpenAgents demo. 
+ - AGENT_PROVIDER=${PROVIDER-openai} + volumes: + - ./examples/agents:/workspace/examples/agents + working_dir: /workspace/examples/agents + entrypoint: ["python", "llm_worker_agent.py"] + restart: unless-stopped + + poet-agent: + image: ghcr.io/openagents-org/openagents:latest + container_name: openagents-chinese-poet + depends_on: + openagents: + condition: service_healthy + env_file: + - .env + environment: + - NETWORK_HOST=openagents + - NETWORK_PORT=8700 + - AGENT_INSTRUCTION=你是一位通晓古典诗词的AI诗人。 + - AGENT_PROVIDER=${PROVIDER-openai} + volumes: + - ./examples/agents:/workspace/examples/agents + working_dir: /workspace/examples/agents + entrypoint: ["python", "chinese_poet_agent.py"] + restart: unless-stopped diff --git a/examples/agents/chinese_poet_agent.py b/examples/agents/chinese_poet_agent.py new file mode 100644 index 000000000..22755fb8a --- /dev/null +++ b/examples/agents/chinese_poet_agent.py @@ -0,0 +1,149 @@ +import os +import logging +from typing import List + +from openagents.agents.worker_agent import WorkerAgent +from openagents.models.agent_config import AgentConfig +from openagents.models.event_context import ChannelMessageContext + + +logger = logging.getLogger(__name__) + + +class ChinesePoetAgent(WorkerAgent): + """Compose classical-style Chinese poems via LLM based on incoming messages.""" + + default_agent_id = "shici-poet" + + def __init__(self, **kwargs): + super().__init__(**kwargs) + self.keywords = self._build_keyword_list() + + async def on_startup(self): + ws = self.workspace() + await ws.channel("general").post( + "诗词小助手已上线,请发送关键词(如春、山、月、酒、风、雨)召唤一首新诗。" + ) + + async def on_channel_post(self, context: ChannelMessageContext): + ws = self.workspace() + extracted = self._extract_keywords(context.text) + + instruction_override = self._build_instruction(extracted) + + configured_api_key = getattr(self.agent_config, "api_key", None) + effective_api_key = ( + configured_api_key + or os.getenv("API_KEY") + or os.getenv("AGENT_API_KEY") + or 
os.getenv("OPENAI_API_KEY") + ) + + configured_base = getattr(self.agent_config, "api_base", None) + effective_base = configured_base or os.getenv("BASE_URL") or os.getenv("AGENT_API_BASE") + + if not effective_api_key: + logger.warning( + "ChinesePoetAgent skipped LLM call: missing API key (API_KEY / AGENT_API_KEY / OPENAI_API_KEY)." + ) + await ws.channel(context.channel).reply( + context.message_id, + "我需要在 .env 中配置 API_KEY 才能即兴作诗。请先补全密钥后再试~", + ) + return + + if not configured_api_key: + self.agent_config.api_key = effective_api_key + if effective_base and not configured_base: + self.agent_config.api_base = effective_base + + try: + await self.run_agent(context=context, instruction=instruction_override) + except Exception as exc: # noqa: BLE001 + logger.error("Poet agent failed to invoke LLM: %s", exc) + await ws.channel(context.channel).reply( + context.message_id, + "抱歉,诗兴暂时打烊了,请稍后再试。", + ) + + def _build_keyword_list(self) -> List[str]: + return [ + "春", + "夏", + "秋", + "冬", + "山", + "水", + "江", + "湖", + "海", + "月", + "星", + "风", + "雨", + "雪", + "花", + "柳", + "酒", + "梦", + "夜", + "云", + "故乡", + "旅", + ] + + def _extract_keywords(self, text: str) -> List[str]: + if not text: + return [] + + found = [] + for kw in self.keywords: + if kw in text and kw not in found: + found.append(kw) + return found[:3] + + def _build_instruction(self, keywords: List[str]) -> str: + if keywords: + keyword_clause = "、".join(keywords) + prompt = f"请围绕关键词:{keyword_clause} 创作一首四句的中国古典诗。" + else: + prompt = "请即兴创作一首表达友谊与鼓励的中国古典诗。" + + return ( + "你是一位擅长中国古典诗词的诗人。" + "要求:\n" + "1. 输出四句七言古体,语言典雅、押平仄韵。\n" + "2. 诗句需连贯成完整意境,避免现代词汇。\n" + "3. 最后一行之后附加括号内简短意境说明(不超过12字)。\n" + f"4. {prompt}\n" + "5. 
只输出诗句与注释,不要额外说明。" + ) + + +if __name__ == "__main__": + host = os.getenv("NETWORK_HOST", "localhost") + port = int(os.getenv("NETWORK_PORT", "8700")) + + instruction = os.getenv( + "AGENT_INSTRUCTION", + "你是一位擅长创作中国古典诗词的诗人,能够根据主题写出雅正、含蓄的诗句。", + ) + model_name = os.getenv("MODEL") or os.getenv("AGENT_MODEL_NAME") or "gpt-4o-mini" + provider = os.getenv("PROVIDER", "openai") + api_key = os.getenv("API_KEY") or os.getenv("AGENT_API_KEY") + api_base = os.getenv("BASE_URL") or os.getenv("AGENT_API_BASE") + + agent_config = AgentConfig( + instruction=instruction, + model_name=model_name, + provider=provider, + api_key=api_key, + react_to_all_messages=True, + ) + + if api_base: + agent_config.api_base = api_base + + agent = ChinesePoetAgent(agent_config=agent_config) + agent.start(network_host=host, network_port=port) + agent.wait_for_stop() diff --git a/examples/agents/llm_worker_agent.py b/examples/agents/llm_worker_agent.py new file mode 100644 index 000000000..1c4870a04 --- /dev/null +++ b/examples/agents/llm_worker_agent.py @@ -0,0 +1,91 @@ +import os +import logging + +from openagents.agents.worker_agent import WorkerAgent +from openagents.models.agent_config import AgentConfig +from openagents.models.event_context import ChannelMessageContext, EventContext + + +logger = logging.getLogger(__name__) + + +class LLMDemoAgent(WorkerAgent): + """Worker agent that answers channel messages using an LLM.""" + + default_agent_id = "llm-demo" + + async def on_startup(self): + ws = self.workspace() + await ws.channel("general").post("LLM demo agent is online.") + + async def on_channel_post(self, context: ChannelMessageContext): + await self.respond_via_llm(context) + + async def respond_via_llm(self, context: EventContext): + instruction = os.getenv( + "AGENT_RESPONSE_PROMPT", + "Respond concisely and mention the sender by name.", + ) + + configured_api_key = getattr(self.agent_config, "api_key", None) + effective_api_key = ( + configured_api_key + or os.getenv("API_KEY") + 
or os.getenv("AGENT_API_KEY") + or os.getenv("OPENAI_API_KEY") + ) + + configured_base = getattr(self.agent_config, "api_base", None) + effective_base = configured_base or os.getenv("BASE_URL") or os.getenv("AGENT_API_BASE") + + ws = self.workspace() + + if not effective_api_key: + logger.warning( + "LLM demo agent skipped response: missing API_KEY / AGENT_API_KEY / OPENAI_API_KEY." + ) + await ws.channel(context.channel).reply( + context.message_id, + "I need an API key (set API_KEY in .env) to generate LLM replies.", + ) + return + + # Ensure downstream components have the latest values + if not configured_api_key: + self.agent_config.api_key = effective_api_key + if effective_base and not configured_base: + self.agent_config.api_base = effective_base + + await self.run_agent( + context=context, + instruction=instruction, + ) + + +if __name__ == "__main__": + host = os.getenv("NETWORK_HOST", "localhost") + port = int(os.getenv("NETWORK_PORT", "8700")) + + instruction = os.getenv( + "AGENT_INSTRUCTION", + "You are an assistant that helps teammates in the OpenAgents network.", + ) + model_name = os.getenv("MODEL") or os.getenv("AGENT_MODEL_NAME") or "gpt-4o-mini" + provider = os.getenv("AGENT_PROVIDER", "openai") + api_key = os.getenv("API_KEY") or os.getenv("AGENT_API_KEY") + api_base = os.getenv("BASE_URL") or os.getenv("AGENT_API_BASE") + + agent_config = AgentConfig( + instruction=instruction, + model_name=model_name, + provider=provider, + api_key=api_key, + react_to_all_messages=True, + ) + + if api_base: + agent_config.api_base = api_base + + agent = LLMDemoAgent(agent_config=agent_config) + agent.start(network_host=host, network_port=port) + agent.wait_for_stop() diff --git a/examples/agents/simple_worker_agent_example.py b/examples/agents/simple_worker_agent_example.py index ed915a125..445ccea2e 100644 --- a/examples/agents/simple_worker_agent_example.py +++ b/examples/agents/simple_worker_agent_example.py @@ -1,5 +1,4 @@ -import asyncio -from 
openagents.core.client import AgentClient +import os from openagents.agents.worker_agent import WorkerAgent from openagents.models.agent_config import AgentConfig from openagents.models.event_context import ChannelMessageContext, EventContext @@ -17,17 +16,27 @@ async def on_direct(self, context: EventContext): await ws.agent(context.source_id).send(f"Hello {context.source_id}!") async def on_channel_post(self, context: ChannelMessageContext): - task_instruction = "If the message is about weather, check weather report and reply" - await self.run_agent( - context=context, - instruction=task_instruction + ws = self.workspace() + await ws.channel(context.channel).reply( + context.message_id, + f"Charlie here! I noticed: '{context.text or 'your message'}'" ) if __name__ == "__main__": - charlie = CharlieAgent(agent_config=AgentConfig( - model_name="gpt-4o-mini", - instruction="You are a weather assistant ...", - )) - charlie.start(network_host="localhost") + host = os.getenv("NETWORK_HOST", "localhost") + port = int(os.getenv("NETWORK_PORT", "8700")) + + instruction = os.getenv( + "AGENT_INSTRUCTION", "You are a friendly demo agent for OpenAgents." 
+ ) + model_name = os.getenv("AGENT_MODEL_NAME", "demo-model") + + charlie = CharlieAgent( + agent_config=AgentConfig( + instruction=instruction, + model_name=model_name, + ) + ) + charlie.start(network_host=host, network_port=port) charlie.wait_for_stop() - \ No newline at end of file + diff --git a/src/openagents/core/transports/http.py b/src/openagents/core/transports/http.py index 5adeb3baa..5afcc6c4f 100644 --- a/src/openagents/core/transports/http.py +++ b/src/openagents/core/transports/http.py @@ -45,6 +45,7 @@ def setup_routes(self): """Setup HTTP routes.""" # Add both /health and /api/health for compatibility self.app.router.add_get("/api/health", self.health_check) + self.app.router.add_get("/health", self.health_check) self.app.router.add_post("/api/register", self.register_agent) self.app.router.add_post("/api/unregister", self.unregister_agent) self.app.router.add_get("/api/poll", self.poll_messages)