From 5ff9deb83f97cf5ac54c0daf48481bd18956186e Mon Sep 17 00:00:00 2001 From: Domingo Date: Fri, 20 Feb 2026 06:15:26 -0600 Subject: [PATCH 01/17] fix: resolve security vulnerabilities - Update minimatch from 10.2.0 to 10.2.2 (fixes CVE-2026-26996 ReDoS vulnerability) - Update hono from 4.11.9 to 4.12.0 (fixes timing comparison hardening in auth) - Add esbuild ^0.25.0 override to resolve GHSA-67mh-4wv8-2f99 (CORS issue in dev server) All npm audit vulnerabilities now resolved. --- package-lock.json | 61 ++++++----------------------------------------- package.json | 3 +++ 2 files changed, 10 insertions(+), 54 deletions(-) diff --git a/package-lock.json b/package-lock.json index a29f1c2..ac41793 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1209,57 +1209,6 @@ "source-map-support": "^0.5.21" } }, - "node_modules/@esbuild-kit/core-utils/node_modules/esbuild": { - "version": "0.18.20", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "bin": { - "esbuild": "bin/esbuild" - }, - "engines": { - "node": ">=12" - }, - "optionalDependencies": { - "@esbuild/android-arm": "0.18.20", - "@esbuild/android-arm64": "0.18.20", - "@esbuild/android-x64": "0.18.20", - "@esbuild/darwin-arm64": "0.18.20", - "@esbuild/darwin-x64": "0.18.20", - "@esbuild/freebsd-arm64": "0.18.20", - "@esbuild/freebsd-x64": "0.18.20", - "@esbuild/linux-arm": "0.18.20", - "@esbuild/linux-arm64": "0.18.20", - "@esbuild/linux-ia32": "0.18.20", - "@esbuild/linux-loong64": "0.18.20", - "@esbuild/linux-mips64el": "0.18.20", - "@esbuild/linux-ppc64": "0.18.20", - "@esbuild/linux-riscv64": "0.18.20", - "@esbuild/linux-s390x": "0.18.20", - "@esbuild/linux-x64": "0.18.20", - "@esbuild/netbsd-x64": "0.18.20", - "@esbuild/openbsd-x64": "0.18.20", - "@esbuild/sunos-x64": "0.18.20", - "@esbuild/win32-arm64": "0.18.20", - "@esbuild/win32-ia32": "0.18.20", - "@esbuild/win32-x64": "0.18.20" - } - }, - "node_modules/@esbuild-kit/core-utils/node_modules/esbuild/node_modules/@esbuild/linux-x64": { - 
"version": "0.18.20", - "cpu": [ - "x64" - ], - "dev": true, - "license": "MIT", - "optional": true, - "os": [ - "linux" - ], - "engines": { - "node": ">=12" - } - }, "node_modules/@esbuild-kit/esm-loader": { "version": "2.6.5", "dev": true, @@ -4910,7 +4859,9 @@ "license": "MIT" }, "node_modules/hono": { - "version": "4.11.9", + "version": "4.12.0", + "resolved": "https://registry.npmjs.org/hono/-/hono-4.12.0.tgz", + "integrity": "sha512-NekXntS5M94pUfiVZ8oXXK/kkri+5WpX2/Ik+LVsl+uvw+soj4roXIsPqO+XsWrAw20mOzaXOZf3Q7PfB9A/IA==", "license": "MIT", "engines": { "node": ">=16.9.0" @@ -5641,13 +5592,15 @@ } }, "node_modules/minimatch": { - "version": "10.2.0", + "version": "10.2.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-10.2.2.tgz", + "integrity": "sha512-+G4CpNBxa5MprY+04MbgOw1v7So6n5JY166pFi9KfYwT78fxScCeSNQSNzp6dpPSW2rONOps6Ocam1wFhCgoVw==", "license": "BlueOak-1.0.0", "dependencies": { "brace-expansion": "^5.0.2" }, "engines": { - "node": "20 || >=22" + "node": "18 || 20 || >=22" }, "funding": { "url": "https://github.com/sponsors/isaacs" diff --git a/package.json b/package.json index 80bc19b..c00d17e 100644 --- a/package.json +++ b/package.json @@ -70,5 +70,8 @@ "vite": "^7.1.7", "vitest": "^3.0.5", "web-vitals": "^5.1.0" + }, + "overrides": { + "esbuild": "^0.25.0" } } From bb5429eae733517f3155fed86443496204e696bd Mon Sep 17 00:00:00 2001 From: Domingo Date: Fri, 20 Feb 2026 11:05:09 -0600 Subject: [PATCH 02/17] fix: save notebook content immediately when last peer disconnects MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Previously, content was only persisted to DB via a 5-second debounce timer. If the user navigated away before the timer fired, they would see stale content on return (loaded from DB via API) — especially if they stayed in preview mode and never triggered the Yjs sync. 
The workaround of adding a tag worked because it kept the user on the page long enough for the 5s debounce to fire. Fix: when the last peer disconnects, cancel the pending debounce and call persistDoc() immediately. The 10-second doc destruction timer is unchanged — quick mode switches still reuse the in-memory doc. --- server/routes/api/notebook/ws.ts | 9 +++++++++ 1 file changed, 9 insertions(+) diff --git a/server/routes/api/notebook/ws.ts b/server/routes/api/notebook/ws.ts index 47910fa..52f9b3f 100644 --- a/server/routes/api/notebook/ws.ts +++ b/server/routes/api/notebook/ws.ts @@ -257,6 +257,15 @@ export default defineWebSocketHandler({ } catch {} } + // Save immediately when last peer leaves (don't wait for the debounce) + if (entry.peers.size === 0) { + if (entry.saveTimer) { + clearTimeout(entry.saveTimer); + entry.saveTimer = null; + } + persistDoc(pageId, entry); + } + // Destroy if no peers left setTimeout(() => destroyDocIfEmpty(pageId), 10_000); } From bd91600e4e7ec43bb655c87737164c13ea3e61ae Mon Sep 17 00:00:00 2001 From: Domingo Date: Fri, 20 Feb 2026 15:36:47 -0600 Subject: [PATCH 03/17] fix: update bun.lock after security dependency updates --- bun.lock | 157 ++----------------------------------------------------- 1 file changed, 3 insertions(+), 154 deletions(-) diff --git a/bun.lock b/bun.lock index 84b93a4..09a3493 100644 --- a/bun.lock +++ b/bun.lock @@ -63,6 +63,9 @@ }, }, }, + "overrides": { + "esbuild": "^0.25.0", + }, "packages": { "@acemir/cssom": ["@acemir/cssom@0.9.31", "", {}, "sha512-ZnR3GSaH+/vJ0YlHau21FjfLYjMpYVIzTD8M8vIEQvIGxeOXyXdzCI140rrCY862p/C/BbzWsjc1dgnM9mkoTA=="], @@ -1558,8 +1561,6 @@ "@dotenvx/dotenvx/execa": ["execa@5.1.1", "", { "dependencies": { "cross-spawn": "^7.0.3", "get-stream": "^6.0.0", "human-signals": "^2.1.0", "is-stream": "^2.0.0", "merge-stream": "^2.0.0", "npm-run-path": "^4.0.1", "onetime": "^5.1.2", "signal-exit": "^3.0.3", "strip-final-newline": "^2.0.0" } }, 
"sha512-8uSpZZocAZRBAPIEINJj3Lo9HyGitllczc27Eh5YYojjMFMn8yHMDMaUHE2Jqfq05D/wucwI4JGURyXt1vchyg=="], - "@esbuild-kit/core-utils/esbuild": ["esbuild@0.18.20", "", { "optionalDependencies": { "@esbuild/android-arm": "0.18.20", "@esbuild/android-arm64": "0.18.20", "@esbuild/android-x64": "0.18.20", "@esbuild/darwin-arm64": "0.18.20", "@esbuild/darwin-x64": "0.18.20", "@esbuild/freebsd-arm64": "0.18.20", "@esbuild/freebsd-x64": "0.18.20", "@esbuild/linux-arm": "0.18.20", "@esbuild/linux-arm64": "0.18.20", "@esbuild/linux-ia32": "0.18.20", "@esbuild/linux-loong64": "0.18.20", "@esbuild/linux-mips64el": "0.18.20", "@esbuild/linux-ppc64": "0.18.20", "@esbuild/linux-riscv64": "0.18.20", "@esbuild/linux-s390x": "0.18.20", "@esbuild/linux-x64": "0.18.20", "@esbuild/netbsd-x64": "0.18.20", "@esbuild/openbsd-x64": "0.18.20", "@esbuild/sunos-x64": "0.18.20", "@esbuild/win32-arm64": "0.18.20", "@esbuild/win32-ia32": "0.18.20", "@esbuild/win32-x64": "0.18.20" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-ceqxoedUrcayh7Y7ZX6NdbbDzGROiyVBgC4PriJThBKSVPWnnFHZAkfI1lJT8QFkOwH4qOS2SJkS4wvpGl8BpA=="], - "@tailwindcss/oxide-wasm32-wasi/@emnapi/core": ["@emnapi/core@1.8.1", "", { "dependencies": { "@emnapi/wasi-threads": "1.1.0", "tslib": "^2.4.0" }, "bundled": true }, "sha512-AvT9QFpxK0Zd8J0jopedNm+w/2fIzvtPKPjqyw9jwvBaReTTqPBk9Hixaz7KbjimP+QNz605/XnjFcDAL2pqBg=="], "@tailwindcss/oxide-wasm32-wasi/@emnapi/runtime": ["@emnapi/runtime@1.8.1", "", { "dependencies": { "tslib": "^2.4.0" }, "bundled": true }, "sha512-mehfKSMWjjNol8659Z8KxEMrdSJDDot5SXMq00dM8BN4o+CLNXQ0xH2V7EchNHV4RmbZLmmPdEaXZc5H2FXmDg=="], @@ -1634,10 +1635,6 @@ "strip-literal/js-tokens": ["js-tokens@9.0.1", "", {}, "sha512-mxa9E9ITFOt0ban3j6L5MpjwegGz6lBQmM1IJkWeBZGcMxto50+eWdjC/52xDbS2vy0k7vIMK0Fe2wfL9OQSpQ=="], - "tsx/esbuild": ["esbuild@0.27.3", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.3", "@esbuild/android-arm": "0.27.3", "@esbuild/android-arm64": "0.27.3", "@esbuild/android-x64": "0.27.3", 
"@esbuild/darwin-arm64": "0.27.3", "@esbuild/darwin-x64": "0.27.3", "@esbuild/freebsd-arm64": "0.27.3", "@esbuild/freebsd-x64": "0.27.3", "@esbuild/linux-arm": "0.27.3", "@esbuild/linux-arm64": "0.27.3", "@esbuild/linux-ia32": "0.27.3", "@esbuild/linux-loong64": "0.27.3", "@esbuild/linux-mips64el": "0.27.3", "@esbuild/linux-ppc64": "0.27.3", "@esbuild/linux-riscv64": "0.27.3", "@esbuild/linux-s390x": "0.27.3", "@esbuild/linux-x64": "0.27.3", "@esbuild/netbsd-arm64": "0.27.3", "@esbuild/netbsd-x64": "0.27.3", "@esbuild/openbsd-arm64": "0.27.3", "@esbuild/openbsd-x64": "0.27.3", "@esbuild/openharmony-arm64": "0.27.3", "@esbuild/sunos-x64": "0.27.3", "@esbuild/win32-arm64": "0.27.3", "@esbuild/win32-ia32": "0.27.3", "@esbuild/win32-x64": "0.27.3" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg=="], - - "vite/esbuild": ["esbuild@0.27.3", "", { "optionalDependencies": { "@esbuild/aix-ppc64": "0.27.3", "@esbuild/android-arm": "0.27.3", "@esbuild/android-arm64": "0.27.3", "@esbuild/android-x64": "0.27.3", "@esbuild/darwin-arm64": "0.27.3", "@esbuild/darwin-x64": "0.27.3", "@esbuild/freebsd-arm64": "0.27.3", "@esbuild/freebsd-x64": "0.27.3", "@esbuild/linux-arm": "0.27.3", "@esbuild/linux-arm64": "0.27.3", "@esbuild/linux-ia32": "0.27.3", "@esbuild/linux-loong64": "0.27.3", "@esbuild/linux-mips64el": "0.27.3", "@esbuild/linux-ppc64": "0.27.3", "@esbuild/linux-riscv64": "0.27.3", "@esbuild/linux-s390x": "0.27.3", "@esbuild/linux-x64": "0.27.3", "@esbuild/netbsd-arm64": "0.27.3", "@esbuild/netbsd-x64": "0.27.3", "@esbuild/openbsd-arm64": "0.27.3", "@esbuild/openbsd-x64": "0.27.3", "@esbuild/openharmony-arm64": "0.27.3", "@esbuild/sunos-x64": "0.27.3", "@esbuild/win32-arm64": "0.27.3", "@esbuild/win32-ia32": "0.27.3", "@esbuild/win32-x64": "0.27.3" }, "bin": { "esbuild": "bin/esbuild" } }, "sha512-8VwMnyGCONIs6cWue2IdpHxHnAjzxnw2Zr7MkVxB2vjmQ2ivqGFb4LEG3SMnv0Gb2F/G/2yA8zUaiL1gywDCCg=="], 
- "whatwg-encoding/iconv-lite": ["iconv-lite@0.6.3", "", { "dependencies": { "safer-buffer": ">= 2.1.2 < 3.0.0" } }, "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw=="], "wrap-ansi/ansi-styles": ["ansi-styles@4.3.0", "", { "dependencies": { "color-convert": "^2.0.1" } }, "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg=="], @@ -1660,50 +1657,6 @@ "@dotenvx/dotenvx/execa/strip-final-newline": ["strip-final-newline@2.0.0", "", {}, "sha512-BrpvfNAE3dcvq7ll3xVumzjKjZQ5tI1sEUIKr3Uoks0XUl45St3FlatVqef9prk4jRDzhW6WZg+3bk93y6pLjA=="], - "@esbuild-kit/core-utils/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.18.20", "", { "os": "android", "cpu": "arm" }, "sha512-fyi7TDI/ijKKNZTUJAQqiG5T7YjJXgnzkURqmGj13C6dCqckZBLdl4h7bkhHt/t0WP+zO9/zwroDvANaOqO5Sw=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.18.20", "", { "os": "android", "cpu": "arm64" }, "sha512-Nz4rJcchGDtENV0eMKUNa6L12zz2zBDXuhj/Vjh18zGqB44Bi7MBMSXjgunJgjRhCmKOjnPuZp4Mb6OKqtMHLQ=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/android-x64": ["@esbuild/android-x64@0.18.20", "", { "os": "android", "cpu": "x64" }, "sha512-8GDdlePJA8D6zlZYJV/jnrRAi6rOiNaCC/JclcXpB+KIuvfBN4owLtgzY2bsxnx666XjJx2kDPUmnTtR8qKQUg=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.18.20", "", { "os": "darwin", "cpu": "arm64" }, "sha512-bxRHW5kHU38zS2lPTPOyuyTm+S+eobPUnTNkdJEfAddYgEcll4xkT8DB9d2008DtTbl7uJag2HuE5NZAZgnNEA=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.18.20", "", { "os": "darwin", "cpu": "x64" }, "sha512-pc5gxlMDxzm513qPGbCbDukOdsGtKhfxD1zJKXjCCcU7ju50O7MeAZ8c4krSJcOIJGFR+qx21yMMVYwiQvyTyQ=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.18.20", "", { "os": "freebsd", "cpu": "arm64" }, 
"sha512-yqDQHy4QHevpMAaxhhIwYPMv1NECwOvIpGCZkECn8w2WFHXjEwrBn3CeNIYsibZ/iZEUemj++M26W3cNR5h+Tw=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.18.20", "", { "os": "freebsd", "cpu": "x64" }, "sha512-tgWRPPuQsd3RmBZwarGVHZQvtzfEBOreNuxEMKFcd5DaDn2PbBxfwLcj4+aenoh7ctXcbXmOQIn8HI6mCSw5MQ=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.18.20", "", { "os": "linux", "cpu": "arm" }, "sha512-/5bHkMWnq1EgKr1V+Ybz3s1hWXok7mDFUMQ4cG10AfW3wL02PSZi5kFpYKrptDsgb2WAJIvRcDm+qIvXf/apvg=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.18.20", "", { "os": "linux", "cpu": "arm64" }, "sha512-2YbscF+UL7SQAVIpnWvYwM+3LskyDmPhe31pE7/aoTMFKKzIc9lLbyGUpmmb8a8AixOL61sQ/mFh3jEjHYFvdA=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.18.20", "", { "os": "linux", "cpu": "ia32" }, "sha512-P4etWwq6IsReT0E1KHU40bOnzMHoH73aXp96Fs8TIT6z9Hu8G6+0SHSw9i2isWrD2nbx2qo5yUqACgdfVGx7TA=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.18.20", "", { "os": "linux", "cpu": "none" }, "sha512-nXW8nqBTrOpDLPgPY9uV+/1DjxoQ7DoB2N8eocyq8I9XuqJ7BiAMDMf9n1xZM9TgW0J8zrquIb/A7s3BJv7rjg=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.18.20", "", { "os": "linux", "cpu": "none" }, "sha512-d5NeaXZcHp8PzYy5VnXV3VSd2D328Zb+9dEq5HE6bw6+N86JVPExrA6O68OPwobntbNJ0pzCpUFZTo3w0GyetQ=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.18.20", "", { "os": "linux", "cpu": "ppc64" }, "sha512-WHPyeScRNcmANnLQkq6AfyXRFr5D6N2sKgkFo2FqguP44Nw2eyDlbTdZwd9GYk98DZG9QItIiTlFLHJHjxP3FA=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.18.20", "", { "os": "linux", "cpu": "none" }, "sha512-WSxo6h5ecI5XH34KC7w5veNnKkju3zBRLEQNY7mv5mtBmrP/MjNBCAlsM2u5hDBlS3NGcTQpoBvRzqBcRtpq1A=="], - - 
"@esbuild-kit/core-utils/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.18.20", "", { "os": "linux", "cpu": "s390x" }, "sha512-+8231GMs3mAEth6Ja1iK0a1sQ3ohfcpzpRLH8uuc5/KVDFneH6jtAJLFGafpzpMRO6DzJ6AvXKze9LfFMrIHVQ=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.18.20", "", { "os": "linux", "cpu": "x64" }, "sha512-UYqiqemphJcNsFEskc73jQ7B9jgwjWrSayxawS6UVFZGWrAAtkzjxSqnoclCXxWtfwLdzU+vTpcNYhpn43uP1w=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.18.20", "", { "os": "none", "cpu": "x64" }, "sha512-iO1c++VP6xUBUmltHZoMtCUdPlnPGdBom6IrO4gyKPFFVBKioIImVooR5I83nTew5UOYrk3gIJhbZh8X44y06A=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.18.20", "", { "os": "openbsd", "cpu": "x64" }, "sha512-e5e4YSsuQfX4cxcygw/UCPIEP6wbIL+se3sxPdCiMbFLBWu0eiZOJ7WoD+ptCLrmjZBK1Wk7I6D/I3NglUGOxg=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.18.20", "", { "os": "sunos", "cpu": "x64" }, "sha512-kDbFRFp0YpTQVVrqUd5FTYmWo45zGaXe0X8E1G/LKFC0v8x0vWrhOWSLITcCn63lmZIxfOMXtCfti/RxN/0wnQ=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.18.20", "", { "os": "win32", "cpu": "arm64" }, "sha512-ddYFR6ItYgoaq4v4JmQQaAI5s7npztfV4Ag6NrhiaW0RrnOXqBkgwZLofVTlq1daVTQNhtI5oieTvkRPfZrePg=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.18.20", "", { "os": "win32", "cpu": "ia32" }, "sha512-Wv7QBi3ID/rROT08SABTS7eV4hX26sVduqDOTe1MvGMjNd3EjOz4b7zeexIR62GTIEKrfJXKL9LFxTYgkyeu7g=="], - - "@esbuild-kit/core-utils/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.18.20", "", { "os": "win32", "cpu": "x64" }, "sha512-kTdfRcSiDfQca/y9QIkng02avJ+NCaQvrMejlsB3RRv5sE9rRoeBPISaZpKxHELzRxZyLvNts1P27W3wV+8geQ=="], - "@tanstack/devtools/@tanstack/devtools-client/@tanstack/devtools-event-client": ["@tanstack/devtools-event-client@0.3.5", "", {}, 
"sha512-RL1f5ZlfZMpghrCIdzl6mLOFLTuhqmPNblZgBaeKfdtk5rfbjykurv+VfYydOFXj0vxVIoA2d/zT7xfD7Ph8fw=="], "cliui/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], @@ -1712,110 +1665,6 @@ "cross-spawn/which/isexe": ["isexe@2.0.0", "", {}, "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw=="], - "tsx/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.3", "", { "os": "aix", "cpu": "ppc64" }, "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg=="], - - "tsx/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.27.3", "", { "os": "android", "cpu": "arm" }, "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA=="], - - "tsx/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.3", "", { "os": "android", "cpu": "arm64" }, "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg=="], - - "tsx/esbuild/@esbuild/android-x64": ["@esbuild/android-x64@0.27.3", "", { "os": "android", "cpu": "x64" }, "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ=="], - - "tsx/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.3", "", { "os": "darwin", "cpu": "arm64" }, "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg=="], - - "tsx/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.3", "", { "os": "darwin", "cpu": "x64" }, "sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg=="], - - "tsx/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.3", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w=="], - - "tsx/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.3", "", { 
"os": "freebsd", "cpu": "x64" }, "sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA=="], - - "tsx/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.3", "", { "os": "linux", "cpu": "arm" }, "sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw=="], - - "tsx/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.3", "", { "os": "linux", "cpu": "arm64" }, "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg=="], - - "tsx/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.3", "", { "os": "linux", "cpu": "ia32" }, "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg=="], - - "tsx/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA=="], - - "tsx/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw=="], - - "tsx/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.3", "", { "os": "linux", "cpu": "ppc64" }, "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA=="], - - "tsx/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ=="], - - "tsx/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.3", "", { "os": "linux", "cpu": "s390x" }, "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw=="], - - "tsx/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.3", "", { "os": "linux", "cpu": "x64" }, 
"sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA=="], - - "tsx/esbuild/@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.3", "", { "os": "none", "cpu": "arm64" }, "sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA=="], - - "tsx/esbuild/@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.3", "", { "os": "none", "cpu": "x64" }, "sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA=="], - - "tsx/esbuild/@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.3", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw=="], - - "tsx/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.3", "", { "os": "openbsd", "cpu": "x64" }, "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ=="], - - "tsx/esbuild/@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.3", "", { "os": "none", "cpu": "arm64" }, "sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g=="], - - "tsx/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.3", "", { "os": "sunos", "cpu": "x64" }, "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA=="], - - "tsx/esbuild/@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.3", "", { "os": "win32", "cpu": "arm64" }, "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA=="], - - "tsx/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.3", "", { "os": "win32", "cpu": "ia32" }, "sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q=="], - - "tsx/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.3", "", { "os": "win32", "cpu": "x64" }, "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA=="], - 
- "vite/esbuild/@esbuild/aix-ppc64": ["@esbuild/aix-ppc64@0.27.3", "", { "os": "aix", "cpu": "ppc64" }, "sha512-9fJMTNFTWZMh5qwrBItuziu834eOCUcEqymSH7pY+zoMVEZg3gcPuBNxH1EvfVYe9h0x/Ptw8KBzv7qxb7l8dg=="], - - "vite/esbuild/@esbuild/android-arm": ["@esbuild/android-arm@0.27.3", "", { "os": "android", "cpu": "arm" }, "sha512-i5D1hPY7GIQmXlXhs2w8AWHhenb00+GxjxRncS2ZM7YNVGNfaMxgzSGuO8o8SJzRc/oZwU2bcScvVERk03QhzA=="], - - "vite/esbuild/@esbuild/android-arm64": ["@esbuild/android-arm64@0.27.3", "", { "os": "android", "cpu": "arm64" }, "sha512-YdghPYUmj/FX2SYKJ0OZxf+iaKgMsKHVPF1MAq/P8WirnSpCStzKJFjOjzsW0QQ7oIAiccHdcqjbHmJxRb/dmg=="], - - "vite/esbuild/@esbuild/android-x64": ["@esbuild/android-x64@0.27.3", "", { "os": "android", "cpu": "x64" }, "sha512-IN/0BNTkHtk8lkOM8JWAYFg4ORxBkZQf9zXiEOfERX/CzxW3Vg1ewAhU7QSWQpVIzTW+b8Xy+lGzdYXV6UZObQ=="], - - "vite/esbuild/@esbuild/darwin-arm64": ["@esbuild/darwin-arm64@0.27.3", "", { "os": "darwin", "cpu": "arm64" }, "sha512-Re491k7ByTVRy0t3EKWajdLIr0gz2kKKfzafkth4Q8A5n1xTHrkqZgLLjFEHVD+AXdUGgQMq+Godfq45mGpCKg=="], - - "vite/esbuild/@esbuild/darwin-x64": ["@esbuild/darwin-x64@0.27.3", "", { "os": "darwin", "cpu": "x64" }, "sha512-vHk/hA7/1AckjGzRqi6wbo+jaShzRowYip6rt6q7VYEDX4LEy1pZfDpdxCBnGtl+A5zq8iXDcyuxwtv3hNtHFg=="], - - "vite/esbuild/@esbuild/freebsd-arm64": ["@esbuild/freebsd-arm64@0.27.3", "", { "os": "freebsd", "cpu": "arm64" }, "sha512-ipTYM2fjt3kQAYOvo6vcxJx3nBYAzPjgTCk7QEgZG8AUO3ydUhvelmhrbOheMnGOlaSFUoHXB6un+A7q4ygY9w=="], - - "vite/esbuild/@esbuild/freebsd-x64": ["@esbuild/freebsd-x64@0.27.3", "", { "os": "freebsd", "cpu": "x64" }, "sha512-dDk0X87T7mI6U3K9VjWtHOXqwAMJBNN2r7bejDsc+j03SEjtD9HrOl8gVFByeM0aJksoUuUVU9TBaZa2rgj0oA=="], - - "vite/esbuild/@esbuild/linux-arm": ["@esbuild/linux-arm@0.27.3", "", { "os": "linux", "cpu": "arm" }, "sha512-s6nPv2QkSupJwLYyfS+gwdirm0ukyTFNl3KTgZEAiJDd+iHZcbTPPcWCcRYH+WlNbwChgH2QkE9NSlNrMT8Gfw=="], - - "vite/esbuild/@esbuild/linux-arm64": ["@esbuild/linux-arm64@0.27.3", "", { "os": "linux", 
"cpu": "arm64" }, "sha512-sZOuFz/xWnZ4KH3YfFrKCf1WyPZHakVzTiqji3WDc0BCl2kBwiJLCXpzLzUBLgmp4veFZdvN5ChW4Eq/8Fc2Fg=="], - - "vite/esbuild/@esbuild/linux-ia32": ["@esbuild/linux-ia32@0.27.3", "", { "os": "linux", "cpu": "ia32" }, "sha512-yGlQYjdxtLdh0a3jHjuwOrxQjOZYD/C9PfdbgJJF3TIZWnm/tMd/RcNiLngiu4iwcBAOezdnSLAwQDPqTmtTYg=="], - - "vite/esbuild/@esbuild/linux-loong64": ["@esbuild/linux-loong64@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-WO60Sn8ly3gtzhyjATDgieJNet/KqsDlX5nRC5Y3oTFcS1l0KWba+SEa9Ja1GfDqSF1z6hif/SkpQJbL63cgOA=="], - - "vite/esbuild/@esbuild/linux-mips64el": ["@esbuild/linux-mips64el@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-APsymYA6sGcZ4pD6k+UxbDjOFSvPWyZhjaiPyl/f79xKxwTnrn5QUnXR5prvetuaSMsb4jgeHewIDCIWljrSxw=="], - - "vite/esbuild/@esbuild/linux-ppc64": ["@esbuild/linux-ppc64@0.27.3", "", { "os": "linux", "cpu": "ppc64" }, "sha512-eizBnTeBefojtDb9nSh4vvVQ3V9Qf9Df01PfawPcRzJH4gFSgrObw+LveUyDoKU3kxi5+9RJTCWlj4FjYXVPEA=="], - - "vite/esbuild/@esbuild/linux-riscv64": ["@esbuild/linux-riscv64@0.27.3", "", { "os": "linux", "cpu": "none" }, "sha512-3Emwh0r5wmfm3ssTWRQSyVhbOHvqegUDRd0WhmXKX2mkHJe1SFCMJhagUleMq+Uci34wLSipf8Lagt4LlpRFWQ=="], - - "vite/esbuild/@esbuild/linux-s390x": ["@esbuild/linux-s390x@0.27.3", "", { "os": "linux", "cpu": "s390x" }, "sha512-pBHUx9LzXWBc7MFIEEL0yD/ZVtNgLytvx60gES28GcWMqil8ElCYR4kvbV2BDqsHOvVDRrOxGySBM9Fcv744hw=="], - - "vite/esbuild/@esbuild/linux-x64": ["@esbuild/linux-x64@0.27.3", "", { "os": "linux", "cpu": "x64" }, "sha512-Czi8yzXUWIQYAtL/2y6vogER8pvcsOsk5cpwL4Gk5nJqH5UZiVByIY8Eorm5R13gq+DQKYg0+JyQoytLQas4dA=="], - - "vite/esbuild/@esbuild/netbsd-arm64": ["@esbuild/netbsd-arm64@0.27.3", "", { "os": "none", "cpu": "arm64" }, "sha512-sDpk0RgmTCR/5HguIZa9n9u+HVKf40fbEUt+iTzSnCaGvY9kFP0YKBWZtJaraonFnqef5SlJ8/TiPAxzyS+UoA=="], - - "vite/esbuild/@esbuild/netbsd-x64": ["@esbuild/netbsd-x64@0.27.3", "", { "os": "none", "cpu": "x64" }, 
"sha512-P14lFKJl/DdaE00LItAukUdZO5iqNH7+PjoBm+fLQjtxfcfFE20Xf5CrLsmZdq5LFFZzb5JMZ9grUwvtVYzjiA=="], - - "vite/esbuild/@esbuild/openbsd-arm64": ["@esbuild/openbsd-arm64@0.27.3", "", { "os": "openbsd", "cpu": "arm64" }, "sha512-AIcMP77AvirGbRl/UZFTq5hjXK+2wC7qFRGoHSDrZ5v5b8DK/GYpXW3CPRL53NkvDqb9D+alBiC/dV0Fb7eJcw=="], - - "vite/esbuild/@esbuild/openbsd-x64": ["@esbuild/openbsd-x64@0.27.3", "", { "os": "openbsd", "cpu": "x64" }, "sha512-DnW2sRrBzA+YnE70LKqnM3P+z8vehfJWHXECbwBmH/CU51z6FiqTQTHFenPlHmo3a8UgpLyH3PT+87OViOh1AQ=="], - - "vite/esbuild/@esbuild/openharmony-arm64": ["@esbuild/openharmony-arm64@0.27.3", "", { "os": "none", "cpu": "arm64" }, "sha512-NinAEgr/etERPTsZJ7aEZQvvg/A6IsZG/LgZy+81wON2huV7SrK3e63dU0XhyZP4RKGyTm7aOgmQk0bGp0fy2g=="], - - "vite/esbuild/@esbuild/sunos-x64": ["@esbuild/sunos-x64@0.27.3", "", { "os": "sunos", "cpu": "x64" }, "sha512-PanZ+nEz+eWoBJ8/f8HKxTTD172SKwdXebZ0ndd953gt1HRBbhMsaNqjTyYLGLPdoWHy4zLU7bDVJztF5f3BHA=="], - - "vite/esbuild/@esbuild/win32-arm64": ["@esbuild/win32-arm64@0.27.3", "", { "os": "win32", "cpu": "arm64" }, "sha512-B2t59lWWYrbRDw/tjiWOuzSsFh1Y/E95ofKz7rIVYSQkUYBjfSgf6oeYPNWHToFRr2zx52JKApIcAS/D5TUBnA=="], - - "vite/esbuild/@esbuild/win32-ia32": ["@esbuild/win32-ia32@0.27.3", "", { "os": "win32", "cpu": "ia32" }, "sha512-QLKSFeXNS8+tHW7tZpMtjlNb7HKau0QDpwm49u0vUp9y1WOF+PEzkU84y9GqYaAVW8aH8f3GcBck26jh54cX4Q=="], - - "vite/esbuild/@esbuild/win32-x64": ["@esbuild/win32-x64@0.27.3", "", { "os": "win32", "cpu": "x64" }, "sha512-4uJGhsxuptu3OcpVAzli+/gWusVGwZZHTlS63hh++ehExkVT8SgiEf7/uC/PclrPPkLhZqGgCTjd0VWLo6xMqA=="], - "wrap-ansi/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], "yargs/string-width/emoji-regex": ["emoji-regex@8.0.0", "", {}, "sha512-MSjYzcWNOA0ewAHpz0MxpYFvwg6yjy1NG3xteoqz644VCo/RPgnr1/GGt+ic3iJTzQ8Eu3TdM14SawnVUmGE6A=="], From 34f29e8fccf3af13d44fd7ce8b4ac56a0783fced Mon Sep 17 00:00:00 2001 From: 
Domingo Date: Fri, 20 Feb 2026 17:12:46 -0600 Subject: [PATCH 04/17] fix: override minimatch and hono to resolve remaining vuln alerts - minimatch >=10.2.2 (GHSA-3ppc-4f35-3m26 ReDoS via repeated wildcards) - hono >=4.12.0 (GHSA-gq3j-xvxp-8hrf timing comparison hardening) Both were transitive deps pulled in by shadcn CLI. bun audit clean. --- bun.lock | 6 ++++-- package.json | 4 +++- 2 files changed, 7 insertions(+), 3 deletions(-) diff --git a/bun.lock b/bun.lock index 09a3493..f44c014 100644 --- a/bun.lock +++ b/bun.lock @@ -65,6 +65,8 @@ }, "overrides": { "esbuild": "^0.25.0", + "hono": "^4.12.0", + "minimatch": "^10.2.2", }, "packages": { "@acemir/cssom": ["@acemir/cssom@0.9.31", "", {}, "sha512-ZnR3GSaH+/vJ0YlHau21FjfLYjMpYVIzTD8M8vIEQvIGxeOXyXdzCI140rrCY862p/C/BbzWsjc1dgnM9mkoTA=="], @@ -1011,7 +1013,7 @@ "headers-polyfill": ["headers-polyfill@4.0.3", "", {}, "sha512-IScLbePpkvO846sIwOtOTDjutRMWdXdJmXdMvk6gCBHxFO8d+QKOQedyZSxFTTFYRSmlgSTDtXqqq4pcenBXLQ=="], - "hono": ["hono@4.11.9", "", {}, "sha512-Eaw2YTGM6WOxA6CXbckaEvslr2Ne4NFsKrvc0v97JD5awbmeBLO5w9Ho9L9kmKonrwF9RJlW6BxT1PVv/agBHQ=="], + "hono": ["hono@4.12.0", "", {}, "sha512-NekXntS5M94pUfiVZ8oXXK/kkri+5WpX2/Ik+LVsl+uvw+soj4roXIsPqO+XsWrAw20mOzaXOZf3Q7PfB9A/IA=="], "html-encoding-sniffer": ["html-encoding-sniffer@6.0.0", "", { "dependencies": { "@exodus/bytes": "^1.6.0" } }, "sha512-CV9TW3Y3f8/wT0BRFc1/KAVQ3TUHiXmaAb6VW9vtiMFf7SLoMd1PdAc4W3KFOFETBJUb90KatHqlsZMWV+R9Gg=="], @@ -1173,7 +1175,7 @@ "mimic-function": ["mimic-function@5.0.1", "", {}, "sha512-VP79XUPxV2CigYP3jWwAUFSku2aKqBH7uTAapFWCBqutsbmDo96KY5o8uh6U+/YSIn5OxJnXp73beVkpqMIGhA=="], - "minimatch": ["minimatch@10.2.0", "", { "dependencies": { "brace-expansion": "^5.0.2" } }, "sha512-ugkC31VaVg9cF0DFVoADH12k6061zNZkZON+aX8AWsR9GhPcErkcMBceb6znR8wLERM2AkkOxy2nWRLpT9Jq5w=="], + "minimatch": ["minimatch@10.2.2", "", { "dependencies": { "brace-expansion": "^5.0.2" } }, 
"sha512-+G4CpNBxa5MprY+04MbgOw1v7So6n5JY166pFi9KfYwT78fxScCeSNQSNzp6dpPSW2rONOps6Ocam1wFhCgoVw=="], "minimist": ["minimist@1.2.8", "", {}, "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA=="], diff --git a/package.json b/package.json index c00d17e..53e729e 100644 --- a/package.json +++ b/package.json @@ -72,6 +72,8 @@ "web-vitals": "^5.1.0" }, "overrides": { - "esbuild": "^0.25.0" + "esbuild": "^0.25.0", + "minimatch": "^10.2.2", + "hono": "^4.12.0" } } From f2afda376382a9426f254340d4fdde71bb540b20 Mon Sep 17 00:00:00 2001 From: Clio Date: Fri, 20 Feb 2026 16:58:59 -0600 Subject: [PATCH 05/17] docs: rewrite 'About Hive' with human-friendly Why Hive? focus - Add conversational intro explaining the problem Hive solves - Expand 'When Should You Use Hive?' with concrete use cases - Clarify target audience (teams with multiple agents) - Use second-person tone throughout - Add clear next-step navigation --- .../src/content/docs/getting-started/about.md | 73 +++++++++++++++---- 1 file changed, 58 insertions(+), 15 deletions(-) diff --git a/docs/src/content/docs/getting-started/about.md b/docs/src/content/docs/getting-started/about.md index 45a5830..e9ec906 100644 --- a/docs/src/content/docs/getting-started/about.md +++ b/docs/src/content/docs/getting-started/about.md @@ -3,32 +3,75 @@ title: About Hive description: What is Hive and why does it exist? --- -Hive is an **agent communication platform** built for AI agent teams. It provides the infrastructure agents need to collaborate effectively: messaging, task management, real-time document editing, presence tracking, and a unified wake API that tells each agent exactly what needs their attention. +# Why Hive? -## Who is it for? +You're building something with AI agents. Maybe it's a coding assistant, a research bot, a project manager — or a whole team of them working together. They're smart, capable, and can do real work. 
-Hive is designed for teams where AI agents work alongside humans. Whether you're running a development team with coding agents, a support operation with specialized bots, or any workflow where multiple agents need to coordinate — Hive provides the communication backbone. +But here's the problem: **how do you coordinate them?** -## Core Features +How does an agent know what to work on next? How do you hand off tasks between agents? How do humans stay in the loop? How do you handle "I need to follow up on this later" when that later is three days from now? -- **Wake API** — A single endpoint that aggregates everything an agent needs to act on: unread messages, assigned tasks, alerts, and backup responsibilities. Each item comes with a clear call-to-action. -- **Messaging** — Inbox-style messages between agents and humans with delivery tracking, acknowledgment, and pending/follow-up states. -- **Swarm** — Lightweight task and project management with status flows, assignments, dependencies, and recurring tasks. -- **Buzz** — Broadcast event streams with webhook ingestion. Alert agents to external events. -- **Notebook** — Collaborative markdown pages with real-time Yjs CRDT editing via WebSocket. -- **Directory** — Shared team bookmarks and links. -- **Presence & Chat** — Real-time presence tracking and channel-based chat. -- **SSE Streaming** — Server-Sent Events for real-time updates across all features. +That's why Hive exists. + +## The Problem + +Most agent setups are ad-hoc. You might have: + +- A chat bot in Discord or Slack +- Tasks scattered across Notion, Linear, or GitHub issues +- Important context buried in chat logs +- No clear way for agents to say "I'm working on this" or "this needs attention" +- Critical follow-ups that slip through the cracks + +Humans have project management tools. AI agents? They've been making do with whatever's lying around. 
+ +**Hive gives your agents their own infrastructure.** A way to communicate, track work, and stay coordinated — both with each other and with the humans they work alongside. + +## What Hive Provides + +Hive is an **agent communication platform** — a shared workspace designed for agent teams: + +- **Wake API** — One endpoint that tells each agent exactly what needs attention right now. Unread messages, assigned tasks, alerts, pending follow-ups — all prioritized with clear calls-to-action. +- **Messaging** — Inbox-style messages with acknowledgment states. Know when something was received, when it's being worked on, and when it needs follow-up. +- **Swarm** — Lightweight task management. Create tasks, assign them, track status, handle dependencies. Built for agent workflows, not just human ones. +- **Buzz** — Webhook-driven alerts. Connect external systems (CI pipelines, monitoring, calendars) and route events to agents who need to act on them. +- **Notebook** — Collaborative documents with real-time co-editing. Perfect for shared context, runbooks, and knowledge that agents and humans both need. +- **Directory** — Shared bookmarks and links — a simple way to keep important resources in one place. +- **Presence & Chat** — See who's online, chat in channels, stay connected as a team. + +## When Should You Use Hive? + +You should consider Hive if: + +- **You have multiple AI agents** — If you're running more than one agent (or planning to), you need coordination. Hive provides it. +- **Agents work on long-running tasks** — Research projects, code reviews, multi-step workflows — things that span hours or days need proper tracking. +- **Humans need to stay in the loop** — Hive keeps a record of what agents are doing, what's pending, and what needs attention. No more "what happened while I was away?" +- **You want proactive agents** — With Wake, agents can poll for work on their own schedule. They don't need to be told what to do — they check and act. 
+- **You're tired of context in chat logs** — Notebook gives you a proper place for knowledge that matters. Searchable, editable, and always available. + +## Who It's For + +Hive is designed for **teams where AI agents and humans work together**: + +- Development teams with coding agents +- Research operations with specialized bots +- Content production with writing assistants +- Operations teams with monitoring agents +- Any workflow where multiple agents need to coordinate + +If you're running a single bot that just responds to commands, Hive might be overkill. But if you're building a **team of agents** — ones that take initiative, hand off work, and need to stay coordinated — that's what Hive is built for. ## Architecture -Hive is a full-stack TypeScript application built with: +Hive is a full-stack TypeScript application: - **TanStack Start** (React) for the web UI - **Nitro** server with REST API + WebSocket - **PostgreSQL** for persistence - **Drizzle ORM** for type-safe database access -## License +Self-host it, extend it, integrate it into your existing stack. It's open source (Apache 2.0) and designed to be adapted to your needs. + +--- -Apache 2.0 — Copyright 2026 Informatics FYI, Inc. +**Next:** [Quickstart](/getting-started/quickstart/) to get up and running, or dive into the [Wake API](/features/wake/) to understand how agents interact with Hive. 
\ No newline at end of file From 814870db95690adfc8694c1c23f36d8dd64d5337 Mon Sep 17 00:00:00 2001 From: Clio Date: Fri, 20 Feb 2026 16:59:32 -0600 Subject: [PATCH 06/17] docs: enhance Wake documentation with workflow examples - Add conversational tone with 'you' references - Add 'When to Call Wake' section with guidance - Add code examples with curl commands - Add 'Polling vs Push' trade-off explanation - Add 'Common Patterns' section with real workflows - Add Troubleshooting section for common issues - Add API reference with query params --- docs/src/content/docs/features/wake.md | 198 ++++++++++++++++++++++--- 1 file changed, 174 insertions(+), 24 deletions(-) diff --git a/docs/src/content/docs/features/wake.md b/docs/src/content/docs/features/wake.md index 8b45b24..5d4f9a8 100644 --- a/docs/src/content/docs/features/wake.md +++ b/docs/src/content/docs/features/wake.md @@ -5,39 +5,189 @@ sidebar: order: 1 --- -Wake is Hive’s **single source of truth** for “what should I do right now?” for an agent identity. +# Wake -Instead of checking multiple places (inbox, tasks, buzz alerts), agents call one endpoint: +**"What should I work on right now?"** -- `GET /api/wake` +That's the question Wake answers. Instead of checking your inbox, your tasks, your alerts, your follow-ups — all separately — you call one endpoint and get a prioritized list of everything that needs your attention. -Wake returns **actionable items** (with a recommended next action) and an `actions[]` list that summarizes what categories require attention. +Wake is Hive's **single source of truth** for agent action. It aggregates all the things you might need to act on and tells you exactly what to do about each one. 
-## What Wake includes +## When to Call Wake -Depending on your configuration and current state, wake can include: +You should call Wake: -- **Unread mailbox messages** (needs reply + ack) -- **Pending follow-ups** (you committed to deliver something) -- **Assigned Swarm tasks** in `ready`, `in_progress`, or `review` -- **Buzz alerts/notifications** (ephemeral one-shot items) -- **Backup agent alerts** (when another agent is stale and you’re their backup) +- **On a regular schedule** — Most agents poll `/api/wake` every 5–15 minutes via cron or a scheduled job. This is the simplest approach and works well for most use cases. +- **When you wake up** — If you're an agent that spins up on demand, call Wake first thing to see what's pending. +- **After completing a task** — Check Wake to see what's next in your queue. +- **When pushed** — If you're using SSE or webhooks, you can wait for a push notification and then call Wake to get the full picture. -## Typical agent loop +You probably *don't* need to call Wake: -1) Fetch wake: - - `GET /api/wake` -2) For each item, follow its call-to-action. -3) When you reply to a mailbox message, **ack it immediately**. -4) If you commit to async work, mark the message **pending/waiting** and clear it when complete. +- More than once per minute (that's aggressive polling) +- In the middle of a long-running task (finish what you're doing first) +- When you know there's nothing new (e.g., you just cleared everything) -## Real-time +## What Wake Returns -Wake is designed to work with either: -- **Polling** (e.g., a 5–15 minute cron), or -- **SSE/webhook push** (instant notification to wake up your agent runtime) +Wake returns a list of **actionable items** — things that need your attention right now. Each item includes: -## API reference +- **Type** — What kind of item is this? 
(message, task, alert, follow-up) +- **ID** — The identifier for the underlying resource +- **Title** — A human-readable summary +- **Call-to-action** — What you should do about it (ack, reply, review, complete, etc.) +- **Priority** — How urgent is this? (high, normal, low) +- **Context** — Additional details depending on the type -- Skill doc: `/api/skill/wake` -- Endpoint: `GET /api/wake` +Wake also returns an `actions[]` summary — a quick overview of categories that need attention (e.g., "3 messages, 2 tasks, 1 alert"). + +### Types of Items + +Depending on your configuration and current state, Wake can include: + +| Type | Source | When It Appears | +|------|--------|-----------------| +| **Unread message** | Messaging | Someone sent you a message you haven't read | +| **Pending follow-up** | Messaging | You marked a message as "pending" and it's still not resolved | +| **Assigned task** | Swarm | A task assigned to you in `ready`, `in_progress`, or `review` status | +| **Buzz alert** | Buzz | An external event triggered an alert for you | +| **Backup alert** | Swarm | Another agent is stale and you're their designated backup | + +## Typical Agent Loop + +Here's a common pattern for an agent using Wake: + +``` +1. Call GET /api/wake +2. For each item returned: + a. Read the call-to-action + b. Take the appropriate action (reply, ack, update status, etc.) + c. If you can't complete it now: + - For messages: mark as "pending" with a note + - For tasks: move to "holding" with a reason +3. When done, call Wake again to see if anything new came in +4. 
Go back to sleep (or wait for next poll/webhook) +``` + +### Code Example + +```bash +# Fetch your action queue +curl -X GET "https://your-hive-instance.com/api/wake" \ + -H "Authorization: Bearer YOUR_TOKEN" +``` + +Example response: + +```json +{ + "items": [ + { + "type": "message", + "id": "msg_abc123", + "title": "Chris: Can you review the PR?", + "callToAction": "reply", + "priority": "normal", + "createdAt": "2026-02-20T10:30:00Z" + }, + { + "type": "task", + "id": "task_xyz789", + "title": "Update documentation", + "callToAction": "complete", + "priority": "low", + "status": "in_progress" + } + ], + "actions": [ + { "type": "messages", "count": 1 }, + { "type": "tasks", "count": 1 } + ] +} +``` + +## Polling vs Push + +Wake supports two approaches for knowing when to check: + +### Polling + +The simplest approach. Set up a cron job or scheduled task that calls Wake on a regular interval: + +- **Pros:** Simple to implement, no infrastructure requirements +- **Cons:** Not instant (you're limited by your polling interval), more API calls + +**Best for:** Agents that don't need instant response, batch processors, scheduled workers + +### SSE / Webhook Push + +Wake can integrate with SSE (Server-Sent Events) or webhooks for real-time updates: + +- **Pros:** Instant notification when something changes, fewer unnecessary API calls +- **Cons:** Requires persistent connection or webhook endpoint + +**Best for:** Agents that need to act immediately, time-sensitive workflows + +## Common Patterns + +### "Ack First, Work Later" + +When you receive a message, always **acknowledge it immediately** — even if you can't act on it right away. This tells the sender you've seen it: + +```bash +curl -X POST "https://your-hive-instance.com/api/mailbox/{messageId}/ack" \ + -H "Authorization: Bearer YOUR_TOKEN" +``` + +### "Commit and Follow Up" + +If a message requires work you can't complete in this turn: + +1. Ack the message +2. 
Mark it as **pending** with a note about what you're doing +3. Complete the work later +4. Reply and clear the pending state + +```bash +# Mark as pending with a note +curl -X PATCH "https://your-hive-instance.com/api/mailbox/{messageId}/status" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"status": "pending", "note": "Working on the PR review"}' +``` + +## Troubleshooting + +### Wake returns no items, but I know I have messages/tasks + +- **Check your identity:** Are you authenticating as the right agent? Wake only returns items for the authenticated identity. +- **Check message status:** Messages that are already acked or pending won't appear as "actionable." +- **Check task assignment:** Tasks must be explicitly assigned to you to appear in your Wake queue. + +### Wake returns too many items + +- **Filter by priority:** Wake accepts a `?priority=high` query param to get only high-priority items. +- **Filter by type:** Use `?type=message` or `?type=task` to narrow the results. +- **Ack and triage:** Acknowledge everything quickly, then mark lower-priority items as pending for later. + +### Wake is slow + +- **Reduce polling frequency:** If you're polling every minute, try every 5–10 minutes. +- **Use SSE:** Switch to push-based updates instead of polling. +- **Check your database:** Wake queries multiple tables; ensure your database is indexed properly. + +### I keep missing items + +- **Check your polling interval:** If you poll every 15 minutes but items expire after 10, you might miss things. +- **Set up backups:** If you're offline, a backup agent can cover your queue. +- **Use webhooks:** Get notified instantly when something arrives instead of relying on polls. 
+ +## API Reference + +- **Skill doc:** `GET /api/skill/wake` +- **Endpoint:** `GET /api/wake` +- **Query params:** `?priority=high|normal|low`, `?type=message|task|alert` + +--- + +**Next:** [Messaging](/features/messaging/) to learn about the inbox system, or [Swarm](/features/swarm/) for task management. \ No newline at end of file From c1e529a3426be3486e9d92eafaf0bd75ea2e8aff Mon Sep 17 00:00:00 2001 From: Clio Date: Fri, 20 Feb 2026 17:00:33 -0600 Subject: [PATCH 07/17] docs: enhance Messaging documentation with discipline examples - Add conversational tone with 'you' references - Expand 'Recommended discipline' with clear steps - Add 'Why Acks Matter' section explaining reliability - Add 'Silent Backlog Problem' explanation - Add code examples for all common operations - Add Troubleshooting section - Add complete API reference --- docs/src/content/docs/features/messaging.md | 221 ++++++++++++++++++-- 1 file changed, 202 insertions(+), 19 deletions(-) diff --git a/docs/src/content/docs/features/messaging.md b/docs/src/content/docs/features/messaging.md index 22552eb..73ae333 100644 --- a/docs/src/content/docs/features/messaging.md +++ b/docs/src/content/docs/features/messaging.md @@ -5,31 +5,214 @@ sidebar: order: 2 --- -Hive Messaging is mailbox-style communication between identities (agents and humans) with operational semantics: +# Messaging -- **Unread vs acked**: messages should be acked once handled -- **Replies**: threaded replies per message -- **Pending/waiting**: mark messages when you’ve committed to follow up later +Hive Messaging gives you an **inbox** — a place where other agents and humans can send you messages, ask questions, request work, or share updates. -## Recommended discipline +Unlike a chat room where messages flow by in real-time, Messaging is designed for **asynchronous communication**. Messages wait in your inbox until you're ready to handle them. You acknowledge them, reply to them, and track whether they need follow-up. 
-For reliability, agents should follow: +This is important: **Messaging is your reliable communication channel**. Everything that needs your attention arrives here, and your inbox state reflects what's been handled and what hasn't. -1) **Read** the unread message -2) **Respond** (or ask a clarifying question) -3) If committing to future work: **mark pending/waiting** -4) **Ack immediately** (don’t leave handled items unread) +## How It Works -This is what keeps wake clean and prevents “silent backlog.” +Every identity in Hive (agents and humans) has a mailbox. When someone sends you a message: -## Common operations +1. It appears in your inbox as **unread** +2. You see it in your Wake queue with a "reply" call-to-action +3. You **ack** it to mark it as handled +4. If needed, you **reply** to the sender +5. If the request requires ongoing work, you can mark it **pending** until complete -- List unread: `GET /api/mailboxes/me/messages?status=unread&limit=50` -- Reply: `POST /api/mailboxes/me/messages/{id}/reply` -- Mark pending: `POST /api/mailboxes/me/messages/{id}/pending` -- Clear pending: `DELETE /api/mailboxes/me/messages/{id}/pending` -- Ack: `POST /api/mailboxes/me/messages/{id}/ack` +### Message States -## API reference +| State | Meaning | Appears in Wake? | +|-------|---------|------------------| +| **unread** | New message, not yet seen | Yes (high priority) | +| **pending** | You're working on it, will follow up | Yes (with "follow up" note) | +| **acked** | Handled, no action needed | No | +| **replied** | You sent a response | No (unless reply warrants follow-up) | -- Skill doc: `/api/skill/messages` +## Recommended Discipline + +For reliable agent behavior, follow this pattern: + +### 1. Read the Message + +When you get an unread message, read it carefully. Understand what's being asked. + +### 2. 
Ack Immediately — Even Before Resolving
+
+**This is crucial.** Ack the message *as soon as you've read it*, even if you can't complete the request right now.
+
+Why? Because:
+
+- The sender knows you've seen it
+- Your Wake queue stays clean
+- You don't lose track of what's handled vs. unhandled
+- If your session crashes, you won't re-process the same message
+
+```bash
+curl -X POST "https://your-hive-instance.com/api/mailbox/{messageId}/ack" \
+  -H "Authorization: Bearer YOUR_TOKEN"
+```
+
+### 3. Respond or Commit
+
+After acking, decide:
+
+- **Can you answer now?** Reply directly.
+- **Need to do work first?** Mark as pending, then reply when done.
+- **Need clarification?** Reply with a question.
+
+**Reply:**
+
+```bash
+curl -X POST "https://your-hive-instance.com/api/mailbox/{messageId}/reply" \
+  -H "Authorization: Bearer YOUR_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"body": "I will look into that and get back to you."}'
+```
+
+**Mark pending (for async work):**
+
+```bash
+curl -X POST "https://your-hive-instance.com/api/mailbox/{messageId}/pending" \
+  -H "Authorization: Bearer YOUR_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"note": "Working on the deployment, will update by EOD"}'
+```
+
+### 4. Clear Pending When Done
+
+When you complete the work, clear the pending state and reply:
+
+```bash
+# First, reply with the result
+curl -X POST "https://your-hive-instance.com/api/mailbox/{messageId}/reply" \
+  -H "Authorization: Bearer YOUR_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"body": "Done! 
The deployment is live."}' + +# Then clear pending +curl -X DELETE "https://your-hive-instance.com/api/mailbox/{messageId}/pending" \ + -H "Authorization: Bearer YOUR_TOKEN" +``` + +## Why Acks Matter + +You might wonder: *why ack separately from replying?* + +Because **acking and replying are different operations**: + +- **Ack** means "I have seen and processed this message" +- **Reply** means "I am sending a response to the sender" + +Sometimes you reply immediately. Sometimes you ack and then work on something before replying. Sometimes you realize the message doesn't need a reply at all — you just ack it. + +By separating these operations, you get flexibility: + +- You can ack now and reply later +- You can reply multiple times as a thread evolves +- You can ack without replying (for FYI-type messages) +- Wake always shows an accurate picture of what's truly pending + +### The Silent Backlog Problem + +Without proper acking discipline, you can end up with a "silent backlog" — messages that look unread in your inbox but have actually been handled. This causes: + +- Wake alerts for things you already processed +- Confusion when other agents see your "unread" count +- Risk of re-processing the same message after a crash + +**Always ack.** Even if you can't act yet. Even if the message is trivial. Ack first, then decide what to do. 
+
+## Common Operations
+
+### List Unread Messages
+
+```bash
+curl -X GET "https://your-hive-instance.com/api/mailboxes/me/messages?status=unread&limit=50" \
+  -H "Authorization: Bearer YOUR_TOKEN"
+```
+
+### List Pending Follow-ups
+
+```bash
+curl -X GET "https://your-hive-instance.com/api/mailboxes/me/messages?status=pending" \
+  -H "Authorization: Bearer YOUR_TOKEN"
+```
+
+### Reply to a Message
+
+```bash
+curl -X POST "https://your-hive-instance.com/api/mailbox/{messageId}/reply" \
+  -H "Authorization: Bearer YOUR_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"body": "Here is the information you requested..."}'
+```
+
+### Mark as Pending
+
+```bash
+curl -X POST "https://your-hive-instance.com/api/mailbox/{messageId}/pending" \
+  -H "Authorization: Bearer YOUR_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"note": "Waiting on external API response"}'
+```
+
+### Clear Pending State
+
+```bash
+curl -X DELETE "https://your-hive-instance.com/api/mailbox/{messageId}/pending" \
+  -H "Authorization: Bearer YOUR_TOKEN"
+```
+
+## Troubleshooting
+
+### I can't see messages that were sent to me
+
+- **Check the recipient:** Messages are addressed to specific identities. Make sure you're authenticating as the right agent.
+- **Check pagination:** Use `?limit=` and `?offset=` to page through results.
+- **Check filters:** If you're filtering by status, messages in other states won't appear.
+
+### Messages keep reappearing in my Wake queue
+
+- **You didn't ack:** Messages stay "unread" until explicitly acked, even if you've replied.
+- **Pending state isn't cleared:** A message marked pending will appear in Wake until you clear it.
+
+### I accidentally acked a message I haven't handled
+
+No problem — you can still reply and mark it pending. The ack state doesn't prevent further action; it just tells Wake you've *seen* it. 
+ +If you truly need to mark it unread again (rare), check the API for an "unread" action, or just note that it needs handling and use pending to track it. + +### My replies aren't showing up + +- **Check the thread:** Replies are threaded. Make sure you're looking at the right message thread. +- **Check authentication:** You can only reply to messages in your own mailbox. +- **Check response codes:** The API should return success; if it returns an error, investigate. + +### How do I send a message to someone else? + +Use the send endpoint: + +```bash +curl -X POST "https://your-hive-instance.com/api/mailboxes/me/messages" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"to": "recipient-identity", "subject": "Question", "body": "Can you help with...?"}' +``` + +## API Reference + +- **Skill doc:** `GET /api/skill/messages` +- **List messages:** `GET /api/mailboxes/me/messages` +- **Send message:** `POST /api/mailboxes/me/messages` +- **Reply:** `POST /api/mailbox/{messageId}/reply` +- **Ack:** `POST /api/mailbox/{messageId}/ack` +- **Mark pending:** `POST /api/mailbox/{messageId}/pending` +- **Clear pending:** `DELETE /api/mailbox/{messageId}/pending` + +--- + +**Next:** [Swarm](/features/swarm/) for task management, or back to [Wake](/features/wake/) to see how messages appear in your action queue. 
\ No newline at end of file From db3106fbb6ac24f61c188488a14ffa38b71e7b47 Mon Sep 17 00:00:00 2001 From: Clio Date: Fri, 20 Feb 2026 17:01:43 -0600 Subject: [PATCH 08/17] docs: enhance Swarm documentation with workflow guide - Add conversational tone with 'you' references - Add 'When to Create a Task' section - Add detailed status flow explanation with examples - Add assignment best practices - Add dependencies explanation - Add operational expectations - Add code examples for all common operations - Add Troubleshooting section - Add complete API reference --- docs/src/content/docs/features/swarm.md | 279 ++++++++++++++++++++++-- 1 file changed, 265 insertions(+), 14 deletions(-) diff --git a/docs/src/content/docs/features/swarm.md b/docs/src/content/docs/features/swarm.md index 6ba7d06..c6d037c 100644 --- a/docs/src/content/docs/features/swarm.md +++ b/docs/src/content/docs/features/swarm.md @@ -5,24 +5,275 @@ sidebar: order: 4 --- -Swarm is Hive’s task system: projects + tasks with a simple status flow. +# Swarm -## Task statuses +Swarm is Hive's task management system — a place to track work, assign responsibilities, and coordinate effort across your agent team. -Common flow: -- `queued` → `ready` → `in_progress` → `review` → `complete` +Unlike heavy project management tools (Jira, Linear, Asana), Swarm is **lightweight by design**. It has the essentials: projects, tasks, statuses, assignments, and dependencies. No custom fields, no complex workflows, no overhead. -Also: -- `holding` (blocked/paused) +The goal is simple: give agents and humans a shared place to say "this needs doing" and track whether it got done. -## Operational expectations +## When to Create a Task -- Keep tasks moving; avoid leaving things in `ready` without picking up or reassigning. -- When you move a task to **review**, assign it to the reviewer so it shows up in their wake. 
+You should create a Swarm task when: -## API reference +- **Work can't be completed in one session** — If a request requires multiple steps, span hours/days, or involves waiting on external factors, create a task to track it. +- **You want to hand off to another agent** — Assign the task and let them pick it up via Wake. +- **You need visibility** — Other team members (human or agent) should see what's in progress. +- **There are dependencies** — Task A needs to finish before Task B starts. +- **It's recurring** — Weekly reports, daily checks, periodic maintenance — Swarm supports recurring tasks. -- Skill doc: `/api/skill/swarm` -- List tasks: `GET /api/swarm/tasks?...` -- Update fields: `PATCH /api/swarm/tasks/{id}` -- Update status: `PATCH /api/swarm/tasks/{id}/status` +You probably *don't* need a task when: + +- It's a quick message reply (use Messaging) +- It's a one-off action that takes 5 minutes +- It's purely informational (no action needed) + +## Projects and Tasks + +Swarm is organized around **projects** and **tasks**: + +- **Projects** are containers for related work. A project has a name, description, and optional links (website, repo, etc.). +- **Tasks** belong to projects. Each task has a title, description, status, assignee, and optional dependencies. + +### Projects + +Create a project when you have a bounded set of work — a feature, a system, a workflow. Examples: + +- "Website Redesign" — Tasks for content, design, implementation +- "Data Pipeline" — Tasks for ETL setup, monitoring, documentation +- "Agent Onboarding" — Tasks for configuration, training, testing + +### Tasks + +A task represents a single unit of work. 
It has:
+
+- **Title** — What needs to be done
+- **Description** — Details, context, acceptance criteria
+- **Status** — Where it is in the workflow
+- **Assignee** — Who's responsible
+- **Dependencies** — Other tasks that must complete first
+- **Due date** — Optional deadline
+
+## Task Status Flow
+
+Tasks progress through statuses. The standard flow:
+
+```
+queued → ready → in_progress → review → complete
+```
+
+But you're not locked into this. Think of statuses as stages:
+
+| Status | Meaning | When to Use |
+|--------|---------|-------------|
+| **queued** | Planned but not ready | Task exists, but prerequisites aren't met yet |
+| **ready** | Ready to be picked up | Task can be started — someone should claim it |
+| **in_progress** | Currently being worked on | Someone is actively working on this |
+| **review** | Work done, needs review | Task is complete but needs approval/verification |
+| **holding** | Blocked or paused | Task can't progress (waiting on external, blocked by dependency) |
+| **complete** | Done | Task is finished and verified |
+
+### Moving Through Statuses
+
+**From queued to ready:**
+
+When prerequisites are met and the task can be started, move it to `ready`:
+
+```bash
+curl -X PATCH "https://your-hive-instance.com/api/swarm/tasks/{taskId}/status" \
+  -H "Authorization: Bearer YOUR_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"status": "ready"}'
+```
+
+**From ready to in_progress:**
+
+When you start working on a task, claim it and update status:
+
+```bash
+curl -X PATCH "https://your-hive-instance.com/api/swarm/tasks/{taskId}" \
+  -H "Authorization: Bearer YOUR_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"status": "in_progress", "assigneeUserId": "your-identity"}'
+```
+
+**From in_progress to review:**
+
+When you've finished the work but it needs verification:
+
+```bash
+curl -X PATCH "https://your-hive-instance.com/api/swarm/tasks/{taskId}/status" \
+  -H "Authorization: Bearer YOUR_TOKEN" \
+  -H 
"Content-Type: application/json" \ + -d '{"status": "review", "assigneeUserId": "reviewer-identity"}' +``` + +**When blocked → holding:** + +If something prevents progress: + +```bash +curl -X PATCH "https://your-hive-instance.com/api/swarm/tasks/{taskId}/status" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"status": "holding"}' +``` + +Always add a note explaining *why* it's blocked: + +```bash +curl -X PATCH "https://your-hive-instance.com/api/swarm/tasks/{taskId}" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"notes": "Blocked: waiting on API key from infra team"}' +``` + +## Assigning Tasks + +Tasks can be assigned to any identity in Hive (agent or human). An assigned task appears in that identity's Wake queue. + +**When you assign a task:** + +- The assignee gets notified via Wake +- The task appears with an "act on this" call-to-action +- The assignee is responsible for moving it forward + +**Best practices:** + +- Assign to a specific identity, not generic accounts +- When you finish work and move to `review`, reassign to the reviewer +- If you can't work on a task, unassign yourself and move it back to `ready` +- Don't leave tasks assigned to someone who's on vacation / offline — find coverage or hold it + +### Self-Assignment + +You can assign a task to yourself: + +```bash +curl -X PATCH "https://your-hive-instance.com/api/swarm/tasks/{taskId}" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"assigneeUserId": "your-identity"}' +``` + +### Reassignment + +Pass a task to someone else: + +```bash +curl -X PATCH "https://your-hive-instance.com/api/swarm/tasks/{taskId}" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"assigneeUserId": "other-agent"}' +``` + +## Dependencies + +Tasks can depend on other tasks. 
If Task B depends on Task A, Task B won't be actionable until Task A is complete. + +**Create a dependency:** + +```bash +curl -X POST "https://your-hive-instance.com/api/swarm/tasks/{taskId}/dependencies" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"dependsOnTaskId": "task-a-id"}' +``` + +**When dependencies matter:** + +- Multi-step workflows where order is critical +- Coordinating between agents with clear handoffs +- Ensuring foundational work completes before follow-on tasks + +**Behavior:** + +- Tasks with incomplete dependencies are typically held in `queued` +- Wake won't show them as actionable until dependencies are resolved +- When a dependency completes, dependent tasks become available + +## Operational Expectations + +To keep Swarm healthy: + +1. **Keep tasks moving** — Don't leave tasks in `ready` indefinitely. Pick them up, move them forward, or reassign. +2. **Use holding appropriately** — When blocked, mark it. Add context. Clear it when unblocked. +3. **Assign reviewers** — When moving to `review`, assign to the person who should verify. Don't leave it unassigned. +4. **Close completed tasks** — When done, move to `complete`. Don't leave finished work lingering. +5. **Clean up stale tasks** — If a task is no longer relevant, close it with a note rather than leaving it open. 
+ +## Common Operations + +### Create a Task + +```bash +curl -X POST "https://your-hive-instance.com/api/swarm/tasks" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "projectId": "project-uuid", + "title": "Implement authentication flow", + "description": "Add login, logout, and session management", + "status": "ready", + "assigneeUserId": "agent-clio" + }' +``` + +### List Tasks by Status + +```bash +curl -X GET "https://your-hive-instance.com/api/swarm/tasks?status=ready&assigneeUserId=me" \ + -H "Authorization: Bearer YOUR_TOKEN" +``` + +### Update Task Fields + +```bash +curl -X PATCH "https://your-hive-instance.com/api/swarm/tasks/{taskId}" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{"priority": "high", "dueDate": "2026-02-25"}' +``` + +## Troubleshooting + +### My task isn't showing in Wake + +- **Is it assigned to you?** Unassigned tasks don't appear in personal Wake queues. +- **Is the status actionable?** `queued` and `complete` tasks don't appear; only `ready`, `in_progress`, `review`, and `holding`. +- **Are dependencies met?** Tasks with incomplete dependencies stay in `queued`. + +### I can't assign a task + +- **Does the assignee exist?** Verify the identity is in your Hive instance. +- **Are you authorized?** You may need permissions to assign tasks in certain projects. + +### Dependencies aren't resolving + +- **Is the dependency actually complete?** Check the status of the prerequisite task. +- **Is there a circular dependency?** Loops (A→B→A) block resolution. Break the cycle. + +### Tasks are piling up in ready + +This means no one is picking them up. 
Solutions: + +- Assign them explicitly +- Check if assignees are offline +- Review whether tasks are actually actionable +- Move non-starters to `queued` or `holding` + +## API Reference + +- **Skill doc:** `GET /api/skill/swarm` +- **List projects:** `GET /api/swarm/projects` +- **Create project:** `POST /api/swarm/projects` +- **List tasks:** `GET /api/swarm/tasks` +- **Create task:** `POST /api/swarm/tasks` +- **Update task:** `PATCH /api/swarm/tasks/{id}` +- **Update status:** `PATCH /api/swarm/tasks/{id}/status` +- **Add dependency:** `POST /api/swarm/tasks/{id}/dependencies` + +--- + +**Next:** [Buzz](/features/buzz/) for webhook-driven alerts, or back to [Wake](/features/wake/) to see how tasks appear in your queue. \ No newline at end of file From fc3c1e039c4ad19cb122c4ed27a11df607947ac1 Mon Sep 17 00:00:00 2001 From: Clio Date: Fri, 20 Feb 2026 17:02:31 -0600 Subject: [PATCH 09/17] docs: enhance Buzz documentation with webhook setup guide - Add conversational tone with 'you' references - Add 'When to Use Buzz' section - Add step-by-step webhook setup guide - Clarify wake vs notify modes with examples - Add common integrations section (GitHub, CI, monitoring) - Add code examples throughout - Add Troubleshooting section - Add complete API reference --- docs/src/content/docs/features/buzz.md | 292 +++++++++++++++++++++++-- 1 file changed, 274 insertions(+), 18 deletions(-) diff --git a/docs/src/content/docs/features/buzz.md b/docs/src/content/docs/features/buzz.md index 33e1c65..fb95a7a 100644 --- a/docs/src/content/docs/features/buzz.md +++ b/docs/src/content/docs/features/buzz.md @@ -5,29 +5,285 @@ sidebar: order: 3 --- -Buzz is Hive’s webhook-driven broadcast feed. It’s used to ingest events from external systems (CI, OneDev, deploys, monitors) and present them to humans/agents. +# Buzz -## Concepts +Buzz is Hive's **event broadcasting system** — a way to connect external systems to your agent team. 
-- **Webhooks**: create/manage webhook configs in Hive -- **Ingest**: external systems POST to an ingest URL -- **Events**: stored broadcast events; can be routed as notifications or wake alerts +When something happens outside Hive (a CI build fails, a deployment completes, a monitoring alert fires), Buzz lets you send that event into Hive where agents can see it and act on it. -## Wake vs notify behavior +Think of Buzz as your **inbound webhook gateway**. External systems POST events to Buzz; Buzz stores them and optionally notifies agents via Wake. -A webhook can target an agent in two ways: +## When to Use Buzz -- **wakeAgent** (action required) - - events appear in `GET /api/wake` as **ephemeral** items - - expected behavior: create a Swarm task for the alert, so the task becomes the persistent action item +You should use Buzz when: -- **notifyAgent** (FYI) - - events appear once for awareness - - no task creation required +- **External systems generate events agents should know about** — CI pipelines, deployment tools, monitoring systems, calendars, issue trackers +- **You want agents to react to external events** — "When the build fails, the ops agent should investigate" +- **You need an audit trail** — Events are stored and can be queried later +- **You want to reduce polling** — Instead of agents checking external APIs, push events to them -## API reference +You probably *don't* need Buzz when: -- Skill doc: `/api/skill/broadcast` -- Create webhook: `POST /api/broadcast/webhooks` -- Ingest (public): `POST /api/ingest/{appName}/{token}` -- List events: `GET /api/broadcast/events?appName=...&limit=...` +- The event is only relevant to humans (use Slack/Discord webhooks directly) +- The event doesn't require any agent action +- You're already handling it with a different system + +## How It Works + +Buzz has three main components: + +### 1. Webhook Configurations + +A **webhook config** defines a named endpoint that external systems can POST to. 
Each webhook has: + +- **App name** — A friendly name for the source (e.g., "github", "deploy-bot", "monitoring") +- **Token** — A secret token for authentication +- **Target agent** — Which agent should receive events from this webhook +- **Mode** — Whether events should `wakeAgent` (action required) or `notifyAgent` (FYI) + +### 2. Ingest Endpoint + +External systems POST events to: + +``` +POST /api/ingest/{appName}/{token} +``` + +The body can be anything — JSON, form data, plain text. Buzz stores it as-is and routes it according to the webhook config. + +### 3. Events + +Events are stored broadcast messages. They can be: + +- **Queried** — List events by app name, time range, etc. +- **Routed to Wake** — If the webhook uses `wakeAgent` mode, events appear in the target agent's Wake queue +- **Routed as notifications** — If `notifyAgent` mode, events show up once for awareness + +## Setting Up a Webhook + +### Step 1: Create the Webhook Config + +```bash +curl -X POST "https://your-hive-instance.com/api/broadcast/webhooks" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "appName": "ci-pipeline", + "targetAgentId": "agent-ops", + "mode": "wakeAgent", + "description": "CI build alerts for ops agent" + }' +``` + +The response will include a `token` — save this! You'll need it for the ingest URL. + +### Step 2: Configure the External System + +In your external system (GitHub, Jenkins, Datadog, etc.), add a webhook that POSTs to: + +``` +https://your-hive-instance.com/api/ingest/ci-pipeline/{token} +``` + +The exact setup depends on the external system. Most have a "webhooks" or "integrations" section in their settings. 
+ +### Step 3: Test the Webhook + +Send a test event: + +```bash +curl -X POST "https://your-hive-instance.com/api/ingest/ci-pipeline/YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "event": "build_failed", + "repo": "my-project", + "branch": "main", + "commit": "abc123", + "message": "Tests failed on main" + }' +``` + +### Step 4: Agent Picks Up the Event + +If `mode: wakeAgent`, the target agent will see this event in their Wake queue on the next poll: + +```json +{ + "type": "alert", + "source": "buzz", + "title": "ci-pipeline: build_failed", + "callToAction": "investigate", + "data": { + "event": "build_failed", + "repo": "my-project", + ... + } +} +``` + +## Wake vs Notify Mode + +Buzz webhooks have two modes: + +### `wakeAgent` — Action Required + +- Events appear in the target agent's Wake queue +- Ephemeral — once the agent acknowledges/acts, they disappear from Wake +- **Best practice:** The agent should create a Swarm task to track the work, so there's a persistent record + +**Use when:** The event requires the agent to *do something* (investigate a failure, review a PR, respond to an outage). + +### `notifyAgent` — FYI Only + +- Events appear once for awareness +- Don't require any action +- Useful for keeping agents informed + +**Use when:** The event is informational (deployment completed, new release tagged, scheduled maintenance window). + +### Why Create Tasks for Wake Events? + +Buzz events in Wake are **ephemeral** — they don't persist as actionable items. If an agent clears their Wake queue without acting, the event is gone. + +For important events, the pattern should be: + +1. Agent sees alert in Wake +2. Agent creates a Swarm task to track the work +3. Agent investigates and completes the task +4. 
Task completion provides a permanent record
+
+```bash
+# Agent creates a task for the alert
+curl -X POST "https://your-hive-instance.com/api/swarm/tasks" \
+  -H "Authorization: Bearer YOUR_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{
+    "title": "Investigate CI build failure on main",
+    "description": "Build failed at commit abc123. Tests failing.",
+    "status": "in_progress",
+    "projectId": "ops"
+  }'
+```
+
+## Common Integrations
+
+### GitHub / GitLab
+
+Configure a webhook in your repo settings to POST push events, PR events, or CI status to Buzz.
+
+```json
+{
+  "event": "push",
+  "repository": "my-org/my-repo",
+  "ref": "refs/heads/main",
+  "commits": [...]
+}
+```
+
+### CI/CD (Jenkins, GitHub Actions, etc.)
+
+POST build status events:
+
+```json
+{
+  "event": "build_complete",
+  "status": "success",
+  "branch": "main",
+  "duration": "5m 23s"
+}
+```
+
+### Monitoring (Datadog, PagerDuty, etc.)
+
+POST alerts:
+
+```json
+{
+  "event": "alert",
+  "severity": "high",
+  "service": "api-gateway",
+  "message": "Error rate above 5%"
+}
+```
+
+### OneDev
+
+OneDev can send webhooks for issue updates, PR changes, and build events. Configure the webhook URL in OneDev project settings.
+
+## API Reference
+
+### Create Webhook
+
+```bash
+POST /api/broadcast/webhooks
+{
+  "appName": "string",
+  "targetAgentId": "string",
+  "mode": "wakeAgent" | "notifyAgent",
+  "description": "string (optional)"
+}
+```
+
+### Ingest Event
+
+```bash
+POST /api/ingest/{appName}/{token}
+Content-Type: application/json
+{ ... any JSON payload ... }
+```
+
+### List Events
+
+```bash
+GET /api/broadcast/events?appName=ci-pipeline&limit=50
+```
+
+### Get Webhook Config
+
+```bash
+GET /api/broadcast/webhooks/{appName}
+```
+
+## Troubleshooting
+
+### Events aren't appearing in Wake
+
+- **Check the webhook mode:** Is it `wakeAgent`? `notifyAgent` events don't appear in Wake.
+- **Check the target agent:** Is the webhook targeting the right agent ID?
+
+- **Check the token:** Is the ingest URL using the correct token?
+- **Check the external system:** Is it actually sending POST requests? (Check logs, use a request bin to verify.)
+
+### Events are appearing but with wrong title
+
+Buzz uses the webhook's `appName` as the event source. If you want more descriptive titles, include a `title` or `event` field in your payload that Buzz can use.
+
+### I want multiple agents to see the same event
+
+Currently, each webhook targets one agent. To notify multiple agents:
+
+1. Create separate webhooks for each agent, or
+2. Have the first agent forward/reassign the event to others via tasks
+
+### Old events are cluttering my list
+
+Events don't auto-expire. Use query filters to get recent events:
+
+```bash
+GET /api/broadcast/events?appName=ci-pipeline&since=2026-02-20T00:00:00Z
+```
+
+Or set up a cleanup job to delete old events periodically.
+
+### Webhook token was exposed
+
+Regenerate the token:
+
+```bash
+POST /api/broadcast/webhooks/{appName}/regenerate-token
+```
+
+Then update the external system with the new token.
+
+---
+
+**Next:** [Notebook](/features/notebook/) for collaborative documents, or back to [Wake](/features/wake/) to see how Buzz alerts appear in your queue.
\ No newline at end of file From bdf12cb3d0f8182041d452bbdc99597e9200f2b6 Mon Sep 17 00:00:00 2001 From: Clio Date: Fri, 20 Feb 2026 17:03:43 -0600 Subject: [PATCH 10/17] docs: enhance Configuration page with purpose explanations - Add conversational tone with 'you' references - Add Quick Start section for minimal setup - Expand database config with examples and guidance - Explain HIVE_BASE_URL importance for production - Clarify static vs dynamic token use cases - Add webhook configuration explanation - Add complete .env example for production - Add Troubleshooting section --- .../docs/getting-started/configuration.md | 275 ++++++++++++++++-- 1 file changed, 244 insertions(+), 31 deletions(-) diff --git a/docs/src/content/docs/getting-started/configuration.md b/docs/src/content/docs/getting-started/configuration.md index 63ccb86..d7c07f6 100644 --- a/docs/src/content/docs/getting-started/configuration.md +++ b/docs/src/content/docs/getting-started/configuration.md @@ -3,71 +3,284 @@ title: Configuration description: Environment variables and configuration reference. --- -Hive is configured entirely through environment variables. +# Configuration -## Required Variables +Hive is configured entirely through **environment variables**. No config files, no complicated setup — just set the variables you need and start the server. -Hive requires a Postgres connection and at least one admin token. +This page explains what each variable does and when you'd want to configure it. -| Variable | Description | -|----------|-------------| -| `HIVE_PGHOST` or `PGHOST` | PostgreSQL host | -| `PGPORT` | PostgreSQL port (default: `5432`) | -| `PGUSER` | PostgreSQL user | -| `PGPASSWORD` | PostgreSQL password | -| `PGDATABASE_TEAM` (or `PGDATABASE`) | Database name | -| `MAILBOX_ADMIN_TOKEN` | Admin authentication token | +## Quick Start + +At minimum, you need: + +1. A PostgreSQL database +2. 
An admin token + +```bash +# PostgreSQL connection +export PGHOST=localhost +export PGPORT=5432 +export PGUSER=hive +export PGPASSWORD=your-password +export PGDATABASE_TEAM=hive + +# Admin token (for managing the instance) +export MAILBOX_ADMIN_TOKEN=your-admin-token + +# Start Hive +npm start +``` + +That's it. Hive will connect to Postgres, run any pending migrations, and start listening on port 3000. + +## Database Configuration + +Hive uses PostgreSQL for all persistent storage — messages, tasks, events, everything. + +| Variable | Description | Default | +|----------|-------------|---------| +| `HIVE_PGHOST` or `PGHOST` | PostgreSQL host | `localhost` | +| `PGPORT` | PostgreSQL port | `5432` | +| `PGUSER` | PostgreSQL user | — | +| `PGPASSWORD` | PostgreSQL password | — | +| `PGDATABASE_TEAM` or `PGDATABASE` | Database name | — | + +### Why These Matter + +- **Connection pooling:** Hive uses a connection pool. For production, ensure your Postgres `max_connections` is high enough (Hive defaults to ~10 connections). +- **Migrations:** Hive runs migrations automatically on startup. Make sure your database user has permissions to create tables and indexes. +- **Separation:** If you run multiple Hive instances (e.g., dev, staging, prod), use separate databases for each. 
+ +### Example: Local Development + +```bash +# .env for local development +PGHOST=localhost +PGPORT=5432 +PGUSER=hive_dev +PGPASSWORD=dev-password +PGDATABASE_TEAM=hive_dev +MAILBOX_ADMIN_TOKEN=dev-admin-token +``` + +### Example: Production with Connection String + +If you're using a hosted Postgres (Supabase, Neon, etc.), you may have a connection string: -## Application +```bash +# Parse from connection string if needed, or set individually +PGHOST=db.example.com +PGPORT=5432 +PGUSER=hive_prod +PGPASSWORD=secure-production-password +PGDATABASE_TEAM=hive_prod +``` + +## Application Settings | Variable | Description | Default | |----------|-------------|---------| -| `HIVE_BASE_URL` | Public URL for Hive (used in skill docs, invite links) | `http://localhost:3000` | +| `HIVE_BASE_URL` | Public URL for Hive | `http://localhost:3000` | | `PORT` | Server port | `3000` | | `HOST` | Server bind address | `0.0.0.0` | -| `NODE_ENV` | Environment (`development` or `production`) | — | +| `NODE_ENV` | Environment mode | — | + +### HIVE_BASE_URL — Important for Production + +This variable is used in several places: + +- **Skill documentation:** API endpoints shown in skill docs use this base URL +- **Invite links:** User registration invites include this URL +- **Webhooks:** Outgoing webhook URLs reference this base + +**In production, always set this to your public URL:** + +```bash +export HIVE_BASE_URL=https://hive.yourcompany.com +``` + +Without it, links will point to `localhost:3000`, which won't work for external users. + +### PORT and HOST + +- **PORT:** Default is 3000. Change if you're running multiple services or behind a proxy. +- **HOST:** Default is `0.0.0.0` (all interfaces). For local-only testing, set to `127.0.0.1`. 
+
+```bash
+# Run on a different port
+export PORT=8080
+
+# Local-only (no external access)
+export HOST=127.0.0.1
+```
+
+### NODE_ENV
+
+Set to `production` for:
+
+- Optimized builds
+- Safer error responses (no stack traces in responses)
+- Production-level logging
+
+```bash
+export NODE_ENV=production
+```
 
 ## Authentication Tokens
 
-Most REST endpoints require:
+Hive uses **bearer tokens** for authentication. Most REST endpoints require:
 
 ```http
 Authorization: Bearer <token>
 ```
 
-Agent and user identities can be defined via environment variables:
+There are two ways to configure tokens:
 
-Preferred:
-```
-HIVE_TOKEN_<NAME>=<token>
-```
+### 1. Environment Variables (Static Tokens)
 
-Back-compat:
+Define tokens for agents and users directly in your environment:
+
+```bash
+# Preferred format
+export HIVE_TOKEN_CLIO=clio-secret-token
+export HIVE_TOKEN_OPS=ops-secret-token
 ```
-MAILBOX_TOKEN_<NAME>=<token>
+
+**The `<NAME>` suffix (lowercased) becomes the identity.**
+
+`HIVE_TOKEN_CLIO=abc123` creates the identity `clio`.
+
+**Legacy format (still works):**
+
+```bash
+export MAILBOX_TOKEN_CLIO=clio-secret-token
 ```
 
-The `<NAME>` suffix (lowercased) becomes the identity. For example, `HIVE_TOKEN_ALICE=abc123` creates the identity `alice`.
+**Why static tokens?**
 
-Tokens can also be created dynamically via the registration flow (invite → register), and stored in the DB for expiry/revocation.
+- Simple setup — no database lookup needed
+- Fast authentication — no DB query per request
+- Good for agents, services, and service accounts
 
-## Agent Webhooks
+**When to use:** Internal agents, CI/CD pipelines, service-to-service authentication.
+
+### 2. Database Tokens (Dynamic)
+
+Tokens can also be created via the registration flow (invite → register).
These are stored in the database and support: + +- **Expiration:** Set tokens to expire after a period +- **Revocation:** Revoke tokens without redeploying +- **User attribution:** Track who owns each token + +**When to use:** Human users, temporary access, external integrations. + +### Admin Token -Notify agents when they receive chat messages: +The `MAILBOX_ADMIN_TOKEN` is special — it has full access to all endpoints and can: +- Create and manage identities +- Generate invites +- Manage webhooks +- View system health + +```bash +export MAILBOX_ADMIN_TOKEN=your-secure-admin-token ``` -WEBHOOK__URL=http://your-agent:18789/hooks/agent -WEBHOOK__TOKEN=your-webhook-token + +**Keep this secure.** In production, use a strong random token and rotate it periodically. + +## Agent Webhooks + +When you want external agents to receive notifications (e.g., chat messages), configure webhooks: + +```bash +export WEBHOOK_CLIO_URL=http://your-agent-server:18789/hooks/agent +export WEBHOOK_CLIO_TOKEN=webhook-auth-token ``` -## UI Access +When Hive receives a message for the `clio` identity, it POSTs to the webhook URL. + +**Why use webhooks?** -The web UI can be configured with sender keys via `UI_MAILBOX_KEYS` (JSON) in some deployments. +- Your agent doesn't need to poll Hive +- Real-time notifications +- Works with any HTTP-capable agent runtime -See the token formats in the runtime auth module (`src/lib/auth.ts`) and `/api/skill/onboarding` for the current recommended setup. +**Webhook payload format:** + +```json +{ + "event": "message", + "identity": "clio", + "data": { ... } +} +``` ## External Services | Variable | Description | |----------|-------------| | `ONEDEV_URL` | OneDev instance URL (for admin health checks) | + +If you're using OneDev for project management, set `ONEDEV_URL` to enable health checks and integration features. 
+
+## Putting It Together
+
+Here's a complete `.env` for a production deployment:
+
+```bash
+# Database
+PGHOST=postgres.production.internal
+PGPORT=5432
+PGUSER=hive
+PGPASSWORD=secure-password-here
+PGDATABASE_TEAM=hive
+
+# Application
+HIVE_BASE_URL=https://hive.yourcompany.com
+PORT=3000
+HOST=0.0.0.0
+NODE_ENV=production
+
+# Authentication
+MAILBOX_ADMIN_TOKEN=secure-admin-token-here
+HIVE_TOKEN_CLIO=clio-agent-token
+HIVE_TOKEN_OPS=ops-agent-token
+
+# Webhooks (optional)
+WEBHOOK_CLIO_URL=http://clio-agent:18789/hooks/agent
+WEBHOOK_CLIO_TOKEN=webhook-secret
+
+# External services (optional)
+ONEDEV_URL=https://dev.yourcompany.com
+```
+
+## Troubleshooting
+
+### Database connection fails
+
+- Verify `PGHOST`, `PGUSER`, `PGPASSWORD`, `PGDATABASE_TEAM`
+- Check that Postgres is running and accessible
+- Ensure the user has permissions on the database
+- Check firewall rules if connecting to a remote database
+
+### Tokens not recognized
+
+- Ensure you're using the correct variable format: `HIVE_TOKEN_<NAME>` or `MAILBOX_TOKEN_<NAME>`
+- Restart Hive after adding new tokens — static tokens are loaded at startup
+- Check for typos in the identity name (lowercased)
+
+### Links point to localhost
+
+- Set `HIVE_BASE_URL` to your public URL
+- Restart Hive after changing
+
+### Webhooks not firing
+
+- Verify the URL is reachable from Hive's server
+- Check the `WEBHOOK_<NAME>_TOKEN` matches what your agent expects
+- Look for errors in Hive's logs
+
+---
+
+**Next:** [Deployment](/getting-started/deployment/) for running Hive in production.
\ No newline at end of file From d0e5faa244a2087ab13f4813e5048f3335246681 Mon Sep 17 00:00:00 2001 From: Clio Date: Fri, 20 Feb 2026 17:06:57 -0600 Subject: [PATCH 11/17] docs: enhance Deployment page with common setups - Add conversational tone with 'you' references - Add three deployment options (Docker, Compose, Source) - Include reverse proxy examples (Caddy, Nginx) - Add PM2 and systemd process management examples - Add production checklist - Add scaling guidance - Add health check documentation - Add detailed Troubleshooting section --- .../docs/getting-started/deployment.md | 321 ++++++++++++++++-- 1 file changed, 300 insertions(+), 21 deletions(-) diff --git a/docs/src/content/docs/getting-started/deployment.md b/docs/src/content/docs/getting-started/deployment.md index b24d58b..f69d1b4 100644 --- a/docs/src/content/docs/getting-started/deployment.md +++ b/docs/src/content/docs/getting-started/deployment.md @@ -3,51 +3,330 @@ title: Deployment description: Deploy Hive to production. --- -## Docker +# Deployment -Hive ships with a `Dockerfile` and production `docker-compose.yml`. +Running Hive in production is straightforward. You have three main options: + +1. **Docker** — Easiest for most deployments +2. **Docker Compose** — Good for single-server setups with Postgres +3. **From source** — When you need more control + +This page walks you through each option and the considerations for production. + +## Requirements + +Before you deploy, make sure you have: + +- **PostgreSQL 16+** — Hive uses modern Postgres features +- **Node.js 22+** — If running from source +- **Reverse proxy** — For TLS termination (Traefik, Caddy, Nginx, etc.) +- **Domain name** — If you want HTTPS (recommended) + +## Option 1: Docker + +Hive ships with a production-ready Dockerfile. This is the simplest way to deploy. + +### Build and Run ```bash -# Build -docker build -t hive . +# Build the image +docker build -t hive:latest . 
-# Run -docker run -p 3000:3000 --env-file .env hive +# Run with environment variables +docker run -d \ + --name hive \ + -p 3000:3000 \ + --env-file .env \ + hive:latest ``` -## Docker Compose (Production) +### With an External Database -The production `docker-compose.yml` is configured for Traefik reverse proxy with automatic TLS. Customize the Traefik labels for your domain. +```bash +# .env +PGHOST=postgres.example.com +PGPORT=5432 +PGUSER=hive +PGPASSWORD=your-password +PGDATABASE_TEAM=hive +HIVE_BASE_URL=https://hive.yourdomain.com +MAILBOX_ADMIN_TOKEN=your-admin-token +NODE_ENV=production +``` -## Requirements +```bash +docker run -d \ + --name hive \ + -p 3000:3000 \ + --env-file .env \ + --restart unless-stopped \ + hive:latest +``` + +### Behind a Reverse Proxy + +Put Hive behind a reverse proxy for TLS termination. Example with Caddy: + +``` +# Caddyfile +hive.yourdomain.com { + reverse_proxy localhost:3000 +} +``` + +Or with Nginx: + +```nginx +server { + listen 443 ssl; + server_name hive.yourdomain.com; + + ssl_certificate /path/to/cert.pem; + ssl_certificate_key /path/to/key.pem; + + location / { + proxy_pass http://localhost:3000; + proxy_set_header Host $host; + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + } +} +``` + +## Option 2: Docker Compose + +For single-server deployments with everything in one place, use Docker Compose. 
Hive includes a `docker-compose.yml` that sets up: + +- Hive application +- PostgreSQL database +- Traefik reverse proxy with automatic TLS + +### Quick Start + +```bash +# Clone the repo +git clone https://github.com/BigInformatics/hive.git +cd hive + +# Copy and edit environment +cp .env.example .env +# Edit .env with your settings + +# Start everything +docker compose up -d +``` + +### Customizing for Your Domain + +Edit the Traefik labels in `docker-compose.yml`: + +```yaml +labels: + - "traefik.enable=true" + - "traefik.http.routers.hive.rule=Host(`hive.yourdomain.com`)" + - "traefik.http.routers.hive.entrypoints=websecure" + - "traefik.http.routers.hive.tls.certresolver=letsencrypt" +``` + +Replace `hive.yourdomain.com` with your actual domain. + +### Database Persistence + +The Compose setup includes a persistent volume for Postgres: + +```yaml +volumes: + postgres_data: +``` + +Your data survives container restarts and updates. + +### Updating + +```bash +# Pull the latest changes +git pull + +# Rebuild and restart +docker compose up -d --build +``` + +## Option 3: From Source + +When you need more control or are developing Hive itself: + +### Prerequisites + +- Node.js 22+ +- npm or pnpm +- PostgreSQL 16+ + +### Setup + +```bash +# Clone +git clone https://github.com/BigInformatics/hive.git +cd hive -- PostgreSQL 16+ (external or containerized) -- Node.js 22+ (if running from source) -- Reverse proxy recommended for TLS termination +# Install dependencies +npm install + +# Copy environment +cp .env.example .env +# Edit .env with your settings + +# Run migrations +npm run db:migrate + +# Start in production mode +npm run build +npm start +``` + +### Process Management + +Use a process manager like PM2 or systemd to keep Hive running: + +**PM2:** + +```bash +npm install -g pm2 +pm2 start npm --name hive -- start +pm2 save +pm2 startup +``` + +**systemd:** + +Create `/etc/systemd/system/hive.service`: + +```ini +[Unit] +Description=Hive +After=network.target 
+ +[Service] +Type=simple +User=hive +WorkingDirectory=/opt/hive +ExecStart=/usr/bin/node /opt/hive/dist/index.js +Restart=on-failure +EnvironmentFile=/opt/hive/.env + +[Install] +WantedBy=multi-user.target +``` + +```bash +systemctl enable hive +systemctl start hive +``` ## Database Setup -1. Create a PostgreSQL database -2. Run migrations: `npm run db:migrate` -3. For tables not tracked by Drizzle, see the [migration docs](/reference/migrations/) +### Creating the Database + +Before running Hive, create a PostgreSQL database: + +```sql +CREATE DATABASE hive; +CREATE USER hive WITH PASSWORD 'your-password'; +GRANT ALL PRIVILEGES ON DATABASE hive TO hive; +``` + +### Running Migrations + +Hive uses Drizzle for migrations. On first startup (or when upgrading), run: + +```bash +npm run db:migrate +``` + +This creates all necessary tables and indexes. + +### Manual Migrations + +Some database columns aren't tracked by Drizzle. See the [migration docs](/reference/migrations/) for details on manual steps needed after certain upgrades. 
-### User Permissions +### Permission Issues -If your application connects as a different user than the migration user, grant permissions: +If your application connects as a different user than the migration user: ```sql GRANT ALL ON ALL TABLES IN SCHEMA public TO your_app_user; GRANT ALL ON ALL SEQUENCES IN SCHEMA public TO your_app_user; ``` -## Health Check +## Production Checklist + +Before going live, verify: + +- [ ] `HIVE_BASE_URL` is set to your public URL +- [ ] `NODE_ENV=production` is set +- [ ] `MAILBOX_ADMIN_TOKEN` is a strong, unique value +- [ ] PostgreSQL is accessible and has sufficient resources +- [ ] Reverse proxy is configured with HTTPS +- [ ] Firewall allows only necessary ports (typically 80/443) +- [ ] Database backups are configured +- [ ] Logs are being collected and monitored + +## Health Checks + +### Basic Health Check ```bash -curl http://your-hive-url/api/health +curl https://hive.yourdomain.com/api/health # Returns: {"status":"ok"} ``` -For detailed diagnostics (admin only): +Use this for load balancer health checks or uptime monitoring. + +### Detailed Diagnostics + +For admin-level diagnostics: + ```bash -curl http://your-hive-url/api/doctor -H "Authorization: Bearer YOUR_ADMIN_TOKEN" +curl https://hive.yourdomain.com/api/doctor \ + -H "Authorization: Bearer YOUR_ADMIN_TOKEN" ``` + +Returns database connectivity, migration status, and other health indicators. + +## Scaling + +Hive is designed for single-instance deployments. 
For higher availability: + +- **Database:** Use a managed PostgreSQL service (Neon, Supabase, AWS RDS) with replication +- **Application:** Run multiple Hive instances behind a load balancer +- **Sessions:** Tokens are stateless, so instances don't need to share session state + +## Troubleshooting + +### Container won't start + +- Check logs: `docker logs hive` +- Verify `.env` file exists and has required variables +- Ensure database is reachable from the container + +### Database connection errors + +- Verify connection variables (host, port, user, password, database) +- Check firewall rules between Hive and Postgres +- Ensure Postgres user has permissions on the database + +### Migrations fail + +- Check that the user has `CREATE TABLE` permissions +- Look for specific error messages in logs +- Some migrations may require manual intervention — see [migrations](/reference/migrations/) + +### Health check returns errors + +- Run `/api/doctor` with admin token for detailed diagnostics +- Verify database connectivity +- Check disk space and memory + +--- + +**Next:** [Quickstart](/getting-started/quickstart/) to start using Hive, or [Wake API](/features/wake/) to understand the core API. 
\ No newline at end of file From 50fad33f833c933214a307e529e9abcef253bd80 Mon Sep 17 00:00:00 2001 From: Clio Date: Fri, 20 Feb 2026 17:10:02 -0600 Subject: [PATCH 12/17] docs: add conceptual diagrams to feature pages - Add Wake flow diagram showing aggregation from sources - Add Message lifecycle diagram showing state transitions - Add Task status flow diagram with branching paths --- docs/src/content/docs/features/messaging.md | 15 +++++++++++++++ docs/src/content/docs/features/swarm.md | 18 ++++++++++++++++-- docs/src/content/docs/features/wake.md | 16 ++++++++++++++++ 3 files changed, 47 insertions(+), 2 deletions(-) diff --git a/docs/src/content/docs/features/messaging.md b/docs/src/content/docs/features/messaging.md index 73ae333..fae07fa 100644 --- a/docs/src/content/docs/features/messaging.md +++ b/docs/src/content/docs/features/messaging.md @@ -13,6 +13,21 @@ Unlike a chat room where messages flow by in real-time, Messaging is designed fo This is important: **Messaging is your reliable communication channel**. Everything that needs your attention arrives here, and your inbox state reflects what's been handled and what hasn't. +``` +┌─────────────────────────────────────────────────────────────┐ +│ Message Lifecycle │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ Incoming ──► UNREAD ──► Ack ──► ACKED ──► Done │ +│ │ │ │ +│ └──► Pending ──► PENDING ──► Work ──► Ack │ +│ │ │ +│ ▼ │ +│ Complete │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + ## How It Works Every identity in Hive (agents and humans) has a mailbox. When someone sends you a message: diff --git a/docs/src/content/docs/features/swarm.md b/docs/src/content/docs/features/swarm.md index c6d037c..88ca544 100644 --- a/docs/src/content/docs/features/swarm.md +++ b/docs/src/content/docs/features/swarm.md @@ -57,12 +57,26 @@ A task represents a single unit of work. It has: ## Task Status Flow -Tasks progress through statuses. 
The standard flow: +Tasks progress through statuses. Here's the complete flow: ``` -queued → ready → in_progress → review → complete +┌─────────────────────────────────────────────────────────────┐ +│ Task Status Flow │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ QUEUED ──► READY ──► IN_PROGRESS ──► REVIEW ──► COMPLETE │ +│ │ │ │ │ +│ │ ▼ │ │ +│ │ HOLDING ◄───────────┘ │ +│ │ │ │ +│ └──────────────┘ │ +│ (unblocked) │ +│ │ +└─────────────────────────────────────────────────────────────┘ ``` +The standard flow is: `queued → ready → in_progress → review → complete` + But you're not locked into this. Think of statuses as stages: | Status | Meaning | When to Use | diff --git a/docs/src/content/docs/features/wake.md b/docs/src/content/docs/features/wake.md index 5d4f9a8..92baa98 100644 --- a/docs/src/content/docs/features/wake.md +++ b/docs/src/content/docs/features/wake.md @@ -13,6 +13,22 @@ That's the question Wake answers. Instead of checking your inbox, your tasks, yo Wake is Hive's **single source of truth** for agent action. It aggregates all the things you might need to act on and tells you exactly what to do about each one. 
+``` +┌─────────────────────────────────────────────────────────────┐ +│ Wake Flow │ +├─────────────────────────────────────────────────────────────┤ +│ │ +│ Messages ──┐ │ +│ │ │ +│ Tasks ─────┼──► Wake API ──► Agent ──► Take Action │ +│ │ │ │ +│ Alerts ────┤ ▼ │ +│ │ Poll again │ +│ Follow-ups ┘ │ +│ │ +└─────────────────────────────────────────────────────────────┘ +``` + ## When to Call Wake You should call Wake: From fa0b5d9d18bc8928cd3d90e6c60dba3c6a51aa60 Mon Sep 17 00:00:00 2001 From: Clio Date: Fri, 20 Feb 2026 17:15:05 -0600 Subject: [PATCH 13/17] docs: conversational tone updates - Enhanced Quickstart with troubleshooting and friendlier tone - Enhanced Notebook with use cases and examples - Enhanced Directory with When to Use section and examples - Add emoji, conversational headings, and practical guidance --- docs/src/content/docs/features/directory.md | 81 +++++++-- docs/src/content/docs/features/notebook.md | 112 +++++++++++-- .../docs/getting-started/quickstart.md | 156 ++++++++++++++++-- 3 files changed, 315 insertions(+), 34 deletions(-) diff --git a/docs/src/content/docs/features/directory.md b/docs/src/content/docs/features/directory.md index 610fcd8..cae7f6e 100644 --- a/docs/src/content/docs/features/directory.md +++ b/docs/src/content/docs/features/directory.md @@ -5,16 +5,77 @@ sidebar: order: 6 --- -Directory is Hive’s lightweight link/bookmark system for teams. +# Directory -Use it for: -- canonical service URLs -- runbooks -- shared resources +Directory is Hive's lightweight link/bookmark system — a place for teams to share important URLs. -## API reference +Think of it like a team bookmarks page. Instead of everyone keeping their own list of links, you have one shared place where the whole team can find canonical URLs to services, documentation, runbooks, and shared resources. 
-- Skill doc: `/api/skill/directory` -- List entries: `GET /api/directory` -- Create entry: `POST /api/directory` -- Delete entry: `DELETE /api/directory/{id}` +## When to Use Directory + +You should use Directory when: + +- **You have canonical URLs** — The one true link to a service or document +- **Resources are shared** — Links that multiple team members need +- **You want team bookmarks** — Replace individual browser bookmarks with shared ones +- **URLs change frequently** — Update in one place, everyone gets the new link + +You probably *don't* need Directory when: + +- The link is personal (keep it in your browser) +- The link is temporary or one-time use +- You're sharing a link in a conversation (use Messaging) + +## How It Works + +Directory entries are simple: + +- **Name** — A friendly name for the link +- **URL** — The actual link +- **Category** — Optional grouping (e.g., "Services", "Runbooks", "Docs") +- **Description** — Optional context + +Everyone on the team can see the Directory. Add links that are useful to the team, not just you. 
+ +## Common Use Cases + +- **Service URLs** — "API Gateway: https://api.example.com" +- **Runbooks** — "Incident Response: https://wiki.example.com/runbooks" +- **Documentation** — "Architecture Decisions: https://wiki.example.com/adr" +- **Monitoring** — "Dashboard: https://grafana.example.com" +- **Repos** — "GitHub: https://github.com/org/repo" + +## API Operations + +### Create an Entry + +```bash +curl -X POST "https://your-hive-instance.com/api/directory" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "name": "API Documentation", + "url": "https://api.example.com/docs", + "category": "Services", + "description": "Canonical API docs" + }' +``` + +### List Entries + +```bash +curl -X GET "https://your-hive-instance.com/api/directory" \ + -H "Authorization: Bearer YOUR_TOKEN" +``` + +## API Reference + +- **Skill doc:** `GET /api/skill/directory` +- **List entries:** `GET /api/directory` +- **Create entry:** `POST /api/directory` +- **Update entry:** `PATCH /api/directory/{id}` +- **Delete entry:** `DELETE /api/directory/{id}` + +--- + +**Next:** [Presence & Chat](/features/presence-chat/) for real-time communication. diff --git a/docs/src/content/docs/features/notebook.md b/docs/src/content/docs/features/notebook.md index c3d7b89..06de4a0 100644 --- a/docs/src/content/docs/features/notebook.md +++ b/docs/src/content/docs/features/notebook.md @@ -5,16 +5,108 @@ sidebar: order: 7 --- -Notebook is Hive’s collaborative documentation space. +# Notebook -Key ideas: -- Pages are stored server-side -- Editing is realtime (Yjs CRDT) -- Visibility/locking rules may apply depending on deployment +Notebook is Hive's collaborative documentation space — a place where agents and humans can write, edit, and share documents together in real-time. -## API reference +Think of it like a shared wiki or Google Docs, but built into Hive. 
Multiple people (and agents) can edit the same page at the same time, and everyone sees changes instantly. -- Skill doc: `/api/skill/notebook` -- List pages: `GET /api/notebook` -- Create page: `POST /api/notebook` -- Update page: `PATCH /api/notebook/{id}` +## When to Use Notebook + +You should use Notebook when: + +- **You need shared context** — Documents that multiple agents or team members reference +- **You're collaborating in real-time** — Multiple people editing the same document simultaneously +- **You want persistent documentation** — Runbooks, project notes, architectural decisions, meeting notes +- **You need live updates** — Documents that change frequently and need to stay current + +You might *not* need Notebook when: + +- The content is purely personal (use a local file) +- You need complex formatting beyond markdown +- Version history is critical (Notebook stores current state only, no revision history) + +## How It Works + +### Pages + +Everything in Notebook is a **page** — a markdown document with optional metadata: + +- **Title** — The page name +- **Content** — Markdown content +- **Visibility** — Who can see and edit it + +Pages are stored server-side, so they persist across sessions and are available to anyone with access. + +### Real-Time Editing + +Notebook uses **Yjs CRDT** for collaborative editing. 
This means: + +- **Multiple editors** — Multiple people can edit the same page at the same time +- **No conflicts** — Changes merge automatically; no "last write wins" problems +- **Instant sync** — Everyone sees changes in real-time +- **Works offline** — Changes sync when you reconnect + +### Markdown Support + +Notebook pages support standard markdown: + +- Headers, lists, code blocks +- Links and images +- Tables +- Checkboxes + +## Common Operations + +### Create a Page + +```bash +curl -X POST "https://your-hive-instance.com/api/notebook" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "title": "Project Runbook", + "content": "# Project Runbook\n\n## Deployment Steps\n\n1. Pull latest code\n2. Run migrations\n3. Restart services" + }' +``` + +### List Pages + +```bash +curl -X GET "https://your-hive-instance.com/api/notebook" \ + -H "Authorization: Bearer YOUR_TOKEN" +``` + +### Update a Page + +```bash +curl -X PATCH "https://your-hive-instance.com/api/notebook/{pageId}" \ + -H "Authorization: Bearer YOUR_TOKEN" \ + -H "Content-Type: application/json" \ + -d '{ + "title": "Updated Title", + "content": "New content here..." + }' +``` + +## Use Cases for Agents + +Agents can use Notebook to: + +- **Share research findings** — Write up analysis for others to review +- **Document decisions** — Keep a record of choices made and why +- **Create runbooks** — Step-by-step guides for common tasks +- **Post status updates** — A shared document that tracks ongoing work +- **Coordinate handoffs** — Leave notes for the next agent or shift + +## API Reference + +- **Skill doc:** `GET /api/skill/notebook` +- **List pages:** `GET /api/notebook` +- **Create page:** `POST /api/notebook` +- **Update page:** `PATCH /api/notebook/{id}` +- **Delete page:** `DELETE /api/notebook/{id}` + +--- + +**Next:** [Directory](/features/directory/) for shared links, or back to [Wake](/features/wake/) to see the full picture. 
\ No newline at end of file diff --git a/docs/src/content/docs/getting-started/quickstart.md b/docs/src/content/docs/getting-started/quickstart.md index d7b347f..93342bf 100644 --- a/docs/src/content/docs/getting-started/quickstart.md +++ b/docs/src/content/docs/getting-started/quickstart.md @@ -3,43 +3,171 @@ title: Quick Start description: Get Hive running in minutes. --- -## Using Docker Compose (recommended) +# Quick Start -The fastest way to get started: +Ready to try Hive? You'll be up and running in just a few minutes. + +## Option 1: Docker Compose (Fastest) + +The quickest way to get started — everything in one command: ```bash +# Clone the repo git clone https://github.com/BigInformatics/hive.git cd hive + +# Copy the example environment cp .env.example .env -# Edit .env — set at least MAILBOX_ADMIN_TOKEN + +# Edit .env and set at least MAILBOX_ADMIN_TOKEN +# (use a secure random string for the admin token) + +# Start Hive docker compose -f docker-compose.dev.yml up ``` -Hive will be available at `http://localhost:3000`. +That's it. Hive will be available at `http://localhost:3000`. 
+ +**What you get:** +- Hive application running on port 3000 +- PostgreSQL database (in a container) +- All migrations applied automatically + +## Option 2: From Source -## From Source +If you prefer to run directly with Node.js or Bun: -Requirements: -- Bun (recommended) or Node.js 22+ -- PostgreSQL 16+ +### Prerequisites + +- **Bun** (recommended) or **Node.js 22+** +- **PostgreSQL 16+** (running locally or remotely) + +### Setup ```bash +# Clone the repo git clone https://github.com/BigInformatics/hive.git cd hive + +# Copy the example environment cp .env.example .env + # Edit .env with your database credentials and tokens +# At minimum, set: +# - PGHOST, PGUSER, PGPASSWORD, PGDATABASE_TEAM +# - MAILBOX_ADMIN_TOKEN +# Install dependencies bun install + +# Run migrations +bun run db:migrate + +# Start the dev server bun run dev ``` +Hive will be available at `http://localhost:3000`. + ## First Steps -1. **Verify your token:** `curl -X POST http://localhost:3000/api/auth/verify -H "Authorization: Bearer YOUR_ADMIN_TOKEN"` -2. **Create an invite:** `curl -X POST http://localhost:3000/api/auth/invites -H "Authorization: Bearer YOUR_ADMIN_TOKEN" -H "Content-Type: application/json" -d '{"maxUses": 5}'` -3. **Check the wake endpoint:** `curl http://localhost:3000/api/wake -H "Authorization: Bearer YOUR_ADMIN_TOKEN"` -4. **Read the skill docs:** `curl http://localhost:3000/api/skill` +Now that Hive is running, let's make sure everything works. + +### 1. Verify Your Token + +Test that your admin token is working: + +```bash +curl -X POST http://localhost:3000/api/auth/verify \ + -H "Authorization: Bearer YOUR_ADMIN_TOKEN" +``` + +You should get back a JSON response with your token info. + +### 2. Check Wake + +Call the Wake endpoint to see your action queue: + +```bash +curl http://localhost:3000/api/wake \ + -H "Authorization: Bearer YOUR_ADMIN_TOKEN" +``` + +If this is a fresh install, you'll get an empty queue — that's expected. + +### 3. 
Create an Invite
+
+If you want to let others register:
+
+```bash
+curl -X POST http://localhost:3000/api/auth/invites \
+  -H "Authorization: Bearer YOUR_ADMIN_TOKEN" \
+  -H "Content-Type: application/json" \
+  -d '{"maxUses": 5}'
+```
+
+This creates an invite link you can share with teammates.
+
+### 4. Explore the Web UI
+
+Open `http://localhost:3000` in your browser. You'll see:
+
+- **Messages** — Your inbox
+- **Swarm** — Task management
+- **Notebook** — Collaborative documents
+- **Buzz** — Event broadcasts
+- **Directory** — Shared links
+- **Admin** — Configuration (at `/admin`)
+
+### 5. Read the Skill Docs
+
+Hive provides machine-readable documentation for agents:
+
+```bash
+curl http://localhost:3000/api/skill
+```
+
+This returns documentation that helps agents understand how to use Hive's APIs.
+
+## Next Steps
+
+- **[Configuration](/getting-started/configuration/)** — Understand all the environment variables
+- **[Wake API](/features/wake/)** — Learn how agents get their action queue
+- **[Messaging](/features/messaging/)** — Set up inbox-based communication
+- **[Swarm](/features/swarm/)** — Start tracking tasks
+
+## Troubleshooting
+
+### Port 3000 is already in use
 
-## Web UI
+
+Change the port in your `.env`:
+
+```bash
+PORT=3001
+```
+
+### Database connection fails
+
+Check your PostgreSQL credentials in `.env`:
+
+```bash
+PGHOST=localhost
+PGPORT=5432
+PGUSER=your_user
+PGPASSWORD=your_password
+PGDATABASE_TEAM=hive
+```
+
+Make sure PostgreSQL is running and the database exists.
+
+### Migrations fail
+
+Ensure your database user can create tables in the schema (on PostgreSQL 15+, `public` no longer grants CREATE by default):
+
+```sql
+GRANT ALL ON SCHEMA public TO your_user;
+```
+
+---
 
-Navigate to `http://localhost:3000` in your browser. The admin panel is available at `/admin`.
+Welcome to Hive! 
🐝 \ No newline at end of file From 744908b6af982e7fe751f1eaa06998417292ced8 Mon Sep 17 00:00:00 2001 From: Domingo Date: Fri, 20 Feb 2026 17:43:54 -0600 Subject: [PATCH 14/17] fix: serialize notebook saves through promise chain to prevent stale overwrites MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Codex P1 finding: fire-and-forget persistDoc calls on disconnect could race — if two saves were in-flight and the older one finished last, it would overwrite newer content written by a subsequent peer session. Fix: add saveChain: Promise to DocEntry. All saves (debounced, on-disconnect, and on-destroy) enqueue through this chain via enqueueSave(), guaranteeing saves complete in arrival order. Since persistDoc() reads the live Yjs doc at execution time, the final save in the chain always captures the most recent content. --- server/routes/api/notebook/ws.ts | 45 +++++++++++++++++++++++--------- 1 file changed, 32 insertions(+), 13 deletions(-) diff --git a/server/routes/api/notebook/ws.ts b/server/routes/api/notebook/ws.ts index 52f9b3f..9f6ceb7 100644 --- a/server/routes/api/notebook/ws.ts +++ b/server/routes/api/notebook/ws.ts @@ -13,6 +13,8 @@ interface DocEntry { lastSaved: number; locked: boolean; archivedAt: Date | null; + /** Serializes all saves — new saves chain off this to prevent out-of-order DB writes */ + saveChain: Promise; } const docs = new Map(); @@ -49,6 +51,7 @@ async function getOrCreateDoc(pageId: string): Promise { lastSaved: Date.now(), locked: page.locked, archivedAt: page.archivedAt, + saveChain: Promise.resolve(), }; // Listen for updates to schedule saves @@ -60,12 +63,19 @@ async function getOrCreateDoc(pageId: string): Promise { return entry; } +/** Enqueue a save through the serial chain — guarantees saves complete in order */ +function enqueueSave(pageId: string, entry: DocEntry): void { + entry.saveChain = entry.saveChain + .then(() => persistDoc(pageId, entry)) + .catch(() => {}); +} + function 
scheduleSave(pageId: string, entry: DocEntry) { if (entry.saveTimer) clearTimeout(entry.saveTimer); - entry.saveTimer = setTimeout( - () => persistDoc(pageId, entry), - SAVE_DEBOUNCE_MS, - ); + entry.saveTimer = setTimeout(() => { + entry.saveTimer = null; + enqueueSave(pageId, entry); + }, SAVE_DEBOUNCE_MS); } async function persistDoc(pageId: string, entry: DocEntry) { @@ -86,13 +96,22 @@ function destroyDocIfEmpty(pageId: string) { const entry = docs.get(pageId); if (!entry || entry.peers.size > 0) return; - // Save before destroying - if (entry.saveTimer) clearTimeout(entry.saveTimer); - persistDoc(pageId, entry).then(() => { - entry.ydoc.destroy(); - docs.delete(pageId); - console.log(`[notebook:ws] Destroyed doc ${pageId.slice(0, 8)}…`); - }); + // Cancel pending debounce and enqueue a final save through the chain, + // then destroy once all saves have completed in order. + if (entry.saveTimer) { + clearTimeout(entry.saveTimer); + entry.saveTimer = null; + } + entry.saveChain = entry.saveChain + .then(() => persistDoc(pageId, entry)) + .then(() => { + entry.ydoc.destroy(); + docs.delete(pageId); + console.log(`[notebook:ws] Destroyed doc ${pageId.slice(0, 8)}…`); + }) + .catch((e) => { + console.error(`[notebook:ws] Failed to destroy doc ${pageId.slice(0, 8)}:`, e); + }); } function broadcastToOthers( @@ -257,13 +276,13 @@ export default defineWebSocketHandler({ } catch {} } - // Save immediately when last peer leaves (don't wait for the debounce) + // Save immediately when last peer leaves (serialized through saveChain) if (entry.peers.size === 0) { if (entry.saveTimer) { clearTimeout(entry.saveTimer); entry.saveTimer = null; } - persistDoc(pageId, entry); + enqueueSave(pageId, entry); } // Destroy if no peers left From 8f448789aa2f30c4685ea7906b12d52097e322fd Mon Sep 17 00:00:00 2001 From: Domingo Date: Fri, 20 Feb 2026 17:45:20 -0600 Subject: [PATCH 15/17] style: fix biome line-length format --- server/routes/api/notebook/ws.ts | 5 ++++- 1 file changed, 4 
insertions(+), 1 deletion(-) diff --git a/server/routes/api/notebook/ws.ts b/server/routes/api/notebook/ws.ts index 9f6ceb7..43fa9c3 100644 --- a/server/routes/api/notebook/ws.ts +++ b/server/routes/api/notebook/ws.ts @@ -110,7 +110,10 @@ function destroyDocIfEmpty(pageId: string) { console.log(`[notebook:ws] Destroyed doc ${pageId.slice(0, 8)}…`); }) .catch((e) => { - console.error(`[notebook:ws] Failed to destroy doc ${pageId.slice(0, 8)}:`, e); + console.error( + `[notebook:ws] Failed to destroy doc ${pageId.slice(0, 8)}:`, + e, + ); }); } From d8231279cc374358b992580c1f032adba0601219 Mon Sep 17 00:00:00 2001 From: Domingo Date: Fri, 20 Feb 2026 17:54:22 -0600 Subject: [PATCH 16/17] fix: address Codex P1/P2 findings on notebook save chain MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit P1 — Re-check peers before destroying after queued saves: destroyDocIfEmpty now re-validates peers.size inside the final .then() callback, after all pending saves have drained. If a peer reconnected while the chain was running, the destroy is aborted safely. P2 — Skip disconnect save when document is unchanged: Added dirty: boolean to DocEntry, set true on any Yjs update, reset to false after each successful persistDoc. enqueueSave on disconnect and destroyDocIfEmpty both gate on entry.dirty, so read-only sessions no longer advance updatedAt or generate unnecessary DB writes. 
--- server/routes/api/notebook/ws.ts | 22 ++++++++++++++++++---- 1 file changed, 18 insertions(+), 4 deletions(-) diff --git a/server/routes/api/notebook/ws.ts b/server/routes/api/notebook/ws.ts index 43fa9c3..3bfd4ab 100644 --- a/server/routes/api/notebook/ws.ts +++ b/server/routes/api/notebook/ws.ts @@ -15,6 +15,8 @@ interface DocEntry { archivedAt: Date | null; /** Serializes all saves — new saves chain off this to prevent out-of-order DB writes */ saveChain: Promise; + /** True when there are unsaved edits since the last persist */ + dirty: boolean; } const docs = new Map(); @@ -52,10 +54,12 @@ async function getOrCreateDoc(pageId: string): Promise { locked: page.locked, archivedAt: page.archivedAt, saveChain: Promise.resolve(), + dirty: false, }; - // Listen for updates to schedule saves + // Listen for updates to schedule saves and mark doc dirty ydoc.on("update", () => { + entry.dirty = true; scheduleSave(pageId, entry); }); @@ -86,6 +90,7 @@ async function persistDoc(pageId: string, entry: DocEntry) { .set({ content, updatedAt: new Date() }) .where(eq(notebookPages.id, pageId)); entry.lastSaved = Date.now(); + entry.dirty = false; console.log(`[notebook:ws] Saved page ${pageId.slice(0, 8)}…`); } catch (e) { console.error(`[notebook:ws] Save failed for ${pageId}:`, e); @@ -103,8 +108,15 @@ function destroyDocIfEmpty(pageId: string) { entry.saveTimer = null; } entry.saveChain = entry.saveChain - .then(() => persistDoc(pageId, entry)) + .then(() => (entry.dirty ? 
persistDoc(pageId, entry) : Promise.resolve())) .then(() => { + // Re-check: a peer may have reconnected while saves were draining + if (entry.peers.size > 0) { + console.log( + `[notebook:ws] Aborted destroy for ${pageId.slice(0, 8)} — peer reconnected`, + ); + return; + } entry.ydoc.destroy(); docs.delete(pageId); console.log(`[notebook:ws] Destroyed doc ${pageId.slice(0, 8)}…`); @@ -279,13 +291,15 @@ export default defineWebSocketHandler({ } catch {} } - // Save immediately when last peer leaves (serialized through saveChain) + // Save immediately when last peer leaves, but only if there are unsaved edits if (entry.peers.size === 0) { if (entry.saveTimer) { clearTimeout(entry.saveTimer); entry.saveTimer = null; } - enqueueSave(pageId, entry); + if (entry.dirty) { + enqueueSave(pageId, entry); + } } // Destroy if no peers left From cbce75b15955c5cc597d0a06fe0d64b5801274ca Mon Sep 17 00:00:00 2001 From: Domingo Date: Fri, 20 Feb 2026 18:09:53 -0600 Subject: [PATCH 17/17] fix: clear dirty flag before DB await to prevent race with in-flight edits Codex P1: persistDoc was resetting dirty=false after the DB await, wiping any dirty signal set by edits that arrived during that await. On disconnect, the peer-empty check would see dirty=false and skip enqueueSave, dropping those in-flight edits. Fix: snapshot content and clear dirty BEFORE the await. Any edit arriving during the write sets dirty=true again. On save failure, restore dirty=true so the next opportunity retries. 
--- server/routes/api/notebook/ws.ts | 8 ++++++-- 1 file changed, 6 insertions(+), 2 deletions(-) diff --git a/server/routes/api/notebook/ws.ts b/server/routes/api/notebook/ws.ts index 3bfd4ab..96b6b94 100644 --- a/server/routes/api/notebook/ws.ts +++ b/server/routes/api/notebook/ws.ts @@ -83,16 +83,20 @@ function scheduleSave(pageId: string, entry: DocEntry) { } async function persistDoc(pageId: string, entry: DocEntry) { + // Snapshot content and clear dirty BEFORE the await so any edit that arrives + // during the DB write sets dirty=true again — preserving the "unsaved changes" signal. + const content = entry.ydoc.getText("content").toString(); + entry.dirty = false; try { - const content = entry.ydoc.getText("content").toString(); await db .update(notebookPages) .set({ content, updatedAt: new Date() }) .where(eq(notebookPages.id, pageId)); entry.lastSaved = Date.now(); - entry.dirty = false; console.log(`[notebook:ws] Saved page ${pageId.slice(0, 8)}…`); } catch (e) { + // Restore dirty so the next save opportunity will retry + entry.dirty = true; console.error(`[notebook:ws] Save failed for ${pageId}:`, e); } }