diff --git a/app/static/datacenter/datacenter.js b/app/static/datacenter/datacenter.js
index 2d3087cc..383cfc6f 100644
--- a/app/static/datacenter/datacenter.js
+++ b/app/static/datacenter/datacenter.js
@@ -108,6 +108,11 @@ function updateMetricsUI(data) {
setText('m-req-success', safeNum(sum.success).toLocaleString());
setText('m-req-failed', safeNum(sum.failed).toLocaleString());
setText('m-success-rate', formatPercent(safeNum(sum.success_rate)));
+ setText('m-total-tokens', safeNum(sum.total_tokens).toLocaleString());
+ setText('m-input-tokens', safeNum(sum.input_tokens).toLocaleString());
+ setText('m-output-tokens', safeNum(sum.output_tokens).toLocaleString());
+ setText('m-reasoning-tokens', safeNum(sum.reasoning_tokens).toLocaleString());
+ setText('m-cached-tokens', safeNum(sum.cached_tokens).toLocaleString());
const cache = data.cache || {};
const li = cache.local_image || { count: 0, size_mb: 0 };
From d0a064c436e3ffd902012ef6207e3deb00d7df69 Mon Sep 17 00:00:00 2001
From: ZIC143 <2022220052@email.szu.edu.cn>
Date: Sat, 7 Feb 2026 21:11:01 +0800
Subject: [PATCH 3/8] feat(chat): enhance token usage tracking in stream
 processing and logging
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
app/services/grok/chat.py | 15 +++++++++++++--
app/services/grok/processor.py | 18 +++++++++++++++++-
src/grok/processor.ts | 18 +++++++++++++++---
src/routes/openai.ts | 17 +++++++++++------
4 files changed, 56 insertions(+), 12 deletions(-)
diff --git a/app/services/grok/chat.py b/app/services/grok/chat.py
index 471d2fb4..e476ceb4 100644
--- a/app/services/grok/chat.py
+++ b/app/services/grok/chat.py
@@ -532,7 +532,8 @@ async def completions(
# Handle the response
if is_stream:
- processor = StreamProcessor(model_name, token, think).process(response)
+ stream_processor = StreamProcessor(model_name, token, think)
+ processor = stream_processor.process(response)
prompt_messages = [msg.model_dump() for msg in messages]
async def _wrapped_stream():
@@ -545,8 +546,18 @@ async def _wrapped_stream():
# Only count as "success" when the stream ends naturally.
try:
if completed:
+ usage = stream_processor.build_usage(prompt_messages)
+ raw = usage.get("_raw") or {}
await token_mgr.sync_usage(token, model_name, consume_on_fail=True, is_usage=True)
- await request_stats.record_request(model_name, success=True)
+ await request_stats.record_request(
+ model_name,
+ success=True,
+ total_tokens=int(usage.get("total_tokens", 0) or 0),
+ input_tokens=int(usage.get("prompt_tokens", 0) or 0),
+ output_tokens=int(usage.get("completion_tokens", 0) or 0),
+ reasoning_tokens=int(raw.get("reasoning_tokens", 0) or 0),
+ cached_tokens=int(raw.get("cached_tokens", 0) or 0),
+ )
else:
await request_stats.record_request(model_name, success=False)
except Exception:
diff --git a/app/services/grok/processor.py b/app/services/grok/processor.py
index f1c81832..3be986a6 100644
--- a/app/services/grok/processor.py
+++ b/app/services/grok/processor.py
@@ -117,6 +117,8 @@ def __init__(self, model: str, token: str = "", think: bool = None):
self.fingerprint: str = ""
self.think_opened: bool = False
self.role_sent: bool = False
+ self._output_text: str = ""
+ self._reasoning_text: str = ""
self.filter_tags = get_config("grok.filter_tags", [])
self.image_format = get_config("app.image_format", "url")
@@ -158,6 +160,7 @@ async def process(self, response: AsyncIterable[bytes]) -> AsyncGenerator[str, N
idx = img.get('imageIndex', 0) + 1
progress = img.get('progress', 0)
yield self._sse(f"正在生成第{idx}张图片中,当前进度{progress}%\n")
+ self._reasoning_text += f"正在生成第{idx}张图片中,当前进度{progress}%\n"
continue
# modelResponse
@@ -165,6 +168,7 @@ async def process(self, response: AsyncIterable[bytes]) -> AsyncGenerator[str, N
if self.think_opened and self.show_think:
if msg := mr.get("message"):
yield self._sse(msg + "\n")
+ self._reasoning_text += msg + "\n"
yield self._sse("</think>\n")
self.think_opened = False
@@ -172,18 +176,21 @@ async def process(self, response: AsyncIterable[bytes]) -> AsyncGenerator[str, N
for url in mr.get("generatedImageUrls", []):
parts = url.split("/")
img_id = parts[-2] if len(parts) >= 2 else "image"
-
+
if self.image_format == "base64":
dl_service = self._get_dl()
base64_data = await dl_service.to_base64(url, self.token, "image")
if base64_data:
yield self._sse(f"\n")
+ self._output_text += f"\n"
else:
final_url = await self.process_url(url, "image")
yield self._sse(f"\n")
+ self._output_text += f"\n"
else:
final_url = await self.process_url(url, "image")
yield self._sse(f"\n")
+ self._output_text += f"\n"
if (meta := mr.get("metadata", {})).get("llm_info", {}).get("modelHash"):
self.fingerprint = meta["llm_info"]["modelHash"]
@@ -193,9 +200,14 @@ async def process(self, response: AsyncIterable[bytes]) -> AsyncGenerator[str, N
if (token := resp.get("token")) is not None:
if token and not (self.filter_tags and any(t in token for t in self.filter_tags)):
yield self._sse(token)
+ if self.think_opened and self.show_think:
+ self._reasoning_text += token
+ else:
+ self._output_text += token
if self.think_opened:
yield self._sse("</think>\n")
+ self.think_opened = False
yield self._sse(finish="stop")
yield "data: [DONE]\n\n"
except Exception as e:
@@ -204,6 +216,10 @@ async def process(self, response: AsyncIterable[bytes]) -> AsyncGenerator[str, N
finally:
await self.close()
+ def build_usage(self, prompt_messages: Optional[list[dict]] = None) -> dict[str, Any]:
+ usage = build_chat_usage(prompt_messages or [], (self._output_text + self._reasoning_text))
+ return usage
+
class CollectProcessor(BaseProcessor):
"""非流式响应处理器"""
diff --git a/src/grok/processor.ts b/src/grok/processor.ts
index c6de5d20..9200010c 100644
--- a/src/grok/processor.ts
+++ b/src/grok/processor.ts
@@ -123,7 +123,11 @@ export function createOpenAiStreamFromGrokNdjson(
global: GlobalSettings;
origin: string;
promptMessages?: Array<{ content?: unknown }>;
- onFinish?: (result: { status: number; duration: number }) => Promise<void> | void;
+ onFinish?: (result: {
+ status: number;
+ duration: number;
+ usage?: ReturnType<typeof buildChatUsageFromTexts>;
+ }) => Promise<void> | void;
},
): ReadableStream<Uint8Array> {
const { settings, global, origin } = opts;
@@ -419,7 +423,13 @@ export function createOpenAiStreamFromGrokNdjson(
);
controller.enqueue(encoder.encode(makeChunk(id, created, currentModel, "", "stop")));
controller.enqueue(encoder.encode(makeDone()));
- if (opts.onFinish) await opts.onFinish({ status: finalStatus, duration: (Date.now() - startTime) / 1000 });
+ if (opts.onFinish) {
+ await opts.onFinish({
+ status: finalStatus,
+ duration: (Date.now() - startTime) / 1000,
+ usage,
+ });
+ }
controller.close();
} catch (e) {
finalStatus = 500;
@@ -429,7 +439,9 @@ export function createOpenAiStreamFromGrokNdjson(
),
);
controller.enqueue(encoder.encode(makeDone()));
- if (opts.onFinish) await opts.onFinish({ status: finalStatus, duration: (Date.now() - startTime) / 1000 });
+ if (opts.onFinish) {
+ await opts.onFinish({ status: finalStatus, duration: (Date.now() - startTime) / 1000 });
+ }
controller.close();
} finally {
try {
diff --git a/src/routes/openai.ts b/src/routes/openai.ts
index df6f89f6..e3fe307a 100644
--- a/src/routes/openai.ts
+++ b/src/routes/openai.ts
@@ -1304,8 +1304,13 @@ openAiRoutes.post("/chat/completions", async (c) => {
global: settingsBundle.global,
origin,
promptMessages,
- onFinish: async ({ status, duration }) => {
+ onFinish: async ({ status, duration, usage }) => {
const promptEst = estimateInputTokensFromMessages(promptMessages);
+ const resolved = usage ?? buildChatUsageFromTexts({
+ promptTextTokens: promptEst.textTokens,
+ promptImageTokens: promptEst.imageTokens,
+ completionText: "",
+ });
await addRequestLog(c.env.DB, {
ip,
model: requestedModel,
@@ -1313,11 +1318,11 @@ openAiRoutes.post("/chat/completions", async (c) => {
status,
key_name: keyName,
token_suffix: jwt.slice(-6),
- total_tokens: promptEst.promptTokens,
- input_tokens: promptEst.promptTokens,
- output_tokens: 0,
- reasoning_tokens: 0,
- cached_tokens: 0,
+ total_tokens: resolved.total_tokens,
+ input_tokens: resolved.input_tokens,
+ output_tokens: resolved.output_tokens,
+ reasoning_tokens: resolved.reasoning_tokens,
+ cached_tokens: resolved.cached_tokens,
error: status === 200 ? "" : "stream_error",
});
},
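
In miniature, the accounting this patch adds: StreamProcessor buffers visible
output and reasoning text separately while streaming, and build_usage turns
those buffers into an OpenAI-style usage dict once the stream ends. The sketch
below is Python under stated assumptions: estimate_tokens is a rough chars/4
heuristic, and build_chat_usage is an illustrative stand-in for the project
helper of the same name, whose real counting may differ.

    def estimate_tokens(text: str) -> int:
        # Assumption: rough chars/4 heuristic, not the project's tokenizer.
        return max(0, len(text) // 4)

    def build_chat_usage(prompt_messages: list[dict], completion_text: str) -> dict:
        # Stand-in for the helper that StreamProcessor.build_usage calls.
        prompt_tokens = sum(estimate_tokens(str(m.get("content", "")))
                            for m in prompt_messages)
        completion_tokens = estimate_tokens(completion_text)
        return {
            "prompt_tokens": prompt_tokens,
            "completion_tokens": completion_tokens,
            "total_tokens": prompt_tokens + completion_tokens,
            "_raw": {"reasoning_tokens": 0, "cached_tokens": 0},
        }

    class TinyStreamProcessor:
        # Mirrors the two buffers added to StreamProcessor above.
        def __init__(self) -> None:
            self._output_text = ""
            self._reasoning_text = ""

        def build_usage(self, prompt_messages: list[dict] | None = None) -> dict:
            return build_chat_usage(prompt_messages or [],
                                    self._output_text + self._reasoning_text)

The completions handler then reads total_tokens, prompt_tokens and
completion_tokens from this dict, and reasoning_tokens and cached_tokens from
its _raw sub-dict, when recording request stats.
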
From 4a8593803b28eba1dc9dab66480bd0864969f980 Mon Sep 17 00:00:00 2001
From: ZIC143 <2022220052@email.szu.edu.cn>
Date: Sat, 7 Feb 2026 21:18:06 +0800
Subject: [PATCH 4/8] feat(logs): update request log structure to include
 token metrics
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/repo/logs.ts | 9 ++++--
src/routes/admin.ts | 5 +++
src/routes/openai.ts | 75 ++++++++++++++++++++++++++++++++++++++++----
3 files changed, 81 insertions(+), 8 deletions(-)
diff --git a/src/repo/logs.ts b/src/repo/logs.ts
index 3e7d9a82..bf3832fb 100644
--- a/src/repo/logs.ts
+++ b/src/repo/logs.ts
@@ -1,5 +1,5 @@
import type { Env } from "../env";
-import { dbAll, dbRun } from "../db";
+import { dbAll, dbFirst, dbRun } from "../db";
import { nowMs, formatUtcMs } from "../utils/time";
export interface RequestLogRow {
@@ -20,9 +20,14 @@ export interface RequestLogRow {
error: string;
}
+type RequestLogInsert = Omit<RequestLogRow, "id" | "time" | "timestamp" | "total_tokens" | "input_tokens" | "output_tokens" | "reasoning_tokens" | "cached_tokens"> &
+ Partial<Pick<RequestLogRow, "total_tokens" | "input_tokens" | "output_tokens" | "reasoning_tokens" | "cached_tokens">> & {
+ id?: string;
+ };
+
export async function addRequestLog(
db: Env["DB"],
- entry: Omit<RequestLogRow, "id" | "time" | "timestamp"> & { id?: string },
+ entry: RequestLogInsert,
): Promise<void> {
const ts = nowMs();
const id = entry.id ?? String(ts);
diff --git a/src/routes/admin.ts b/src/routes/admin.ts
index db194bd6..f323e35a 100644
--- a/src/routes/admin.ts
+++ b/src/routes/admin.ts
@@ -1260,6 +1260,11 @@ adminRoutes.post("/api/logs/add", requireAdminAuth, async (c) => {
status: Number(body.status ?? 200),
key_name: "admin",
token_suffix: "",
+ total_tokens: 0,
+ input_tokens: 0,
+ output_tokens: 0,
+ reasoning_tokens: 0,
+ cached_tokens: 0,
error: String(body.error ?? ""),
});
return c.json({ success: true });
diff --git a/src/routes/openai.ts b/src/routes/openai.ts
index e3fe307a..62a5136c 100644
--- a/src/routes/openai.ts
+++ b/src/routes/openai.ts
@@ -27,6 +27,9 @@ import { nowMs } from "../utils/time";
import { arrayBufferToBase64 } from "../utils/base64";
import { upsertCacheRow } from "../repo/cache";
+const IMAGE_GENERATION_MODEL_ID = "grok-imagine-1.0";
+const IMAGE_EDIT_MODEL_ID = "grok-imagine-1.0-edit";
+
function openAiError(message: string, code: string): Record<string, unknown> {
return { error: { message, type: "invalid_request_error", code } };
}
@@ -502,6 +505,43 @@ function parseImageModel(input: unknown, fallback: string): string {
return String(input ?? fallback).trim() || fallback;
}
+function parseImagePrompt(input: unknown): string {
+ if (input === undefined || input === null) return "";
+ if (typeof input === "string") return input.trim();
+ if (Array.isArray(input)) return input.map((v) => String(v ?? "")).join(" ").trim();
+ return String(input).trim();
+}
+
+function parseImageCount(input: unknown): number {
+ const raw = Number(input);
+ if (!Number.isFinite(raw)) return 1;
+ const value = Math.floor(raw);
+ return Math.max(1, Math.min(10, value));
+}
+
+function parseImageSize(input: unknown): string {
+ const value = String(input ?? "").trim().toLowerCase();
+ if (!value) return "1024x1024";
+ const allowed = new Set([
+ "256x256",
+ "512x512",
+ "1024x1024",
+ "1024x576",
+ "1280x720",
+ "1536x864",
+ "576x1024",
+ "720x1280",
+ "864x1536",
+ "1024x1536",
+ "512x768",
+ "768x1024",
+ "1536x1024",
+ "768x512",
+ "1024x768",
+ ]);
+ return allowed.has(value) ? value : "1024x1024";
+}
+
function parseImageStream(input: unknown): boolean {
return toBool(input);
}
@@ -1420,6 +1460,7 @@ openAiRoutes.post("/images/generations", async (c) => {
const keyName = c.get("apiAuth").name ?? "Unknown";
const origin = new URL(c.req.url).origin;
+ let prompt = "";
let requestedModel = IMAGE_GENERATION_MODEL_ID;
try {
const body = (await c.req.json()) as {
@@ -1431,7 +1472,7 @@ openAiRoutes.post("/images/generations", async (c) => {
stream?: unknown;
response_format?: unknown;
};
- const prompt = parseImagePrompt(body.prompt);
+ prompt = parseImagePrompt(body.prompt);
const promptErr = nonEmptyPromptOrError(prompt);
if (promptErr) return c.json(openAiError(promptErr.message, promptErr.code), 400);
@@ -1505,6 +1546,11 @@ openAiRoutes.post("/images/generations", async (c) => {
status,
key_name: keyName,
token_suffix: getTokenSuffix(experimentalToken.token),
+ total_tokens: 0,
+ input_tokens: 0,
+ output_tokens: 0,
+ reasoning_tokens: 0,
+ cached_tokens: 0,
error: status === 200 ? "" : "stream_error",
});
},
@@ -1586,6 +1632,11 @@ openAiRoutes.post("/images/generations", async (c) => {
status,
key_name: keyName,
token_suffix: getTokenSuffix(chosen.token),
+ total_tokens: 0,
+ input_tokens: 0,
+ output_tokens: 0,
+ reasoning_tokens: 0,
+ cached_tokens: 0,
error: status === 200 ? "" : "stream_error",
});
},
@@ -1682,7 +1733,7 @@ openAiRoutes.post("/images/generations", async (c) => {
keyName,
status: 400,
error: message,
- prompt: imageCallPrompt("generation", prompt),
+ prompt: imageCallPrompt("generation", prompt || ""),
});
return c.json(openAiError(message, "content_policy_violation"), 400);
}
@@ -1694,7 +1745,7 @@ openAiRoutes.post("/images/generations", async (c) => {
keyName,
status: 500,
error: message,
- prompt: imageCallPrompt("generation", prompt),
+ prompt: imageCallPrompt("generation", prompt || ""),
});
return c.json(openAiError(message || "Internal error", "internal_error"), 500);
}
@@ -1707,10 +1758,11 @@ openAiRoutes.post("/images/edits", async (c) => {
const origin = new URL(c.req.url).origin;
const maxImageBytes = 50 * 1024 * 1024;
+ let prompt = "";
let requestedModel = IMAGE_EDIT_MODEL_ID;
try {
const form = await c.req.formData();
- const prompt = parseImagePrompt(form.get("prompt"));
+ prompt = parseImagePrompt(form.get("prompt"));
const promptErr = nonEmptyPromptOrError(prompt);
if (promptErr) return c.json(openAiError(promptErr.message, promptErr.code), 400);
@@ -1825,6 +1877,7 @@ openAiRoutes.post("/images/edits", async (c) => {
cookie,
settings: settingsBundle.grok,
n,
+ prompt: imageCallPrompt("edit", prompt),
onFinish: async ({ status, duration }) => {
await addRequestLog(c.env.DB, {
ip,
@@ -1833,6 +1886,11 @@ openAiRoutes.post("/images/edits", async (c) => {
status,
key_name: keyName,
token_suffix: getTokenSuffix(chosen.token),
+ total_tokens: 0,
+ input_tokens: 0,
+ output_tokens: 0,
+ reasoning_tokens: 0,
+ cached_tokens: 0,
error: status === 200 ? "" : "stream_error",
});
},
@@ -1896,6 +1954,11 @@ openAiRoutes.post("/images/edits", async (c) => {
status,
key_name: keyName,
token_suffix: getTokenSuffix(chosen.token),
+ total_tokens: 0,
+ input_tokens: 0,
+ output_tokens: 0,
+ reasoning_tokens: 0,
+ cached_tokens: 0,
error: status === 200 ? "" : "stream_error",
});
},
@@ -1979,7 +2042,7 @@ openAiRoutes.post("/images/edits", async (c) => {
keyName,
status: 400,
error: message,
- prompt: imageCallPrompt("edit", prompt),
+ prompt: imageCallPrompt("edit", prompt || ""),
});
return c.json(openAiError(message, "content_policy_violation"), 400);
}
@@ -1991,7 +2054,7 @@ openAiRoutes.post("/images/edits", async (c) => {
keyName,
status: 500,
error: message,
- prompt: imageCallPrompt("edit", prompt),
+ prompt: imageCallPrompt("edit", prompt || ""),
});
return c.json(openAiError(message || "Internal error", "internal_error"), 500);
}
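
The same contract as the RequestLogInsert type above, sketched in Python
terms: token metrics optional on write, zero-filled in the stored row.
TokenMetrics and fill_token_defaults are illustrative names, not project code.

    from typing import TypedDict

    TOKEN_FIELDS = ("total_tokens", "input_tokens", "output_tokens",
                    "reasoning_tokens", "cached_tokens")

    class TokenMetrics(TypedDict, total=False):
        total_tokens: int
        input_tokens: int
        output_tokens: int
        reasoning_tokens: int
        cached_tokens: int

    def fill_token_defaults(entry: TokenMetrics) -> dict[str, int]:
        # Callers may omit any metric; the row always gets a concrete integer.
        return {field: int(entry.get(field, 0)) for field in TOKEN_FIELDS}

    print(fill_token_defaults({"total_tokens": 42}))
    # -> {'total_tokens': 42, 'input_tokens': 0, 'output_tokens': 0,
    #     'reasoning_tokens': 0, 'cached_tokens': 0}
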
From 0f4e63d5fc6646ed439729cdd1812524c363cd05 Mon Sep 17 00:00:00 2001
From: ZIC143 <2022220052@email.szu.edu.cn>
Date: Sat, 7 Feb 2026 22:04:04 +0800
Subject: [PATCH 5/8] fix(logs): correct SQL insert statement parameter count
 in addRequestLog function
MIME-Version: 1.0
Content-Type: text/plain; charset=UTF-8
Content-Transfer-Encoding: 8bit
---
src/repo/logs.ts | 2 +-
1 file changed, 1 insertion(+), 1 deletion(-)
diff --git a/src/repo/logs.ts b/src/repo/logs.ts
index bf3832fb..1db5b645 100644
--- a/src/repo/logs.ts
+++ b/src/repo/logs.ts
@@ -34,7 +34,7 @@ export async function addRequestLog(
const time = formatUtcMs(ts);
await dbRun(
db,
- "INSERT INTO request_logs(id,time,timestamp,ip,model,duration,status,key_name,token_suffix,total_tokens,input_tokens,output_tokens,reasoning_tokens,cached_tokens,error) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
+ "INSERT INTO request_logs(id,time,timestamp,ip,model,duration,status,key_name,token_suffix,total_tokens,input_tokens,output_tokens,reasoning_tokens,cached_tokens,error) VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)",
[
id,
time,
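
The bug fixed above, 15 columns against 14 placeholders, is the classic
hand-maintained INSERT failure. A throwaway check of the invariant, as a
Python sketch:

    # Sketch: the INSERT names 15 columns, so VALUES needs exactly 15 "?".
    sql = (
        "INSERT INTO request_logs(id,time,timestamp,ip,model,duration,status,"
        "key_name,token_suffix,total_tokens,input_tokens,output_tokens,"
        "reasoning_tokens,cached_tokens,error) "
        "VALUES(?,?,?,?,?,?,?,?,?,?,?,?,?,?,?)"
    )
    columns = sql.split("(", 1)[1].split(")", 1)[0].split(",")
    placeholders = sql.rsplit("(", 1)[1].rstrip(")").split(",")
    assert len(columns) == len(placeholders) == 15, (len(columns), len(placeholders))
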
From a64039d628498049c5ba1b1935c0543e754958ef Mon Sep 17 00:00:00 2001
From: ZIC143 <2022220052@email.szu.edu.cn>
Date: Sat, 21 Feb 2026 20:54:13 +0800
Subject: [PATCH 6/8] fix(migrations): preserve migration compatibility by
removing redundant ALTER TABLE statements
---
migrations/0006_request_logs_usage.sql | 18 +++++++++++-------
1 file changed, 11 insertions(+), 7 deletions(-)
diff --git a/migrations/0006_request_logs_usage.sql b/migrations/0006_request_logs_usage.sql
index 74664323..6a98e0ea 100644
--- a/migrations/0006_request_logs_usage.sql
+++ b/migrations/0006_request_logs_usage.sql
@@ -1,7 +1,11 @@
--- Add token usage columns to request_logs
-
-ALTER TABLE request_logs ADD COLUMN total_tokens INTEGER NOT NULL DEFAULT 0;
-ALTER TABLE request_logs ADD COLUMN input_tokens INTEGER NOT NULL DEFAULT 0;
-ALTER TABLE request_logs ADD COLUMN output_tokens INTEGER NOT NULL DEFAULT 0;
-ALTER TABLE request_logs ADD COLUMN reasoning_tokens INTEGER NOT NULL DEFAULT 0;
-ALTER TABLE request_logs ADD COLUMN cached_tokens INTEGER NOT NULL DEFAULT 0;
\ No newline at end of file
+-- request_logs token usage columns are part of the base schema now (0001_init.sql).
+--
+-- Historical note:
+-- 0006 previously added:
+-- total_tokens, input_tokens, output_tokens, reasoning_tokens, cached_tokens
+--
+-- But on fresh databases initialized from the current 0001 schema, those columns
+-- already exist. Re-applying ALTER TABLE here causes duplicate-column failures on
+-- remote migration runs.
+--
+-- Keep this migration as a no-op to preserve migration ordering compatibility.
\ No newline at end of file
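
Where the migration runner allows direct SQLite access, the defensive
alternative to a no-op is probing for the column before altering; remote D1
migration runs (the deploy target assumed here from the c.env.DB usage
elsewhere) offer no such hook, hence the no-op above. A Python sketch using
the sqlite3 standard library against a local database:

    import sqlite3

    def add_column_if_missing(conn: sqlite3.Connection, table: str,
                              column: str, ddl: str) -> None:
        # SQLite lacks "ADD COLUMN IF NOT EXISTS"; consult PRAGMA table_info.
        existing = {row[1] for row in conn.execute(f"PRAGMA table_info({table})")}
        if column not in existing:
            conn.execute(f"ALTER TABLE {table} ADD COLUMN {column} {ddl}")

    conn = sqlite3.connect(":memory:")
    conn.execute("CREATE TABLE request_logs (id TEXT PRIMARY KEY)")
    for col in ("total_tokens", "input_tokens", "output_tokens",
                "reasoning_tokens", "cached_tokens"):
        add_column_if_missing(conn, "request_logs", col,
                              "INTEGER NOT NULL DEFAULT 0")
    conn.commit()
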
From 2dcfaf6c5b9baf222005bf85ae2a7045d20e12ee Mon Sep 17 00:00:00 2001
From: ZIC143 <2022220052@email.szu.edu.cn>
Date: Sat, 21 Feb 2026 22:38:27 +0800
Subject: [PATCH 7/8] feat: implement token pagination and filtering in admin
API
- Added pagination support for token retrieval in the admin API, allowing clients to specify page number and items per page (a client-side usage sketch follows this list).
- Introduced filtering options for token type, status, NSFW content, and search functionality.
- Enhanced the token listing endpoint to return total counts, current page, and total pages.
- Updated frontend components to support pagination controls and display total token counts.
- Added CSS styles for pagination controls.
- Created tests for default pagination, filtering, and "all" mode for token listing.
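
A sketch of a client driving the new listing endpoint. Parameter names and the
response shape come from the diff below; httpx and the bearer-style admin key
are assumptions, not project requirements:

    import httpx  # assumption: any HTTP client works

    resp = httpx.get(
        "http://localhost:8000/api/v1/admin/tokens",
        params={"page": 2, "per_page": "50", "token_type": "sso",
                "status": "active", "search": ""},
        headers={"Authorization": "Bearer <admin-key>"},  # assumption: auth scheme
    )
    data = resp.json()
    print(data["total"], data["page"], data["pages"], len(data["items"]))
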
---
app/api/v1/admin.py | 162 +++++++++++-
app/static/token/token.css | 42 ++++
app/static/token/token.html | 18 ++
app/static/token/token.js | 271 ++++++++++++---------
app/template/admin.html | 75 +++++-
src/repo/tokens.ts | 96 ++++++++
src/routes/admin.ts | 88 ++++++-
tests/test_admin_tokens_list_pagination.py | 81 ++++++
8 files changed, 707 insertions(+), 126 deletions(-)
create mode 100644 tests/test_admin_tokens_list_pagination.py
diff --git a/app/api/v1/admin.py b/app/api/v1/admin.py
index 10a26214..ced70d62 100644
--- a/app/api/v1/admin.py
+++ b/app/api/v1/admin.py
@@ -462,6 +462,121 @@ def _normalize_admin_token_item(pool_name: str, item: Any) -> dict | None:
}
+TOKEN_PAGE_DEFAULT = 30
+TOKEN_PAGE_ALLOWED = {30, 50, 200}
+TOKEN_PAGE_ALL_LIMIT = 10000
+
+
+def _parse_token_page(page: Any) -> int:
+ try:
+ n = int(page)
+ except Exception:
+ n = 1
+ return max(1, n)
+
+
+def _parse_token_per_page(per_page: Any) -> tuple[int, bool]:
+ v = str(per_page if per_page is not None else "").strip().lower()
+ if v in ("all", "全部"):
+ return TOKEN_PAGE_ALL_LIMIT, True
+ try:
+ n = int(v or TOKEN_PAGE_DEFAULT)
+ except Exception:
+ return TOKEN_PAGE_DEFAULT, False
+ if n not in TOKEN_PAGE_ALLOWED:
+ return TOKEN_PAGE_DEFAULT, False
+ return n, False
+
+
+def _is_token_invalid(item: dict) -> bool:
+ return str(item.get("status") or "").strip().lower() in ("invalid", "expired", "disabled")
+
+
+def _is_token_exhausted(item: dict) -> bool:
+ status = str(item.get("status") or "").strip().lower()
+ if status == "cooling":
+ return True
+ try:
+ quota_known = bool(item.get("quota_known"))
+ quota = int(item.get("quota"))
+ except Exception:
+ quota_known = False
+ quota = -1
+ if quota_known and quota <= 0:
+ return True
+
+ token_type = str(item.get("token_type") or "sso")
+ try:
+ heavy_known = bool(item.get("heavy_quota_known"))
+ heavy_quota = int(item.get("heavy_quota"))
+ except Exception:
+ heavy_known = False
+ heavy_quota = -1
+ if token_type == "ssoSuper" and heavy_known and heavy_quota <= 0:
+ return True
+ return False
+
+
+def _is_token_active(item: dict) -> bool:
+ return (not _is_token_invalid(item)) and (not _is_token_exhausted(item))
+
+
+def _match_token_status(item: dict, status: str) -> bool:
+ s = str(status or "").strip().lower()
+ if not s:
+ return True
+ if s in ("invalid", "失效"):
+ return _is_token_invalid(item)
+ if s in ("active", "正常"):
+ return _is_token_active(item)
+ if s in ("exhausted", "额度耗尽", "limited", "限流中"):
+ return _is_token_exhausted(item)
+ if s in ("cooling", "冷却中"):
+ return str(item.get("status") or "").strip().lower() == "cooling"
+ if s in ("unused", "未使用"):
+ try:
+ quota = int(item.get("quota"))
+ except Exception:
+ quota = -2
+ return quota == -1
+ return True
+
+
+def _match_token_nsfw(item: dict, nsfw: str) -> bool:
+ v = str(nsfw or "").strip().lower()
+ if not v:
+ return True
+ note = str(item.get("note") or "").lower()
+ has_nsfw = "nsfw" in note
+ if v in ("1", "true", "yes", "on", "enabled"):
+ return has_nsfw
+ if v in ("0", "false", "no", "off", "disabled"):
+ return not has_nsfw
+ return True
+
+
+def _filter_admin_tokens(items: list[dict], *, token_type: str, status: str, nsfw: str, search: str) -> list[dict]:
+ token_type_norm = str(token_type or "all").strip()
+ search_norm = str(search or "").strip().lower()
+
+ out: list[dict] = []
+ for item in items:
+ cur_type = str(item.get("token_type") or "sso")
+ if token_type_norm in ("sso", "ssoSuper") and cur_type != token_type_norm:
+ continue
+ if not _match_token_status(item, status):
+ continue
+ if not _match_token_nsfw(item, nsfw):
+ continue
+ if search_norm:
+ token = str(item.get("token") or "").lower()
+ note = str(item.get("note") or "").lower()
+ if search_norm not in token and search_norm not in note:
+ continue
+ out.append(item)
+ return out
+
+
def _collect_tokens_from_pool_payload(payload: Any) -> list[str]:
if not isinstance(payload, dict):
return []
@@ -675,12 +790,20 @@ async def get_storage_info():
return {"type": storage_type or "local"}
@router.get("/api/v1/admin/tokens", dependencies=[Depends(verify_api_key)])
-async def get_tokens_api():
+async def get_tokens_api(
+ page: int = Query(default=1),
+ per_page: str = Query(default="30"),
+ token_type: str = Query(default="all"),
+ status: str = Query(default=""),
+ nsfw: str = Query(default=""),
+ search: str = Query(default=""),
+):
"""获取所有 Token"""
storage = get_storage()
tokens = await storage.load_tokens()
data = tokens if isinstance(tokens, dict) else {}
out: dict[str, list[dict]] = {}
+ normalized_items: list[dict] = []
for pool_name, raw_items in data.items():
arr = raw_items if isinstance(raw_items, list) else []
normalized: list[dict] = []
@@ -688,8 +811,43 @@ async def get_tokens_api():
obj = _normalize_admin_token_item(pool_name, item)
if obj:
normalized.append(obj)
+ normalized_items.append({**obj, "pool": str(pool_name)})
out[str(pool_name)] = normalized
- return out
+
+ current_page = _parse_token_page(page)
+ page_size, is_all = _parse_token_per_page(per_page)
+ filtered = _filter_admin_tokens(
+ normalized_items,
+ token_type=token_type,
+ status=status,
+ nsfw=nsfw,
+ search=search,
+ )
+
+ total = len(filtered)
+ pages = max(1, (total + page_size - 1) // page_size)
+ if current_page > pages:
+ current_page = pages
+ start = (current_page - 1) * page_size
+ end = start + page_size
+ page_items = filtered[start:end]
+
+ page_pools: dict[str, list[dict]] = {"ssoBasic": [], "ssoSuper": []}
+ for item in page_items:
+ pool = str(item.get("pool") or "ssoBasic")
+ obj = dict(item)
+ obj.pop("pool", None)
+ page_pools.setdefault(pool, []).append(obj)
+
+ return {
+ "items": page_items,
+ "total": total,
+ "page": current_page,
+ "per_page": "all" if is_all else page_size,
+ "pages": pages,
+ "ssoBasic": page_pools.get("ssoBasic", []),
+ "ssoSuper": page_pools.get("ssoSuper", []),
+ }
@router.post("/api/v1/admin/tokens", dependencies=[Depends(verify_api_key)])
async def update_tokens_api(data: dict):
diff --git a/app/static/token/token.css b/app/static/token/token.css
index c5e9185e..b46e0301 100644
--- a/app/static/token/token.css
+++ b/app/static/token/token.css
@@ -198,6 +198,39 @@
align-items: flex-start;
}
+ .token-pagination {
+ display: flex;
+ align-items: center;
+ justify-content: space-between;
+ gap: 10px;
+ flex-wrap: wrap;
+ }
+
+ .token-pagination-controls {
+ display: flex;
+ align-items: center;
+ gap: 8px;
+ flex-wrap: wrap;
+ }
+
+ .token-page-size {
+ height: 28px;
+ min-width: 70px;
+ padding: 0 8px;
+ font-size: 12px;
+ }
+
+ .token-page-btn {
+ height: 28px;
+ padding: 0 10px;
+ font-size: 12px;
+ }
+
+ .token-page-btn:disabled {
+ opacity: 0.5;
+ cursor: not-allowed;
+ }
+
#batch-actions {
background: rgba(255, 255, 255, 0.92);
border: 1px solid var(--border);
@@ -294,6 +327,15 @@
width: 100%;
}
+ .token-pagination {
+ flex-direction: column;
+ align-items: stretch;
+ }
+
+ .token-pagination-controls {
+ justify-content: space-between;
+ }
+
#batch-actions {
left: 12px;
right: 12px;
diff --git a/app/static/token/token.html b/app/static/token/token.html
index fb9427fc..fe5925d2 100644
--- a/app/static/token/token.html
+++ b/app/static/token/token.html
@@ -155,6 +155,24 @@ Token 列表