diff --git a/.gitignore b/.gitignore index 9209ef5..bc3b29d 100644 --- a/.gitignore +++ b/.gitignore @@ -1,2 +1,4 @@ node_modules out +.venv +.DS_Store \ No newline at end of file diff --git a/.vscodeignore b/.vscodeignore index e69de29..918dbb5 100644 --- a/.vscodeignore +++ b/.vscodeignore @@ -0,0 +1,23 @@ +__pycache__ +.DS_Store +.git +.github +.gitignore +.venv +.vscode-test/** +.vscode/** +.yarnrc +*.pyc +*.vsix +**/.eslintrc.json +**/*.map +**/*.ts +**/tsconfig.json +!node_modules/** +scripts/** +src/** +tests/** +docs/** +eslint.config.js +.prettierignore +client/** \ No newline at end of file diff --git a/client/README_WEB.md b/client/README_WEB.md new file mode 100644 index 0000000..507e6b0 --- /dev/null +++ b/client/README_WEB.md @@ -0,0 +1,22 @@ +AI Chat Web UI + +Run a minimal Flask server that serves a single-page chat UI and proxies requests to your local AI API. + +Setup + +1. Create a virtualenv and install requirements: + + python -m venv .venv + . .venv/bin/activate || . .\.venv\Scripts\Activate.ps1 + pip install -r requirements-web.txt + +2. Start the server (optionally check and change API_BASE to your API endpoint): + + python server.py + +3. Open http://localhost:8080 in your browser. + +Notes + +- The UI posts to /api/chat and expects the API at ${API_BASE}/chat/completions. +- The model selector will send the chosen model name in the request body. diff --git a/client/copilot_proxy.py b/client/copilot_proxy.py new file mode 100644 index 0000000..20a4451 --- /dev/null +++ b/client/copilot_proxy.py @@ -0,0 +1,62 @@ +import json +import os + +import requests + +API_BASE = os.environ.get('API_BASE', 'http://localhost:3000/v1') +MODEL = os.environ.get('MODEL', 'gpt-4o') + +def call_stream(): + url = f"{API_BASE}/chat/completions" + payload = { + "model": MODEL, + "messages": [ + { + "role": "user", + "content": "Hello! Can you introduce yourself briefly?" 
+ } + ], + "stream": True + } + + with requests.post(url, json=payload, stream=True, timeout=30) as resp: + try: + resp.raise_for_status() + except requests.HTTPError: + print(f"HTTP Error {resp.status_code}: {resp.text}") + return + buffer = "" + for raw_line in resp.iter_lines(decode_unicode=True): + if raw_line is None: + continue + line = raw_line.strip() + if not line: + # empty line => end of one SSE event, process buffer + if buffer: + try: + # some servers produce lines like "data: {...}" + data_line = buffer + if data_line.startswith("data:"): + data_line = data_line[len("data:"):].strip() + if data_line and data_line != "[DONE]": + obj = json.loads(data_line) + choices = obj.get("choices", []) + if choices: + fragment = choices[0].get("delta", {}).get("content", "") + if fragment: + print(fragment, end="", flush=True) + except json.JSONDecodeError: + # ignore lines that are not JSON + pass + buffer = "" + # accumulate lines for this event + # Many SSE streams send each event as a single "data: " line, + # but some may split fragments across multiple "data:" lines. 
+ elif line.startswith("data:"): + # append JSON after "data:" + buffer += (line + "\n") + + print("\n\nStream finished.") + +if __name__ == "__main__": + call_stream() \ No newline at end of file diff --git a/client/requirements-web.txt b/client/requirements-web.txt new file mode 100644 index 0000000..88a502b --- /dev/null +++ b/client/requirements-web.txt @@ -0,0 +1,2 @@ +Flask>=2.0 +requests>=2.25 diff --git a/client/server.py b/client/server.py new file mode 100644 index 0000000..d671bd6 --- /dev/null +++ b/client/server.py @@ -0,0 +1,53 @@ +import os +import requests +from flask import Flask, jsonify, request, send_from_directory + +app = Flask(__name__, static_folder='web', static_url_path='/static') + +# Configurable API base +API_BASE = os.environ.get('API_BASE', 'http://localhost:3000/v1') +PORT = int(os.environ.get('PORT', '8080')) + +# Headers that should not be forwarded from upstream +HOP_BY_HOP_HEADERS = frozenset([ + 'transfer-encoding', 'connection', 'keep-alive', + 'proxy-authenticate', 'proxy-authorization', 'te', + 'trailers', 'upgrade', 'content-encoding', 'content-length' +]) + +def filter_headers(headers): + """Filter out hop-by-hop headers from upstream response.""" + return [(k, v) for k, v in headers if k.lower() not in HOP_BY_HOP_HEADERS] + +@app.route('/') +def index(): + return send_from_directory('web', 'index.html') + +@app.route('/api/chat', methods=['POST']) +def api_chat(): + payload = request.get_json(force=True) + # forward to the underlying API + url = f"{API_BASE}/chat/completions" + try: + resp = requests.post(url, json=payload, timeout=60) + except requests.RequestException as e: + return jsonify({'error':'upstream request failed', 'details': str(e)}), 502 + + return (resp.content, resp.status_code, filter_headers(resp.headers.items())) + + +@app.route('/api/models', methods=['GET']) +def api_models(): + """Fetch available models from upstream API and return them.""" + url = f"{API_BASE}/models" + try: + resp = 
requests.get(url, timeout=20) + except requests.RequestException as e: + return jsonify({'error':'upstream request failed', 'details': str(e)}), 502 + + return (resp.content, resp.status_code, filter_headers(resp.headers.items())) + +if __name__ == '__main__': + port = PORT + debug = os.environ.get('FLASK_DEBUG', 'false').lower() == 'true' + app.run(host='127.0.0.1', port=port, debug=debug) diff --git a/client/web/app.js b/client/web/app.js new file mode 100644 index 0000000..ea8e60e --- /dev/null +++ b/client/web/app.js @@ -0,0 +1,163 @@ +const form = document.getElementById('chat-form'); +const promptEl = document.getElementById('prompt'); +const messagesEl = document.getElementById('messages'); +const modelSel = document.getElementById('model'); +const newChatBtn = document.getElementById('new-chat'); + +// persistent conversation stored in localStorage +const STORAGE_KEY = 'ai_chat_messages_v1'; +let messages = []; + +function loadMessages(){ + try{ messages = JSON.parse(localStorage.getItem(STORAGE_KEY) || '[]'); }catch(e){ messages = []; } +} + +function saveMessages(){ + try{ localStorage.setItem(STORAGE_KEY, JSON.stringify(messages)); }catch(e){/* ignore */} +} + +function renderMessages(){ + messagesEl.innerHTML = ''; + for(const m of messages){ + const d = document.createElement('div'); + d.className = 'msg ' + (m.role === 'user' ? 
'user' : 'ai'); + d.textContent = m.content; + messagesEl.appendChild(d); + } + messagesEl.scrollTop = messagesEl.scrollHeight; +} + +loadMessages(); +renderMessages(); + +// load available models from server and populate selector +async function loadModels(){ + try{ + const resp = await fetch('/api/models'); + if(!resp.ok) throw new Error('Failed to fetch models'); + const json = await resp.json(); + let list = []; + if(Array.isArray(json)) list = json; + else if(Array.isArray(json.data)) list = json.data; + else if(Array.isArray(json.models)) list = json.models; + + // normalize to strings + const opts = list.map(it => { + if(typeof it === 'string') return it; + return it.id || it.name || it.model || JSON.stringify(it); + }).filter(Boolean); + + modelSel.innerHTML = ''; + if(opts.length){ + for(const id of opts){ + const o = document.createElement('option'); + o.value = id; o.textContent = id; + modelSel.appendChild(o); + } + // restore previously selected model if any + const saved = localStorage.getItem('ai_chat_selected_model'); + if(saved) modelSel.value = saved; + }else{ + throw new Error('no models'); + } + }catch(err){ + // fallback set + modelSel.innerHTML = ''; + ['gpt-5-mini','gpt-4o-mini','gpt-4o'].forEach(v=>{ + const o = document.createElement('option'); o.value=v; o.textContent=v; modelSel.appendChild(o); + }); + console.warn('Could not load models, using fallback', err); + } +} + +modelSel.addEventListener('change', ()=>{ + try{ localStorage.setItem('ai_chat_selected_model', modelSel.value); }catch(e){} +}); + +loadModels(); + +// New chat button clears conversation +if(newChatBtn){ + newChatBtn.addEventListener('click', ()=>{ + messages = []; + saveMessages(); + renderMessages(); + promptEl.focus(); + }); +} + +// Enter to send (Shift+Enter inserts newline) +promptEl.addEventListener('keydown', (e)=>{ + if(e.key === 'Enter' && !e.shiftKey){ + e.preventDefault(); + if(typeof form.requestSubmit === 'function') form.requestSubmit(); + else 
form.dispatchEvent(new Event('submit', {cancelable:true})); + } +}); + +form.addEventListener('submit', async (e)=>{ + e.preventDefault(); + const prompt = promptEl.value.trim(); + if(!prompt) return; + + // Disable form during request + const submitBtn = form.querySelector('button[type="submit"]'); + if(submitBtn) submitBtn.disabled = true; + promptEl.disabled = true; + + // add user message to conversation + messages.push({role:'user', content: prompt}); + saveMessages(); + renderMessages(); + promptEl.value = ''; + + // add temporary assistant placeholder (only for display, not sent to API) + messages.push({role:'assistant', content: '...'}); + saveMessages(); + renderMessages(); + + // Build messages to send (exclude the placeholder) + const messagesToSend = messages.slice(0, -1); + + try{ + const resp = await fetch('/api/chat', { + method: 'POST', + headers: {'Content-Type':'application/json'}, + body: JSON.stringify({ model: modelSel.value, messages: messagesToSend }) + }); + if(!resp.ok){ + const txt = await resp.text(); + // replace last assistant placeholder with error + messages[messages.length-1].content = `Error: ${resp.status} ${txt}`; + saveMessages(); + renderMessages(); + return; + } + + const data = await resp.json(); + // extract assistant content from common API shapes + let content = ''; + try{ + const choices = data.choices || []; + if(choices.length && choices[0].message) content = choices[0].message.content || ''; + else if(choices.length && choices[0].delta) content = choices.map(c=>c.delta?.content||'').join(''); + else if(data.text) content = data.text; + else content = JSON.stringify(data); + }catch(e){ content = JSON.stringify(data) } + + // replace placeholder with real assistant message + messages[messages.length-1].content = content; + saveMessages(); + renderMessages(); + }catch(err){ + messages[messages.length-1].content = 'Network error'; + saveMessages(); + renderMessages(); + console.error(err); + }finally{ + // Re-enable form + 
if(submitBtn) submitBtn.disabled = false; + promptEl.disabled = false; + promptEl.focus(); + } +}); diff --git a/client/web/index.html b/client/web/index.html new file mode 100644 index 0000000..f1426e7 --- /dev/null +++ b/client/web/index.html @@ -0,0 +1,30 @@ + + + + + + AI Chat + + + +
+
+

AI Chat

+ + + +
+ +
+ +
+ + +
+
+ + + + diff --git a/client/web/styles.css b/client/web/styles.css new file mode 100644 index 0000000..5fb0b32 --- /dev/null +++ b/client/web/styles.css @@ -0,0 +1,24 @@ +:root{ + --bg:#0f1720; + --panel:#0b1320; + --muted:#9aa4b2; + --accent:#3b82f6; + --text:#e6eef6; +} +*{box-sizing:border-box} +body{font-family:Inter,system-ui,Segoe UI,Roboto,Arial;background:var(--bg);color:var(--text);margin:0;min-height:100vh;display:flex;align-items:center;justify-content:center} +.chat-root{width:720px;max-width:96vw;height:80vh;background:linear-gradient(180deg,#081223,#071827);border-radius:12px;padding:16px;display:flex;flex-direction:column;gap:12px;box-shadow:0 6px 30px rgba(0,0,0,.6)} +header{display:flex;align-items:center;gap:12px} +header h1{margin:0;font-size:1.1rem} +select{background:transparent;color:var(--text);border:1px solid rgba(255,255,255,0.06);padding:6px;border-radius:6px} +.new-chat{margin-left:auto;background:transparent;border:1px solid rgba(255,255,255,0.06);color:var(--text);padding:6px 10px;border-radius:6px;cursor:pointer} +.new-chat:hover{background:rgba(255,255,255,0.02)} +.messages{flex:1;overflow:auto;padding:8px;border-radius:8px;background:rgba(255,255,255,0.02);display:flex;flex-direction:column;gap:8px} +.msg{padding:10px;border-radius:8px;max-width:80%;line-height:1.35;word-break:break-word} +.msg.user{background:linear-gradient(90deg,#0f1720,#102133);align-self:flex-end;border:1px solid rgba(255,255,255,0.03)} +.msg.ai{background:linear-gradient(90deg,#021224,#042133);align-self:flex-start;border:1px solid rgba(255,255,255,0.03)} +.chat-form{display:flex;gap:8px} +textarea{flex:1;padding:10px;border-radius:8px;border:1px solid rgba(255,255,255,0.04);background:transparent;color:var(--text);resize:none} +button{background:var(--accent);border:none;color:white;padding:10px 14px;border-radius:8px;cursor:pointer} +button:disabled{opacity:0.5;cursor:not-allowed} +textarea:focus-visible,select:focus-visible,button:focus-visible{outline:2px 
solid var(--accent);outline-offset:2px} diff --git a/docs/specs/archive/plan-rest-api.md b/docs/specs/archive/plan-rest-api.md deleted file mode 100644 index 6b79c28..0000000 --- a/docs/specs/archive/plan-rest-api.md +++ /dev/null @@ -1,47 +0,0 @@ -# Spec Implementation Plan - -## Project Environment Setup -- [x] Initialize the project with a TypeScript setup (package.json, tsconfig.json, etc.). -- [x] Install dependencies: express, dotenv, morgan, typescript, ts-node, @types/node, @types/express, and @types/morgan. - -## REST API Server Implementation (src/server.ts) -- [x] Create an asynchronous REST API with Express. -- [x] Create a POST endpoint at /v1/chat/completions that: - - Parses a JSON payload. - - Checks for a boolean parameter `stream`. - - If `stream` is false or not provided, returns a JSON response with the following mock data: - ```json - { - "id": "chatcmpl-mock", - "object": "chat.completion", - "created": 1234567890, - "choices": [ - { - "index": 0, - "message": { "role": "assistant", "content": "This is a mock response." }, - "finish_reason": "stop" - } - ], - "usage": { "prompt_tokens": 5, "completion_tokens": 7, "total_tokens": 12 } - } - ``` - - If `stream` is true, initiates streaming: - - Sets the header `Content-Type: text/event-stream`. - - Uses asynchronous delays (e.g., `setTimeout`) and `res.write` to simulate the streaming response. - - Logs the progress of each data chunk sent. - -## LiteLLM Client Implementation -- [x] Created Python client using LiteLLM to call the mock `/v1/chat/completions` endpoint -- [x] Implemented both streaming and non-streaming test calls -- [x] Added environment variable configuration for API endpoint - -## Configuration Management and Logging -- [ ] Create a configuration module (e.g., src/config.ts) to load server configurations (port, log levels, etc.) from environment variables with default values. -- [x] Integrate logging (e.g., using morgan) to log incoming requests and streaming events. 
- -## Documentation -- [ ] Update the readme.md file with: - - Installation and configuration instructions. - - Build and run instructions for the API server. - - How to use the LiteLLM client. - - Testing instructions (unit and integration tests). diff --git a/docs/specs/archive/plan-ui-settings.md b/docs/specs/archive/plan-ui-settings.md deleted file mode 100644 index 474e3d6..0000000 --- a/docs/specs/archive/plan-ui-settings.md +++ /dev/null @@ -1,183 +0,0 @@ -# Implementation Plan: Configure Express Server Port - -## Overview -This plan outlines the steps to add a minimal settings UI for the copilot-proxy extension, allowing users to configure and persist the port used by the Express server. The port value is stored in the VS Code configuration (`copilotProxy.port`) and is used when starting the server. - -## Step 1: Update Server Initialization -- **File:** `src/server.ts` -- **Objective:** - Modify the `startServer` function to accept an optional `port` parameter (default: 3000) and use it with `app.listen()`. - -- **Code Changes:** - -```diff --const port = process.env.PORT || 3000; --export function startServer() { -- const server = app.listen(port, () => { -- console.log(`Server is running on port ${port}`); -- }); -- return server; --} -+export function startServer(port: number = 3000) { -+ const server = app.listen(port, () => { -+ console.log(`Server is running on port ${port}`); -+ }); -+ return server; -+} -``` - -- **Test Steps:** - 1. **Manual Test:** Call `startServer(5000)` and verify the console logs "Server is running on port 5000". - 2. **Integration Test:** Use a network tool (e.g., `netstat`) to confirm the server binds to the specified port. - -## Step 2: Modify the Start Server Command -- **File:** `src/extension.ts` -- **Objective:** - Update the "Copilot Proxy - Start Server" command to read the port from the VS Code configuration (`copilotProxy.port`) and pass that value to `startServer()`. 
- -- **Code Changes:** - -```diff -- vscode.commands.registerCommand('Copilot Proxy - Start Server', () => { -- if (!serverInstance) { -- serverInstance = startServer(); -- vscode.window.showInformationMessage('Express server started.'); -- } else { -- vscode.window.showInformationMessage('Express server is already running.'); -- } -- }) -+ vscode.commands.registerCommand('Copilot Proxy - Start Server', () => { -+ if (!serverInstance) { -+ const configPort = vscode.workspace.getConfiguration("copilotProxy").get("port", 3000); -+ serverInstance = startServer(configPort); -+ vscode.window.showInformationMessage(`Express server started on port ${configPort}.`); -+ } else { -+ vscode.window.showInformationMessage('Express server is already running.'); -+ } -+ }); -``` - -- **Test Steps:** - 1. **Command Test:** Run “Copilot Proxy - Start Server” from the VS Code Command Palette and verify the correct port is used. - 2. **Log Verification:** Confirm in the server logs that the server starts on the port from the configuration. - -## Step 3: Add the Minimal Settings GUI -- **File:** `src/extension.ts` -- **Objective:** - Create a function `configurePort()` that uses `vscode.window.showInputBox` to prompt the user for a new port: - - Display the current port as the default value. - - Validate that the input is a positive integer. - - On confirmation, update the configuration persistently using `config.update()`. 
- -- **Code Addition:** - -```typescript -function configurePort() { - const config = vscode.workspace.getConfiguration("copilotProxy"); - const currentPort = config.get("port", 3000); - vscode.window.showInputBox({ - prompt: "Enter the port for the Express server:", - placeHolder: "e.g., 3000", - value: String(currentPort), - validateInput: (value: string): string | undefined => { - const port = Number(value); - if (isNaN(port) || port <= 0) { - return "Please enter a valid positive integer for the port."; - } - return undefined; - } - }).then(newPortStr => { - if (newPortStr !== undefined) { - const newPort = Number(newPortStr); - config.update("port", newPort, vscode.ConfigurationTarget.Global); - vscode.window.showInformationMessage(`Port updated to ${newPort}. Restart the server if it's running.`); - } - }); -} -``` - -- **Test Steps:** - 1. **Direct Invocation:** Call `configurePort()` and verify the input box shows the current port. - 2. **Validation Check:** Test entering invalid values (e.g., negative numbers or non-numeric input) and observe the validation message. - 3. **Persistence Check:** Confirm that after a valid update, the new port is saved in the configuration. - -## Step 4: Register the "Configure Port" Command -- **File:** `src/extension.ts` -- **Objective:** - Register a new command (`Copilot Proxy: Configure Port`) within the `activate()` function to invoke `configurePort()`. - -- **Code Addition:** - -```typescript -// Register command to configure the port. -context.subscriptions.push( - vscode.commands.registerCommand('Copilot Proxy: Configure Port', () => { - configurePort(); - }) -); -``` - -- **Test Steps:** - 1. **Command Test:** Run “Copilot Proxy: Configure Port” from the Command Palette and check that the input box appears with the current port pre-filled. - 2. **Persistence Test:** After updating via the command, verify that the configuration reflects the new port. 
- -## Step 5: Update package.json to Contribute the New Configuration -- **File:** `package.json` -- **Objective:** - Add a configuration contribution to make the `copilotProxy.port` setting visible in the VS Code Settings UI. - -- **Code Addition:** - -```json -"contributes": { - "configuration": { - "type": "object", - "title": "Copilot Proxy", - "properties": { - "copilotProxy.port": { - "type": "number", - "default": 3000, - "description": "Port for the Express server." - } - } - } -} -``` - -- **Test Steps:** - 1. **Settings UI Test:** Open the VS Code Settings UI and verify that the `copilotProxy.port` setting is visible with the correct default value and modifiable. - -## Step 6: Update Documentation in readme.md -- **File:** `readme.md` -- **Objective:** - Document the new settings feature: - - Explain the use of the `Copilot Proxy: Configure Port` command. - - Instruct users on modifying the `copilotProxy.port` setting via the Settings UI. - - Remind users to restart the server for changes to take effect. - -- **Documentation Update Example:** - -``` -The copilot-proxy extension now allows you to configure the port for the Express server. -- Use the `Copilot Proxy: Configure Port` command to update the port using a simple input box. -- Alternatively, adjust the `copilotProxy.port` setting via the VS Code Settings UI. -Please restart the server if it's currently running to use the new port. -``` - -- **Test Steps:** - 1. **Review:** Confirm that the documentation is clear and that a user can follow the instructions to update the port. - -## Step 7: Create/Update Unit Tests (Optional) -- **Objective:** - Optionally, write unit or integration tests to: - - Simulate the behavior of `configurePort()` using mocked input from `vscode.window.showInputBox`. - - Verify that the configuration updates correctly. - - Ensure that `startServer()` uses the port value from the configuration. - -- **Test Steps:** - 1. 
**Mocking Test:** Use a testing framework (e.g., Mocha or Jest) to simulate input and assert that configuration update calls are made with the correct value. - 2. **Port Verification:** Call `startServer()` with a test port and verify through logs or network inspection that the server starts on the specified port. - ---- - -This plan details all the steps required for implementation along with isolated test steps for each change. diff --git a/docs/specs/archive/plan-use-vscode-llm.md b/docs/specs/archive/plan-use-vscode-llm.md deleted file mode 100644 index 1822a3b..0000000 --- a/docs/specs/archive/plan-use-vscode-llm.md +++ /dev/null @@ -1,81 +0,0 @@ -### **2. Proposed Plan (to be recorded in `docs/specs/plan.md`)** - -- [x] **Step 1: Whitelist Validation in `server.ts`** - - **Task:** Modify `/v1/chat/completions` endpoint to check that `req.body.model` is one of the allowed models. - - **Test:** - - Send a request with a model not in the whitelist and expect an HTTP 400 error with a clear error message. - - **Example Outline:** - - Define an array of allowed models: `["gpt-4o", "gpt-4o-mini", "o1", "o1-mini", "claude-3.5-sonnet"]`. - - If `req.body.model` is not in the array, return a 400 response with `{ error: "Model not supported" }`. - -- [x] **Step 2: Branch Based on the `stream` Flag in `server.ts`** - - **Task:** In the endpoint handler, branch logic for streaming vs. non-streaming requests. - - **Test:** - - For a streaming request (`stream: true`), verify that the server writes multiple HTTP chunks. - - For a non-streaming request (`stream: false`), verify that the full JSON response is returned after the whole response is accumulated. - - **Example Outline:** - - If `stream` is true, call the asynchronous function (e.g., `processChatRequest`) and iterate over its yielded chunks, writing each with `res.write(...)`. - - If `stream` is false, await the full result and then send it with `res.json(...)`. 
- -- [x] **Step 3: Implement `processChatRequest` in `extension.ts`** - - **Task:** Create a new async function `processChatRequest` that: - 1. Receives a `ChatCompletionRequest`. - 2. Maps the request messages to the format required by `vscode.LanguageModelChatMessage` (direct mapping). - 3. Uses `vscode.lm.selectChatModels` to select the language model based on the provided `model` (which, by this point, is known to be allowed). - 4. Depending on the `stream` flag: - - **Streaming:** Returns an async iterator that yields chunks mimicking a `ChatCompletionChunk`. Each yielded object should include the necessary properties (e.g., an `id`, `object`, `created`, `model`, and a `choices` array with a `delta` containing the fragment). - - **Non-Streaming:** Accumulates fragments from the model response and then returns a full `ChatCompletionResponse` JSON object. - - **Test:** - - Test this function independently by invoking it with a mock `ChatCompletionRequest`. - - For streaming, verify that iterating over the returned async iterator produces valid chunk objects. - - For non-streaming, verify that the full response object is assembled correctly. - - **Example Outline:** - - Map messages: Iterate over `request.messages` and create an array using `vscode.LanguageModelChatMessage.User(message.content)`. - - Select model using `vscode.lm.selectChatModels({ vendor: 'copilot', family: request.model })`. - - For streaming: - - Use an async generator function that yields objects like: - - `{ - id: "chatcmpl-async-mock", - object: "chat.completion.chunk", - created: Date.now(), - model: request.model, - choices: [{ - delta: { content: fragment }, - index: 0, - finish_reason: null - }] - }` - - Yield for each fragment received. - - For non-streaming: - - Accumulate fragments into a string. 
- - Return an object like: - - `{ - id: "chatcmpl-full-mock", - object: "chat.completion", - created: Date.now(), - choices: [{ - index: 0, - message: { role: "assistant", content: fullResponse }, - finish_reason: "stop" - }], - usage: { prompt_tokens: 0, completion_tokens: 0, total_tokens: 0 } - }`. - -- [ ] **Step 4: Integrate Logging and Error Handling** - - **Task:** Add logging statements in both `server.ts` and `extension.ts` (especially in `processChatRequest`) to capture key events and possible errors. - - **Test:** - - Manually trigger both valid and error cases and examine the logs. - -- [ ] **Step 5: Testing and Verification** - - **Task:** Write or update unit/integration tests to ensure: - 1. The whitelist check properly rejects unsupported models. - 2. The branching on the `stream` flag correctly processes both streaming and non-streaming requests. - 3. The async generator in streaming mode yields correctly structured chunks. - 4. The full response in non-streaming mode is correctly assembled. - - **Test:** - - Run the tests to verify each isolated behavior. - -- [ ] **Step 6: Update Documentation** - - **Task:** Update the README and any relevant internal documentation with instructions on the new server–extension integration, the allowed models, the response handling, and how to run the tests. - - **Test:** - - Confirm that following the documentation allows a new developer to verify the functionality. 
diff --git a/docs/specs/archive/plan-vscode-extension.md b/docs/specs/archive/plan-vscode-extension.md deleted file mode 100644 index acfe485..0000000 --- a/docs/specs/archive/plan-vscode-extension.md +++ /dev/null @@ -1,130 +0,0 @@ -I am in the Analysis Workflow - -Plan for Integrating Express Server into the VS Code Extension ---------------------------------------------------------------- - -Step 1: Update package.json ---------------------------- -- Add the following extension metadata: - - "engines": { "vscode": "^1.70.0" } - - "activationEvents": [ "onCommand:my-extension.copilot-proxy" ] - - "contributes": { - "commands": [ - { - "command": "my-extension.copilot-proxy", - "title": "Hello World" - } - ] - } -- Update "main" to point to "./out/extension.js" -- Adjust build and test scripts if necessary - -Step 2: Update tsconfig.json ----------------------------- -- Change "target" to "ES2021" -- Set "module" to "commonjs" -- Set "rootDir" to "src" -- Change "outDir" to "out" (to match VS Code extension requirements) -- Enable "sourceMap": true -- Exclude "node_modules" and ".vscode-test" - -Step 3: Modify src/server.ts ----------------------------- -- Refactor the server startup by exporting a startup function instead of immediately calling app.listen. -- Modify the code as follows: - - export function startServer() { - const server = app.listen(port, () => { - console.log(`Server is running on port ${port}`); - }); - return server; - } - - // If running as a standalone process, start the server automatically. 
- if (require.main === module) { - startServer(); - } - -Step 4: Create src/extension.ts -------------------------------- -- Create a new file "src/extension.ts" with the following content: - - import * as vscode from 'vscode'; - import { startServer } from './server'; - - let serverInstance: ReturnType; - - export function activate(context: vscode.ExtensionContext) { - console.log('Extension "my-extension" is now active!'); - // Start the Express server on activation - serverInstance = startServer(); - - // Dispose the server on extension deactivation - context.subscriptions.push({ - dispose: () => { - if (serverInstance) { - serverInstance.close(); - console.log('Express server has been stopped.'); - } - } - }); - - // Register the "copilot-proxy" command - context.subscriptions.push( - vscode.commands.registerCommand('my-extension.copilot-proxy', () => { - vscode.window.showInformationMessage('Hello World from My Extension!'); - }) - ); - } - - export function deactivate() { - if (serverInstance) { - serverInstance.close(); - console.log('Express server has been stopped on deactivation.'); - } - } - -Step 5: Create .vscode/launch.json ---------------------------------- -- Create a new file ".vscode/launch.json" with the following content: - - { - "version": "0.2.0", - "configurations": [ - { - "name": "Launch Extension", - "type": "extensionHost", - "request": "launch", - "runtimeExecutable": "${execPath}", - "args": [ - "--extensionDevelopmentPath=${workspaceFolder}" - ], - "outFiles": [ - "${workspaceFolder}/out/**/*.js" - ], - "preLaunchTask": "npm: compile" - } - ] - } - -Step 6 (Optional): Test Setup ----------------------------- -- Optionally, set up a test suite using vscode-test and Mocha. -- Create a folder "test/" containing test files such as "extension.test.ts" and "runTest.ts". 
-- Update package.json with test scripts: - - "test": "npm run compile && npm run test:extension" - - "test:extension": "node ./out/test/runTest.js" - -Summary: ----------- -This plan integrates the Express server into the VS Code extension lifecycle by: -- Refactoring the server startup into an exported function. -- Creating an extension entry point that starts and stops the Express server using the activate and deactivate methods. -- Updating configuration files (package.json, tsconfig.json, .vscode/launch.json) to support extension development. -- Optionally, adding a test suite to validate extension functionality. - -Follow these steps to ensure that: -- The VS Code extension loads correctly. -- The Express server starts when the extension is activated. -- The "my-extension.copilot-proxy" command displays the expected message. -- The server is gracefully stopped on extension deactivation. diff --git a/package-lock.json b/package-lock.json index e4e3b2e..cc536fc 100644 --- a/package-lock.json +++ b/package-lock.json @@ -1,12 +1,12 @@ { "name": "copilot-proxy", - "version": "1.0.2", + "version": "1.0.5", "lockfileVersion": 3, "requires": true, "packages": { "": { "name": "copilot-proxy", - "version": "1.0.2", + "version": "1.0.5", "license": "ISC", "dependencies": { "dotenv": "^16.0.0", @@ -19,11 +19,10 @@ "@types/node": "^18.11.18", "@types/vscode": "^1.70.0", "ts-node": "^10.9.1", - "typescript": "^4.9.5", - "vsce": "^2.15.0" + "typescript": "^4.9.5" }, "engines": { - "vscode": "^1.70.0" + "vscode": "^1.95.0" } }, "node_modules/@cspotcode/source-map-support": { @@ -50,9 +49,9 @@ } }, "node_modules/@jridgewell/sourcemap-codec": { - "version": "1.5.0", - "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", - "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "version": "1.5.5", + "resolved": 
"https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz", + "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==", "dev": true, "license": "MIT" }, @@ -68,9 +67,9 @@ } }, "node_modules/@tsconfig/node10": { - "version": "1.0.11", - "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.11.tgz", - "integrity": "sha512-DcRjDCujK/kCk/cUe8Xz8ZSpm8mS3mNNpta+jGCA6USEDfktlNvm1+IuZ9eTcDbNk41BHwpHHeW+N1lKCz4zOw==", + "version": "1.0.12", + "resolved": "https://registry.npmjs.org/@tsconfig/node10/-/node10-1.0.12.tgz", + "integrity": "sha512-UCYBaeFvM11aU2y3YPZ//O5Rhj+xKyzy7mvcIoAjASbigy8mHMryP5cK7dgjlz2hWxh1g5pLw084E0a/wlUSFQ==", "dev": true, "license": "MIT" }, @@ -96,9 +95,9 @@ "license": "MIT" }, "node_modules/@types/body-parser": { - "version": "1.19.5", - "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.5.tgz", - "integrity": "sha512-fB3Zu92ucau0iQ0JMCFQE7b/dv8Ot07NI3KaZIkIUNXq82k4eBAqUaneXfleGY9JWskeS9y+u0nXMyspcuQrCg==", + "version": "1.19.6", + "resolved": "https://registry.npmjs.org/@types/body-parser/-/body-parser-1.19.6.tgz", + "integrity": "sha512-HLFeCYgz89uk22N5Qg3dvGvsv46B8GLvKKo1zKG4NybA8U2DiEO3w9lqGg29t/tfLRJpJ6iQxnVw4OnB7MoM9g==", "dev": true, "license": "MIT", "dependencies": { @@ -117,22 +116,22 @@ } }, "node_modules/@types/express": { - "version": "4.17.21", - "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.21.tgz", - "integrity": "sha512-ejlPM315qwLpaQlQDTjPdsUFSc6ZsP4AN6AlWnogPjQ7CVi7PYF3YVz+CY3jE2pwYf7E/7HlDAN0rV2GxTG0HQ==", + "version": "4.17.25", + "resolved": "https://registry.npmjs.org/@types/express/-/express-4.17.25.tgz", + "integrity": "sha512-dVd04UKsfpINUnK0yBoYHDF3xu7xVH4BuDotC/xGuycx4CgbP48X/KF/586bcObxT0HENHXEU8Nqtu6NR+eKhw==", "dev": true, "license": "MIT", "dependencies": { "@types/body-parser": "*", "@types/express-serve-static-core": "^4.17.33", "@types/qs": "*", - "@types/serve-static": 
"*" + "@types/serve-static": "^1" } }, "node_modules/@types/express-serve-static-core": { - "version": "4.19.6", - "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.19.6.tgz", - "integrity": "sha512-N4LZ2xG7DatVqhCZzOGb1Yi5lMbXSZcmdLDe9EzSndPV2HpWYWzRbaerl2n27irrm94EPpprqa8KpskPT085+A==", + "version": "4.19.8", + "resolved": "https://registry.npmjs.org/@types/express-serve-static-core/-/express-serve-static-core-4.19.8.tgz", + "integrity": "sha512-02S5fmqeoKzVZCHPZid4b8JH2eM5HzQLZWN2FohQEy/0eXTq8VXZfSN6Pcr3F6N9R/vNrj7cpgbhjie6m/1tCA==", "dev": true, "license": "MIT", "dependencies": { @@ -143,9 +142,9 @@ } }, "node_modules/@types/http-errors": { - "version": "2.0.4", - "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.4.tgz", - "integrity": "sha512-D0CFMMtydbJAegzOyHjtiKPLlvnm3iTZyZRSZoLq2mRhDdmLfIWOCYPfQJ4cu2erKghU++QvjcUjp/5h7hESpA==", + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@types/http-errors/-/http-errors-2.0.5.tgz", + "integrity": "sha512-r8Tayk8HJnX0FztbZN7oVqGccWgw98T/0neJphO91KkmOzug1KkofZURD4UaD5uH8AqcFLfdPErnBod0u71/qg==", "dev": true, "license": "MIT" }, @@ -157,9 +156,9 @@ "license": "MIT" }, "node_modules/@types/morgan": { - "version": "1.9.9", - "resolved": "https://registry.npmjs.org/@types/morgan/-/morgan-1.9.9.tgz", - "integrity": "sha512-iRYSDKVaC6FkGSpEVVIvrRGw0DfJMiQzIn3qr2G5B3C//AWkulhXgaBd7tS9/J79GWSYMTHGs7PfI5b3Y8m+RQ==", + "version": "1.9.10", + "resolved": "https://registry.npmjs.org/@types/morgan/-/morgan-1.9.10.tgz", + "integrity": "sha512-sS4A1zheMvsADRVfT0lYbJ4S9lmsey8Zo2F7cnbYjWHP67Q0AwMYuuzLlkIM2N8gAbb9cubhIVFwcIN2XyYCkA==", "dev": true, "license": "MIT", "dependencies": { @@ -167,9 +166,9 @@ } }, "node_modules/@types/node": { - "version": "18.19.76", - "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.76.tgz", - "integrity": 
"sha512-yvR7Q9LdPz2vGpmpJX5LolrgRdWvB67MJKDPSgIIzpFbaf9a1j/f5DnLp5VDyHGMR0QZHlTr1afsD87QCXFHKw==", + "version": "18.19.130", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.130.tgz", + "integrity": "sha512-GRaXQx6jGfL8sKfaIDD6OupbIHBr9jv7Jnaml9tB7l4v068PAOXqfcujMMo5PhbIs6ggR1XODELqahT2R8v0fg==", "dev": true, "license": "MIT", "dependencies": { @@ -177,9 +176,9 @@ } }, "node_modules/@types/qs": { - "version": "6.9.18", - "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.9.18.tgz", - "integrity": "sha512-kK7dgTYDyGqS+e2Q4aK9X3D7q234CIZ1Bv0q/7Z5IwRDoADNU81xXJK/YVyLbLTZCoIwUoDoffFeF+p/eIklAA==", + "version": "6.14.0", + "resolved": "https://registry.npmjs.org/@types/qs/-/qs-6.14.0.tgz", + "integrity": "sha512-eOunJqu0K1923aExK6y8p6fsihYEn/BYuQ4g0CxAAgFc4b/ZLN4CrsRZ55srTdqoiLzU2B2evC+apEIxprEzkQ==", "dev": true, "license": "MIT" }, @@ -191,32 +190,42 @@ "license": "MIT" }, "node_modules/@types/send": { - "version": "0.17.4", - "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.4.tgz", - "integrity": "sha512-x2EM6TJOybec7c52BX0ZspPodMsQUd5L6PRwOunVyVUhXiBSKf3AezDL8Dgvgt5o0UfKNfuA0eMLr2wLT4AiBA==", + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@types/send/-/send-1.2.1.tgz", + "integrity": "sha512-arsCikDvlU99zl1g69TcAB3mzZPpxgw0UQnaHeC1Nwb015xp8bknZv5rIfri9xTOcMuaVgvabfIRA7PSZVuZIQ==", "dev": true, "license": "MIT", "dependencies": { - "@types/mime": "^1", "@types/node": "*" } }, "node_modules/@types/serve-static": { - "version": "1.15.7", - "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.7.tgz", - "integrity": "sha512-W8Ym+h8nhuRwaKPaDw34QUkwsGi6Rc4yYqvKFo5rm2FUEhCFbzVWrxXUxuKK8TASjWsysJY0nsmNCGhCOIsrOw==", + "version": "1.15.10", + "resolved": "https://registry.npmjs.org/@types/serve-static/-/serve-static-1.15.10.tgz", + "integrity": "sha512-tRs1dB+g8Itk72rlSI2ZrW6vZg0YrLI81iQSTkMmOqnqCaNr/8Ek4VwWcN5vZgCYWbg/JJSGBlUaYGAOP73qBw==", "dev": true, "license": "MIT", "dependencies": { 
"@types/http-errors": "*", "@types/node": "*", - "@types/send": "*" + "@types/send": "<1" + } + }, + "node_modules/@types/serve-static/node_modules/@types/send": { + "version": "0.17.6", + "resolved": "https://registry.npmjs.org/@types/send/-/send-0.17.6.tgz", + "integrity": "sha512-Uqt8rPBE8SY0RK8JB1EzVOIZ32uqy8HwdxCnoCOsYrvnswqmFZ/k+9Ikidlk/ImhsdvBsloHbAlewb2IEBV/Og==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/mime": "^1", + "@types/node": "*" } }, "node_modules/@types/vscode": { - "version": "1.97.0", - "resolved": "https://registry.npmjs.org/@types/vscode/-/vscode-1.97.0.tgz", - "integrity": "sha512-ueE73loeOTe7olaVyqP9mrRI54kVPJifUPjblZo9fYcv1CuVLPOEKEkqW0GkqPC454+nCEoigLWnC2Pp7prZ9w==", + "version": "1.109.0", + "resolved": "https://registry.npmjs.org/@types/vscode/-/vscode-1.109.0.tgz", + "integrity": "sha512-0Pf95rnwEIwDbmXGC08r0B4TQhAbsHQ5UyTIgVgoieDe4cOnf92usuR5dEczb6bTKEp7ziZH4TV1TRGPPCExtw==", "dev": true, "license": "MIT" }, @@ -234,9 +243,9 @@ } }, "node_modules/acorn": { - "version": "8.14.0", - "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.14.0.tgz", - "integrity": "sha512-cl669nCJTZBsL97OF4kUQm5g5hC2uihk0NxY3WENAC0TYdILVkAyHymAntgxGkl7K+t0cXIrH5siy5S4XkFycA==", + "version": "8.15.0", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.15.0.tgz", + "integrity": "sha512-NZyJarBfL7nWwIq+FDL6Zp/yHEhePMNnnJ0y3qfieCrmNvYct8uvtiV41UvlSe6apAfk0fY1FbWx+NwfmpvtTg==", "dev": true, "license": "MIT", "bin": { @@ -259,19 +268,6 @@ "node": ">=0.4.0" } }, - "node_modules/ansi-styles": { - "version": "3.2.1", - "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-3.2.1.tgz", - "integrity": "sha512-VT0ZI6kZRdTh8YyJw3SMbYm/u+NqfsAxEpWO0Pf9sq8/e94WxxOpPKx9FR1FlyCtOVDNOQ+8ntlqFxiRc+r5qA==", - "dev": true, - "license": "MIT", - "dependencies": { - "color-convert": "^1.9.0" - }, - "engines": { - "node": ">=4" - } - }, "node_modules/arg": { "version": "4.1.3", "resolved": 
"https://registry.npmjs.org/arg/-/arg-4.1.3.tgz", @@ -279,58 +275,12 @@ "dev": true, "license": "MIT" }, - "node_modules/argparse": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", - "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", - "dev": true, - "license": "Python-2.0" - }, "node_modules/array-flatten": { "version": "1.1.1", "resolved": "https://registry.npmjs.org/array-flatten/-/array-flatten-1.1.1.tgz", "integrity": "sha512-PCVAQswWemu6UdxsDFFX/+gVeYqKAod3D3UVm91jHwynguOwAvYPhx8nNlM++NqRcK6CxxpUafjmhIdKiHibqg==", "license": "MIT" }, - "node_modules/azure-devops-node-api": { - "version": "11.2.0", - "resolved": "https://registry.npmjs.org/azure-devops-node-api/-/azure-devops-node-api-11.2.0.tgz", - "integrity": "sha512-XdiGPhrpaT5J8wdERRKs5g8E0Zy1pvOYTli7z9E8nmOn3YGp4FhtjhrOyFmX/8veWCwdI69mCHKJw6l+4J/bHA==", - "dev": true, - "license": "MIT", - "dependencies": { - "tunnel": "0.0.6", - "typed-rest-client": "^1.8.4" - } - }, - "node_modules/balanced-match": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", - "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", - "dev": true, - "license": "MIT" - }, - "node_modules/base64-js": { - "version": "1.5.1", - "resolved": "https://registry.npmjs.org/base64-js/-/base64-js-1.5.1.tgz", - "integrity": "sha512-AKpaYlHn8t4SVbOHCy+b5+KKgvR4vrsD8vbvrbiQJps7fKDTkjkDry6ji0rUJjC0kzbNePLwzxq8iypo41qeWA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, "node_modules/basic-auth": { "version": "2.0.1", "resolved": "https://registry.npmjs.org/basic-auth/-/basic-auth-2.0.1.tgz", @@ 
-349,95 +299,30 @@ "integrity": "sha512-Gd2UZBJDkXlY7GbJxfsE8/nvKkUEU1G38c1siN6QP6a9PT9MmHB8GnpscSmMJSoF8LOIrt8ud/wPtojys4G6+g==", "license": "MIT" }, - "node_modules/bl": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/bl/-/bl-4.1.0.tgz", - "integrity": "sha512-1W07cM9gS6DcLperZfFSj+bWLtaPGSOHWhPiGzXmvVJbRLdG82sH/Kn8EtW1VqWVA54AKf2h5k5BbnIbwF3h6w==", - "dev": true, - "license": "MIT", - "dependencies": { - "buffer": "^5.5.0", - "inherits": "^2.0.4", - "readable-stream": "^3.4.0" - } - }, "node_modules/body-parser": { - "version": "1.20.3", - "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.3.tgz", - "integrity": "sha512-7rAxByjUMqQ3/bHJy7D6OGXvx/MMc4IqBn/X0fcM1QUcAItpZrBEYhWGem+tzXH90c+G01ypMcYJBO9Y30203g==", + "version": "1.20.4", + "resolved": "https://registry.npmjs.org/body-parser/-/body-parser-1.20.4.tgz", + "integrity": "sha512-ZTgYYLMOXY9qKU/57FAo8F+HA2dGX7bqGc71txDRC1rS4frdFI5R7NhluHxH6M0YItAP0sHB4uqAOcYKxO6uGA==", "license": "MIT", "dependencies": { - "bytes": "3.1.2", + "bytes": "~3.1.2", "content-type": "~1.0.5", "debug": "2.6.9", "depd": "2.0.0", - "destroy": "1.2.0", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "on-finished": "2.4.1", - "qs": "6.13.0", - "raw-body": "2.5.2", + "destroy": "~1.2.0", + "http-errors": "~2.0.1", + "iconv-lite": "~0.4.24", + "on-finished": "~2.4.1", + "qs": "~6.14.0", + "raw-body": "~2.5.3", "type-is": "~1.6.18", - "unpipe": "1.0.0" + "unpipe": "~1.0.0" }, "engines": { "node": ">= 0.8", "npm": "1.2.8000 || >= 1.4.16" } }, - "node_modules/boolbase": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", - "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==", - "dev": true, - "license": "ISC" - }, - "node_modules/brace-expansion": { - "version": "1.1.11", - "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", - "integrity": 
"sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", - "dev": true, - "license": "MIT", - "dependencies": { - "balanced-match": "^1.0.0", - "concat-map": "0.0.1" - } - }, - "node_modules/buffer": { - "version": "5.7.1", - "resolved": "https://registry.npmjs.org/buffer/-/buffer-5.7.1.tgz", - "integrity": "sha512-EHcyIPBQ4BSGlvjB16k5KgAJ27CIsHY/2JBmCRReo48y9rQ3MaUzWX3KVlBa4U7MyX02HdVj0K7C3WaB3ju7FQ==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "base64-js": "^1.3.1", - "ieee754": "^1.1.13" - } - }, - "node_modules/buffer-crc32": { - "version": "0.2.13", - "resolved": "https://registry.npmjs.org/buffer-crc32/-/buffer-crc32-0.2.13.tgz", - "integrity": "sha512-VO9Ht/+p3SN7SKWqcrgEzjGbRSJYTx+Q1pTQC0wrWqHx0vpJraQ6GtHx8tvcg1rlK1byhU5gccxgOgj7B0TDkQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": "*" - } - }, "node_modules/bytes": { "version": "3.1.2", "resolved": "https://registry.npmjs.org/bytes/-/bytes-3.1.2.tgz", @@ -461,13 +346,13 @@ } }, "node_modules/call-bound": { - "version": "1.0.3", - "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.3.tgz", - "integrity": "sha512-YTd+6wGlNlPxSuri7Y6X8tY2dmm12UMH66RpKMhiX6rsk5wXXnYgbUcOt8kiS31/AjfoTOvCsE+w8nZQLQnzHA==", + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/call-bound/-/call-bound-1.0.4.tgz", + "integrity": "sha512-+ys997U96po4Kx/ABpBCqhA9EuxJaQWDQg7295H4hBphv3IZg0boBKuwYpt4YXp6MZ5AmZQnU/tyMTlRpaSejg==", "license": "MIT", "dependencies": { - "call-bind-apply-helpers": "^1.0.1", - "get-intrinsic": "^1.2.6" + "call-bind-apply-helpers": "^1.0.2", + "get-intrinsic": "^1.3.0" }, "engines": { "node": ">= 0.4" @@ -476,106 +361,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - 
"node_modules/chalk": { - "version": "2.4.2", - "resolved": "https://registry.npmjs.org/chalk/-/chalk-2.4.2.tgz", - "integrity": "sha512-Mti+f9lpJNcwF4tWV8/OrTTtF1gZi+f8FqlyAdouralcFWFQWF2+NgCHShjkCb+IFBLq9buZwE1xckQU4peSuQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "ansi-styles": "^3.2.1", - "escape-string-regexp": "^1.0.5", - "supports-color": "^5.3.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/cheerio": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/cheerio/-/cheerio-1.0.0.tgz", - "integrity": "sha512-quS9HgjQpdaXOvsZz82Oz7uxtXiy6UIsIQcpBj7HRw2M63Skasm9qlDocAM7jNuaxdhpPU7c4kJN+gA5MCu4ww==", - "dev": true, - "license": "MIT", - "dependencies": { - "cheerio-select": "^2.1.0", - "dom-serializer": "^2.0.0", - "domhandler": "^5.0.3", - "domutils": "^3.1.0", - "encoding-sniffer": "^0.2.0", - "htmlparser2": "^9.1.0", - "parse5": "^7.1.2", - "parse5-htmlparser2-tree-adapter": "^7.0.0", - "parse5-parser-stream": "^7.1.2", - "undici": "^6.19.5", - "whatwg-mimetype": "^4.0.0" - }, - "engines": { - "node": ">=18.17" - }, - "funding": { - "url": "https://github.com/cheeriojs/cheerio?sponsor=1" - } - }, - "node_modules/cheerio-select": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/cheerio-select/-/cheerio-select-2.1.0.tgz", - "integrity": "sha512-9v9kG0LvzrlcungtnJtpGNxY+fzECQKhK4EGJX2vByejiMX84MFNQw4UxPJl3bFbTMw+Dfs37XaIkCwTZfLh4g==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0", - "css-select": "^5.1.0", - "css-what": "^6.1.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3", - "domutils": "^3.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/chownr": { - "version": "1.1.4", - "resolved": "https://registry.npmjs.org/chownr/-/chownr-1.1.4.tgz", - "integrity": "sha512-jJ0bqzaylmJtVnNgzTeSOs8DPavpbYgEr/b0YL8/2GO3xJEhInFmhKMUnEJQjZumK7KXGFhUy89PrsJWlakBVg==", - "dev": true, - "license": "ISC" - }, - 
"node_modules/color-convert": { - "version": "1.9.3", - "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-1.9.3.tgz", - "integrity": "sha512-QfAUtd+vFdAtFQcC8CCyYt1fYWxSqAiK2cSD6zDB8N3cpsEBAvRxp9zOGg6G/SHHJYAT88/az/IuDGALsNVbGg==", - "dev": true, - "license": "MIT", - "dependencies": { - "color-name": "1.1.3" - } - }, - "node_modules/color-name": { - "version": "1.1.3", - "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.3.tgz", - "integrity": "sha512-72fSenhMw2HZMTVHeCA9KCmpEIbzWiQsjN+BHcBbS9vr1mtt+vJjPdksIBNUmKAW8TFUDPJK5SUU3QhE9NEXDw==", - "dev": true, - "license": "MIT" - }, - "node_modules/commander": { - "version": "6.2.1", - "resolved": "https://registry.npmjs.org/commander/-/commander-6.2.1.tgz", - "integrity": "sha512-U7VdrJFnJgo4xjrHpTzu0yrHPGImdsmD95ZlgYSEajAn2JKzDhDTPG9kBTefmObL2w/ngeZnilk+OV9CG3d7UA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">= 6" - } - }, - "node_modules/concat-map": { - "version": "0.0.1", - "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", - "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", - "dev": true, - "license": "MIT" - }, "node_modules/content-disposition": { "version": "0.5.4", "resolved": "https://registry.npmjs.org/content-disposition/-/content-disposition-0.5.4.tgz", @@ -598,18 +383,18 @@ } }, "node_modules/cookie": { - "version": "0.7.1", - "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.1.tgz", - "integrity": "sha512-6DnInpx7SJ2AK3+CTUE/ZM0vWTUboZCegxhC2xiIydHR9jNuTAASBrfEpHhiGOZw/nX51bHt6YQl8jsGo4y/0w==", + "version": "0.7.2", + "resolved": "https://registry.npmjs.org/cookie/-/cookie-0.7.2.tgz", + "integrity": "sha512-yki5XnKuf750l50uGTllt6kKILY4nQ1eNIQatoXEByZ5dWgnKqbnqmTrBE5B4N7lrMJKQ2ytWMiTO2o0v6Ew/w==", "license": "MIT", "engines": { "node": ">= 0.6" } }, "node_modules/cookie-signature": { - "version": "1.0.6", - "resolved": 
"https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.6.tgz", - "integrity": "sha512-QADzlaHc8icV8I7vbaJXJwod9HWYp8uCqf1xa4OfNu1T7JVxQIrUgOWtHdNDtPiywmFbiS12VjotIXLrKM3orQ==", + "version": "1.0.7", + "resolved": "https://registry.npmjs.org/cookie-signature/-/cookie-signature-1.0.7.tgz", + "integrity": "sha512-NXdYc3dLr47pBkpUCHtKSwIOQXLVn8dZEuywboCOJY/osA0wFSLlSawr3KN8qXJEyX66FcONTH8EIlVuK0yyFA==", "license": "MIT" }, "node_modules/create-require": { @@ -619,36 +404,6 @@ "dev": true, "license": "MIT" }, - "node_modules/css-select": { - "version": "5.1.0", - "resolved": "https://registry.npmjs.org/css-select/-/css-select-5.1.0.tgz", - "integrity": "sha512-nwoRF1rvRRnnCqqY7updORDsuqKzqYJ28+oSMaJMMgOauh3fvwHqMS7EZpIPqK8GL+g9mKxF1vP/ZjSeNjEVHg==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0", - "css-what": "^6.1.0", - "domhandler": "^5.0.2", - "domutils": "^3.0.1", - "nth-check": "^2.0.1" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, - "node_modules/css-what": { - "version": "6.1.0", - "resolved": "https://registry.npmjs.org/css-what/-/css-what-6.1.0.tgz", - "integrity": "sha512-HTUrgRJ7r4dsZKU6GjmpfRK1O76h97Z8MfS1G0FozR+oF2kG6Vfe8JE6zwrkbxigziPHinCJ+gCPjA9EaBDtRw==", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">= 6" - }, - "funding": { - "url": "https://github.com/sponsors/fb55" - } - }, "node_modules/debug": { "version": "2.6.9", "resolved": "https://registry.npmjs.org/debug/-/debug-2.6.9.tgz", @@ -658,32 +413,6 @@ "ms": "2.0.0" } }, - "node_modules/decompress-response": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/decompress-response/-/decompress-response-6.0.0.tgz", - "integrity": "sha512-aW35yZM6Bb/4oJlZncMH2LCoZtJXTRxES17vE3hoRiowU2kWHaJKFkSBDnDR+cm9J+9QhXmREyIfv0pji9ejCQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "mimic-response": "^3.1.0" - }, - "engines": { - "node": ">=10" - }, - "funding": { - "url": 
"https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/deep-extend": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/deep-extend/-/deep-extend-0.6.0.tgz", - "integrity": "sha512-LOHxIOaPYdHlJRtCQfDIVZtfw/ufM8+rVj649RIHzcm/vGwQRXFt6OPqIFWsm2XEMrNIEtWR64sY1LEKD2vAOA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4.0.0" - } - }, "node_modules/depd": { "version": "2.0.0", "resolved": "https://registry.npmjs.org/depd/-/depd-2.0.0.tgz", @@ -703,89 +432,20 @@ "npm": "1.2.8000 || >= 1.4.16" } }, - "node_modules/detect-libc": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.0.3.tgz", - "integrity": "sha512-bwy0MGW55bG41VqxxypOsdSdGqLwXPI/focwgTYCFMbdUiBAxLg9CFzG08sz2aqzknwiX7Hkl0bQENjg8iLByw==", - "dev": true, - "license": "Apache-2.0", - "engines": { - "node": ">=8" - } - }, "node_modules/diff": { - "version": "4.0.2", - "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.2.tgz", - "integrity": "sha512-58lmxKSA4BNyLz+HHMUzlOEpg09FV+ev6ZMe3vJihgdxzgcwZ8VoEEPmALCZG9LmqfVoNMMKpttIYTVG6uDY7A==", + "version": "4.0.4", + "resolved": "https://registry.npmjs.org/diff/-/diff-4.0.4.tgz", + "integrity": "sha512-X07nttJQkwkfKfvTPG/KSnE2OMdcUCao6+eXF3wmnIQRn2aPAHH3VxDbDOdegkd6JbPsXqShpvEOHfAT+nCNwQ==", "dev": true, "license": "BSD-3-Clause", "engines": { "node": ">=0.3.1" } }, - "node_modules/dom-serializer": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/dom-serializer/-/dom-serializer-2.0.0.tgz", - "integrity": "sha512-wIkAryiqt/nV5EQKqQpo3SToSOV9J0DnbJqwK7Wv/Trc92zIAYZ4FlMu+JPFW1DfGFt81ZTCGgDEabffXeLyJg==", - "dev": true, - "license": "MIT", - "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.2", - "entities": "^4.2.0" - }, - "funding": { - "url": "https://github.com/cheeriojs/dom-serializer?sponsor=1" - } - }, - "node_modules/domelementtype": { - "version": "2.3.0", - "resolved": 
"https://registry.npmjs.org/domelementtype/-/domelementtype-2.3.0.tgz", - "integrity": "sha512-OLETBj6w0OsagBwdXnPdN0cnMfF9opN69co+7ZrbfPGrdpPVNBUj02spi6B1N7wChLQiPn4CSH/zJvXw56gmHw==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "BSD-2-Clause" - }, - "node_modules/domhandler": { - "version": "5.0.3", - "resolved": "https://registry.npmjs.org/domhandler/-/domhandler-5.0.3.tgz", - "integrity": "sha512-cgwlv/1iFQiFnU96XXgROh8xTeetsnJiDsTc7TYCLFd9+/WNkIqPTxiM/8pSd8VIrhXGTf1Ny1q1hquVqDJB5w==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "domelementtype": "^2.3.0" - }, - "engines": { - "node": ">= 4" - }, - "funding": { - "url": "https://github.com/fb55/domhandler?sponsor=1" - } - }, - "node_modules/domutils": { - "version": "3.2.2", - "resolved": "https://registry.npmjs.org/domutils/-/domutils-3.2.2.tgz", - "integrity": "sha512-6kZKyUajlDuqlHKVX1w7gyslj9MPIXzIFiz/rGu35uC1wMi+kMhQwGhl4lt9unC9Vb9INnY9Z3/ZA3+FhASLaw==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "dom-serializer": "^2.0.0", - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3" - }, - "funding": { - "url": "https://github.com/fb55/domutils?sponsor=1" - } - }, "node_modules/dotenv": { - "version": "16.4.7", - "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.4.7.tgz", - "integrity": "sha512-47qPchRCykZC03FhkYAhrvwU4xDBFIj1QPqaarj6mdM/hgUzfPHcpkHJOn3mJAufFeeAxAzeGsr5X0M4k6fLZQ==", + "version": "16.6.1", + "resolved": "https://registry.npmjs.org/dotenv/-/dotenv-16.6.1.tgz", + "integrity": "sha512-uBq4egWHTcTt33a72vpSG0z3HnPuIl6NqYcTrKEg2azoEyl2hpW0zqlxysq2pK9HlDIHyHyakeYaYnSAwd8bow==", "license": "BSD-2-Clause", "engines": { "node": ">=12" @@ -823,56 +483,6 @@ "node": ">= 0.8" } }, - "node_modules/encoding-sniffer": { - "version": "0.2.0", - "resolved": "https://registry.npmjs.org/encoding-sniffer/-/encoding-sniffer-0.2.0.tgz", - "integrity": 
"sha512-ju7Wq1kg04I3HtiYIOrUrdfdDvkyO9s5XM8QAj/bN61Yo/Vb4vgJxy5vi4Yxk01gWHbrofpPtpxM8bKger9jhg==", - "dev": true, - "license": "MIT", - "dependencies": { - "iconv-lite": "^0.6.3", - "whatwg-encoding": "^3.1.1" - }, - "funding": { - "url": "https://github.com/fb55/encoding-sniffer?sponsor=1" - } - }, - "node_modules/encoding-sniffer/node_modules/iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "dev": true, - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/end-of-stream": { - "version": "1.4.4", - "resolved": "https://registry.npmjs.org/end-of-stream/-/end-of-stream-1.4.4.tgz", - "integrity": "sha512-+uw1inIHVPQoaVuHzRyXd21icM+cnt4CzD5rW+NC1wjOUSTOs+Te7FOv7AhN7vS9x/oIyhLP5PR1H+phQAHu5Q==", - "dev": true, - "license": "MIT", - "dependencies": { - "once": "^1.4.0" - } - }, - "node_modules/entities": { - "version": "4.5.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", - "integrity": "sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", - "dev": true, - "license": "BSD-2-Clause", - "engines": { - "node": ">=0.12" - }, - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, "node_modules/es-define-property": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/es-define-property/-/es-define-property-1.0.1.tgz", @@ -909,16 +519,6 @@ "integrity": "sha512-NiSupZ4OeuGwr68lGIeym/ksIZMJodUGOSCZ/FSnTxcrekbvqrgdUxlJOMpijaKZVjAJrWrGs/6Jy8OMuyj9ow==", "license": "MIT" }, - "node_modules/escape-string-regexp": { - "version": "1.0.5", - "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-1.0.5.tgz", - "integrity": 
"sha512-vbRorB5FUQWvla16U8R/qgaFIya2qGzwDrNmCZuYKrbdSUMG6I1ZCGQRefkRVhuOkIGVne7BQ35DSfo1qvJqFg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.8.0" - } - }, "node_modules/etag": { "version": "1.8.1", "resolved": "https://registry.npmjs.org/etag/-/etag-1.8.1.tgz", @@ -928,50 +528,40 @@ "node": ">= 0.6" } }, - "node_modules/expand-template": { - "version": "2.0.3", - "resolved": "https://registry.npmjs.org/expand-template/-/expand-template-2.0.3.tgz", - "integrity": "sha512-XYfuKMvj4O35f/pOXLObndIRvyQ+/+6AhODh+OKWj9S9498pHHn/IMszH+gt0fBCRWMNfk1ZSp5x3AifmnI2vg==", - "dev": true, - "license": "(MIT OR WTFPL)", - "engines": { - "node": ">=6" - } - }, "node_modules/express": { - "version": "4.21.2", - "resolved": "https://registry.npmjs.org/express/-/express-4.21.2.tgz", - "integrity": "sha512-28HqgMZAmih1Czt9ny7qr6ek2qddF4FclbMzwhCREB6OFfH+rXAnuNCwo1/wFvrtbgsQDb4kSbX9de9lFbrXnA==", + "version": "4.22.1", + "resolved": "https://registry.npmjs.org/express/-/express-4.22.1.tgz", + "integrity": "sha512-F2X8g9P1X7uCPZMA3MVf9wcTqlyNp7IhH5qPCI0izhaOIYXaW9L535tGA3qmjRzpH+bZczqq7hVKxTR4NWnu+g==", "license": "MIT", "dependencies": { "accepts": "~1.3.8", "array-flatten": "1.1.1", - "body-parser": "1.20.3", - "content-disposition": "0.5.4", + "body-parser": "~1.20.3", + "content-disposition": "~0.5.4", "content-type": "~1.0.4", - "cookie": "0.7.1", - "cookie-signature": "1.0.6", + "cookie": "~0.7.1", + "cookie-signature": "~1.0.6", "debug": "2.6.9", "depd": "2.0.0", "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "etag": "~1.8.1", - "finalhandler": "1.3.1", - "fresh": "0.5.2", - "http-errors": "2.0.0", + "finalhandler": "~1.3.1", + "fresh": "~0.5.2", + "http-errors": "~2.0.0", "merge-descriptors": "1.0.3", "methods": "~1.1.2", - "on-finished": "2.4.1", + "on-finished": "~2.4.1", "parseurl": "~1.3.3", - "path-to-regexp": "0.1.12", + "path-to-regexp": "~0.1.12", "proxy-addr": "~2.0.7", - "qs": "6.13.0", + "qs": "~6.14.0", "range-parser": "~1.2.1", "safe-buffer": 
"5.2.1", - "send": "0.19.0", - "serve-static": "1.16.2", + "send": "~0.19.0", + "serve-static": "~1.16.2", "setprototypeof": "1.2.0", - "statuses": "2.0.1", + "statuses": "~2.0.1", "type-is": "~1.6.18", "utils-merge": "1.0.1", "vary": "~1.1.2" @@ -984,28 +574,18 @@ "url": "https://opencollective.com/express" } }, - "node_modules/fd-slicer": { - "version": "1.1.0", - "resolved": "https://registry.npmjs.org/fd-slicer/-/fd-slicer-1.1.0.tgz", - "integrity": "sha512-cE1qsB/VwyQozZ+q1dGxR8LBYNZeofhEdUNGSMbQD3Gw2lAzX9Zb3uIU6Ebc/Fmyjo9AWWfnn0AUCHqtevs/8g==", - "dev": true, - "license": "MIT", - "dependencies": { - "pend": "~1.2.0" - } - }, "node_modules/finalhandler": { - "version": "1.3.1", - "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.1.tgz", - "integrity": "sha512-6BN9trH7bp3qvnrRyzsBz+g3lZxTNZTbVO2EV1CS0WIcDbawYVdYvGflME/9QP0h0pYlCDBCTjYa9nZzMDpyxQ==", + "version": "1.3.2", + "resolved": "https://registry.npmjs.org/finalhandler/-/finalhandler-1.3.2.tgz", + "integrity": "sha512-aA4RyPcd3badbdABGDuTXCMTtOneUCAYH/gxoYRTZlIJdF0YPWuGqiAsIrhNnnqdXGswYk6dGujem4w80UJFhg==", "license": "MIT", "dependencies": { "debug": "2.6.9", "encodeurl": "~2.0.0", "escape-html": "~1.0.3", - "on-finished": "2.4.1", + "on-finished": "~2.4.1", "parseurl": "~1.3.3", - "statuses": "2.0.1", + "statuses": "~2.0.2", "unpipe": "~1.0.0" }, "engines": { @@ -1030,20 +610,6 @@ "node": ">= 0.6" } }, - "node_modules/fs-constants": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs-constants/-/fs-constants-1.0.0.tgz", - "integrity": "sha512-y6OAwoSIf7FyjMIv94u+b5rdheZEjzR63GTyZJm5qh4Bi+2YgwLCcI/fPFZkL5PSixOt6ZNKm+w+Hfp/Bciwow==", - "dev": true, - "license": "MIT" - }, - "node_modules/fs.realpath": { - "version": "1.0.0", - "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", - "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", - "dev": true, - "license": "ISC" - }, 
"node_modules/function-bind": { "version": "1.1.2", "resolved": "https://registry.npmjs.org/function-bind/-/function-bind-1.1.2.tgz", @@ -1090,35 +656,6 @@ "node": ">= 0.4" } }, - "node_modules/github-from-package": { - "version": "0.0.0", - "resolved": "https://registry.npmjs.org/github-from-package/-/github-from-package-0.0.0.tgz", - "integrity": "sha512-SyHy3T1v2NUXn29OsWdxmK6RwHD+vkj3v8en8AOBZ1wBQ/hCAQ5bAQTD02kW4W9tUp/3Qh6J8r9EvntiyCmOOw==", - "dev": true, - "license": "MIT" - }, - "node_modules/glob": { - "version": "7.2.3", - "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", - "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", - "deprecated": "Glob versions prior to v9 are no longer supported", - "dev": true, - "license": "ISC", - "dependencies": { - "fs.realpath": "^1.0.0", - "inflight": "^1.0.4", - "inherits": "2", - "minimatch": "^3.1.1", - "once": "^1.3.0", - "path-is-absolute": "^1.0.0" - }, - "engines": { - "node": "*" - }, - "funding": { - "url": "https://github.com/sponsors/isaacs" - } - }, "node_modules/gopd": { "version": "1.2.0", "resolved": "https://registry.npmjs.org/gopd/-/gopd-1.2.0.tgz", @@ -1131,16 +668,6 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/has-flag": { - "version": "3.0.0", - "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-3.0.0.tgz", - "integrity": "sha512-sKJf1+ceQBr4SMkvQnBDNDtf4TXpVhVGateu0t918bl30FnbE2m4vNLX+VWe/dpjlb+HugGYzW7uQXH98HPEYw==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4" - } - }, "node_modules/has-symbols": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/has-symbols/-/has-symbols-1.1.0.tgz", @@ -1165,53 +692,24 @@ "node": ">= 0.4" } }, - "node_modules/hosted-git-info": { - "version": "4.1.0", - "resolved": "https://registry.npmjs.org/hosted-git-info/-/hosted-git-info-4.1.0.tgz", - "integrity": 
"sha512-kyCuEOWjJqZuDbRHzL8V93NzQhwIB71oFWSyzVo+KPZI+pnQPPxucdkrOZvkLRnrf5URsQM+IJ09Dw29cRALIA==", - "dev": true, - "license": "ISC", - "dependencies": { - "lru-cache": "^6.0.0" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/htmlparser2": { - "version": "9.1.0", - "resolved": "https://registry.npmjs.org/htmlparser2/-/htmlparser2-9.1.0.tgz", - "integrity": "sha512-5zfg6mHUoaer/97TxnGpxmbR7zJtPwIYFMZ/H5ucTlPZhKvtum05yiPK3Mgai3a0DyVxv7qYqoweaEd2nrYQzQ==", - "dev": true, - "funding": [ - "https://github.com/fb55/htmlparser2?sponsor=1", - { - "type": "github", - "url": "https://github.com/sponsors/fb55" - } - ], - "license": "MIT", - "dependencies": { - "domelementtype": "^2.3.0", - "domhandler": "^5.0.3", - "domutils": "^3.1.0", - "entities": "^4.5.0" - } - }, "node_modules/http-errors": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.0.tgz", - "integrity": "sha512-FtwrG/euBzaEjYeRqOgly7G0qviiXoJWnvEH2Z1plBdXgbyjv34pHTSb9zoeHMyDy33+DWy5Wt9Wo+TURtOYSQ==", + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/http-errors/-/http-errors-2.0.1.tgz", + "integrity": "sha512-4FbRdAX+bSdmo4AUFuS0WNiPz8NgFt+r8ThgNWmlrjQjt1Q7ZR9+zTlce2859x4KSXrwIsaeTqDoKQmtP8pLmQ==", "license": "MIT", "dependencies": { - "depd": "2.0.0", - "inherits": "2.0.4", - "setprototypeof": "1.2.0", - "statuses": "2.0.1", - "toidentifier": "1.0.1" + "depd": "~2.0.0", + "inherits": "~2.0.4", + "setprototypeof": "~1.2.0", + "statuses": "~2.0.2", + "toidentifier": "~1.0.1" }, "engines": { "node": ">= 0.8" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/express" } }, "node_modules/iconv-lite": { @@ -1226,52 +724,12 @@ "node": ">=0.10.0" } }, - "node_modules/ieee754": { - "version": "1.2.1", - "resolved": "https://registry.npmjs.org/ieee754/-/ieee754-1.2.1.tgz", - "integrity": "sha512-dcyqhDvX1C46lXZcVqCpK+FtMRQVdIMN6/Df5js2zouUsqG7I6sFxitIC+7KYK29KdXOLHdu9zL4sFnoVQnqaA==", - "dev": true, - "funding": [ 
- { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "BSD-3-Clause" - }, - "node_modules/inflight": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", - "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", - "deprecated": "This module is not supported, and leaks memory. Do not use it. Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", - "dev": true, - "license": "ISC", - "dependencies": { - "once": "^1.3.0", - "wrappy": "1" - } - }, "node_modules/inherits": { "version": "2.0.4", "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", "license": "ISC" }, - "node_modules/ini": { - "version": "1.3.8", - "resolved": "https://registry.npmjs.org/ini/-/ini-1.3.8.tgz", - "integrity": "sha512-JV/yugV2uzW5iMRSiZAyDtQd+nxtUnjeLt0acNdw98kKLrvuRVyB80tsREOE7yvGVgalhZ6RNXCmEHkUKBKxew==", - "dev": true, - "license": "ISC" - }, "node_modules/ipaddr.js": { "version": "1.9.1", "resolved": "https://registry.npmjs.org/ipaddr.js/-/ipaddr.js-1.9.1.tgz", @@ -1281,51 +739,6 @@ "node": ">= 0.10" } }, - "node_modules/keytar": { - "version": "7.9.0", - "resolved": "https://registry.npmjs.org/keytar/-/keytar-7.9.0.tgz", - "integrity": "sha512-VPD8mtVtm5JNtA2AErl6Chp06JBfy7diFQ7TQQhdpWOl6MrCRB+eRbvAZUsbGQS9kiMq0coJsy0W0vHpDCkWsQ==", - "dev": true, - "hasInstallScript": true, - "license": "MIT", - "dependencies": { - "node-addon-api": "^4.3.0", - "prebuild-install": "^7.0.1" - } - }, - "node_modules/leven": { - "version": "3.1.0", - "resolved": 
"https://registry.npmjs.org/leven/-/leven-3.1.0.tgz", - "integrity": "sha512-qsda+H8jTaUaN/x5vzW2rzc+8Rw4TAQ/4KjB46IwK5VH+IlVeeeje/EoZRpiXvIqjFgK84QffqPztGI3VBLG1A==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=6" - } - }, - "node_modules/linkify-it": { - "version": "3.0.3", - "resolved": "https://registry.npmjs.org/linkify-it/-/linkify-it-3.0.3.tgz", - "integrity": "sha512-ynTsyrFSdE5oZ/O9GEf00kPngmOfVwazR5GKDq6EYfhlpFug3J2zybX56a2PRRpc9P+FuSoGNAwjlbDs9jJBPQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "uc.micro": "^1.0.1" - } - }, - "node_modules/lru-cache": { - "version": "6.0.0", - "resolved": "https://registry.npmjs.org/lru-cache/-/lru-cache-6.0.0.tgz", - "integrity": "sha512-Jo6dJ04CmSjuznwJSS3pUeWmd/H0ffTlkXXgwZi+eq1UCmqQwCh+eLsYOYCwY991i2Fah4h1BEMCx4qThGbsiA==", - "dev": true, - "license": "ISC", - "dependencies": { - "yallist": "^4.0.0" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/make-error": { "version": "1.3.6", "resolved": "https://registry.npmjs.org/make-error/-/make-error-1.3.6.tgz", @@ -1333,33 +746,6 @@ "dev": true, "license": "ISC" }, - "node_modules/markdown-it": { - "version": "12.3.2", - "resolved": "https://registry.npmjs.org/markdown-it/-/markdown-it-12.3.2.tgz", - "integrity": "sha512-TchMembfxfNVpHkbtriWltGWc+m3xszaRD0CZup7GFFhzIgQqxIfn3eGj1yZpfuflzPvfkt611B2Q/Bsk1YnGg==", - "dev": true, - "license": "MIT", - "dependencies": { - "argparse": "^2.0.1", - "entities": "~2.1.0", - "linkify-it": "^3.0.1", - "mdurl": "^1.0.1", - "uc.micro": "^1.0.5" - }, - "bin": { - "markdown-it": "bin/markdown-it.js" - } - }, - "node_modules/markdown-it/node_modules/entities": { - "version": "2.1.0", - "resolved": "https://registry.npmjs.org/entities/-/entities-2.1.0.tgz", - "integrity": "sha512-hCx1oky9PFrJ611mf0ifBLBRW8lUUVRlFolb5gWRfIELabBlbp9xZvrqZLZAs+NxFnbfQoeGd8wDkygjg7U85w==", - "dev": true, - "license": "BSD-2-Clause", - "funding": { - "url": "https://github.com/fb55/entities?sponsor=1" - } - }, 
"node_modules/math-intrinsics": { "version": "1.1.0", "resolved": "https://registry.npmjs.org/math-intrinsics/-/math-intrinsics-1.1.0.tgz", @@ -1369,13 +755,6 @@ "node": ">= 0.4" } }, - "node_modules/mdurl": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/mdurl/-/mdurl-1.0.1.tgz", - "integrity": "sha512-/sKlQJCBYVY9Ers9hqzKou4H6V5UWc/M59TH2dvkt+84itfnq7uFOMLpOiOS4ujvHP4etln18fmIxA5R5fll0g==", - "dev": true, - "license": "MIT" - }, "node_modules/media-typer": { "version": "0.3.0", "resolved": "https://registry.npmjs.org/media-typer/-/media-typer-0.3.0.tgz", @@ -1436,60 +815,17 @@ "node": ">= 0.6" } }, - "node_modules/mimic-response": { - "version": "3.1.0", - "resolved": "https://registry.npmjs.org/mimic-response/-/mimic-response-3.1.0.tgz", - "integrity": "sha512-z0yWI+4FDrrweS8Zmt4Ej5HdJmky15+L2e6Wgn3+iK5fWzb6T3fhNFq2+MeTRb064c6Wr4N/wv0DzQTjNzHNGQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=10" - }, - "funding": { - "url": "https://github.com/sponsors/sindresorhus" - } - }, - "node_modules/minimatch": { - "version": "3.1.2", - "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", - "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", - "dev": true, - "license": "ISC", - "dependencies": { - "brace-expansion": "^1.1.7" - }, - "engines": { - "node": "*" - } - }, - "node_modules/minimist": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/minimist/-/minimist-1.2.8.tgz", - "integrity": "sha512-2yyAR8qBkN3YuheJanUpWC5U3bb5osDywNB8RzDVlDwDHbocAJveqqj1u8+SVD7jkWT4yvsHCpWqqWqAxb0zCA==", - "dev": true, - "license": "MIT", - "funding": { - "url": "https://github.com/sponsors/ljharb" - } - }, - "node_modules/mkdirp-classic": { - "version": "0.5.3", - "resolved": "https://registry.npmjs.org/mkdirp-classic/-/mkdirp-classic-0.5.3.tgz", - "integrity": 
"sha512-gKLcREMhtuZRwRAfqP3RFW+TK4JqApVBtOIftVgjuABpAtpxhPGaDcfvbhNvD0B8iD1oUr/txX35NjcaY6Ns/A==", - "dev": true, - "license": "MIT" - }, "node_modules/morgan": { - "version": "1.10.0", - "resolved": "https://registry.npmjs.org/morgan/-/morgan-1.10.0.tgz", - "integrity": "sha512-AbegBVI4sh6El+1gNwvD5YIck7nSA36weD7xvIxG4in80j/UoK8AEGaWnnz8v1GxonMCltmlNs5ZKbGvl9b1XQ==", + "version": "1.10.1", + "resolved": "https://registry.npmjs.org/morgan/-/morgan-1.10.1.tgz", + "integrity": "sha512-223dMRJtI/l25dJKWpgij2cMtywuG/WiUKXdvwfbhGKBhy1puASqXwFzmWZ7+K73vUPoR7SS2Qz2cI/g9MKw0A==", "license": "MIT", "dependencies": { "basic-auth": "~2.0.1", "debug": "2.6.9", "depd": "~2.0.0", "on-finished": "~2.3.0", - "on-headers": "~1.0.2" + "on-headers": "~1.1.0" }, "engines": { "node": ">= 0.8.0" @@ -1513,20 +849,6 @@ "integrity": "sha512-Tpp60P6IUJDTuOq/5Z8cdskzJujfwqfOTkrwIwj7IRISpnkJnT6SyJ4PCPnGMoFjC9ddhal5KVIYtAt97ix05A==", "license": "MIT" }, - "node_modules/mute-stream": { - "version": "0.0.8", - "resolved": "https://registry.npmjs.org/mute-stream/-/mute-stream-0.0.8.tgz", - "integrity": "sha512-nnbWWOkoWyUsTjKrhgD0dcz22mdkSnpYqbEjIm2nhwhuxlSkpywJmBo8h0ZqJdkp73mb90SssHkN4rsRaBAfAA==", - "dev": true, - "license": "ISC" - }, - "node_modules/napi-build-utils": { - "version": "2.0.0", - "resolved": "https://registry.npmjs.org/napi-build-utils/-/napi-build-utils-2.0.0.tgz", - "integrity": "sha512-GEbrYkbfF7MoNaoh2iGG84Mnf/WZfB0GdGEsM8wz7Expx/LlWf5U8t9nvJKXSp3qr5IsEbK04cBGhol/KwOsWA==", - "dev": true, - "license": "MIT" - }, "node_modules/negotiator": { "version": "0.6.3", "resolved": "https://registry.npmjs.org/negotiator/-/negotiator-0.6.3.tgz", @@ -1536,52 +858,6 @@ "node": ">= 0.6" } }, - "node_modules/node-abi": { - "version": "3.74.0", - "resolved": "https://registry.npmjs.org/node-abi/-/node-abi-3.74.0.tgz", - "integrity": "sha512-c5XK0MjkGBrQPGYG24GBADZud0NCbznxNx0ZkS+ebUTrmV1qTDxPxSL8zEAPURXSbLRWVexxmP4986BziahL5w==", - "dev": true, - "license": "MIT", - "dependencies": { - 
"semver": "^7.3.5" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/node-abi/node_modules/semver": { - "version": "7.7.1", - "resolved": "https://registry.npmjs.org/semver/-/semver-7.7.1.tgz", - "integrity": "sha512-hlq8tAfn0m/61p4BVRcPzIGr6LKiMwo4VM6dGi6pt4qcRkmNzTcWq6eCEjEh+qXjkMDvPlOFFSGwQjoEa6gyMA==", - "dev": true, - "license": "ISC", - "bin": { - "semver": "bin/semver.js" - }, - "engines": { - "node": ">=10" - } - }, - "node_modules/node-addon-api": { - "version": "4.3.0", - "resolved": "https://registry.npmjs.org/node-addon-api/-/node-addon-api-4.3.0.tgz", - "integrity": "sha512-73sE9+3UaLYYFmDsFZnqCInzPyh3MqIwZO9cw58yIqAZhONrrabrYyYe3TuIqtIiOuTXVhsGau8hcrhhwSsDIQ==", - "dev": true, - "license": "MIT" - }, - "node_modules/nth-check": { - "version": "2.1.1", - "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", - "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", - "dev": true, - "license": "BSD-2-Clause", - "dependencies": { - "boolbase": "^1.0.0" - }, - "funding": { - "url": "https://github.com/fb55/nth-check?sponsor=1" - } - }, "node_modules/object-inspect": { "version": "1.13.4", "resolved": "https://registry.npmjs.org/object-inspect/-/object-inspect-1.13.4.tgz", @@ -1607,74 +883,14 @@ } }, "node_modules/on-headers": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.0.2.tgz", - "integrity": "sha512-pZAE+FJLoyITytdqK0U5s+FIpjN0JP3OzFi/u8Rx+EV5/W+JTWGXG8xFzevE7AjBfDqHv/8vL8qQsIhHnqRkrA==", + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/on-headers/-/on-headers-1.1.0.tgz", + "integrity": "sha512-737ZY3yNnXy37FHkQxPzt4UZ2UWPWiCZWLvFZ4fu5cueciegX0zGPnrlY6bwRg4FdQOe9YU8MkmJwGhoMybl8A==", "license": "MIT", "engines": { "node": ">= 0.8" } }, - "node_modules/once": { - "version": "1.4.0", - "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", - "integrity": 
"sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", - "dev": true, - "license": "ISC", - "dependencies": { - "wrappy": "1" - } - }, - "node_modules/parse-semver": { - "version": "1.1.1", - "resolved": "https://registry.npmjs.org/parse-semver/-/parse-semver-1.1.1.tgz", - "integrity": "sha512-Eg1OuNntBMH0ojvEKSrvDSnwLmvVuUOSdylH/pSCPNMIspLlweJyIWXCE+k/5hm3cj/EBUYwmWkjhBALNP4LXQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "semver": "^5.1.0" - } - }, - "node_modules/parse5": { - "version": "7.2.1", - "resolved": "https://registry.npmjs.org/parse5/-/parse5-7.2.1.tgz", - "integrity": "sha512-BuBYQYlv1ckiPdQi/ohiivi9Sagc9JG+Ozs0r7b/0iK3sKmrb0b9FdWdBbOdx6hBCM/F9Ir82ofnBhtZOjCRPQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "entities": "^4.5.0" - }, - "funding": { - "url": "https://github.com/inikulin/parse5?sponsor=1" - } - }, - "node_modules/parse5-htmlparser2-tree-adapter": { - "version": "7.1.0", - "resolved": "https://registry.npmjs.org/parse5-htmlparser2-tree-adapter/-/parse5-htmlparser2-tree-adapter-7.1.0.tgz", - "integrity": "sha512-ruw5xyKs6lrpo9x9rCZqZZnIUntICjQAd0Wsmp396Ul9lN/h+ifgVV1x1gZHi8euej6wTfpqX8j+BFQxF0NS/g==", - "dev": true, - "license": "MIT", - "dependencies": { - "domhandler": "^5.0.3", - "parse5": "^7.0.0" - }, - "funding": { - "url": "https://github.com/inikulin/parse5?sponsor=1" - } - }, - "node_modules/parse5-parser-stream": { - "version": "7.1.2", - "resolved": "https://registry.npmjs.org/parse5-parser-stream/-/parse5-parser-stream-7.1.2.tgz", - "integrity": "sha512-JyeQc9iwFLn5TbvvqACIF/VXG6abODeB3Fwmv/TGdLk2LfbWkaySGY72at4+Ty7EkPZj854u4CrICqNk2qIbow==", - "dev": true, - "license": "MIT", - "dependencies": { - "parse5": "^7.0.0" - }, - "funding": { - "url": "https://github.com/inikulin/parse5?sponsor=1" - } - }, "node_modules/parseurl": { "version": "1.3.3", "resolved": "https://registry.npmjs.org/parseurl/-/parseurl-1.3.3.tgz", @@ -1684,56 +900,12 @@ "node": ">= 
0.8" } }, - "node_modules/path-is-absolute": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", - "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, "node_modules/path-to-regexp": { "version": "0.1.12", "resolved": "https://registry.npmjs.org/path-to-regexp/-/path-to-regexp-0.1.12.tgz", "integrity": "sha512-RA1GjUVMnvYFxuqovrEqZoxxW5NUZqbwKtYz/Tt7nXerk0LbLblQmrsgdeOxV5SFHf0UDggjS/bSeOZwt1pmEQ==", "license": "MIT" }, - "node_modules/pend": { - "version": "1.2.0", - "resolved": "https://registry.npmjs.org/pend/-/pend-1.2.0.tgz", - "integrity": "sha512-F3asv42UuXchdzt+xXqfW1OGlVBe+mxa2mqI0pg5yAHZPvFmY3Y6drSf/GQ1A86WgWEN9Kzh/WrgKa6iGcHXLg==", - "dev": true, - "license": "MIT" - }, - "node_modules/prebuild-install": { - "version": "7.1.3", - "resolved": "https://registry.npmjs.org/prebuild-install/-/prebuild-install-7.1.3.tgz", - "integrity": "sha512-8Mf2cbV7x1cXPUILADGI3wuhfqWvtiLA1iclTDbFRZkgRQS0NqsPZphna9V+HyTEadheuPmjaJMsbzKQFOzLug==", - "dev": true, - "license": "MIT", - "dependencies": { - "detect-libc": "^2.0.0", - "expand-template": "^2.0.3", - "github-from-package": "0.0.0", - "minimist": "^1.2.3", - "mkdirp-classic": "^0.5.3", - "napi-build-utils": "^2.0.0", - "node-abi": "^3.3.0", - "pump": "^3.0.0", - "rc": "^1.2.7", - "simple-get": "^4.0.0", - "tar-fs": "^2.0.0", - "tunnel-agent": "^0.6.0" - }, - "bin": { - "prebuild-install": "bin.js" - }, - "engines": { - "node": ">=10" - } - }, "node_modules/proxy-addr": { "version": "2.0.7", "resolved": "https://registry.npmjs.org/proxy-addr/-/proxy-addr-2.0.7.tgz", @@ -1747,24 +919,13 @@ "node": ">= 0.10" } }, - "node_modules/pump": { - "version": "3.0.2", - "resolved": "https://registry.npmjs.org/pump/-/pump-3.0.2.tgz", - "integrity": 
"sha512-tUPXtzlGM8FE3P0ZL6DVs/3P58k9nk8/jZeQCurTJylQA8qFYzHFfhBJkuqyE0FifOsQ0uKWekiZ5g8wtr28cw==", - "dev": true, - "license": "MIT", - "dependencies": { - "end-of-stream": "^1.1.0", - "once": "^1.3.1" - } - }, "node_modules/qs": { - "version": "6.13.0", - "resolved": "https://registry.npmjs.org/qs/-/qs-6.13.0.tgz", - "integrity": "sha512-+38qI9SOr8tfZ4QmJNplMUxqjbe7LKvvZgWdExBOmd+egZTtjLB67Gu0HRX3u/XOq7UU2Nx6nsjvS16Z9uwfpg==", + "version": "6.14.1", + "resolved": "https://registry.npmjs.org/qs/-/qs-6.14.1.tgz", + "integrity": "sha512-4EK3+xJl8Ts67nLYNwqw/dsFVnCf+qR7RgXSK9jEEm9unao3njwMDdmsdvoKBKHzxd7tCYz5e5M+SnMjdtXGQQ==", "license": "BSD-3-Clause", "dependencies": { - "side-channel": "^1.0.6" + "side-channel": "^1.1.0" }, "engines": { "node": ">=0.6" @@ -1783,64 +944,20 @@ } }, "node_modules/raw-body": { - "version": "2.5.2", - "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.2.tgz", - "integrity": "sha512-8zGqypfENjCIqGhgXToC8aB2r7YrBX+AQAfIPs/Mlk+BtPTztOvTS01NRW/3Eh60J+a48lt8qsCzirQ6loCVfA==", + "version": "2.5.3", + "resolved": "https://registry.npmjs.org/raw-body/-/raw-body-2.5.3.tgz", + "integrity": "sha512-s4VSOf6yN0rvbRZGxs8Om5CWj6seneMwK3oDb4lWDH0UPhWcxwOWw5+qk24bxq87szX1ydrwylIOp2uG1ojUpA==", "license": "MIT", "dependencies": { - "bytes": "3.1.2", - "http-errors": "2.0.0", - "iconv-lite": "0.4.24", - "unpipe": "1.0.0" + "bytes": "~3.1.2", + "http-errors": "~2.0.1", + "iconv-lite": "~0.4.24", + "unpipe": "~1.0.0" }, "engines": { "node": ">= 0.8" } }, - "node_modules/rc": { - "version": "1.2.8", - "resolved": "https://registry.npmjs.org/rc/-/rc-1.2.8.tgz", - "integrity": "sha512-y3bGgqKj3QBdxLbLkomlohkvsA8gdAiUQlSBJnBhfn+BPxg4bc62d8TcBW15wavDfgexCgccckhcZvywyQYPOw==", - "dev": true, - "license": "(BSD-2-Clause OR MIT OR Apache-2.0)", - "dependencies": { - "deep-extend": "^0.6.0", - "ini": "~1.3.0", - "minimist": "^1.2.0", - "strip-json-comments": "~2.0.1" - }, - "bin": { - "rc": "cli.js" - } - }, - "node_modules/read": { - "version": 
"1.0.7", - "resolved": "https://registry.npmjs.org/read/-/read-1.0.7.tgz", - "integrity": "sha512-rSOKNYUmaxy0om1BNjMN4ezNT6VKK+2xF4GBhc81mkH7L60i6dp8qPYrkndNLT3QPphoII3maL9PVC9XmhHwVQ==", - "dev": true, - "license": "ISC", - "dependencies": { - "mute-stream": "~0.0.4" - }, - "engines": { - "node": ">=0.8" - } - }, - "node_modules/readable-stream": { - "version": "3.6.2", - "resolved": "https://registry.npmjs.org/readable-stream/-/readable-stream-3.6.2.tgz", - "integrity": "sha512-9u/sniCrY3D5WdsERHzHE4G2YCXqoG5FTHUiCC4SIbr6XcLZBY05ya9EKjYek9O5xOAwjGq+1JdGBAS7Q9ScoA==", - "dev": true, - "license": "MIT", - "dependencies": { - "inherits": "^2.0.3", - "string_decoder": "^1.1.1", - "util-deprecate": "^1.0.1" - }, - "engines": { - "node": ">= 6" - } - }, "node_modules/safe-buffer": { "version": "5.2.1", "resolved": "https://registry.npmjs.org/safe-buffer/-/safe-buffer-5.2.1.tgz", @@ -1867,56 +984,30 @@ "integrity": "sha512-YZo3K82SD7Riyi0E1EQPojLz7kpepnSQI9IyPbHHg1XXXevb5dJI7tpyN2ADxGcQbHG7vcyRHk0cbwqcQriUtg==", "license": "MIT" }, - "node_modules/sax": { - "version": "1.4.1", - "resolved": "https://registry.npmjs.org/sax/-/sax-1.4.1.tgz", - "integrity": "sha512-+aWOz7yVScEGoKNd4PA10LZ8sk0A/z5+nXQG5giUO5rprX9jgYsTdov9qCchZiPIZezbZH+jRut8nPodFAX4Jg==", - "dev": true, - "license": "ISC" - }, - "node_modules/semver": { - "version": "5.7.2", - "resolved": "https://registry.npmjs.org/semver/-/semver-5.7.2.tgz", - "integrity": "sha512-cBznnQ9KjJqU67B52RMC65CMarK2600WFnbkcaiwWq3xy/5haFJlshgnpjovMVJ+Hff49d8GEn0b87C5pDQ10g==", - "dev": true, - "license": "ISC", - "bin": { - "semver": "bin/semver" - } - }, "node_modules/send": { - "version": "0.19.0", - "resolved": "https://registry.npmjs.org/send/-/send-0.19.0.tgz", - "integrity": "sha512-dW41u5VfLXu8SJh5bwRmyYUbAoSB3c9uQh6L8h/KtsFREPWpbX1lrljJo186Jc4nmci/sGUZ9a0a0J2zgfq2hw==", + "version": "0.19.2", + "resolved": "https://registry.npmjs.org/send/-/send-0.19.2.tgz", + "integrity": 
"sha512-VMbMxbDeehAxpOtWJXlcUS5E8iXh6QmN+BkRX1GARS3wRaXEEgzCcB10gTQazO42tpNIya8xIyNx8fll1OFPrg==", "license": "MIT", "dependencies": { "debug": "2.6.9", "depd": "2.0.0", "destroy": "1.2.0", - "encodeurl": "~1.0.2", + "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "etag": "~1.8.1", - "fresh": "0.5.2", - "http-errors": "2.0.0", + "fresh": "~0.5.2", + "http-errors": "~2.0.1", "mime": "1.6.0", "ms": "2.1.3", - "on-finished": "2.4.1", + "on-finished": "~2.4.1", "range-parser": "~1.2.1", - "statuses": "2.0.1" + "statuses": "~2.0.2" }, "engines": { "node": ">= 0.8.0" } }, - "node_modules/send/node_modules/encodeurl": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/encodeurl/-/encodeurl-1.0.2.tgz", - "integrity": "sha512-TPJXq8JqFaVYm2CWmPvnP2Iyo4ZSM7/QKcSmuMLDObfpH5fi7RUGmd/rTDf+rut/saiDiQEeVTNgAmJEdAOx0w==", - "license": "MIT", - "engines": { - "node": ">= 0.8" - } - }, "node_modules/send/node_modules/ms": { "version": "2.1.3", "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.3.tgz", @@ -1924,15 +1015,15 @@ "license": "MIT" }, "node_modules/serve-static": { - "version": "1.16.2", - "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.2.tgz", - "integrity": "sha512-VqpjJZKadQB/PEbEwvFdO43Ax5dFBZ2UECszz8bQ7pi7wt//PWe1P6MN7eCnjsatYtBT6EuiClbjSWP2WrIoTw==", + "version": "1.16.3", + "resolved": "https://registry.npmjs.org/serve-static/-/serve-static-1.16.3.tgz", + "integrity": "sha512-x0RTqQel6g5SY7Lg6ZreMmsOzncHFU7nhnRWkKgWuMTu5NN0DR5oruckMqRvacAN9d5w6ARnRBXl9xhDCgfMeA==", "license": "MIT", "dependencies": { "encodeurl": "~2.0.0", "escape-html": "~1.0.3", "parseurl": "~1.3.3", - "send": "0.19.0" + "send": "~0.19.1" }, "engines": { "node": ">= 0.8.0" @@ -2016,135 +1107,15 @@ "url": "https://github.com/sponsors/ljharb" } }, - "node_modules/simple-concat": { - "version": "1.0.1", - "resolved": "https://registry.npmjs.org/simple-concat/-/simple-concat-1.0.1.tgz", - "integrity": 
"sha512-cSFtAPtRhljv69IK0hTVZQ+OfE9nePi/rtJmw5UjHeVyVroEqJXP1sFztKUy1qU+xvz3u/sfYJLa947b7nAN2Q==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT" - }, - "node_modules/simple-get": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/simple-get/-/simple-get-4.0.1.tgz", - "integrity": "sha512-brv7p5WgH0jmQJr1ZDDfKDOSeWWg+OVypG99A/5vYGPqJ6pxiaHLy8nxtFjBA7oMa01ebA9gfh1uMCFqOuXxvA==", - "dev": true, - "funding": [ - { - "type": "github", - "url": "https://github.com/sponsors/feross" - }, - { - "type": "patreon", - "url": "https://www.patreon.com/feross" - }, - { - "type": "consulting", - "url": "https://feross.org/support" - } - ], - "license": "MIT", - "dependencies": { - "decompress-response": "^6.0.0", - "once": "^1.3.1", - "simple-concat": "^1.0.0" - } - }, "node_modules/statuses": { - "version": "2.0.1", - "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.1.tgz", - "integrity": "sha512-RwNA9Z/7PrK06rYLIzFMlaF+l73iwpzsqRIFgbMLbTcLD6cOao82TaWefPXQvB2fOC4AjuYSEndS7N/mTCbkdQ==", + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/statuses/-/statuses-2.0.2.tgz", + "integrity": "sha512-DvEy55V3DB7uknRo+4iOGT5fP1slR8wQohVdknigZPMpMstaKJQWhwiYBACJE3Ul2pTnATihhBYnRhZQHGBiRw==", "license": "MIT", "engines": { "node": ">= 0.8" } }, - "node_modules/string_decoder": { - "version": "1.3.0", - "resolved": "https://registry.npmjs.org/string_decoder/-/string_decoder-1.3.0.tgz", - "integrity": "sha512-hkRX8U1WjJFd8LsDJ2yQ/wWWxaopEsABU1XfkM8A+j0+85JAGppt16cr1Whg6KIbb4okU6Mql6BOj+uup/wKeA==", - "dev": true, - "license": "MIT", - "dependencies": { - "safe-buffer": "~5.2.0" - } - }, - "node_modules/strip-json-comments": { - "version": "2.0.1", - "resolved": 
"https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-2.0.1.tgz", - "integrity": "sha512-4gB8na07fecVVkOI6Rs4e7T6NOTki5EmL7TUduTs6bu3EdnSycntVJ4re8kgZA+wx9IueI2Y11bfbgwtzuE0KQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/supports-color": { - "version": "5.5.0", - "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-5.5.0.tgz", - "integrity": "sha512-QjVjwdXIt408MIiAqCX4oUKsgU2EqAGzs2Ppkm4aQYbjm+ZEWEcW4SfFNTr4uMNZma0ey4f5lgLrkB0aX0QMow==", - "dev": true, - "license": "MIT", - "dependencies": { - "has-flag": "^3.0.0" - }, - "engines": { - "node": ">=4" - } - }, - "node_modules/tar-fs": { - "version": "2.1.2", - "resolved": "https://registry.npmjs.org/tar-fs/-/tar-fs-2.1.2.tgz", - "integrity": "sha512-EsaAXwxmx8UB7FRKqeozqEPop69DXcmYwTQwXvyAPF352HJsPdkVhvTaDPYqfNgruveJIJy3TA2l+2zj8LJIJA==", - "dev": true, - "license": "MIT", - "dependencies": { - "chownr": "^1.1.1", - "mkdirp-classic": "^0.5.2", - "pump": "^3.0.0", - "tar-stream": "^2.1.4" - } - }, - "node_modules/tar-stream": { - "version": "2.2.0", - "resolved": "https://registry.npmjs.org/tar-stream/-/tar-stream-2.2.0.tgz", - "integrity": "sha512-ujeqbceABgwMZxEJnk2HDY2DlnUZ+9oEcb1KzTVfYHio0UE6dG71n60d8D2I4qNvleWrrXpmjpt7vZeF1LnMZQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "bl": "^4.0.3", - "end-of-stream": "^1.4.1", - "fs-constants": "^1.0.0", - "inherits": "^2.0.3", - "readable-stream": "^3.1.1" - }, - "engines": { - "node": ">=6" - } - }, - "node_modules/tmp": { - "version": "0.2.3", - "resolved": "https://registry.npmjs.org/tmp/-/tmp-0.2.3.tgz", - "integrity": "sha512-nZD7m9iCPC5g0pYmcaxogYKggSfLsdxl8of3Q/oIbqCqLLIO9IAF0GWjX1z9NZRHPiXv8Wex4yDCaZsgEw0Y8w==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=14.14" - } - }, "node_modules/toidentifier": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/toidentifier/-/toidentifier-1.0.1.tgz", @@ -2198,29 +1169,6 @@ } } }, - 
"node_modules/tunnel": { - "version": "0.0.6", - "resolved": "https://registry.npmjs.org/tunnel/-/tunnel-0.0.6.tgz", - "integrity": "sha512-1h/Lnq9yajKY2PEbBadPXj3VxsDDu844OnaAo52UVmIzIvwwtBPIuNvkjuzBlTWpfJyUbG3ez0KSBibQkj4ojg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=0.6.11 <=0.7.0 || >=0.7.3" - } - }, - "node_modules/tunnel-agent": { - "version": "0.6.0", - "resolved": "https://registry.npmjs.org/tunnel-agent/-/tunnel-agent-0.6.0.tgz", - "integrity": "sha512-McnNiV1l8RYeY8tBgEpuodCC1mLUdbSN+CYBL7kJsJNInOP8UjDDEwdk6Mw60vdLLrr5NHKZhMAOSrR2NZuQ+w==", - "dev": true, - "license": "Apache-2.0", - "dependencies": { - "safe-buffer": "^5.0.1" - }, - "engines": { - "node": "*" - } - }, "node_modules/type-is": { "version": "1.6.18", "resolved": "https://registry.npmjs.org/type-is/-/type-is-1.6.18.tgz", @@ -2234,18 +1182,6 @@ "node": ">= 0.6" } }, - "node_modules/typed-rest-client": { - "version": "1.8.11", - "resolved": "https://registry.npmjs.org/typed-rest-client/-/typed-rest-client-1.8.11.tgz", - "integrity": "sha512-5UvfMpd1oelmUPRbbaVnq+rHP7ng2cE4qoQkQeAqxRL6PklkxsM0g32/HL0yfvruK6ojQ5x8EE+HF4YV6DtuCA==", - "dev": true, - "license": "MIT", - "dependencies": { - "qs": "^6.9.1", - "tunnel": "0.0.6", - "underscore": "^1.12.1" - } - }, "node_modules/typescript": { "version": "4.9.5", "resolved": "https://registry.npmjs.org/typescript/-/typescript-4.9.5.tgz", @@ -2260,30 +1196,6 @@ "node": ">=4.2.0" } }, - "node_modules/uc.micro": { - "version": "1.0.6", - "resolved": "https://registry.npmjs.org/uc.micro/-/uc.micro-1.0.6.tgz", - "integrity": "sha512-8Y75pvTYkLJW2hWQHXxoqRgV7qb9B+9vFEtidML+7koHUFapnVJAZ6cKs+Qjz5Aw3aZWHMC6u0wJE3At+nSGwA==", - "dev": true, - "license": "MIT" - }, - "node_modules/underscore": { - "version": "1.13.7", - "resolved": "https://registry.npmjs.org/underscore/-/underscore-1.13.7.tgz", - "integrity": "sha512-GMXzWtsc57XAtguZgaQViUOzs0KTkk8ojr3/xAxXLITqf/3EMwxC0inyETfDFjH/Krbhuep0HNbbjI9i/q3F3g==", - "dev": true, - "license": 
"MIT" - }, - "node_modules/undici": { - "version": "6.21.1", - "resolved": "https://registry.npmjs.org/undici/-/undici-6.21.1.tgz", - "integrity": "sha512-q/1rj5D0/zayJB2FraXdaWxbhWiNKDvu8naDT2dl1yTlvJp4BLtOcp2a5BvgGNQpYYJzau7tf1WgKv3b+7mqpQ==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18.17" - } - }, "node_modules/undici-types": { "version": "5.26.5", "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", @@ -2300,20 +1212,6 @@ "node": ">= 0.8" } }, - "node_modules/url-join": { - "version": "4.0.1", - "resolved": "https://registry.npmjs.org/url-join/-/url-join-4.0.1.tgz", - "integrity": "sha512-jk1+QP6ZJqyOiuEI9AEWQfju/nB2Pw466kbA0LEZljHwKeMgd9WrAEgEGxjPDD2+TNbbb37rTyhEfrCXfuKXnA==", - "dev": true, - "license": "MIT" - }, - "node_modules/util-deprecate": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", - "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", - "dev": true, - "license": "MIT" - }, "node_modules/utils-merge": { "version": "1.0.1", "resolved": "https://registry.npmjs.org/utils-merge/-/utils-merge-1.0.1.tgz", @@ -2339,137 +1237,6 @@ "node": ">= 0.8" } }, - "node_modules/vsce": { - "version": "2.15.0", - "resolved": "https://registry.npmjs.org/vsce/-/vsce-2.15.0.tgz", - "integrity": "sha512-P8E9LAZvBCQnoGoizw65JfGvyMqNGlHdlUXD1VAuxtvYAaHBKLBdKPnpy60XKVDAkQCfmMu53g+gq9FM+ydepw==", - "deprecated": "vsce has been renamed to @vscode/vsce. 
Install using @vscode/vsce instead.", - "dev": true, - "license": "MIT", - "dependencies": { - "azure-devops-node-api": "^11.0.1", - "chalk": "^2.4.2", - "cheerio": "^1.0.0-rc.9", - "commander": "^6.1.0", - "glob": "^7.0.6", - "hosted-git-info": "^4.0.2", - "keytar": "^7.7.0", - "leven": "^3.1.0", - "markdown-it": "^12.3.2", - "mime": "^1.3.4", - "minimatch": "^3.0.3", - "parse-semver": "^1.1.1", - "read": "^1.0.7", - "semver": "^5.1.0", - "tmp": "^0.2.1", - "typed-rest-client": "^1.8.4", - "url-join": "^4.0.1", - "xml2js": "^0.4.23", - "yauzl": "^2.3.1", - "yazl": "^2.2.2" - }, - "bin": { - "vsce": "vsce" - }, - "engines": { - "node": ">= 14" - } - }, - "node_modules/whatwg-encoding": { - "version": "3.1.1", - "resolved": "https://registry.npmjs.org/whatwg-encoding/-/whatwg-encoding-3.1.1.tgz", - "integrity": "sha512-6qN4hJdMwfYBtE3YBTTHhoeuUrDBPZmbQaxWAqSALV/MeEnR5z1xd8UKud2RAkFoPkmB+hli1TZSnyi84xz1vQ==", - "dev": true, - "license": "MIT", - "dependencies": { - "iconv-lite": "0.6.3" - }, - "engines": { - "node": ">=18" - } - }, - "node_modules/whatwg-encoding/node_modules/iconv-lite": { - "version": "0.6.3", - "resolved": "https://registry.npmjs.org/iconv-lite/-/iconv-lite-0.6.3.tgz", - "integrity": "sha512-4fCk79wshMdzMp2rH06qWrJE4iolqLhCUH+OiuIgU++RB0+94NlDL81atO7GX55uUKueo0txHNtvEyI6D7WdMw==", - "dev": true, - "license": "MIT", - "dependencies": { - "safer-buffer": ">= 2.1.2 < 3.0.0" - }, - "engines": { - "node": ">=0.10.0" - } - }, - "node_modules/whatwg-mimetype": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/whatwg-mimetype/-/whatwg-mimetype-4.0.0.tgz", - "integrity": "sha512-QaKxh0eNIi2mE9p2vEdzfagOKHCcj1pJ56EEHGQOVxp8r9/iszLUUV7v89x9O1p/T+NlTM5W7jW6+cz4Fq1YVg==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=18" - } - }, - "node_modules/wrappy": { - "version": "1.0.2", - "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", - "integrity": 
"sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", - "dev": true, - "license": "ISC" - }, - "node_modules/xml2js": { - "version": "0.4.23", - "resolved": "https://registry.npmjs.org/xml2js/-/xml2js-0.4.23.tgz", - "integrity": "sha512-ySPiMjM0+pLDftHgXY4By0uswI3SPKLDw/i3UXbnO8M/p28zqexCUoPmQFrYD+/1BzhGJSs2i1ERWKJAtiLrug==", - "dev": true, - "license": "MIT", - "dependencies": { - "sax": ">=0.6.0", - "xmlbuilder": "~11.0.0" - }, - "engines": { - "node": ">=4.0.0" - } - }, - "node_modules/xmlbuilder": { - "version": "11.0.1", - "resolved": "https://registry.npmjs.org/xmlbuilder/-/xmlbuilder-11.0.1.tgz", - "integrity": "sha512-fDlsI/kFEx7gLvbecc0/ohLG50fugQp8ryHzMTuW9vSa1GJ0XYWKnhsUx7oie3G98+r56aTQIUB4kht42R3JvA==", - "dev": true, - "license": "MIT", - "engines": { - "node": ">=4.0" - } - }, - "node_modules/yallist": { - "version": "4.0.0", - "resolved": "https://registry.npmjs.org/yallist/-/yallist-4.0.0.tgz", - "integrity": "sha512-3wdGidZyq5PB084XLES5TpOSRA3wjXAlIWMhum2kRcv/41Sn2emQ0dycQW4uZXLejwKvg6EsvbdlVL+FYEct7A==", - "dev": true, - "license": "ISC" - }, - "node_modules/yauzl": { - "version": "2.10.0", - "resolved": "https://registry.npmjs.org/yauzl/-/yauzl-2.10.0.tgz", - "integrity": "sha512-p4a9I6X6nu6IhoGmBqAcbJy1mlC4j27vEPZX9F4L4/vZT3Lyq1VkFHw/V/PUcB9Buo+DG3iHkT0x3Qya58zc3g==", - "dev": true, - "license": "MIT", - "dependencies": { - "buffer-crc32": "~0.2.3", - "fd-slicer": "~1.1.0" - } - }, - "node_modules/yazl": { - "version": "2.5.1", - "resolved": "https://registry.npmjs.org/yazl/-/yazl-2.5.1.tgz", - "integrity": "sha512-phENi2PLiHnHb6QBVot+dJnaAZ0xosj7p3fWl+znIjBDlnMI2PsZCJZ306BPTFOaHf5qdDEI8x5qFrSOBN5vrw==", - "dev": true, - "license": "MIT", - "dependencies": { - "buffer-crc32": "~0.2.3" - } - }, "node_modules/yn": { "version": "3.1.1", "resolved": "https://registry.npmjs.org/yn/-/yn-3.1.1.tgz", diff --git a/package.json b/package.json index 7d5f92d..46195d0 100644 --- a/package.json +++ b/package.json 
@@ -1,28 +1,25 @@ { "name": "copilot-proxy", - "version": "1.0.2", - "description": "copilot-proxy for aider", + "displayName": "Copilot Proxy", + "version": "1.0.5", + "description": "OpenAI compatible proxy for GitHub Copilot", "publisher": "PoAI", "license": "ISC", "author": "Lutz Leonhardt", + "readme": "README.md", "repository": { "type": "git", - "url": "https://github.com/yourusername/copilot-proxy.git" + "url": "https://github.com/lutzleonhardt/copilot-proxy.git" }, + "categories": [ + "AI", + "Other" + ], "type": "commonjs", "main": "./out/extension.js", - "files": [ - "out", - "README.md" - ], "engines": { - "vscode": "^1.70.0" + "vscode": "^1.95.0" }, - "activationEvents": [ - "onCommand:Copilot Proxy - Start Server", - "onCommand:Copilot Proxy - Stop Server", - "onCommand:Copilot Proxy: Configure Port" - ], "contributes": { "configuration": { "type": "object", @@ -37,26 +34,41 @@ }, "commands": [ { - "command": "Copilot Proxy - Start Server", - "title": "Copilot Proxy - Start Server" + "command": "copilotProxy.startServer", + "title": "Copilot Proxy: Start Server" }, { - "command": "Copilot Proxy - Stop Server", - "title": "Copilot Proxy - Stop Server" + "command": "copilotProxy.stopServer", + "title": "Copilot Proxy: Stop Server" }, { - "command": "Copilot Proxy: Configure Port", + "command": "copilotProxy.listModels", + "title": "Copilot Proxy: List Models" + }, + { + "command": "copilotProxy.configurePort", "title": "Copilot Proxy: Configure Port" + }, + { + "command": "copilotProxy.createApiToken", + "title": "Copilot Proxy: Create API Token" + }, + { + "command": "copilotProxy.listApiTokens", + "title": "Copilot Proxy: List API Tokens" + }, + { + "command": "copilotProxy.removeApiToken", + "title": "Copilot Proxy: Remove API Token" } ] }, "scripts": { "build": "tsc", "start": "ts-node src/server.ts", - "vsix": "vsce package" + "package": "npm run build && npx @vscode/vsce package" }, "dependencies": { - "dotenv": "^16.0.0", "express": "^4.18.2", 
"morgan": "^1.10.0" }, @@ -64,9 +76,8 @@ "@types/express": "^4.17.14", "@types/morgan": "^1.9.4", "@types/node": "^18.11.18", - "@types/vscode": "^1.70.0", + "@types/vscode": "^1.95.0", "ts-node": "^10.9.1", - "typescript": "^4.9.5", - "vsce": "^2.15.0" + "typescript": "^4.9.5" } } diff --git a/readme.md b/readme.md index 0cab98e..c148840 100644 --- a/readme.md +++ b/readme.md @@ -7,7 +7,7 @@ Copilot Proxy is a Visual Studio Code extension that exposes the VS Code Languag [![Watch the video](https://img.youtube.com/vi/i1I2CAPOXHM/maxresdefault.jpg)](https://youtu.be/i1I2CAPOXHM) [YouTube Explanation](https://youtu.be/i1I2CAPOXHM) -**Disclaimer:** +**Disclaimer:** This extension is provided as an experiment only. In the past, some users, i.e., cline users, faced bans due to excessive usage. Since Microsoft introduced rate limits to the VS Code LM, no further bans have been reported. Nevertheless, I do not recommend using this extension for anything beyond research and prototyping. At the moment, the supported LLMs by GitHub Copilot are: "gpt-4o", "gpt-4o-mini", "o1", "o1-mini", "claude-3.5-sonnet", and "o3-mini". @@ -37,18 +37,52 @@ At the moment, the supported LLMs by GitHub Copilot are: "gpt-4o", "gpt-4o-mini" The extension provides a configuration setting to specify the port for the Express server: -- **Setting:** `copilotProxy.port` +- **Setting:** `copilotProxy.port` **Default:** `3000` You can change this setting in two ways: - **Via Settings UI:** Open the VS Code Settings (`Ctrl+,` or `Cmd+,`) and search for "Copilot Proxy". - **Via Command Palette:** Run the command **"Copilot Proxy: Configure Port"** to interactively set the port. +### API Token Authentication + +The extension provides API token management commands to secure your proxy server. Tokens are securely stored and persist across sessions. + +**Managing API Tokens:** + +1. 
**Create a Token:** + - Run **"Copilot Proxy: Create API Token"** from the Command Palette + - Enter a name for the token (e.g., "aider", "production") + - The token will be generated and displayed + - Copy the token immediately - you'll need it for client requests + +2. **List Tokens:** + - Run **"Copilot Proxy: List API Tokens"** to view all created tokens + - Token details are shown in the Copilot Proxy Log + +3. **Remove a Token:** + - Run **"Copilot Proxy: Remove API Token"** + - Select the token to remove from the list + - Confirm the removal + +**Using Tokens in Requests:** + +When API tokens exist, all requests to the server must include a valid token in the `Authorization` header: + +```bash +curl -H "Authorization: Bearer cpx_your_token_here" \ + -H "Content-Type: application/json" \ + -d '{"model": "gpt-4o", "messages": [{"role": "user", "content": "Hello"}]}' \ + http://localhost:3000/v1/chat/completions +``` + +**Important:** If no tokens are created, authentication is disabled and the server accepts all requests. Create at least one token to enable authentication. + ## Using the Extension ### Starting the Server -- Open the Command Palette and run **"Copilot Proxy - Start Server"**. +- Open the Command Palette and run **"Copilot Proxy: Start Server"**. - The server will start on the configured port (default is `3000`), and a notification will confirm the port. ### Stopping the Server @@ -102,9 +136,25 @@ The content of the file should look like this: - name: aider/extra_params extra_params: api_key: n/a - api_base: http://localhost:3000/v1 + api_base: http://localhost:3000/v1 ``` +**Using with API Token Authentication:** + +If you have created API tokens (recommended for security), replace `n/a` with your actual token in all `api_key` fields. To create a token, run **"Copilot Proxy: Create API Token"** from the VS Code Command Palette. 
+ +Example with authentication: + +```yaml +- name: claude-3-5-sonnet-20241022 + extra_params: + model: openai/claude-3.5-sonnet + api_key: cpx_abc123def456... # Your actual token from Create API Token command + api_base: http://localhost:3000/v1 +``` + +If no tokens are created, authentication is disabled and you can use `n/a` or any value. + ## Contributing diff --git a/src/assistants/index.ts b/src/assistants/index.ts new file mode 100644 index 0000000..99867ad --- /dev/null +++ b/src/assistants/index.ts @@ -0,0 +1,25 @@ +/** + * Assistants API Module + * + * Exports all assistants-related functionality: + * - Types for Assistant, Thread, Message, Run, RunStep, StreamEvent + * - State management with persistence support + * - Run execution engine with streaming and native tool calling + * - Tool utilities for ID generation and format conversion + * - Express routes + */ + +export * from './types'; +export { state, SerializedState, PendingToolContext } from './state'; +export { + executeRun, + executeRunNonStreaming, + requestRunCancellation, + isRunActive, + continueRunWithToolOutputs, + continueRunWithToolOutputsNonStreaming +} from './runner'; +export { + generateToolCallId, +} from './tools'; +export { default as assistantsRouter } from './routes'; diff --git a/src/assistants/routes.ts b/src/assistants/routes.ts new file mode 100644 index 0000000..288fd4f --- /dev/null +++ b/src/assistants/routes.ts @@ -0,0 +1,691 @@ +/** + * Express Routes for OpenAI Assistants API + * + * Implements all CRUD operations for: + * - /v1/assistants + * - /v1/threads + * - /v1/threads/:thread_id/messages + * - /v1/threads/:thread_id/runs + * + * Future extensibility: + * - /v1/threads/runs (create thread and run) + * - /v1/threads/:thread_id/runs/:run_id/steps + * - /v1/threads/:thread_id/runs/:run_id/submit_tool_outputs + */ + +import { Router, Request, Response } from 'express'; +import { state } from './state'; +import { executeRun, executeRunNonStreaming, 
requestRunCancellation, continueRunWithToolOutputs, continueRunWithToolOutputsNonStreaming } from './runner'; +import { + Assistant, + Thread, + Run, + Message, + CreateAssistantRequest, + UpdateAssistantRequest, + CreateThreadRequest, + CreateMessageRequest, + CreateRunRequest, + CreateThreadAndRunRequest, + SubmitToolOutputsRequest, + PaginationParams, +} from './types'; +import { errorResponse, notFoundError, createMessage } from '../utils'; + +const router = Router(); + +// ==================== Validation Helpers ====================================== + +function validateRequired(body: T, fields: (keyof T)[]): string | null { + for (const field of fields) { + if (body[field] === undefined || body[field] === null) { + return `Missing required field: ${String(field)}`; + } + } + return null; +} + +function parsePaginationParams(query: Request['query']): PaginationParams { + return { + limit: query.limit ? Math.min(parseInt(query.limit as string, 10), 100) : 20, + order: (query.order as 'asc' | 'desc') ?? 'desc', + after: query.after as string | undefined, + before: query.before as string | undefined + }; +} + +// ==================== Assistants Routes ==================== + +// Create assistant +router.post('/v1/assistants', (req: Request, res: Response) => { + const body = req.body as CreateAssistantRequest; + + const validationError = validateRequired(body, ['model']); + if (validationError) { + return res.status(400).json(errorResponse(validationError, 'invalid_request_error', 'model')); + } + + const assistant: Assistant = { + id: state.generateAssistantId(), + object: 'assistant', + created_at: Math.floor(Date.now() / 1000), + name: body.name ?? null, + description: body.description ?? null, + model: body.model, + instructions: body.instructions ?? null, + tools: body.tools ?? [], + metadata: body.metadata ?? 
{} + }; + + state.createAssistant(assistant); + res.status(201).json(assistant); +}); + +// List assistants +router.get('/v1/assistants', (req: Request, res: Response) => { + const params = parsePaginationParams(req.query); + const result = state.listAssistants(params); + res.json(result); +}); + +// Get assistant +router.get('/v1/assistants/:assistant_id', (req: Request, res: Response) => { + const assistant = state.getAssistant(req.params.assistant_id); + if (!assistant) { + return res.status(404).json(notFoundError('assistant')); + } + res.json(assistant); +}); + +// Update assistant (POST for OpenAI compatibility) +router.post('/v1/assistants/:assistant_id', (req: Request, res: Response) => { + const body = req.body as UpdateAssistantRequest; + const updated = state.updateAssistant(req.params.assistant_id, body); + if (!updated) { + return res.status(404).json(notFoundError('assistant')); + } + res.json(updated); +}); + +// Delete assistant +router.delete('/v1/assistants/:assistant_id', (req: Request, res: Response) => { + const deleted = state.deleteAssistant(req.params.assistant_id); + res.json({ + id: req.params.assistant_id, + object: 'assistant.deleted', + deleted + }); +}); + +// ==================== Threads Routes ==================== + +// Create thread +router.post('/v1/threads', (req: Request, res: Response) => { + const body = (req.body || {}) as CreateThreadRequest; + + const thread: Thread = { + id: state.generateThreadId(), + object: 'thread', + created_at: Math.floor(Date.now() / 1000), + metadata: body.metadata ?? {} + }; + + state.createThread(thread); + + // Add initial messages if provided + if (body.messages && Array.isArray(body.messages)) { + for (const msg of body.messages) { + const content = typeof msg.content === 'string' + ? 
msg.content + : JSON.stringify(msg.content); + + const message = createMessage({ + threadId: thread.id, + messageId: state.generateMessageId(), + content, + role: msg.role || 'user', + attachments: msg.attachments ?? [], + metadata: msg.metadata ?? {} + }); + state.addMessage(thread.id, message); + } + } + + res.status(201).json(thread); +}); + +// Get thread +router.get('/v1/threads/:thread_id', (req: Request, res: Response) => { + const thread = state.getThread(req.params.thread_id); + if (!thread) { + return res.status(404).json(notFoundError('thread')); + } + res.json(thread); +}); + +// Update thread (POST for OpenAI compatibility) +router.post('/v1/threads/:thread_id', (req: Request, res: Response) => { + const updated = state.updateThread(req.params.thread_id, req.body); + if (!updated) { + return res.status(404).json(notFoundError('thread')); + } + res.json(updated); +}); + +// Delete thread +router.delete('/v1/threads/:thread_id', (req: Request, res: Response) => { + const deleted = state.deleteThread(req.params.thread_id); + res.json({ + id: req.params.thread_id, + object: 'thread.deleted', + deleted + }); +}); + +// ==================== Messages Routes ==================== + +// Create message +router.post('/v1/threads/:thread_id/messages', (req: Request, res: Response) => { + const { thread_id } = req.params; + const thread = state.getThread(thread_id); + if (!thread) { + return res.status(404).json(notFoundError('thread')); + } + + const body = req.body as CreateMessageRequest; + + const validationError = validateRequired(body, ['role', 'content']); + if (validationError) { + return res.status(400).json(errorResponse(validationError)); + } + + const content = typeof body.content === 'string' + ? body.content + : JSON.stringify(body.content); + + const message = createMessage({ + threadId: thread_id, + messageId: state.generateMessageId(), + content, + role: body.role, + attachments: body.attachments ?? [], + metadata: body.metadata ?? 
{} + }); + + state.addMessage(thread_id, message); + res.status(201).json(message); +}); + +// List messages +router.get('/v1/threads/:thread_id/messages', (req: Request, res: Response) => { + const { thread_id } = req.params; + const thread = state.getThread(thread_id); + if (!thread) { + return res.status(404).json(notFoundError('thread')); + } + + const params = { + ...parsePaginationParams(req.query), + run_id: req.query.run_id as string | undefined + }; + const result = state.getMessages(thread_id, params); + res.json(result); +}); + +// Get message +router.get('/v1/threads/:thread_id/messages/:message_id', (req: Request, res: Response) => { + const { thread_id, message_id } = req.params; + const thread = state.getThread(thread_id); + if (!thread) { + return res.status(404).json(notFoundError('thread')); + } + + const message = state.getMessage(thread_id, message_id); + if (!message) { + return res.status(404).json(notFoundError('message')); + } + res.json(message); +}); + +// Update message (POST for OpenAI compatibility) - only metadata can be updated +router.post('/v1/threads/:thread_id/messages/:message_id', (req: Request, res: Response) => { + const { thread_id, message_id } = req.params; + const thread = state.getThread(thread_id); + if (!thread) { + return res.status(404).json(notFoundError('thread')); + } + + // Only metadata updates allowed + const updated = state.updateMessage(thread_id, message_id, { + metadata: req.body.metadata + }); + if (!updated) { + return res.status(404).json(notFoundError('message')); + } + res.json(updated); +}); + +// ==================== Runs Routes ==================== + +// Create run +router.post('/v1/threads/:thread_id/runs', async (req: Request, res: Response) => { + const { thread_id } = req.params; + const thread = state.getThread(thread_id); + if (!thread) { + return res.status(404).json(notFoundError('thread')); + } + + const body = req.body as CreateRunRequest; + + const validationError = validateRequired(body, 
['assistant_id']); + if (validationError) { + return res.status(400).json(errorResponse(validationError, 'invalid_request_error', 'assistant_id')); + } + + const assistant = state.getAssistant(body.assistant_id); + if (!assistant) { + return res.status(404).json(notFoundError('assistant')); + } + + // Add additional messages if provided + if (body.additional_messages && Array.isArray(body.additional_messages)) { + for (const msg of body.additional_messages) { + const content = typeof msg.content === 'string' + ? msg.content + : JSON.stringify(msg.content); + + const message = createMessage({ + threadId: thread_id, + messageId: state.generateMessageId(), + content, + role: msg.role || 'user', + attachments: msg.attachments ?? [], + metadata: msg.metadata ?? {} + }); + state.addMessage(thread_id, message); + } + } + + const run: Run = { + id: state.generateRunId(), + object: 'thread.run', + created_at: Math.floor(Date.now() / 1000), + thread_id, + assistant_id: body.assistant_id, + status: 'queued', + required_action: null, + last_error: null, + expires_at: Math.floor(Date.now() / 1000) + 600, // 10 minutes + started_at: null, + cancelled_at: null, + failed_at: null, + completed_at: null, + incomplete_details: null, + model: body.model ?? assistant.model, + instructions: body.instructions ?? null, + tools: body.tools ?? assistant.tools, + metadata: body.metadata ?? 
{}, + usage: null + }; + + state.addRun(thread_id, run); + + // Check if streaming is requested + if (body.stream) { + // Streaming mode: use SSE + res.setHeader('Content-Type', 'text/event-stream'); + res.setHeader('Cache-Control', 'no-cache'); + res.setHeader('Connection', 'keep-alive'); + res.setHeader('X-Accel-Buffering', 'no'); + + // Execute run with streaming + (async () => { + try { + const generator = executeRun(thread_id, run.id, true); + for await (const event of generator) { + if (event.event === 'done') { + res.write(`event: done\ndata: [DONE]\n\n`); + } else { + res.write(`event: ${event.event}\ndata: ${JSON.stringify(event.data)}\n\n`); + } + } + } catch (err) { + console.error('Streaming run error:', err); + res.write(`event: error\ndata: ${JSON.stringify({ error: { message: 'Stream error' } })}\n\n`); + } finally { + res.end(); + } + })(); + } else { + // Non-streaming mode: return immediately, execute async + res.status(201).json(run); + + // Execute in background (don't await) + executeRunNonStreaming(thread_id, run.id).catch(err => { + console.error('Run execution failed:', err); + }); + } +}); + +// List runs +router.get('/v1/threads/:thread_id/runs', (req: Request, res: Response) => { + const { thread_id } = req.params; + const thread = state.getThread(thread_id); + if (!thread) { + return res.status(404).json(notFoundError('thread')); + } + + const params = parsePaginationParams(req.query); + const result = state.getRuns(thread_id, params); + res.json(result); +}); + +// Get run +router.get('/v1/threads/:thread_id/runs/:run_id', (req: Request, res: Response) => { + const { thread_id, run_id } = req.params; + const thread = state.getThread(thread_id); + if (!thread) { + return res.status(404).json(notFoundError('thread')); + } + + const run = state.getRun(thread_id, run_id); + if (!run) { + return res.status(404).json(notFoundError('run')); + } + res.json(run); +}); + +// Update run (POST for OpenAI compatibility) - only metadata can be 
updated +router.post('/v1/threads/:thread_id/runs/:run_id', (req: Request, res: Response) => { + const { thread_id, run_id } = req.params; + const thread = state.getThread(thread_id); + if (!thread) { + return res.status(404).json(notFoundError('thread')); + } + + const updated = state.updateRun(thread_id, run_id, { + metadata: req.body.metadata + }); + if (!updated) { + return res.status(404).json(notFoundError('run')); + } + res.json(updated); +}); + +// Cancel run +router.post('/v1/threads/:thread_id/runs/:run_id/cancel', (req: Request, res: Response) => { + const { thread_id, run_id } = req.params; + const thread = state.getThread(thread_id); + if (!thread) { + return res.status(404).json(notFoundError('thread')); + } + + const run = state.getRun(thread_id, run_id); + if (!run) { + return res.status(404).json(notFoundError('run')); + } + + // Check if run can be cancelled + const cancellableStatuses = ['queued', 'in_progress', 'requires_action']; + if (!cancellableStatuses.includes(run.status)) { + return res.status(400).json( + errorResponse(`Cannot cancel run with status: ${run.status}`, 'invalid_request_error', 'status') + ); + } + + // Request cancellation + requestRunCancellation(thread_id, run_id); + + const updated = state.updateRun(thread_id, run_id, { + status: 'cancelling', + cancelled_at: Math.floor(Date.now() / 1000) + }); + + // After a short delay, mark as cancelled + setTimeout(() => { + const currentRun = state.getRun(thread_id, run_id); + if (currentRun?.status === 'cancelling') { + state.updateRun(thread_id, run_id, { status: 'cancelled' }); + } + }, 100); + + res.json(updated); +}); + +// ==================== Create Thread and Run ==================== + +router.post('/v1/threads/runs', async (req: Request, res: Response) => { + const body = req.body as CreateThreadAndRunRequest; + + const validationError = validateRequired(body, ['assistant_id']); + if (validationError) { + return res.status(400).json(errorResponse(validationError, 
'invalid_request_error', 'assistant_id')); + } + + const assistant = state.getAssistant(body.assistant_id); + if (!assistant) { + return res.status(404).json(notFoundError('assistant')); + } + + // Create thread + const threadBody = body.thread ?? {}; + const thread: Thread = { + id: state.generateThreadId(), + object: 'thread', + created_at: Math.floor(Date.now() / 1000), + metadata: threadBody.metadata ?? {} + }; + + state.createThread(thread); + + // Add initial messages if provided + if (threadBody.messages && Array.isArray(threadBody.messages)) { + for (const msg of threadBody.messages) { + const content = typeof msg.content === 'string' + ? msg.content + : JSON.stringify(msg.content); + + const message = createMessage({ + threadId: thread.id, + messageId: state.generateMessageId(), + content, + role: msg.role || 'user', + attachments: msg.attachments ?? [], + metadata: msg.metadata ?? {} + }); + state.addMessage(thread.id, message); + } + } + + // Create run + const run: Run = { + id: state.generateRunId(), + object: 'thread.run', + created_at: Math.floor(Date.now() / 1000), + thread_id: thread.id, + assistant_id: body.assistant_id, + status: 'queued', + required_action: null, + last_error: null, + expires_at: Math.floor(Date.now() / 1000) + 600, + started_at: null, + cancelled_at: null, + failed_at: null, + completed_at: null, + incomplete_details: null, + model: body.model ?? assistant.model, + instructions: body.instructions ?? null, + tools: body.tools ?? assistant.tools, + metadata: body.metadata ?? 
{}, + usage: null + }; + + state.addRun(thread.id, run); + + // Check if streaming is requested + if (body.stream) { + // Streaming mode: use SSE + res.setHeader('Content-Type', 'text/event-stream'); + res.setHeader('Cache-Control', 'no-cache'); + res.setHeader('Connection', 'keep-alive'); + res.setHeader('X-Accel-Buffering', 'no'); + + // Execute run with streaming + (async () => { + try { + const generator = executeRun(thread.id, run.id, true); + for await (const event of generator) { + if (event.event === 'done') { + res.write(`event: done\ndata: [DONE]\n\n`); + } else { + res.write(`event: ${event.event}\ndata: ${JSON.stringify(event.data)}\n\n`); + } + } + } catch (err) { + console.error('Streaming run error:', err); + res.write(`event: error\ndata: ${JSON.stringify({ error: { message: 'Stream error' } })}\n\n`); + } finally { + res.end(); + } + })(); + } else { + // Non-streaming mode: return immediately, execute async + res.status(201).json(run); + + // Execute in background + executeRunNonStreaming(thread.id, run.id).catch(err => { + console.error('Run execution failed:', err); + }); + } +}); + +// ==================== Submit Tool Outputs ==================== + +router.post('/v1/threads/:thread_id/runs/:run_id/submit_tool_outputs', async (req: Request, res: Response) => { + const { thread_id, run_id } = req.params; + const thread = state.getThread(thread_id); + if (!thread) { + return res.status(404).json(notFoundError('thread')); + } + + const run = state.getRun(thread_id, run_id); + if (!run) { + return res.status(404).json(notFoundError('run')); + } + + // Check if run is in requires_action status + if (run.status !== 'requires_action') { + return res.status(400).json( + errorResponse( + `Run is not in requires_action status. 
Current status: ${run.status}`, + 'invalid_request_error', + 'status' + ) + ); + } + + const body = req.body as SubmitToolOutputsRequest; + + const validationError = validateRequired(body, ['tool_outputs']); + if (validationError) { + return res.status(400).json(errorResponse(validationError, 'invalid_request_error', 'tool_outputs')); + } + + // Validate that all required tool calls are provided + const requiredToolCallIds = new Set( + run.required_action?.submit_tool_outputs.tool_calls.map(tc => tc.id) ?? [] + ); + const providedToolCallIds = new Set(body.tool_outputs.map(o => o.tool_call_id)); + + for (const requiredId of requiredToolCallIds) { + if (!providedToolCallIds.has(requiredId)) { + return res.status(400).json( + errorResponse( + `Missing output for tool call: ${requiredId}`, + 'invalid_request_error', + 'tool_outputs' + ) + ); + } + } + + // Check if streaming is requested + if (body.stream) { + res.setHeader('Content-Type', 'text/event-stream'); + res.setHeader('Cache-Control', 'no-cache'); + res.setHeader('Connection', 'keep-alive'); + res.setHeader('X-Accel-Buffering', 'no'); + + (async () => { + try { + const generator = continueRunWithToolOutputs(thread_id, run_id, body.tool_outputs, true); + for await (const event of generator) { + if (event.event === 'done') { + res.write(`event: done\ndata: [DONE]\n\n`); + } else { + res.write(`event: ${event.event}\ndata: ${JSON.stringify(event.data)}\n\n`); + } + } + } catch (err) { + console.error('Streaming continue error:', err); + res.write(`event: error\ndata: ${JSON.stringify({ error: { message: 'Stream error' } })}\n\n`); + } finally { + res.end(); + } + })(); + } else { + // Non-streaming mode: return the run immediately, execute async + const updatedRun = state.updateRun(thread_id, run_id, { + status: 'in_progress', + required_action: null + }); + res.json(updatedRun); + + // Continue execution in background + continueRunWithToolOutputsNonStreaming(thread_id, run_id, body.tool_outputs).catch(err => { 
+ console.error('Continue run failed:', err); + }); + } +}); + +// ==================== Run Steps ==================== + +// List run steps +router.get('/v1/threads/:thread_id/runs/:run_id/steps', (req: Request, res: Response) => { + const { thread_id, run_id } = req.params; + const thread = state.getThread(thread_id); + if (!thread) { + return res.status(404).json(notFoundError('thread')); + } + + const run = state.getRun(thread_id, run_id); + if (!run) { + return res.status(404).json(notFoundError('run')); + } + + const params = parsePaginationParams(req.query); + const result = state.getRunSteps(run_id, params); + res.json(result); +}); + +// Get run step +router.get('/v1/threads/:thread_id/runs/:run_id/steps/:step_id', (req: Request, res: Response) => { + const { thread_id, run_id, step_id } = req.params; + const thread = state.getThread(thread_id); + if (!thread) { + return res.status(404).json(notFoundError('thread')); + } + + const run = state.getRun(thread_id, run_id); + if (!run) { + return res.status(404).json(notFoundError('run')); + } + + const step = state.getRunStep(run_id, step_id); + if (!step) { + return res.status(404).json(notFoundError('run step')); + } + res.json(step); +}); + +export default router; diff --git a/src/assistants/runner.ts b/src/assistants/runner.ts new file mode 100644 index 0000000..34a6817 --- /dev/null +++ b/src/assistants/runner.ts @@ -0,0 +1,940 @@ +/** + * Run Execution Engine + * + * Executes runs with support for: + * - Streaming mode (yields SSE events) + * - Non-streaming mode (returns promise) + * - Run steps tracking + * - Cancellation support + * - Tool calling (prompt-based) + * + * The executeRun function is a generator that yields StreamEvent objects. + * For non-streaming, consume all events and ignore them. + * For streaming, pipe events to SSE response. 
+ */ + +import { state, PendingToolContext } from './state'; +import { processChatRequest } from '../extension'; +import { ChatCompletionRequest, ChatCompletionChunk, ChatCompletionResponse, ChatMessage } from '../types'; +import { createMessage } from '../utils'; +import { assistantToolsToFunctionTools } from '../toolConvert'; +import { + Run, + Message, + RunStep, + TextContent, + MessageContent, + StreamEvent, + MessageDelta, + ToolCall, + ToolOutput, +} from './types'; + +// Active runs that can be cancelled +const activeRuns = new Map(); + +/** + * Extract text content from MessageContent array + */ +function extractTextFromContent(content: MessageContent[]): string { + return content + .filter((c): c is TextContent => c.type === 'text') + .map(c => c.text.value) + .join('\n'); +} + +/** + * Create a stream event + */ +function createEvent(event: StreamEvent['event'], data: unknown): StreamEvent { + return { event, data }; +} + +/** + * Execute a run as an async generator + * Yields StreamEvent objects for SSE streaming + * + * @param threadId - The thread ID + * @param runId - The run ID + * @param streaming - Whether to yield intermediate events + */ +export async function* executeRun( + threadId: string, + runId: string, + streaming: boolean = false +): AsyncGenerator { + const runKey = `${threadId}:${runId}`; + activeRuns.set(runKey, { cancelled: false }); + + try { + const run = state.getRun(threadId, runId); + const thread = state.getThread(threadId); + + if (!run || !thread) { + state.updateRun(threadId, runId, { + status: 'failed', + failed_at: Math.floor(Date.now() / 1000), + last_error: { code: 'server_error', message: 'Thread or run not found' } + }); + if (streaming) { + yield createEvent('thread.run.failed', state.getRun(threadId, runId)); + yield createEvent('done', '[DONE]'); + } + return; + } + + const assistant = state.getAssistant(run.assistant_id); + if (!assistant) { + state.updateRun(threadId, runId, { + status: 'failed', + failed_at: 
Math.floor(Date.now() / 1000), + last_error: { code: 'server_error', message: 'Assistant not found' } + }); + if (streaming) { + yield createEvent('thread.run.failed', state.getRun(threadId, runId)); + yield createEvent('done', '[DONE]'); + } + return; + } + + // Check for cancellation + if (activeRuns.get(runKey)?.cancelled) { + state.updateRun(threadId, runId, { + status: 'cancelled', + cancelled_at: Math.floor(Date.now() / 1000) + }); + if (streaming) { + yield createEvent('thread.run.cancelled', state.getRun(threadId, runId)); + yield createEvent('done', '[DONE]'); + } + return; + } + + // Emit run queued event + if (streaming) { + yield createEvent('thread.run.queued', run); + } + + // Mark as in progress + state.updateRun(threadId, runId, { + status: 'in_progress', + started_at: Math.floor(Date.now() / 1000) + }); + + if (streaming) { + yield createEvent('thread.run.in_progress', state.getRun(threadId, runId)); + } + + // Build messages array from thread + const threadMessages = state.getMessages(threadId, { order: 'asc' }); + const chatMessages: ChatMessage[] = []; + + // Build system instructions (assistant instructions + run overrides) + // Tools are passed natively via processChatRequest, not injected into system prompt + let systemContent = ''; + if (assistant.instructions) { + systemContent += assistant.instructions; + } + if (run.instructions) { + systemContent += (systemContent ? '\n\n' : '') + run.instructions; + } + + // Get tools for native passing + const tools = run.tools.length > 0 ? 
run.tools : assistant.tools; + const functionTools = assistantToolsToFunctionTools(tools); + + // Convert thread messages to chat messages + // Prepend system content to the first user message + let systemPrepended = false; + + for (const msg of threadMessages.data) { + const textContent = extractTextFromContent(msg.content); + + if (msg.role === 'user' && !systemPrepended && systemContent) { + // Prepend system instructions to first user message + chatMessages.push({ + role: 'user', + content: `${systemContent}\n\n---\n\n${textContent}` + }); + systemPrepended = true; + } else { + chatMessages.push({ + role: msg.role, + content: textContent + }); + } + } + + // If no user messages but we have system content, add it as a user message + if (!systemPrepended && systemContent) { + chatMessages.unshift({ + role: 'user', + content: systemContent + }); + } + + // Check for cancellation before calling LLM + if (activeRuns.get(runKey)?.cancelled) { + state.updateRun(threadId, runId, { + status: 'cancelled', + cancelled_at: Math.floor(Date.now() / 1000) + }); + if (streaming) { + yield createEvent('thread.run.cancelled', state.getRun(threadId, runId)); + yield createEvent('done', '[DONE]'); + } + return; + } + + // Create message_creation run step + const stepId = state.generateStepId(); + const messageId = state.generateMessageId(); + + const runStep: RunStep = { + id: stepId, + object: 'thread.run.step', + created_at: Math.floor(Date.now() / 1000), + run_id: runId, + assistant_id: assistant.id, + thread_id: threadId, + type: 'message_creation', + status: 'in_progress', + cancelled_at: null, + completed_at: null, + expired_at: null, + failed_at: null, + last_error: null, + step_details: { + type: 'message_creation', + message_creation: { + message_id: messageId + } + }, + usage: null + }; + + state.addRunStep(runId, runStep); + + if (streaming) { + yield createEvent('thread.run.step.created', runStep); + yield createEvent('thread.run.step.in_progress', runStep); + } + + // 
Build request - use streaming mode if requested, pass tools natively + const request: ChatCompletionRequest = { + model: run.model || assistant.model, + messages: chatMessages, + stream: streaming, + ...(functionTools.length > 0 ? { tools: functionTools } : {}), + }; + + let fullContent = ''; + let promptTokens = 0; + let completionTokens = 0; + + if (streaming) { + // Streaming mode: yield message deltas + const streamIterator = await processChatRequest(request) as AsyncIterable; + + // Create message in progress + const assistantMessage = createMessage({ + threadId, + messageId, + content: '', + role: 'assistant', + assistantId: assistant.id, + runId, + status: 'in_progress', + }); + + state.addMessage(threadId, assistantMessage); + yield createEvent('thread.message.created', assistantMessage); + yield createEvent('thread.message.in_progress', assistantMessage); + + let deltaIndex = 0; + const accumulatedToolCalls: ToolCall[] = []; + + for await (const chunk of streamIterator) { + // Check for cancellation + if (activeRuns.get(runKey)?.cancelled) { + state.updateRun(threadId, runId, { + status: 'cancelled', + cancelled_at: Math.floor(Date.now() / 1000) + }); + state.updateRunStep(runId, stepId, { + status: 'cancelled', + cancelled_at: Math.floor(Date.now() / 1000) + }); + yield createEvent('thread.run.step.cancelled', state.getRunStep(runId, stepId)); + yield createEvent('thread.run.cancelled', state.getRun(threadId, runId)); + yield createEvent('done', '[DONE]'); + return; + } + + const content = chunk.choices[0]?.delta?.content ?? 
''; + if (content) { + fullContent += content; + + // Emit message delta + const delta: MessageDelta = { + id: messageId, + object: 'thread.message.delta', + delta: { + content: [{ + index: deltaIndex, + type: 'text', + text: { + value: content + } + }] + } + }; + yield createEvent('thread.message.delta', delta); + deltaIndex++; + } + + // Check for native tool calls in delta + const chunkToolCalls = chunk.choices[0]?.delta?.tool_calls; + if (chunkToolCalls) { + for (const tc of chunkToolCalls) { + if (tc.id && tc.function?.name) { + accumulatedToolCalls.push({ + id: tc.id, + type: 'function', + function: { + name: tc.function.name, + arguments: tc.function.arguments ?? '{}', + }, + }); + } + } + } + + // Check for finish reason + if (chunk.choices[0]?.finish_reason === 'stop' || chunk.choices[0]?.finish_reason === 'tool_calls') { + break; + } + } + + // Estimate tokens (rough approximation) + completionTokens = fullContent.length; + promptTokens = chatMessages.reduce((sum, m) => sum + (typeof m.content === 'string' ? 
m.content.length : 0), 0); + + // Update message with full content + state.updateMessage(threadId, messageId, { + status: 'completed', + completed_at: Math.floor(Date.now() / 1000), + content: [{ + type: 'text', + text: { + value: fullContent, + annotations: [] + } + }] + }); + + yield createEvent('thread.message.completed', state.getMessage(threadId, messageId)); + + // Handle tool calls detected during streaming + if (accumulatedToolCalls.length > 0) { + // Complete the message_creation step + state.updateRunStep(runId, stepId, { + status: 'completed', + completed_at: Math.floor(Date.now() / 1000) + }); + + // Create tool_calls run step + const toolStepId = state.generateStepId(); + const toolStep: RunStep = { + id: toolStepId, + object: 'thread.run.step', + created_at: Math.floor(Date.now() / 1000), + run_id: runId, + assistant_id: assistant.id, + thread_id: threadId, + type: 'tool_calls', + status: 'in_progress', + cancelled_at: null, + completed_at: null, + expired_at: null, + failed_at: null, + last_error: null, + step_details: { + type: 'tool_calls', + tool_calls: accumulatedToolCalls + }, + usage: null + }; + state.addRunStep(runId, toolStep); + + // Save context for when tool outputs are submitted + const pendingContext: PendingToolContext = { + runId, + threadId, + toolCalls: accumulatedToolCalls, + partialContent: fullContent, + stepId: toolStepId + }; + state.setPendingToolContext(runId, pendingContext); + + // Update run to requires_action + state.updateRun(threadId, runId, { + status: 'requires_action', + required_action: { + type: 'submit_tool_outputs', + submit_tool_outputs: { + tool_calls: accumulatedToolCalls + } + } + }); + + yield createEvent('thread.run.step.created', toolStep); + yield createEvent('thread.run.requires_action', state.getRun(threadId, runId)); + yield createEvent('done', '[DONE]'); + return; + } + + } else { + // Non-streaming mode + const response = await processChatRequest(request) as ChatCompletionResponse; + + // Check for 
cancellation after LLM response + if (activeRuns.get(runKey)?.cancelled) { + state.updateRun(threadId, runId, { + status: 'cancelled', + cancelled_at: Math.floor(Date.now() / 1000) + }); + state.updateRunStep(runId, stepId, { + status: 'cancelled', + cancelled_at: Math.floor(Date.now() / 1000) + }); + return; + } + + // Extract response content + const responseContent = response.choices[0]?.message?.content; + if (!responseContent) { + state.updateRun(threadId, runId, { + status: 'failed', + failed_at: Math.floor(Date.now() / 1000), + last_error: { code: 'server_error', message: 'Empty response from model' } + }); + state.updateRunStep(runId, stepId, { + status: 'failed', + failed_at: Math.floor(Date.now() / 1000), + last_error: { code: 'server_error', message: 'Empty response from model' } + }); + return; + } + + fullContent = typeof responseContent === 'string' + ? responseContent + : JSON.stringify(responseContent); + + promptTokens = response.usage?.prompt_tokens ?? 0; + completionTokens = response.usage?.completion_tokens ?? 
fullContent.length; + + // Check for native tool calls in the response + const responseToolCalls = response.choices?.[0]?.message?.tool_calls; + if (responseToolCalls && responseToolCalls.length > 0) { + // Complete the message_creation step (with partial content if any) + state.updateRunStep(runId, stepId, { + status: 'completed', + completed_at: Math.floor(Date.now() / 1000) + }); + + // Create tool_calls run step + const toolStepId = state.generateStepId(); + const toolStep: RunStep = { + id: toolStepId, + object: 'thread.run.step', + created_at: Math.floor(Date.now() / 1000), + run_id: runId, + assistant_id: assistant.id, + thread_id: threadId, + type: 'tool_calls', + status: 'in_progress', + cancelled_at: null, + completed_at: null, + expired_at: null, + failed_at: null, + last_error: null, + step_details: { + type: 'tool_calls', + tool_calls: responseToolCalls + }, + usage: null + }; + state.addRunStep(runId, toolStep); + + // Save context for when tool outputs are submitted + const pendingContext: PendingToolContext = { + runId, + threadId, + toolCalls: responseToolCalls, + partialContent: fullContent, + stepId: toolStepId + }; + state.setPendingToolContext(runId, pendingContext); + + // Update run to requires_action + state.updateRun(threadId, runId, { + status: 'requires_action', + required_action: { + type: 'submit_tool_outputs', + submit_tool_outputs: { + tool_calls: responseToolCalls + } + } + }); + + // Don't create message yet - wait for tool outputs + return; + } + + // Create assistant message + const assistantMessage = createMessage({ + threadId, + messageId, + content: fullContent, + role: 'assistant', + assistantId: assistant.id, + runId, + }); + + state.addMessage(threadId, assistantMessage); + } + + // Update run step as completed + const usage = { + prompt_tokens: promptTokens, + completion_tokens: completionTokens, + total_tokens: promptTokens + completionTokens + }; + + state.updateRunStep(runId, stepId, { + status: 'completed', + 
completed_at: Math.floor(Date.now() / 1000), + usage + }); + + if (streaming) { + yield createEvent('thread.run.step.completed', state.getRunStep(runId, stepId)); + } + + // Mark run as completed + state.updateRun(threadId, runId, { + status: 'completed', + completed_at: Math.floor(Date.now() / 1000), + usage + }); + + if (streaming) { + yield createEvent('thread.run.completed', state.getRun(threadId, runId)); + yield createEvent('done', '[DONE]'); + } + + } catch (error) { + console.error('Run execution error:', error); + state.updateRun(threadId, runId, { + status: 'failed', + failed_at: Math.floor(Date.now() / 1000), + last_error: { + code: 'server_error', + message: error instanceof Error ? error.message : 'Unknown error' + } + }); + + if (streaming) { + yield createEvent('error', { + error: { + message: error instanceof Error ? error.message : 'Unknown error', + code: 'server_error' + } + }); + yield createEvent('thread.run.failed', state.getRun(threadId, runId)); + yield createEvent('done', '[DONE]'); + } + } finally { + activeRuns.delete(runKey); + } +} + +/** + * Execute run without streaming (convenience wrapper) + * Consumes all events and returns when complete + */ +export async function executeRunNonStreaming(threadId: string, runId: string): Promise { + const generator = executeRun(threadId, runId, false); + // Consume all events + for await (const _ of generator) { + // Discard events in non-streaming mode + } +} + +/** + * Request cancellation of a run + */ +export function requestRunCancellation(threadId: string, runId: string): boolean { + const runKey = `${threadId}:${runId}`; + const activeRun = activeRuns.get(runKey); + if (activeRun) { + activeRun.cancelled = true; + return true; + } + return false; +} + +/** + * Check if a run is currently active + */ +export function isRunActive(threadId: string, runId: string): boolean { + return activeRuns.has(`${threadId}:${runId}`); +} + +/** + * Continue a run after tool outputs have been submitted + * 
This resumes execution by adding tool results to the conversation and calling the model again + */ +export async function* continueRunWithToolOutputs( + threadId: string, + runId: string, + toolOutputs: ToolOutput[], + streaming: boolean = false +): AsyncGenerator { + const runKey = `${threadId}:${runId}`; + activeRuns.set(runKey, { cancelled: false }); + + try { + const run = state.getRun(threadId, runId); + const pendingContext = state.getPendingToolContext(runId); + + if (!run || !pendingContext) { + state.updateRun(threadId, runId, { + status: 'failed', + failed_at: Math.floor(Date.now() / 1000), + last_error: { code: 'server_error', message: 'Run or pending context not found' } + }); + if (streaming) { + yield createEvent('thread.run.failed', state.getRun(threadId, runId)); + yield createEvent('done', '[DONE]'); + } + return; + } + + const assistant = state.getAssistant(run.assistant_id); + if (!assistant) { + state.updateRun(threadId, runId, { + status: 'failed', + failed_at: Math.floor(Date.now() / 1000), + last_error: { code: 'server_error', message: 'Assistant not found' } + }); + if (streaming) { + yield createEvent('thread.run.failed', state.getRun(threadId, runId)); + yield createEvent('done', '[DONE]'); + } + return; + } + + // Update run status back to in_progress + state.updateRun(threadId, runId, { + status: 'in_progress', + required_action: null + }); + + if (streaming) { + yield createEvent('thread.run.in_progress', state.getRun(threadId, runId)); + } + + // Complete the tool_calls step + state.updateRunStep(runId, pendingContext.stepId, { + status: 'completed', + completed_at: Math.floor(Date.now() / 1000) + }); + + if (streaming) { + yield createEvent('thread.run.step.completed', state.getRunStep(runId, pendingContext.stepId)); + } + + // Build messages array including tool results + const threadMessages = state.getMessages(threadId, { order: 'asc' }); + const chatMessages: ChatMessage[] = []; + + // Build system instructions + let systemContent 
= ''; + if (assistant.instructions) { + systemContent += assistant.instructions; + } + if (run.instructions) { + systemContent += (systemContent ? '\n\n' : '') + run.instructions; + } + + // Get tools for native passing + const tools = run.tools.length > 0 ? run.tools : assistant.tools; + const functionTools = assistantToolsToFunctionTools(tools); + + // Convert thread messages to chat messages + let systemPrepended = false; + for (const msg of threadMessages.data) { + const textContent = extractTextFromContent(msg.content); + + if (msg.role === 'user' && !systemPrepended && systemContent) { + chatMessages.push({ + role: 'user', + content: `${systemContent}\n\n---\n\n${textContent}` + }); + systemPrepended = true; + } else { + chatMessages.push({ + role: msg.role, + content: textContent + }); + } + } + + // If no user messages but we have system content, add it + if (!systemPrepended && systemContent) { + chatMessages.unshift({ + role: 'user', + content: systemContent + }); + } + + // Add the assistant message with tool calls (as native tool call parts) + if (pendingContext.toolCalls.length > 0) { + if (pendingContext.partialContent) { + // Include partial content with the tool call assistant message + } + // Add assistant message with tool_calls for the conversation history + chatMessages.push({ + role: 'assistant', + content: pendingContext.partialContent || null, + tool_calls: pendingContext.toolCalls, + }); + } + + // Add tool results as individual tool messages + for (const output of toolOutputs) { + chatMessages.push({ + role: 'tool', + tool_call_id: output.tool_call_id, + content: output.output, + }); + } + + // Clear pending context + state.deletePendingToolContext(runId); + + // Create new message_creation step for the continuation + const stepId = state.generateStepId(); + const messageId = state.generateMessageId(); + + const runStep: RunStep = { + id: stepId, + object: 'thread.run.step', + created_at: Math.floor(Date.now() / 1000), + run_id: runId, + 
assistant_id: assistant.id, + thread_id: threadId, + type: 'message_creation', + status: 'in_progress', + cancelled_at: null, + completed_at: null, + expired_at: null, + failed_at: null, + last_error: null, + step_details: { + type: 'message_creation', + message_creation: { + message_id: messageId + } + }, + usage: null + }; + + state.addRunStep(runId, runStep); + + if (streaming) { + yield createEvent('thread.run.step.created', runStep); + yield createEvent('thread.run.step.in_progress', runStep); + } + + // Build request with native tool support + const request: ChatCompletionRequest = { + model: run.model || assistant.model, + messages: chatMessages, + stream: streaming, + ...(functionTools.length > 0 ? { tools: functionTools } : {}), + }; + + let fullContent = ''; + let promptTokens = 0; + let completionTokens = 0; + + // Non-streaming continuation + const response = await processChatRequest(request) as ChatCompletionResponse; + + const responseContent = response.choices[0]?.message?.content; + if (!responseContent) { + state.updateRun(threadId, runId, { + status: 'failed', + failed_at: Math.floor(Date.now() / 1000), + last_error: { code: 'server_error', message: 'Empty response from model' } + }); + return; + } + + fullContent = typeof responseContent === 'string' + ? responseContent + : JSON.stringify(responseContent); + + promptTokens = response.usage?.prompt_tokens ?? 0; + completionTokens = response.usage?.completion_tokens ?? 
fullContent.length; + + // Check for more tool calls (native) + const responseToolCalls = response.choices?.[0]?.message?.tool_calls; + if (responseToolCalls && responseToolCalls.length > 0) { + // Complete the message_creation step + state.updateRunStep(runId, stepId, { + status: 'completed', + completed_at: Math.floor(Date.now() / 1000) + }); + + // Create new tool_calls step + const toolStepId = state.generateStepId(); + const toolStep: RunStep = { + id: toolStepId, + object: 'thread.run.step', + created_at: Math.floor(Date.now() / 1000), + run_id: runId, + assistant_id: assistant.id, + thread_id: threadId, + type: 'tool_calls', + status: 'in_progress', + cancelled_at: null, + completed_at: null, + expired_at: null, + failed_at: null, + last_error: null, + step_details: { + type: 'tool_calls', + tool_calls: responseToolCalls + }, + usage: null + }; + state.addRunStep(runId, toolStep); + + // Save context for next round + const newPendingContext: PendingToolContext = { + runId, + threadId, + toolCalls: responseToolCalls, + partialContent: fullContent, + stepId: toolStepId + }; + state.setPendingToolContext(runId, newPendingContext); + + // Update run to requires_action again + state.updateRun(threadId, runId, { + status: 'requires_action', + required_action: { + type: 'submit_tool_outputs', + submit_tool_outputs: { + tool_calls: responseToolCalls + } + } + }); + + if (streaming) { + yield createEvent('thread.run.step.created', toolStep); + yield createEvent('thread.run.requires_action', state.getRun(threadId, runId)); + yield createEvent('done', '[DONE]'); + } + return; + } + + // Create assistant message + const assistantMessage = createMessage({ + threadId, + messageId, + content: fullContent, + role: 'assistant', + assistantId: assistant.id, + runId, + }); + + state.addMessage(threadId, assistantMessage); + + if (streaming) { + yield createEvent('thread.message.created', assistantMessage); + yield createEvent('thread.message.completed', assistantMessage); + } 
+ + // Update run step + const usage = { + prompt_tokens: promptTokens, + completion_tokens: completionTokens, + total_tokens: promptTokens + completionTokens + }; + + state.updateRunStep(runId, stepId, { + status: 'completed', + completed_at: Math.floor(Date.now() / 1000), + usage + }); + + if (streaming) { + yield createEvent('thread.run.step.completed', state.getRunStep(runId, stepId)); + } + + // Mark run as completed + state.updateRun(threadId, runId, { + status: 'completed', + completed_at: Math.floor(Date.now() / 1000), + usage + }); + + if (streaming) { + yield createEvent('thread.run.completed', state.getRun(threadId, runId)); + yield createEvent('done', '[DONE]'); + } + + } catch (error) { + console.error('Continue run error:', error); + state.updateRun(threadId, runId, { + status: 'failed', + failed_at: Math.floor(Date.now() / 1000), + last_error: { + code: 'server_error', + message: error instanceof Error ? error.message : 'Unknown error' + } + }); + + if (streaming) { + yield createEvent('error', { + error: { + message: error instanceof Error ? error.message : 'Unknown error', + code: 'server_error' + } + }); + yield createEvent('thread.run.failed', state.getRun(threadId, runId)); + yield createEvent('done', '[DONE]'); + } + } finally { + activeRuns.delete(runKey); + } +} + +/** + * Continue run with tool outputs (non-streaming wrapper) + */ +export async function continueRunWithToolOutputsNonStreaming( + threadId: string, + runId: string, + toolOutputs: ToolOutput[] +): Promise { + const generator = continueRunWithToolOutputs(threadId, runId, toolOutputs, false); + for await (const _ of generator) { + // Discard events + } +} diff --git a/src/assistants/state.ts b/src/assistants/state.ts new file mode 100644 index 0000000..306686e --- /dev/null +++ b/src/assistants/state.ts @@ -0,0 +1,424 @@ +/** + * In-Memory State Management for Assistants API + * + * Stores assistants, threads, messages, runs, and run steps in memory. 
+ * Supports persistence via callbacks for VS Code globalState integration. + * + * Features: + * - Debounced auto-save on mutations + * - Run steps tracking + * - Serialization/deserialization for persistence + */ + +import { + Assistant, + Thread, + Message, + Run, + RunStep, + PaginationParams, + OpenAIListResponse, + ToolCall +} from './types'; +import { generateId } from '../utils'; + +// Context saved when a run requires tool outputs +export interface PendingToolContext { + runId: string; + threadId: string; + toolCalls: ToolCall[]; + partialContent: string; // Text generated before tool calls + stepId: string; // The tool_calls step ID +} + +// Persistence callback type +type PersistCallback = (data: SerializedState) => void; + +// Serialized state structure +export interface SerializedState { + assistants: [string, Assistant][]; + threads: [string, Thread][]; + messages: [string, Message[]][]; + runs: [string, Run[]][]; + runSteps: [string, RunStep[]][]; +} + +class AssistantsState { + private assistants: Map = new Map(); + private threads: Map = new Map(); + private messages: Map = new Map(); // thread_id -> messages + private runs: Map = new Map(); // thread_id -> runs + private runSteps: Map = new Map(); // run_id -> steps + private pendingToolContexts: Map = new Map(); // run_id -> context + + // Persistence + private persistCallback: PersistCallback | null = null; + private persistDebounceTimer: NodeJS.Timeout | null = null; + private persistDebounceMs = 1000; // 1 second debounce + + // ==================== Persistence ==================== + + /** + * Set callback for persisting state changes + * Called with debounced delay after mutations + */ + setPersistCallback(callback: PersistCallback | null, debounceMs = 1000): void { + this.persistCallback = callback; + this.persistDebounceMs = debounceMs; + } + + private triggerPersist(): void { + if (!this.persistCallback) return; + + // Clear existing timer + if (this.persistDebounceTimer) { + 
clearTimeout(this.persistDebounceTimer); + } + + // Set new debounced timer + this.persistDebounceTimer = setTimeout(() => { + if (this.persistCallback) { + this.persistCallback(this.serialize()); + } + }, this.persistDebounceMs); + } + + // ==================== ID Generators ==================== + + generateAssistantId(): string { return generateId('asst'); } + generateThreadId(): string { return generateId('thread'); } + generateMessageId(): string { return generateId('msg'); } + generateRunId(): string { return generateId('run'); } + generateStepId(): string { return generateId('step'); } + + // ==================== Assistants ==================== + + createAssistant(assistant: Assistant): void { + this.assistants.set(assistant.id, assistant); + this.triggerPersist(); + } + + getAssistant(id: string): Assistant | undefined { + return this.assistants.get(id); + } + + listAssistants(params?: PaginationParams): OpenAIListResponse { + let assistants = Array.from(this.assistants.values()); + + // Sort by created_at + const order = params?.order ?? 'desc'; + assistants.sort((a, b) => + order === 'desc' ? b.created_at - a.created_at : a.created_at - b.created_at + ); + + // Apply cursor-based pagination + if (params?.after) { + const afterIndex = assistants.findIndex(a => a.id === params.after); + if (afterIndex !== -1) { + assistants = assistants.slice(afterIndex + 1); + } + } + if (params?.before) { + const beforeIndex = assistants.findIndex(a => a.id === params.before); + if (beforeIndex !== -1) { + assistants = assistants.slice(0, beforeIndex); + } + } + + const limit = Math.min(params?.limit ?? 20, 100); + const hasMore = assistants.length > limit; + assistants = assistants.slice(0, limit); + + return { + object: 'list', + data: assistants, + first_id: assistants[0]?.id ?? null, + last_id: assistants[assistants.length - 1]?.id ?? 
null, + has_more: hasMore + }; + } + + updateAssistant(id: string, updates: Partial): Assistant | undefined { + const existing = this.assistants.get(id); + if (!existing) return undefined; + const updated = { ...existing, ...updates, id: existing.id }; // Prevent ID change + this.assistants.set(id, updated); + this.triggerPersist(); + return updated; + } + + deleteAssistant(id: string): boolean { + const result = this.assistants.delete(id); + if (result) this.triggerPersist(); + return result; + } + + // ==================== Threads ==================== + + createThread(thread: Thread): void { + this.threads.set(thread.id, thread); + this.messages.set(thread.id, []); + this.runs.set(thread.id, []); + this.triggerPersist(); + } + + getThread(id: string): Thread | undefined { + return this.threads.get(id); + } + + updateThread(id: string, updates: Partial): Thread | undefined { + const existing = this.threads.get(id); + if (!existing) return undefined; + const updated = { ...existing, ...updates, id: existing.id }; + this.threads.set(id, updated); + this.triggerPersist(); + return updated; + } + + deleteThread(id: string): boolean { + // Also clean up run steps for runs in this thread + const threadRuns = this.runs.get(id) || []; + for (const run of threadRuns) { + this.runSteps.delete(run.id); + } + this.messages.delete(id); + this.runs.delete(id); + const result = this.threads.delete(id); + if (result) this.triggerPersist(); + return result; + } + + // ==================== Messages ==================== + + addMessage(threadId: string, message: Message): void { + const threadMessages = this.messages.get(threadId) || []; + threadMessages.push(message); + this.messages.set(threadId, threadMessages); + this.triggerPersist(); + } + + getMessages(threadId: string, params?: PaginationParams & { run_id?: string }): OpenAIListResponse { + let messages = this.messages.get(threadId) || []; + + // Filter by run_id if specified + if (params?.run_id) { + messages = 
messages.filter(m => m.run_id === params.run_id); + } + + // Sort by created_at + const order = params?.order ?? 'desc'; + messages = [...messages].sort((a, b) => + order === 'desc' ? b.created_at - a.created_at : a.created_at - b.created_at + ); + + // Apply cursor-based pagination + if (params?.after) { + const afterIndex = messages.findIndex(m => m.id === params.after); + if (afterIndex !== -1) { + messages = messages.slice(afterIndex + 1); + } + } + if (params?.before) { + const beforeIndex = messages.findIndex(m => m.id === params.before); + if (beforeIndex !== -1) { + messages = messages.slice(0, beforeIndex); + } + } + + const limit = Math.min(params?.limit ?? 20, 100); + const hasMore = messages.length > limit; + messages = messages.slice(0, limit); + + return { + object: 'list', + data: messages, + first_id: messages[0]?.id ?? null, + last_id: messages[messages.length - 1]?.id ?? null, + has_more: hasMore + }; + } + + getMessage(threadId: string, messageId: string): Message | undefined { + const messages = this.messages.get(threadId) || []; + return messages.find(m => m.id === messageId); + } + + updateMessage(threadId: string, messageId: string, updates: Partial): Message | undefined { + const messages = this.messages.get(threadId); + if (!messages) return undefined; + const index = messages.findIndex(m => m.id === messageId); + if (index === -1) return undefined; + messages[index] = { ...messages[index], ...updates, id: messages[index].id }; + this.triggerPersist(); + return messages[index]; + } + + // ==================== Runs ==================== + + addRun(threadId: string, run: Run): void { + const threadRuns = this.runs.get(threadId) || []; + threadRuns.push(run); + this.runs.set(threadId, threadRuns); + this.runSteps.set(run.id, []); // Initialize steps for this run + this.triggerPersist(); + } + + getRuns(threadId: string, params?: PaginationParams): OpenAIListResponse { + let runs = this.runs.get(threadId) || []; + + // Sort by created_at + const 
order = params?.order ?? 'desc'; + runs = [...runs].sort((a, b) => + order === 'desc' ? b.created_at - a.created_at : a.created_at - b.created_at + ); + + // Apply cursor-based pagination + if (params?.after) { + const afterIndex = runs.findIndex(r => r.id === params.after); + if (afterIndex !== -1) { + runs = runs.slice(afterIndex + 1); + } + } + if (params?.before) { + const beforeIndex = runs.findIndex(r => r.id === params.before); + if (beforeIndex !== -1) { + runs = runs.slice(0, beforeIndex); + } + } + + const limit = Math.min(params?.limit ?? 20, 100); + const hasMore = runs.length > limit; + runs = runs.slice(0, limit); + + return { + object: 'list', + data: runs, + first_id: runs[0]?.id ?? null, + last_id: runs[runs.length - 1]?.id ?? null, + has_more: hasMore + }; + } + + getRun(threadId: string, runId: string): Run | undefined { + const runs = this.runs.get(threadId) || []; + return runs.find(r => r.id === runId); + } + + updateRun(threadId: string, runId: string, updates: Partial): Run | undefined { + const runs = this.runs.get(threadId); + if (!runs) return undefined; + const index = runs.findIndex(r => r.id === runId); + if (index === -1) return undefined; + runs[index] = { ...runs[index], ...updates, id: runs[index].id }; + this.triggerPersist(); + return runs[index]; + } + + // ==================== Run Steps ==================== + + addRunStep(runId: string, step: RunStep): void { + const steps = this.runSteps.get(runId) || []; + steps.push(step); + this.runSteps.set(runId, steps); + this.triggerPersist(); + } + + getRunSteps(runId: string, params?: PaginationParams): OpenAIListResponse { + let steps = this.runSteps.get(runId) || []; + + // Sort by created_at + const order = params?.order ?? 'desc'; + steps = [...steps].sort((a, b) => + order === 'desc' ? 
b.created_at - a.created_at : a.created_at - b.created_at + ); + + // Apply cursor-based pagination + if (params?.after) { + const afterIndex = steps.findIndex(s => s.id === params.after); + if (afterIndex !== -1) { + steps = steps.slice(afterIndex + 1); + } + } + if (params?.before) { + const beforeIndex = steps.findIndex(s => s.id === params.before); + if (beforeIndex !== -1) { + steps = steps.slice(0, beforeIndex); + } + } + + const limit = Math.min(params?.limit ?? 20, 100); + const hasMore = steps.length > limit; + steps = steps.slice(0, limit); + + return { + object: 'list', + data: steps, + first_id: steps[0]?.id ?? null, + last_id: steps[steps.length - 1]?.id ?? null, + has_more: hasMore + }; + } + + getRunStep(runId: string, stepId: string): RunStep | undefined { + const steps = this.runSteps.get(runId) || []; + return steps.find(s => s.id === stepId); + } + + updateRunStep(runId: string, stepId: string, updates: Partial): RunStep | undefined { + const steps = this.runSteps.get(runId); + if (!steps) return undefined; + const index = steps.findIndex(s => s.id === stepId); + if (index === -1) return undefined; + steps[index] = { ...steps[index], ...updates, id: steps[index].id }; + this.triggerPersist(); + return steps[index]; + } + + // ==================== Pending Tool Contexts ==================== + + setPendingToolContext(runId: string, context: PendingToolContext): void { + this.pendingToolContexts.set(runId, context); + // Note: We don't persist pending contexts as they're transient + } + + getPendingToolContext(runId: string): PendingToolContext | undefined { + return this.pendingToolContexts.get(runId); + } + + deletePendingToolContext(runId: string): boolean { + return this.pendingToolContexts.delete(runId); + } + + // ==================== Utility ==================== + + clear(): void { + this.assistants.clear(); + this.threads.clear(); + this.messages.clear(); + this.runs.clear(); + this.runSteps.clear(); + this.pendingToolContexts.clear(); + 
this.triggerPersist(); + } + + serialize(): SerializedState { + return { + assistants: Array.from(this.assistants.entries()), + threads: Array.from(this.threads.entries()), + messages: Array.from(this.messages.entries()), + runs: Array.from(this.runs.entries()), + runSteps: Array.from(this.runSteps.entries()) + }; + } + + restore(data: Partial): void { + if (data.assistants) this.assistants = new Map(data.assistants); + if (data.threads) this.threads = new Map(data.threads); + if (data.messages) this.messages = new Map(data.messages); + if (data.runs) this.runs = new Map(data.runs); + if (data.runSteps) this.runSteps = new Map(data.runSteps); + } +} + +// Singleton export +export const state = new AssistantsState(); diff --git a/src/assistants/tools.ts b/src/assistants/tools.ts new file mode 100644 index 0000000..fd58e74 --- /dev/null +++ b/src/assistants/tools.ts @@ -0,0 +1,20 @@ +/** + * Tool Calling Utilities + * + * Provides ID generation for tool calls. + * + * Native tool calling is handled by the VS Code Language Model API — + * tool definitions are passed via LanguageModelChatRequestOptions.tools, + * and tool call results come back as LanguageModelToolCallPart from the stream. 
+ */ + +// ==================== ID Generation ==================== + +let toolCallCounter = 0; + +/** + * Generate a unique tool call ID + */ +export function generateToolCallId(): string { + return `call_${Date.now().toString(36)}${(++toolCallCounter).toString(36)}`; +} diff --git a/src/assistants/types.ts b/src/assistants/types.ts new file mode 100644 index 0000000..b088ccb --- /dev/null +++ b/src/assistants/types.ts @@ -0,0 +1,354 @@ +/** + * OpenAI Assistants API Types + * + * Full stateful implementation supporting: + * - Assistants (create, get, list, update, delete) + * - Threads (create, get, delete) + * - Messages (create, get, list) + * - Runs (create, get, list, cancel) + * + * Future extensibility: + * - Tool calling (code_interpreter, file_search, function) + * - Streaming runs (SSE) + * - Run steps + */ + +import { ToolCall } from '../types'; + +// Re-export ToolCall so consumers don't need to change imports +export { ToolCall }; + +// ==================== Common Types ==================== + +export interface OpenAIListResponse { + object: 'list'; + data: T[]; + first_id: string | null; + last_id: string | null; + has_more: boolean; +} + +export interface PaginationParams { + limit?: number; // Default 20, max 100 + order?: 'asc' | 'desc'; + after?: string; // Cursor for pagination + before?: string; +} + +// ==================== Tool Types (Future Extension) ==================== + +export type ToolType = 'code_interpreter' | 'file_search' | 'function'; + +export interface FunctionDefinition { + name: string; + description?: string; + parameters?: Record; + strict?: boolean; +} + +export interface AssistantTool { + type: ToolType; + function?: FunctionDefinition; +} + +export interface ToolOutput { + tool_call_id: string; + output: string; +} + +// ==================== Assistant Types ==================== + +export interface Assistant { + id: string; // "asst_abc123" + object: 'assistant'; + created_at: number; // Unix timestamp (seconds) + name: 
string | null; + description: string | null; + model: string; // e.g., "gpt-4o", "claude-3.5-sonnet" + instructions: string | null; // System prompt + tools: AssistantTool[]; + metadata: Record<string, string>; // User-defined key-value pairs (max 16) + // Future: tool_resources, temperature, top_p, response_format +} + +export interface CreateAssistantRequest { + model: string; + name?: string; + description?: string; + instructions?: string; + tools?: AssistantTool[]; + metadata?: Record<string, string>; + // Future: tool_resources, temperature, top_p, response_format +} + +export interface UpdateAssistantRequest { + model?: string; + name?: string | null; + description?: string | null; + instructions?: string | null; + tools?: AssistantTool[]; + metadata?: Record<string, string>; +} + +// ==================== Thread Types ==================== + +export interface Thread { + id: string; // "thread_abc123" + object: 'thread'; + created_at: number; + metadata: Record<string, string>; + // Future: tool_resources +} + +export interface CreateThreadRequest { + messages?: CreateMessageRequest[]; // Initial messages + metadata?: Record<string, string>; + // Future: tool_resources +} + +// ==================== Message Types ==================== + +export type MessageRole = 'user' | 'assistant'; +export type MessageStatus = 'in_progress' | 'incomplete' | 'completed'; + +export interface TextContent { + type: 'text'; + text: { + value: string; + annotations: TextAnnotation[]; + }; +} + +// Future: Support for images and file attachments +export interface ImageFileContent { + type: 'image_file'; + image_file: { + file_id: string; + detail?: 'auto' | 'low' | 'high'; + }; +} + +export interface ImageUrlContent { + type: 'image_url'; + image_url: { + url: string; + detail?: 'auto' | 'low' | 'high'; + }; +} + +// Annotations for citations (future) +export interface TextAnnotation { + type: 'file_citation' | 'file_path'; + text: string; + start_index: number; + end_index: number; + file_citation?: { + file_id: string; + quote?: string; + }; + file_path?: { + 
file_id: string; + }; +} + +export type MessageContent = TextContent | ImageFileContent | ImageUrlContent; + +export interface Message { + id: string; // "msg_abc123" + object: 'thread.message'; + created_at: number; + thread_id: string; + status: MessageStatus; + incomplete_details: { reason: string } | null; + completed_at: number | null; + incomplete_at: number | null; + role: MessageRole; + content: MessageContent[]; + assistant_id: string | null; // Set if created by a run + run_id: string | null; // Set if created by a run + attachments: MessageAttachment[]; + metadata: Record<string, string>; +} + +export interface MessageAttachment { + file_id: string; + tools: Array<{ type: ToolType }>; +} + +export interface CreateMessageRequest { + role: 'user' | 'assistant'; + content: string | MessageContent[]; + attachments?: MessageAttachment[]; + metadata?: Record<string, string>; +} + +// ==================== Run Types ==================== + +export type RunStatus = + | 'queued' + | 'in_progress' + | 'requires_action' // Future: tool calling + | 'cancelling' + | 'cancelled' + | 'failed' + | 'completed' + | 'incomplete' + | 'expired'; + +export interface RunError { + code: 'server_error' | 'rate_limit_exceeded' | 'invalid_prompt'; + message: string; +} + +export interface RunUsage { + prompt_tokens: number; + completion_tokens: number; + total_tokens: number; +} + +// Future: For tool calling support +export interface RequiredAction { + type: 'submit_tool_outputs'; + submit_tool_outputs: { + tool_calls: ToolCall[]; + }; +} + +export interface Run { + id: string; // "run_abc123" + object: 'thread.run'; + created_at: number; + thread_id: string; + assistant_id: string; + status: RunStatus; + required_action: RequiredAction | null; // Future: tool calling + last_error: RunError | null; + expires_at: number | null; // 10 minute timeout + started_at: number | null; + cancelled_at: number | null; + failed_at: number | null; + completed_at: number | null; + incomplete_details: { reason: string } | null; 
+ model: string; + instructions: string | null; // Override assistant instructions + tools: AssistantTool[]; + metadata: Record<string, string>; + usage: RunUsage | null; + // Future: temperature, top_p, max_prompt_tokens, max_completion_tokens, + // truncation_strategy, response_format, tool_choice, parallel_tool_calls +} + +export interface CreateRunRequest { + assistant_id: string; + model?: string; // Override assistant's model + instructions?: string; // Override instructions + additional_instructions?: string; // Append to instructions + additional_messages?: CreateMessageRequest[]; + tools?: AssistantTool[]; + metadata?: Record<string, string>; + stream?: boolean; // Future: streaming runs + // Future: temperature, top_p, max_prompt_tokens, max_completion_tokens, + // truncation_strategy, response_format, tool_choice, parallel_tool_calls +} + +// Combined thread + run creation +export interface CreateThreadAndRunRequest { + assistant_id: string; + thread?: CreateThreadRequest; + model?: string; + instructions?: string; + tools?: AssistantTool[]; + metadata?: Record<string, string>; + stream?: boolean; +} + +// Submit tool outputs +export interface SubmitToolOutputsRequest { + tool_outputs: ToolOutput[]; + stream?: boolean; +} + +// ==================== Run Steps ==================== + +export type RunStepType = 'message_creation' | 'tool_calls'; +export type RunStepStatus = 'in_progress' | 'cancelled' | 'failed' | 'completed' | 'expired'; + +export interface RunStep { + id: string; + object: 'thread.run.step'; + created_at: number; + run_id: string; + assistant_id: string; + thread_id: string; + type: RunStepType; + status: RunStepStatus; + cancelled_at: number | null; + completed_at: number | null; + expired_at: number | null; + failed_at: number | null; + last_error: RunError | null; + step_details: MessageCreationStepDetails | ToolCallsStepDetails; + usage: RunUsage | null; +} + +export interface MessageCreationStepDetails { + type: 'message_creation'; + message_creation: { + message_id: string; + }; 
+} + +export interface ToolCallsStepDetails { + type: 'tool_calls'; + tool_calls: ToolCall[]; +} + +// ==================== Streaming Events ==================== + +export type StreamEventType = + | 'thread.created' + | 'thread.run.created' + | 'thread.run.queued' + | 'thread.run.in_progress' + | 'thread.run.requires_action' + | 'thread.run.completed' + | 'thread.run.incomplete' + | 'thread.run.failed' + | 'thread.run.cancelling' + | 'thread.run.cancelled' + | 'thread.run.expired' + | 'thread.run.step.created' + | 'thread.run.step.in_progress' + | 'thread.run.step.delta' + | 'thread.run.step.completed' + | 'thread.run.step.failed' + | 'thread.run.step.cancelled' + | 'thread.run.step.expired' + | 'thread.message.created' + | 'thread.message.in_progress' + | 'thread.message.delta' + | 'thread.message.completed' + | 'thread.message.incomplete' + | 'error' + | 'done'; + +export interface StreamEvent { + event: StreamEventType; + data: unknown; +} + +export interface MessageDelta { + id: string; + object: 'thread.message.delta'; + delta: { + content: Array<{ + index: number; + type: 'text'; + text: { + value: string; + annotations?: unknown[]; + }; + }>; + }; +} + diff --git a/src/client/client.py b/src/client/client.py deleted file mode 100644 index 5fe823c..0000000 --- a/src/client/client.py +++ /dev/null @@ -1,34 +0,0 @@ -import os -from litellm import completion - -def run_litellm_non_stream(): - """Calls local server in non-stream mode using LiteLLM.""" - try: - response = completion( - model="gpt-4o", - messages=[{"role": "user", "content": "Create a fibonacci function in Python"}], - stream=False - ) - print("Non-streaming response:", response) - except Exception as e: - print("Error in non-stream mode:", e) - -def run_litellm_stream(): - """Calls local server in stream mode using LiteLLM.""" - try: - response_stream = completion( - model="gpt-4o", - messages=[{"role": "user", "content": "Create a fibonacci function in Python"}], - stream=True - ) - 
print("Streaming response:") - for chunk in response_stream: - print(chunk, end="", flush=True) - except Exception as e: - print("Error in streaming mode:", e) - -if __name__ == "__main__": - print("Running LiteLLM Client Non-Stream Mode:") - run_litellm_non_stream() - print("\nRunning LiteLLM Client Stream Mode:") - run_litellm_stream() diff --git a/src/client/requirements.txt b/src/client/requirements.txt deleted file mode 100644 index 8e637fb..0000000 --- a/src/client/requirements.txt +++ /dev/null @@ -1 +0,0 @@ -litellm diff --git a/src/client/run_client.sh b/src/client/run_client.sh deleted file mode 100755 index a3c1a83..0000000 --- a/src/client/run_client.sh +++ /dev/null @@ -1,5 +0,0 @@ -#!/bin/bash -export OPENAI_API_KEY="test" -export OPENAI_API_BASE="http://localhost:3000/v1" - -.venv/bin/python client.py diff --git a/src/extension.ts b/src/extension.ts index da515b0..3c9f8f6 100644 --- a/src/extension.ts +++ b/src/extension.ts @@ -1,16 +1,65 @@ import * as vscode from 'vscode'; - -let outputChannel: vscode.OutputChannel; -import { startServer } from './server'; +import * as crypto from 'crypto'; +import { startServer, setApiTokens, addApiToken as addServerToken, removeApiToken as removeServerToken } from './server'; import { ChatCompletionChunk, ChatCompletionRequest, ChatCompletionResponse, - StructuredMessageContent + StructuredMessageContent, + ToolCall, + ToolCallChunk } from './types'; +import { state, SerializedState } from './assistants'; +import { toVSCodeTools, toToolMode } from './toolConvert'; + +let outputChannel: vscode.OutputChannel; let serverInstance: ReturnType | undefined; +// Model info interface for getAvailableModels +interface ModelInfo { + vendor: string; + family: string; + id?: string; +} + +// Extended LanguageModelChat for optional id access +type LanguageModelChatWithId = vscode.LanguageModelChat & { id?: string }; + +// Token interface +interface TokenInfo { + token: string; + name: string; + createdAt: number; +} + +// 
State persistence keys +const STATE_KEY = 'copilotProxy.assistantsState'; +const TOKENS_KEY = 'copilotProxy.apiTokens'; + +/** + * Generate a secure random API token + */ +function generateToken(): string { + return 'cpx_' + crypto.randomBytes(32).toString('hex'); +} + +/** + * Get stored API tokens + */ +function getStoredTokens(context: vscode.ExtensionContext): TokenInfo[] { + return context.globalState.get(TOKENS_KEY, []); +} + +/** + * Save API tokens to storage + */ +function saveTokens(context: vscode.ExtensionContext, tokens: TokenInfo[]) { + context.globalState.update(TOKENS_KEY, tokens); + // Update server with current tokens + setApiTokens(tokens.map(t => t.token)); +} + function configurePort() { const config = vscode.workspace.getConfiguration("copilotProxy"); const currentPort = config.get("port", 3000); @@ -34,6 +83,21 @@ function configurePort() { }); } +/** + * Get available models from VS Code Language Model API + */ +export async function getAvailableModels(): Promise { + const models = await vscode.lm.selectChatModels({}); + return models.map(m => { + const model = m as LanguageModelChatWithId; + return { + vendor: model.vendor, + family: model.family, + id: model.id + }; + }); +} + export function activate(context: vscode.ExtensionContext) { outputChannel = vscode.window.createOutputChannel('Copilot Proxy Log'); @@ -41,12 +105,37 @@ export function activate(context: vscode.ExtensionContext) { context.subscriptions.push(outputChannel); outputChannel.appendLine('Extension "Copilot Proxy" is now active!'); + // ==================== State Persistence ==================== + + // Restore state from globalState + const savedState = context.globalState.get(STATE_KEY); + if (savedState) { + try { + state.restore(savedState); + outputChannel.appendLine('Restored assistants state from previous session.'); + } catch (err) { + outputChannel.appendLine(`Error restoring state: ${err}`); + } + } + + // Set up persistence callback with debounce + 
state.setPersistCallback(async (data) => { + try { + await context.globalState.update(STATE_KEY, data); + outputChannel.appendLine('Assistants state saved.'); + } catch (err) { + outputChannel.appendLine(`Error saving state: ${err}`); + } + }, 1000); // 1 second debounce + // Register command to start the Express server. context.subscriptions.push( - vscode.commands.registerCommand('Copilot Proxy - Start Server', () => { + vscode.commands.registerCommand('copilotProxy.startServer', () => { if (!serverInstance) { - const configPort = vscode.workspace.getConfiguration("copilotProxy").get("port", 3000); - serverInstance = startServer(configPort); + const config = vscode.workspace.getConfiguration("copilotProxy"); + const configPort = config.get("port", 3000); + const tokens = getStoredTokens(context); + serverInstance = startServer(configPort, tokens.map(t => t.token)); vscode.window.showInformationMessage(`Express server started on port ${configPort}.`); } else { vscode.window.showInformationMessage('Express server is already running.'); @@ -56,7 +145,7 @@ export function activate(context: vscode.ExtensionContext) { // Register command to stop the Express server. context.subscriptions.push( - vscode.commands.registerCommand('Copilot Proxy - Stop Server', () => { + vscode.commands.registerCommand('copilotProxy.stopServer', () => { if (serverInstance) { serverInstance.close(); serverInstance = undefined; @@ -69,11 +158,145 @@ export function activate(context: vscode.ExtensionContext) { // Register command to configure the port. context.subscriptions.push( - vscode.commands.registerCommand('Copilot Proxy: Configure Port', () => { + vscode.commands.registerCommand('copilotProxy.configurePort', () => { configurePort(); }) ); + // Register command to list available LLM models via the VS Code picker. 
+ context.subscriptions.push( + vscode.commands.registerCommand('copilotProxy.listModels', async () => { + try { + const models = await vscode.lm.selectChatModels({}); + if (!models || models.length === 0) { + vscode.window.showInformationMessage('No model selected.'); + return; + } + outputChannel.appendLine('Available/selected models:'); + for (const m of models) { + const model = m as LanguageModelChatWithId; + outputChannel.appendLine(`vendor: ${model.vendor}, family: ${model.family}${model.id ? ', id: '+model.id : ''}`); + } + vscode.window.showInformationMessage('Model info written to Copilot Proxy Log'); + } catch (err) { + outputChannel.appendLine(`Error listing models: ${String(err)}`); + vscode.window.showErrorMessage('Failed to list models (see Copilot Proxy Log).'); + } + }) + ); + + // ==================== API Token Management Commands ==================== + + // Register command to create a new API token + context.subscriptions.push( + vscode.commands.registerCommand('copilotProxy.createApiToken', async () => { + const name = await vscode.window.showInputBox({ + prompt: "Enter a name for this API token (e.g., 'aider', 'production'):", + placeHolder: "Token name", + validateInput: (value: string): string | undefined => { + if (!value || value.trim().length === 0) { + return "Token name cannot be empty."; + } + return undefined; + } + }); + + if (!name) { + return; + } + + const token = generateToken(); + const tokens = getStoredTokens(context); + tokens.push({ + token, + name: name.trim(), + createdAt: Date.now() + }); + saveTokens(context, tokens); + + outputChannel.appendLine(`Created new API token: ${name}`); + outputChannel.appendLine(`Token: ${token}`); + + const action = await vscode.window.showInformationMessage( + `API token created: ${name}`, + 'Copy Token', + 'Show in Log' + ); + + if (action === 'Copy Token') { + await vscode.env.clipboard.writeText(token); + vscode.window.showInformationMessage('Token copied to clipboard!'); + } else if 
(action === 'Show in Log') { + outputChannel.show(); + } + }) + ); + + // Register command to list all API tokens + context.subscriptions.push( + vscode.commands.registerCommand('copilotProxy.listApiTokens', async () => { + const tokens = getStoredTokens(context); + + if (tokens.length === 0) { + vscode.window.showInformationMessage('No API tokens found. Create one using "Copilot Proxy: Create API Token".'); + return; + } + + outputChannel.appendLine('\n=== API Tokens ==='); + tokens.forEach((t, idx) => { + const created = new Date(t.createdAt).toLocaleString(); + outputChannel.appendLine(`${idx + 1}. Name: ${t.name}`); + outputChannel.appendLine(` Token: ${t.token}`); + outputChannel.appendLine(` Created: ${created}`); + outputChannel.appendLine(''); + }); + outputChannel.show(); + + vscode.window.showInformationMessage(`Found ${tokens.length} API token(s). Check Copilot Proxy Log for details.`); + }) + ); + + // Register command to remove an API token + context.subscriptions.push( + vscode.commands.registerCommand('copilotProxy.removeApiToken', async () => { + const tokens = getStoredTokens(context); + + if (tokens.length === 0) { + vscode.window.showInformationMessage('No API tokens found.'); + return; + } + + const items = tokens.map((t, idx) => ({ + label: t.name, + description: t.token.substring(0, 16) + '...', + detail: `Created: ${new Date(t.createdAt).toLocaleString()}`, + token: t.token + })); + + const selected = await vscode.window.showQuickPick(items, { + placeHolder: 'Select a token to remove', + canPickMany: false + }); + + if (!selected) { + return; + } + + const confirm = await vscode.window.showWarningMessage( + `Are you sure you want to remove token "${selected.label}"?`, + { modal: true }, + 'Remove' + ); + + if (confirm === 'Remove') { + const updatedTokens = tokens.filter(t => t.token !== selected.token); + saveTokens(context, updatedTokens); + outputChannel.appendLine(`Removed API token: ${selected.label}`); + 
vscode.window.showInformationMessage(`Token "${selected.label}" removed successfully.`); + } + }) + ); + // Register a disposable to stop the server when the extension is deactivated. context.subscriptions.push({ dispose: () => { @@ -93,7 +316,10 @@ export function deactivate() { } } -function extractMessageContent(content: string | StructuredMessageContent[]): string { +function extractMessageContent(content: string | StructuredMessageContent[] | null | undefined): string { + if (content === null || content === undefined) { + return ''; + } if (typeof content === 'string') { return content; } @@ -106,22 +332,85 @@ function extractMessageContent(content: string | StructuredMessageContent[]): st export async function processChatRequest(request: ChatCompletionRequest): Promise | ChatCompletionResponse> { const userMessages = request.messages.filter(message => message.role.toLowerCase() === "user"); const latestUserMessage = userMessages.length > 0 ? userMessages[userMessages.length - 1].content : ''; - const preview = typeof latestUserMessage === 'string' + const preview = typeof latestUserMessage === 'string' ? (latestUserMessage.length > 30 ? latestUserMessage.slice(0, 30) + '...' : latestUserMessage) : JSON.stringify(latestUserMessage); - + outputChannel.appendLine(`Request received. Model: ${request.model}. 
Preview: ${preview}`); outputChannel.appendLine(`Full messages: ${JSON.stringify(request.messages, null, 2)}`); - - // Map request messages to vscode.LanguageModelChatMessage format with content extraction - const chatMessages = request.messages.map(message => { + + // Extract system messages and combine their content + const systemMessages = request.messages.filter(message => message.role.toLowerCase() === "system"); + const systemContent = systemMessages + .map(msg => extractMessageContent(msg.content)) + .filter(content => content.length > 0) + .join('\n\n'); + + // Map request messages to vscode.LanguageModelChatMessage format + // Prepend system content to the first user message (VS Code LM API has no SystemMessage) + const chatMessages: vscode.LanguageModelChatMessage[] = []; + let systemPrepended = false; + + for (const message of request.messages) { + const role = message.role.toLowerCase(); + + // Skip system messages as we'll prepend them to the first user message + if (role === "system") { + continue; + } + + // Handle tool result messages + if (role === 'tool' && message.tool_call_id) { + const processedContent = extractMessageContent(message.content); + const resultPart = new vscode.LanguageModelToolResultPart( + message.tool_call_id, + [new vscode.LanguageModelTextPart(processedContent)] + ); + chatMessages.push(vscode.LanguageModelChatMessage.User([resultPart])); + continue; + } + + // Handle assistant messages with tool_calls + if (role === 'assistant' && message.tool_calls && message.tool_calls.length > 0) { + const parts: (vscode.LanguageModelTextPart | vscode.LanguageModelToolCallPart)[] = []; + const textContent = extractMessageContent(message.content); + if (textContent) { + parts.push(new vscode.LanguageModelTextPart(textContent)); + } + for (const tc of message.tool_calls) { + let args: Record; + try { + args = JSON.parse(tc.function.arguments); + } catch { + args = {}; + } + parts.push(new vscode.LanguageModelToolCallPart(tc.id, 
tc.function.name, args)); + } + chatMessages.push(vscode.LanguageModelChatMessage.Assistant(parts)); + continue; + } + const processedContent = extractMessageContent(message.content); - if (message.role.toLowerCase() === "user") { - return vscode.LanguageModelChatMessage.User(processedContent); + + if (role === "user") { + if (!systemPrepended && systemContent) { + // Prepend system instructions to first user message + const combinedContent = `${systemContent}\n\n---\n\n${processedContent}`; + chatMessages.push(vscode.LanguageModelChatMessage.User(combinedContent)); + systemPrepended = true; + } else { + chatMessages.push(vscode.LanguageModelChatMessage.User(processedContent)); + } } else { - return vscode.LanguageModelChatMessage.Assistant(processedContent); + // Assistant message (without tool_calls) + chatMessages.push(vscode.LanguageModelChatMessage.Assistant(processedContent)); } - }); + } + + // If no user messages but we have system content, add it as a user message + if (!systemPrepended && systemContent) { + chatMessages.unshift(vscode.LanguageModelChatMessage.User(systemContent)); + } const [selectedModel] = await vscode.lm.selectChatModels({ vendor: "copilot", @@ -132,55 +421,108 @@ export async function processChatRequest(request: ChatCompletionRequest): Promis throw new Error(`No language model available for model: ${request.model}`); } + // Build request options with native tool support + const options: vscode.LanguageModelChatRequestOptions = {}; + if (request.tools?.length) { + const toolMode = toToolMode(request.tool_choice); + if (toolMode !== undefined) { + options.tools = toVSCodeTools(request.tools); + options.toolMode = toolMode; + } + // If toolMode is undefined (choice === 'none'), omit tools entirely + } + if (request.stream) { // Streaming mode: call the real backend and yield response chunks. 
return (async function* () { + const cancellationSource = new vscode.CancellationTokenSource(); try { - const cancellationSource = new vscode.CancellationTokenSource(); const chatResponse = await selectedModel.sendRequest( chatMessages, - {}, + options, cancellationSource.token ); let firstChunk = true; let chunkIndex = 0; - // Iterate over the response fragments from the real backend. - for await (const fragment of chatResponse.text) { - const chunk: ChatCompletionChunk = { + const accumulatedToolCalls: { callId: string; name: string; input: unknown }[] = []; + + // Iterate over the response stream (supports both text and tool call parts) + for await (const part of chatResponse.stream) { + if (part instanceof vscode.LanguageModelTextPart) { + const chunk: ChatCompletionChunk = { + id: `chatcmpl-stream-${chunkIndex}`, + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: request.model, + choices: [ + { + delta: { + ...(firstChunk ? { role: "assistant" } : {}), + content: part.value, + }, + index: 0, + finish_reason: "", + }, + ], + }; + firstChunk = false; + chunkIndex++; + yield chunk; + } else if (part instanceof vscode.LanguageModelToolCallPart) { + accumulatedToolCalls.push({ + callId: part.callId, + name: part.name, + input: part.input, + }); + } + } + + // If tool calls were received, yield them as a tool_calls delta chunk + if (accumulatedToolCalls.length > 0) { + const toolCallChunks: ToolCallChunk[] = accumulatedToolCalls.map((tc, index) => ({ + index, + id: tc.callId, + type: 'function' as const, + function: { + name: tc.name, + arguments: JSON.stringify(tc.input), + }, + })); + + const toolCallsChunk: ChatCompletionChunk = { id: `chatcmpl-stream-${chunkIndex}`, object: "chat.completion.chunk", - created: Date.now(), + created: Math.floor(Date.now() / 1000), model: request.model, choices: [ { delta: { ...(firstChunk ? 
{ role: "assistant" } : {}), - content: fragment, + tool_calls: toolCallChunks, }, index: 0, - finish_reason: "", + finish_reason: "tool_calls", + }, + ], + }; + yield toolCallsChunk; + } else { + // After finishing the iteration with no tool calls, yield a final stop chunk. + const finalChunk: ChatCompletionChunk = { + id: `chatcmpl-stream-final`, + object: "chat.completion.chunk", + created: Math.floor(Date.now() / 1000), + model: request.model, + choices: [ + { + delta: { content: "" }, + index: 0, + finish_reason: "stop", }, ], }; - firstChunk = false; - chunkIndex++; - yield chunk; + yield finalChunk; } - // After finishing the iteration, yield a final chunk to indicate completion. - const finalChunk: ChatCompletionChunk = { - id: `chatcmpl-stream-final`, - object: "chat.completion.chunk", - created: Date.now(), - model: request.model, - choices: [ - { - delta: { content: "" }, - index: 0, - finish_reason: "stop", - }, - ], - }; - yield finalChunk; } catch (error) { outputChannel.appendLine("ERROR: Error in streaming mode:"); if (error instanceof Error) { @@ -190,36 +532,58 @@ export async function processChatRequest(request: ChatCompletionRequest): Promis outputChannel.appendLine(`Unknown error type: ${JSON.stringify(error)}`); } throw error; + } finally { + cancellationSource.dispose(); } - })(); // Add parentheses here to properly close and invoke the IIFE + })(); } else { // Non-streaming mode: call the real backend and accumulate the full response. 
+ const cancellationSource = new vscode.CancellationTokenSource(); try { - const cancellationSource = new vscode.CancellationTokenSource(); const chatResponse = await selectedModel.sendRequest( chatMessages, - {}, + options, cancellationSource.token ); let fullContent = ""; - for await (const fragment of chatResponse.text) { - fullContent += fragment; + const toolCalls: ToolCall[] = []; + + for await (const part of chatResponse.stream) { + if (part instanceof vscode.LanguageModelTextPart) { + fullContent += part.value; + } else if (part instanceof vscode.LanguageModelToolCallPart) { + toolCalls.push({ + id: part.callId, + type: 'function', + function: { + name: part.name, + arguments: JSON.stringify(part.input), + }, + }); + } } + + const hasToolCalls = toolCalls.length > 0; const response: ChatCompletionResponse = { id: "chatcmpl-nonstream", object: "chat.completion", - created: Date.now(), + created: Math.floor(Date.now() / 1000), choices: [ { index: 0, - message: { role: "assistant", content: fullContent }, - finish_reason: "stop", + message: { + role: "assistant", + content: fullContent || null, + ...(hasToolCalls ? { tool_calls: toolCalls } : {}), + }, + finish_reason: hasToolCalls ? 
"tool_calls" : "stop", }, ], usage: { prompt_tokens: 0, - completion_tokens: fullContent.length, - total_tokens: fullContent.length, + // Rough token estimate (~4 chars per token); not exact but better than char count + completion_tokens: Math.ceil(fullContent.length / 4), + total_tokens: Math.ceil(fullContent.length / 4), }, }; return response; @@ -232,6 +596,8 @@ export async function processChatRequest(request: ChatCompletionRequest): Promis outputChannel.appendLine(`Unknown error type: ${JSON.stringify(error)}`); } throw error; + } finally { + cancellationSource.dispose(); } } } diff --git a/src/server.ts b/src/server.ts index df2eee2..934b2e5 100644 --- a/src/server.ts +++ b/src/server.ts @@ -1,27 +1,464 @@ import express, { Request, Response } from 'express'; -import dotenv from 'dotenv'; import morgan from 'morgan'; -import {ChatCompletionRequest, ChatCompletionChunk, ChatCompletionChunkDelta, ChatCompletionResponse} from './types'; -import { processChatRequest } from './extension'; - -// Load environment variables from .env file if present -dotenv.config(); +import { + ChatCompletionRequest, + ChatCompletionChunk, + ChatCompletionResponse, + CompletionRequest, + CompletionResponse, + EmbeddingRequest, + ModelObject, + ModelsListResponse, + CreateResponseRequest, + ResponseObject, + ResponseFunctionCallItem, + ResponseFunctionCallOutputItem, + ResponseOutputItemUnion, + ChatMessage +} from './types'; +import { processChatRequest, getAvailableModels } from './extension'; +import { assistantsRouter } from './assistants'; +import { generateId, errorResponse, setApiTokens, addApiToken, removeApiToken, authMiddleware } from './utils'; const app = express(); -// Middleware to parse JSON bodies -app.use(express.json()); +// Middleware to parse JSON bodies (50MB limit to accommodate large tool results) +app.use(express.json({ limit: '50mb' })); // Logger middleware app.use(morgan('combined')); -// POST /v1/chat/completions endpoint implementation +// Re-export for 
extension.ts imports +export { setApiTokens, addApiToken, removeApiToken }; + +// Apply auth middleware to all routes +app.use(authMiddleware); + +// errorResponse and generateId are imported from ./utils + +// ==================== Models Endpoints ==================== + +// GET /v1/models - List available models +app.get('/v1/models', async (req: Request, res: Response) => { + try { + const models = await getAvailableModels(); + const response: ModelsListResponse = { + object: 'list', + data: models.map(m => ({ + id: m.family, + object: 'model' as const, + created: Math.floor(Date.now() / 1000), + owned_by: m.vendor + })) + }; + res.json(response); + } catch (error) { + console.error('Error listing models:', error); + res.status(500).json(errorResponse('Failed to list models', 'server_error')); + } +}); + +// GET /v1/models/:model - Get specific model +app.get('/v1/models/:model', async (req: Request, res: Response) => { + try { + const models = await getAvailableModels(); + const model = models.find(m => m.family === req.params.model); + + if (!model) { + return res.status(404).json( + errorResponse(`Model '${req.params.model}' not found`, 'invalid_request_error', 'model', 'model_not_found') + ); + } + + const response: ModelObject = { + id: model.family, + object: 'model', + created: Math.floor(Date.now() / 1000), + owned_by: model.vendor + }; + res.json(response); + } catch (error) { + console.error('Error getting model:', error); + res.status(500).json(errorResponse('Failed to get model', 'server_error')); + } +}); + +// ==================== Embeddings Endpoint (Stub) ==================== + +// POST /v1/embeddings - Returns 501 Not Implemented +app.post('/v1/embeddings', (req: Request<{}, {}, EmbeddingRequest>, res: Response) => { + res.status(501).json( + errorResponse( + 'Embeddings are not supported by the VS Code Language Model API. 
' + + 'Consider using an external embedding service like OpenAI, Ollama, or a local embedding model.', + 'not_implemented', + null, + 'embeddings_not_supported' + ) + ); +}); + +// ==================== Legacy Completions Endpoint ==================== + +// POST /v1/completions - Wrap as chat completion +app.post<{}, {}, CompletionRequest>('/v1/completions', async (req: Request, res: Response) => { + const { model, prompt, stream, ...rest } = req.body; + + // Normalize prompt to string + const promptText = Array.isArray(prompt) ? prompt.join('\n') : prompt; + + // Remove vendor prefixes + const cleanModel = model.split('/').pop()!; + + // Convert to chat completion request + const chatRequest: ChatCompletionRequest = { + model: cleanModel, + messages: [{ role: 'user', content: promptText }], + stream: stream ?? false + }; + + if (stream) { + res.setHeader('Content-Type', 'text/event-stream'); + res.setHeader('Cache-Control', 'no-cache'); + res.setHeader('Connection', 'keep-alive'); + + try { + const streamIterator = await processChatRequest(chatRequest) as AsyncIterable; + for await (const chunk of streamIterator) { + // Convert chat chunk to completion chunk format + const completionChunk = { + id: chunk.id, + object: 'text_completion', + created: chunk.created, + model: chunk.model, + choices: [{ + index: 0, + text: chunk.choices[0]?.delta?.content ?? 
'', + finish_reason: chunk.choices[0]?.finish_reason || null, + logprobs: null + }] + }; + res.write(`data: ${JSON.stringify(completionChunk)}\n\n`); + } + res.write('data: [DONE]\n\n'); + res.end(); + } catch (error) { + console.error('Streaming completions error:', error); + if (!res.headersSent) { + res.status(500).json(errorResponse('Streaming error', 'server_error')); + } else { + res.write(`data: ${JSON.stringify({ error: { message: 'Stream error', type: 'server_error' } })}\n\n`); + res.end(); + } + } + } else { + try { + const chatResponse = await processChatRequest(chatRequest) as ChatCompletionResponse; + + const response: CompletionResponse = { + id: chatResponse.id, + object: 'text_completion', + created: chatResponse.created, + model: cleanModel, + choices: [{ + index: 0, + text: typeof chatResponse.choices[0]?.message?.content === 'string' + ? chatResponse.choices[0].message.content + : JSON.stringify(chatResponse.choices[0]?.message?.content ?? ''), + finish_reason: chatResponse.choices[0]?.finish_reason ?? 
'stop', + logprobs: null + }], + usage: chatResponse.usage + }; + res.json(response); + } catch (error) { + console.error('Completions error:', error); + res.status(500).json(errorResponse('Error processing request', 'server_error')); + } + } +}); + +// ==================== Responses API Endpoint ==================== + +// POST /v1/responses - Create a model response (new OpenAI API) +app.post<{}, {}, CreateResponseRequest>('/v1/responses', async (req, res) => { + const { model, input, instructions, stream, temperature, max_output_tokens, metadata, tools, tool_choice } = req.body; + + // Validate required field + if (!model) { + return res.status(400).json(errorResponse('Missing required field: model', 'invalid_request_error', 'model')); + } + + // Remove vendor prefixes (don't mutate req.body) + const cleanModel = model.split('/').pop()!; + + // Convert input to chat messages + const messages: ChatMessage[] = []; + + // Add instructions as system message if provided + if (instructions) { + messages.push({ role: 'system', content: instructions }); + } + + // Process input + if (typeof input === 'string') { + messages.push({ role: 'user', content: input }); + } else if (Array.isArray(input)) { + for (const item of input) { + if (item.type === 'message') { + const content = typeof item.content === 'string' + ? item.content + : item.content.map(c => c.text).join(''); + messages.push({ role: item.role, content }); + } else if ('call_id' in item && 'output' in item) { + // Handle tool output from previous turn (function_call_output) + const toolOutput = item as unknown as ResponseFunctionCallOutputItem; + messages.push({ + role: 'user', + content: `Tool result for call_id ${toolOutput.call_id}:\n${toolOutput.output}` + }); + } + } + } + + // Build chat completion request with native tool support + const chatRequest: ChatCompletionRequest = { + model: cleanModel, + messages, + stream: stream ?? 
false, + temperature, + max_tokens: max_output_tokens, + tools: tools, + // Map 'required' to 'auto' since ChatCompletionRequest doesn't support 'required' + tool_choice: (tool_choice === 'required' ? 'auto' : tool_choice) as ChatCompletionRequest['tool_choice'], + }; + + const responseId = `resp_${Date.now()}_${Math.random().toString(36).substring(2, 15)}`; + const createdAt = Math.floor(Date.now() / 1000); + + if (stream) { + // Streaming mode + res.setHeader('Content-Type', 'text/event-stream'); + res.setHeader('Cache-Control', 'no-cache'); + res.setHeader('Connection', 'keep-alive'); + + try { + // Send initial response.created event + const initialResponse: ResponseObject = { + id: responseId, + object: 'response', + created_at: createdAt, + status: 'in_progress', + completed_at: null, + error: null, + incomplete_details: null, + instructions: instructions ?? null, + max_output_tokens: max_output_tokens ?? null, + model: cleanModel, + output: [], + parallel_tool_calls: true, + previous_response_id: req.body.previous_response_id ?? null, + temperature: temperature ?? 1, + top_p: req.body.top_p ?? 1, + truncation: 'disabled', + usage: null, + metadata: metadata ?? {} + }; + + res.write(`event: response.created\ndata: ${JSON.stringify(initialResponse)}\n\n`); + + const streamIterator = await processChatRequest(chatRequest) as AsyncIterable; + let fullContent = ''; + const messageId = generateId('msg'); + const output: ResponseOutputItemUnion[] = []; + const toolCalls: ResponseFunctionCallItem[] = []; + + for await (const chunk of streamIterator) { + const deltaContent = chunk.choices[0]?.delta?.content ?? 
''; + fullContent += deltaContent; + + if (deltaContent) { + // Send content delta event + res.write(`event: response.output_text.delta\ndata: ${JSON.stringify({ delta: deltaContent })}\n\n`); + } + + // Check for native tool calls in the chunk + const chunkToolCalls = chunk.choices[0]?.delta?.tool_calls; + if (chunkToolCalls) { + for (const tc of chunkToolCalls) { + if (tc.id && tc.function?.name) { + const toolCall: ResponseFunctionCallItem = { + type: 'function_call', + id: generateId('fc'), + call_id: tc.id, + name: tc.function.name, + arguments: tc.function.arguments ?? '{}', + status: 'completed' + }; + toolCalls.push(toolCall); + res.write(`event: response.function_call_arguments.done\ndata: ${JSON.stringify(toolCall)}\n\n`); + output.push(toolCall); + } + } + } + } + + // Add text message if there's content + if (fullContent.trim()) { + output.push({ + type: 'message', + id: messageId, + status: 'completed', + role: 'assistant', + content: [{ + type: 'output_text', + text: fullContent, + annotations: [] + }] + }); + } + + // If no output at all, add an empty message + if (output.length === 0) { + output.push({ + type: 'message', + id: messageId, + status: 'completed', + role: 'assistant', + content: [{ + type: 'output_text', + text: '', + annotations: [] + }] + }); + } + + // Send completed response + const completedResponse = { + ...initialResponse, + status: 'completed' as const, + completed_at: Math.floor(Date.now() / 1000), + output, + tools: tools ?? 
[], + usage: { + input_tokens: 0, + input_tokens_details: { cached_tokens: 0 }, + output_tokens: fullContent.length, + output_tokens_details: { reasoning_tokens: 0 }, + total_tokens: fullContent.length + } + }; + + res.write(`event: response.completed\ndata: ${JSON.stringify(completedResponse)}\n\n`); + res.write('event: done\ndata: [DONE]\n\n'); + res.end(); + } catch (error) { + console.error('Responses API streaming error:', error); + res.write(`event: error\ndata: ${JSON.stringify({ error: { message: 'Stream error', type: 'server_error' } })}\n\n`); + res.end(); + } + } else { + // Non-streaming mode + try { + const chatResponse = await processChatRequest(chatRequest) as ChatCompletionResponse; + const rawContent = typeof chatResponse.choices[0]?.message?.content === 'string' + ? chatResponse.choices[0].message.content + : JSON.stringify(chatResponse.choices[0]?.message?.content ?? ''); + + const messageId = generateId('msg'); + const output: ResponseOutputItemUnion[] = []; + + // Convert native tool calls from processChatRequest to ResponseFunctionCallItem + const nativeToolCalls = chatResponse.choices[0]?.message?.tool_calls; + if (nativeToolCalls && nativeToolCalls.length > 0) { + for (const tc of nativeToolCalls) { + const toolCall: ResponseFunctionCallItem = { + type: 'function_call', + id: generateId('fc'), + call_id: tc.id, + name: tc.function.name, + arguments: tc.function.arguments, + status: 'completed' + }; + output.push(toolCall); + } + } + + // Add text message if there's content + if (rawContent.trim()) { + output.push({ + type: 'message', + id: messageId, + status: 'completed', + role: 'assistant', + content: [{ + type: 'output_text', + text: rawContent, + annotations: [] + }] + }); + } + + // If no output at all, add an empty message + if (output.length === 0) { + output.push({ + type: 'message', + id: messageId, + status: 'completed', + role: 'assistant', + content: [{ + type: 'output_text', + text: '', + annotations: [] + }] + }); + } + + 
const response = { + id: responseId, + object: 'response' as const, + created_at: createdAt, + status: 'completed' as const, + completed_at: Math.floor(Date.now() / 1000), + error: null, + incomplete_details: null, + instructions: instructions ?? null, + max_output_tokens: max_output_tokens ?? null, + model: cleanModel, + output, + parallel_tool_calls: req.body.parallel_tool_calls ?? true, + previous_response_id: req.body.previous_response_id ?? null, + temperature: temperature ?? 1, + top_p: req.body.top_p ?? 1, + truncation: 'disabled' as const, + usage: { + input_tokens: chatResponse.usage?.prompt_tokens ?? 0, + input_tokens_details: { cached_tokens: 0 }, + output_tokens: chatResponse.usage?.completion_tokens ?? rawContent.length, + output_tokens_details: { reasoning_tokens: 0 }, + total_tokens: chatResponse.usage?.total_tokens ?? rawContent.length + }, + metadata: metadata ?? {}, + tools: tools ?? [] + }; + + res.json(response); + } catch (error) { + console.error('Responses API error:', error); + res.status(500).json(errorResponse('Error processing request', 'server_error')); + } + } +}); + +// ==================== Chat Completions Endpoint ==================== + app.post<{}, {}, ChatCompletionRequest>('/v1/chat/completions', async (req, res) => { - const { model, stream } = req.body; + const { model, stream, tools, tool_choice } = req.body; -// Remove vendor prefixes so that only the actual model name is used. + // Remove vendor prefixes so that only the actual model name is used. // For instance, "openrouter/anthropic/claude-3.5-sonnet" becomes "claude-3.5-sonnet". - req.body.model = model.split('/').pop()!; + const cleanModel = model.split('/').pop()!; + req.body.model = cleanModel; if (stream) { // Set headers for streaming. @@ -30,31 +467,67 @@ app.post<{}, {}, ChatCompletionRequest>('/v1/chat/completions', async (req, res) res.setHeader('Connection', 'keep-alive'); try { - // Call processChatRequest and expect an async iterator for streaming. 
+ // Call processChatRequest — tools are passed natively through the request + // and tool calls come back as proper delta.tool_calls chunks const streamIterator = await processChatRequest(req.body) as AsyncIterable; + for await (const chunk of streamIterator) { + // Forward the chunk directly to the client (tool calls are already in the chunk) res.write(`data: ${JSON.stringify(chunk)}\n\n`); - console.log(`Sent chunk with content: ${chunk.choices[0].delta.content}`); } + res.write("data: [DONE]\n\n"); res.end(); } catch (error) { console.error("Streaming error:", error); - return res.status(500).json({ error: "Streaming error" }); + if (!res.headersSent) { + return res.status(500).json(errorResponse('Streaming error', 'server_error')); + } else { + res.write(`data: ${JSON.stringify({ error: { message: 'Stream error', type: 'server_error' } })}\n\n`); + res.end(); + } } } else { try { // For non-streaming, await a full response. + // Tools are handled natively — tool_calls and finish_reason are already set by processChatRequest const fullResponse = await processChatRequest(req.body) as ChatCompletionResponse; return res.json(fullResponse); } catch (error) { console.error("Non-streaming error:", error); - return res.status(500).json({ error: "Error processing request" }); + return res.status(500).json( + errorResponse('Error processing request', 'server_error') + ); } } }); -export function startServer(port: number = 3000) { +// ==================== Assistants API Routes ==================== + +// Mount assistants router for /v1/assistants, /v1/threads, etc. 
+app.use(assistantsRouter); + +// ==================== Health Check ==================== + +app.get('/health', (req: Request, res: Response) => { + res.json({ status: 'ok', timestamp: new Date().toISOString() }); +}); + +// ==================== 404 Handler ==================== + +app.use((req: Request, res: Response) => { + res.status(404).json( + errorResponse( + `Unknown endpoint: ${req.method} ${req.path}`, + 'invalid_request_error', + null, + 'unknown_endpoint' + ) + ); +}); + +export function startServer(port: number = 3000, tokens: string[] = []) { + setApiTokens(tokens); const server = app.listen(port, () => { console.log(`Server is running on port ${port}`); }); diff --git a/src/toolConvert.ts b/src/toolConvert.ts new file mode 100644 index 0000000..dd82695 --- /dev/null +++ b/src/toolConvert.ts @@ -0,0 +1,72 @@ +/** + * Tool Conversion Utilities + * + * Converts between OpenAI tool formats and VS Code Language Model API tool types. + * Used by all three calling contexts: + * - Chat Completions API (extension.ts) + * - Responses API (server.ts) + * - Assistants runner (assistants/runner.ts) + */ + +import * as vscode from 'vscode'; +import { FunctionTool, ChatCompletionRequest } from './types'; +import { AssistantTool } from './assistants/types'; + +/** + * Convert OpenAI FunctionTool[] to VS Code LanguageModelChatTool[] + */ +export function toVSCodeTools(tools: FunctionTool[]): vscode.LanguageModelChatTool[] { + return tools + .filter(t => t.type === 'function') + .map(t => ({ + name: t.function.name, + description: t.function.description ?? '', + inputSchema: t.function.parameters, + })); +} + +/** + * Convert Assistants API AssistantTool[] to VS Code LanguageModelChatTool[] + */ +export function assistantToolsToVSCode(tools: AssistantTool[]): vscode.LanguageModelChatTool[] { + return tools + .filter(t => t.type === 'function' && t.function) + .map(t => ({ + name: t.function!.name, + description: t.function!.description ?? 
'', + inputSchema: t.function!.parameters, + })); +} + +/** + * Convert Assistants API AssistantTool[] to OpenAI FunctionTool[] for chat requests + */ +export function assistantToolsToFunctionTools(tools: AssistantTool[]): FunctionTool[] { + return tools + .filter(t => t.type === 'function' && t.function) + .map(t => ({ + type: 'function' as const, + function: { + name: t.function!.name, + description: t.function!.description, + parameters: t.function!.parameters, + }, + })); +} + +/** + * Convert OpenAI tool_choice to VS Code LanguageModelChatToolMode + * Returns undefined when tools should be omitted entirely (choice === 'none') + */ +export function toToolMode( + choice?: ChatCompletionRequest['tool_choice'] +): vscode.LanguageModelChatToolMode | undefined { + if (choice === 'none') { + return undefined; // omit tools entirely + } + if (typeof choice === 'object') { + return vscode.LanguageModelChatToolMode.Required; + } + // 'auto' or undefined + return vscode.LanguageModelChatToolMode.Auto; +} diff --git a/src/types.ts b/src/types.ts index 1d8e97c..19b3543 100644 --- a/src/types.ts +++ b/src/types.ts @@ -4,19 +4,121 @@ export interface StructuredMessageContent { } export interface ChatMessage { - role: string; - content: string | StructuredMessageContent[]; + role: 'system' | 'user' | 'assistant' | 'tool'; + content: string | StructuredMessageContent[] | null; + tool_calls?: ToolCall[]; + tool_call_id?: string; + name?: string; } export interface ChatCompletionRequest { messages: ChatMessage[]; model: string; stream?: boolean; + // Future extensibility for tool calling + tools?: FunctionTool[]; + tool_choice?: 'none' | 'auto' | { type: 'function'; function: { name: string } }; + // Additional OpenAI parameters (accepted but may not be fully supported) + temperature?: number; + top_p?: number; + max_tokens?: number; + stop?: string | string[]; + presence_penalty?: number; + frequency_penalty?: number; + user?: string; +} + +// ==================== Tool/Function 
Calling (Future) ==================== + +export interface FunctionTool { + type: 'function'; + function: { + name: string; + description?: string; + parameters?: Record; + strict?: boolean; + }; +} + +export interface ToolCall { + id: string; + type: 'function'; + function: { + name: string; + arguments: string; + }; +} + +// ==================== Legacy Completions API ==================== + +export interface CompletionRequest { + model: string; + prompt: string | string[]; + max_tokens?: number; + temperature?: number; + top_p?: number; + n?: number; + stream?: boolean; + stop?: string | string[]; + presence_penalty?: number; + frequency_penalty?: number; + user?: string; +} + +export interface CompletionChoice { + index: number; + text: string; + finish_reason: string; + logprobs: null; +} + +export interface CompletionResponse { + id: string; + object: 'text_completion'; + created: number; + model: string; + choices: CompletionChoice[]; + usage: ChatCompletionUsage; +} + +// ==================== Embeddings API (Stub) ==================== + +export interface EmbeddingRequest { + input: string | string[]; + model: string; + encoding_format?: 'float' | 'base64'; + dimensions?: number; + user?: string; +} + +// ==================== Models API ==================== + +export interface ModelObject { + id: string; + object: 'model'; + created: number; + owned_by: string; +} + +export interface ModelsListResponse { + object: 'list'; + data: ModelObject[]; +} + +// ==================== Error Response ==================== + +export interface OpenAIErrorResponse { + error: { + message: string; + type: string; + param: string | null; + code: string | null; + }; } export interface ChatCompletionResponse { id: string; - object: string; + object: 'chat.completion'; created: number; choices: ChatCompletionChoice[]; usage: ChatCompletionUsage; @@ -24,8 +126,14 @@ export interface ChatCompletionResponse { export interface ChatCompletionChoice { index: number; - message: 
ChatMessage; - finish_reason: string; + message: ChatCompletionMessage; + finish_reason: 'stop' | 'tool_calls' | 'length' | 'content_filter' | null; +} + +export interface ChatCompletionMessage { + role: 'assistant'; + content: string | null; + tool_calls?: ToolCall[]; } export interface ChatCompletionUsage { @@ -36,13 +144,24 @@ export interface ChatCompletionUsage { export interface ChatCompletionChunkDelta { role?: string; - content?: string; + content?: string | null; + tool_calls?: ToolCallChunk[]; +} + +export interface ToolCallChunk { + index: number; + id?: string; + type?: 'function'; + function?: { + name?: string; + arguments?: string; + }; } export interface ChatCompletionChunkChoice { delta: ChatCompletionChunkDelta; index: number; - finish_reason: string; + finish_reason: 'stop' | 'tool_calls' | 'length' | 'content_filter' | '' | null; } export interface ChatCompletionChunk { @@ -52,3 +171,92 @@ export interface ChatCompletionChunk { model: string; choices: ChatCompletionChunkChoice[]; } + +// ==================== Responses API ==================== + +export interface ResponseInputItem { + type: 'message'; + role: 'user' | 'assistant' | 'system'; + content: string | ResponseContentItem[]; +} + +export interface ResponseContentItem { + type: 'input_text' | 'output_text'; + text: string; +} + +export interface CreateResponseRequest { + model: string; + input: string | ResponseInputItem[]; + instructions?: string; + stream?: boolean; + temperature?: number; + max_output_tokens?: number; + top_p?: number; + store?: boolean; + metadata?: Record; + tools?: FunctionTool[]; + tool_choice?: 'none' | 'auto' | 'required' | { type: 'function'; name: string }; + previous_response_id?: string; + parallel_tool_calls?: boolean; +} + +export interface ResponseOutputItem { + type: 'message'; + id: string; + status: 'completed' | 'in_progress' | 'failed'; + role: 'assistant'; + content: ResponseOutputContent[]; +} + +export interface ResponseOutputContent { + type: 
'output_text'; + text: string; + annotations: unknown[]; +} + +export interface ResponseObject { + id: string; + object: 'response'; + created_at: number; + status: 'completed' | 'failed' | 'in_progress' | 'cancelled' | 'queued' | 'incomplete'; + completed_at: number | null; + error: { message: string; type: string; code: string } | null; + incomplete_details: { reason: string } | null; + instructions: string | null; + max_output_tokens: number | null; + model: string; + output: ResponseOutputItem[]; + parallel_tool_calls: boolean; + previous_response_id: string | null; + temperature: number; + top_p: number; + truncation: 'auto' | 'disabled'; + usage: { + input_tokens: number; + input_tokens_details: { cached_tokens: number }; + output_tokens: number; + output_tokens_details: { reasoning_tokens: number }; + total_tokens: number; + } | null; + metadata: Record; +} + +// ==================== Responses API Tool Calling ==================== + +export interface ResponseFunctionCallItem { + type: 'function_call'; + id: string; + call_id: string; + name: string; + arguments: string; + status: 'completed' | 'in_progress'; +} + +export interface ResponseFunctionCallOutputItem { + type: 'function_call_output'; + call_id: string; + output: string; +} + +export type ResponseOutputItemUnion = ResponseOutputItem | ResponseFunctionCallItem; diff --git a/src/utils.ts b/src/utils.ts new file mode 100644 index 0000000..d7d0f68 --- /dev/null +++ b/src/utils.ts @@ -0,0 +1,154 @@ +/** + * Shared Utilities + * + * Common helpers used across the server, routes, and runner modules. + * Consolidates duplicated generateId and errorResponse implementations. 
+ */ + +import type { NextFunction, Request, Response } from 'express'; +import { OpenAIErrorResponse } from './types'; +import { Message, MessageAttachment } from './assistants/types'; + +// ==================== ID Generation ==================== + +const ID_CHARS = 'ABCDEFGHIJKLMNOPQRSTUVWXYZabcdefghijklmnopqrstuvwxyz0123456789'; +const ID_LENGTH = 24; + +/** + * Generate a unique ID with OpenAI-style prefix. + * Used across server.ts (resp_, msg_, fc_), state.ts (asst_, thread_, msg_, run_, step_). + */ +export function generateId(prefix: string): string { + let id = ''; + for (let i = 0; i < ID_LENGTH; i++) { + id += ID_CHARS.charAt(Math.floor(Math.random() * ID_CHARS.length)); + } + return `${prefix}_${id}`; +} + +// ==================== Error Response ==================== + +/** + * Build an OpenAI-compatible error response object. + */ +export function errorResponse( + message: string, + type = 'invalid_request_error', + param: string | null = null, + code: string | null = null +): OpenAIErrorResponse { + return { + error: { message, type, param, code } + }; +} + +/** + * Shorthand for a 404 "not found" error. + */ +export function notFoundError(resource: string): OpenAIErrorResponse { + return errorResponse(`No ${resource} found`, 'invalid_request_error', null, 'resource_not_found'); +} + +// ==================== Message Factory ==================== + +export interface CreateMessageOptions { + threadId: string; + messageId: string; + content: string; + role?: 'user' | 'assistant'; + assistantId?: string | null; + runId?: string | null; + attachments?: MessageAttachment[]; + metadata?: Record; + status?: 'completed' | 'in_progress' | 'incomplete'; +} + +/** + * Create a Message object with sensible defaults. + * Eliminates the 15+ field boilerplate duplicated across routes.ts and runner.ts. + */ +export function createMessage(opts: CreateMessageOptions): Message { + const now = Math.floor(Date.now() / 1000); + const isCompleted = (opts.status ?? 
'completed') === 'completed'; + return { + id: opts.messageId, + object: 'thread.message', + created_at: now, + thread_id: opts.threadId, + status: opts.status ?? 'completed', + incomplete_details: null, + completed_at: isCompleted ? now : null, + incomplete_at: null, + role: opts.role ?? 'user', + content: [{ + type: 'text', + text: { value: opts.content, annotations: [] } + }], + assistant_id: opts.assistantId ?? null, + run_id: opts.runId ?? null, + attachments: opts.attachments ?? [], + metadata: opts.metadata ?? {} + }; +} + +// ==================== Auth Middleware ==================== + +let validTokens: Set = new Set(); + +export function setApiTokens(tokens: string[]) { + validTokens = new Set(tokens); +} + +export function addApiToken(token: string) { + validTokens.add(token); +} + +export function removeApiToken(token: string) { + validTokens.delete(token); +} + +export function authMiddleware(req: Request, res: Response, next: NextFunction) { + // Skip auth if no tokens configured + if (validTokens.size === 0) { + return next(); + } + + const authHeader = req.headers.authorization; + + if (!authHeader) { + return res.status(401).json( + errorResponse( + 'Missing authorization header. Include "Authorization: Bearer " header.', + 'authentication_error', + 'authorization', + 'missing_authorization' + ) + ); + } + + const parts = authHeader.split(' '); + if (parts.length !== 2 || parts[0] !== 'Bearer') { + return res.status(401).json( + errorResponse( + 'Invalid authorization header format. Use "Authorization: Bearer ".', + 'authentication_error', + 'authorization', + 'invalid_authorization_format' + ) + ); + } + + const token = parts[1]; + if (!validTokens.has(token)) { + return res.status(401).json( + errorResponse( + 'Invalid API token.', + 'authentication_error', + 'authorization', + 'invalid_token' + ) + ); + } + + next(); +}