Skip to content
Closed
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension


Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
2 changes: 2 additions & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,2 +1,4 @@
node_modules
out
.venv
.DS_Store
23 changes: 23 additions & 0 deletions .vscodeignore
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
__pycache__
.DS_Store
.git
.github
.gitignore
.venv
.vscode-test/**
.vscode/**
.yarnrc
*.pyc
*.vsix
**/.eslintrc.json
**/*.map
**/*.ts
**/tsconfig.json
!node_modules/**
scripts/**
src/**
tests/**
docs/**
eslint.config.js
.prettierignore
client/**
22 changes: 22 additions & 0 deletions client/README_WEB.md
Original file line number Diff line number Diff line change
@@ -0,0 +1,22 @@
AI Chat Web UI

Run a minimal Flask server that serves a single-page chat UI and proxies requests to your local AI API.

Setup

1. Create a virtualenv and install requirements:

python -m venv .venv
. .venv/bin/activate || . .\.venv\Scripts\Activate.ps1
pip install -r requirements-web.txt

2. Start the server (optionally set the API_BASE environment variable to point at your API endpoint):

python server.py

3. Open http://localhost:8080 in your browser.

Notes

- The UI posts to /api/chat and expects the API at ${API_BASE}/chat/completions.
- The model selector will send the chosen model name in the request body.
62 changes: 62 additions & 0 deletions client/copilot_proxy.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,62 @@
import json
import os

import requests

# Base URL of the OpenAI-compatible API (override with the API_BASE env var).
API_BASE = os.environ.get('API_BASE', 'http://localhost:3000/v1')
# Model name sent in the request payload (override with the MODEL env var).
MODEL = os.environ.get('MODEL', 'gpt-4o')

def call_stream():
    """Send one streaming chat-completion request and print the reply.

    Posts a fixed "introduce yourself" prompt to ``{API_BASE}/chat/completions``
    with ``stream=True`` and prints assistant content fragments as
    Server-Sent Events (SSE) arrive.
    """
    url = f"{API_BASE}/chat/completions"
    payload = {
        "model": MODEL,
        "messages": [
            {
                "role": "user",
                "content": "Hello! Can you introduce yourself briefly?"
            }
        ],
        "stream": True
    }

    def _emit_event(data_lines):
        """Decode one SSE event (its accumulated "data" payload lines) and
        print the content fragment, if any. Non-JSON payloads and the
        terminal [DONE] sentinel are ignored."""
        # Per the SSE spec, multi-line data fields are joined with newlines
        # AFTER stripping the "data:" field name from each line.
        data = "\n".join(data_lines).strip()
        if not data or data == "[DONE]":
            return
        try:
            obj = json.loads(data)
        except json.JSONDecodeError:
            # ignore payloads that are not JSON
            return
        choices = obj.get("choices", [])
        if choices:
            fragment = choices[0].get("delta", {}).get("content", "")
            if fragment:
                print(fragment, end="", flush=True)

    with requests.post(url, json=payload, stream=True, timeout=30) as resp:
        try:
            resp.raise_for_status()
        except requests.HTTPError:
            print(f"HTTP Error {resp.status_code}: {resp.text}")
            return
        event_data = []
        for raw_line in resp.iter_lines(decode_unicode=True):
            if raw_line is None:
                continue
            line = raw_line.strip()
            if not line:
                # empty line => end of one SSE event, process what we have
                if event_data:
                    _emit_event(event_data)
                    event_data = []
            elif line.startswith("data:"):
                # Strip the field name per line so events split across
                # multiple "data:" lines decode correctly (the old code only
                # stripped the first prefix of the whole buffer).
                event_data.append(line[len("data:"):].strip())
        # Flush a final event that was not terminated by a blank line.
        if event_data:
            _emit_event(event_data)

    print("\n\nStream finished.")

# Script entry point: run the one-shot streaming demo.
if __name__ == "__main__":
    call_stream()
2 changes: 2 additions & 0 deletions client/requirements-web.txt
Original file line number Diff line number Diff line change
@@ -0,0 +1,2 @@
Flask>=2.0
requests>=2.25
53 changes: 53 additions & 0 deletions client/server.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,53 @@
import os
import requests
from flask import Flask, jsonify, request, send_from_directory

# Flask app: static assets in ./web are served under /static.
app = Flask(__name__, static_folder='web', static_url_path='/static')

# Configurable API base
# Base URL of the upstream OpenAI-compatible API (override via API_BASE).
API_BASE = os.environ.get('API_BASE', 'http://localhost:3000/v1')
# TCP port the local UI server listens on (override via PORT).
PORT = int(os.environ.get('PORT', '8080'))

# Header names (lower-case) that must not be relayed from the upstream
# response: hop-by-hop headers, plus encoding/length headers that no longer
# match the body once `requests` has decoded it.
HOP_BY_HOP_HEADERS = frozenset({
    'connection',
    'content-encoding',
    'content-length',
    'keep-alive',
    'proxy-authenticate',
    'proxy-authorization',
    'te',
    'trailers',
    'transfer-encoding',
    'upgrade',
})

def filter_headers(headers):
    """Drop hop-by-hop headers from an upstream response's header items.

    Args:
        headers: iterable of (name, value) pairs.

    Returns:
        List of (name, value) pairs whose names, compared case-insensitively,
        are not in HOP_BY_HOP_HEADERS.
    """
    kept = []
    for name, value in headers:
        if name.lower() not in HOP_BY_HOP_HEADERS:
            kept.append((name, value))
    return kept

@app.route('/')
def index():
    """Serve the single-page chat UI (web/index.html)."""
    return send_from_directory('web', 'index.html')

@app.route('/api/chat', methods=['POST'])
def api_chat():
    """Proxy a browser chat request to ``{API_BASE}/chat/completions``.

    The JSON body posted by the UI is forwarded unchanged; the upstream
    response body, status code, and (filtered) headers are relayed back.

    Returns:
        On success: a (body, status, headers) tuple relayed from upstream.
        On upstream failure: a JSON error payload with HTTP 502.
    """
    payload = request.get_json(force=True)
    # forward to the underlying API
    url = f"{API_BASE}/chat/completions"
    try:
        resp = requests.post(url, json=payload, timeout=60)
    except requests.RequestException as e:
        return jsonify({'error': 'upstream request failed', 'details': str(e)}), 502

    # Relay body/status as-is; strip hop-by-hop headers that would conflict
    # with the already-decoded body (e.g. Content-Length, Transfer-Encoding).
    return (resp.content, resp.status_code, filter_headers(resp.headers.items()))


@app.route('/api/models', methods=['GET'])
def api_models():
    """Fetch available models from upstream API and return them.

    Returns:
        On success: a (body, status, headers) tuple relayed from
        ``{API_BASE}/models``.
        On upstream failure: a JSON error payload with HTTP 502.
    """
    url = f"{API_BASE}/models"
    try:
        resp = requests.get(url, timeout=20)
    except requests.RequestException as e:
        return jsonify({'error': 'upstream request failed', 'details': str(e)}), 502

    # Relay body/status as-is; strip hop-by-hop headers that no longer apply
    # to the decoded body.
    return (resp.content, resp.status_code, filter_headers(resp.headers.items()))

if __name__ == '__main__':
    # Bind to localhost only; enable Flask debug mode with FLASK_DEBUG=true.
    debug_mode = os.environ.get('FLASK_DEBUG', 'false').lower() == 'true'
    app.run(host='127.0.0.1', port=PORT, debug=debug_mode)
163 changes: 163 additions & 0 deletions client/web/app.js
Original file line number Diff line number Diff line change
@@ -0,0 +1,163 @@
// Cached DOM references for the chat UI.
const form = document.getElementById('chat-form');
const promptEl = document.getElementById('prompt');
const messagesEl = document.getElementById('messages');
const modelSel = document.getElementById('model');
const newChatBtn = document.getElementById('new-chat');

// persistent conversation stored in localStorage
const STORAGE_KEY = 'ai_chat_messages_v1';
// In-memory conversation: array of {role, content} objects.
let messages = [];

function loadMessages(){
  // Restore the conversation from localStorage; fall back to an empty
  // history on missing or corrupt data.
  let stored;
  try {
    stored = JSON.parse(localStorage.getItem(STORAGE_KEY) || '[]');
  } catch (err) {
    stored = [];
  }
  messages = stored;
}

function saveMessages(){
  // Persist the conversation; storage failures (quota, private browsing)
  // are deliberately ignored.
  try {
    localStorage.setItem(STORAGE_KEY, JSON.stringify(messages));
  } catch (err) {
    /* ignore */
  }
}

function renderMessages(){
  // Re-draw the whole transcript and keep the view scrolled to the bottom.
  messagesEl.innerHTML = '';
  messages.forEach((msg) => {
    const node = document.createElement('div');
    node.className = 'msg ' + (msg.role === 'user' ? 'user' : 'ai');
    node.textContent = msg.content;
    messagesEl.appendChild(node);
  });
  messagesEl.scrollTop = messagesEl.scrollHeight;
}

// Initial paint: restore and display any saved conversation.
loadMessages();
renderMessages();

// Fetch the model list from the server and populate the <select>; on any
// failure fall back to a small hard-coded list.
async function loadModels(){
  try {
    const resp = await fetch('/api/models');
    if (!resp.ok) throw new Error('Failed to fetch models');
    const json = await resp.json();

    // Accept several common response shapes: a bare array, {data: [...]}
    // (OpenAI style), or {models: [...]}.
    let list = [];
    if (Array.isArray(json)) {
      list = json;
    } else if (Array.isArray(json.data)) {
      list = json.data;
    } else if (Array.isArray(json.models)) {
      list = json.models;
    }

    // Normalize entries to non-empty strings.
    const opts = [];
    for (const it of list) {
      const id = (typeof it === 'string')
        ? it
        : (it.id || it.name || it.model || JSON.stringify(it));
      if (id) opts.push(id);
    }

    modelSel.innerHTML = '';
    if (!opts.length) throw new Error('no models');

    for (const id of opts) {
      const o = document.createElement('option');
      o.value = id;
      o.textContent = id;
      modelSel.appendChild(o);
    }
    // Restore the previously selected model, if any.
    const saved = localStorage.getItem('ai_chat_selected_model');
    if (saved) modelSel.value = saved;
  } catch (err) {
    // Fallback model set when the upstream list is unavailable.
    modelSel.innerHTML = '';
    for (const v of ['gpt-5-mini', 'gpt-4o-mini', 'gpt-4o']) {
      const o = document.createElement('option');
      o.value = v;
      o.textContent = v;
      modelSel.appendChild(o);
    }
    console.warn('Could not load models, using fallback', err);
  }
}

// Remember the user's model choice across page loads.
modelSel.addEventListener('change', ()=>{
  try{ localStorage.setItem('ai_chat_selected_model', modelSel.value); }catch(e){}
});

// Populate the model selector on startup.
loadModels();

// "New Chat" clears both the stored and displayed conversation.
if (newChatBtn) {
  newChatBtn.addEventListener('click', () => {
    messages = [];
    saveMessages();
    renderMessages();
    promptEl.focus();
  });
}

// Enter submits the form; Shift+Enter inserts a newline.
promptEl.addEventListener('keydown', (event) => {
  if (event.key !== 'Enter' || event.shiftKey) return;
  event.preventDefault();
  if (typeof form.requestSubmit === 'function') {
    form.requestSubmit();
  } else {
    // Older browsers: fall back to a synthetic submit event.
    form.dispatchEvent(new Event('submit', {cancelable:true}));
  }
});

// Submit handler: send the conversation to the proxy, show a placeholder
// while waiting, then replace it with the assistant's reply (or an error).
form.addEventListener('submit', async (e) => {
  e.preventDefault();
  const prompt = promptEl.value.trim();
  if (!prompt) return;

  // Disable form during request
  const submitBtn = form.querySelector('button[type="submit"]');
  if (submitBtn) submitBtn.disabled = true;
  promptEl.disabled = true;

  // add user message to conversation
  messages.push({role:'user', content: prompt});
  saveMessages();
  renderMessages();
  promptEl.value = '';

  // add temporary assistant placeholder (only for display, not sent to API)
  messages.push({role:'assistant', content: '...'});
  saveMessages();
  renderMessages();

  // Build messages to send (exclude the placeholder)
  const messagesToSend = messages.slice(0, -1);

  try {
    const resp = await fetch('/api/chat', {
      method: 'POST',
      headers: {'Content-Type':'application/json'},
      body: JSON.stringify({ model: modelSel.value, messages: messagesToSend })
    });
    if (!resp.ok) {
      const txt = await resp.text();
      // replace last assistant placeholder with error
      messages[messages.length-1].content = `Error: ${resp.status} ${txt}`;
      saveMessages();
      renderMessages();
      return;
    }

    const data = await resp.json();
    // extract assistant content from common API shapes
    let content = '';
    try {
      const choices = data.choices || [];
      if (choices.length && choices[0].message) content = choices[0].message.content || '';
      else if (choices.length && choices[0].delta) content = choices.map(c => c.delta?.content || '').join('');
      else if (data.text) content = data.text;
      else content = JSON.stringify(data);
    } catch (e) { content = JSON.stringify(data); }

    // replace placeholder with real assistant message
    messages[messages.length-1].content = content;
    saveMessages();
    renderMessages();
  } catch (err) {
    messages[messages.length-1].content = 'Network error';
    saveMessages();
    renderMessages();
    console.error(err);
  } finally {
    // Re-enable form
    if (submitBtn) submitBtn.disabled = false;
    promptEl.disabled = false;
    promptEl.focus();
  }
});
30 changes: 30 additions & 0 deletions client/web/index.html
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
<!doctype html>
<html lang="en">
<head>
  <meta charset="utf-8" />
  <meta name="viewport" content="width=device-width,initial-scale=1" />
  <title>AI Chat</title>
  <link rel="stylesheet" href="/static/styles.css">
</head>
<body>
  <main class="chat-root">
    <!-- Header: title, model selector (options populated by app.js), and a
         button that clears the stored conversation. -->
    <header>
      <h1>AI Chat</h1>
      <label for="model">Model:</label>
      <select id="model">
        <option>Loading models...</option>
      </select>
      <button id="new-chat" class="new-chat" title="Start a new conversation">New Chat</button>
    </header>

    <!-- Conversation transcript; message nodes are rendered by app.js. -->
    <section id="messages" class="messages"></section>

    <!-- Prompt input; submission is handled by app.js. -->
    <form id="chat-form" class="chat-form">
      <textarea id="prompt" placeholder="Type your message..." rows="2"></textarea>
      <button type="submit">Send</button>
    </form>
  </main>

  <script src="/static/app.js"></script>
</body>
</html>
24 changes: 24 additions & 0 deletions client/web/styles.css
Original file line number Diff line number Diff line change
@@ -0,0 +1,24 @@
/* Dark theme palette for the chat UI. */
:root{
  --bg:#0f1720;
  --panel:#0b1320;
  --muted:#9aa4b2;
  --accent:#3b82f6;
  --text:#e6eef6;
}
*{box-sizing:border-box}
/* Center the chat panel in the viewport. */
body{font-family:Inter,system-ui,Segoe UI,Roboto,Arial;background:var(--bg);color:var(--text);margin:0;min-height:100vh;display:flex;align-items:center;justify-content:center}
/* Main chat panel: fixed-ish width card with header / messages / form stacked vertically. */
.chat-root{width:720px;max-width:96vw;height:80vh;background:linear-gradient(180deg,#081223,#071827);border-radius:12px;padding:16px;display:flex;flex-direction:column;gap:12px;box-shadow:0 6px 30px rgba(0,0,0,.6)}
header{display:flex;align-items:center;gap:12px}
header h1{margin:0;font-size:1.1rem}
select{background:transparent;color:var(--text);border:1px solid rgba(255,255,255,0.06);padding:6px;border-radius:6px}
/* margin-left:auto pushes the New Chat button to the right edge of the header. */
.new-chat{margin-left:auto;background:transparent;border:1px solid rgba(255,255,255,0.06);color:var(--text);padding:6px 10px;border-radius:6px;cursor:pointer}
.new-chat:hover{background:rgba(255,255,255,0.02)}
/* Scrollable transcript area; flex:1 makes it absorb leftover panel height. */
.messages{flex:1;overflow:auto;padding:8px;border-radius:8px;background:rgba(255,255,255,0.02);display:flex;flex-direction:column;gap:8px}
.msg{padding:10px;border-radius:8px;max-width:80%;line-height:1.35;word-break:break-word}
/* User bubbles align right, assistant bubbles align left. */
.msg.user{background:linear-gradient(90deg,#0f1720,#102133);align-self:flex-end;border:1px solid rgba(255,255,255,0.03)}
.msg.ai{background:linear-gradient(90deg,#021224,#042133);align-self:flex-start;border:1px solid rgba(255,255,255,0.03)}
.chat-form{display:flex;gap:8px}
textarea{flex:1;padding:10px;border-radius:8px;border:1px solid rgba(255,255,255,0.04);background:transparent;color:var(--text);resize:none}
button{background:var(--accent);border:none;color:white;padding:10px 14px;border-radius:8px;cursor:pointer}
button:disabled{opacity:0.5;cursor:not-allowed}
/* Visible keyboard-focus outline for accessibility. */
textarea:focus-visible,select:focus-visible,button:focus-visible{outline:2px solid var(--accent);outline-offset:2px}
Loading