diff --git a/pyproject.toml b/pyproject.toml index 0e1399bb..dc2d661d 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -41,6 +41,9 @@ vault = [ apps = [ "websockets>=13.0" ] +dashboard = [ + "websockets>=13.0" +] wasm = [] dev = [ "numpy>=2.2.3", @@ -61,7 +64,7 @@ where = ["src"] include = ["mcp_cli*"] [tool.setuptools.package-data] -mcp_cli = ["server_config.json"] +mcp_cli = ["server_config.json", "dashboard/static/**/*"] [dependency-groups] dev = [ "colorama>=0.4.6", diff --git a/src/mcp_cli/chat/chat_context.py b/src/mcp_cli/chat/chat_context.py index 351858bd..78538b10 100644 --- a/src/mcp_cli/chat/chat_context.py +++ b/src/mcp_cli/chat/chat_context.py @@ -131,6 +131,9 @@ def __init__( # ToolProcessor back-reference (set by ToolProcessor.__init__) self.tool_processor: Any = None + # Dashboard bridge (set by chat_handler when --dashboard is active, else None) + self.dashboard_bridge: Any = None + # Tool state (filled during initialization) self.tools: list[ToolInfo] = [] self.internal_tools: list[ToolInfo] = [] diff --git a/src/mcp_cli/chat/chat_handler.py b/src/mcp_cli/chat/chat_handler.py index 3ac966ff..599435c1 100644 --- a/src/mcp_cli/chat/chat_handler.py +++ b/src/mcp_cli/chat/chat_handler.py @@ -49,6 +49,9 @@ async def handle_chat_mode( vm_budget: int = 128_000, health_interval: int = 0, enable_plan_tools: bool = False, + dashboard: bool = False, + no_browser: bool = False, + dashboard_port: int = 0, ) -> bool: """ Launch the interactive chat loop with streaming support. @@ -68,11 +71,15 @@ async def handle_chat_mode( max_turns: Maximum conversation turns before forcing exit (default: 100) model_manager: Pre-configured ModelManager (optional, creates new if None) runtime_config: Runtime configuration with timeout overrides (optional) + dashboard: Launch browser dashboard alongside chat (requires websockets). + no_browser: If True, print dashboard URL but do not open the browser. + dashboard_port: Preferred dashboard port (0 = auto-select). 
Returns: True if session ended normally, False on failure """ ui: ChatUIManager | None = None + ctx = None try: # Initialize configuration manager @@ -117,6 +124,39 @@ def on_progress(msg: str) -> None: # Update global context with initialized data await app_context.initialize() + # Start dashboard if requested + if dashboard: + try: + from mcp_cli.dashboard.launcher import launch_dashboard + from mcp_cli.dashboard.bridge import DashboardBridge + + _dash_server, _dash_port = await launch_dashboard( + dashboard_port, no_browser + ) + output.info(f"Dashboard: http://localhost:{_dash_port}") + ctx.dashboard_bridge = DashboardBridge(_dash_server) + + # Wire REQUEST_TOOL from browser → tool_manager, result back to browser + _bridge_ref = ctx.dashboard_bridge + + async def _dashboard_execute_tool( + tool_name: str, arguments: dict + ) -> None: + result = await tool_manager.execute_tool(tool_name, arguments) + await _bridge_ref.on_tool_result( + tool_name=tool_name, + server_name="", + result=result.result, + success=result.success, + error=result.error, + ) + + ctx.dashboard_bridge.set_tool_call_callback(_dashboard_execute_tool) + except ImportError: + output.warning( + "Dashboard requires 'websockets'. Install with: pip install mcp-cli[dashboard]" + ) + # Welcome banner # Clear screen unless in debug mode if logger.level > logging.DEBUG: @@ -184,6 +224,13 @@ def on_progress(msg: str) -> None: if ui: await _safe_cleanup(ui) + # Stop dashboard server if running + if ctx is not None and ctx.dashboard_bridge is not None: + try: + await ctx.dashboard_bridge.server.stop() + except Exception as exc: + logger.warning("Error stopping dashboard server: %s", exc) + # Close tool manager try: await tool_manager.close() @@ -258,6 +305,39 @@ async def handle_chat_mode_for_testing( gc.collect() +# Sentinel placed on the input queue when the reader catches a KeyboardInterrupt. +# Allows the main loop to handle Ctrl+C that originated inside get_user_input(). 
+_INTERRUPT = object() + + +async def _terminal_reader(ui: ChatUIManager, queue: asyncio.Queue) -> None: + """Background task: reads terminal input and puts it on the shared queue.""" + while True: + try: + msg = await ui.get_user_input() + await queue.put(msg) + except EOFError: + await queue.put("exit") + break + except asyncio.CancelledError: + # Re-raise only when this task itself was explicitly cancelled + # (reader_task.cancel()). If CancelledError came from get_user_input() + # directly (e.g. in tests), treat it as an interrupt signal instead. + if asyncio.current_task().cancelling(): # type: ignore[union-attr] + raise + await queue.put(_INTERRUPT) + except KeyboardInterrupt: + # Forward the interrupt to the main loop via the sentinel so the + # reader keeps running (allows subsequent input after Ctrl+C). + await queue.put(_INTERRUPT) + except Exception as exc: + logger.debug("Terminal reader error: %s", exc) + # Yield to the event loop each iteration so that task.cancel() can + # deliver CancelledError even when get_user_input() resolves instantly + # (e.g. in tests with AsyncMock). + await asyncio.sleep(0) + + async def _run_enhanced_chat_loop( ui: ChatUIManager, ctx: ChatContext, @@ -273,71 +353,118 @@ async def _run_enhanced_chat_loop( convo: Conversation processor with streaming support max_turns: Maximum conversation turns before forcing exit (default: 100) """ - while True: - try: - user_msg = await ui.get_user_input() + # Shared queue: terminal reader task and browser WebSocket both put messages here. + # This lets browser input arrive during the terminal prompt wait. + # Type is Any because we also put _INTERRUPT sentinel objects on the queue. + input_queue: asyncio.Queue = asyncio.Queue() - # Skip empty messages - if not user_msg: - continue + # Wire dashboard bridge so browser USER_MESSAGE/USER_COMMAND go into the queue. 
+ if bridge := getattr(ctx, "dashboard_bridge", None): + bridge.set_input_queue(input_queue) - # Handle plain exit commands (without slash) - if user_msg.lower() in ("exit", "quit"): - output.panel("Exiting chat mode.", style="red", title="Goodbye") - break + # Background task: reads terminal input and forwards to the queue. + reader_task = asyncio.create_task(_terminal_reader(ui, input_queue)) - # Handle slash commands - if user_msg.startswith("/"): - # Special handling for interrupt command during streaming - if user_msg.lower() in ("/interrupt", "/stop", "/cancel"): + try: + while True: + try: + user_msg = await input_queue.get() + + # Handle interrupt sentinel forwarded from _terminal_reader + if user_msg is _INTERRUPT: + logger.info( + "Interrupt forwarded from reader — streaming=%s, tools_running=%s", + ui.is_streaming_response, + ui.tools_running, + ) if ui.is_streaming_response: + output.warning("\nStreaming interrupted - type 'exit' to quit.") ui.interrupt_streaming() - output.warning("Streaming response interrupted.") - continue elif ui.tools_running: + output.warning( + "\nTool execution interrupted - type 'exit' to quit." 
+ ) ui._interrupt_now() - continue else: - output.info("Nothing to interrupt.") - continue + output.warning("\nInterrupted - type 'exit' to quit.") + continue - handled = await ui.handle_command(user_msg) - if ctx.exit_requested: - break - if handled: + # Skip empty messages + if not user_msg: continue - # Normal conversation turn with streaming support - if ui.verbose_mode: - ui.print_user_message(user_msg) - await ctx.add_user_message(user_msg) + # Handle plain exit commands (without slash) + if user_msg.lower() in ("exit", "quit"): + output.panel("Exiting chat mode.", style="red", title="Goodbye") + break - # Use the enhanced conversation processor that handles streaming - await convo.process_conversation(max_turns=max_turns) + # Handle slash commands + if user_msg.startswith("/"): + # Special handling for interrupt command during streaming + if user_msg.lower() in ("/interrupt", "/stop", "/cancel"): + if ui.is_streaming_response: + ui.interrupt_streaming() + output.warning("Streaming response interrupted.") + continue + elif ui.tools_running: + ui._interrupt_now() + continue + else: + output.info("Nothing to interrupt.") + continue + + handled = await ui.handle_command(user_msg) + if ctx.exit_requested: + break + if handled: + continue - except (KeyboardInterrupt, asyncio.CancelledError): - # Handle Ctrl+C gracefully (KeyboardInterrupt or asyncio.CancelledError in async code) - logger.info( - f"Interrupt in chat loop - streaming={ui.is_streaming_response}, tools_running={ui.tools_running}" - ) - if ui.is_streaming_response: - output.warning("\nStreaming interrupted - type 'exit' to quit.") - ui.interrupt_streaming() - elif ui.tools_running: - output.warning("\nTool execution interrupted - type 'exit' to quit.") - ui._interrupt_now() - else: - output.warning("\nInterrupted - type 'exit' to quit.") - # CRITICAL: Continue the loop instead of exiting - logger.info("Continuing chat loop after interrupt...") - continue - except EOFError: - output.panel("EOF detected 
- exiting chat.", style="red", title="Exit") - break - except Exception as exc: - logger.exception("Error processing message") - output.error(f"Error processing message: {exc}") - continue + # Normal conversation turn with streaming support + if ui.verbose_mode: + ui.print_user_message(user_msg) + await ctx.add_user_message(user_msg) + + # Dashboard: broadcast user message + if _dash := getattr(ctx, "dashboard_bridge", None): + try: + await _dash.on_message("user", user_msg) + except Exception as _e: + logger.debug("Dashboard on_message(user) error: %s", _e) + + # Use the enhanced conversation processor that handles streaming + await convo.process_conversation(max_turns=max_turns) + + except (KeyboardInterrupt, asyncio.CancelledError): + # Handle Ctrl+C gracefully + logger.info( + f"Interrupt in chat loop - streaming={ui.is_streaming_response}, tools_running={ui.tools_running}" + ) + if ui.is_streaming_response: + output.warning("\nStreaming interrupted - type 'exit' to quit.") + ui.interrupt_streaming() + elif ui.tools_running: + output.warning( + "\nTool execution interrupted - type 'exit' to quit." 
+ ) + ui._interrupt_now() + else: + output.warning("\nInterrupted - type 'exit' to quit.") + # CRITICAL: Continue the loop instead of exiting + logger.info("Continuing chat loop after interrupt...") + continue + except EOFError: + output.panel("EOF detected - exiting chat.", style="red", title="Exit") + break + except Exception as exc: + logger.exception("Error processing message") + output.error(f"Error processing message: {exc}") + continue + finally: + reader_task.cancel() + try: + await reader_task + except asyncio.CancelledError: + pass async def _safe_cleanup(ui: ChatUIManager) -> None: diff --git a/src/mcp_cli/chat/conversation.py b/src/mcp_cli/chat/conversation.py index 8510e84c..f9dc82c4 100644 --- a/src/mcp_cli/chat/conversation.py +++ b/src/mcp_cli/chat/conversation.py @@ -257,6 +257,24 @@ async def process_conversation(self, max_turns: int = 100): completion: CompletionResponse | None = None + # Dashboard: notify "thinking" + if _dash := getattr(self.context, "dashboard_bridge", None): + try: + await _dash.on_agent_state( + "thinking", + None, + turn_count, + getattr( + getattr(self.context, "token_tracker", None), + "total_tokens", + 0, + ), + ) + except Exception as _e: + logger.debug( + "Dashboard on_agent_state(thinking) error: %s", _e + ) + if supports_streaming: # Use streaming response handler try: @@ -310,6 +328,28 @@ async def process_conversation(self, max_turns: int = 100): f"Processing {len(tool_calls)} tool calls from LLM" ) + # Dashboard: notify "tool_calling" + if _dash := getattr(self.context, "dashboard_bridge", None): + try: + _first_tool = ( + tool_calls[0].function.name if tool_calls else None + ) + await _dash.on_agent_state( + "tool_calling", + _first_tool, + turn_count, + getattr( + getattr(self.context, "token_tracker", None), + "total_tokens", + 0, + ), + ) + except Exception as _e: + logger.debug( + "Dashboard on_agent_state(tool_calling) error: %s", + _e, + ) + # Check split budgets for each tool call type # Get name mapping 
for looking up actual tool names name_mapping = getattr(self.context, "tool_name_mapping", {}) @@ -572,6 +612,33 @@ async def process_conversation(self, max_turns: int = 100): # Include reasoning_content if present (for DeepSeek reasoner and similar models) await self.context.add_assistant_message(response_content) + # Dashboard: broadcast assistant message and idle state + if _dash := getattr(self.context, "dashboard_bridge", None): + try: + await _dash.on_message( + "assistant", + response_content, + streaming=bool(completion.streaming), + reasoning=reasoning_content + if reasoning_content + else None, + ) + await _dash.on_agent_state( + "idle", + None, + turn_count, + getattr( + getattr(self.context, "token_tracker", None), + "total_tokens", + 0, + ), + ) + except Exception as _e: + logger.debug( + "Dashboard on_message/on_agent_state(idle) error: %s", + _e, + ) + # Auto-save check if hasattr(self.context, "auto_save_check"): self.context.auto_save_check() @@ -649,7 +716,9 @@ async def _handle_streaming_completion( # Set the streaming handler reference in UI manager for interruption support streaming_handler = StreamingResponseHandler( - display=self.ui_manager.display, runtime_config=self.runtime_config + display=self.ui_manager.display, + runtime_config=self.runtime_config, + dashboard_bridge=getattr(self.context, "dashboard_bridge", None), ) self.ui_manager.streaming_handler = streaming_handler diff --git a/src/mcp_cli/chat/streaming_handler.py b/src/mcp_cli/chat/streaming_handler.py index 6ff3571f..aa746531 100644 --- a/src/mcp_cli/chat/streaming_handler.py +++ b/src/mcp_cli/chat/streaming_handler.py @@ -13,7 +13,10 @@ import json import time from enum import Enum -from typing import Any +from typing import TYPE_CHECKING, Any + +if TYPE_CHECKING: + from mcp_cli.dashboard.bridge import DashboardBridge from pydantic import BaseModel, Field @@ -226,18 +229,21 @@ def __init__( self, display: StreamingDisplayManager, runtime_config: RuntimeConfig | None = None, + 
dashboard_bridge: "DashboardBridge | None" = None, ): """Initialize handler. Args: display: The unified display manager (required, no fallback) runtime_config: Runtime configuration (optional, will load defaults if not provided) + dashboard_bridge: Optional DashboardBridge for live token streaming to browser. """ self.display = display self.tool_accumulator = ToolCallAccumulator() self._interrupted = False self._usage: dict[str, int] | None = None self.runtime_config = runtime_config or load_runtime_config() + self._dashboard_bridge = dashboard_bridge async def stream_response( self, @@ -319,6 +325,13 @@ async def stream_response( f"{len(tool_calls)} tools, {elapsed:.2f}s" ) + # Signal stream end to dashboard + if self._dashboard_bridge is not None: + try: + await self._dashboard_bridge.on_token("", done=True) + except Exception as _e: + logger.debug("Dashboard on_token(done) error: %s", _e) + return response.to_dict() except Exception as e: @@ -469,6 +482,27 @@ async def _process_chunk(self, raw_chunk: dict[str, Any]) -> None: # Use display to process chunk (normalizes format) await self.display.add_chunk(raw_chunk) + # Broadcast token to dashboard if connected + if self._dashboard_bridge is not None: + token: str | None = None + if "response" in raw_chunk: + token = raw_chunk["response"] + elif "content" in raw_chunk: + token = raw_chunk["content"] + elif "text" in raw_chunk: + token = raw_chunk["text"] + elif "delta" in raw_chunk and isinstance(raw_chunk["delta"], dict): + token = raw_chunk["delta"].get("content") + elif "choices" in raw_chunk and raw_chunk["choices"]: + delta = raw_chunk["choices"][0].get("delta", {}) + if isinstance(delta, dict): + token = delta.get("content") + if token: + try: + await self._dashboard_bridge.on_token(token) + except Exception as _e: + logger.debug("Dashboard on_token error: %s", _e) + # Extract tool calls if present if "tool_calls" in raw_chunk and raw_chunk["tool_calls"]: 
self.tool_accumulator.process_chunk_tool_calls(raw_chunk["tool_calls"]) diff --git a/src/mcp_cli/chat/tool_processor.py b/src/mcp_cli/chat/tool_processor.py index cb1ff8eb..732ed4a9 100644 --- a/src/mcp_cli/chat/tool_processor.py +++ b/src/mcp_cli/chat/tool_processor.py @@ -549,6 +549,42 @@ async def _on_tool_result(self, result: CTPToolResult) -> None: if success and self.tool_manager: await self._check_and_launch_app(actual_tool_name, result.result) + # Dashboard bridge — broadcast tool result to browser clients + if bridge := getattr(self.context, "dashboard_bridge", None): + server_name = getattr(self.context, "tool_to_server_map", {}).get( + actual_tool_name, "" + ) + duration_ms: int | None = None + if ( + meta is not None + and hasattr(meta, "start_time") + and meta.start_time is not None + ): + import time as _time + + duration_ms = int((_time.monotonic() - meta.start_time) * 1000) + _dash_result = ( + actual_result if success and result.result is not None else None + ) + meta_ui = ( + getattr(result.result, "structuredContent", None) + if (success and result.result is not None) + else None + ) + try: + await bridge.on_tool_result( + tool_name=actual_tool_name, + server_name=server_name, + result=_dash_result, + success=success, + error=result.error if not success else None, + duration_ms=duration_ms, + meta_ui=meta_ui, + call_id=result.id, + ) + except Exception as _bridge_exc: + logger.debug("Dashboard bridge on_tool_result error: %s", _bridge_exc) + # Maximum chars of page content to return from a single page_fault. # Prevents oversized pages from flooding the conversation context. 
_VM_MAX_PAGE_CONTENT_CHARS = 2000 diff --git a/src/mcp_cli/config/defaults.py b/src/mcp_cli/config/defaults.py index 2f034f17..0c8ca659 100644 --- a/src/mcp_cli/config/defaults.py +++ b/src/mcp_cli/config/defaults.py @@ -329,6 +329,23 @@ """Seconds before showing 'initialization timed out' in host page.""" +# ================================================================ +# Dashboard Defaults +# ================================================================ + +DEFAULT_DASHBOARD_PORT_START = 9120 +"""Starting port for the dashboard HTTP+WebSocket server.""" + +DEFAULT_DASHBOARD_AUTO_OPEN_BROWSER = True +"""Automatically open browser when dashboard is launched.""" + +DEFAULT_DASHBOARD_RECONNECT_INTERVAL = 5.0 +"""Seconds between WebSocket reconnect attempts.""" + +DEFAULT_DASHBOARD_LAYOUTS_FILE = "~/.config/mcp-cli/dashboard-layouts.json" +"""Path to user-saved dashboard layout configurations.""" + + # ================================================================ # Memory Scope Defaults (Tier 8) # ================================================================ diff --git a/src/mcp_cli/dashboard/__init__.py b/src/mcp_cli/dashboard/__init__.py new file mode 100644 index 00000000..8f8172bb --- /dev/null +++ b/src/mcp_cli/dashboard/__init__.py @@ -0,0 +1,15 @@ +# mcp_cli/dashboard/__init__.py +"""Dashboard shell for mcp-cli. + +Provides a browser-based tiled panel layout that displays MCP tool activity +and conversation state in real-time. Launched with ``mcp-cli chat --dashboard``. 
+ +Requires ``websockets`` — install with: pip install mcp-cli[dashboard] +""" + +from __future__ import annotations + +from mcp_cli.dashboard.bridge import DashboardBridge +from mcp_cli.dashboard.server import DashboardServer + +__all__ = ["DashboardBridge", "DashboardServer"] diff --git a/src/mcp_cli/dashboard/bridge.py b/src/mcp_cli/dashboard/bridge.py new file mode 100644 index 00000000..92afc0d5 --- /dev/null +++ b/src/mcp_cli/dashboard/bridge.py @@ -0,0 +1,278 @@ +# mcp_cli/dashboard/bridge.py +"""Dashboard bridge — the integration layer between mcp-cli's chat engine +and the browser dashboard. + +The bridge is stored on ``ChatContext.dashboard_bridge``. All hooks are no-ops +when the bridge is ``None`` (i.e., ``--dashboard`` was not set), so there is +zero performance impact on normal usage. + +Hook call sites: + tool_processor._on_tool_result() → bridge.on_tool_result() + conversation.process_user_input() → bridge.on_agent_state(), bridge.on_message() + streaming_handler._process_chunk() → bridge.on_token() (Phase 4) +""" + +from __future__ import annotations + +import asyncio +import datetime +import logging +from collections.abc import Awaitable, Callable +from typing import Any, Literal + +from mcp_cli.dashboard.server import DashboardServer + +logger = logging.getLogger(__name__) + +_PROTOCOL = "mcp-dashboard" +_VERSION = 1 + + +def _envelope(msg_type: str, payload: dict[str, Any]) -> dict[str, Any]: + return { + "protocol": _PROTOCOL, + "version": _VERSION, + "type": msg_type, + "payload": payload, + } + + +def _now() -> str: + return datetime.datetime.now(datetime.timezone.utc).isoformat() + + +class DashboardBridge: + """Routes chat-engine events to connected browser dashboard clients.""" + + def __init__(self, server: DashboardServer) -> None: + self.server = server + self._turn_number: int = 0 + # Set this to inject user messages from the browser back into the chat engine + self._user_message_callback: Callable[[str], None] | None = None + # Queue 
for injecting browser messages into the chat loop + self._input_queue: asyncio.Queue[str] | None = None + # Callback to execute a tool requested by the browser (REQUEST_TOOL) + self._tool_call_callback: ( + Callable[[str, dict[str, Any]], Awaitable[Any]] | None + ) = None + # View registry discovered from _meta.ui fields in tool results + self._view_registry: list[dict[str, Any]] = [] + self._seen_view_ids: set[str] = set() + # Wire server callbacks + server.on_browser_message = self._on_browser_message + server.on_client_connected = self._on_client_connected + + # ------------------------------------------------------------------ # + # Outbound hooks (chat engine → browser) # + # ------------------------------------------------------------------ # + + async def on_tool_result( + self, + tool_name: str, + server_name: str, + result: Any, + success: bool, + error: str | None = None, + duration_ms: int | None = None, + meta_ui: Any = None, + call_id: str | None = None, + ) -> None: + """Called after every tool execution completes.""" + payload: dict[str, Any] = { + "tool_name": tool_name, + "server_name": server_name, + "agent_id": "default", + "call_id": call_id or "", + "timestamp": _now(), + "duration_ms": duration_ms, + "result": self._serialise(result), + "error": error, + "success": success, + } + if meta_ui is not None: + payload["meta_ui"] = self._serialise(meta_ui) + # Discover new views declared in _meta.ui before broadcasting + if isinstance(meta_ui, dict) and meta_ui.get("view"): + await self._discover_view(meta_ui, server_name) + await self.server.broadcast(_envelope("TOOL_RESULT", payload)) + + async def on_agent_state( + self, + status: Literal["thinking", "tool_calling", "idle"], + current_tool: str | None = None, + turn_number: int | None = None, + tokens_used: int = 0, + ) -> None: + """Called when the agent's status changes.""" + if turn_number is not None: + self._turn_number = turn_number + payload: dict[str, Any] = { + "agent_id": "default", + 
"status": status, + "current_tool": current_tool, + "turn_number": self._turn_number, + "tokens_used": tokens_used, + "budget_remaining": None, + } + await self.server.broadcast(_envelope("AGENT_STATE", payload)) + + async def on_message( + self, + role: Literal["user", "assistant", "tool"], + content: str, + streaming: bool = False, + reasoning: str | None = None, + tool_calls: list[dict[str, Any]] | None = None, + ) -> None: + """Called when a complete conversation message is emitted.""" + payload: dict[str, Any] = { + "role": role, + "content": content, + "streaming": streaming, + "tool_calls": tool_calls, + "reasoning": reasoning, + } + await self.server.broadcast(_envelope("CONVERSATION_MESSAGE", payload)) + + async def on_token(self, token: str, done: bool = False) -> None: + """Called for each streamed LLM token (high-volume — only used by agent terminal).""" + payload: dict[str, Any] = {"token": token, "done": done} + await self.server.broadcast(_envelope("CONVERSATION_TOKEN", payload)) + + async def on_view_registry_update(self, views: list[dict[str, Any]]) -> None: + """Called when the set of available views changes (server connect/disconnect).""" + await self.server.broadcast({"type": "VIEW_REGISTRY", "views": views}) + + async def _discover_view(self, meta_ui: dict[str, Any], server_name: str) -> None: + """Register a new view from a _meta.ui block and broadcast VIEW_REGISTRY.""" + view_id: str = meta_ui["view"] + if view_id in self._seen_view_ids: + return + self._seen_view_ids.add(view_id) + entry: dict[str, Any] = { + "id": view_id, + "name": meta_ui.get("name") or view_id.replace(":", " ").title(), + "icon": meta_ui.get("icon") or "◻", + "source": server_name, + "type": meta_ui.get("type") or "tool", + "url": meta_ui.get("url") or f"/views/{view_id}.html", + } + self._view_registry.append(entry) + logger.debug( + "Dashboard: discovered new view %s from server %s", view_id, server_name + ) + await self.on_view_registry_update(self._view_registry) + + 
async def _on_client_connected(self, ws: Any) -> None: + """Send current VIEW_REGISTRY to a newly connected browser client.""" + if not self._view_registry: + return + import json as _json + + try: + await ws.send( + _json.dumps({"type": "VIEW_REGISTRY", "views": self._view_registry}) + ) + except Exception as exc: + logger.debug("Error sending VIEW_REGISTRY to new client: %s", exc) + + # ------------------------------------------------------------------ # + # Inbound messages (browser → chat engine) # + # ------------------------------------------------------------------ # + + def set_input_queue(self, queue: asyncio.Queue[str]) -> None: + """Register the asyncio.Queue that the chat loop reads from. + + Browser messages (USER_MESSAGE / USER_COMMAND) are put on this queue + so the chat loop picks them up alongside terminal input. + """ + self._input_queue = queue + + def set_tool_call_callback( + self, fn: Callable[[str, dict[str, Any]], Awaitable[Any]] + ) -> None: + """Register a callback to execute a browser-requested tool call. + + The callback receives (tool_name, arguments) and should execute the + tool. It is responsible for broadcasting the result back (e.g. by + calling ``on_tool_result()`` internally) or returning the result. 
+ """ + self._tool_call_callback = fn + + async def _on_browser_message(self, msg: dict[str, Any]) -> None: + msg_type = msg.get("type") + if msg_type in ("USER_MESSAGE", "USER_COMMAND"): + content = msg.get("content") or msg.get("command", "") + if content and self._input_queue is not None: + try: + await self._input_queue.put(content) + except Exception as exc: + logger.warning("Error queuing browser message: %s", exc) + elif content: + logger.debug( + "Dashboard received %s but no input queue registered", msg_type + ) + elif msg_type == "REQUEST_TOOL": + tool_name = msg.get("tool_name") or msg.get("tool") or "" + arguments = msg.get("arguments") or msg.get("args") or {} + call_id = msg.get("call_id") or "" + if tool_name and self._tool_call_callback is not None: + try: + await self._tool_call_callback(tool_name, arguments) + except Exception as exc: + logger.warning( + "Error executing REQUEST_TOOL %s: %s", tool_name, exc + ) + await self.on_tool_result( + tool_name=tool_name, + server_name="", + result=None, + success=False, + error=str(exc), + call_id=call_id, + ) + else: + logger.debug( + "Dashboard REQUEST_TOOL: %s (no callback registered)", + tool_name or "(missing tool_name)", + ) + elif msg_type == "USER_ACTION": + action = msg.get("action") or "" + content = msg.get("content") or "" + # Prefer explicit content; fall back to slash-command form of action name + text = content or (f"/{action}" if action else "") + if text and self._input_queue is not None: + try: + await self._input_queue.put(text) + except Exception as exc: + logger.warning("Error queuing USER_ACTION: %s", exc) + else: + logger.debug("Dashboard USER_ACTION not routed: %s", msg) + else: + logger.debug("Dashboard received unknown message type: %s", msg_type) + + # ------------------------------------------------------------------ # + # Helpers # + # ------------------------------------------------------------------ # + + @staticmethod + def _serialise(value: Any) -> Any: + """Convert a value 
to a JSON-safe representation.""" + if value is None or isinstance(value, (bool, int, float, str)): + return value + if isinstance(value, (list, tuple)): + return [DashboardBridge._serialise(v) for v in value] + if isinstance(value, dict): + return {k: DashboardBridge._serialise(v) for k, v in value.items()} + # Objects with a to_dict / model_dump / __dict__ + if hasattr(value, "to_dict"): + try: + return DashboardBridge._serialise(value.to_dict()) + except Exception: + pass + if hasattr(value, "model_dump"): + try: + return DashboardBridge._serialise(value.model_dump()) + except Exception: + pass + return str(value) diff --git a/src/mcp_cli/dashboard/config.py b/src/mcp_cli/dashboard/config.py new file mode 100644 index 00000000..74ac9b63 --- /dev/null +++ b/src/mcp_cli/dashboard/config.py @@ -0,0 +1,143 @@ +# mcp_cli/dashboard/config.py +"""Dashboard layout configuration: presets and user-saved layouts.""" + +from __future__ import annotations + +import json +import logging +from pathlib import Path +from typing import Any + +from mcp_cli.config.defaults import DEFAULT_DASHBOARD_LAYOUTS_FILE + +logger = logging.getLogger(__name__) + +# ------------------------------------------------------------------ # +# Built-in layout presets # +# ------------------------------------------------------------------ # + +LAYOUT_PRESETS: dict[str, dict[str, Any]] = { + "Minimal": { + "name": "Minimal", + "layout": { + "rows": [ + { + "height": "100%", + "columns": [ + {"width": "100%", "view": "builtin:agent-terminal"}, + ], + } + ] + }, + }, + "Standard": { + "name": "Standard", + "layout": { + "rows": [ + { + "height": "100%", + "columns": [ + {"width": "70%", "view": "builtin:agent-terminal"}, + {"width": "30%", "view": "builtin:activity-stream"}, + ], + } + ] + }, + }, + "Full": { + "name": "Full", + "layout": { + "rows": [ + { + "height": "60%", + "columns": [ + {"width": "65%", "view": "builtin:agent-terminal"}, + {"width": "35%", "view": "builtin:activity-stream"}, + ], + 
}, + { + "height": "40%", + "columns": [ + {"width": "100%", "view": "auto"}, + ], + }, + ] + }, + }, +} + +DEFAULT_PRESET_NAME = "Standard" + +# ------------------------------------------------------------------ # +# Built-in view registry (always available) # +# ------------------------------------------------------------------ # + +BUILTIN_VIEWS: list[dict[str, Any]] = [ + { + "id": "builtin:agent-terminal", + "name": "Agent Terminal", + "source": "builtin", + "icon": "terminal", + "type": "conversation", + "url": "/views/agent-terminal.html", + }, + { + "id": "builtin:activity-stream", + "name": "Activity Stream", + "source": "builtin", + "icon": "activity", + "type": "stream", + "url": "/views/activity-stream.html", + }, +] + +# ------------------------------------------------------------------ # +# User layout persistence # +# ------------------------------------------------------------------ # + + +def _layouts_path() -> Path: + return Path(DEFAULT_DASHBOARD_LAYOUTS_FILE).expanduser() + + +def load_user_layouts() -> list[dict[str, Any]]: + """Load saved user layouts from disk. Returns empty list on any error.""" + path = _layouts_path() + if not path.exists(): + return [] + try: + data = json.loads(path.read_text(encoding="utf-8")) + if isinstance(data, list): + return data + logger.warning("Dashboard layouts file has unexpected format: %s", path) + return [] + except Exception as exc: + logger.warning("Could not load dashboard layouts from %s: %s", path, exc) + return [] + + +def save_user_layout(name: str, layout: dict[str, Any]) -> None: + """Save a named layout to disk, replacing any existing layout with the same name.""" + layouts = load_user_layouts() + layouts = [lyt for lyt in layouts if lyt.get("name") != name] + layouts.append({"name": name, "layout": layout}) + _write_layouts(layouts) + + +def delete_user_layout(name: str) -> bool: + """Delete a named user layout. 
Returns True if it existed.""" + layouts = load_user_layouts() + filtered = [lyt for lyt in layouts if lyt.get("name") != name] + if len(filtered) == len(layouts): + return False + _write_layouts(filtered) + return True + + +def _write_layouts(layouts: list[dict[str, Any]]) -> None: + path = _layouts_path() + try: + path.parent.mkdir(parents=True, exist_ok=True) + path.write_text(json.dumps(layouts, indent=2), encoding="utf-8") + except Exception as exc: + logger.warning("Could not save dashboard layouts to %s: %s", path, exc) diff --git a/src/mcp_cli/dashboard/launcher.py b/src/mcp_cli/dashboard/launcher.py new file mode 100644 index 00000000..3bf49fc6 --- /dev/null +++ b/src/mcp_cli/dashboard/launcher.py @@ -0,0 +1,39 @@ +# mcp_cli/dashboard/launcher.py +"""Dashboard launch logic: port selection, HTTP server start, browser open.""" + +from __future__ import annotations + +import logging +import webbrowser + +from mcp_cli.dashboard.server import DashboardServer + +logger = logging.getLogger(__name__) + + +async def launch_dashboard( + port: int = 0, + no_browser: bool = False, +) -> tuple[DashboardServer, int]: + """Start the dashboard server and (optionally) open the browser. + + Args: + port: Preferred port (0 = auto-select starting at DEFAULT_DASHBOARD_PORT_START). + no_browser: If True, do not open the browser (URL is logged at INFO level). + + Returns: + (server, bound_port) — the started server and its actual port. 
+ """ + server = DashboardServer() + bound_port = await server.start(port) + + url = f"http://localhost:{bound_port}" + logger.info("Dashboard available at %s", url) + + if not no_browser: + try: + webbrowser.open(url) + except Exception as exc: + logger.warning("Could not open browser: %s", exc) + + return server, bound_port diff --git a/src/mcp_cli/dashboard/server.py b/src/mcp_cli/dashboard/server.py new file mode 100644 index 00000000..2be02f84 --- /dev/null +++ b/src/mcp_cli/dashboard/server.py @@ -0,0 +1,234 @@ +# mcp_cli/dashboard/server.py +"""Dashboard HTTP + WebSocket server. + +Serves the shell.html and static assets on a single port using the +``websockets`` library's ``process_request`` hook — the same pattern used +by ``mcp_cli.apps.host``. + +WebSocket endpoint: /ws +Static files: / → shell.html + /views/.html + /themes/themes.json +""" + +from __future__ import annotations + +import asyncio +import http +import json +import logging +import mimetypes +from collections.abc import Callable +from pathlib import Path +from typing import Any + +try: + import websockets + from websockets.asyncio.server import serve as ws_serve, ServerConnection + from websockets.http11 import Request, Response +except ImportError: # pragma: no cover + raise ImportError( + "Dashboard support requires websockets. 
Install with: pip install mcp-cli[dashboard]" + ) + +logger = logging.getLogger(__name__) + +_STATIC_DIR = Path(__file__).parent / "static" + + +class DashboardServer: + """Local HTTP + WebSocket server for the mcp-cli dashboard shell.""" + + def __init__(self) -> None: + self._clients: set[ServerConnection] = set() + self._server: Any = None + # Called when a browser user sends USER_MESSAGE / USER_ACTION / REQUEST_TOOL + self.on_browser_message: Callable[[dict[str, Any]], Any] | None = None + # Called when a new WebSocket client connects (before message loop starts) + self.on_client_connected: Callable[[Any], Any] | None = None + + # ------------------------------------------------------------------ # + # Public API # + # ------------------------------------------------------------------ # + + async def start(self, port: int = 0) -> int: + """Find an available port and start the server. Returns the bound port.""" + bound_port = await self._find_port(port) + + self._server = await ws_serve( + self._ws_handler, + "localhost", + bound_port, + process_request=self._process_request, + ) + logger.info("Dashboard server started on port %d", bound_port) + return bound_port + + async def stop(self) -> None: + """Shut down the server and close all client connections.""" + if self._server is not None: + self._server.close() + try: + await self._server.wait_closed() + except Exception as exc: + logger.debug("Error during dashboard server shutdown: %s", exc) + self._server = None + self._clients.clear() + + async def broadcast(self, msg: dict[str, Any]) -> None: + """Send a JSON message to all connected WebSocket clients.""" + if not self._clients: + return + payload = json.dumps(msg) + dead: list[ServerConnection] = [] + for client in list(self._clients): + try: + await client.send(payload) + except Exception: + dead.append(client) + for c in dead: + self._clients.discard(c) + + # ------------------------------------------------------------------ # + # WebSocket handler # + # 
------------------------------------------------------------------ # + + async def _ws_handler(self, ws: ServerConnection) -> None: + self._clients.add(ws) + logger.debug( + "Dashboard WebSocket client connected (%d total)", len(self._clients) + ) + if self.on_client_connected is not None: + try: + result = self.on_client_connected(ws) + if asyncio.iscoroutine(result): + await result + except Exception as exc: + logger.debug("Error in client connected callback: %s", exc) + try: + async for raw in ws: + if isinstance(raw, str): + await self._handle_browser_message(raw) + except websockets.ConnectionClosed: + pass + finally: + self._clients.discard(ws) + logger.debug( + "Dashboard WebSocket client disconnected (%d remain)", + len(self._clients), + ) + + async def _handle_browser_message(self, raw: str) -> None: + try: + msg = json.loads(raw) + except json.JSONDecodeError: + logger.warning("Dashboard received invalid JSON: %.200s", raw) + return + if self.on_browser_message: + try: + result = self.on_browser_message(msg) + if asyncio.iscoroutine(result): + await result + except Exception as exc: + logger.warning("Error in dashboard browser message handler: %s", exc) + + # ------------------------------------------------------------------ # + # HTTP request handler # + # ------------------------------------------------------------------ # + + def _process_request( + self, connection: ServerConnection, request: Request + ) -> Response | None: + path = request.path.split("?")[0] # strip query string + + # WebSocket upgrade — let the library handle it + if path == "/ws": + return None + + # Serve static files + file_path = self._resolve_static(path) + if file_path is None: + body = b"Not Found" + return Response( + http.HTTPStatus.NOT_FOUND, + "Not Found", + websockets.Headers({"Content-Length": str(len(body))}), + body, + ) + + try: + data = file_path.read_bytes() + except OSError as exc: + logger.warning("Could not read static file %s: %s", file_path, exc) + body = 
b"Internal Server Error" + return Response( + http.HTTPStatus.INTERNAL_SERVER_ERROR, + "Internal Server Error", + websockets.Headers({"Content-Length": str(len(body))}), + body, + ) + + mime, _ = mimetypes.guess_type(str(file_path)) + content_type = mime or "application/octet-stream" + if content_type.startswith("text/"): + content_type += "; charset=utf-8" + + return Response( + http.HTTPStatus.OK, + "OK", + websockets.Headers( + { + "Content-Type": content_type, + "Content-Length": str(len(data)), + "Cache-Control": "no-cache", + } + ), + data, + ) + + def _resolve_static(self, path: str) -> Path | None: + """Map a URL path to a file in the static directory. Returns None if not found.""" + if path in ("", "/"): + candidate = _STATIC_DIR / "shell.html" + elif path.startswith("/views/"): + name = path[len("/views/") :] + candidate = _STATIC_DIR / "views" / name + elif path.startswith("/themes/"): + name = path[len("/themes/") :] + candidate = _STATIC_DIR / "themes" / name + else: + # Reject unknown paths + return None + + # Safety: ensure the resolved path is inside _STATIC_DIR + try: + candidate.resolve().relative_to(_STATIC_DIR.resolve()) + except ValueError: + return None + + if candidate.exists() and candidate.is_file(): + return candidate + return None + + # ------------------------------------------------------------------ # + # Port selection # + # ------------------------------------------------------------------ # + + @staticmethod + async def _find_port(preferred: int) -> int: + from mcp_cli.config.defaults import DEFAULT_DASHBOARD_PORT_START + + start = preferred if preferred > 0 else DEFAULT_DASHBOARD_PORT_START + for port in range(start, start + 20): + try: + server = await asyncio.start_server( + lambda r, w: None, "localhost", port + ) + server.close() + await server.wait_closed() + return port + except OSError: + continue + raise RuntimeError( + f"Could not find an available port in range {start}–{start + 19}" + ) diff --git 
a/src/mcp_cli/dashboard/static/shell.html b/src/mcp_cli/dashboard/static/shell.html new file mode 100644 index 00000000..2065cf18 --- /dev/null +++ b/src/mcp_cli/dashboard/static/shell.html @@ -0,0 +1,1267 @@ + + + + + +mcp-cli dashboard + + + + + + + + +
+
+ + mcp-cli dashboard +
+ + + + + + +
+ + +
+ + +
+

Settings

+ + +
+ +
+ + +
+ + +
+ + + + diff --git a/src/mcp_cli/dashboard/static/themes/themes.json b/src/mcp_cli/dashboard/static/themes/themes.json new file mode 100644 index 00000000..e61a906e --- /dev/null +++ b/src/mcp_cli/dashboard/static/themes/themes.json @@ -0,0 +1,154 @@ +{ + "dark": { + "name": "dark", + "bg": "#1e1e2e", + "bg_surface": "#262637", + "bg_hover": "#2e2e42", + "fg": "#e0e0e0", + "fg_muted": "#888899", + "accent": "#7aa2f7", + "success": "#9ece6a", + "warning": "#e0af68", + "error": "#f7768e", + "info": "#7dcfff", + "border": "#3b3b52", + "font_mono": "'JetBrains Mono', 'Fira Code', 'Cascadia Code', monospace", + "font_ui": "'Inter', -apple-system, sans-serif", + "font_size": "13px", + "radius": "6px", + "spacing": "8px" + }, + "light": { + "name": "light", + "bg": "#ffffff", + "bg_surface": "#f5f5f5", + "bg_hover": "#ebebeb", + "fg": "#1a1a1a", + "fg_muted": "#666666", + "accent": "#2563eb", + "success": "#16a34a", + "warning": "#d97706", + "error": "#dc2626", + "info": "#0891b2", + "border": "#d1d5db", + "font_mono": "'JetBrains Mono', 'Fira Code', monospace", + "font_ui": "'Inter', -apple-system, sans-serif", + "font_size": "13px", + "radius": "6px", + "spacing": "8px" + }, + "monokai": { + "name": "monokai", + "bg": "#272822", + "bg_surface": "#2f3027", + "bg_hover": "#383830", + "fg": "#f8f8f2", + "fg_muted": "#75715e", + "accent": "#a6e22e", + "success": "#a6e22e", + "warning": "#e6db74", + "error": "#f92672", + "info": "#66d9e8", + "border": "#49483e", + "font_mono": "'JetBrains Mono', 'Fira Code', monospace", + "font_ui": "'Inter', -apple-system, sans-serif", + "font_size": "13px", + "radius": "6px", + "spacing": "8px" + }, + "dracula": { + "name": "dracula", + "bg": "#282a36", + "bg_surface": "#2e3047", + "bg_hover": "#383a4a", + "fg": "#f8f8f2", + "fg_muted": "#6272a4", + "accent": "#bd93f9", + "success": "#50fa7b", + "warning": "#ffb86c", + "error": "#ff5555", + "info": "#8be9fd", + "border": "#44475a", + "font_mono": "'JetBrains Mono', 'Fira Code', monospace", 
+ "font_ui": "'Inter', -apple-system, sans-serif", + "font_size": "13px", + "radius": "6px", + "spacing": "8px" + }, + "solarized": { + "name": "solarized", + "bg": "#002b36", + "bg_surface": "#073642", + "bg_hover": "#0d4451", + "fg": "#839496", + "fg_muted": "#586e75", + "accent": "#268bd2", + "success": "#859900", + "warning": "#b58900", + "error": "#dc322f", + "info": "#2aa198", + "border": "#073642", + "font_mono": "'JetBrains Mono', 'Fira Code', monospace", + "font_ui": "'Inter', -apple-system, sans-serif", + "font_size": "13px", + "radius": "6px", + "spacing": "8px" + }, + "terminal": { + "name": "terminal", + "bg": "#000000", + "bg_surface": "#0a0a0a", + "bg_hover": "#111111", + "fg": "#00ff00", + "fg_muted": "#007700", + "accent": "#00ff00", + "success": "#00ff00", + "warning": "#ffff00", + "error": "#ff0000", + "info": "#00ffff", + "border": "#003300", + "font_mono": "'Courier New', 'Terminal', monospace", + "font_ui": "'Courier New', monospace", + "font_size": "13px", + "radius": "0px", + "spacing": "8px" + }, + "minimal": { + "name": "minimal", + "bg": "#111111", + "bg_surface": "#1a1a1a", + "bg_hover": "#222222", + "fg": "#cccccc", + "fg_muted": "#555555", + "accent": "#aaaaaa", + "success": "#888888", + "warning": "#999999", + "error": "#bbbbbb", + "info": "#888888", + "border": "#2a2a2a", + "font_mono": "'JetBrains Mono', monospace", + "font_ui": "-apple-system, sans-serif", + "font_size": "13px", + "radius": "2px", + "spacing": "8px" + }, + "default": { + "name": "default", + "bg": "#1e1e2e", + "bg_surface": "#262637", + "bg_hover": "#2e2e42", + "fg": "#e0e0e0", + "fg_muted": "#888899", + "accent": "#7aa2f7", + "success": "#9ece6a", + "warning": "#e0af68", + "error": "#f7768e", + "info": "#7dcfff", + "border": "#3b3b52", + "font_mono": "'JetBrains Mono', 'Fira Code', 'Cascadia Code', monospace", + "font_ui": "'Inter', -apple-system, sans-serif", + "font_size": "13px", + "radius": "6px", + "spacing": "8px" + } +} diff --git 
a/src/mcp_cli/dashboard/static/views/activity-stream.html b/src/mcp_cli/dashboard/static/views/activity-stream.html new file mode 100644 index 00000000..df108384 --- /dev/null +++ b/src/mcp_cli/dashboard/static/views/activity-stream.html @@ -0,0 +1,383 @@ + + + + + +Activity Stream + + + + +
+ + + +
+ +
+
+ +
+ + + + diff --git a/src/mcp_cli/dashboard/static/views/agent-terminal.html b/src/mcp_cli/dashboard/static/views/agent-terminal.html new file mode 100644 index 00000000..893890ce --- /dev/null +++ b/src/mcp_cli/dashboard/static/views/agent-terminal.html @@ -0,0 +1,556 @@ + + + + + +Agent Terminal + + + + + + + + + + + + + + + + +
+
+ +
+ +
+ idle + +
+ +
+ + +
+ + + + diff --git a/src/mcp_cli/main.py b/src/mcp_cli/main.py index 99c41e3f..e35b0e02 100644 --- a/src/mcp_cli/main.py +++ b/src/mcp_cli/main.py @@ -150,6 +150,21 @@ def main_callback( "--plan-tools", help="Enable plan_create/plan_execute as LLM-callable tools for autonomous multi-step planning", ), + dashboard: bool = typer.Option( + False, + "--dashboard", + help="Open browser dashboard alongside chat (requires: pip install mcp-cli[dashboard])", + ), + no_browser: bool = typer.Option( + False, + "--no-browser", + help="Start dashboard server but do not auto-open the browser (prints URL instead)", + ), + dashboard_port: int = typer.Option( + 0, + "--dashboard-port", + help="Dashboard HTTP port (0 = auto-select starting at 9120)", + ), ) -> None: """MCP CLI - If no subcommand is given, start chat mode.""" @@ -377,6 +392,9 @@ async def _start_chat(): vm_budget=vm_budget, health_interval=health_interval, enable_plan_tools=plan_tools, + dashboard=dashboard, + no_browser=no_browser, + dashboard_port=dashboard_port, ) logger.debug(f"Chat mode completed with success: {success}") except asyncio.TimeoutError: @@ -481,6 +499,21 @@ def _chat_command( "--plan-tools", help="Enable plan_create/plan_execute as LLM-callable tools for autonomous multi-step planning", ), + dashboard: bool = typer.Option( + False, + "--dashboard", + help="Open browser dashboard alongside chat (requires: pip install mcp-cli[dashboard])", + ), + no_browser: bool = typer.Option( + False, + "--no-browser", + help="Start dashboard server but do not auto-open the browser (prints URL instead)", + ), + dashboard_port: int = typer.Option( + 0, + "--dashboard-port", + help="Dashboard HTTP port (0 = auto-select starting at 9120)", + ), ) -> None: """Start chat mode (same as default behavior without subcommand).""" # Re-configure logging based on user options @@ -616,6 +649,9 @@ async def _start_chat(): vm_budget=vm_budget, health_interval=health_interval, enable_plan_tools=plan_tools, + dashboard=dashboard, 
+ no_browser=no_browser, + dashboard_port=dashboard_port, ) logger.debug(f"Chat mode completed with success: {success}") except asyncio.TimeoutError: diff --git a/tests/dashboard/__init__.py b/tests/dashboard/__init__.py new file mode 100644 index 00000000..e69de29b diff --git a/tests/dashboard/test_bridge.py b/tests/dashboard/test_bridge.py new file mode 100644 index 00000000..a6528000 --- /dev/null +++ b/tests/dashboard/test_bridge.py @@ -0,0 +1,399 @@ +# tests/dashboard/test_bridge.py +"""Unit tests for DashboardBridge. + +These tests mock DashboardServer so no real WebSocket is needed. +""" + +from __future__ import annotations + +import asyncio +import json +from unittest.mock import AsyncMock, MagicMock + +import pytest + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _make_bridge(): + """Return a (bridge, mock_server) pair.""" + from mcp_cli.dashboard.bridge import DashboardBridge + from mcp_cli.dashboard.server import DashboardServer + + server = MagicMock(spec=DashboardServer) + server.broadcast = AsyncMock() + server.on_browser_message = None + server.on_client_connected = None + bridge = DashboardBridge(server) + return bridge, server + + +# --------------------------------------------------------------------------- +# _serialise +# --------------------------------------------------------------------------- + + +class TestSerialise: + def test_primitives(self): + from mcp_cli.dashboard.bridge import DashboardBridge + + assert DashboardBridge._serialise(None) is None + assert DashboardBridge._serialise(True) is True + assert DashboardBridge._serialise(42) == 42 + assert DashboardBridge._serialise(3.14) == 3.14 + assert DashboardBridge._serialise("hello") == "hello" + + def test_list(self): + from mcp_cli.dashboard.bridge import DashboardBridge + + assert DashboardBridge._serialise([1, "two", None]) == [1, "two", None] + + 
def test_dict(self): + from mcp_cli.dashboard.bridge import DashboardBridge + + result = DashboardBridge._serialise({"a": 1, "b": [2, 3]}) + assert result == {"a": 1, "b": [2, 3]} + + def test_object_with_to_dict(self): + from mcp_cli.dashboard.bridge import DashboardBridge + + obj = MagicMock() + obj.to_dict.return_value = {"key": "val"} + result = DashboardBridge._serialise(obj) + assert result == {"key": "val"} + + def test_object_with_model_dump(self): + from mcp_cli.dashboard.bridge import DashboardBridge + + obj = MagicMock(spec=[]) + del obj.to_dict + obj.model_dump = MagicMock(return_value={"x": 1}) + result = DashboardBridge._serialise(obj) + assert result == {"x": 1} + + def test_unknown_object_stringified(self): + from mcp_cli.dashboard.bridge import DashboardBridge + + class Custom: + def __str__(self): + return "custom-repr" + + result = DashboardBridge._serialise(Custom()) + assert result == "custom-repr" + + +# --------------------------------------------------------------------------- +# on_tool_result +# --------------------------------------------------------------------------- + + +class TestOnToolResult: + @pytest.mark.asyncio + async def test_broadcast_called(self): + bridge, server = _make_bridge() + await bridge.on_tool_result( + tool_name="test_tool", + server_name="test_server", + result={"rows": 3}, + success=True, + ) + server.broadcast.assert_awaited_once() + msg = server.broadcast.call_args[0][0] + assert msg["type"] == "TOOL_RESULT" + assert msg["payload"]["tool_name"] == "test_tool" + assert msg["payload"]["success"] is True + + @pytest.mark.asyncio + async def test_error_result(self): + bridge, server = _make_bridge() + await bridge.on_tool_result( + tool_name="bad_tool", + server_name="srv", + result=None, + success=False, + error="timeout", + ) + msg = server.broadcast.call_args[0][0] + assert msg["payload"]["success"] is False + assert msg["payload"]["error"] == "timeout" + + @pytest.mark.asyncio + async def 
test_meta_ui_included_in_payload(self): + bridge, server = _make_bridge() + meta_ui = {"view": "custom:stats"} + await bridge.on_tool_result( + tool_name="stats", + server_name="srv", + result=None, + success=True, + meta_ui=meta_ui, + ) + msg = server.broadcast.call_args[0][0] + assert "meta_ui" in msg["payload"] + + @pytest.mark.asyncio + async def test_view_discovered_from_meta_ui(self): + bridge, server = _make_bridge() + meta_ui = {"view": "custom:stats", "name": "Stats Dashboard"} + await bridge.on_tool_result( + tool_name="get_stats", + server_name="analytics", + result=None, + success=True, + meta_ui=meta_ui, + ) + # VIEW_REGISTRY broadcast should have happened (broadcast called twice total) + calls = server.broadcast.call_args_list + types = [c[0][0]["type"] for c in calls] + assert "VIEW_REGISTRY" in types + + @pytest.mark.asyncio + async def test_view_discovered_only_once(self): + bridge, server = _make_bridge() + meta_ui = {"view": "custom:stats"} + await bridge.on_tool_result("t", "s", None, True, meta_ui=meta_ui) + await bridge.on_tool_result("t", "s", None, True, meta_ui=meta_ui) + registry_broadcasts = [ + c + for c in server.broadcast.call_args_list + if c[0][0].get("type") == "VIEW_REGISTRY" + ] + assert len(registry_broadcasts) == 1 + + +# --------------------------------------------------------------------------- +# on_agent_state +# --------------------------------------------------------------------------- + + +class TestOnAgentState: + @pytest.mark.asyncio + async def test_thinking_state(self): + bridge, server = _make_bridge() + await bridge.on_agent_state("thinking", None, turn_number=1, tokens_used=500) + msg = server.broadcast.call_args[0][0] + assert msg["type"] == "AGENT_STATE" + assert msg["payload"]["status"] == "thinking" + assert msg["payload"]["turn_number"] == 1 + assert msg["payload"]["tokens_used"] == 500 + + @pytest.mark.asyncio + async def test_tool_calling_state(self): + bridge, server = _make_bridge() + await 
bridge.on_agent_state("tool_calling", "query_db") + msg = server.broadcast.call_args[0][0] + assert msg["payload"]["current_tool"] == "query_db" + + +# --------------------------------------------------------------------------- +# on_message / on_token +# --------------------------------------------------------------------------- + + +class TestOnMessage: + @pytest.mark.asyncio + async def test_user_message(self): + bridge, server = _make_bridge() + await bridge.on_message("user", "hello world") + msg = server.broadcast.call_args[0][0] + assert msg["type"] == "CONVERSATION_MESSAGE" + assert msg["payload"]["role"] == "user" + assert msg["payload"]["content"] == "hello world" + + @pytest.mark.asyncio + async def test_assistant_message_streaming(self): + bridge, server = _make_bridge() + await bridge.on_message("assistant", "some text", streaming=True) + msg = server.broadcast.call_args[0][0] + assert msg["payload"]["streaming"] is True + + @pytest.mark.asyncio + async def test_token(self): + bridge, server = _make_bridge() + await bridge.on_token("Hello") + msg = server.broadcast.call_args[0][0] + assert msg["type"] == "CONVERSATION_TOKEN" + assert msg["payload"]["token"] == "Hello" + assert msg["payload"]["done"] is False + + @pytest.mark.asyncio + async def test_token_done(self): + bridge, server = _make_bridge() + await bridge.on_token("", done=True) + msg = server.broadcast.call_args[0][0] + assert msg["payload"]["done"] is True + + +# --------------------------------------------------------------------------- +# Input queue / browser messages +# --------------------------------------------------------------------------- + + +class TestInputQueue: + @pytest.mark.asyncio + async def test_user_message_goes_to_queue(self): + bridge, server = _make_bridge() + q: asyncio.Queue[str] = asyncio.Queue() + bridge.set_input_queue(q) + await bridge._on_browser_message({"type": "USER_MESSAGE", "content": "ping"}) + assert q.get_nowait() == "ping" + + @pytest.mark.asyncio + 
async def test_user_command_goes_to_queue(self): + bridge, server = _make_bridge() + q: asyncio.Queue[str] = asyncio.Queue() + bridge.set_input_queue(q) + await bridge._on_browser_message({"type": "USER_COMMAND", "command": "/tools"}) + assert q.get_nowait() == "/tools" + + @pytest.mark.asyncio + async def test_no_queue_registered_logs_debug(self): + bridge, server = _make_bridge() + # Should not raise even without queue + await bridge._on_browser_message({"type": "USER_MESSAGE", "content": "hi"}) + + +# --------------------------------------------------------------------------- +# _on_client_connected +# --------------------------------------------------------------------------- + + +class TestOnClientConnected: + @pytest.mark.asyncio + async def test_sends_registry_when_views_exist(self): + bridge, server = _make_bridge() + bridge._view_registry = [{"id": "custom:stats", "name": "Stats"}] + ws = AsyncMock() + await bridge._on_client_connected(ws) + ws.send.assert_awaited_once() + sent = json.loads(ws.send.call_args[0][0]) + assert sent["type"] == "VIEW_REGISTRY" + assert len(sent["views"]) == 1 + + @pytest.mark.asyncio + async def test_no_send_when_registry_empty(self): + bridge, server = _make_bridge() + ws = AsyncMock() + await bridge._on_client_connected(ws) + ws.send.assert_not_awaited() + + +# --------------------------------------------------------------------------- +# REQUEST_TOOL +# --------------------------------------------------------------------------- + + +class TestRequestTool: + @pytest.mark.asyncio + async def test_callback_invoked(self): + bridge, server = _make_bridge() + calls: list = [] + + async def cb(tool_name, arguments): + calls.append((tool_name, arguments)) + + bridge.set_tool_call_callback(cb) + await bridge._on_browser_message( + { + "type": "REQUEST_TOOL", + "tool_name": "query_db", + "arguments": {"sql": "SELECT 1"}, + } + ) + assert calls == [("query_db", {"sql": "SELECT 1"})] + + @pytest.mark.asyncio + async def 
test_no_callback_logs_debug(self): + bridge, server = _make_bridge() + # No callback registered — should not raise + await bridge._on_browser_message( + {"type": "REQUEST_TOOL", "tool_name": "query_db", "arguments": {}} + ) + server.broadcast.assert_not_awaited() + + @pytest.mark.asyncio + async def test_missing_tool_name_logs_debug(self): + bridge, server = _make_bridge() + calls: list = [] + + async def cb(tool_name, arguments): # pragma: no cover + calls.append(tool_name) + + bridge.set_tool_call_callback(cb) + await bridge._on_browser_message({"type": "REQUEST_TOOL"}) + assert calls == [] + + @pytest.mark.asyncio + async def test_callback_error_broadcasts_failure(self): + bridge, server = _make_bridge() + + async def bad_cb(tool_name, arguments): + raise RuntimeError("tool exploded") + + bridge.set_tool_call_callback(bad_cb) + await bridge._on_browser_message( + {"type": "REQUEST_TOOL", "tool_name": "bad_tool", "arguments": {}} + ) + # Should have broadcast a TOOL_RESULT with success=False + calls = [c[0][0] for c in server.broadcast.call_args_list] + assert any( + c.get("type") == "TOOL_RESULT" and not c["payload"]["success"] + for c in calls + ) + + @pytest.mark.asyncio + async def test_alt_field_names_accepted(self): + """Browser may send 'tool' instead of 'tool_name', 'args' instead of 'arguments'.""" + bridge, server = _make_bridge() + calls: list = [] + + async def cb(tool_name, arguments): + calls.append((tool_name, arguments)) + + bridge.set_tool_call_callback(cb) + await bridge._on_browser_message( + {"type": "REQUEST_TOOL", "tool": "my_tool", "args": {"x": 1}} + ) + assert calls == [("my_tool", {"x": 1})] + + +# --------------------------------------------------------------------------- +# USER_ACTION +# --------------------------------------------------------------------------- + + +class TestUserAction: + @pytest.mark.asyncio + async def test_action_name_queued_as_slash_command(self): + bridge, server = _make_bridge() + q: asyncio.Queue = 
asyncio.Queue() + bridge.set_input_queue(q) + await bridge._on_browser_message({"type": "USER_ACTION", "action": "clear"}) + assert q.get_nowait() == "/clear" + + @pytest.mark.asyncio + async def test_content_preferred_over_action(self): + bridge, server = _make_bridge() + q: asyncio.Queue = asyncio.Queue() + bridge.set_input_queue(q) + await bridge._on_browser_message( + {"type": "USER_ACTION", "action": "something", "content": "hello"} + ) + assert q.get_nowait() == "hello" + + @pytest.mark.asyncio + async def test_no_queue_does_not_raise(self): + bridge, server = _make_bridge() + # No queue registered — should not raise + await bridge._on_browser_message({"type": "USER_ACTION", "action": "clear"}) + + @pytest.mark.asyncio + async def test_empty_action_and_content_not_queued(self): + bridge, server = _make_bridge() + q: asyncio.Queue = asyncio.Queue() + bridge.set_input_queue(q) + await bridge._on_browser_message({"type": "USER_ACTION"}) + assert q.empty() diff --git a/tests/dashboard/test_config.py b/tests/dashboard/test_config.py new file mode 100644 index 00000000..0d6dcdb4 --- /dev/null +++ b/tests/dashboard/test_config.py @@ -0,0 +1,186 @@ +# tests/dashboard/test_config.py +"""Unit tests for mcp_cli.dashboard.config.""" + +from __future__ import annotations + +import json +from pathlib import Path +from unittest.mock import patch + + +# --------------------------------------------------------------------------- +# Constants / presets +# --------------------------------------------------------------------------- + + +class TestConstants: + def test_layout_presets_contains_all_three(self): + from mcp_cli.dashboard.config import LAYOUT_PRESETS + + assert set(LAYOUT_PRESETS) == {"Minimal", "Standard", "Full"} + + def test_each_preset_has_layout_key(self): + from mcp_cli.dashboard.config import LAYOUT_PRESETS + + for name, preset in LAYOUT_PRESETS.items(): + assert "layout" in preset, f"Preset {name!r} missing 'layout'" + assert "rows" in preset["layout"], 
f"Preset {name!r} layout missing 'rows'" + + def test_builtin_views_ids(self): + from mcp_cli.dashboard.config import BUILTIN_VIEWS + + ids = {v["id"] for v in BUILTIN_VIEWS} + assert "builtin:agent-terminal" in ids + assert "builtin:activity-stream" in ids + + def test_default_preset_name_is_standard(self): + from mcp_cli.dashboard.config import DEFAULT_PRESET_NAME + + assert DEFAULT_PRESET_NAME == "Standard" + + +# --------------------------------------------------------------------------- +# load_user_layouts +# --------------------------------------------------------------------------- + + +class TestLoadUserLayouts: + def test_returns_empty_list_when_file_missing(self, tmp_path): + from mcp_cli.dashboard.config import load_user_layouts + + with patch( + "mcp_cli.dashboard.config._layouts_path", + return_value=tmp_path / "nope.json", + ): + result = load_user_layouts() + assert result == [] + + def test_returns_list_from_valid_file(self, tmp_path): + from mcp_cli.dashboard.config import load_user_layouts + + f = tmp_path / "layouts.json" + f.write_text(json.dumps([{"name": "A", "layout": {}}])) + with patch("mcp_cli.dashboard.config._layouts_path", return_value=f): + result = load_user_layouts() + assert result == [{"name": "A", "layout": {}}] + + def test_returns_empty_when_file_is_not_a_list(self, tmp_path): + from mcp_cli.dashboard.config import load_user_layouts + + f = tmp_path / "layouts.json" + f.write_text(json.dumps({"key": "value"})) + with patch("mcp_cli.dashboard.config._layouts_path", return_value=f): + result = load_user_layouts() + assert result == [] + + def test_returns_empty_on_invalid_json(self, tmp_path): + from mcp_cli.dashboard.config import load_user_layouts + + f = tmp_path / "layouts.json" + f.write_text("not json {{{{") + with patch("mcp_cli.dashboard.config._layouts_path", return_value=f): + result = load_user_layouts() + assert result == [] + + def test_returns_empty_on_read_error(self, tmp_path): + from mcp_cli.dashboard.config 
import load_user_layouts + + f = tmp_path / "layouts.json" + f.write_text("[]") # file exists so path.exists() is True + with patch("mcp_cli.dashboard.config._layouts_path", return_value=f): + with patch.object(Path, "read_text", side_effect=OSError("perm")): + result = load_user_layouts() + assert result == [] + + +# --------------------------------------------------------------------------- +# save_user_layout +# --------------------------------------------------------------------------- + + +class TestSaveUserLayout: + def test_saves_new_layout(self, tmp_path): + from mcp_cli.dashboard.config import load_user_layouts, save_user_layout + + f = tmp_path / "layouts.json" + with patch("mcp_cli.dashboard.config._layouts_path", return_value=f): + save_user_layout("MyLayout", {"rows": []}) + result = load_user_layouts() + + assert len(result) == 1 + assert result[0]["name"] == "MyLayout" + + def test_replaces_existing_layout_by_name(self, tmp_path): + from mcp_cli.dashboard.config import load_user_layouts, save_user_layout + + f = tmp_path / "layouts.json" + with patch("mcp_cli.dashboard.config._layouts_path", return_value=f): + save_user_layout("MyLayout", {"rows": [{"id": "old"}]}) + save_user_layout("MyLayout", {"rows": [{"id": "new"}]}) + result = load_user_layouts() + + assert len(result) == 1 + assert result[0]["layout"]["rows"][0]["id"] == "new" + + def test_multiple_layouts_coexist(self, tmp_path): + from mcp_cli.dashboard.config import load_user_layouts, save_user_layout + + f = tmp_path / "layouts.json" + with patch("mcp_cli.dashboard.config._layouts_path", return_value=f): + save_user_layout("Alpha", {}) + save_user_layout("Beta", {}) + result = load_user_layouts() + + names = {r["name"] for r in result} + assert names == {"Alpha", "Beta"} + + def test_write_error_does_not_raise(self, tmp_path): + from mcp_cli.dashboard.config import save_user_layout + + f = tmp_path / "layouts.json" + with patch("mcp_cli.dashboard.config._layouts_path", return_value=f): + 
with patch.object(Path, "write_text", side_effect=OSError("disk full")): + save_user_layout("MyLayout", {}) # should not raise + + +# --------------------------------------------------------------------------- +# delete_user_layout +# --------------------------------------------------------------------------- + + +class TestDeleteUserLayout: + def test_delete_existing_returns_true(self, tmp_path): + from mcp_cli.dashboard.config import delete_user_layout, save_user_layout + + f = tmp_path / "layouts.json" + with patch("mcp_cli.dashboard.config._layouts_path", return_value=f): + save_user_layout("ToDelete", {}) + result = delete_user_layout("ToDelete") + + assert result is True + + def test_delete_actually_removes_layout(self, tmp_path): + from mcp_cli.dashboard.config import ( + delete_user_layout, + load_user_layouts, + save_user_layout, + ) + + f = tmp_path / "layouts.json" + with patch("mcp_cli.dashboard.config._layouts_path", return_value=f): + save_user_layout("ToDelete", {}) + save_user_layout("Keep", {}) + delete_user_layout("ToDelete") + result = load_user_layouts() + + assert len(result) == 1 + assert result[0]["name"] == "Keep" + + def test_delete_nonexistent_returns_false(self, tmp_path): + from mcp_cli.dashboard.config import delete_user_layout + + f = tmp_path / "nope.json" + with patch("mcp_cli.dashboard.config._layouts_path", return_value=f): + result = delete_user_layout("Ghost") + + assert result is False diff --git a/tests/dashboard/test_integration.py b/tests/dashboard/test_integration.py new file mode 100644 index 00000000..164f3e59 --- /dev/null +++ b/tests/dashboard/test_integration.py @@ -0,0 +1,312 @@ +# tests/dashboard/test_integration.py +"""Integration tests for DashboardServer using a real WebSocket connection. + +These tests start an actual local server and connect to it with a real +WebSocket client. No mocks for the transport layer — this validates the +full server↔browser message path end-to-end. 
+ +Skipped automatically if the ``websockets`` package is not installed. +""" + +from __future__ import annotations + +import asyncio +import json + +import pytest + +# Skip the whole module when websockets is absent (e.g. minimal install) +websockets = pytest.importorskip("websockets") + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _ws_connect(port: int): + """Return an async context manager that connects to the WS endpoint.""" + from websockets.asyncio.client import connect + + return connect(f"ws://localhost:{port}/ws") + + +async def _recv(ws, timeout: float = 2.0): + """Receive one message with a timeout.""" + return await asyncio.wait_for(ws.recv(), timeout=timeout) + + +# --------------------------------------------------------------------------- +# Fixture +# --------------------------------------------------------------------------- + + +@pytest.fixture +async def live_server(): + """Start a real DashboardServer on a random port. 
Stops on teardown.""" + from mcp_cli.dashboard.server import DashboardServer + + srv = DashboardServer() + port = await srv.start(port=0) + yield srv, port + await srv.stop() + + +# --------------------------------------------------------------------------- +# Connection + broadcast +# --------------------------------------------------------------------------- + + +class TestServerConnect: + async def test_ws_connects(self, live_server): + _, port = live_server + async with _ws_connect(port): + pass # no exception = success + + async def test_broadcast_received_by_client(self, live_server): + srv, port = live_server + async with _ws_connect(port) as ws: + await asyncio.sleep(0.02) # let server register client + await srv.broadcast({"type": "PING", "val": 42}) + raw = await _recv(ws) + assert json.loads(raw) == {"type": "PING", "val": 42} + + async def test_broadcast_reaches_two_clients(self, live_server): + srv, port = live_server + async with _ws_connect(port) as ws1: + async with _ws_connect(port) as ws2: + await asyncio.sleep(0.02) + await srv.broadcast({"type": "MULTI"}) + r1 = await _recv(ws1) + r2 = await _recv(ws2) + assert json.loads(r1)["type"] == "MULTI" + assert json.loads(r2)["type"] == "MULTI" + + async def test_broadcast_after_client_disconnects_does_not_raise(self, live_server): + srv, port = live_server + async with _ws_connect(port): + pass # connect then immediately disconnect + await asyncio.sleep(0.05) + # Server should have cleaned up; broadcasting to no clients is fine + await srv.broadcast({"type": "AFTER_DISCONNECT"}) + + async def test_stop_closes_server(self, live_server): + srv, port = live_server + await srv.stop() + # Connecting after stop should fail + with pytest.raises(Exception): + async with _ws_connect(port): + pass + + +# --------------------------------------------------------------------------- +# Browser → server message routing +# --------------------------------------------------------------------------- + + +class 
TestBrowserMessages: + async def test_browser_message_triggers_callback(self, live_server): + srv, port = live_server + received: list[dict] = [] + + async def handler(msg): + received.append(msg) + + srv.on_browser_message = handler + + async with _ws_connect(port) as ws: + await ws.send(json.dumps({"type": "USER_MESSAGE", "content": "hi"})) + await asyncio.sleep(0.1) + + assert received == [{"type": "USER_MESSAGE", "content": "hi"}] + + async def test_invalid_json_ignored_server_stays_alive(self, live_server): + srv, port = live_server + + async with _ws_connect(port) as ws: + await ws.send("not valid json {{{{") + await asyncio.sleep(0.05) + + # Server still running — new connection succeeds + async with _ws_connect(port): + pass + + +# --------------------------------------------------------------------------- +# on_client_connected callback +# --------------------------------------------------------------------------- + + +class TestOnClientConnectedCallback: + async def test_callback_fired_on_connect(self, live_server): + srv, port = live_server + fired: list[int] = [] + + async def cb(ws): + fired.append(1) + + srv.on_client_connected = cb + + async with _ws_connect(port): + await asyncio.sleep(0.05) + + assert fired == [1] + + async def test_bridge_sends_view_registry_to_new_client(self, live_server): + """DashboardBridge wires on_client_connected; new clients get VIEW_REGISTRY.""" + from mcp_cli.dashboard.bridge import DashboardBridge + + srv, port = live_server + bridge = DashboardBridge(srv) + bridge._view_registry = [{"id": "stats:main", "name": "Stats"}] + + async with _ws_connect(port) as ws: + raw = await _recv(ws) + + msg = json.loads(raw) + assert msg["type"] == "VIEW_REGISTRY" + assert msg["views"][0]["id"] == "stats:main" + + +# --------------------------------------------------------------------------- +# DashboardBridge end-to-end message flow +# --------------------------------------------------------------------------- + + +class 
TestBridgeEndToEnd: + async def test_tool_result_reaches_browser(self, live_server): + from mcp_cli.dashboard.bridge import DashboardBridge + + srv, port = live_server + bridge = DashboardBridge(srv) + + async with _ws_connect(port) as ws: + await asyncio.sleep(0.02) + await bridge.on_tool_result( + tool_name="get_data", + server_name="sqlite", + result={"rows": 3}, + success=True, + ) + raw = await _recv(ws) + + msg = json.loads(raw) + assert msg["type"] == "TOOL_RESULT" + assert msg["payload"]["tool_name"] == "get_data" + assert msg["payload"]["success"] is True + + async def test_agent_state_reaches_browser(self, live_server): + from mcp_cli.dashboard.bridge import DashboardBridge + + srv, port = live_server + bridge = DashboardBridge(srv) + + async with _ws_connect(port) as ws: + await asyncio.sleep(0.02) + await bridge.on_agent_state("thinking", turn_number=3, tokens_used=1024) + raw = await _recv(ws) + + msg = json.loads(raw) + assert msg["type"] == "AGENT_STATE" + assert msg["payload"]["status"] == "thinking" + assert msg["payload"]["turn_number"] == 3 + + async def test_conversation_token_reaches_browser(self, live_server): + from mcp_cli.dashboard.bridge import DashboardBridge + + srv, port = live_server + bridge = DashboardBridge(srv) + + async with _ws_connect(port) as ws: + await asyncio.sleep(0.02) + await bridge.on_token("Hello", done=False) + raw = await _recv(ws) + + msg = json.loads(raw) + assert msg["type"] == "CONVERSATION_TOKEN" + assert msg["payload"]["token"] == "Hello" + assert msg["payload"]["done"] is False + + async def test_request_tool_invokes_callback(self, live_server): + from mcp_cli.dashboard.bridge import DashboardBridge + + srv, port = live_server + bridge = DashboardBridge(srv) + calls: list = [] + + async def cb(name, args): + calls.append((name, args)) + + bridge.set_tool_call_callback(cb) + + async with _ws_connect(port) as ws: + await ws.send( + json.dumps( + { + "type": "REQUEST_TOOL", + "tool_name": "query_db", + 
"arguments": {"sql": "SELECT 1"}, + } + ) + ) + await asyncio.sleep(0.1) + + assert calls == [("query_db", {"sql": "SELECT 1"})] + + async def test_user_action_queued_as_slash_command(self, live_server): + from mcp_cli.dashboard.bridge import DashboardBridge + + srv, port = live_server + bridge = DashboardBridge(srv) + q: asyncio.Queue = asyncio.Queue() + bridge.set_input_queue(q) + + async with _ws_connect(port) as ws: + await ws.send(json.dumps({"type": "USER_ACTION", "action": "clear"})) + await asyncio.sleep(0.1) + + assert q.get_nowait() == "/clear" + + async def test_user_action_content_queued_directly(self, live_server): + from mcp_cli.dashboard.bridge import DashboardBridge + + srv, port = live_server + bridge = DashboardBridge(srv) + q: asyncio.Queue = asyncio.Queue() + bridge.set_input_queue(q) + + async with _ws_connect(port) as ws: + await ws.send( + json.dumps({"type": "USER_ACTION", "content": "custom message"}) + ) + await asyncio.sleep(0.1) + + assert q.get_nowait() == "custom message" + + async def test_view_discovered_from_tool_result_meta_ui(self, live_server): + from mcp_cli.dashboard.bridge import DashboardBridge + + srv, port = live_server + bridge = DashboardBridge(srv) + + async with _ws_connect(port) as ws: + await asyncio.sleep(0.02) + await bridge.on_tool_result( + tool_name="get_stats", + server_name="analytics", + result=None, + success=True, + meta_ui={"view": "stats:main", "name": "Stats Dashboard"}, + ) + # Should receive two messages: TOOL_RESULT and VIEW_REGISTRY + msgs = [] + for _ in range(2): + raw = await _recv(ws) + msgs.append(json.loads(raw)) + + types = {m["type"] for m in msgs} + assert "TOOL_RESULT" in types + assert "VIEW_REGISTRY" in types + + registry_msg = next(m for m in msgs if m["type"] == "VIEW_REGISTRY") + assert any(v["id"] == "stats:main" for v in registry_msg["views"]) diff --git a/tests/dashboard/test_launcher.py b/tests/dashboard/test_launcher.py new file mode 100644 index 00000000..1d7680e3 --- /dev/null 
+++ b/tests/dashboard/test_launcher.py @@ -0,0 +1,81 @@ +# tests/dashboard/test_launcher.py +"""Unit tests for mcp_cli.dashboard.launcher.""" + +from __future__ import annotations + +from unittest.mock import AsyncMock, patch + +import pytest + + +def _mock_server(port: int = 9120): + """Return a mock DashboardServer whose start() resolves to the given port.""" + srv = AsyncMock() + srv.start = AsyncMock(return_value=port) + return srv + + +class TestLaunchDashboard: + @pytest.mark.asyncio + async def test_returns_server_and_port(self): + from mcp_cli.dashboard import launcher + + srv = _mock_server(9120) + with patch.object(launcher, "DashboardServer", return_value=srv): + server, port = await launcher.launch_dashboard() + + assert server is srv + assert port == 9120 + + @pytest.mark.asyncio + async def test_opens_browser_when_no_browser_false(self): + from mcp_cli.dashboard import launcher + + with patch.object(launcher, "DashboardServer", return_value=_mock_server(9120)): + with patch("webbrowser.open") as mock_open: + await launcher.launch_dashboard(no_browser=False) + + mock_open.assert_called_once_with("http://localhost:9120") + + @pytest.mark.asyncio + async def test_skips_browser_when_no_browser_true(self): + from mcp_cli.dashboard import launcher + + with patch.object(launcher, "DashboardServer", return_value=_mock_server(9120)): + with patch("webbrowser.open") as mock_open: + await launcher.launch_dashboard(no_browser=True) + + mock_open.assert_not_called() + + @pytest.mark.asyncio + async def test_webbrowser_exception_suppressed(self): + from mcp_cli.dashboard import launcher + + with patch.object(launcher, "DashboardServer", return_value=_mock_server(9120)): + with patch("webbrowser.open", side_effect=Exception("no display")): + server, port = await launcher.launch_dashboard(no_browser=False) + + assert port == 9120 # function completed successfully + + @pytest.mark.asyncio + async def test_preferred_port_passed_to_server(self): + from mcp_cli.dashboard 
import launcher + + srv = _mock_server(8080) + with patch.object(launcher, "DashboardServer", return_value=srv): + with patch("webbrowser.open"): + _, port = await launcher.launch_dashboard(port=8080) + + srv.start.assert_called_once_with(8080) + assert port == 8080 + + @pytest.mark.asyncio + async def test_default_port_zero_passed_to_server(self): + from mcp_cli.dashboard import launcher + + srv = _mock_server(9120) + with patch.object(launcher, "DashboardServer", return_value=srv): + with patch("webbrowser.open"): + await launcher.launch_dashboard() + + srv.start.assert_called_once_with(0) diff --git a/tests/dashboard/test_server.py b/tests/dashboard/test_server.py new file mode 100644 index 00000000..d5042e23 --- /dev/null +++ b/tests/dashboard/test_server.py @@ -0,0 +1,189 @@ +# tests/dashboard/test_server.py +"""Unit tests for DashboardServer. + +Avoids actually starting WebSocket servers; focuses on pure logic units. +""" + +from __future__ import annotations + +import asyncio +import json +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + + +# --------------------------------------------------------------------------- +# _resolve_static +# --------------------------------------------------------------------------- + + +class TestResolveStatic: + def _server(self): + from mcp_cli.dashboard.server import DashboardServer + + return DashboardServer() + + def test_root_resolves_to_shell_html(self): + s = self._server() + result = s._resolve_static("/") + assert result is not None + assert result.name == "shell.html" + + def test_empty_path_resolves_to_shell_html(self): + s = self._server() + result = s._resolve_static("") + assert result is not None + assert result.name == "shell.html" + + def test_view_path_resolves(self): + s = self._server() + result = s._resolve_static("/views/agent-terminal.html") + assert result is not None + assert result.name == "agent-terminal.html" + + def test_themes_path_resolves(self): + s = self._server() + 
result = s._resolve_static("/themes/themes.json") + assert result is not None + assert result.name == "themes.json" + + def test_unknown_path_returns_none(self): + s = self._server() + assert s._resolve_static("/unknown/path.js") is None + + def test_path_traversal_rejected(self): + s = self._server() + # Attempt directory traversal + result = s._resolve_static("/views/../../server.py") + assert result is None + + +# --------------------------------------------------------------------------- +# broadcast +# --------------------------------------------------------------------------- + + +class TestBroadcast: + @pytest.mark.asyncio + async def test_no_clients_no_error(self): + from mcp_cli.dashboard.server import DashboardServer + + s = DashboardServer() + # Should not raise + await s.broadcast({"type": "TEST"}) + + @pytest.mark.asyncio + async def test_sends_to_all_clients(self): + from mcp_cli.dashboard.server import DashboardServer + + s = DashboardServer() + c1, c2 = AsyncMock(), AsyncMock() + s._clients = {c1, c2} + await s.broadcast({"type": "PING"}) + c1.send.assert_awaited_once() + c2.send.assert_awaited_once() + payload = json.loads(c1.send.call_args[0][0]) + assert payload["type"] == "PING" + + @pytest.mark.asyncio + async def test_dead_client_removed(self): + from mcp_cli.dashboard.server import DashboardServer + + s = DashboardServer() + good = AsyncMock() + dead = AsyncMock() + dead.send.side_effect = Exception("connection closed") + s._clients = {good, dead} + await s.broadcast({"type": "TEST"}) + assert dead not in s._clients + assert good in s._clients + + +# --------------------------------------------------------------------------- +# _handle_browser_message +# --------------------------------------------------------------------------- + + +class TestHandleBrowserMessage: + @pytest.mark.asyncio + async def test_calls_callback(self): + from mcp_cli.dashboard.server import DashboardServer + + s = DashboardServer() + received = [] + + async def 
handler(msg): + received.append(msg) + + s.on_browser_message = handler + await s._handle_browser_message('{"type":"USER_MESSAGE","content":"hi"}') + assert received == [{"type": "USER_MESSAGE", "content": "hi"}] + + @pytest.mark.asyncio + async def test_invalid_json_ignored(self): + from mcp_cli.dashboard.server import DashboardServer + + s = DashboardServer() + # Should not raise + await s._handle_browser_message("not json {{{") + + @pytest.mark.asyncio + async def test_on_client_connected_called(self): + from mcp_cli.dashboard.server import DashboardServer + + s = DashboardServer() + connected = [] + + async def on_connect(ws): + connected.append(ws) + + s.on_client_connected = on_connect + + # Simulate _ws_handler by calling the on_client_connected part directly + fake_ws = AsyncMock() + fake_ws.__aiter__ = MagicMock(return_value=iter([])) + result = on_connect(fake_ws) + if asyncio.iscoroutine(result): + await result + assert fake_ws in connected + + +# --------------------------------------------------------------------------- +# _find_port +# --------------------------------------------------------------------------- + + +class TestFindPort: + @pytest.mark.asyncio + async def test_uses_preferred_port_when_available(self): + from mcp_cli.dashboard.server import DashboardServer + + mock_server = MagicMock() + mock_server.close = MagicMock() + mock_server.wait_closed = AsyncMock() + + with patch("asyncio.start_server", new_callable=AsyncMock) as mock_start: + mock_start.return_value = mock_server + port = await DashboardServer._find_port(19999) + assert port == 19999 + + @pytest.mark.asyncio + async def test_increments_when_port_in_use(self): + from mcp_cli.dashboard.server import DashboardServer + + call_count = 0 + + async def mock_start_server(handler, host, port, **kwargs): + nonlocal call_count + call_count += 1 + if call_count < 3: + raise OSError("Address in use") + srv = MagicMock() + srv.close = MagicMock() + srv.wait_closed = AsyncMock() + return srv 
+ + with patch("asyncio.start_server", side_effect=mock_start_server): + port = await DashboardServer._find_port(19990) + assert port == 19992 # skipped 19990 and 19991 diff --git a/tests/dashboard/test_server_extra.py b/tests/dashboard/test_server_extra.py new file mode 100644 index 00000000..e3df6203 --- /dev/null +++ b/tests/dashboard/test_server_extra.py @@ -0,0 +1,310 @@ +# tests/dashboard/test_server_extra.py +"""Additional unit tests for DashboardServer to push coverage above 90%. + +Covers: _process_request (HTTP responses), _ws_handler edge cases, +_handle_browser_message sync/error paths, stop() error handler, +_resolve_static missing-file path, _find_port exhaustion. +""" + +from __future__ import annotations + +import http +from pathlib import Path +from unittest.mock import AsyncMock, MagicMock, patch + +import pytest + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- + + +def _req(path: str): + r = MagicMock() + r.path = path + return r + + +class _FakeWS: + """Minimal async-iterable fake WebSocket for _ws_handler unit tests.""" + + def __init__(self, messages=()): + self._messages = list(messages) + + def __aiter__(self): + return self._gen() + + async def _gen(self): + for m in self._messages: + yield m + + +class _ConnectionClosedWS: + """Fake WebSocket whose iterator raises ConnectionClosed immediately.""" + + def __aiter__(self): + return self._gen() + + async def _gen(self): + from websockets.exceptions import ConnectionClosedOK + from websockets.frames import Close + + raise ConnectionClosedOK(rcvd=Close(1000, ""), sent=None) + yield # noqa: F704 — dead yield makes this an async generator + + +# --------------------------------------------------------------------------- +# _process_request — HTTP paths +# --------------------------------------------------------------------------- + + +class TestProcessRequest: + def 
# ---------------------------------------------------------------------------
# _process_request — HTTP paths
# ---------------------------------------------------------------------------


class TestProcessRequest:
    def test_ws_path_returns_none(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        # None means "upgrade to WebSocket" (no HTTP response)
        assert s._process_request(MagicMock(), _req("/ws")) is None

    def test_ws_path_with_query_returns_none(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        # Query string should be stripped; /ws?foo=bar still routes to WS
        assert s._process_request(MagicMock(), _req("/ws?foo=bar")) is None

    def test_unknown_path_returns_404(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        resp = s._process_request(MagicMock(), _req("/secret.php"))
        assert resp.status_code == http.HTTPStatus.NOT_FOUND

    def test_root_returns_200(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        resp = s._process_request(MagicMock(), _req("/"))
        assert resp.status_code == http.HTTPStatus.OK

    def test_root_with_query_string_returns_200(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        resp = s._process_request(MagicMock(), _req("/?v=1"))
        assert resp.status_code == http.HTTPStatus.OK

    def test_view_returns_200(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        resp = s._process_request(MagicMock(), _req("/views/agent-terminal.html"))
        assert resp.status_code == http.HTTPStatus.OK

    def test_oserror_reading_file_returns_500(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        mock_path = MagicMock(spec=Path)
        mock_path.read_bytes.side_effect = OSError("permission denied")
        with patch.object(s, "_resolve_static", return_value=mock_path):
            resp = s._process_request(MagicMock(), _req("/"))
        assert resp.status_code == http.HTTPStatus.INTERNAL_SERVER_ERROR

    def test_content_type_header_set(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        resp = s._process_request(MagicMock(), _req("/"))
        assert resp.status_code == http.HTTPStatus.OK
        # shell.html is HTML so content-type should contain text/html
        ct = resp.headers.get("Content-Type", "")
        assert "text/html" in ct

    def test_binary_content_type(self):
        """A file with unknown MIME type gets application/octet-stream."""
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        mock_path = MagicMock(spec=Path)
        mock_path.read_bytes.return_value = b"\x00\x01\x02"
        mock_path.__str__ = MagicMock(return_value="file.unknown")
        with patch.object(s, "_resolve_static", return_value=mock_path):
            resp = s._process_request(MagicMock(), _req("/"))
        assert resp.status_code == http.HTTPStatus.OK
        assert "application/octet-stream" in resp.headers.get("Content-Type", "")


# ---------------------------------------------------------------------------
# _resolve_static — missing file paths
# ---------------------------------------------------------------------------


class TestResolveStaticMissing:
    def test_nonexistent_view_file_returns_none(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        assert s._resolve_static("/views/totally-nonexistent-xyz.html") is None

    def test_nonexistent_theme_file_returns_none(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        assert s._resolve_static("/themes/nonexistent-xyz.json") is None


# ---------------------------------------------------------------------------
# _ws_handler edge cases
# ---------------------------------------------------------------------------


class TestWsHandlerEdgeCases:
    @pytest.mark.asyncio
    async def test_sync_callback_returns_none_no_await(self):
        """Sync on_client_connected callback (non-coroutine) takes the non-await branch."""
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        called = []

        def sync_cb(ws):
            called.append(ws)
            # Returns None (not a coroutine) — exercises the
            # asyncio.iscoroutine False branch

        s.on_client_connected = sync_cb
        ws = _FakeWS()
        await s._ws_handler(ws)
        assert called == [ws]

    @pytest.mark.asyncio
    async def test_sync_callback_exception_does_not_crash(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()

        def bad_cb(ws):
            raise RuntimeError("oops")

        s.on_client_connected = bad_cb
        ws = _FakeWS()
        await s._ws_handler(ws)  # should not raise

    @pytest.mark.asyncio
    async def test_bytes_message_not_forwarded_to_handler(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        received: list = []

        async def handler(msg):
            received.append(msg)

        s.on_browser_message = handler
        # One binary frame (must be dropped, no JSON parse attempt on bytes)
        # and one *valid-JSON* text frame (must be forwarded).
        # FIX: the previous assertion (`received == [{"type": None}] or
        # len(received) <= 1`) was vacuously true and its text frame was not
        # valid JSON, so the test could never fail; assert exact delivery.
        ws = _FakeWS([b"binary frame", '{"type": "TEXT_OK"}'])
        await s._ws_handler(ws)
        assert received == [{"type": "TEXT_OK"}]

    @pytest.mark.asyncio
    async def test_connection_closed_handled_gracefully(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        ws = _ConnectionClosedWS()
        await s._ws_handler(ws)  # ConnectionClosed should be caught, not raised

    @pytest.mark.asyncio
    async def test_client_removed_from_set_after_disconnect(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        ws = _FakeWS()
        await s._ws_handler(ws)
        assert ws not in s._clients


# ---------------------------------------------------------------------------
# _handle_browser_message — sync callback and error paths
# ---------------------------------------------------------------------------


class TestHandleBrowserMessageExtra:
    @pytest.mark.asyncio
    async def test_valid_json_no_callback_does_not_raise(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        # on_browser_message is None — should silently do nothing
        await s._handle_browser_message('{"type": "PING"}')

    @pytest.mark.asyncio
    async def test_sync_callback_called(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        received: list = []

        def sync_handler(msg):
            received.append(msg)

        s.on_browser_message = sync_handler
        await s._handle_browser_message('{"type": "SYNC_TEST"}')
        assert received == [{"type": "SYNC_TEST"}]

    @pytest.mark.asyncio
    async def test_callback_exception_logged_not_raised(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()

        def bad_handler(msg):
            raise RuntimeError("handler exploded")

        s.on_browser_message = bad_handler
        await s._handle_browser_message('{"type": "X"}')  # should not raise


# ---------------------------------------------------------------------------
# stop() error path
# ---------------------------------------------------------------------------


class TestStopEdgeCases:
    @pytest.mark.asyncio
    async def test_stop_suppresses_wait_closed_error(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        mock_srv = MagicMock()
        mock_srv.close = MagicMock()
        mock_srv.wait_closed = AsyncMock(side_effect=Exception("shutdown error"))
        s._server = mock_srv
        await s.stop()
        assert s._server is None

    @pytest.mark.asyncio
    async def test_stop_when_not_started(self):
        from mcp_cli.dashboard.server import DashboardServer

        s = DashboardServer()
        await s.stop()  # _server is None — should not raise


# ---------------------------------------------------------------------------
# _find_port — all ports exhausted
# ---------------------------------------------------------------------------


class TestFindPortExhausted:
    @pytest.mark.asyncio
    async def test_raises_runtime_error_when_all_ports_in_use(self):
        from mcp_cli.dashboard.server import DashboardServer

        async def always_fail(*args, **kwargs):
            raise OSError("Address in use")

        with patch("asyncio.start_server", side_effect=always_fail):
            with pytest.raises(RuntimeError, match="Could not find an available port"):
                await DashboardServer._find_port(19990)