diff --git a/CHANGELOG.md b/CHANGELOG.md index 17d9355..d7e3ec4 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1,3 +1,16 @@ +## [0.5.1] - 2025 + +Prototype an MCP server. + +### Added + +- Added Model Context Provider (MCP) server for agent-assisted code understanding + - analyzes spans to extract code context (file paths, line numbers, function names) + - provides a web UI for exploring traces + - includes WebSocket API for real-time updates + - offers REST APIs for querying traces, spans, and file associations + - has an embedded UI with go:embed + ## [0.5.0] - 2025-04-21 Fork otel-cli to @tobert's personal GitHub. diff --git a/README.md b/README.md index c377cfa..0b97f87 100644 --- a/README.md +++ b/README.md @@ -1,10 +1,8 @@ # otel-cli -[](https://github.com/packethost/standards/blob/master/experimental-statement.md) - -otel-cli is a command-line tool for sending OpenTelemetry traces. It is written in -Go and intended to be used in shell scripts and other places where the best option -available for sending spans is executing another program. +otel-cli is a command-line tool for sending and working with OpenTelemetry traces. +It is written in Go and intended to be used in shell scripts and other places where +the best option available for sending spans is executing another program. otel-cli can be added to your scripts with no configuration and it will run as normal but in non-recording mode and will emit no traces. This follows the OpenTelemetry community's @@ -56,6 +54,9 @@ go build # run this in its own terminal and try some of the commands below! otel-cli server tui +# or run as a Model Context Provider server (see MCP Server section below) +otel-cli server mcp --port 8080 + # configure otel-cli to talk the the local server spawned above export OTEL_EXPORTER_OTLP_ENDPOINT=localhost:4317 @@ -202,12 +203,15 @@ But, if you just want to quickly try out otel-cli, you can also just install it ### 3. 
A system to receive/inspect the traces you generate -otel-cli can run as a server and accept OTLP connections. It has two modes, one prints to your console -while the other writes to JSON files. +otel-cli can run as a server and accept OTLP connections. It has three modes: +- `tui` which prints traces to your console in a text-based UI +- `json` which writes traces to JSON files +- `mcp` which collects traces, analyzes code context, and provides a web UI and API (see MCP Server section below) ```shell otel-cli server tui otel-cli server json --dir $dir --timeout 60 --max-spans 5 +otel-cli server mcp --port 8080 --project-root $(pwd) ``` Many SaaS vendors accept OTLP these days so one option is to send directly to those. This is not @@ -252,6 +256,61 @@ otel-cli exec --service my-service --name "curl google" curl https://google.com This trace will be available at `localhost:8000`. +## MCP Server (Model Context Provider) + +The MCP Server is a specialized component designed to make otel trace data +available to coding agents like Claude Code. 
+ +### Starting the MCP Server + +```shell +# you can start with defaults, OTLP on 4317 and MCP/http on 8080 +otel-cli server mcp + +# or you can configure things exactly the way you want to +otel-cli server mcp \ + --port 9000 \ + --project-root $HOME/src/otel-cli \ + --retention 1h \ + --max-spans 500 \ + --allow-origins "http://localhost:3000,https://myapp.com" +``` + +### Using the MCP Server + +Once the server is running, direct your OpenTelemetry traces to it: + +```shell +# Send traces to the MCP server's OTLP listener (not the HTTP port) +export OTEL_EXPORTER_OTLP_ENDPOINT=localhost:4317 +otel-cli exec --service my-service --name "my-function" ./my-program +``` + +### Accessing the MCP Web UI and API + +- Web UI: http://localhost:8080/ (change the port if you used --port) +- WebSocket endpoint: ws://localhost:8080/ws +- REST API endpoints: + - `GET /api/traces` - List all traces + - `GET /api/trace/{id}` - Get a specific trace + - `GET /api/files` - List all files with spans + - `GET /api/file/{path}` - Get traces for a specific file + - `POST /api/spans/search` - Search across traces + +### Integrating with AI Tools + +The MCP Server is designed to provide structured context about your code's runtime behavior to coding agents. 
For example: + +```shell +# capture traces in memory with otel-cli +otel-cli server mcp & +# point applications at the otel-cli OTLP server +export OTEL_EXPORTER_OTLP_ENDPOINT=localhost:4317 +# generate a trace with the application +otel-cli exec --service my-service ./my-program +# now you can ask your agent questions about the traces +``` + ### SaaS tracing vendor We've provided Honeycomb, LightStep, and Elastic configurations that you could also use, diff --git a/go.mod b/go.mod index 1493b5a..712af19 100644 --- a/go.mod +++ b/go.mod @@ -6,6 +6,7 @@ toolchain go1.22.4 require ( github.com/google/go-cmp v0.6.0 + github.com/gorilla/websocket v1.5.3 github.com/pterm/pterm v0.12.79 github.com/spf13/cobra v1.8.0 go.opentelemetry.io/otel v1.27.0 diff --git a/go.sum b/go.sum index f04039f..9973e50 100644 --- a/go.sum +++ b/go.sum @@ -33,6 +33,8 @@ github.com/gookit/color v1.4.2/go.mod h1:fqRyamkC1W8uxl+lxCQxOT09l/vYfZ+QeiX3rKQ github.com/gookit/color v1.5.0/go.mod h1:43aQb+Zerm/BWh2GnrgOQm7ffz7tvQXEKV6BFMl7wAo= github.com/gookit/color v1.5.4 h1:FZmqs7XOyGgCAxmWyPslpiok1k05wmY3SJTytgvYFs0= github.com/gookit/color v1.5.4/go.mod h1:pZJOeOS8DM43rXbp4AZo1n9zCU2qjpcRko0b6/QJi9w= +github.com/gorilla/websocket v1.5.3 h1:saDtZ6Pbx/0u+bgYQ3q96pZgCzfhKXGPqt7kZ72aNNg= +github.com/gorilla/websocket v1.5.3/go.mod h1:YR8l580nyteQvAITg2hZ9XVh4b55+EU/adAjf1fMHhE= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0 h1:Wqo399gCIufwto+VfwCSvsnfGpF/w5E9CNxSwbpD6No= github.com/grpc-ecosystem/grpc-gateway/v2 v2.19.0/go.mod h1:qmOFXW2epJhM0qSnUUYpldc7gVz2KMQwJ/QYCDIa7XU= github.com/inconshreveable/mousetrap v1.1.0 h1:wN+x4NVGpMsO7ErUn/mUI3vEoE6Jt13X2s0bqwp9tc8= diff --git a/mcpserver/analyzer.go b/mcpserver/analyzer.go new file mode 100644 index 0000000..8799e89 --- /dev/null +++ b/mcpserver/analyzer.go @@ -0,0 +1,319 @@ +package mcpserver + +import ( + "bufio" + "fmt" + "os" + "path/filepath" + "regexp" + "strconv" + "strings" + + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +// 
CodeAnalyzer extracts code context from spans +type CodeAnalyzer struct { + projectRoot string +} + +// AnalyzeSpan examines a span and its events to extract code context +func (ca *CodeAnalyzer) AnalyzeSpan(span *tracepb.Span, events []*tracepb.Span_Event) []*CodeSpanContext { + contexts := []*CodeSpanContext{} + + // Extract file paths from span attributes + fileContexts := ca.extractFilePathsFromSpan(span) + contexts = append(contexts, fileContexts...) + + // Process each event for file contexts + for _, event := range events { + // Check for stack traces in event attributes + if event.Name == "exception" || strings.Contains(strings.ToLower(event.Name), "error") { + stackTraceContexts := ca.processStackTrace(event) + contexts = append(contexts, stackTraceContexts...) + } + + // Look for file operations in event attributes + fileOpContexts := ca.extractFilePathsFromEvent(event) + contexts = append(contexts, fileOpContexts...) + } + + return contexts +} + +// extractFilePathsFromSpan looks for file paths in span attributes +func (ca *CodeAnalyzer) extractFilePathsFromSpan(span *tracepb.Span) []*CodeSpanContext { + contexts := []*CodeSpanContext{} + + // Look for file paths in span attributes + for _, attr := range span.Attributes { + // Check for common file-related attribute keys + if strings.Contains(strings.ToLower(attr.Key), "file") || + strings.Contains(strings.ToLower(attr.Key), "path") { + + if val := attr.GetValue().GetStringValue(); val != "" { + if filepath.IsAbs(val) && ca.isFileInProject(val) { + context := &CodeSpanContext{ + FilePath: val, + SpanID: fmt.Sprintf("%x", span.SpanId), + TraceID: fmt.Sprintf("%x", span.TraceId), + Operation: inferOperationFromSpan(span), + } + + // Read the file to get code context + ca.enrichWithFileContents(context) + contexts = append(contexts, context) + } + } + } + } + + return contexts +} + +// extractFilePathsFromEvent looks for file paths in event attributes +func (ca *CodeAnalyzer) extractFilePathsFromEvent(event 
*tracepb.Span_Event) []*CodeSpanContext { + contexts := []*CodeSpanContext{} + + // Look through event attributes for file paths + for _, attr := range event.Attributes { + if strings.Contains(strings.ToLower(attr.Key), "file") || + strings.Contains(strings.ToLower(attr.Key), "path") { + + if val := attr.GetValue().GetStringValue(); val != "" { + if filepath.IsAbs(val) && ca.isFileInProject(val) { + context := &CodeSpanContext{ + FilePath: val, + Operation: inferOperationFromEvent(event), + } + + // Read the file to get code context + ca.enrichWithFileContents(context) + contexts = append(contexts, context) + } + } + } + } + + return contexts +} + +// isFileInProject checks if a file is within the project root +func (ca *CodeAnalyzer) isFileInProject(path string) bool { + if ca.projectRoot == "" { + return true + } + return strings.HasPrefix(path, ca.projectRoot) +} + +// processStackTrace extracts file paths and line numbers from stack traces +func (ca *CodeAnalyzer) processStackTrace(event *tracepb.Span_Event) []*CodeSpanContext { + contexts := []*CodeSpanContext{} + + // Look for stack trace in event attributes + var stackTrace string + for _, attr := range event.Attributes { + if strings.ToLower(attr.Key) == "stack_trace" || strings.ToLower(attr.Key) == "stacktrace" { + stackTrace = attr.GetValue().GetStringValue() + break + } + } + + if stackTrace == "" { + return contexts + } + + // Parse stack trace using regular expressions + // Different languages have different stack trace formats, but many contain file:line + fileLineRegex := regexp.MustCompile(`([\w\/\.\-]+\.[\w]+):(\d+)`) + matches := fileLineRegex.FindAllStringSubmatch(stackTrace, -1) + + for _, match := range matches { + if len(match) >= 3 { + filePath := match[1] + lineNum, _ := strconv.Atoi(match[2]) + + // Normalize the path and check if it's in the project + if !filepath.IsAbs(filePath) { + filePath = filepath.Join(ca.projectRoot, filePath) + } + + if ca.isFileInProject(filePath) { + context := 
&CodeSpanContext{ + FilePath: filePath, + LineStart: lineNum, + LineEnd: lineNum + 10, // Include a few lines of context + Operation: "exception", + } + + // Read the file to get code context + ca.enrichWithFileContents(context) + contexts = append(contexts, context) + } + } + } + + return contexts +} + +// enrichWithFileContents reads the source file to add code context +func (ca *CodeAnalyzer) enrichWithFileContents(context *CodeSpanContext) { + if context.FilePath == "" || !fileExists(context.FilePath) { + return + } + + file, err := os.Open(context.FilePath) + if err != nil { + return + } + defer file.Close() + + scanner := bufio.NewScanner(file) + lineNum := 0 + var codeLines []string + + if context.LineStart <= 0 { + context.LineStart = 1 // Default to start of file + } + + // If we don't know the end line, set a reasonable default + if context.LineEnd <= context.LineStart { + context.LineEnd = context.LineStart + 20 // Show about 20 lines + } + + // Cap max lines to avoid huge code snippets + if context.LineEnd - context.LineStart > 50 { + context.LineEnd = context.LineStart + 50 + } + + for scanner.Scan() { + lineNum++ + + // Capture a few lines before the start for context + if lineNum >= context.LineStart-5 && lineNum <= context.LineEnd { + codeLines = append(codeLines, fmt.Sprintf("%d: %s", lineNum, scanner.Text())) + } + + if lineNum > context.LineEnd { + break + } + } + + context.CodeSnapshot = strings.Join(codeLines, "\n") + + // Try to infer function name from code + ca.inferFunctionName(context) +} + +// inferFunctionName attempts to extract the function name from the code +func (ca *CodeAnalyzer) inferFunctionName(context *CodeSpanContext) { + if context.CodeSnapshot == "" { + return + } + + // Look for common function/method declarations based on file extension + ext := strings.ToLower(filepath.Ext(context.FilePath)) + + // Different regex for different languages + var funcRegex *regexp.Regexp + + switch ext { + case ".go": + funcRegex = 
regexp.MustCompile(`func\s+([A-Za-z0-9_]+)`) + case ".js", ".ts", ".jsx", ".tsx": + funcRegex = regexp.MustCompile(`function\s+([A-Za-z0-9_]+)|([A-Za-z0-9_]+)\s*=\s*\(.*\)\s*=>|([A-Za-z0-9_]+)\s*\(.*\)\s*{`) + case ".py": + funcRegex = regexp.MustCompile(`def\s+([A-Za-z0-9_]+)`) + case ".java", ".kt", ".c", ".cpp", ".cc": + funcRegex = regexp.MustCompile(`[A-Za-z0-9_<>]+\s+([A-Za-z0-9_]+)\s*\(`) + case ".rb": + funcRegex = regexp.MustCompile(`def\s+([A-Za-z0-9_]+)`) + default: + // Generic function-like pattern + funcRegex = regexp.MustCompile(`function\s+([A-Za-z0-9_]+)|\s+([A-Za-z0-9_]+)\s*\(`) + } + + matches := funcRegex.FindStringSubmatch(context.CodeSnapshot) + if len(matches) > 1 { + for i := 1; i < len(matches); i++ { + if matches[i] != "" { + context.FunctionName = matches[i] + return + } + } + } +} + +// inferOperationFromSpan guesses the operation type based on span attributes and name +func inferOperationFromSpan(span *tracepb.Span) string { + spanName := strings.ToLower(span.Name) + + if strings.Contains(spanName, "read") || strings.Contains(spanName, "get") { + return "read" + } + if strings.Contains(spanName, "write") || strings.Contains(spanName, "put") || + strings.Contains(spanName, "save") || strings.Contains(spanName, "update") { + return "write" + } + if strings.Contains(spanName, "exec") || strings.Contains(spanName, "run") { + return "exec" + } + if strings.Contains(spanName, "delete") || strings.Contains(spanName, "remove") { + return "delete" + } + + // Look at span attributes for more clues + for _, attr := range span.Attributes { + attrKey := strings.ToLower(attr.Key) + if strings.Contains(attrKey, "operation") || strings.Contains(attrKey, "action") { + if val := attr.GetValue().GetStringValue(); val != "" { + return strings.ToLower(val) + } + } + } + + return "unknown" +} + +// inferOperationFromEvent guesses the operation type based on event attributes and name +func inferOperationFromEvent(event *tracepb.Span_Event) string { + 
eventName := strings.ToLower(event.Name) + + if strings.Contains(eventName, "read") || strings.Contains(eventName, "get") { + return "read" + } + if strings.Contains(eventName, "write") || strings.Contains(eventName, "put") || + strings.Contains(eventName, "save") || strings.Contains(eventName, "update") { + return "write" + } + if strings.Contains(eventName, "exec") || strings.Contains(eventName, "run") { + return "exec" + } + if strings.Contains(eventName, "delete") || strings.Contains(eventName, "remove") { + return "delete" + } + if strings.Contains(eventName, "error") || strings.Contains(eventName, "exception") { + return "error" + } + + // Look at event attributes for more clues + for _, attr := range event.Attributes { + attrKey := strings.ToLower(attr.Key) + if strings.Contains(attrKey, "operation") || strings.Contains(attrKey, "action") { + if val := attr.GetValue().GetStringValue(); val != "" { + return strings.ToLower(val) + } + } + } + + return "unknown" +} + +// fileExists reports whether filename exists and is not a directory. +func fileExists(filename string) bool { + info, err := os.Stat(filename) + if err != nil { // any stat error (not just NotExist) means info may be nil + return false + } + return !info.IsDir() +} \ No newline at end of file diff --git a/mcpserver/handlers.go b/mcpserver/handlers.go new file mode 100644 index 0000000..a9b12de --- /dev/null +++ b/mcpserver/handlers.go @@ -0,0 +1,212 @@ +package mcpserver + +import ( + "encoding/json" + "log" + "net/http" + "strconv" + "strings" +) + +// handleWebsocket handles WebSocket connections +func (mcp *MCPServer) handleWebsocket(w http.ResponseWriter, r *http.Request) { + conn, err := mcp.upgrader.Upgrade(w, r, nil) + if err != nil { + log.Printf("Error upgrading to websocket: %v", err) + return + } + + // Register the new client + mcp.clientsLock.Lock() + mcp.clients[conn] = true + mcp.clientsLock.Unlock() + + // Handle client disconnection + go func() { + defer conn.Close() + + // Wait for client to disconnect or send a message + for { + _, _, 
err := conn.ReadMessage() + if err != nil { + mcp.clientsLock.Lock() + delete(mcp.clients, conn) + mcp.clientsLock.Unlock() + break + } + } + }() + + // Send initial event to confirm connection + message := WebSocketMessage{ + Type: "connected", + Message: "Connected to MCP server", + } + + conn.WriteJSON(message) +} + +// handleListTraces lists all traces in the store +func (mcp *MCPServer) handleListTraces(w http.ResponseWriter, r *http.Request) { + traces := mcp.store.ListTraces() + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(traces) +} + +// handleGetTrace returns a specific trace by ID +func (mcp *MCPServer) handleGetTrace(w http.ResponseWriter, r *http.Request) { + traceID := strings.TrimPrefix(r.URL.Path, "/api/trace/") + + trace := mcp.store.GetTrace(traceID) + if trace == nil { + http.Error(w, "Trace not found", http.StatusNotFound) + return + } + + // Create a response object with selected information + response := TraceResponse{ + TraceID: trace.TraceID, + StartTime: trace.StartTime, + EndTime: trace.EndTime, + Status: trace.Status, + ErrorMessage: trace.ErrorMessage, + Files: trace.Files, + Spans: make(map[string]SpanResponse), + } + + // Add simplified span data + for id, span := range trace.Spans { + spanResp := SpanResponse{ + Name: span.SpanProto.Name, + ParentID: span.ParentID, + Children: span.Children, + StartTime: span.StartTime, + Duration: span.Duration.Milliseconds(), + } + + // Add file contexts for this span + for _, ctx := range span.FileContexts { + ctxResp := FileContextResponse{ + FilePath: ctx.FilePath, + FunctionName: ctx.FunctionName, + Operation: ctx.Operation, + LineStart: ctx.LineStart, + LineEnd: ctx.LineEnd, + CodeSnippet: ctx.CodeSnapshot, + } + spanResp.FileContexts = append(spanResp.FileContexts, ctxResp) + } + + response.Spans[id] = spanResp + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(response) +} + +// handleListFiles returns all files that have 
associated spans +func (mcp *MCPServer) handleListFiles(w http.ResponseWriter, r *http.Request) { + files := mcp.store.ListFiles() + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(files) +} + +// handleGetFileTraces returns all traces associated with a specific file +func (mcp *MCPServer) handleGetFileTraces(w http.ResponseWriter, r *http.Request) { + filePath := strings.TrimPrefix(r.URL.Path, "/api/file/") + + fileTraces := mcp.store.GetFileTraces(filePath) + if len(fileTraces) == 0 { + http.Error(w, "File not found in any traces", http.StatusNotFound) + return + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(fileTraces) +} + +// handleSearchSpans handles the search API endpoint +func (mcp *MCPServer) handleSearchSpans(w http.ResponseWriter, r *http.Request) { + // Only accept POST requests + if r.Method != http.MethodPost { + http.Error(w, "Method not allowed", http.StatusMethodNotAllowed) + return + } + + var req SearchRequest + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + http.Error(w, "Invalid request body", http.StatusBadRequest) + return + } + + // Process search request + results := mcp.store.SearchTraces(req) + + // Generate a summary if requested + if req.Query != "" { + summary := generateSearchSummary(req, results) + results.Summary = summary + } + + w.Header().Set("Content-Type", "application/json") + json.NewEncoder(w).Encode(results) +} + +// generateSearchSummary creates a textual summary of the search results +func generateSearchSummary(req SearchRequest, results *SearchResponse) string { + if len(results.Traces) == 0 { + return "No traces found matching your query." 
+ } + + // Basic summary + summary := "" + + if req.ErrorsOnly { + summary += "Found " + pluralize(len(results.Traces), "trace", "traces") + " with errors" + } else { + summary += "Found " + pluralize(len(results.Traces), "trace", "traces") + } + + // Add file context if available + if len(req.Files) > 0 { + if len(req.Files) == 1 { + summary += " involving the file " + req.Files[0] + } else { + summary += " involving " + pluralize(len(req.Files), "file", "files") + } + } + + // Add time range if available + if req.TimeRange != "" { + summary += " in the last " + req.TimeRange + } + + summary += "." + + // Add error statistics + var totalErrors int + for _, trace := range results.Traces { + totalErrors += trace.ErrorCount + } + + if totalErrors > 0 { + summary += " Total of " + pluralize(totalErrors, "error", "errors") + " detected." + } + + // Add file insights + if len(results.FileInsights) > 0 { + summary += " The analysis covers " + pluralize(len(results.FileInsights), "file", "files") + "." + } + + return summary +} + +// pluralize is a helper to correctly pluralize words +func pluralize(count int, singular, plural string) string { + if count == 1 { + return "1 " + singular + } + return strconv.Itoa(count) + " " + plural +} \ No newline at end of file diff --git a/mcpserver/mcpserver.go b/mcpserver/mcpserver.go new file mode 100644 index 0000000..bdbbb63 --- /dev/null +++ b/mcpserver/mcpserver.go @@ -0,0 +1,183 @@ +// mcpserver is a Model Context Provider server that collects and serves +// OpenTelemetry trace data in a format optimized for consumption by coding +// agents like Claude. 
+package mcpserver + +import ( + "context" + "encoding/hex" + "encoding/json" + "fmt" + "log" + "net/http" + "sync" + "time" + + "github.com/gorilla/websocket" + tracepb "go.opentelemetry.io/proto/otlp/trace/v1" +) + +// MCPConfig holds the configuration for the MCP server +type MCPConfig struct { + Port int + ProjectRoot string + MaxSpans int + RetentionTime time.Duration + AllowedOrigins []string +} + +// MCPServer is the main server struct for the MCP server +type MCPServer struct { + store *TraceStore + analyzer *CodeAnalyzer + httpServer *http.Server + upgrader websocket.Upgrader + clients map[*websocket.Conn]bool + clientsLock sync.Mutex + config *MCPConfig +} + +// TraceStore manages traces with added context for AI consumption +type TraceStore struct { + traces map[string]*TraceData // traceID -> trace data + spansByFile map[string][]*CodeSpanContext // file path -> spans touching this file + lock sync.RWMutex + maxSpans int + retention time.Duration +} + +// TraceData holds complete trace information +type TraceData struct { + TraceID string + RootSpan *SpanData + Spans map[string]*SpanData // spanID -> span data + Files map[string]bool // files touched by this trace + StartTime time.Time + EndTime time.Time + Status string + ErrorMessage string +} + +// SpanData enriches raw spans with context +type SpanData struct { + SpanProto *tracepb.Span + Events []*tracepb.Span_Event + ParentID string + Children []string + FileContexts []*CodeSpanContext + StartTime time.Time + EndTime time.Time + Duration time.Duration +} + +// CodeSpanContext links spans to source code +type CodeSpanContext struct { + FilePath string + LineStart int + LineEnd int + FunctionName string + SpanID string + TraceID string + Operation string // "read", "write", "exec", etc. 
+ CodeSnapshot string // The actual code relevant to this span +} + +// NewMCPServer creates a new MCP server with the given configuration +func NewMCPServer(config *MCPConfig) *MCPServer { + store := &TraceStore{ + traces: make(map[string]*TraceData), + spansByFile: make(map[string][]*CodeSpanContext), + maxSpans: config.MaxSpans, + retention: config.RetentionTime, + } + + analyzer := &CodeAnalyzer{ + projectRoot: config.ProjectRoot, + } + + return &MCPServer{ + store: store, + analyzer: analyzer, + clients: make(map[*websocket.Conn]bool), + config: config, + upgrader: websocket.Upgrader{ + CheckOrigin: func(r *http.Request) bool { + for _, origin := range config.AllowedOrigins { + if origin == "*" { + return true + } + if origin == r.Header.Get("Origin") { + return true + } + } + return false + }, + }, + } +} + +// HandleSpan processes incoming spans from the OTLP server +func (mcp *MCPServer) HandleSpan(ctx context.Context, span *tracepb.Span, events []*tracepb.Span_Event, + rs *tracepb.ResourceSpans, headers map[string]string, meta map[string]string) bool { + + spanData := &SpanData{ + SpanProto: span, + Events: events, + ParentID: hex.EncodeToString(span.ParentSpanId), + StartTime: time.Unix(0, int64(span.StartTimeUnixNano)), + EndTime: time.Unix(0, int64(span.EndTimeUnixNano)), + } + spanData.Duration = spanData.EndTime.Sub(spanData.StartTime) + spanData.FileContexts = mcp.analyzer.AnalyzeSpan(span, events) + + mcp.store.AddSpan(spanData) + + mcp.notifyClients(spanData) + + return false // don't stop server +} + +// StartMCPServer starts the MCP HTTP server. Blocks forever. 
+func (mcp *MCPServer) StartMCPServer() { + mux := http.NewServeMux() + + mux.HandleFunc("/ws", mcp.handleWebsocket) + + mux.HandleFunc("/api/traces", mcp.handleListTraces) + mux.HandleFunc("/api/trace/", mcp.handleGetTrace) + mux.HandleFunc("/api/files", mcp.handleListFiles) + mux.HandleFunc("/api/file/", mcp.handleGetFileTraces) + mux.HandleFunc("/api/spans/search", mcp.handleSearchSpans) + + mux.Handle("/", GetUIHandler()) + + mcp.httpServer = &http.Server{ + Addr: fmt.Sprintf(":%d", mcp.config.Port), + Handler: mux, + } + + if err := mcp.httpServer.ListenAndServe(); err != http.ErrServerClosed { + log.Fatalf("error starting MCP server: %v", err) + } +} + +// notifyClients sends a message to all connected WebSocket clients +func (mcp *MCPServer) notifyClients(spanData *SpanData) { + message := WebSocketMessage{ + Type: "new_span", + SpanID: hex.EncodeToString(spanData.SpanProto.SpanId), + TraceID: hex.EncodeToString(spanData.SpanProto.TraceId), + } + + messageJSON, _ := json.Marshal(message) + + mcp.clientsLock.Lock() + defer mcp.clientsLock.Unlock() + + for client := range mcp.clients { + if err := client.WriteMessage(websocket.TextMessage, messageJSON); err != nil { + client.Close() + delete(mcp.clients, client) + } + } +} diff --git a/mcpserver/store.go b/mcpserver/store.go new file mode 100644 index 0000000..25fe9d4 --- /dev/null +++ b/mcpserver/store.go @@ -0,0 +1,390 @@ +package mcpserver + +import ( + "encoding/hex" + "log" + "time" +) + +// AddSpan adds a span to the trace store, organizing by trace ID and updating related data +func (store *TraceStore) AddSpan(spanData *SpanData) { + store.lock.Lock() + defer store.lock.Unlock() + + traceID := hex.EncodeToString(spanData.SpanProto.TraceId) + spanID := hex.EncodeToString(spanData.SpanProto.SpanId) + + // Get or create trace + trace, exists := store.traces[traceID] + if !exists { + trace = &TraceData{ + TraceID: traceID, + Spans: make(map[string]*SpanData), + Files: make(map[string]bool), + StartTime: 
spanData.StartTime, + EndTime: spanData.EndTime, + } + store.traces[traceID] = trace + } + + // Update trace start/end times if needed + if spanData.StartTime.Before(trace.StartTime) { + trace.StartTime = spanData.StartTime + } + if spanData.EndTime.After(trace.EndTime) { + trace.EndTime = spanData.EndTime + } + + // Add span to trace + trace.Spans[spanID] = spanData + + // If span is a root span (no parent), set it as the trace's root span + if len(spanData.ParentID) == 0 || spanData.ParentID == "0000000000000000" { + trace.RootSpan = spanData + } else { + // Add this span as a child of its parent + parentID := spanData.ParentID + if parent, ok := trace.Spans[parentID]; ok { + parent.Children = append(parent.Children, spanID) + } + } + + // Process file contexts + for _, fileCtx := range spanData.FileContexts { + filePath := fileCtx.FilePath + + // Add to file index + store.spansByFile[filePath] = append(store.spansByFile[filePath], fileCtx) + + // Mark file as touched by this trace + trace.Files[filePath] = true + + // Update trace status if this is an error + if fileCtx.Operation == "error" || fileCtx.Operation == "exception" { + trace.Status = "error" + } + } + + // Clean up old traces if we exceed the maximum + store.cleanupOldTraces() +} + +// cleanupOldTraces removes old traces based on retention time and max spans limit +func (store *TraceStore) cleanupOldTraces() { + // Skip if no limits are set + if store.maxSpans <= 0 && store.retention <= 0 { + return + } + + // Count spans and find old traces + var oldTraceIDs []string + var totalSpans int + now := time.Now() + + for id, trace := range store.traces { + totalSpans += len(trace.Spans) + + // Check retention time + if store.retention > 0 { + age := now.Sub(trace.EndTime) + if age > store.retention { + oldTraceIDs = append(oldTraceIDs, id) + continue + } + } + } + + // If we exceed max spans, remove old traces + if store.maxSpans > 0 && totalSpans > store.maxSpans { + // Clean by age if retention wasn't 
enough + if len(oldTraceIDs) == 0 { + // Find the oldest traces + type traceAge struct { + id string + age time.Time + } + + var ages []traceAge + for id, trace := range store.traces { + ages = append(ages, traceAge{id: id, age: trace.EndTime}) + } + + // Sort by age (oldest first) + for i := 0; i < len(ages); i++ { + for j := i + 1; j < len(ages); j++ { + if ages[i].age.After(ages[j].age) { + ages[i], ages[j] = ages[j], ages[i] + } + } + } + + // Take enough old traces to get under the limit + var removed int + for _, ta := range ages { + if totalSpans <= store.maxSpans { + break + } + trace := store.traces[ta.id] + removed += len(trace.Spans) + totalSpans -= len(trace.Spans) + oldTraceIDs = append(oldTraceIDs, ta.id) + } + } + } + + // Remove the old traces and clean up the file index + for _, id := range oldTraceIDs { + trace := store.traces[id] + + // Remove from file index + for file := range trace.Files { + var newSpans []*CodeSpanContext + for _, sc := range store.spansByFile[file] { + if sc.TraceID != id { + newSpans = append(newSpans, sc) + } + } + + if len(newSpans) > 0 { + store.spansByFile[file] = newSpans + } else { + delete(store.spansByFile, file) + } + } + + // Remove the trace + delete(store.traces, id) + } + + if len(oldTraceIDs) > 0 { + log.Printf("Removed %d old traces from store", len(oldTraceIDs)) + } +} + +// GetTrace returns a specific trace by ID +func (store *TraceStore) GetTrace(traceID string) *TraceData { + store.lock.RLock() + defer store.lock.RUnlock() + + return store.traces[traceID] +} + +// GetSpan returns a specific span by trace ID and span ID +func (store *TraceStore) GetSpan(traceID, spanID string) *SpanData { + store.lock.RLock() + defer store.lock.RUnlock() + + trace, ok := store.traces[traceID] + if !ok { + return nil + } + + return trace.Spans[spanID] +} + +// GetFileTraces returns all traces associated with a specific file +func (store *TraceStore) GetFileTraces(filePath string) map[string][]*CodeSpanContext { + 
store.lock.RLock() + defer store.lock.RUnlock() + + result := make(map[string][]*CodeSpanContext) + + for _, sc := range store.spansByFile[filePath] { + traceID := sc.TraceID + result[traceID] = append(result[traceID], sc) + } + + return result +} + +// ListFiles returns all files that have associated spans +func (store *TraceStore) ListFiles() []string { + store.lock.RLock() + defer store.lock.RUnlock() + + var files []string + for file := range store.spansByFile { + files = append(files, file) + } + + return files +} + +// ListTraces returns summaries of all traces +func (store *TraceStore) ListTraces() []*TraceDigest { + store.lock.RLock() + defer store.lock.RUnlock() + + var digests []*TraceDigest + + for id, trace := range store.traces { + digest := &TraceDigest{ + TraceID: id, + SpanCount: len(trace.Spans), + StartTime: trace.StartTime, + Duration: float64(trace.EndTime.Sub(trace.StartTime).Milliseconds()), + } + + // Get name from root span if available + if trace.RootSpan != nil && trace.RootSpan.SpanProto != nil { + digest.Name = trace.RootSpan.SpanProto.Name + } + + // Get files + for file := range trace.Files { + digest.Files = append(digest.Files, file) + } + + // Count errors + for _, span := range trace.Spans { + for _, ctx := range span.FileContexts { + if ctx.Operation == "error" || ctx.Operation == "exception" { + digest.ErrorCount++ + } + } + } + + digests = append(digests, digest) + } + + return digests +} + +// SearchTraces performs a search across traces based on the given criteria +func (store *TraceStore) SearchTraces(req SearchRequest) *SearchResponse { + store.lock.RLock() + defer store.lock.RUnlock() + + var traces []*TraceDigest + fileInsights := make(map[string]*FileInsight) + + // Process file filters + var fileSet map[string]bool + if len(req.Files) > 0 { + fileSet = make(map[string]bool) + for _, f := range req.Files { + fileSet[f] = true + } + } + + // Process time range filter + var minTime time.Time + if req.TimeRange != "" { + 
duration, err := time.ParseDuration(req.TimeRange)
		if err == nil {
			minTime = time.Now().Add(-duration)
		}
		// NOTE(review): a malformed TimeRange is silently ignored (no time
		// filter applied) rather than reported to the caller.
	}

	// Collect matching traces
	for id, trace := range store.traces {
		// Skip if outside time range
		if !minTime.IsZero() && trace.EndTime.Before(minTime) {
			continue
		}

		// Skip if errors only and no errors
		if req.ErrorsOnly && trace.Status != "error" {
			continue
		}

		// Check file filter: keep the trace if it touches any requested file.
		if fileSet != nil {
			hasMatchingFile := false
			for file := range trace.Files {
				if fileSet[file] {
					hasMatchingFile = true
					break
				}
			}
			if !hasMatchingFile {
				continue
			}
		}

		// Add to results
		digest := &TraceDigest{
			TraceID:   id,
			SpanCount: len(trace.Spans),
			StartTime: trace.StartTime,
			Duration:  float64(trace.EndTime.Sub(trace.StartTime).Milliseconds()),
		}

		// Get name from root span if available
		if trace.RootSpan != nil && trace.RootSpan.SpanProto != nil {
			digest.Name = trace.RootSpan.SpanProto.Name
		}

		// Get files and build insights
		for file := range trace.Files {
			digest.Files = append(digest.Files, file)

			// Build file insights (one shared insight per file, accumulated
			// across all matching traces).
			insight, exists := fileInsights[file]
			if !exists {
				insight = &FileInsight{
					FilePath: file,
				}
				fileInsights[file] = insight
			}

			// Collect hotspots and errors
			for _, span := range trace.Spans {
				for _, ctx := range span.FileContexts {
					if ctx.FilePath == file {
						// Count as error if appropriate
						if ctx.Operation == "error" || ctx.Operation == "exception" {
							digest.ErrorCount++
							insight.ErrorLines = append(insight.ErrorLines, ctx.LineStart)
						}

						// Track line hotspots
						// NOTE(review): HotspotLines/ErrorLines/Related are
						// appended without deduplication, so repeated lines and
						// files accumulate duplicates across traces/contexts.
						insight.HotspotLines = append(insight.HotspotLines, ctx.LineStart)

						// Track related files (exclude this file)
						for otherFile := range trace.Files {
							if otherFile != file {
								insight.Related = append(insight.Related, otherFile)
							}
						}
					}
				}
			}
		}

		traces = append(traces, digest)

		// Limit results if requested
		// NOTE(review): combined with map iteration order above, which traces
		// make the cut under Limit is nondeterministic between calls.
		if req.Limit > 0 && len(traces) >= req.Limit {
			break
		}
	}

	// Build response
	response := &SearchResponse{
		Traces:       traces,
		FileInsights: fileInsights,
	}
	// NOTE(review): SearchResponse.Summary is never assigned here, so it is
	// always empty in search results.

	return response
}

// TraceDigest provides key information about a trace.
type TraceDigest struct {
	TraceID    string    `json:"traceId"`
	Name       string    `json:"name"`       // root span name, when available
	Duration   float64   `json:"durationMs"` // EndTime - StartTime, in milliseconds
	SpanCount  int       `json:"spanCount"`
	ErrorCount int       `json:"errorCount"` // file contexts flagged "error"/"exception"
	Files      []string  `json:"files"`
	StartTime  time.Time `json:"startTime"`
	KeyEvents  []string  `json:"keyEvents"` // NOTE(review): not populated by ListTraces/SearchTraces in this file
}

// FileInsight provides code-centric insights
type FileInsight struct {
	FilePath     string   `json:"filePath"`
	HotspotLines []int    `json:"hotspotLines"` // Lines with most activity
	ErrorLines   []int    `json:"errorLines"`   // Lines associated with errors
	Related      []string `json:"related"`      // Related files
}
diff --git a/mcpserver/types.go b/mcpserver/types.go
new file mode 100644
index 0000000..2c03553
--- /dev/null
+++ b/mcpserver/types.go
@@ -0,0 +1,56 @@
package mcpserver

// WebSocketMessage represents a message sent over WebSocket
type WebSocketMessage struct {
	Type    string `json:"type"`
	Message string `json:"message,omitempty"`
	SpanID  string `json:"span_id,omitempty"`
	TraceID string `json:"trace_id,omitempty"`
}

// TraceResponse represents the detailed response for a trace.
// NOTE(review): StartTime/EndTime are interface{} — consider time.Time (or a
// pre-formatted string) for a stable JSON shape; confirm what the handlers
// actually assign here.
type TraceResponse struct {
	TraceID      string                  `json:"traceId"`
	StartTime    interface{}             `json:"startTime"`
	EndTime      interface{}             `json:"endTime"`
	Status       string                  `json:"status"`
	ErrorMessage string                  `json:"message,omitempty"`
	Files        map[string]bool         `json:"files"`
	Spans        map[string]SpanResponse `json:"spans"`
}

// SpanResponse represents a simplified span for API responses
type SpanResponse struct {
	Name         string                `json:"name"`
	ParentID     string                `json:"parentId,omitempty"`
	Children     []string              `json:"children,omitempty"`
	StartTime    interface{}           `json:"startTime"` // NOTE(review): same interface{} caveat as TraceResponse
	Duration     int64                 `json:"durationMs"`
	FileContexts []FileContextResponse `json:"fileContexts,omitempty"`
}

// 
FileContextResponse represents file context information for API responses
type FileContextResponse struct {
	FilePath     string `json:"filePath"`
	FunctionName string `json:"functionName,omitempty"`
	Operation    string `json:"operation"` // e.g. "error"/"exception" are treated specially by the store
	LineStart    int    `json:"lineStart"`
	LineEnd      int    `json:"lineEnd"`
	CodeSnippet  string `json:"codeSnippet,omitempty"`
}

// SearchRequest defines parameters for trace queries
type SearchRequest struct {
	Query      string   `json:"query"`      // Natural language query
	Files      []string `json:"files"`      // Files of interest
	TimeRange  string   `json:"timeRange"`  // Time range like "1h", "24h"
	ErrorsOnly bool     `json:"errorsOnly"` // Only return traces with errors
	Limit      int      `json:"limit"`      // Max results
}

// SearchResponse provides AI-friendly trace data.
// NOTE(review): Summary is declared but never assigned by SearchTraces.
type SearchResponse struct {
	Traces       []*TraceDigest          `json:"traces"`
	FileInsights map[string]*FileInsight `json:"fileInsights"`
	Summary      string                  `json:"summary"`
}
\ No newline at end of file
diff --git a/mcpserver/ui.go b/mcpserver/ui.go
new file mode 100644
index 0000000..473d0b8
--- /dev/null
+++ b/mcpserver/ui.go
@@ -0,0 +1,20 @@
package mcpserver

import (
	"embed"
	"io/fs"
	"net/http"
)

//go:embed ui
var uiContent embed.FS

// GetUIHandler returns an http.Handler that serves the embedded UI files.
// The ui directory is compiled into the binary via go:embed, so no filesystem
// access is needed at runtime.
func GetUIHandler() http.Handler {
	fsys, err := fs.Sub(uiContent, "ui")
	if err != nil {
		// This should never happen as we're always embedding the ui directory
		panic("failed to create sub-filesystem for UI content: " + err.Error())
	}
	return http.FileServer(http.FS(fsys))
}
\ No newline at end of file
diff --git a/mcpserver/ui/index.html b/mcpserver/ui/index.html
new file mode 100644
index 0000000..9667653
--- /dev/null
+++ b/mcpserver/ui/index.html
@@ -0,0 +1,433 @@


+ + +