diff --git a/CLAUDE.md b/CLAUDE.md index cfa206a7..4e6e6ffb 100644 --- a/CLAUDE.md +++ b/CLAUDE.md @@ -40,6 +40,7 @@ CLI (roborev) → HTTP API → Daemon (roborev daemon run) → Worker Pool → A | `internal/storage/` | SQLite operations | | `internal/agent/` | Agent interface + implementations | | `internal/config/config.go` | Config loading, agent resolution | +| `internal/worktree/` | Git worktree helpers (create, patch capture/apply) | ## Conventions @@ -93,7 +94,7 @@ CLI reads this to find the daemon. If port 7373 is busy, daemon auto-increments. ## Design Constraints -- **Daemon tasks must not modify the git working tree.** Background jobs (reviews, CI polling, synthesis) are read-only with respect to the user's repo checkout. They read source files and write results to the database only. CLI commands like `roborev fix` run synchronously in the foreground and may modify files, but nothing enqueued to the worker pool should touch the working tree. If we need background tasks that produce file changes in the future, they should operate in isolated git worktrees — that is a separate initiative. +- **Daemon tasks must not modify the git working tree.** Background jobs (reviews, CI polling, synthesis) are read-only with respect to the user's repo checkout. They read source files and write results to the database only. CLI commands like `roborev fix` run synchronously in the foreground and may modify files. Background `fix` jobs run agents in isolated git worktrees (via `internal/worktree`) and store resulting patches in the database — patches are only applied to the working tree when the user explicitly confirms in the TUI. ## Style Preferences diff --git a/cmd/roborev/daemon_integration_test.go b/cmd/roborev/daemon_integration_test.go index 1d55aae6..985bfe43 100644 --- a/cmd/roborev/daemon_integration_test.go +++ b/cmd/roborev/daemon_integration_test.go @@ -245,6 +245,10 @@ func TestDaemonShutdownBySignal(t *testing.T) { } func TestDaemonSignalCleanup(t *testing.T) { + if runtime.GOOS == "windows" { + t.Skip("skipping daemon signal test on Windows due to file locking differences") + } + // Verify that signal.Stop is called when shutdown // is triggered by a signal. var cleanupCalled bool diff --git a/cmd/roborev/refine.go b/cmd/roborev/refine.go index 8b3d6f62..9628e057 100644 --- a/cmd/roborev/refine.go +++ b/cmd/roborev/refine.go @@ -1,18 +1,12 @@ package main import ( - "bufio" - "bytes" "context" "errors" "fmt" "io" - "io/fs" "os" - "os/exec" - "path/filepath" "sort" - "strconv" "strings" "time" @@ -23,6 +17,7 @@ import ( "github.com/roborev-dev/roborev/internal/git" "github.com/roborev-dev/roborev/internal/prompt" "github.com/roborev-dev/roborev/internal/storage" + "github.com/roborev-dev/roborev/internal/worktree" "github.com/spf13/cobra" ) @@ -513,13 +508,14 @@ func runRefine(ctx RunContext, opts refineOptions) error { branchBefore := git.GetCurrentBranch(repoPath) // Create temp worktree to isolate agent from user's working tree - worktreePath, cleanupWorktree, err := createTempWorktree(repoPath) + wt, err := worktree.Create(repoPath) if err != nil { return fmt.Errorf("create worktree: %w", err) } + worktreePath := wt.Dir // NOTE: not using defer here because we're inside a loop; // defer wouldn't run until runRefine returns, leaking worktrees. - // Instead, cleanupWorktree() is called explicitly before every exit point. + // Instead, wt.Close() is called explicitly before every exit point. 
// Determine output writer var agentOutput io.Writer @@ -554,23 +550,23 @@ func runRefine(ctx RunContext, opts refineOptions) error { // Safety checks on main repo (before applying any changes) if wasCleanBefore && !git.IsWorkingTreeClean(repoPath) { - cleanupWorktree() + wt.Close() return fmt.Errorf("working tree changed during refine - aborting to prevent data loss") } headAfterAgent, resolveErr := git.ResolveSHA(repoPath, "HEAD") if resolveErr != nil { - cleanupWorktree() + wt.Close() return fmt.Errorf("cannot determine HEAD after agent run: %w", resolveErr) } branchAfterAgent := git.GetCurrentBranch(repoPath) if headAfterAgent != headBefore || branchAfterAgent != branchBefore { - cleanupWorktree() + wt.Close() return fmt.Errorf("HEAD changed during refine (was %s on %s, now %s on %s) - aborting to prevent applying patch to wrong commit", shortSHA(headBefore), branchBefore, shortSHA(headAfterAgent), branchAfterAgent) } if agentErr != nil { - cleanupWorktree() + wt.Close() fmt.Printf("Agent error: %v\n", agentErr) fmt.Println("Will retry in next iteration") continue @@ -578,7 +574,7 @@ func runRefine(ctx RunContext, opts refineOptions) error { // Check if agent made changes in worktree if git.IsWorkingTreeClean(worktreePath) { - cleanupWorktree() + wt.Close() fmt.Println("Agent made no changes - skipping this review") if err := client.AddComment(currentFailedReview.JobID, "roborev-refine", "Agent could not determine how to address findings"); err != nil { fmt.Printf("Warning: failed to add comment to job %d: %v\n", currentFailedReview.JobID, err) @@ -588,12 +584,17 @@ func runRefine(ctx RunContext, opts refineOptions) error { continue } - // Apply worktree changes to main repo and commit - if err := applyWorktreeChanges(repoPath, worktreePath); err != nil { - cleanupWorktree() - return fmt.Errorf("apply worktree changes: %w", err) + // Capture patch from worktree and apply to main repo + patch, err := wt.CapturePatch() + if err != nil { + wt.Close() + return fmt.Errorf("capture worktree patch: %w", err) + } + if err := worktree.ApplyPatch(repoPath, patch); err != nil { + wt.Close() + return fmt.Errorf("apply worktree patch: %w", err) } - cleanupWorktree() + wt.Close() commitMsg := fmt.Sprintf("Address review findings (job %d)\n\n%s", currentFailedReview.JobID, summarizeAgentOutput(output)) newCommit, err := commitWithHookRetry(repoPath, commitMsg, addressAgent, opts.quiet) @@ -1024,167 +1025,7 @@ func summarizeAgentOutput(output string) string { return strings.Join(summary, "\n") } -// createTempWorktree creates a temporary git worktree for isolated agent work -func createTempWorktree(repoPath string) (string, func(), error) { - worktreeDir, err := os.MkdirTemp("", "roborev-refine-") - if err != nil { - return "", nil, err - } - - // Create the worktree (without --recurse-submodules for compatibility with older git). - // Suppress hooks via core.hooksPath= — user hooks shouldn't run in internal worktrees. 
- cmd := exec.Command("git", "-C", repoPath, "-c", "core.hooksPath="+os.DevNull, "worktree", "add", "--detach", worktreeDir, "HEAD") - if out, err := cmd.CombinedOutput(); err != nil { - _ = os.RemoveAll(worktreeDir) - return "", nil, fmt.Errorf("git worktree add: %w: %s", err, out) - } - - // Initialize and update submodules in the worktree - initArgs := []string{"-C", worktreeDir} - if submoduleRequiresFileProtocol(worktreeDir) { - initArgs = append(initArgs, "-c", "protocol.file.allow=always") - } - initArgs = append(initArgs, "submodule", "update", "--init") - cmd = exec.Command("git", initArgs...) - if out, err := cmd.CombinedOutput(); err != nil { - _ = exec.Command("git", "-C", repoPath, "worktree", "remove", "--force", worktreeDir).Run() - _ = os.RemoveAll(worktreeDir) - return "", nil, fmt.Errorf("git submodule update: %w: %s", err, out) - } - - updateArgs := []string{"-C", worktreeDir} - if submoduleRequiresFileProtocol(worktreeDir) { - updateArgs = append(updateArgs, "-c", "protocol.file.allow=always") - } - updateArgs = append(updateArgs, "submodule", "update", "--init", "--recursive") - cmd = exec.Command("git", updateArgs...) - if out, err := cmd.CombinedOutput(); err != nil { - _ = exec.Command("git", "-C", repoPath, "worktree", "remove", "--force", worktreeDir).Run() - _ = os.RemoveAll(worktreeDir) - return "", nil, fmt.Errorf("git submodule update: %w: %s", err, out) - } - - lfsCmd := exec.Command("git", "-C", worktreeDir, "lfs", "env") - if err := lfsCmd.Run(); err == nil { - cmd = exec.Command("git", "-C", worktreeDir, "lfs", "pull") - _ = cmd.Run() - } - - cleanup := func() { - _ = exec.Command("git", "-C", repoPath, "worktree", "remove", "--force", worktreeDir).Run() - _ = os.RemoveAll(worktreeDir) - } - - return worktreeDir, cleanup, nil -} - -func submoduleRequiresFileProtocol(repoPath string) bool { - gitmodulesPaths := findGitmodulesPaths(repoPath) - if len(gitmodulesPaths) == 0 { - return false - } - for _, gitmodulesPath := range gitmodulesPaths { - file, err := os.Open(gitmodulesPath) - if err != nil { - continue - } - scanner := bufio.NewScanner(file) - for scanner.Scan() { - line := strings.TrimSpace(scanner.Text()) - if line == "" || strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";") { - continue - } - parts := strings.SplitN(line, "=", 2) - if len(parts) != 2 { - continue - } - if !strings.EqualFold(strings.TrimSpace(parts[0]), "url") { - continue - } - url := strings.TrimSpace(parts[1]) - if unquoted, err := strconv.Unquote(url); err == nil { - url = unquoted - } - if isFileProtocolURL(url) { - file.Close() - return true - } - } - file.Close() - } - return false -} - -func findGitmodulesPaths(repoPath string) []string { - var paths []string - err := filepath.WalkDir(repoPath, func(path string, d fs.DirEntry, err error) error { - if err != nil { - return nil - } - if d.IsDir() && d.Name() == ".git" { - return filepath.SkipDir - } - if d.Name() == ".gitmodules" { - paths = append(paths, path) - } - return nil - }) - if err != nil { - return nil - } - return paths -} - -func isFileProtocolURL(url string) bool { - lower := strings.ToLower(url) - if strings.HasPrefix(lower, "file:") { - return true - } - if strings.HasPrefix(url, "/") || strings.HasPrefix(url, "./") || strings.HasPrefix(url, "../") { - return true - } - if len(url) >= 2 && isAlpha(url[0]) && url[1] == ':' { - return true - } - if strings.HasPrefix(url, `\\`) { - return true - } - return false -} - -func isAlpha(b byte) bool { - return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') -} 
- -// applyWorktreeChanges applies changes from worktree to main repo via patch -func applyWorktreeChanges(repoPath, worktreePath string) error { - // Stage all changes in worktree - cmd := exec.Command("git", "-C", worktreePath, "add", "-A") - if out, err := cmd.CombinedOutput(); err != nil { - return fmt.Errorf("git add in worktree: %w: %s", err, out) - } - - // Get diff as patch - diffCmd := exec.Command("git", "-C", worktreePath, "diff", "--cached", "--binary") - diff, err := diffCmd.Output() - if err != nil { - return fmt.Errorf("git diff in worktree: %w", err) - } - if len(diff) == 0 { - return nil // No changes - } - - // Apply patch to main repo - applyCmd := exec.Command("git", "-C", repoPath, "apply", "--binary", "-") - applyCmd.Stdin = bytes.NewReader(diff) - var stderr bytes.Buffer - applyCmd.Stderr = &stderr - if err := applyCmd.Run(); err != nil { - return fmt.Errorf("git apply: %w: %s", err, stderr.String()) - } - - return nil -} +// Worktree creation and patch operations are in internal/worktree package. // commitWithHookRetry attempts git.CreateCommit and, on failure, // runs the agent to fix whatever the hook complained about. Only diff --git a/cmd/roborev/refine_integration_test.go b/cmd/roborev/refine_integration_test.go index b3742e17..45be5e32 100644 --- a/cmd/roborev/refine_integration_test.go +++ b/cmd/roborev/refine_integration_test.go @@ -10,6 +10,7 @@ import ( "testing" "github.com/roborev-dev/roborev/internal/testutil" + "github.com/roborev-dev/roborev/internal/worktree" ) func TestValidateRefineContext(t *testing.T) { @@ -169,9 +170,9 @@ func TestWorktreeCleanupBetweenIterations(t *testing.T) { // before the next iteration. Verify the directory is removed each time. var prevPath string for i := range 3 { - worktreePath, cleanup, err := createTempWorktree(repo.Root) + wt, err := worktree.Create(repo.Root) if err != nil { - t.Fatalf("iteration %d: createTempWorktree failed: %v", i, err) + t.Fatalf("iteration %d: worktree.Create failed: %v", i, err) } // Verify previous worktree was cleaned up @@ -182,13 +183,13 @@ func TestWorktreeCleanupBetweenIterations(t *testing.T) { } // Verify current worktree exists - if _, err := os.Stat(worktreePath); err != nil { - t.Fatalf("iteration %d: worktree %s should exist: %v", i, worktreePath, err) + if _, err := os.Stat(wt.Dir); err != nil { + t.Fatalf("iteration %d: worktree %s should exist: %v", i, wt.Dir, err) } // Simulate the explicit cleanup call (as done on error/no-change paths) - cleanup() - prevPath = worktreePath + wt.Close() + prevPath = wt.Dir } // Verify the last worktree was also cleaned up @@ -221,19 +222,19 @@ func TestCreateTempWorktreeIgnoresHooks(t *testing.T) { _ = out } - // createTempWorktree should succeed because it suppresses hooks - worktreePath, cleanup, err := createTempWorktree(repo.Root) + // worktree.Create should succeed because it suppresses hooks + wt, err := worktree.Create(repo.Root) if err != nil { - t.Fatalf("createTempWorktree should succeed with failing hook: %v", err) + t.Fatalf("worktree.Create should succeed with failing hook: %v", err) } - defer cleanup() + defer wt.Close() // Verify the worktree directory exists and has the file from the repo - if _, err := os.Stat(worktreePath); err != nil { + if _, err := os.Stat(wt.Dir); err != nil { t.Fatalf("worktree directory should exist: %v", err) } - baseFile := filepath.Join(worktreePath, "base.txt") + baseFile := filepath.Join(wt.Dir, "base.txt") content, err := os.ReadFile(baseFile) if err != nil { t.Fatalf("expected base.txt in 
worktree: %v", err) @@ -280,13 +281,13 @@ func TestCreateTempWorktreeInitializesSubmodules(t *testing.T) { runMainGit("-c", "protocol.file.allow=always", "submodule", "add", submoduleRepo, "deps/sub") runMainGit("commit", "-m", "add submodule") - worktreePath, cleanup, err := createTempWorktree(mainRepo) + wt, err := worktree.Create(mainRepo) if err != nil { - t.Fatalf("createTempWorktree failed: %v", err) + t.Fatalf("worktree.Create failed: %v", err) } - defer cleanup() + defer wt.Close() - if _, err := os.Stat(filepath.Join(worktreePath, "deps", "sub", "sub.txt")); err != nil { + if _, err := os.Stat(filepath.Join(wt.Dir, "deps", "sub", "sub.txt")); err != nil { t.Fatalf("expected submodule file in worktree: %v", err) } } diff --git a/cmd/roborev/refine_test.go b/cmd/roborev/refine_test.go index 2f121529..78c5283f 100644 --- a/cmd/roborev/refine_test.go +++ b/cmd/roborev/refine_test.go @@ -390,66 +390,6 @@ func TestFindFailedReviewForBranch_AllPass(t *testing.T) { } } -func TestSubmoduleRequiresFileProtocol(t *testing.T) { - tpl := `[submodule "test"] - path = test - %s = %s -` - tests := []struct { - name string - key string - url string - expected bool - }{ - {name: "file-scheme", key: "url", url: "file:///tmp/repo", expected: true}, - {name: "file-scheme-quoted", key: "url", url: `"file:///tmp/repo"`, expected: true}, - {name: "file-scheme-mixed-case-key", key: "URL", url: "file:///tmp/repo", expected: true}, - {name: "file-single-slash", key: "url", url: "file:/tmp/repo", expected: true}, - {name: "unix-absolute", key: "url", url: "/tmp/repo", expected: true}, - {name: "relative-dot", key: "url", url: "./repo", expected: true}, - {name: "relative-dotdot", key: "url", url: "../repo", expected: true}, - {name: "windows-drive-slash", key: "url", url: "C:/repo", expected: true}, - {name: "windows-drive-backslash", key: "url", url: `C:\repo`, expected: true}, - {name: "windows-unc", key: "url", url: `\\server\share\repo`, expected: true}, - {name: "https", key: "url", url: "https://example.com/repo.git", expected: false}, - } - - for _, tc := range tests { - t.Run(tc.name, func(t *testing.T) { - dir := t.TempDir() - gitmodules := filepath.Join(dir, ".gitmodules") - if err := os.WriteFile(gitmodules, fmt.Appendf(nil, tpl, tc.key, tc.url), 0644); err != nil { - t.Fatalf("write .gitmodules: %v", err) - } - if got := submoduleRequiresFileProtocol(dir); got != tc.expected { - t.Fatalf("expected %v, got %v", tc.expected, got) - } - }) - } -} - -func TestSubmoduleRequiresFileProtocolNested(t *testing.T) { - tpl := `[submodule "test"] - path = test - url = %s -` - dir := t.TempDir() - if err := os.WriteFile(filepath.Join(dir, ".gitmodules"), fmt.Appendf(nil, tpl, "https://example.com/repo.git"), 0644); err != nil { - t.Fatalf("write root .gitmodules: %v", err) - } - nestedPath := filepath.Join(dir, "sub", ".gitmodules") - if err := os.MkdirAll(filepath.Dir(nestedPath), 0755); err != nil { - t.Fatalf("mkdir nested: %v", err) - } - if err := os.WriteFile(nestedPath, fmt.Appendf(nil, tpl, "file:///tmp/repo"), 0644); err != nil { - t.Fatalf("write nested .gitmodules: %v", err) - } - - if !submoduleRequiresFileProtocol(dir) { - t.Fatalf("expected nested file URL to require file protocol") - } -} - func TestFindFailedReviewForBranch_NoReviews(t *testing.T) { client := newMockDaemonClient() // No reviews set - GetReviewBySHA will return nil for all commits @@ -784,6 +724,241 @@ func TestFindPendingJobForBranch_OldestFirst(t *testing.T) { } } +func gitCommitFile(t *testing.T, repoDir string, runGit 
func(...string) string, filename, content, msg string) string { + t.Helper() + if err := os.WriteFile(filepath.Join(repoDir, filename), []byte(content), 0644); err != nil { + t.Fatal(err) + } + runGit("add", filename) + runGit("commit", "-m", msg) + return runGit("rev-parse", "HEAD") +} + +// setupTestGitRepo creates a git repo for testing branch/--since behavior. +// Returns the repo directory, base commit SHA, and a helper to run git commands. +func setupTestGitRepo(t *testing.T) (repoDir string, baseSHA string, runGit func(args ...string) string) { + t.Helper() + + repoDir = t.TempDir() + runGit = func(args ...string) string { + cmd := exec.Command("git", args...) + cmd.Dir = repoDir + out, err := cmd.CombinedOutput() + if err != nil { + t.Fatalf("git %v failed: %v\n%s", args, err, out) + } + return strings.TrimSpace(string(out)) + } + + // Use git init + symbolic-ref for compatibility with Git < 2.28 (which lacks -b flag) + runGit("init") + runGit("symbolic-ref", "HEAD", "refs/heads/main") + runGit("config", "user.email", "test@test.com") + runGit("config", "user.name", "Test") + + baseSHA = gitCommitFile(t, repoDir, runGit, "base.txt", "base", "base commit") + + return repoDir, baseSHA, runGit +} + +func TestValidateRefineContext_RefusesMainBranchWithoutSince(t *testing.T) { + if _, err := exec.LookPath("git"); err != nil { + t.Skip("git not available") + } + + repoDir, _, _ := setupTestGitRepo(t) + + // Stay on main branch (don't create feature branch) + origDir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if err := os.Chdir(repoDir); err != nil { + t.Fatal(err) + } + defer os.Chdir(origDir) + + // Validating without --since on main should fail + _, _, _, _, err = validateRefineContext("", "", "") + if err == nil { + t.Fatal("expected error when validating on main without --since") + } + if !strings.Contains(err.Error(), "refusing to refine on main") { + t.Errorf("expected 'refusing to refine on main' error, got: %v", err) + } + if !strings.Contains(err.Error(), "--since") { + t.Errorf("expected error to mention --since flag, got: %v", err) + } +} + +func TestValidateRefineContext_AllowsMainBranchWithSince(t *testing.T) { + if _, err := exec.LookPath("git"); err != nil { + t.Skip("git not available") + } + + repoDir, baseSHA, runGit := setupTestGitRepo(t) + + // Add another commit on main + gitCommitFile(t, repoDir, runGit, "second.txt", "second", "second commit") + + origDir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if err := os.Chdir(repoDir); err != nil { + t.Fatal(err) + } + defer os.Chdir(origDir) + + // Validating with --since on main should pass + repoPath, currentBranch, _, mergeBase, err := validateRefineContext("", baseSHA, "") + if err != nil { + t.Fatalf("validation should pass with --since on main, got: %v", err) + } + if repoPath == "" { + t.Error("expected non-empty repoPath") + } + if currentBranch != "main" { + t.Errorf("expected currentBranch=main, got %s", currentBranch) + } + if mergeBase != baseSHA { + t.Errorf("expected mergeBase=%s, got %s", baseSHA, mergeBase) + } +} + +func TestValidateRefineContext_SinceWorksOnFeatureBranch(t *testing.T) { + if _, err := exec.LookPath("git"); err != nil { + t.Skip("git not available") + } + + repoDir, baseSHA, runGit := setupTestGitRepo(t) + + // Create feature branch with commits + runGit("checkout", "-b", "feature") + gitCommitFile(t, repoDir, runGit, "feature.txt", "feature", "feature commit") + + origDir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if err := os.Chdir(repoDir); 
err != nil { + t.Fatal(err) + } + defer os.Chdir(origDir) + + // --since should work on feature branch + repoPath, currentBranch, _, mergeBase, err := validateRefineContext("", baseSHA, "") + if err != nil { + t.Fatalf("--since should work on feature branch, got: %v", err) + } + if repoPath == "" { + t.Error("expected non-empty repoPath") + } + if currentBranch != "feature" { + t.Errorf("expected currentBranch=feature, got %s", currentBranch) + } + if mergeBase != baseSHA { + t.Errorf("expected mergeBase=%s, got %s", baseSHA, mergeBase) + } +} + +func TestValidateRefineContext_InvalidSinceRef(t *testing.T) { + if _, err := exec.LookPath("git"); err != nil { + t.Skip("git not available") + } + + repoDir, _, _ := setupTestGitRepo(t) + + origDir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if err := os.Chdir(repoDir); err != nil { + t.Fatal(err) + } + defer os.Chdir(origDir) + + // Invalid --since ref should fail with clear error + _, _, _, _, err = validateRefineContext("", "nonexistent-ref-abc123", "") + if err == nil { + t.Fatal("expected error for invalid --since ref") + } + if !strings.Contains(err.Error(), "cannot resolve --since") { + t.Errorf("expected 'cannot resolve --since' error, got: %v", err) + } +} + +func TestValidateRefineContext_SinceNotAncestorOfHEAD(t *testing.T) { + if _, err := exec.LookPath("git"); err != nil { + t.Skip("git not available") + } + + repoDir, _, runGit := setupTestGitRepo(t) + + // Create a commit on a separate branch that diverges from main + runGit("checkout", "-b", "other-branch") + otherBranchSHA := gitCommitFile(t, repoDir, runGit, "other.txt", "other", "commit on other branch") + + // Go back to main and create a different commit + runGit("checkout", "main") + gitCommitFile(t, repoDir, runGit, "main2.txt", "main2", "second commit on main") + + origDir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if err := os.Chdir(repoDir); err != nil { + t.Fatal(err) + } + defer os.Chdir(origDir) + + // Using --since with a commit from a different branch (not ancestor of HEAD) should fail + _, _, _, _, err = validateRefineContext("", otherBranchSHA, "") + if err == nil { + t.Fatal("expected error when --since is not an ancestor of HEAD") + } + if !strings.Contains(err.Error(), "not an ancestor of HEAD") { + t.Errorf("expected 'not an ancestor of HEAD' error, got: %v", err) + } +} + +func TestValidateRefineContext_FeatureBranchWithoutSinceStillWorks(t *testing.T) { + if _, err := exec.LookPath("git"); err != nil { + t.Skip("git not available") + } + + repoDir, baseSHA, runGit := setupTestGitRepo(t) + + // Create feature branch + runGit("checkout", "-b", "feature") + gitCommitFile(t, repoDir, runGit, "feature.txt", "feature", "feature commit") + + origDir, err := os.Getwd() + if err != nil { + t.Fatal(err) + } + if err := os.Chdir(repoDir); err != nil { + t.Fatal(err) + } + defer os.Chdir(origDir) + + // Feature branch without --since should pass validation (uses merge-base) + repoPath, currentBranch, _, mergeBase, err := validateRefineContext("", "", "") + if err != nil { + t.Fatalf("feature branch without --since should work, got: %v", err) + } + if repoPath == "" { + t.Error("expected non-empty repoPath") + } + if currentBranch != "feature" { + t.Errorf("expected currentBranch=feature, got %s", currentBranch) + } + // mergeBase should be the base commit (merge-base of feature and main) + if mergeBase != baseSHA { + t.Errorf("expected mergeBase=%s (base commit), got %s", baseSHA, mergeBase) + } +} + func TestCommitWithHookRetrySucceeds(t 
*testing.T) { if _, err := exec.LookPath("git"); err != nil { t.Skip("git not available") diff --git a/cmd/roborev/tui.go b/cmd/roborev/tui.go index 866d2c6f..75edcd74 100644 --- a/cmd/roborev/tui.go +++ b/cmd/roborev/tui.go @@ -5,10 +5,12 @@ import ( "encoding/json" "errors" "fmt" + "io" "net" "net/http" neturl "net/url" "os" + "os/exec" "path/filepath" "regexp" "slices" @@ -29,6 +31,8 @@ import ( "github.com/roborev-dev/roborev/internal/storage" "github.com/roborev-dev/roborev/internal/update" "github.com/roborev-dev/roborev/internal/version" + "github.com/roborev-dev/roborev/internal/worktree" + godiff "github.com/sourcegraph/go-diff/diff" "github.com/spf13/cobra" ) @@ -104,6 +108,9 @@ const ( tuiViewCommitMsg tuiViewHelp tuiViewTail + tuiViewTasks // Background fix tasks view + tuiViewFixPrompt // Fix prompt confirmation modal + tuiViewPatch // Patch viewer for fix jobs ) // queuePrefetchBuffer is the number of extra rows to fetch beyond what's visible, @@ -247,6 +254,20 @@ type tuiModel struct { mdCache *markdownCache clipboard ClipboardWriter + + // Review view navigation + reviewFromView tuiView // View to return to when exiting review (queue or tasks) + + // Fix task state + fixJobs []storage.ReviewJob // Fix jobs for tasks view + fixSelectedIdx int // Selected index in tasks view + fixPromptText string // Editable fix prompt text + fixPromptJobID int64 // Parent job ID for fix prompt modal + fixPromptFromView tuiView // View to return to after fix prompt closes + fixShowHelp bool // Show help overlay in tasks view + patchText string // Current patch text for patch viewer + patchScroll int // Scroll offset in patch viewer + patchJobID int64 // Job ID of the patch being viewed } // pendingState tracks a pending addressed toggle with sequence number @@ -361,6 +382,31 @@ type tuiReconnectMsg struct { err error } +type tuiFixJobsMsg struct { + jobs []storage.ReviewJob + err error +} + +type tuiFixTriggerResultMsg struct { + job *storage.ReviewJob + err error + warning string // non-fatal issue (e.g. failed to mark stale job) +} + +type tuiApplyPatchResultMsg struct { + jobID int64 + parentJobID int64 // Parent review job (to mark addressed on success) + success bool + err error + rebase bool // True if patch didn't apply and needs rebase +} + +type tuiPatchMsg struct { + jobID int64 + patch string + err error +} + // ClipboardWriter is an interface for clipboard operations (allows mocking in tests) type ClipboardWriter interface { WriteText(text string) error @@ -602,6 +648,9 @@ func (m tuiModel) fetchJobs() tea.Cmd { params.Set("addressed", "false") } + // Exclude fix jobs — they belong in the Tasks view, not the queue + params.Set("exclude_job_type", "fix") + // Set limit: use pagination unless we need client-side filtering (multi-repo) if needsAllJobs { params.Set("limit", "0") @@ -655,6 +704,7 @@ func (m tuiModel) fetchMoreJobs() tea.Cmd { if m.hideAddressed { params.Set("addressed", "false") } + params.Set("exclude_job_type", "fix") url := fmt.Sprintf("%s/api/jobs?%s", m.serverAddr, params.Encode()) resp, err := m.client.Get(url) if err != nil { @@ -1283,6 +1333,17 @@ func (m tuiModel) toggleAddressedForJob(jobID int64, currentState *bool) tea.Cmd } } +// markParentAddressed marks the parent review job as addressed after a fix is applied. 
+func (m tuiModel) markParentAddressed(parentJobID int64) tea.Cmd { + return func() tea.Msg { + err := m.postAddressed(parentJobID, true, "parent review not found") + if err != nil { + return tuiErrMsg(err) + } + return nil + } +} + // updateSelectedJobID updates the tracked job ID after navigation func (m *tuiModel) updateSelectedJobID() { if m.selectedIdx >= 0 && m.selectedIdx < len(m.jobs) { @@ -1619,8 +1680,8 @@ func (m tuiModel) getVisibleJobs() []storage.ReviewJob { } // Queue help line constants (used by both queueVisibleRows and renderQueueView). -const queueHelpLine1 = "x: cancel | r: rerun | t: tail | p: prompt | c: comment | y: copy | m: commit msg" -const queueHelpLine2 = "↑/↓: navigate | enter: review | a: addressed | f: filter | h: hide | ?: commands | q: quit" +const queueHelpLine1 = "x: cancel | r: rerun | t: tail | p: prompt | c: comment | y: copy | m: commit msg | F: fix" +const queueHelpLine2 = "↑/↓: navigate | enter: review | a: addressed | f: filter | h: hide | T: tasks | ?: help | q: quit" // queueHelpLines computes how many terminal lines the queue help // footer occupies, accounting for wrapping at narrow widths. @@ -1727,7 +1788,12 @@ func (m tuiModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { if m.loadingMore || m.loadingJobs { return m, tea.Batch(m.tick(), m.fetchStatus()) } - return m, tea.Batch(m.tick(), m.fetchJobs(), m.fetchStatus()) + cmds := []tea.Cmd{m.tick(), m.fetchJobs(), m.fetchStatus()} + // Refresh fix jobs when viewing tasks or when fix jobs are in progress + if m.currentView == tuiViewTasks || m.hasActiveFixJobs() { + cmds = append(cmds, m.fetchFixJobs()) + } + return m, tea.Batch(cmds...) case tuiTailTickMsg: if m.currentView == tuiViewTail && m.tailStreaming && m.tailJobID > 0 { @@ -2016,6 +2082,74 @@ func (m tuiModel) Update(msg tea.Msg) (tea.Model, tea.Cmd) { case tuiReconnectMsg: return m.handleReconnectMsg(msg) + + case tuiFixJobsMsg: + if msg.err != nil { + m.err = msg.err + } else { + m.fixJobs = msg.jobs + if m.fixSelectedIdx >= len(m.fixJobs) && len(m.fixJobs) > 0 { + m.fixSelectedIdx = len(m.fixJobs) - 1 + } + } + + case tuiFixTriggerResultMsg: + if msg.err != nil { + m.err = msg.err + m.flashMessage = fmt.Sprintf("Fix failed: %v", msg.err) + m.flashExpiresAt = time.Now().Add(3 * time.Second) + m.flashView = tuiViewTasks + } else if msg.warning != "" { + m.flashMessage = msg.warning + m.flashExpiresAt = time.Now().Add(5 * time.Second) + m.flashView = tuiViewTasks + return m, m.fetchFixJobs() + } else { + m.flashMessage = fmt.Sprintf("Fix job #%d enqueued", msg.job.ID) + m.flashExpiresAt = time.Now().Add(3 * time.Second) + m.flashView = tuiViewTasks + // Refresh tasks list + return m, m.fetchFixJobs() + } + + case tuiPatchMsg: + if msg.err != nil { + m.flashMessage = fmt.Sprintf("Patch fetch failed: %v", msg.err) + m.flashExpiresAt = time.Now().Add(3 * time.Second) + m.flashView = tuiViewTasks + } else { + m.patchText = msg.patch + m.patchJobID = msg.jobID + m.patchScroll = 0 + m.currentView = tuiViewPatch + } + + case tuiApplyPatchResultMsg: + if msg.rebase { + m.flashMessage = fmt.Sprintf("Patch for job #%d doesn't apply cleanly - triggering rebase", msg.jobID) + m.flashExpiresAt = time.Now().Add(5 * time.Second) + m.flashView = tuiViewTasks + return m, tea.Batch(m.triggerRebase(msg.jobID), m.fetchFixJobs()) + } else if msg.success && msg.err != nil { + // Patch applied but commit failed + m.flashMessage = fmt.Sprintf("Job #%d: %v", msg.jobID, msg.err) + m.flashExpiresAt = time.Now().Add(5 * time.Second) + m.flashView = tuiViewTasks + 
} else if msg.err != nil { + m.flashMessage = fmt.Sprintf("Apply failed: %v", msg.err) + m.flashExpiresAt = time.Now().Add(3 * time.Second) + m.flashView = tuiViewTasks + } else { + m.flashMessage = fmt.Sprintf("Patch from job #%d applied and committed", msg.jobID) + m.flashExpiresAt = time.Now().Add(3 * time.Second) + m.flashView = tuiViewTasks + // Refresh tasks list to show updated status, and mark parent addressed + cmds := []tea.Cmd{m.fetchFixJobs()} + if msg.parentJobID > 0 { + cmds = append(cmds, m.markParentAddressed(msg.parentJobID)) + } + return m, tea.Batch(cmds...) + } } return m, nil @@ -2037,6 +2171,15 @@ func (m tuiModel) View() string { if m.currentView == tuiViewTail { return m.renderTailView() } + if m.currentView == tuiViewTasks { + return m.renderTasksView() + } + if m.currentView == tuiViewFixPrompt { + return m.renderFixPromptView() + } + if m.currentView == tuiViewPatch { + return m.renderPatchView() + } if m.currentView == tuiViewPrompt && m.currentReview != nil { return m.renderPromptView() } @@ -2516,8 +2659,8 @@ func (m tuiModel) renderReviewView() string { b.WriteString(tuiStatusStyle.Render(locationLine)) b.WriteString("\x1b[K") // Clear to end of line - // Show verdict and addressed status on next line - hasVerdict := review.Job.Verdict != nil && *review.Job.Verdict != "" + // Show verdict and addressed status on next line (skip verdict for fix jobs) + hasVerdict := review.Job.Verdict != nil && *review.Job.Verdict != "" && !review.Job.IsFixJob() if hasVerdict || review.Addressed { b.WriteString("\n") if hasVerdict { @@ -2599,7 +2742,7 @@ func (m tuiModel) renderReviewView() string { // headerHeight = title + location line + status line (1) + help + verdict/addressed (0|1) headerHeight := titleLines + locationLines + 1 + helpLines - hasVerdict := review.Job != nil && review.Job.Verdict != nil && *review.Job.Verdict != "" + hasVerdict := review.Job != nil && review.Job.Verdict != nil && *review.Job.Verdict != "" && !review.Job.IsFixJob() if hasVerdict || review.Addressed { headerHeight++ // Add 1 for verdict/addressed line } @@ -3135,6 +3278,8 @@ func helpLines() []string { {"y", "Copy review to clipboard"}, {"x", "Cancel running/queued job"}, {"r", "Re-run completed/failed job"}, + {"F", "Trigger fix for selected review"}, + {"T", "Open Tasks view"}, }, }, { @@ -3179,6 +3324,17 @@ func helpLines() []string { {"esc/q", "Back to queue"}, }, }, + { + group: "Tasks View", + keys: []struct{ key, desc string }{ + {"↑/↓", "Navigate fix jobs"}, + {"A", "Apply patch from completed fix"}, + {"R", "Re-trigger fix (rebase)"}, + {"t", "Tail running fix job output"}, + {"x", "Cancel running/queued fix job"}, + {"esc/T", "Back to queue"}, + }, + }, { group: "General", keys: []struct{ key, desc string }{ @@ -3284,3 +3440,516 @@ func tuiCmd() *cobra.Command { return cmd } + +// renderTasksView renders the background fix tasks list. +func (m tuiModel) renderTasksView() string { + var b strings.Builder + + b.WriteString(tuiTitleStyle.Render("roborev tasks (background fixes)")) + b.WriteString("\x1b[K\n") + + // Help overlay + if m.fixShowHelp { + return m.renderTasksHelpOverlay(&b) + } + + if len(m.fixJobs) == 0 { + b.WriteString("\n No fix tasks. Press F on a review to trigger a background fix.\n") + b.WriteString("\n") + b.WriteString(tuiHelpStyle.Render("T: back to queue | F: fix review | q: quit")) + b.WriteString("\x1b[K\x1b[J") + return b.String() + } + + // Column layout: status, job, parent are fixed; ref and subject split remaining space. 
+ const statusW = 8 // "canceled" is the longest + const idW = 5 // "#" + 4-digit number + const parentW = 11 // "fixes #NNNN" + fixedW := 2 + statusW + 1 + idW + 1 + parentW + 1 + 1 // prefix + inter-column spaces + flexW := max(m.width-fixedW, 15) + // Ref gets 25% of flexible space, subject gets 75% + refW := max(7, flexW*25/100) + subjectW := max(5, flexW-refW-1) + + // Header + header := fmt.Sprintf(" %-*s %-*s %-*s %-*s %s", + statusW, "Status", idW, "Job", parentW, "Parent", refW, "Ref", "Subject") + b.WriteString(tuiStatusStyle.Render(header)) + b.WriteString("\x1b[K\n") + b.WriteString(" " + strings.Repeat("-", min(m.width-4, 200))) + b.WriteString("\x1b[K\n") + + // Render each fix job + visibleRows := m.height - 7 // title + header + separator + help + padding + visibleRows = max(visibleRows, 1) + startIdx := 0 + if m.fixSelectedIdx >= visibleRows { + startIdx = m.fixSelectedIdx - visibleRows + 1 + } + + for i := startIdx; i < len(m.fixJobs) && i < startIdx+visibleRows; i++ { + job := m.fixJobs[i] + + // Status label + var statusLabel string + var statusStyle lipgloss.Style + switch job.Status { + case storage.JobStatusQueued: + statusLabel = "queued" + statusStyle = tuiQueuedStyle + case storage.JobStatusRunning: + statusLabel = "running" + statusStyle = tuiRunningStyle + case storage.JobStatusDone: + statusLabel = "ready" + statusStyle = tuiDoneStyle + case storage.JobStatusFailed: + statusLabel = "failed" + statusStyle = tuiFailedStyle + case storage.JobStatusCanceled: + statusLabel = "canceled" + statusStyle = tuiCanceledStyle + case storage.JobStatusApplied: + statusLabel = "applied" + statusStyle = tuiDoneStyle + case storage.JobStatusRebased: + statusLabel = "rebased" + statusStyle = tuiCanceledStyle + } + + parentRef := "" + if job.ParentJobID != nil { + parentRef = fmt.Sprintf("fixes #%d", *job.ParentJobID) + } + ref := job.GitRef + if len(ref) > refW { + ref = ref[:max(1, refW-3)] + "..." 
+ } + subject := truncateString(job.CommitSubject, subjectW) + + if i == m.fixSelectedIdx { + line := fmt.Sprintf(" %-*s #%-4d %-*s %-*s %s", + statusW, statusLabel, job.ID, parentW, parentRef, refW, ref, subject) + b.WriteString(tuiSelectedStyle.Render(line)) + } else { + styledStatus := statusStyle.Render(fmt.Sprintf("%-*s", statusW, statusLabel)) + rest := fmt.Sprintf(" #%-4d %-*s %-*s %s", + job.ID, parentW, parentRef, refW, ref, subject) + b.WriteString(" " + styledStatus + rest) + } + b.WriteString("\x1b[K\n") + } + + // Flash message + if m.flashMessage != "" && time.Now().Before(m.flashExpiresAt) && m.flashView == tuiViewTasks { + flashStyle := lipgloss.NewStyle().Foreground(lipgloss.AdaptiveColor{Light: "28", Dark: "46"}) + b.WriteString(flashStyle.Render(m.flashMessage)) + } + b.WriteString("\x1b[K\n") + + // Help + b.WriteString(tuiHelpStyle.Render("enter: view | p: patch | A: apply | t: tail | x: cancel | r: refresh | ?: help | T/esc: back")) + b.WriteString("\x1b[K\x1b[J") + + return b.String() +} + +func (m tuiModel) renderTasksHelpOverlay(b *strings.Builder) string { + help := []string{ + "", + " Task Status", + " queued Waiting for a worker to pick up the job", + " running Agent is working in an isolated worktree", + " ready Patch captured and ready to apply to your working tree", + " failed Agent failed (press enter or t to see error details)", + " applied Patch was applied and committed to your working tree", + " canceled Job was canceled by user", + "", + " Keybindings", + " enter/t View review output (ready) or error (failed) or tail (running)", + " p View the patch diff for a ready job", + " A Apply patch from a ready job to your working tree", + " R Re-run fix against current HEAD (when patch is stale)", + " F Trigger a new fix from a review (from queue view)", + " x Cancel a queued or running job", + " r Refresh the task list", + " T/esc Return to the main queue view", + " ? Toggle this help", + "", + " Workflow", + " 1. Press F on a failing review to trigger a background fix", + " 2. The agent runs in an isolated worktree (your files are untouched)", + " 3. When status shows 'ready', press A to apply and commit the patch", + " 4. If the patch is stale (code changed since), press R to re-run", + "", + } + for _, line := range help { + b.WriteString(line) + b.WriteString("\x1b[K\n") + } + b.WriteString(tuiHelpStyle.Render("?: close help")) + b.WriteString("\x1b[K\x1b[J") + return b.String() +} + +// fetchPatch fetches the patch for a fix job from the daemon. 
+func (m tuiModel) fetchPatch(jobID int64) tea.Cmd { + return func() tea.Msg { + url := m.serverAddr + fmt.Sprintf("/api/job/patch?job_id=%d", jobID) + resp, err := m.client.Get(url) + if err != nil { + return tuiPatchMsg{jobID: jobID, err: err} + } + defer resp.Body.Close() + if resp.StatusCode != http.StatusOK { + return tuiPatchMsg{jobID: jobID, err: fmt.Errorf("no patch available (HTTP %d)", resp.StatusCode)} + } + data, err := io.ReadAll(resp.Body) + if err != nil { + return tuiPatchMsg{jobID: jobID, err: err} + } + return tuiPatchMsg{jobID: jobID, patch: string(data)} + } +} + +func (m tuiModel) renderPatchView() string { + var b strings.Builder + + b.WriteString(tuiTitleStyle.Render(fmt.Sprintf("patch for fix job #%d", m.patchJobID))) + b.WriteString("\x1b[K\n") + + if m.patchText == "" { + b.WriteString("\n No patch available.\n") + } else { + lines := strings.Split(m.patchText, "\n") + visibleRows := max(m.height-4, 1) + maxScroll := max(len(lines)-visibleRows, 0) + start := max(min(m.patchScroll, maxScroll), 0) + end := min(start+visibleRows, len(lines)) + + addStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("34")) // green + delStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("160")) // red + hdrStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("33")) // blue + metaStyle := lipgloss.NewStyle().Foreground(lipgloss.Color("245")) // gray + + for _, line := range lines[start:end] { + display := line + switch { + case strings.HasPrefix(line, "+") && !strings.HasPrefix(line, "+++"): + display = addStyle.Render(line) + case strings.HasPrefix(line, "-") && !strings.HasPrefix(line, "---"): + display = delStyle.Render(line) + case strings.HasPrefix(line, "@@"): + display = hdrStyle.Render(line) + case strings.HasPrefix(line, "diff ") || strings.HasPrefix(line, "index ") || + strings.HasPrefix(line, "---") || strings.HasPrefix(line, "+++"): + display = metaStyle.Render(line) + } + b.WriteString(" " + display) + b.WriteString("\x1b[K\n") + } + + if len(lines) > visibleRows { + pct := 0 + if maxScroll > 0 { + pct = start * 100 / maxScroll + } + b.WriteString(tuiHelpStyle.Render(fmt.Sprintf(" [%d%%]", pct))) + b.WriteString("\x1b[K\n") + } + } + + b.WriteString(tuiHelpStyle.Render("j/k/up/down: scroll | esc: back to tasks")) + b.WriteString("\x1b[K\x1b[J") + return b.String() +} + +// renderFixPromptView renders the fix prompt confirmation modal. +func (m tuiModel) renderFixPromptView() string { + var b strings.Builder + + b.WriteString(tuiTitleStyle.Render(fmt.Sprintf("Fix Review #%d", m.fixPromptJobID))) + b.WriteString("\x1b[K\n\n") + + b.WriteString(" A background fix agent will address the review findings.\n") + b.WriteString(" Optionally enter custom instructions (or press enter for default):\n\n") + + // Show prompt input + promptDisplay := m.fixPromptText + if promptDisplay == "" { + promptDisplay = "(default: fix all findings from the review)" + } + fmt.Fprintf(&b, " > %s_\n", promptDisplay) + b.WriteString("\n") + + b.WriteString(tuiHelpStyle.Render("enter: start fix | esc: cancel")) + b.WriteString("\x1b[K\x1b[J") + + return b.String() +} + +// fetchJobByID fetches a single job by ID from the daemon API. 
+func (m tuiModel) fetchJobByID(jobID int64) (*storage.ReviewJob, error) { + var result struct { + Jobs []storage.ReviewJob `json:"jobs"` + } + if err := m.getJSON(fmt.Sprintf("/api/jobs?id=%d", jobID), &result); err != nil { + return nil, err + } + for i := range result.Jobs { + if result.Jobs[i].ID == jobID { + return &result.Jobs[i], nil + } + } + return nil, fmt.Errorf("job %d not found", jobID) +} + +// hasActiveFixJobs returns true if any fix jobs are queued or running. +func (m tuiModel) hasActiveFixJobs() bool { + for _, j := range m.fixJobs { + if j.Status == storage.JobStatusQueued || j.Status == storage.JobStatusRunning { + return true + } + } + return false +} + +// fetchFixJobs fetches fix jobs from the daemon. +func (m tuiModel) fetchFixJobs() tea.Cmd { + return func() tea.Msg { + var result struct { + Jobs []storage.ReviewJob `json:"jobs"` + } + err := m.getJSON("/api/jobs?job_type=fix&limit=200", &result) + if err != nil { + return tuiFixJobsMsg{err: err} + } + return tuiFixJobsMsg{jobs: result.Jobs} + } +} + +// triggerFix triggers a background fix job for a parent review. +func (m tuiModel) triggerFix(parentJobID int64, prompt string) tea.Cmd { + return func() tea.Msg { + req := map[string]any{ + "parent_job_id": parentJobID, + } + if prompt != "" { + req["prompt"] = prompt + } + var job storage.ReviewJob + err := m.postJSON("/api/job/fix", req, &job) + if err != nil { + return tuiFixTriggerResultMsg{err: err} + } + return tuiFixTriggerResultMsg{job: &job} + } +} + +// applyFixPatch fetches and applies the patch for a completed fix job. +func (m tuiModel) applyFixPatch(jobID int64) tea.Cmd { + return func() tea.Msg { + // Fetch the patch + url := m.serverAddr + fmt.Sprintf("/api/job/patch?job_id=%d", jobID) + resp, err := m.client.Get(url) + if err != nil { + return tuiApplyPatchResultMsg{jobID: jobID, err: err} + } + defer resp.Body.Close() + + if resp.StatusCode != http.StatusOK { + return tuiApplyPatchResultMsg{jobID: jobID, err: fmt.Errorf("no patch available")} + } + + patchData, err := io.ReadAll(resp.Body) + if err != nil { + return tuiApplyPatchResultMsg{jobID: jobID, err: err} + } + patch := string(patchData) + if patch == "" { + return tuiApplyPatchResultMsg{jobID: jobID, err: fmt.Errorf("empty patch")} + } + + // Fetch the job to find repo path + jobDetail, jErr := m.fetchJobByID(jobID) + if jErr != nil { + return tuiApplyPatchResultMsg{jobID: jobID, err: jErr} + } + + // Check for uncommitted changes in files the patch touches + patchedFiles, pfErr := patchFiles(patch) + if pfErr != nil { + return tuiApplyPatchResultMsg{jobID: jobID, err: pfErr} + } + dirty, dirtyErr := dirtyPatchFiles(jobDetail.RepoPath, patchedFiles) + if dirtyErr != nil { + return tuiApplyPatchResultMsg{jobID: jobID, + err: fmt.Errorf("checking dirty files: %w", dirtyErr)} + } + if len(dirty) > 0 { + return tuiApplyPatchResultMsg{jobID: jobID, + err: fmt.Errorf("uncommitted changes in patch files: %s — stash or commit first", strings.Join(dirty, ", "))} + } + + // Dry-run check — only trigger rebase on actual merge conflicts + if err := worktree.CheckPatch(jobDetail.RepoPath, patch); err != nil { + var conflictErr *worktree.PatchConflictError + if errors.As(err, &conflictErr) { + return tuiApplyPatchResultMsg{jobID: jobID, rebase: true, err: err} + } + return tuiApplyPatchResultMsg{jobID: jobID, err: err} + } + + // Apply the patch + if err := worktree.ApplyPatch(jobDetail.RepoPath, patch); err != nil { + return tuiApplyPatchResultMsg{jobID: jobID, err: err} + } + + var parentJobID int64 + 
if jobDetail.ParentJobID != nil { + parentJobID = *jobDetail.ParentJobID + } + + // Stage and commit + commitMsg := fmt.Sprintf("fix: apply roborev fix job #%d", jobID) + if parentJobID > 0 { + ref := jobDetail.GitRef + if len(ref) > 7 { + ref = ref[:7] + } + commitMsg = fmt.Sprintf("fix: apply roborev fix for %s (job #%d)", ref, jobID) + } + if err := commitPatch(jobDetail.RepoPath, patch, commitMsg); err != nil { + return tuiApplyPatchResultMsg{jobID: jobID, parentJobID: parentJobID, success: true, + err: fmt.Errorf("patch applied but commit failed: %w", err)} + } + + // Mark the fix job as applied on the server + if err := m.postJSON("/api/job/applied", map[string]any{"job_id": jobID}, nil); err != nil { + return tuiApplyPatchResultMsg{jobID: jobID, parentJobID: parentJobID, success: true, + err: fmt.Errorf("patch applied and committed but failed to mark applied: %w", err)} + } + + return tuiApplyPatchResultMsg{jobID: jobID, parentJobID: parentJobID, success: true} + } +} + +// commitPatch stages only the files touched by patch and commits them. +func commitPatch(repoPath, patch, message string) error { + files, err := patchFiles(patch) + if err != nil { + return err + } + if len(files) == 0 { + return fmt.Errorf("no files found in patch") + } + args := append([]string{"-C", repoPath, "add", "--"}, files...) + addCmd := exec.Command("git", args...) + addCmd.Env = append(os.Environ(), "GIT_LITERAL_PATHSPECS=1") + if out, err := addCmd.CombinedOutput(); err != nil { + return fmt.Errorf("git add: %w: %s", err, out) + } + commitArgs := append( + []string{"-C", repoPath, "commit", "--only", "-m", message, "--"}, + files..., + ) + commitCmd := exec.Command("git", commitArgs...) + commitCmd.Env = append(os.Environ(), "GIT_LITERAL_PATHSPECS=1") + if out, err := commitCmd.CombinedOutput(); err != nil { + return fmt.Errorf("git commit: %w: %s", err, out) + } + return nil +} + +// dirtyPatchFiles returns the subset of files that have uncommitted changes. +func dirtyPatchFiles(repoPath string, files []string) ([]string, error) { + // git diff --name-only shows unstaged changes; --cached shows staged + cmd := exec.Command("git", "-C", repoPath, "diff", "--name-only", "HEAD", "--") + out, err := cmd.Output() + if err != nil { + return nil, fmt.Errorf("git diff: %w", err) + } + dirty := map[string]bool{} + for line := range strings.SplitSeq(strings.TrimSpace(string(out)), "\n") { + if line != "" { + dirty[line] = true + } + } + var overlap []string + for _, f := range files { + if dirty[f] { + overlap = append(overlap, f) + } + } + return overlap, nil +} + +// patchFiles extracts the list of file paths touched by a unified diff. +func patchFiles(patch string) ([]string, error) { + fileDiffs, err := godiff.ParseMultiFileDiff([]byte(patch)) + if err != nil { + return nil, fmt.Errorf("parse patch: %w", err) + } + seen := map[string]bool{} + addFile := func(name, prefix string) { + name = strings.TrimPrefix(name, prefix) + if name != "" && name != "/dev/null" { + seen[name] = true + } + } + for _, fd := range fileDiffs { + addFile(fd.OrigName, "a/") // old path (stages deletion for renames) + addFile(fd.NewName, "b/") // new path (stages addition for renames) + } + files := make([]string, 0, len(seen)) + for f := range seen { + files = append(files, f) + } + return files, nil +} + +// triggerRebase triggers a new fix job that re-applies a stale patch to the current HEAD. +// The server looks up the stale patch from the DB, avoiding large client-to-server transfers. 
+func (m tuiModel) triggerRebase(staleJobID int64) tea.Cmd { + return func() tea.Msg { + // Find the parent job ID (the original review this fix was for) + staleJob, fetchErr := m.fetchJobByID(staleJobID) + if fetchErr != nil { + return tuiFixTriggerResultMsg{err: fmt.Errorf("stale job %d not found: %w", staleJobID, fetchErr)} + } + + // Use the original parent job ID if this was already a fix job + parentJobID := staleJobID + if staleJob.ParentJobID != nil { + parentJobID = *staleJob.ParentJobID + } + + // Let the server build the rebase prompt from the stale job's patch + req := map[string]any{ + "parent_job_id": parentJobID, + "stale_job_id": staleJobID, + } + var newJob storage.ReviewJob + if err := m.postJSON("/api/job/fix", req, &newJob); err != nil { + return tuiFixTriggerResultMsg{err: fmt.Errorf("trigger rebase: %w", err)} + } + // Mark the stale job as rebased now that the new job exists. + // Skip if already rebased (e.g. retry via R on a rebased job). + var warning string + if staleJob.Status != storage.JobStatusRebased { + if err := m.postJSON( + "/api/job/rebased", + map[string]any{"job_id": staleJobID}, + nil, + ); err != nil { + warning = fmt.Sprintf( + "rebase job #%d enqueued but failed to mark #%d as rebased: %v", + newJob.ID, staleJobID, err, + ) + } + } + return tuiFixTriggerResultMsg{job: &newJob, warning: warning} + } +} + +// truncateString is defined in fix.go diff --git a/cmd/roborev/tui_handlers.go b/cmd/roborev/tui_handlers.go index 56c8cbf3..d8ac5fb5 100644 --- a/cmd/roborev/tui_handlers.go +++ b/cmd/roborev/tui_handlers.go @@ -21,6 +21,12 @@ func (m tuiModel) handleKeyMsg(msg tea.KeyMsg) (tea.Model, tea.Cmd) { return m.handleFilterKey(msg) case tuiViewTail: return m.handleTailKey(msg) + case tuiViewFixPrompt: + return m.handleFixPromptKey(msg) + case tuiViewTasks: + return m.handleTasksKey(msg) + case tuiViewPatch: + return m.handlePatchKey(msg) } // Global keys shared across queue/review/prompt/commitMsg/help views @@ -375,17 +381,27 @@ func (m tuiModel) handleGlobalKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { return m.handleHelpKey() case "esc": return m.handleEscKey() + case "F": + return m.handleFixKey() + case "T": + return m.handleToggleTasksKey() } return m, nil } func (m tuiModel) handleQuitKey() (tea.Model, tea.Cmd) { if m.currentView == tuiViewReview { - m.currentView = tuiViewQueue + returnTo := m.reviewFromView + if returnTo == 0 { + returnTo = tuiViewQueue + } + m.currentView = returnTo m.currentReview = nil m.reviewScroll = 0 m.paginateNav = 0 - m.normalizeSelectionIfHidden() + if returnTo == tuiViewQueue { + m.normalizeSelectionIfHidden() + } return m, nil } if m.currentView == tuiViewPrompt { @@ -694,6 +710,7 @@ func (m tuiModel) handleEnterKey() (tea.Model, tea.Cmd) { job := m.jobs[m.selectedIdx] switch job.Status { case storage.JobStatusDone: + m.reviewFromView = tuiViewQueue return m, m.fetchReview(job.ID) case storage.JobStatusFailed: m.currentBranch = "" @@ -702,6 +719,7 @@ func (m tuiModel) handleEnterKey() (tea.Model, tea.Cmd) { Output: "Job failed:\n\n" + job.Error, Job: &job, } + m.reviewFromView = tuiViewQueue m.currentView = tuiViewReview m.reviewScroll = 0 return m, nil @@ -853,6 +871,21 @@ func (m tuiModel) handleRerunKey() (tea.Model, tea.Cmd) { } func (m tuiModel) handleTailKey2() (tea.Model, tea.Cmd) { + // From prompt view: allow tailing the running job being viewed + if m.currentView == tuiViewPrompt && m.currentReview != nil && m.currentReview.Job != nil { + job := m.currentReview.Job + if job.Status == storage.JobStatusRunning 
{ + m.tailJobID = job.ID + m.tailLines = nil + m.tailScroll = 0 + m.tailStreaming = true + m.tailFollow = true + m.tailFromView = m.reviewFromView + m.currentView = tuiViewTail + return m, tea.Batch(tea.ClearScreen, m.fetchTailOutput(job.ID)) + } + } + if m.currentView != tuiViewQueue || len(m.jobs) == 0 || m.selectedIdx < 0 || m.selectedIdx >= len(m.jobs) { return m, nil } @@ -1034,14 +1067,20 @@ func (m tuiModel) handleEscKey() (tea.Model, tea.Cmd) { m.loadingJobs = true return m, m.fetchJobs() } else if m.currentView == tuiViewReview { - m.currentView = tuiViewQueue + returnTo := m.reviewFromView + if returnTo == 0 { + returnTo = tuiViewQueue + } + m.currentView = returnTo m.currentReview = nil m.reviewScroll = 0 m.paginateNav = 0 - m.normalizeSelectionIfHidden() - if m.hideAddressed && !m.loadingJobs { - m.loadingJobs = true - return m, m.fetchJobs() + if returnTo == tuiViewQueue { + m.normalizeSelectionIfHidden() + if m.hideAddressed && !m.loadingJobs { + m.loadingJobs = true + return m, m.fetchJobs() + } } } else if m.currentView == tuiViewPrompt { m.paginateNav = 0 @@ -1332,6 +1371,279 @@ func (m tuiModel) handleReconnectMsg(msg tuiReconnectMsg) (tea.Model, tea.Cmd) { return m, nil } +// handleFixKey opens the fix prompt modal for the currently selected job. +func (m tuiModel) handleFixKey() (tea.Model, tea.Cmd) { + if m.currentView != tuiViewQueue && m.currentView != tuiViewReview { + return m, nil + } + + // Get the selected job + var job storage.ReviewJob + if m.currentView == tuiViewReview { + if m.currentReview == nil || m.currentReview.Job == nil { + return m, nil + } + job = *m.currentReview.Job + } else { + if len(m.jobs) == 0 || m.selectedIdx < 0 || m.selectedIdx >= len(m.jobs) { + return m, nil + } + job = m.jobs[m.selectedIdx] + } + + // Only allow fix on completed review jobs (not fix jobs — + // fix-of-fix chains are not supported). + if job.IsFixJob() { + m.flashMessage = "Cannot fix a fix job" + m.flashExpiresAt = time.Now().Add(2 * time.Second) + m.flashView = m.currentView + return m, nil + } + if job.Status != storage.JobStatusDone { + m.flashMessage = "Can only fix completed reviews" + m.flashExpiresAt = time.Now().Add(2 * time.Second) + m.flashView = m.currentView + return m, nil + } + + // Open fix prompt modal + m.fixPromptJobID = job.ID + m.fixPromptText = "" // Empty means use default prompt from server + m.fixPromptFromView = m.currentView + m.currentView = tuiViewFixPrompt + return m, nil +} + +// handleToggleTasksKey switches between queue and tasks view. +func (m tuiModel) handleToggleTasksKey() (tea.Model, tea.Cmd) { + if m.currentView == tuiViewTasks { + m.currentView = tuiViewQueue + return m, nil + } + if m.currentView == tuiViewQueue { + m.currentView = tuiViewTasks + return m, m.fetchFixJobs() + } + return m, nil +} + +// handleFixPromptKey handles key input in the fix prompt confirmation modal. 
+func (m tuiModel) handleFixPromptKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { + switch msg.String() { + case "ctrl+c": + return m, tea.Quit + case "esc": + m.currentView = m.fixPromptFromView + m.fixPromptText = "" + m.fixPromptJobID = 0 + return m, nil + case "enter": + jobID := m.fixPromptJobID + prompt := m.fixPromptText + m.currentView = tuiViewTasks + m.fixPromptText = "" + m.fixPromptJobID = 0 + return m, m.triggerFix(jobID, prompt) + case "backspace": + if len(m.fixPromptText) > 0 { + runes := []rune(m.fixPromptText) + m.fixPromptText = string(runes[:len(runes)-1]) + } + return m, nil + default: + if len(msg.Runes) > 0 { + for _, r := range msg.Runes { + if unicode.IsPrint(r) || r == '\n' || r == '\t' { + m.fixPromptText += string(r) + } + } + } + return m, nil + } +} + +// handleTasksKey handles key input in the tasks view. +func (m tuiModel) handleTasksKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { + switch msg.String() { + case "ctrl+c", "q": + return m, tea.Quit + case "esc", "T": + m.currentView = tuiViewQueue + return m, nil + case "up", "k": + if m.fixSelectedIdx > 0 { + m.fixSelectedIdx-- + } + return m, nil + case "down", "j": + if m.fixSelectedIdx < len(m.fixJobs)-1 { + m.fixSelectedIdx++ + } + return m, nil + case "enter": + // View task: prompt for running, review for done/applied, error for failed + if len(m.fixJobs) > 0 && m.fixSelectedIdx < len(m.fixJobs) { + job := m.fixJobs[m.fixSelectedIdx] + switch { + case job.Status == storage.JobStatusRunning: + if job.Prompt != "" { + m.currentReview = &storage.Review{ + Agent: job.Agent, + Prompt: job.Prompt, + Job: &job, + } + m.reviewFromView = tuiViewTasks + m.currentView = tuiViewPrompt + m.promptScroll = 0 + m.promptFromQueue = false + return m, nil + } + // No prompt yet, go straight to tail + m.tailJobID = job.ID + m.tailLines = nil + m.tailScroll = 0 + m.tailStreaming = true + m.tailFollow = true + m.tailFromView = tuiViewTasks + m.currentView = tuiViewTail + return m, tea.Batch(tea.ClearScreen, m.fetchTailOutput(job.ID)) + case job.HasViewableOutput(): + m.selectedJobID = job.ID + m.reviewFromView = tuiViewTasks + return m, m.fetchReview(job.ID) + case job.Status == storage.JobStatusFailed: + errMsg := job.Error + if errMsg == "" { + errMsg = "unknown error" + } + m.flashMessage = fmt.Sprintf("Job #%d failed: %s", job.ID, errMsg) + m.flashExpiresAt = time.Now().Add(5 * time.Second) + m.flashView = tuiViewTasks + } + } + return m, nil + case "t": + // Tail output: live for running jobs, review for done, error for failed + if len(m.fixJobs) > 0 && m.fixSelectedIdx < len(m.fixJobs) { + job := m.fixJobs[m.fixSelectedIdx] + switch { + case job.Status == storage.JobStatusRunning: + m.tailJobID = job.ID + m.tailLines = nil + m.tailScroll = 0 + m.tailStreaming = true + m.tailFollow = true + m.tailFromView = tuiViewTasks + m.currentView = tuiViewTail + return m, tea.Batch(tea.ClearScreen, m.fetchTailOutput(job.ID)) + case job.HasViewableOutput(): + m.selectedJobID = job.ID + m.reviewFromView = tuiViewTasks + return m, m.fetchReview(job.ID) + case job.Status == storage.JobStatusFailed: + errMsg := job.Error + if errMsg == "" { + errMsg = "unknown error" + } + m.flashMessage = fmt.Sprintf("Job #%d failed: %s", job.ID, errMsg) + m.flashExpiresAt = time.Now().Add(5 * time.Second) + m.flashView = tuiViewTasks + } + } + return m, nil + case "A": + // Apply patch (handled in Phase 5) + if len(m.fixJobs) > 0 && m.fixSelectedIdx < len(m.fixJobs) { + job := m.fixJobs[m.fixSelectedIdx] + if job.Status == storage.JobStatusDone { + return m, 
m.applyFixPatch(job.ID) + } + } + return m, nil + case "R": + // Manually trigger rebase for a completed or rebased fix job + if len(m.fixJobs) > 0 && m.fixSelectedIdx < len(m.fixJobs) { + job := m.fixJobs[m.fixSelectedIdx] + if job.Status == storage.JobStatusDone || job.Status == storage.JobStatusRebased { + return m, m.triggerRebase(job.ID) + } + } + return m, nil + case "x": + // Cancel fix job + if len(m.fixJobs) > 0 && m.fixSelectedIdx < len(m.fixJobs) { + job := &m.fixJobs[m.fixSelectedIdx] + if job.Status == storage.JobStatusRunning || job.Status == storage.JobStatusQueued { + oldStatus := job.Status + oldFinishedAt := job.FinishedAt + job.Status = storage.JobStatusCanceled + now := time.Now() + job.FinishedAt = &now + return m, m.cancelJob(job.ID, oldStatus, oldFinishedAt) + } + } + return m, nil + case "p": + // View patch for completed fix jobs + if len(m.fixJobs) > 0 && m.fixSelectedIdx < len(m.fixJobs) { + job := m.fixJobs[m.fixSelectedIdx] + if job.HasViewableOutput() { + return m, m.fetchPatch(job.ID) + } + m.flashMessage = "Patch not yet available" + m.flashExpiresAt = time.Now().Add(2 * time.Second) + m.flashView = m.currentView + } + return m, nil + case "r": + // Refresh + return m, m.fetchFixJobs() + case "?": + m.fixShowHelp = !m.fixShowHelp + return m, nil + } + return m, nil +} + +// handlePatchKey handles key input in the patch viewer. +func (m tuiModel) handlePatchKey(msg tea.KeyMsg) (tea.Model, tea.Cmd) { + switch msg.String() { + case "ctrl+c": + return m, tea.Quit + case "esc", "q": + m.currentView = tuiViewTasks + m.patchText = "" + m.patchScroll = 0 + m.patchJobID = 0 + return m, nil + case "up", "k": + if m.patchScroll > 0 { + m.patchScroll-- + } + return m, nil + case "down", "j": + m.patchScroll++ + return m, nil + case "pgup": + visibleLines := max(m.height-4, 1) + m.patchScroll = max(0, m.patchScroll-visibleLines) + return m, tea.ClearScreen + case "pgdown": + visibleLines := max(m.height-4, 1) + m.patchScroll += visibleLines + return m, tea.ClearScreen + case "home", "g": + m.patchScroll = 0 + return m, nil + case "end", "G": + lines := strings.Split(m.patchText, "\n") + visibleRows := max(m.height-4, 1) + m.patchScroll = max(len(lines)-visibleRows, 0) + return m, nil + } + return m, nil +} + // handleConnectionError tracks consecutive connection errors and triggers reconnection. 
func (m *tuiModel) handleConnectionError(err error) tea.Cmd { if isConnectionError(err) { diff --git a/cmd/roborev/tui_test.go b/cmd/roborev/tui_test.go index 87fce9b0..3da2af4b 100644 --- a/cmd/roborev/tui_test.go +++ b/cmd/roborev/tui_test.go @@ -1442,3 +1442,190 @@ func TestTUIStatusDisplaysCorrectly(t *testing.T) { } } } + +func TestPatchFiles(t *testing.T) { + tests := []struct { + name string + patch string + want []string + }{ + { + name: "simple add", + patch: `diff --git a/main.go b/main.go +--- a/main.go ++++ b/main.go +@@ -1 +1,2 @@ + package main ++// new line +`, + want: []string{"main.go"}, + }, + { + name: "file in b/ directory not double-stripped", + patch: `diff --git a/b/main.go b/b/main.go +--- a/b/main.go ++++ b/b/main.go +@@ -1 +1,2 @@ + package main ++// new line +`, + want: []string{"b/main.go"}, + }, + { + name: "file in a/ directory not double-stripped", + patch: `diff --git a/a/utils.go b/a/utils.go +--- a/a/utils.go ++++ b/a/utils.go +@@ -1 +1,2 @@ + package a ++// new line +`, + want: []string{"a/utils.go"}, + }, + { + name: "new file with /dev/null", + patch: `diff --git a/new.go b/new.go +--- /dev/null ++++ b/new.go +@@ -0,0 +1 @@ ++package main +`, + want: []string{"new.go"}, + }, + { + name: "deleted file with /dev/null", + patch: `diff --git a/old.go b/old.go +--- a/old.go ++++ /dev/null +@@ -1 +0,0 @@ +-package main +`, + want: []string{"old.go"}, + }, + { + name: "rename", + patch: `diff --git a/old.go b/renamed.go +--- a/old.go ++++ b/renamed.go +@@ -1 +1 @@ +-package old ++package renamed +`, + want: []string{"old.go", "renamed.go"}, + }, + } + + for _, tt := range tests { + t.Run(tt.name, func(t *testing.T) { + got, err := patchFiles(tt.patch) + if err != nil { + t.Fatalf("patchFiles returned error: %v", err) + } + wantSet := map[string]bool{} + for _, f := range tt.want { + wantSet[f] = true + } + if len(got) != len(tt.want) { + t.Errorf("expected %d files, got %d: %v", len(tt.want), len(got), got) + } + gotSet := map[string]bool{} + for _, f := range got { + if gotSet[f] { + t.Errorf("duplicate file in output: %q", f) + } + gotSet[f] = true + } + for f := range wantSet { + if !gotSet[f] { + t.Errorf("missing expected file %q", f) + } + } + for f := range gotSet { + if !wantSet[f] { + t.Errorf("unexpected file %q", f) + } + } + }) + } +} + +func TestDirtyPatchFilesError(t *testing.T) { + // dirtyPatchFiles should return an error when git diff fails + // (e.g., invalid repo path), not silently return nil. 
+ _, err := dirtyPatchFiles("/nonexistent/repo/path", []string{"file.go"}) + if err == nil { + t.Fatal("expected error from dirtyPatchFiles with invalid repo, got nil") + } + if !strings.Contains(err.Error(), "git diff") { + t.Errorf("expected error to mention 'git diff', got: %v", err) + } +} + +func TestTUIFixTriggerResultMsg(t *testing.T) { + t.Run("warning shows flash and triggers refresh", func(t *testing.T) { + m := newTuiModel("http://localhost") + m.currentView = tuiViewTasks + m.width = 80 + m.height = 24 + + msg := tuiFixTriggerResultMsg{ + job: &storage.ReviewJob{ID: 42}, + warning: "rebase job #42 enqueued but failed to mark #10 as rebased: server error", + } + + result, cmd := m.Update(msg) + updated := result.(tuiModel) + + if !strings.Contains(updated.flashMessage, "failed to mark") { + t.Errorf("expected warning in flash, got %q", updated.flashMessage) + } + if updated.flashView != tuiViewTasks { + t.Errorf("expected flash view tasks, got %v", updated.flashView) + } + if cmd == nil { + t.Error("expected refresh cmd, got nil") + } + }) + + t.Run("success shows enqueued flash and triggers refresh", func(t *testing.T) { + m := newTuiModel("http://localhost") + m.currentView = tuiViewTasks + m.width = 80 + m.height = 24 + + msg := tuiFixTriggerResultMsg{ + job: &storage.ReviewJob{ID: 99}, + } + + result, cmd := m.Update(msg) + updated := result.(tuiModel) + + if !strings.Contains(updated.flashMessage, "#99 enqueued") { + t.Errorf("expected enqueued flash, got %q", updated.flashMessage) + } + if cmd == nil { + t.Error("expected refresh cmd, got nil") + } + }) + + t.Run("error shows failure flash with no refresh", func(t *testing.T) { + m := newTuiModel("http://localhost") + m.currentView = tuiViewTasks + m.width = 80 + m.height = 24 + + msg := tuiFixTriggerResultMsg{ + err: fmt.Errorf("connection refused"), + } + + result, cmd := m.Update(msg) + updated := result.(tuiModel) + + if !strings.Contains(updated.flashMessage, "Fix failed") { + t.Errorf("expected failure flash, got %q", updated.flashMessage) + } + if cmd != nil { + t.Error("expected no cmd on error, got non-nil") + } + }) +} diff --git a/docs/plans/2026-02-17-tui-fix.md b/docs/plans/2026-02-17-tui-fix.md new file mode 100644 index 00000000..08c8d823 --- /dev/null +++ b/docs/plans/2026-02-17-tui-fix.md @@ -0,0 +1,84 @@ +# TUI-Triggered Fix via Background Worktrees + +**Status:** Implemented + +**Goal:** Allow users to trigger `roborev fix` from the TUI, running agents in isolated background worktrees and applying patches when ready. + +**Architecture:** New `fix` job type runs in daemon worker pool using temporary worktrees (same pattern as `refine.go`). Agent produces a patch stored in DB. TUI gets a new Tasks view to monitor fix jobs and apply/rebase patches to the working tree. + +**Tech Stack:** Go, SQLite, Bubble Tea TUI, git worktrees + +--- + +## Implementation Summary + +### Commits + +1. `db3b952` - feat: add fix job type with parent_job_id and patch columns +2. `5b8d6f9` - refactor: extract worktree helpers to internal/worktree package +3. `d62cfbc` - feat: add fix job processing with worktree isolation in worker pool +4. `948b452` - feat: add /api/job/fix and /api/job/patch endpoints +5. `ed7644e` - feat: add TUI Tasks view and fix prompt for background fix jobs +6. `6f48ff5` - feat: add automatic rebase flow for stale fix patches +7. 
`ca4e4f4` - feat: update help text and hint bars for fix/tasks features + +### What Was Built + +**Storage Layer (Phase 1)** +- `JobTypeFix = "fix"` constant in `internal/storage/models.go` +- `ParentJobID` and `Patch` fields on `ReviewJob` struct +- `IsFixJob()` helper, updated `UsesStoredPrompt()` to include fix +- DB migration for `parent_job_id` and `patch` columns +- `SaveJobPatch()` function in `internal/storage/jobs.go` + +**Worktree Package (Phase 2a)** +- Created `internal/worktree/worktree.go` with shared helpers: + - `Create(repoPath)` — creates temp worktree detached at HEAD + - `CapturePatch(worktreeDir)` — stages all changes and returns diff + - `ApplyPatch(repoPath, patch)` — applies a patch to repo + - `CheckPatch(repoPath, patch)` — dry-run check if patch applies cleanly +- Extracted from `cmd/roborev/refine.go`, updated refine to use shared package +- Tests moved to `internal/worktree/worktree_test.go` + +**Worker Pool (Phase 2b)** +- `processJob()` in `internal/daemon/worker.go` creates isolated worktree for fix jobs +- Agent runs in worktree, patch captured after completion and saved to DB + +**API Endpoints (Phase 3)** +- `POST /api/job/fix` — creates background fix job from parent review + - Accepts `parent_job_id` (required) and `prompt` (optional override) + - Builds fix prompt from parent review output if no custom prompt +- `GET /api/job/patch?job_id=N` — returns stored patch as plain text + +**TUI (Phase 4)** +- **Tasks View** (`T` key) — shows fix jobs with status (queued/running/done/failed) +- **Fix Prompt** (`F` key from queue/review) — opens modal to trigger fix for selected review +- **Apply Patch** (`A` key in Tasks view) — applies patch with dry-run check first +- **Rebase** — automatic when `git apply --check` fails: triggers new fix job with stale patch as context +- **Manual Rebase** (`R` key in Tasks view) — manually re-trigger fix with rebase prompt +- **Tail** (`t` key in Tasks view) — tail running fix job output +- **Cancel** (`x` key in Tasks view) — cancel running/queued fix job + +**Help and Hints (Phase 5)** +- Updated `helpLines()` with Tasks View section and F/T in Actions +- Queue hint bar: added `F: fix` and `T: tasks` +- Tasks hint bar: `A: apply | R: rebase | t: tail | x: cancel | r: refresh | T/esc: back` +- Fix prompt hint bar: `enter: start fix | esc: cancel` + +### Flow + +1. User views a review in the TUI, presses `F` to trigger a fix +2. Fix prompt modal shows pre-built prompt, user presses `enter` to confirm +3. Daemon enqueues a `fix` job, runs agent in isolated worktree +4. Agent makes changes, worktree diff captured as patch, stored in DB +5. User presses `T` to see Tasks view, sees fix job complete +6. User presses `A` to apply — dry-run check runs first: + - **Clean:** patch applied to working tree + - **Stale:** automatically triggers new fix job with stale patch as context (rebase) +7. 
User can press `R` to manually trigger rebase for any completed fix + +### Not Implemented (Future Work) + +- **Refine mode from TUI** — iterative review-fix loop (fix → re-review → fix until pass) +- **Dry-run staleness indicator** — showing green/yellow status on completed fix jobs in Tasks view +- **Patch preview** — rendering the diff in a scroll view before applying diff --git a/e2e_test.go b/e2e_test.go index eaa8ff65..e5305d3d 100644 --- a/e2e_test.go +++ b/e2e_test.go @@ -42,13 +42,15 @@ func TestE2EEnqueueAndReview(t *testing.T) { // Add handlers manually (simulating the server) mux.HandleFunc("/api/status", func(w http.ResponseWriter, r *http.Request) { - queued, running, done, failed, canceled, _ := db.GetJobCounts() + queued, running, done, failed, canceled, applied, rebased, _ := db.GetJobCounts() status := storage.DaemonStatus{ QueuedJobs: queued, RunningJobs: running, CompletedJobs: done, FailedJobs: failed, CanceledJobs: canceled, + AppliedJobs: applied, + RebasedJobs: rebased, MaxWorkers: 4, } w.Header().Set("Content-Type", "application/json") @@ -109,7 +111,7 @@ func TestDatabaseIntegration(t *testing.T) { } // Verify initial state - queued, running, done, _, _, _ := db.GetJobCounts() + queued, running, done, _, _, _, _, _ := db.GetJobCounts() if queued != 1 { t.Errorf("Expected 1 queued job, got %d", queued) } @@ -129,7 +131,7 @@ func TestDatabaseIntegration(t *testing.T) { } // Verify running state - _, running, _, _, _, _ = db.GetJobCounts() + _, running, _, _, _, _, _, _ = db.GetJobCounts() if running != 1 { t.Errorf("Expected 1 running job, got %d", running) } @@ -141,7 +143,7 @@ func TestDatabaseIntegration(t *testing.T) { } // Verify completed state - queued, running, done, _, _, _ = db.GetJobCounts() + queued, running, done, _, _, _, _, _ = db.GetJobCounts() if done != 1 { t.Errorf("Expected 1 completed job, got %d", done) } diff --git a/flake.nix b/flake.nix index 0d9c2b05..95cf3ab5 100644 --- a/flake.nix +++ b/flake.nix @@ -19,7 +19,7 @@ src = ./.; - vendorHash = "sha256-hOnrKcNJ0SHLcVR6iwYEMtcT6PFovJjrDj/QxH+QHnY="; + vendorHash = "sha256-HzQUsFOOIxV0K5LGgMA4P7TWbLw7FT59BAfN9vncMfg="; subPackages = [ "cmd/roborev" ]; diff --git a/go.mod b/go.mod index 326357b3..8d29d721 100644 --- a/go.mod +++ b/go.mod @@ -47,6 +47,7 @@ require ( github.com/ncruces/go-strftime v1.0.0 // indirect github.com/remyoudompheng/bigfft v0.0.0-20230129092748-24d4a6f8daec // indirect github.com/rivo/uniseg v0.4.7 // indirect + github.com/sourcegraph/go-diff v0.7.0 // indirect github.com/spf13/pflag v1.0.9 // indirect github.com/xo/terminfo v0.0.0-20220910002029-abceb7e1c41e // indirect github.com/yuin/goldmark v1.7.8 // indirect diff --git a/go.sum b/go.sum index b70fb42b..e9c862d8 100644 --- a/go.sum +++ b/go.sum @@ -50,6 +50,7 @@ github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f h1:Y/CXytFA4m6 github.com/erikgeiser/coninput v0.0.0-20211004153227-1c3628e74d0f/go.mod h1:vw97MGsxSvLiUE2X8qFplwetxpGLQrlU1Q9AUEIzCaM= github.com/fsnotify/fsnotify v1.9.0 h1:2Ml+OJNzbYCTzsxtv8vKSFD9PbJjmhYF14k/jKC7S9k= github.com/fsnotify/fsnotify v1.9.0/go.mod h1:8jBTzvmWwFyi3Pb8djgCCO5IBqzKJ/Jwo8TRcHyHii0= +github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e h1:ijClszYn+mADRFY17kjQEVQ1XRhq2/JR1M3sGqeJoxs= github.com/google/pprof v0.0.0-20250317173921-a4b03ec1a45e/go.mod h1:boTsfXsheKC2y+lKOCMpSfarhxDeIzfZG1jqGcPl3cA= github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= @@ 
-100,6 +101,10 @@ github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJ github.com/rivo/uniseg v0.4.7 h1:WUdvkW8uEhrYfLC4ZzdpI2ztxP1I582+49Oc5Mq64VQ= github.com/rivo/uniseg v0.4.7/go.mod h1:FN3SvrM+Zdj16jyLfmOkMNblXMcoc8DfTHruCPUcx88= github.com/russross/blackfriday/v2 v2.1.0/go.mod h1:+Rmxgy9KzJVeS9/2gXHxylqXiyQDYRxCVz55jmeOWTM= +github.com/shurcooL/go v0.0.0-20180423040247-9e1955d9fb6e/go.mod h1:TDJrrUr11Vxrven61rcy3hJMUqaf/CLWYhHNPmT14Lk= +github.com/shurcooL/go-goon v0.0.0-20170922171312-37c2f522c041/go.mod h1:N5mDOmsrJOB+vfqUK+7DmDyjhSLIIBnXo9lvZJj3MWQ= +github.com/sourcegraph/go-diff v0.7.0 h1:9uLlrd5T46OXs5qpp8L/MTltk0zikUGi0sNNyCpA8G0= +github.com/sourcegraph/go-diff v0.7.0/go.mod h1:iBszgVvyxdc8SFZ7gm69go2KDdt3ag071iBaWPF6cjs= github.com/spf13/cobra v1.10.2 h1:DMTTonx5m65Ic0GOoRY2c16WCbHxOOw6xxezuLaBpcU= github.com/spf13/cobra v1.10.2/go.mod h1:7C1pvHqHw5A4vrJfjNwvOdzYu0Gml16OCs2GRiTUUS4= github.com/spf13/pflag v1.0.9 h1:9exaQaMOCwffKiiiYk6/BndUBv+iRViNW+4lEMi0PvY= @@ -135,6 +140,7 @@ golang.org/x/text v0.34.0 h1:oL/Qq0Kdaqxa1KbNeMKwQq0reLCCaFtqu2eNuSeNHbk= golang.org/x/text v0.34.0/go.mod h1:homfLqTYRFyVYemLBFl5GgL/DWEiH5wcsQ5gSh1yziA= golang.org/x/tools v0.41.0 h1:a9b8iMweWG+S0OBnlU36rzLp20z1Rp10w+IY2czHTQc= golang.org/x/tools v0.41.0/go.mod h1:XSY6eDqxVNiYgezAVqqCeihT4j1U2CCsqvH3WhQpnlg= +golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= diff --git a/internal/agent/claude.go b/internal/agent/claude.go index bbc998e2..2175d395 100644 --- a/internal/agent/claude.go +++ b/internal/agent/claude.go @@ -166,10 +166,24 @@ func (a *ClaudeAgent) Review(ctx context.Context, repoPath, commitSHA, prompt st result, err := parseStreamJSON(stdoutPipe, output) if waitErr := cmd.Wait(); waitErr != nil { + // Build a detailed error including any partial output and stream errors + var detail strings.Builder + fmt.Fprintf(&detail, "%s failed", a.Name()) if err != nil { - return "", fmt.Errorf("claude failed: %w (parse error: %v)\nstderr: %s", waitErr, err, stderr.String()) + fmt.Fprintf(&detail, "\nstream: %v", err) } - return "", fmt.Errorf("claude failed: %w\nstderr: %s", waitErr, stderr.String()) + if s := stderr.String(); s != "" { + fmt.Fprintf(&detail, "\nstderr: %s", s) + } + if result != "" { + // Truncate partial output to keep error messages readable + partial := result + if len(partial) > 500 { + partial = partial[:500] + "..." + } + fmt.Fprintf(&detail, "\npartial output: %s", partial) + } + return "", fmt.Errorf("%s: %w", detail.String(), waitErr) } if err != nil { @@ -187,15 +201,21 @@ type claudeStreamMessage struct { Content string `json:"content,omitempty"` } `json:"message,omitempty"` Result string `json:"result,omitempty"` + Error struct { + Message string `json:"message,omitempty"` + } `json:"error,omitempty"` } // parseStreamJSON parses Claude's stream-json output and extracts the final result. // Uses bufio.Reader.ReadString to read lines without buffer size limits. +// On success, returns (result, nil). On failure, returns (partialOutput, error) +// where partialOutput contains any assistant messages collected before the error. 
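+// Malformed JSON lines are skipped; if the stream yields no valid events at all,
+// or only error events with no final result, an error is returned.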
func parseStreamJSON(r io.Reader, output io.Writer) (string, error) { br := bufio.NewReader(r) var lastResult string var assistantMessages []string + var errorMessages []string var validEventsParsed bool for { @@ -225,6 +245,11 @@ func parseStreamJSON(r io.Reader, output io.Writer) (string, error) { if msg.Type == "result" && msg.Result != "" { lastResult = msg.Result } + + // Capture error events from Claude Code + if msg.Type == "error" && msg.Error.Message != "" { + errorMessages = append(errorMessages, msg.Error.Message) + } } // Skip malformed JSON lines silently } @@ -239,6 +264,14 @@ func parseStreamJSON(r io.Reader, output io.Writer) (string, error) { return "", fmt.Errorf("no valid stream-json events parsed from output") } + // Build partial output for error context + partial := strings.Join(assistantMessages, "\n") + + // If error events were received but we got no result, report them with any partial output + if len(errorMessages) > 0 && lastResult == "" { + return partial, fmt.Errorf("stream errors: %s", strings.Join(errorMessages, "; ")) + } + // Prefer the result field if present, otherwise join assistant messages if lastResult != "" { return lastResult, nil diff --git a/internal/daemon/server.go b/internal/daemon/server.go index 05862182..3c299505 100644 --- a/internal/daemon/server.go +++ b/internal/daemon/server.go @@ -89,6 +89,10 @@ func NewServer(db *storage.DB, cfg *config.Config, configPath string) *Server { mux.HandleFunc("/api/remap", s.handleRemap) mux.HandleFunc("/api/sync/now", s.handleSyncNow) mux.HandleFunc("/api/sync/status", s.handleSyncStatus) + mux.HandleFunc("/api/job/fix", s.handleFixJob) + mux.HandleFunc("/api/job/patch", s.handleGetPatch) + mux.HandleFunc("/api/job/applied", s.handleMarkJobApplied) + mux.HandleFunc("/api/job/rebased", s.handleMarkJobRebased) s.httpServer = &http.Server{ Addr: cfg.ServerAddr, @@ -725,6 +729,7 @@ func (s *Server) handleListJobs(w http.ResponseWriter, r *http.Request) { writeError(w, http.StatusInternalServerError, fmt.Sprintf("database error: %v", err)) return } + job.Patch = nil // Patch is only served via /api/job/patch writeJSON(w, map[string]any{ "jobs": []storage.ReviewJob{*job}, "has_more": false, @@ -784,6 +789,12 @@ func (s *Server) handleListJobs(w http.ResponseWriter, r *http.Request) { if addrStr := r.URL.Query().Get("addressed"); addrStr == "true" || addrStr == "false" { listOpts = append(listOpts, storage.WithAddressed(addrStr == "true")) } + if jobType := r.URL.Query().Get("job_type"); jobType != "" { + listOpts = append(listOpts, storage.WithJobType(jobType)) + } + if exJobType := r.URL.Query().Get("exclude_job_type"); exJobType != "" { + listOpts = append(listOpts, storage.WithExcludeJobType(exJobType)) + } jobs, err := s.db.ListJobs(status, repo, fetchLimit, offset, listOpts...) 
if err != nil { @@ -1316,7 +1327,7 @@ func (s *Server) handleStatus(w http.ResponseWriter, r *http.Request) { return } - queued, running, done, failed, canceled, err := s.db.GetJobCounts() + queued, running, done, failed, canceled, applied, rebased, err := s.db.GetJobCounts() if err != nil { s.writeInternalError(w, fmt.Sprintf("get counts: %v", err)) return @@ -1336,6 +1347,8 @@ func (s *Server) handleStatus(w http.ResponseWriter, r *http.Request) { CompletedJobs: done, FailedJobs: failed, CanceledJobs: canceled, + AppliedJobs: applied, + RebasedJobs: rebased, ActiveWorkers: s.workerPool.ActiveWorkers(), MaxWorkers: s.workerPool.MaxWorkers(), MachineID: s.getMachineID(), @@ -1624,6 +1637,249 @@ func (s *Server) handleHealth(w http.ResponseWriter, r *http.Request) { writeJSON(w, status) } +// handleFixJob creates a background fix job for a review. +// It fetches the parent review, builds a fix prompt, and enqueues a new +// fix job that will run in an isolated worktree. +func (s *Server) handleFixJob(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + writeError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + r.Body = http.MaxBytesReader(w, r.Body, 50<<20) // 50MB limit + var req struct { + ParentJobID int64 `json:"parent_job_id"` + Prompt string `json:"prompt,omitempty"` // Optional custom prompt override + StaleJobID int64 `json:"stale_job_id,omitempty"` // Optional: server looks up patch from this job for rebase + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid request body") + return + } + if req.ParentJobID == 0 { + writeError(w, http.StatusBadRequest, "parent_job_id is required") + return + } + + // Fetch the parent job — must be a review (not a fix job) + parentJob, err := s.db.GetJobByID(req.ParentJobID) + if err != nil { + writeError(w, http.StatusNotFound, "parent job not found") + return + } + if parentJob.IsFixJob() { + writeError(w, http.StatusBadRequest, "parent job must be a review, not a fix job") + return + } + + // Build the fix prompt + fixPrompt := req.Prompt + if fixPrompt == "" && req.StaleJobID > 0 { + // Server-side rebase: look up stale patch from DB and build rebase prompt + staleJob, err := s.db.GetJobByID(req.StaleJobID) + if err != nil { + writeError(w, http.StatusNotFound, "stale job not found") + return + } + if staleJob.JobType != storage.JobTypeFix { + writeError(w, http.StatusBadRequest, "stale job is not a fix job") + return + } + if staleJob.RepoID != parentJob.RepoID { + writeError(w, http.StatusBadRequest, "stale job belongs to a different repo") + return + } + // Verify stale job is linked to the same parent + if staleJob.ParentJobID == nil || *staleJob.ParentJobID != req.ParentJobID { + writeError(w, http.StatusBadRequest, "stale job is not linked to the specified parent") + return + } + // Require terminal status with a usable patch + switch staleJob.Status { + case storage.JobStatusDone, storage.JobStatusApplied, storage.JobStatusRebased: + // OK + default: + writeError(w, http.StatusBadRequest, "stale job is not in a terminal state") + return + } + if staleJob.Patch == nil || *staleJob.Patch == "" { + writeError(w, http.StatusBadRequest, "stale job has no patch to rebase from") + return + } + fixPrompt = buildRebasePrompt(staleJob.Patch) + } + if fixPrompt == "" { + // Fetch the review output for the parent job + review, err := s.db.GetReviewByJobID(req.ParentJobID) + if err != nil || review == nil { + writeError(w, 
http.StatusBadRequest, "parent job has no review to fix") + return + } + fixPrompt = buildFixPrompt(review.Output) + } + + // Resolve agent for fix workflow + cfg := s.configWatcher.Config() + reasoning := "standard" + agentName := config.ResolveAgentForWorkflow("", parentJob.RepoPath, cfg, "fix", reasoning) + if resolved, err := agent.GetAvailable(agentName); err != nil { + writeError(w, http.StatusServiceUnavailable, fmt.Sprintf("no agent available: %v", err)) + return + } else { + agentName = resolved.Name() + } + model := config.ResolveModelForWorkflow("", parentJob.RepoPath, cfg, "fix", reasoning) + + // Enqueue the fix job + job, err := s.db.EnqueueJob(storage.EnqueueOpts{ + RepoID: parentJob.RepoID, + GitRef: parentJob.GitRef, + Branch: parentJob.Branch, + Agent: agentName, + Model: model, + Reasoning: reasoning, + Prompt: fixPrompt, + Agentic: true, + Label: fmt.Sprintf("fix #%d", req.ParentJobID), + JobType: storage.JobTypeFix, + ParentJobID: req.ParentJobID, + }) + if err != nil { + s.writeInternalError(w, fmt.Sprintf("enqueue fix job: %v", err)) + return + } + + writeJSONWithStatus(w, http.StatusCreated, job) +} + +// handleGetPatch returns the stored patch for a completed fix job. +func (s *Server) handleGetPatch(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodGet { + writeError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + jobIDStr := r.URL.Query().Get("job_id") + if jobIDStr == "" { + writeError(w, http.StatusBadRequest, "job_id parameter required") + return + } + + jobID, err := strconv.ParseInt(jobIDStr, 10, 64) + if err != nil { + writeError(w, http.StatusBadRequest, "invalid job_id") + return + } + + job, err := s.db.GetJobByID(jobID) + if err != nil { + writeError(w, http.StatusNotFound, "job not found") + return + } + + if !job.HasViewableOutput() || job.Patch == nil { + writeError(w, http.StatusNotFound, "no patch available for this job") + return + } + + w.Header().Set("Content-Type", "text/plain") + w.WriteHeader(http.StatusOK) + _, _ = w.Write([]byte(*job.Patch)) +} + +func (s *Server) handleMarkJobApplied(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + writeError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + var req struct { + JobID int64 `json:"job_id"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid request body") + return + } + if req.JobID == 0 { + writeError(w, http.StatusBadRequest, "job_id is required") + return + } + + if err := s.db.MarkJobApplied(req.JobID); err != nil { + if errors.Is(err, sql.ErrNoRows) { + writeError(w, http.StatusNotFound, "job not found or not in done state") + return + } + writeError(w, http.StatusInternalServerError, fmt.Sprintf("mark applied: %v", err)) + return + } + + writeJSON(w, map[string]string{"status": "applied"}) +} + +func (s *Server) handleMarkJobRebased(w http.ResponseWriter, r *http.Request) { + if r.Method != http.MethodPost { + writeError(w, http.StatusMethodNotAllowed, "method not allowed") + return + } + + var req struct { + JobID int64 `json:"job_id"` + } + if err := json.NewDecoder(r.Body).Decode(&req); err != nil { + writeError(w, http.StatusBadRequest, "invalid request body") + return + } + if req.JobID == 0 { + writeError(w, http.StatusBadRequest, "job_id is required") + return + } + + if err := s.db.MarkJobRebased(req.JobID); err != nil { + if errors.Is(err, sql.ErrNoRows) { + writeError(w, http.StatusNotFound, "job not found or not in done 
state") + return + } + writeError(w, http.StatusInternalServerError, fmt.Sprintf("mark rebased: %v", err)) + return + } + + writeJSON(w, map[string]string{"status": "rebased"}) +} + +// buildFixPrompt constructs a prompt for fixing review findings. +func buildFixPrompt(reviewOutput string) string { + return "# Fix Request\n\n" + + "An analysis was performed and produced the following findings:\n\n" + + "## Analysis Findings\n\n" + + reviewOutput + + "\n\n## Instructions\n\n" + + "Please apply the suggested changes from the analysis above. " + + "Make the necessary edits to address each finding. " + + "Focus on the highest priority items first.\n\n" + + "After making changes:\n" + + "1. Verify the code still compiles/passes linting\n" + + "2. Run any relevant tests to ensure nothing is broken\n" + + "3. Stage the changes with git add but do NOT commit — the changes will be captured as a patch\n" +} + +// buildRebasePrompt constructs a prompt for re-applying a stale patch to current HEAD. +func buildRebasePrompt(stalePatch *string) string { + prompt := "# Rebase Fix Request\n\n" + + "A previous fix attempt produced a patch that no longer applies cleanly to the current HEAD.\n" + + "Your task is to achieve the same changes but adapted to the current state of the code.\n\n" + if stalePatch != nil && *stalePatch != "" { + prompt += "## Previous Patch (stale)\n\n`````diff\n" + *stalePatch + "\n`````\n\n" + } + prompt += "## Instructions\n\n" + + "1. Review the intent of the previous patch\n" + + "2. Apply equivalent changes to the current codebase\n" + + "3. Resolve any conflicts with recent changes\n" + + "4. Verify the code compiles and tests pass\n" + + "5. Stage the changes with git add but do NOT commit\n" + return prompt +} + // formatDuration formats a duration in human-readable form (e.g., "2h 15m") func formatDuration(d time.Duration) string { d = d.Round(time.Second) diff --git a/internal/daemon/server_test.go b/internal/daemon/server_test.go index 6ddb8904..6aeba17e 100644 --- a/internal/daemon/server_test.go +++ b/internal/daemon/server_test.go @@ -1737,7 +1737,7 @@ func TestHandleEnqueueExcludedBranch(t *testing.T) { } // Verify no job was created - queued, _, _, _, _, _ := db.GetJobCounts() + queued, _, _, _, _, _, _, _ := db.GetJobCounts() if queued != 0 { t.Errorf("Expected 0 queued jobs, got %d", queued) } @@ -1762,7 +1762,7 @@ func TestHandleEnqueueExcludedBranch(t *testing.T) { } // Verify job was created - queued, _, _, _, _, _ := db.GetJobCounts() + queued, _, _, _, _, _, _, _ := db.GetJobCounts() if queued != 1 { t.Errorf("Expected 1 queued job, got %d", queued) } @@ -3461,3 +3461,332 @@ func TestHandleListCommentsJobIDParsing(t *testing.T) { }) } } + +func TestHandleListJobsJobTypeFilter(t *testing.T) { + server, db, tmpDir := newTestServer(t) + + repoDir := filepath.Join(tmpDir, "repo-jt") + testutil.InitTestGitRepo(t, repoDir) + repo, _ := db.GetOrCreateRepo(repoDir) + commit, _ := db.GetOrCreateCommit( + repo.ID, "jt-abc", "Author", "Subject", time.Now(), + ) + + // Create a review job + reviewJob, _ := db.EnqueueJob(storage.EnqueueOpts{ + RepoID: repo.ID, + CommitID: commit.ID, + GitRef: "jt-abc", + Agent: "test", + }) + + // Create a fix job parented to the review + db.EnqueueJob(storage.EnqueueOpts{ + RepoID: repo.ID, + CommitID: commit.ID, + GitRef: "jt-abc", + Agent: "test", + JobType: storage.JobTypeFix, + ParentJobID: reviewJob.ID, + }) + + t.Run("job_type=fix returns only fix jobs", func(t *testing.T) { + req := httptest.NewRequest( + http.MethodGet, 
"/api/jobs?job_type=fix", nil, + ) + w := httptest.NewRecorder() + server.handleListJobs(w, req) + + if w.Code != http.StatusOK { + t.Fatalf("Expected 200, got %d: %s", w.Code, w.Body.String()) + } + + var resp struct { + Jobs []storage.ReviewJob `json:"jobs"` + } + testutil.DecodeJSON(t, w, &resp) + + if len(resp.Jobs) != 1 { + t.Fatalf("Expected 1 fix job, got %d", len(resp.Jobs)) + } + if resp.Jobs[0].JobType != storage.JobTypeFix { + t.Errorf( + "Expected job_type 'fix', got %q", resp.Jobs[0].JobType, + ) + } + }) + + t.Run("no job_type returns all jobs", func(t *testing.T) { + req := httptest.NewRequest( + http.MethodGet, "/api/jobs", nil, + ) + w := httptest.NewRecorder() + server.handleListJobs(w, req) + + if w.Code != http.StatusOK { + t.Fatalf("Expected 200, got %d: %s", w.Code, w.Body.String()) + } + + var resp struct { + Jobs []storage.ReviewJob `json:"jobs"` + } + testutil.DecodeJSON(t, w, &resp) + + if len(resp.Jobs) != 2 { + t.Errorf("Expected 2 jobs total, got %d", len(resp.Jobs)) + } + }) + + t.Run("exclude_job_type=fix returns only non-fix jobs", func(t *testing.T) { + req := httptest.NewRequest( + http.MethodGet, "/api/jobs?exclude_job_type=fix", nil, + ) + w := httptest.NewRecorder() + server.handleListJobs(w, req) + + if w.Code != http.StatusOK { + t.Fatalf("Expected 200, got %d: %s", w.Code, w.Body.String()) + } + + var resp struct { + Jobs []storage.ReviewJob `json:"jobs"` + } + testutil.DecodeJSON(t, w, &resp) + + if len(resp.Jobs) != 1 { + t.Fatalf("Expected 1 non-fix job, got %d", len(resp.Jobs)) + } + if resp.Jobs[0].JobType == storage.JobTypeFix { + t.Error("Expected non-fix job, got fix") + } + }) +} + +func TestHandleFixJobStaleValidation(t *testing.T) { + server, db, tmpDir := newTestServer(t) + + repoDir := filepath.Join(tmpDir, "repo-fix-val") + testutil.InitTestGitRepo(t, repoDir) + repo, _ := db.GetOrCreateRepo(repoDir) + commit, _ := db.GetOrCreateCommit( + repo.ID, "fix-val-abc", "Author", "Subject", time.Now(), + ) + + // Create a review job and complete it with output + reviewJob, _ := db.EnqueueJob(storage.EnqueueOpts{ + RepoID: repo.ID, + CommitID: commit.ID, + GitRef: "fix-val-abc", + Agent: "test", + }) + db.ClaimJob("w1") + db.CompleteJob(reviewJob.ID, "test", "prompt", "FAIL: issues found") + + t.Run("fix job as parent is rejected", func(t *testing.T) { + // Create a fix job and try to use it as a parent + fixJob, _ := db.EnqueueJob(storage.EnqueueOpts{ + RepoID: repo.ID, + CommitID: commit.ID, + GitRef: "fix-val-abc", + Agent: "test", + JobType: storage.JobTypeFix, + ParentJobID: reviewJob.ID, + }) + db.ClaimJob("w-fix-parent") + db.CompleteJob(fixJob.ID, "test", "prompt", "done") + + body := map[string]any{ + "parent_job_id": fixJob.ID, + } + req := testutil.MakeJSONRequest( + t, http.MethodPost, "/api/job/fix", body, + ) + w := httptest.NewRecorder() + server.handleFixJob(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf( + "Expected 400 for fix-job parent, got %d: %s", + w.Code, w.Body.String(), + ) + } + }) + + t.Run("stale job that is not a fix job is rejected", func(t *testing.T) { + // reviewJob is a review, not a fix job + body := map[string]any{ + "parent_job_id": reviewJob.ID, + "stale_job_id": reviewJob.ID, + } + req := testutil.MakeJSONRequest( + t, http.MethodPost, "/api/job/fix", body, + ) + w := httptest.NewRecorder() + server.handleFixJob(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf("Expected 400 for non-fix stale job, got %d: %s", + w.Code, w.Body.String()) + } + }) + + t.Run("stale job with wrong parent 
is rejected", func(t *testing.T) { + // Create a second review + fix in the SAME repo, linked to the other review + review2, _ := db.EnqueueJob(storage.EnqueueOpts{ + RepoID: repo.ID, + CommitID: commit.ID, + GitRef: "fix-val-def", + Agent: "test", + }) + db.ClaimJob("w3") + db.CompleteJob(review2.ID, "test", "prompt", "FAIL: other issues") + + wrongParentFix, _ := db.EnqueueJob(storage.EnqueueOpts{ + RepoID: repo.ID, + CommitID: commit.ID, + GitRef: "fix-val-def", + Agent: "test", + JobType: storage.JobTypeFix, + ParentJobID: review2.ID, + }) + // Complete it so it has terminal status + patch + db.ClaimJob("w4") + db.CompleteJob(wrongParentFix.ID, "test", "prompt", "done") + db.SaveJobPatch(wrongParentFix.ID, "--- a/f\n+++ b/f\n") + + body := map[string]any{ + "parent_job_id": reviewJob.ID, + "stale_job_id": wrongParentFix.ID, + } + req := testutil.MakeJSONRequest( + t, http.MethodPost, "/api/job/fix", body, + ) + w := httptest.NewRecorder() + server.handleFixJob(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf("Expected 400 for wrong-parent stale job, got %d: %s", + w.Code, w.Body.String()) + } + }) + + t.Run("stale job without patch is rejected", func(t *testing.T) { + // Create a fix job linked to reviewJob but with no patch + noPatchFix, _ := db.EnqueueJob(storage.EnqueueOpts{ + RepoID: repo.ID, + CommitID: commit.ID, + GitRef: "fix-val-abc", + Agent: "test", + JobType: storage.JobTypeFix, + ParentJobID: reviewJob.ID, + }) + // Complete it (terminal status) but don't set a patch + db.ClaimJob("w5") + db.CompleteJob(noPatchFix.ID, "test", "prompt", "done but no diff") + + body := map[string]any{ + "parent_job_id": reviewJob.ID, + "stale_job_id": noPatchFix.ID, + } + req := testutil.MakeJSONRequest( + t, http.MethodPost, "/api/job/fix", body, + ) + w := httptest.NewRecorder() + server.handleFixJob(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf("Expected 400 for patchless stale job, got %d: %s", + w.Code, w.Body.String()) + } + }) + + t.Run("non-terminal stale job statuses are rejected", func(t *testing.T) { + for _, status := range []storage.JobStatus{ + storage.JobStatusQueued, + storage.JobStatusRunning, + storage.JobStatusFailed, + storage.JobStatusCanceled, + } { + t.Run(string(status), func(t *testing.T) { + fixJob, _ := db.EnqueueJob(storage.EnqueueOpts{ + RepoID: repo.ID, + CommitID: commit.ID, + GitRef: "fix-val-abc", + Agent: "test", + JobType: storage.JobTypeFix, + ParentJobID: reviewJob.ID, + }) + // Move to desired status + switch status { + case storage.JobStatusRunning: + db.ClaimJob("w-term") + case storage.JobStatusFailed: + db.ClaimJob("w-term") + db.FailJob(fixJob.ID, "w-term", "broke") + case storage.JobStatusCanceled: + db.CancelJob(fixJob.ID) + } + // queued needs no extra action + + body := map[string]any{ + "parent_job_id": reviewJob.ID, + "stale_job_id": fixJob.ID, + } + req := testutil.MakeJSONRequest( + t, http.MethodPost, "/api/job/fix", body, + ) + w := httptest.NewRecorder() + server.handleFixJob(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf( + "Expected 400 for %s stale job, got %d: %s", + status, w.Code, w.Body.String(), + ) + } + }) + } + }) + + t.Run("stale job from different repo is rejected", func(t *testing.T) { + // Create a fix job in a different repo + repo2Dir := filepath.Join(tmpDir, "repo-fix-val-2") + testutil.InitTestGitRepo(t, repo2Dir) + repo2, _ := db.GetOrCreateRepo(repo2Dir) + commit2, _ := db.GetOrCreateCommit( + repo2.ID, "other-sha", "Author", "Subject", time.Now(), + ) + otherReview, _ := 
db.EnqueueJob(storage.EnqueueOpts{ + RepoID: repo2.ID, + CommitID: commit2.ID, + GitRef: "other-sha", + Agent: "test", + }) + db.ClaimJob("w2") + db.CompleteJob(otherReview.ID, "test", "prompt", "FAIL") + + otherFix, _ := db.EnqueueJob(storage.EnqueueOpts{ + RepoID: repo2.ID, + CommitID: commit2.ID, + GitRef: "other-sha", + Agent: "test", + JobType: storage.JobTypeFix, + ParentJobID: otherReview.ID, + }) + + body := map[string]any{ + "parent_job_id": reviewJob.ID, + "stale_job_id": otherFix.ID, + } + req := testutil.MakeJSONRequest( + t, http.MethodPost, "/api/job/fix", body, + ) + w := httptest.NewRecorder() + server.handleFixJob(w, req) + + if w.Code != http.StatusBadRequest { + t.Errorf("Expected 400 for cross-repo stale job, got %d: %s", + w.Code, w.Body.String()) + } + }) +} diff --git a/internal/daemon/worker.go b/internal/daemon/worker.go index 022d366d..e244b4ff 100644 --- a/internal/daemon/worker.go +++ b/internal/daemon/worker.go @@ -13,6 +13,7 @@ import ( "github.com/roborev-dev/roborev/internal/config" "github.com/roborev-dev/roborev/internal/prompt" "github.com/roborev-dev/roborev/internal/storage" + "github.com/roborev-dev/roborev/internal/worktree" ) // WorkerPool manages a pool of review workers @@ -66,8 +67,8 @@ func NewWorkerPool(db *storage.DB, cfgGetter ConfigGetter, numWorkers int, broad func (wp *WorkerPool) Start() { log.Printf("Starting worker pool with %d workers", wp.numWorkers) + wp.wg.Add(wp.numWorkers) for i := 0; i < wp.numWorkers; i++ { - wp.wg.Add(1) go wp.worker(i) } } @@ -300,7 +301,7 @@ func (wp *WorkerPool) processJob(workerID string, job *storage.ReviewJob) { // Build the prompt (or use pre-stored prompt for task/compact jobs) var reviewPrompt string var err error - if job.IsPromptJob() && job.Prompt != "" { + if job.UsesStoredPrompt() && job.Prompt != "" { // Prompt-native job (task, compact) — prepend agent-specific preamble preamble := prompt.GetSystemPrompt(job.Agent, "run") if preamble != "" { @@ -308,7 +309,7 @@ func (wp *WorkerPool) processJob(workerID string, job *storage.ReviewJob) { } else { reviewPrompt = job.Prompt } - } else if job.IsPromptJob() { + } else if job.UsesStoredPrompt() { // Prompt-native job (task/compact) with missing prompt — likely a // daemon version mismatch or storage issue. Fail clearly instead // of trying to build a prompt from a non-git label. @@ -373,9 +374,26 @@ func (wp *WorkerPool) processJob(workerID string, job *storage.ReviewJob) { wp.outputBuffers.CloseJob(job.ID) }() + // For fix jobs, create an isolated worktree to run the agent in. + // The agent modifies files in the worktree; afterwards we capture the diff as a patch. 
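+	// Non-fix jobs keep running against the user's checkout; only fix jobs are
+	// redirected into the temporary worktree below.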
+ reviewRepoPath := job.RepoPath + var fixWorktree *worktree.Worktree + if job.IsFixJob() { + wt, wtErr := worktree.Create(job.RepoPath) + if wtErr != nil { + log.Printf("[%s] Error creating worktree for fix job %d: %v", workerID, job.ID, wtErr) + wp.failOrRetry(workerID, job, agentName, fmt.Sprintf("create worktree: %v", wtErr)) + return + } + defer wt.Close() + fixWorktree = wt + reviewRepoPath = wt.Dir + log.Printf("[%s] Fix job %d: running agent in worktree %s", workerID, job.ID, wt.Dir) + } + // Run the review log.Printf("[%s] Running %s review...", workerID, agentName) - output, err := a.Review(ctx, job.RepoPath, job.GitRef, reviewPrompt, outputWriter) + output, err := a.Review(ctx, reviewRepoPath, job.GitRef, reviewPrompt, outputWriter) if err != nil { // Check if this was a cancellation if ctx.Err() == context.Canceled { @@ -397,6 +415,25 @@ func (wp *WorkerPool) processJob(workerID string, job *storage.ReviewJob) { return } + // For fix jobs, capture the patch from the worktree. Patch capture + // failures are fatal — a fix job without a patch is useless. + var fixPatch string + if job.IsFixJob() { + var patchErr error + fixPatch, patchErr = fixWorktree.CapturePatch() + if patchErr != nil { + log.Printf("[%s] Fix job %d: patch capture failed: %v", workerID, job.ID, patchErr) + wp.failOrRetry(workerID, job, agentName, fmt.Sprintf("patch capture: %v", patchErr)) + return + } + if fixPatch == "" { + log.Printf("[%s] Fix job %d: agent produced no file changes", workerID, job.ID) + wp.failOrRetry(workerID, job, agentName, "agent produced no file changes") + return + } + log.Printf("[%s] Fix job %d: captured patch (%d bytes)", workerID, job.ID, len(fixPatch)) + } + // For compact jobs, validate raw agent output before storing. // Invalid output (empty, error patterns) should fail the job, // not produce a "done" review that misleads --wait callers. @@ -407,9 +444,14 @@ func (wp *WorkerPool) processJob(workerID string, job *storage.ReviewJob) { } // Store the result (use actual agent name, not requested). - // CompleteJob is a no-op (returns nil) if the job was canceled - // between agent finish and now. - if err := wp.db.CompleteJob(job.ID, agentName, reviewPrompt, output); err != nil { + // CompleteJob/CompleteFixJob is a no-op (returns nil) if the job was + // canceled between agent finish and now. 
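+	// Fix jobs store the review output and the captured patch together, so a
+	// done fix job always has its patch available.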
+ if job.IsFixJob() { + if err := wp.db.CompleteFixJob(job.ID, agentName, reviewPrompt, output, fixPatch); err != nil { + log.Printf("[%s] Error storing fix review: %v", workerID, err) + return + } + } else if err := wp.db.CompleteJob(job.ID, agentName, reviewPrompt, output); err != nil { log.Printf("[%s] Error storing review: %v", workerID, err) return } diff --git a/internal/storage/db.go b/internal/storage/db.go index a44ee8cb..f009fc6f 100644 --- a/internal/storage/db.go +++ b/internal/storage/db.go @@ -40,7 +40,7 @@ CREATE TABLE IF NOT EXISTS review_jobs ( agent TEXT NOT NULL DEFAULT 'codex', model TEXT, reasoning TEXT NOT NULL DEFAULT 'thorough', - status TEXT NOT NULL CHECK(status IN ('queued','running','done','failed','canceled')) DEFAULT 'queued', + status TEXT NOT NULL CHECK(status IN ('queued','running','done','failed','canceled','applied','rebased')) DEFAULT 'queued', enqueued_at TEXT NOT NULL DEFAULT (datetime('now')), started_at TEXT, finished_at TEXT, @@ -312,7 +312,7 @@ func (db *DB) migrate() error { agent TEXT NOT NULL DEFAULT 'codex', model TEXT, reasoning TEXT NOT NULL DEFAULT 'thorough', - status TEXT NOT NULL CHECK(status IN ('queued','running','done','failed','canceled')) DEFAULT 'queued', + status TEXT NOT NULL CHECK(status IN ('queued','running','done','failed','canceled','applied','rebased')) DEFAULT 'queued', enqueued_at TEXT NOT NULL DEFAULT (datetime('now')), started_at TEXT, finished_at TEXT, @@ -430,6 +430,18 @@ func (db *DB) migrate() error { return fmt.Errorf("create branch index: %w", err) } + // Migration: update CHECK constraint to include 'applied' and 'rebased' statuses + // Re-read the table SQL since the previous migration may have rebuilt it + err = db.QueryRow(`SELECT sql FROM sqlite_master WHERE type='table' AND name='review_jobs'`).Scan(&tableSql) + if err != nil { + return fmt.Errorf("check review_jobs schema for applied/rebased: %w", err) + } + if !strings.Contains(tableSql, "'applied'") { + if err := db.migrateJobStatusConstraint(); err != nil { + return fmt.Errorf("migrate job status constraint: %w", err) + } + } + // Migration: make commit_id nullable in responses table (for job-based responses) // Check if commit_id is NOT NULL by examining the schema var responsesSql string @@ -602,6 +614,30 @@ func (db *DB) migrate() error { return fmt.Errorf("create idx_reviews_addressed: %w", err) } + // Migration: add parent_job_id column to review_jobs if missing + err = db.QueryRow(`SELECT COUNT(*) FROM pragma_table_info('review_jobs') WHERE name = 'parent_job_id'`).Scan(&count) + if err != nil { + return fmt.Errorf("check parent_job_id column: %w", err) + } + if count == 0 { + _, err = db.Exec(`ALTER TABLE review_jobs ADD COLUMN parent_job_id INTEGER`) + if err != nil { + return fmt.Errorf("add parent_job_id column: %w", err) + } + } + + // Migration: add patch column to review_jobs if missing + err = db.QueryRow(`SELECT COUNT(*) FROM pragma_table_info('review_jobs') WHERE name = 'patch'`).Scan(&count) + if err != nil { + return fmt.Errorf("check patch column: %w", err) + } + if count == 0 { + _, err = db.Exec(`ALTER TABLE review_jobs ADD COLUMN patch TEXT`) + if err != nil { + return fmt.Errorf("add patch column: %w", err) + } + } + // Run sync-related migrations if err := db.migrateSyncColumns(); err != nil { return err @@ -662,6 +698,110 @@ func (db *DB) hasUniqueIndexOnShaOnly() (bool, error) { return false, rows.Err() } +// migrateJobStatusConstraint rebuilds the review_jobs table to update the +// CHECK constraint to include 'applied' and 
'rebased' statuses. +func (db *DB) migrateJobStatusConstraint() error { + ctx := context.Background() + conn, err := db.Conn(ctx) + if err != nil { + return fmt.Errorf("get connection: %w", err) + } + defer conn.Close() + + if _, err := conn.ExecContext(ctx, `PRAGMA foreign_keys = OFF`); err != nil { + return fmt.Errorf("disable foreign keys: %w", err) + } + defer func() { _, _ = conn.ExecContext(ctx, `PRAGMA foreign_keys = ON`) }() + + tx, err := conn.BeginTx(ctx, nil) + if err != nil { + return err + } + defer func() { + if err := tx.Rollback(); err != nil && err != sql.ErrTxDone { + return + } + }() + + // Read existing columns dynamically + rows, err := tx.Query(`SELECT name FROM pragma_table_info('review_jobs')`) + if err != nil { + return fmt.Errorf("read columns: %w", err) + } + var cols []string + for rows.Next() { + var name string + if err := rows.Scan(&name); err != nil { + rows.Close() + return err + } + cols = append(cols, name) + } + rows.Close() + + // Read the current CREATE TABLE SQL and replace the old constraint + var origSQL string + if err := tx.QueryRow(`SELECT sql FROM sqlite_master WHERE type='table' AND name='review_jobs'`).Scan(&origSQL); err != nil { + return err + } + + // Replace old constraint with new one including applied and rebased + newSQL := strings.Replace(origSQL, + "CHECK(status IN ('queued','running','done','failed','canceled'))", + "CHECK(status IN ('queued','running','done','failed','canceled','applied','rebased'))", + 1) + // Handle the table name for the temp table + newSQL = strings.Replace(newSQL, "CREATE TABLE review_jobs", "CREATE TABLE review_jobs_new", 1) + + if _, err := tx.Exec(newSQL); err != nil { + return fmt.Errorf("create new table: %w", err) + } + + colList := strings.Join(cols, ", ") + if _, err := tx.Exec(fmt.Sprintf(`INSERT INTO review_jobs_new (%s) SELECT %s FROM review_jobs`, colList, colList)); err != nil { + return fmt.Errorf("copy data: %w", err) + } + + if _, err := tx.Exec(`DROP TABLE review_jobs`); err != nil { + return fmt.Errorf("drop old table: %w", err) + } + + if _, err := tx.Exec(`ALTER TABLE review_jobs_new RENAME TO review_jobs`); err != nil { + return fmt.Errorf("rename table: %w", err) + } + + // Recreate indexes + for _, idx := range []string{ + `CREATE INDEX IF NOT EXISTS idx_review_jobs_status ON review_jobs(status)`, + `CREATE INDEX IF NOT EXISTS idx_review_jobs_repo ON review_jobs(repo_id)`, + `CREATE INDEX IF NOT EXISTS idx_review_jobs_git_ref ON review_jobs(git_ref)`, + `CREATE INDEX IF NOT EXISTS idx_review_jobs_branch ON review_jobs(branch)`, + `CREATE UNIQUE INDEX IF NOT EXISTS idx_review_jobs_uuid ON review_jobs(uuid)`, + } { + if _, err := tx.Exec(idx); err != nil { + return fmt.Errorf("recreate index: %w", err) + } + } + + if err := tx.Commit(); err != nil { + return err + } + + if _, err := conn.ExecContext(ctx, `PRAGMA foreign_keys = ON`); err != nil { + return fmt.Errorf("re-enable foreign keys: %w", err) + } + + checkRows, err := conn.QueryContext(ctx, `PRAGMA foreign_key_check`) + if err != nil { + return fmt.Errorf("foreign key check: %w", err) + } + defer checkRows.Close() + if checkRows.Next() { + return fmt.Errorf("foreign key violations after migration") + } + return checkRows.Err() +} + // migrateSyncColumns adds columns needed for PostgreSQL sync functionality. // These migrations are idempotent - they check if columns exist before adding. 
func (db *DB) migrateSyncColumns() error { diff --git a/internal/storage/db_test.go b/internal/storage/db_test.go index 00619137..069ca147 100644 --- a/internal/storage/db_test.go +++ b/internal/storage/db_test.go @@ -695,7 +695,7 @@ func TestJobCounts(t *testing.T) { db.FailJob(claimed2.ID, "", "err") } - queued, running, done, failed, _, err := db.GetJobCounts() + queued, running, done, failed, _, _, _, err := db.GetJobCounts() if err != nil { t.Fatalf("GetJobCounts failed: %v", err) } @@ -1212,7 +1212,7 @@ func TestCancelJob(t *testing.T) { _, _, job := createJobChain(t, db, "/tmp/test-repo", "cancel-count") db.CancelJob(job.ID) - _, _, _, _, canceled, err := db.GetJobCounts() + _, _, _, _, canceled, _, _, err := db.GetJobCounts() if err != nil { t.Fatalf("GetJobCounts failed: %v", err) } @@ -1222,6 +1222,106 @@ func TestCancelJob(t *testing.T) { }) } +func TestMarkJobApplied(t *testing.T) { + db := openTestDB(t) + defer db.Close() + + repo, _ := db.GetOrCreateRepo("/tmp/test-repo") + commit, _ := db.GetOrCreateCommit(repo.ID, "applied-test", "A", "S", time.Now()) + + t.Run("mark done fix job as applied", func(t *testing.T) { + job, _ := db.EnqueueJob(EnqueueOpts{RepoID: repo.ID, CommitID: commit.ID, GitRef: "applied-test", Agent: "codex", JobType: JobTypeFix, ParentJobID: 1}) + db.ClaimJob("worker-1") + db.CompleteJob(job.ID, "codex", "prompt", "output") + + err := db.MarkJobApplied(job.ID) + if err != nil { + t.Fatalf("MarkJobApplied failed: %v", err) + } + + updated, _ := db.GetJobByID(job.ID) + if updated.Status != JobStatusApplied { + t.Errorf("Expected status 'applied', got '%s'", updated.Status) + } + }) + + t.Run("mark non-done job fails", func(t *testing.T) { + job, _ := db.EnqueueJob(EnqueueOpts{RepoID: repo.ID, CommitID: commit.ID, GitRef: "applied-test-q", Agent: "codex", JobType: JobTypeFix, ParentJobID: 1}) + + err := db.MarkJobApplied(job.ID) + if err == nil { + t.Error("MarkJobApplied should fail for queued jobs") + } + }) + + t.Run("mark applied job again fails", func(t *testing.T) { + job, _ := db.EnqueueJob(EnqueueOpts{RepoID: repo.ID, CommitID: commit.ID, GitRef: "applied-test-2", Agent: "codex", JobType: JobTypeFix, ParentJobID: 1}) + db.ClaimJob("worker-1") + db.CompleteJob(job.ID, "codex", "prompt", "output") + db.MarkJobApplied(job.ID) + + err := db.MarkJobApplied(job.ID) + if err == nil { + t.Error("MarkJobApplied should fail for already-applied jobs") + } + }) + + t.Run("mark non-fix job fails", func(t *testing.T) { + job, _ := db.EnqueueJob(EnqueueOpts{RepoID: repo.ID, CommitID: commit.ID, GitRef: "applied-review", Agent: "codex"}) + db.ClaimJob("worker-1") + db.CompleteJob(job.ID, "codex", "prompt", "output") + + err := db.MarkJobApplied(job.ID) + if err == nil { + t.Error("MarkJobApplied should fail for non-fix jobs") + } + }) +} + +func TestMarkJobRebased(t *testing.T) { + db := openTestDB(t) + defer db.Close() + + repo, _ := db.GetOrCreateRepo("/tmp/test-repo") + commit, _ := db.GetOrCreateCommit(repo.ID, "rebased-test", "A", "S", time.Now()) + + t.Run("mark done fix job as rebased", func(t *testing.T) { + job, _ := db.EnqueueJob(EnqueueOpts{RepoID: repo.ID, CommitID: commit.ID, GitRef: "rebased-test", Agent: "codex", JobType: JobTypeFix, ParentJobID: 1}) + db.ClaimJob("worker-1") + db.CompleteJob(job.ID, "codex", "prompt", "output") + + err := db.MarkJobRebased(job.ID) + if err != nil { + t.Fatalf("MarkJobRebased failed: %v", err) + } + + updated, _ := db.GetJobByID(job.ID) + if updated.Status != JobStatusRebased { + t.Errorf("Expected status 'rebased', 
got '%s'", updated.Status) + } + }) + + t.Run("mark non-done job fails", func(t *testing.T) { + job, _ := db.EnqueueJob(EnqueueOpts{RepoID: repo.ID, CommitID: commit.ID, GitRef: "rebased-test-q", Agent: "codex", JobType: JobTypeFix, ParentJobID: 1}) + + err := db.MarkJobRebased(job.ID) + if err == nil { + t.Error("MarkJobRebased should fail for queued jobs") + } + }) + + t.Run("mark non-fix job fails", func(t *testing.T) { + job, _ := db.EnqueueJob(EnqueueOpts{RepoID: repo.ID, CommitID: commit.ID, GitRef: "rebased-review", Agent: "codex"}) + db.ClaimJob("worker-1") + db.CompleteJob(job.ID, "codex", "prompt", "output") + + err := db.MarkJobRebased(job.ID) + if err == nil { + t.Error("MarkJobRebased should fail for non-fix jobs") + } + }) +} + func TestMigrationFromOldSchema(t *testing.T) { tmpDir := t.TempDir() dbPath := filepath.Join(tmpDir, "old.db") @@ -1354,6 +1454,24 @@ func TestMigrationFromOldSchema(t *testing.T) { t.Errorf("Expected status 'canceled', got '%s'", status) } + // Verify 'applied' and 'rebased' statuses work after migration + _, err = db.Exec(`UPDATE review_jobs SET status = 'done' WHERE id = ?`, jobID) + if err != nil { + t.Fatalf("Failed to set done status: %v", err) + } + _, err = db.Exec(`UPDATE review_jobs SET status = 'applied' WHERE id = ?`, jobID) + if err != nil { + t.Fatalf("Setting applied status failed after migration: %v", err) + } + _, err = db.Exec(`UPDATE review_jobs SET status = 'done' WHERE id = ?`, jobID) + if err != nil { + t.Fatalf("Failed to reset to done: %v", err) + } + _, err = db.Exec(`UPDATE review_jobs SET status = 'rebased' WHERE id = ?`, jobID) + if err != nil { + t.Fatalf("Setting rebased status failed after migration: %v", err) + } + // Verify constraint still rejects invalid status _, err = db.Exec(`UPDATE review_jobs SET status = 'invalid' WHERE id = ?`, jobID) if err == nil { @@ -3169,3 +3287,99 @@ func createJobChain(t *testing.T, db *DB, repoPath, sha string) (*Repo, *Commit, job := enqueueJob(t, db, repo.ID, commit.ID, sha) return repo, commit, job } + +func TestListJobsWithJobTypeFilter(t *testing.T) { + db := openTestDB(t) + defer db.Close() + + repo := createRepo(t, db, "/tmp/repo-jobtype") + commit := createCommit(t, db, repo.ID, "jt-sha") + + // Create a review job (default type) + reviewJob := enqueueJob(t, db, repo.ID, commit.ID, "jt-sha") + + // Create a fix job parented to the review + _, err := db.EnqueueJob(EnqueueOpts{ + RepoID: repo.ID, + CommitID: commit.ID, + GitRef: "jt-sha", + Agent: "codex", + JobType: JobTypeFix, + ParentJobID: reviewJob.ID, + }) + if err != nil { + t.Fatalf("EnqueueJob fix failed: %v", err) + } + + t.Run("filter by fix returns only fix jobs", func(t *testing.T) { + jobs, err := db.ListJobs("", "", 50, 0, WithJobType("fix")) + if err != nil { + t.Fatalf("ListJobs failed: %v", err) + } + if len(jobs) != 1 { + t.Fatalf("Expected 1 fix job, got %d", len(jobs)) + } + if jobs[0].JobType != JobTypeFix { + t.Errorf("Expected job_type 'fix', got %q", jobs[0].JobType) + } + }) + + t.Run("filter by review returns only review jobs", func(t *testing.T) { + jobs, err := db.ListJobs("", "", 50, 0, WithJobType("review")) + if err != nil { + t.Fatalf("ListJobs failed: %v", err) + } + if len(jobs) != 1 { + t.Fatalf("Expected 1 review job, got %d", len(jobs)) + } + if jobs[0].JobType != JobTypeReview { + t.Errorf("Expected job_type 'review', got %q", jobs[0].JobType) + } + }) + + t.Run("no filter returns all jobs", func(t *testing.T) { + jobs, err := db.ListJobs("", "", 50, 0) + if err != nil { + t.Fatalf("ListJobs 
failed: %v", err) + } + if len(jobs) != 2 { + t.Errorf("Expected 2 jobs with no filter, got %d", len(jobs)) + } + }) + + t.Run("nonexistent type returns empty", func(t *testing.T) { + jobs, err := db.ListJobs("", "", 50, 0, WithJobType("nonexistent")) + if err != nil { + t.Fatalf("ListJobs failed: %v", err) + } + if len(jobs) != 0 { + t.Errorf("Expected 0 jobs for nonexistent type, got %d", len(jobs)) + } + }) + + t.Run("exclude fix returns only non-fix jobs", func(t *testing.T) { + jobs, err := db.ListJobs("", "", 50, 0, WithExcludeJobType("fix")) + if err != nil { + t.Fatalf("ListJobs failed: %v", err) + } + if len(jobs) != 1 { + t.Fatalf("Expected 1 non-fix job, got %d", len(jobs)) + } + if jobs[0].JobType == JobTypeFix { + t.Error("Expected non-fix job, got fix") + } + }) + + t.Run("exclude review returns only non-review jobs", func(t *testing.T) { + jobs, err := db.ListJobs("", "", 50, 0, WithExcludeJobType("review")) + if err != nil { + t.Fatalf("ListJobs failed: %v", err) + } + if len(jobs) != 1 { + t.Fatalf("Expected 1 non-review job, got %d", len(jobs)) + } + if jobs[0].JobType != JobTypeFix { + t.Errorf("Expected fix job, got %q", jobs[0].JobType) + } + }) +} diff --git a/internal/storage/jobs.go b/internal/storage/jobs.go index e05f51be..20ab066c 100644 --- a/internal/storage/jobs.go +++ b/internal/storage/jobs.go @@ -726,7 +726,8 @@ type EnqueueOpts struct { OutputPrefix string // Prefix to prepend to review output Agentic bool // Allow file edits and command execution Label string // Display label in TUI for task jobs (default: "prompt") - JobType string // Explicit job type (review/range/dirty/task/compact); inferred if empty + JobType string // Explicit job type (review/range/dirty/task/compact/fix); inferred if empty + ParentJobID int64 // Parent job being fixed (for fix jobs) } // EnqueueJob creates a new review job. The job type is inferred from opts. 
@@ -779,16 +780,22 @@ func (db *DB) EnqueueJob(opts EnqueueOpts) (*ReviewJob, error) { commitIDParam = opts.CommitID } + // Use NULL for parent_job_id when not a fix job + var parentJobIDParam any + if opts.ParentJobID > 0 { + parentJobIDParam = opts.ParentJobID + } + result, err := db.Exec(` INSERT INTO review_jobs (repo_id, commit_id, git_ref, branch, agent, model, reasoning, status, job_type, review_type, patch_id, diff_content, prompt, agentic, output_prefix, - uuid, source_machine_id, updated_at) - VALUES (?, ?, ?, ?, ?, ?, ?, 'queued', ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, + parent_job_id, uuid, source_machine_id, updated_at) + VALUES (?, ?, ?, ?, ?, ?, ?, 'queued', ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, opts.RepoID, commitIDParam, gitRef, nullString(opts.Branch), opts.Agent, nullString(opts.Model), reasoning, jobType, opts.ReviewType, nullString(opts.PatchID), nullString(opts.DiffContent), nullString(opts.Prompt), agenticInt, - nullString(opts.OutputPrefix), + nullString(opts.OutputPrefix), parentJobIDParam, uid, machineID, nowStr) if err != nil { return nil, err @@ -815,6 +822,9 @@ func (db *DB) EnqueueJob(opts EnqueueOpts) (*ReviewJob, error) { SourceMachineID: machineID, UpdatedAt: &now, } + if opts.ParentJobID > 0 { + job.ParentJobID = &opts.ParentJobID + } if opts.CommitID > 0 { job.CommitID = &opts.CommitID } @@ -867,10 +877,11 @@ func (db *DB) ClaimJob(workerID string) (*ReviewJob, error) { var reviewType sql.NullString var outputPrefix sql.NullString var patchID sql.NullString + var parentJobID sql.NullInt64 err = db.QueryRow(` SELECT j.id, j.repo_id, j.commit_id, j.git_ref, j.branch, j.agent, j.model, j.reasoning, j.status, j.enqueued_at, r.root_path, r.name, c.subject, j.diff_content, j.prompt, COALESCE(j.agentic, 0), j.job_type, j.review_type, - j.output_prefix, j.patch_id + j.output_prefix, j.patch_id, j.parent_job_id FROM review_jobs j JOIN repos r ON r.id = j.repo_id LEFT JOIN commits c ON c.id = j.commit_id @@ -879,7 +890,7 @@ func (db *DB) ClaimJob(workerID string) (*ReviewJob, error) { LIMIT 1 `, workerID).Scan(&job.ID, &job.RepoID, &commitID, &job.GitRef, &branch, &job.Agent, &model, &job.Reasoning, &job.Status, &enqueuedAt, &job.RepoPath, &job.RepoName, &commitSubject, &diffContent, &prompt, &agenticInt, &jobType, &reviewType, - &outputPrefix, &patchID) + &outputPrefix, &patchID, &parentJobID) if err != nil { return nil, err } @@ -915,6 +926,9 @@ func (db *DB) ClaimJob(workerID string) (*ReviewJob, error) { if patchID.Valid { job.PatchID = patchID.String } + if parentJobID.Valid { + job.ParentJobID = &parentJobID.Int64 + } job.EnqueuedAt = parseSQLiteTime(enqueuedAt) job.Status = JobStatusRunning job.WorkerID = workerID @@ -928,6 +942,82 @@ func (db *DB) SaveJobPrompt(jobID int64, prompt string) error { return err } +// SaveJobPatch stores the generated patch for a completed fix job +func (db *DB) SaveJobPatch(jobID int64, patch string) error { + _, err := db.Exec(`UPDATE review_jobs SET patch = ? WHERE id = ?`, patch, jobID) + return err +} + +// CompleteFixJob atomically marks a fix job as done, stores the review, +// and persists the patch in a single transaction. This prevents invalid +// states where a patch is written but the job isn't done, or vice versa. 
+func (db *DB) CompleteFixJob(jobID int64, agent, prompt, output, patch string) error { + now := time.Now().Format(time.RFC3339) + machineID, _ := db.GetMachineID() + reviewUUID := GenerateUUID() + + ctx := context.Background() + conn, err := db.Conn(ctx) + if err != nil { + return err + } + defer conn.Close() + + if _, err := conn.ExecContext(ctx, "BEGIN IMMEDIATE"); err != nil { + return err + } + committed := false + defer func() { + if !committed { + if _, err := conn.ExecContext(ctx, "ROLLBACK"); err != nil { + log.Printf("jobs CompleteFixJob: rollback failed: %v", err) + } + } + }() + + // Fetch output_prefix from job (if any) + var outputPrefix sql.NullString + err = conn.QueryRowContext(ctx, `SELECT output_prefix FROM review_jobs WHERE id = ?`, jobID).Scan(&outputPrefix) + if err != nil && err != sql.ErrNoRows { + return err + } + + finalOutput := output + if outputPrefix.Valid && outputPrefix.String != "" { + finalOutput = outputPrefix.String + output + } + + // Atomically set status=done AND patch in one UPDATE + result, err := conn.ExecContext(ctx, + `UPDATE review_jobs SET status = 'done', finished_at = ?, updated_at = ?, patch = ? WHERE id = ? AND status = 'running'`, + now, now, patch, jobID) + if err != nil { + return err + } + + rows, err := result.RowsAffected() + if err != nil { + return err + } + if rows == 0 { + return nil // Job was canceled + } + + _, err = conn.ExecContext(ctx, + `INSERT INTO reviews (job_id, agent, prompt, output, uuid, updated_by_machine_id, updated_at) VALUES (?, ?, ?, ?, ?, ?, ?)`, + jobID, agent, prompt, finalOutput, reviewUUID, machineID, now) + if err != nil { + return err + } + + _, err = conn.ExecContext(ctx, "COMMIT") + if err != nil { + return err + } + committed = true + return nil +} + // CompleteJob marks a job as done and stores the review. // Only updates if job is still in 'running' state (respects cancellation). // If the job has an output_prefix, it will be prepended to the output. @@ -1051,6 +1141,49 @@ func (db *DB) CancelJob(jobID int64) error { return nil } +// MarkJobApplied transitions a fix job from done to applied. +func (db *DB) MarkJobApplied(jobID int64) error { + now := time.Now().Format(time.RFC3339) + result, err := db.Exec(` + UPDATE review_jobs + SET status = 'applied', updated_at = ? + WHERE id = ? AND status = 'done' AND job_type = 'fix' + `, now, jobID) + if err != nil { + return err + } + rows, err := result.RowsAffected() + if err != nil { + return err + } + if rows == 0 { + return sql.ErrNoRows + } + return nil +} + +// MarkJobRebased transitions a done fix job to the "rebased" terminal state. +// This indicates the patch was stale and a new rebase job was triggered. +func (db *DB) MarkJobRebased(jobID int64) error { + now := time.Now().Format(time.RFC3339) + result, err := db.Exec(` + UPDATE review_jobs + SET status = 'rebased', updated_at = ? + WHERE id = ? AND status = 'done' AND job_type = 'fix' + `, now, jobID) + if err != nil { + return err + } + rows, err := result.RowsAffected() + if err != nil { + return err + } + if rows == 0 { + return sql.ErrNoRows + } + return nil +} + // ReenqueueJob resets a completed, failed, or canceled job back to queued status. // This allows manual re-running of jobs to get a fresh review. // For done jobs, the existing review is deleted to avoid unique constraint violations. 
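
For orientation, a hedged sketch of the worker-side sequence these helpers are designed for. runAgent and the job fields used here are assumptions about the caller; only Create, CapturePatch, Close, and CompleteFixJob are defined in this diff:

    wt, err := worktree.Create(job.RepoPath)
    if err != nil {
        return err
    }
    defer wt.Close()

    // runAgent is a hypothetical helper: the agent edits files under wt.Dir,
    // never the user's checkout.
    output, err := runAgent(wt.Dir, job)
    if err != nil {
        return err
    }

    patch, err := wt.CapturePatch()
    if err != nil {
        return err
    }
    // Status, review row, and patch are persisted in one transaction.
    return db.CompleteFixJob(job.ID, job.Agent, job.Prompt, output, patch)
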
@@ -1083,7 +1216,7 @@ func (db *DB) ReenqueueJob(jobID int64) error { // Reset job status result, err := conn.ExecContext(ctx, ` UPDATE review_jobs - SET status = 'queued', worker_id = NULL, started_at = NULL, finished_at = NULL, error = NULL, retry_count = 0 + SET status = 'queued', worker_id = NULL, started_at = NULL, finished_at = NULL, error = NULL, retry_count = 0, patch = NULL WHERE id = ? AND status IN ('done', 'failed', 'canceled') `, jobID) if err != nil { @@ -1182,6 +1315,8 @@ type listJobsOptions struct { branch string branchIncludeEmpty bool addressed *bool + jobType string + excludeJobType string } // WithGitRef filters jobs by git ref. @@ -1208,6 +1343,16 @@ func WithAddressed(addressed bool) ListJobsOption { return func(o *listJobsOptions) { o.addressed = &addressed } } +// WithJobType filters jobs by job_type (e.g. "fix", "review"). +func WithJobType(jobType string) ListJobsOption { + return func(o *listJobsOptions) { o.jobType = jobType } +} + +// WithExcludeJobType excludes jobs of the given type. +func WithExcludeJobType(jobType string) ListJobsOption { + return func(o *listJobsOptions) { o.excludeJobType = jobType } +} + // ListJobs returns jobs with optional status, repo, branch, and addressed filters. // addressedFilter: nil = no filter, non-nil bool = filter by addressed state. func (db *DB) ListJobs(statusFilter string, repoFilter string, limit, offset int, opts ...ListJobsOption) ([]ReviewJob, error) { @@ -1215,7 +1360,8 @@ func (db *DB) ListJobs(statusFilter string, repoFilter string, limit, offset int SELECT j.id, j.repo_id, j.commit_id, j.git_ref, j.branch, j.agent, j.reasoning, j.status, j.enqueued_at, j.started_at, j.finished_at, j.worker_id, j.error, j.prompt, j.retry_count, COALESCE(j.agentic, 0), r.root_path, r.name, c.subject, rv.addressed, rv.output, - j.source_machine_id, j.uuid, j.model, j.job_type, j.review_type, j.patch_id + j.source_machine_id, j.uuid, j.model, j.job_type, j.review_type, j.patch_id, + j.parent_job_id FROM review_jobs j JOIN repos r ON r.id = j.repo_id LEFT JOIN commits c ON c.id = j.commit_id @@ -1255,6 +1401,14 @@ func (db *DB) ListJobs(statusFilter string, repoFilter string, limit, offset int conditions = append(conditions, "(rv.addressed IS NULL OR rv.addressed = 0)") } } + if o.jobType != "" { + conditions = append(conditions, "j.job_type = ?") + args = append(args, o.jobType) + } + if o.excludeJobType != "" { + conditions = append(conditions, "j.job_type != ?") + args = append(args, o.excludeJobType) + } if len(conditions) > 0 { query += " WHERE " + strings.Join(conditions, " AND ") @@ -1287,11 +1441,13 @@ func (db *DB) ListJobs(statusFilter string, repoFilter string, limit, offset int var commitSubject sql.NullString var addressed sql.NullInt64 var agentic int + var parentJobID sql.NullInt64 err := rows.Scan(&j.ID, &j.RepoID, &commitID, &j.GitRef, &branch, &j.Agent, &j.Reasoning, &j.Status, &enqueuedAt, &startedAt, &finishedAt, &workerID, &errMsg, &prompt, &j.RetryCount, &agentic, &j.RepoPath, &j.RepoName, &commitSubject, &addressed, &output, - &sourceMachineID, &jobUUID, &model, &jobTypeStr, &reviewTypeStr, &patchIDStr) + &sourceMachineID, &jobUUID, &model, &jobTypeStr, &reviewTypeStr, &patchIDStr, + &parentJobID) if err != nil { return nil, err } @@ -1346,6 +1502,9 @@ func (db *DB) ListJobs(statusFilter string, repoFilter string, limit, offset int val := addressed.Int64 != 0 j.Addressed = &val } + if parentJobID.Valid { + j.ParentJobID = &parentJobID.Int64 + } // Compute verdict only for non-task jobs (task jobs don't have 
PASS/FAIL verdicts) // Task jobs (run, analyze, custom) are identified by having no commit_id and not being dirty if output.Valid && !j.IsTaskJob() { @@ -1415,19 +1574,23 @@ func (db *DB) GetJobByID(id int64) (*ReviewJob, error) { var commitID sql.NullInt64 var commitSubject sql.NullString var agentic int + var parentJobID sql.NullInt64 + var patch sql.NullString var model, branch, jobTypeStr, reviewTypeStr, patchIDStr sql.NullString err := db.QueryRow(` SELECT j.id, j.repo_id, j.commit_id, j.git_ref, j.branch, j.agent, j.reasoning, j.status, j.enqueued_at, j.started_at, j.finished_at, j.worker_id, j.error, j.prompt, COALESCE(j.agentic, 0), - r.root_path, r.name, c.subject, j.model, j.job_type, j.review_type, j.patch_id + r.root_path, r.name, c.subject, j.model, j.job_type, j.review_type, j.patch_id, + j.parent_job_id, j.patch FROM review_jobs j JOIN repos r ON r.id = j.repo_id LEFT JOIN commits c ON c.id = j.commit_id WHERE j.id = ? `, id).Scan(&j.ID, &j.RepoID, &commitID, &j.GitRef, &branch, &j.Agent, &j.Reasoning, &j.Status, &enqueuedAt, &startedAt, &finishedAt, &workerID, &errMsg, &prompt, &agentic, - &j.RepoPath, &j.RepoName, &commitSubject, &model, &jobTypeStr, &reviewTypeStr, &patchIDStr) + &j.RepoPath, &j.RepoName, &commitSubject, &model, &jobTypeStr, &reviewTypeStr, &patchIDStr, + &parentJobID, &patch) if err != nil { return nil, err } @@ -1472,12 +1635,18 @@ func (db *DB) GetJobByID(id int64) (*ReviewJob, error) { if branch.Valid { j.Branch = branch.String } + if parentJobID.Valid { + j.ParentJobID = &parentJobID.Int64 + } + if patch.Valid { + j.Patch = &patch.String + } return &j, nil } // GetJobCounts returns counts of jobs by status -func (db *DB) GetJobCounts() (queued, running, done, failed, canceled int, err error) { +func (db *DB) GetJobCounts() (queued, running, done, failed, canceled, applied, rebased int, err error) { rows, err := db.Query(`SELECT status, COUNT(*) FROM review_jobs GROUP BY status`) if err != nil { return @@ -1501,6 +1670,10 @@ func (db *DB) GetJobCounts() (queued, running, done, failed, canceled int, err e failed = count case JobStatusCanceled: canceled = count + case JobStatusApplied: + applied = count + case JobStatusRebased: + rebased = count } } err = rows.Err() diff --git a/internal/storage/models.go b/internal/storage/models.go index bcede9da..d8cc41cc 100644 --- a/internal/storage/models.go +++ b/internal/storage/models.go @@ -31,6 +31,8 @@ const ( JobStatusDone JobStatus = "done" JobStatusFailed JobStatus = "failed" JobStatusCanceled JobStatus = "canceled" + JobStatusApplied JobStatus = "applied" + JobStatusRebased JobStatus = "rebased" ) // JobType classifies what kind of work a review job represents. 
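
To make the two new terminal states concrete, a small hedged sketch of the transitions they model; userConfirmedApply and patchIsStale stand in for whatever signal the caller actually has and are not names from this codebase:

    // queued → running → done, then for fix jobs either:
    //   done → applied (patch applied to the working tree after confirmation)
    //   done → rebased (patch was stale; a replacement fix job gets enqueued)
    switch {
    case userConfirmedApply:
        err = db.MarkJobApplied(jobID) // sql.ErrNoRows unless status=done and job_type=fix
    case patchIsStale:
        err = db.MarkJobRebased(jobID)
    }
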
@@ -40,6 +42,7 @@ const ( JobTypeDirty = "dirty" // Uncommitted changes review JobTypeTask = "task" // Run/analyze/design/custom prompt JobTypeCompact = "compact" // Consolidated review verification + JobTypeFix = "fix" // Background fix using worktree ) type ReviewJob struct { @@ -65,6 +68,8 @@ type ReviewJob struct { ReviewType string `json:"review_type,omitempty"` // Review type (e.g., "security") - changes system prompt PatchID string `json:"patch_id,omitempty"` // Stable patch-id for rebase tracking OutputPrefix string `json:"output_prefix,omitempty"` // Prefix to prepend to review output + ParentJobID *int64 `json:"parent_job_id,omitempty"` // Job being fixed (for fix jobs) + Patch *string `json:"patch,omitempty"` // Generated diff patch (for completed fix jobs) // Sync fields UUID string `json:"uuid,omitempty"` // Globally unique identifier for sync SourceMachineID string `json:"source_machine_id,omitempty"` // Machine that created this job @@ -114,11 +119,22 @@ func (j ReviewJob) IsTaskJob() bool { return true } -// IsPromptJob returns true if this job type uses a pre-stored prompt -// (task or compact). These job types have prompts built by the CLI at -// enqueue time, not constructed by the worker from git data. -func (j ReviewJob) IsPromptJob() bool { - return j.JobType == JobTypeTask || j.JobType == JobTypeCompact +// UsesStoredPrompt returns true if this job type uses a pre-stored prompt +// (task, compact, or fix). These job types have prompts built at enqueue +// time, not constructed by the worker from git data. +func (j ReviewJob) UsesStoredPrompt() bool { + return j.JobType == JobTypeTask || j.JobType == JobTypeCompact || j.JobType == JobTypeFix +} + +// IsFixJob returns true if this is a background fix job. +func (j ReviewJob) IsFixJob() bool { + return j.JobType == JobTypeFix +} + +// HasViewableOutput returns true if this job has completed and its review/patch +// can be viewed. This covers done, applied, and rebased terminal states. 
+func (j ReviewJob) HasViewableOutput() bool { + return j.Status == JobStatusDone || j.Status == JobStatusApplied || j.Status == JobStatusRebased } // JobWithReview pairs a job with its review for batch operations @@ -167,6 +183,8 @@ type DaemonStatus struct { CompletedJobs int `json:"completed_jobs"` FailedJobs int `json:"failed_jobs"` CanceledJobs int `json:"canceled_jobs"` + AppliedJobs int `json:"applied_jobs"` + RebasedJobs int `json:"rebased_jobs"` ActiveWorkers int `json:"active_workers"` MaxWorkers int `json:"max_workers"` MachineID string `json:"machine_id,omitempty"` // Local machine ID for remote job detection diff --git a/internal/storage/models_test.go b/internal/storage/models_test.go index 450cae3e..4278305c 100644 --- a/internal/storage/models_test.go +++ b/internal/storage/models_test.go @@ -193,7 +193,7 @@ func TestIsDirtyJob(t *testing.T) { }) } -func TestIsPromptJob(t *testing.T) { +func TestUsesStoredPrompt(t *testing.T) { tests := []struct { name string job storage.ReviewJob @@ -210,8 +210,8 @@ func TestIsPromptJob(t *testing.T) { for _, tt := range tests { t.Run(tt.name, func(t *testing.T) { - if got := tt.job.IsPromptJob(); got != tt.want { - t.Errorf("IsPromptJob() = %v, want %v", got, tt.want) + if got := tt.job.UsesStoredPrompt(); got != tt.want { + t.Errorf("UsesStoredPrompt() = %v, want %v", got, tt.want) } }) } diff --git a/internal/storage/repos_test.go b/internal/storage/repos_test.go index 1e46c4db..308c2339 100644 --- a/internal/storage/repos_test.go +++ b/internal/storage/repos_test.go @@ -1078,9 +1078,9 @@ func TestVerdictSuppressionForPromptJobs(t *testing.T) { // TestRetriedReviewJobNotRoutedAsPromptJob verifies that when a review // job is retried, the saved prompt from the first run does not cause // the job to be misidentified as a prompt-native job (task/compact). -// This is the storage-level regression test for the IsPromptJob gate. +// This is the storage-level regression test for the UsesStoredPrompt gate. func TestRetriedReviewJobNotRoutedAsPromptJob(t *testing.T) { - t.Run("review job: saved prompt does not make IsPromptJob true", func(t *testing.T) { + t.Run("review job: saved prompt does not make UsesStoredPrompt true", func(t *testing.T) { db := openTestDB(t) defer db.Close() @@ -1098,8 +1098,8 @@ func TestRetriedReviewJobNotRoutedAsPromptJob(t *testing.T) { if claimed.Prompt != "" { t.Fatalf("First claim: expected empty prompt, got %q", claimed.Prompt) } - if claimed.IsPromptJob() { - t.Fatal("First claim: IsPromptJob() should be false for review job") + if claimed.UsesStoredPrompt() { + t.Fatal("First claim: UsesStoredPrompt() should be false for review job") } // 3. Worker saves a built prompt (simulating processJob behavior) @@ -1117,7 +1117,7 @@ func TestRetriedReviewJobNotRoutedAsPromptJob(t *testing.T) { t.Fatal("RetryJob returned false, expected true") } - // 5. Claim again — prompt is non-empty but IsPromptJob must be false + // 5. 
Claim again — prompt is non-empty but UsesStoredPrompt must be false reclaimed := claimJob(t, db, "worker-2") if reclaimed.ID != claimed.ID { t.Fatalf("Expected to reclaim job %d, got %d", claimed.ID, reclaimed.ID) @@ -1128,12 +1128,12 @@ func TestRetriedReviewJobNotRoutedAsPromptJob(t *testing.T) { if reclaimed.JobType != JobTypeReview { t.Errorf("Reclaim: expected job_type=%q, got %q", JobTypeReview, reclaimed.JobType) } - if reclaimed.IsPromptJob() { - t.Error("Reclaim: IsPromptJob() must be false for review job, even with saved prompt") + if reclaimed.UsesStoredPrompt() { + t.Error("Reclaim: UsesStoredPrompt() must be false for review job, even with saved prompt") } }) - t.Run("task job: IsPromptJob true across retry", func(t *testing.T) { + t.Run("task job: UsesStoredPrompt true across retry", func(t *testing.T) { db := openTestDB(t) defer db.Close() @@ -1150,13 +1150,13 @@ func TestRetriedReviewJobNotRoutedAsPromptJob(t *testing.T) { t.Fatalf("Expected job_type=%q, got %q", JobTypeTask, job.JobType) } - // 2. Claim it — prompt and IsPromptJob should both be set + // 2. Claim it — prompt and UsesStoredPrompt should both be set claimed := claimJob(t, db, "worker-1") if claimed.Prompt != taskPrompt { t.Errorf("First claim: expected prompt %q, got %q", taskPrompt, claimed.Prompt) } - if !claimed.IsPromptJob() { - t.Error("First claim: IsPromptJob() should be true for task job") + if !claimed.UsesStoredPrompt() { + t.Error("First claim: UsesStoredPrompt() should be true for task job") } // 3. Retry @@ -1173,15 +1173,15 @@ func TestRetriedReviewJobNotRoutedAsPromptJob(t *testing.T) { if reclaimed.ID != claimed.ID { t.Fatalf("Expected to reclaim job %d, got %d", claimed.ID, reclaimed.ID) } - if !reclaimed.IsPromptJob() { - t.Error("Reclaim: IsPromptJob() must be true for task job") + if !reclaimed.UsesStoredPrompt() { + t.Error("Reclaim: UsesStoredPrompt() must be true for task job") } if reclaimed.Prompt != taskPrompt { t.Errorf("Reclaim: expected prompt %q, got %q", taskPrompt, reclaimed.Prompt) } }) - t.Run("compact job: IsPromptJob true across retry", func(t *testing.T) { + t.Run("compact job: UsesStoredPrompt true across retry", func(t *testing.T) { db := openTestDB(t) defer db.Close() @@ -1202,8 +1202,8 @@ func TestRetriedReviewJobNotRoutedAsPromptJob(t *testing.T) { // Claim, retry, reclaim claimed := claimJob(t, db, "worker-1") - if !claimed.IsPromptJob() { - t.Error("Compact job: IsPromptJob() should be true") + if !claimed.UsesStoredPrompt() { + t.Error("Compact job: UsesStoredPrompt() should be true") } retried, err := db.RetryJob(claimed.ID, "", 3) @@ -1215,15 +1215,15 @@ func TestRetriedReviewJobNotRoutedAsPromptJob(t *testing.T) { } reclaimed := claimJob(t, db, "worker-2") - if !reclaimed.IsPromptJob() { - t.Error("Reclaim: IsPromptJob() must be true for compact job") + if !reclaimed.UsesStoredPrompt() { + t.Error("Reclaim: UsesStoredPrompt() must be true for compact job") } if reclaimed.Prompt != compactPrompt { t.Errorf("Reclaim: expected prompt %q, got %q", compactPrompt, reclaimed.Prompt) } }) - t.Run("dirty job: saved prompt does not make IsPromptJob true", func(t *testing.T) { + t.Run("dirty job: saved prompt does not make UsesStoredPrompt true", func(t *testing.T) { db := openTestDB(t) defer db.Close() @@ -1255,12 +1255,12 @@ func TestRetriedReviewJobNotRoutedAsPromptJob(t *testing.T) { } reclaimed := claimJob(t, db, "worker-2") - if reclaimed.IsPromptJob() { - t.Error("Dirty job: IsPromptJob() must be false even with saved prompt") + if reclaimed.UsesStoredPrompt() { + 
t.Error("Dirty job: UsesStoredPrompt() must be false even with saved prompt") } }) - t.Run("range job: saved prompt does not make IsPromptJob true", func(t *testing.T) { + t.Run("range job: saved prompt does not make UsesStoredPrompt true", func(t *testing.T) { db := openTestDB(t) defer db.Close() @@ -1291,8 +1291,8 @@ func TestRetriedReviewJobNotRoutedAsPromptJob(t *testing.T) { } reclaimed := claimJob(t, db, "worker-2") - if reclaimed.IsPromptJob() { - t.Error("Range job: IsPromptJob() must be false even with saved prompt") + if reclaimed.UsesStoredPrompt() { + t.Error("Range job: UsesStoredPrompt() must be false even with saved prompt") } }) } diff --git a/internal/storage/sync_test.go b/internal/storage/sync_test.go index a18c7c1f..0f08e0ac 100644 --- a/internal/storage/sync_test.go +++ b/internal/storage/sync_test.go @@ -659,7 +659,7 @@ func createLegacyCommonTables(t *testing.T, db *sql.DB) { git_ref TEXT NOT NULL, agent TEXT NOT NULL DEFAULT 'codex', reasoning TEXT NOT NULL DEFAULT 'thorough', - status TEXT NOT NULL CHECK(status IN ('queued','running','done','failed','canceled')) DEFAULT 'queued', + status TEXT NOT NULL CHECK(status IN ('queued','running','done','failed','canceled','applied','rebased')) DEFAULT 'queued', enqueued_at TEXT NOT NULL DEFAULT (datetime('now')), started_at TEXT, finished_at TEXT, @@ -822,6 +822,7 @@ func TestDuplicateRepoIdentity_MigrationSuccess(t *testing.T) { created_at TEXT NOT NULL DEFAULT (datetime('now')), UNIQUE(repo_id, sha) ); + CREATE INDEX idx_commits_sha ON commits(sha); `) if err != nil { @@ -892,6 +893,7 @@ func TestUniqueIndexMigration(t *testing.T) { created_at TEXT NOT NULL DEFAULT (datetime('now')), UNIQUE(repo_id, sha) ); + CREATE INDEX idx_commits_sha ON commits(sha); `) if err != nil { diff --git a/internal/worktree/worktree.go b/internal/worktree/worktree.go new file mode 100644 index 00000000..1e3a5329 --- /dev/null +++ b/internal/worktree/worktree.go @@ -0,0 +1,250 @@ +package worktree + +import ( + "bufio" + "bytes" + "fmt" + "io/fs" + "log" + "os" + "os/exec" + "path/filepath" + "strconv" + "strings" +) + +// Worktree represents a temporary git worktree for isolated agent work. +// Call Close to remove the worktree and its directory. +type Worktree struct { + Dir string // Path to the worktree directory + repoPath string // Path to the parent repository + baseSHA string // SHA of the commit the worktree was detached at +} + +// Close removes the worktree and its directory. +func (w *Worktree) Close() { + _ = exec.Command("git", "-C", w.repoPath, "worktree", "remove", "--force", w.Dir).Run() + _ = os.RemoveAll(w.Dir) +} + +// Create creates a temporary git worktree detached at HEAD for isolated agent work. +func Create(repoPath string) (*Worktree, error) { + worktreeDir, err := os.MkdirTemp("", "roborev-worktree-") + if err != nil { + return nil, err + } + + // Create the worktree (without --recurse-submodules for compatibility with older git). + // Suppress hooks via core.hooksPath= — user hooks shouldn't run in internal worktrees. 
+ cmd := exec.Command("git", "-C", repoPath, "-c", "core.hooksPath="+os.DevNull, "worktree", "add", "--detach", worktreeDir, "HEAD") + if out, err := cmd.CombinedOutput(); err != nil { + os.RemoveAll(worktreeDir) + return nil, fmt.Errorf("git worktree add: %w: %s", err, out) + } + + // Initialize and update submodules in the worktree + initArgs := []string{"-C", worktreeDir} + if submoduleRequiresFileProtocol(worktreeDir) { + initArgs = append(initArgs, "-c", "protocol.file.allow=always") + } + initArgs = append(initArgs, "submodule", "update", "--init") + cmd = exec.Command("git", initArgs...) + if out, err := cmd.CombinedOutput(); err != nil { + _ = exec.Command("git", "-C", repoPath, "worktree", "remove", "--force", worktreeDir).Run() + _ = os.RemoveAll(worktreeDir) + return nil, fmt.Errorf("git submodule update: %w: %s", err, out) + } + + updateArgs := []string{"-C", worktreeDir} + if submoduleRequiresFileProtocol(worktreeDir) { + updateArgs = append(updateArgs, "-c", "protocol.file.allow=always") + } + updateArgs = append(updateArgs, "submodule", "update", "--init", "--recursive") + cmd = exec.Command("git", updateArgs...) + if out, err := cmd.CombinedOutput(); err != nil { + _ = exec.Command("git", "-C", repoPath, "worktree", "remove", "--force", worktreeDir).Run() + _ = os.RemoveAll(worktreeDir) + return nil, fmt.Errorf("git submodule update: %w: %s", err, out) + } + + lfsCmd := exec.Command("git", "-C", worktreeDir, "lfs", "env") + if err := lfsCmd.Run(); err == nil { + cmd = exec.Command("git", "-C", worktreeDir, "lfs", "pull") + _ = cmd.Run() + } + + // Record the base SHA for patch capture + shaCmd := exec.Command("git", "-C", worktreeDir, "rev-parse", "HEAD") + shaOut, shaErr := shaCmd.Output() + baseSHA := "" + if shaErr == nil { + baseSHA = strings.TrimSpace(string(shaOut)) + } + + return &Worktree{Dir: worktreeDir, repoPath: repoPath, baseSHA: baseSHA}, nil +} + +// CapturePatch stages all changes in the worktree and returns the diff as a patch string. +// Returns empty string if there are no changes. Handles both uncommitted and committed +// changes by diffing the final tree state against the base SHA. +func (w *Worktree) CapturePatch() (string, error) { + // Stage all changes in worktree + cmd := exec.Command("git", "-C", w.Dir, "add", "-A") + if out, err := cmd.CombinedOutput(); err != nil { + return "", fmt.Errorf("git add in worktree: %w: %s", err, out) + } + + // If we have a base SHA, diff the current tree state (HEAD + staged) against it. + // This captures both committed and uncommitted changes the agent made. 
+ if w.baseSHA != "" { + // Create a temporary tree object from the index (staged state) + treeCmd := exec.Command("git", "-C", w.Dir, "write-tree") + treeOut, err := treeCmd.Output() + if err != nil { + log.Printf("CapturePatch: write-tree failed, falling back to diff --cached: %v", err) + } else { + tree := strings.TrimSpace(string(treeOut)) + diffCmd := exec.Command("git", "-C", w.Dir, "diff-tree", "-p", "--binary", w.baseSHA, tree) + diff, err := diffCmd.Output() + if err != nil { + log.Printf("CapturePatch: diff-tree failed, falling back to diff --cached: %v", err) + } else if len(diff) > 0 { + return string(diff), nil + } + } + } + + // Fallback: diff staged changes against HEAD (works when agent didn't commit) + diffCmd := exec.Command("git", "-C", w.Dir, "diff", "--cached", "--binary") + diff, err := diffCmd.Output() + if err != nil { + return "", fmt.Errorf("git diff in worktree: %w", err) + } + return string(diff), nil +} + +// ApplyPatch applies a patch to a repository. Returns nil if patch is empty. +func ApplyPatch(repoPath, patch string) error { + if patch == "" { + return nil + } + applyCmd := exec.Command("git", "-C", repoPath, "apply", "--binary", "-") + applyCmd.Stdin = strings.NewReader(patch) + var stderr bytes.Buffer + applyCmd.Stderr = &stderr + if err := applyCmd.Run(); err != nil { + return fmt.Errorf("git apply: %w: %s", err, stderr.String()) + } + return nil +} + +// PatchConflictError indicates the patch does not apply due to merge conflicts. +// Other errors (malformed patch, permission errors) are returned as plain errors. +type PatchConflictError struct { + Detail string +} + +func (e *PatchConflictError) Error() string { + return "patch conflict: " + e.Detail +} + +// CheckPatch does a dry-run apply to check if a patch applies cleanly. +// Returns a *PatchConflictError when the patch fails due to conflicts, +// or a plain error for other failures (malformed patch, etc.). +func CheckPatch(repoPath, patch string) error { + if patch == "" { + return nil + } + applyCmd := exec.Command("git", "-C", repoPath, "apply", "--check", "--binary", "-") + applyCmd.Stdin = strings.NewReader(patch) + var stderr bytes.Buffer + applyCmd.Stderr = &stderr + if err := applyCmd.Run(); err != nil { + msg := stderr.String() + // "error: patch failed" and "does not apply" indicate merge conflicts. + // Other messages (e.g. "corrupt patch") are non-conflict errors. 
+ if strings.Contains(msg, "patch failed") || strings.Contains(msg, "does not apply") { + return &PatchConflictError{Detail: msg} + } + return fmt.Errorf("patch check failed: %s", msg) + } + return nil +} + +func submoduleRequiresFileProtocol(repoPath string) bool { + gitmodulesPaths := findGitmodulesPaths(repoPath) + if len(gitmodulesPaths) == 0 { + return false + } + for _, gitmodulesPath := range gitmodulesPaths { + file, err := os.Open(gitmodulesPath) + if err != nil { + continue + } + scanner := bufio.NewScanner(file) + for scanner.Scan() { + line := strings.TrimSpace(scanner.Text()) + if line == "" || strings.HasPrefix(line, "#") || strings.HasPrefix(line, ";") { + continue + } + parts := strings.SplitN(line, "=", 2) + if len(parts) != 2 { + continue + } + if !strings.EqualFold(strings.TrimSpace(parts[0]), "url") { + continue + } + url := strings.TrimSpace(parts[1]) + if unquoted, err := strconv.Unquote(url); err == nil { + url = unquoted + } + if isFileProtocolURL(url) { + file.Close() + return true + } + } + file.Close() + } + return false +} + +func findGitmodulesPaths(repoPath string) []string { + var paths []string + err := filepath.WalkDir(repoPath, func(path string, d fs.DirEntry, err error) error { + if err != nil { + return nil + } + if d.IsDir() && d.Name() == ".git" { + return filepath.SkipDir + } + if d.Name() == ".gitmodules" { + paths = append(paths, path) + } + return nil + }) + if err != nil { + return nil + } + return paths +} + +func isFileProtocolURL(url string) bool { + lower := strings.ToLower(url) + if strings.HasPrefix(lower, "file:") { + return true + } + if strings.HasPrefix(url, "/") || strings.HasPrefix(url, "./") || strings.HasPrefix(url, "../") { + return true + } + if len(url) >= 2 && isAlpha(url[0]) && url[1] == ':' { + return true + } + if strings.HasPrefix(url, `\\`) { + return true + } + return false +} + +func isAlpha(b byte) bool { + return (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') +} diff --git a/internal/worktree/worktree_test.go b/internal/worktree/worktree_test.go new file mode 100644 index 00000000..109d0ee3 --- /dev/null +++ b/internal/worktree/worktree_test.go @@ -0,0 +1,348 @@ +package worktree + +import ( + "fmt" + "os" + "os/exec" + "path/filepath" + "strings" + "testing" +) + +// setupGitRepo creates a minimal git repo with one commit and returns its path. +func setupGitRepo(t *testing.T) string { + t.Helper() + if _, err := exec.LookPath("git"); err != nil { + t.Skip("git not available") + } + dir := t.TempDir() + run := func(args ...string) { + t.Helper() + cmd := exec.Command("git", append([]string{"-C", dir}, args...)...) 
+ cmd.Env = append(os.Environ(), + "GIT_AUTHOR_NAME=test", "GIT_AUTHOR_EMAIL=test@test.com", + "GIT_COMMITTER_NAME=test", "GIT_COMMITTER_EMAIL=test@test.com", + ) + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("git %v failed: %v\n%s", args, err, out) + } + } + run("init") + run("config", "user.email", "test@test.com") + run("config", "user.name", "test") + if err := os.WriteFile(filepath.Join(dir, "hello.txt"), []byte("hello"), 0644); err != nil { + t.Fatal(err) + } + run("add", "hello.txt") + run("commit", "-m", "initial") + return dir +} + +func TestCreateAndClose(t *testing.T) { + repo := setupGitRepo(t) + + wt, err := Create(repo) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + + // Worktree dir should exist and contain the file + if _, err := os.Stat(filepath.Join(wt.Dir, "hello.txt")); err != nil { + t.Fatalf("expected hello.txt in worktree: %v", err) + } + + wtDir := wt.Dir + wt.Close() + + // After Close, the directory should be removed + if _, err := os.Stat(wtDir); !os.IsNotExist(err) { + t.Fatalf("worktree dir should be removed after Close, got: %v", err) + } +} + +func TestCapturePatchNoChanges(t *testing.T) { + repo := setupGitRepo(t) + + wt, err := Create(repo) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + defer wt.Close() + + patch, err := wt.CapturePatch() + if err != nil { + t.Fatalf("CapturePatch failed: %v", err) + } + if patch != "" { + t.Fatalf("expected empty patch for unchanged worktree, got %d bytes", len(patch)) + } +} + +func TestCapturePatchWithChanges(t *testing.T) { + repo := setupGitRepo(t) + + wt, err := Create(repo) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + defer wt.Close() + + // Modify a file and add a new one + if err := os.WriteFile(filepath.Join(wt.Dir, "hello.txt"), []byte("modified"), 0644); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(wt.Dir, "new.txt"), []byte("new file"), 0644); err != nil { + t.Fatal(err) + } + + patch, err := wt.CapturePatch() + if err != nil { + t.Fatalf("CapturePatch failed: %v", err) + } + if patch == "" { + t.Fatal("expected non-empty patch") + } + if !strings.Contains(patch, "hello.txt") { + t.Error("patch should reference hello.txt") + } + if !strings.Contains(patch, "new.txt") { + t.Error("patch should reference new.txt") + } +} + +func TestCapturePatchCommittedChanges(t *testing.T) { + repo := setupGitRepo(t) + + wt, err := Create(repo) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + defer wt.Close() + + // Simulate an agent that commits changes (instead of just staging) + if err := os.WriteFile(filepath.Join(wt.Dir, "hello.txt"), []byte("committed-change"), 0644); err != nil { + t.Fatal(err) + } + run := func(args ...string) { + t.Helper() + cmd := exec.Command("git", append([]string{"-C", wt.Dir}, args...)...) 
+ cmd.Env = append(os.Environ(), + "GIT_AUTHOR_NAME=test", "GIT_AUTHOR_EMAIL=test@test.com", + "GIT_COMMITTER_NAME=test", "GIT_COMMITTER_EMAIL=test@test.com", + ) + if out, err := cmd.CombinedOutput(); err != nil { + t.Fatalf("git %v failed: %v\n%s", args, err, out) + } + } + run("add", "-A") + run("commit", "-m", "agent commit") + + // CapturePatch should still capture the committed changes + patch, err := wt.CapturePatch() + if err != nil { + t.Fatalf("CapturePatch failed: %v", err) + } + if patch == "" { + t.Fatal("expected non-empty patch for committed changes") + } + if !strings.Contains(patch, "hello.txt") { + t.Error("patch should reference hello.txt") + } + if !strings.Contains(patch, "committed-change") { + t.Error("patch should contain the committed content") + } +} + +func TestApplyPatchEmpty(t *testing.T) { + // Empty patch should be a no-op + if err := ApplyPatch("/nonexistent", ""); err != nil { + t.Fatalf("ApplyPatch with empty patch should succeed: %v", err) + } +} + +func TestApplyPatchRoundTrip(t *testing.T) { + repo := setupGitRepo(t) + + wt, err := Create(repo) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + + // Make changes in worktree + if err := os.WriteFile(filepath.Join(wt.Dir, "hello.txt"), []byte("changed"), 0644); err != nil { + t.Fatal(err) + } + if err := os.WriteFile(filepath.Join(wt.Dir, "added.txt"), []byte("added"), 0644); err != nil { + t.Fatal(err) + } + + patch, err := wt.CapturePatch() + if err != nil { + t.Fatalf("CapturePatch failed: %v", err) + } + wt.Close() + + // Apply the patch back to the original repo + if err := ApplyPatch(repo, patch); err != nil { + t.Fatalf("ApplyPatch failed: %v", err) + } + + // Verify the changes were applied + content, err := os.ReadFile(filepath.Join(repo, "hello.txt")) + if err != nil { + t.Fatal(err) + } + if string(content) != "changed" { + t.Errorf("expected 'changed', got %q", content) + } + content, err = os.ReadFile(filepath.Join(repo, "added.txt")) + if err != nil { + t.Fatal(err) + } + if string(content) != "added" { + t.Errorf("expected 'added', got %q", content) + } +} + +func TestCheckPatchClean(t *testing.T) { + repo := setupGitRepo(t) + + wt, err := Create(repo) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + if err := os.WriteFile(filepath.Join(wt.Dir, "hello.txt"), []byte("changed"), 0644); err != nil { + t.Fatal(err) + } + patch, err := wt.CapturePatch() + if err != nil { + t.Fatal(err) + } + wt.Close() + + // Check should pass on unmodified repo + if err := CheckPatch(repo, patch); err != nil { + t.Fatalf("CheckPatch should succeed on clean repo: %v", err) + } +} + +func TestCheckPatchEmpty(t *testing.T) { + if err := CheckPatch("/nonexistent", ""); err != nil { + t.Fatalf("CheckPatch with empty patch should succeed: %v", err) + } +} + +func TestCheckPatchConflict(t *testing.T) { + repo := setupGitRepo(t) + + // Create a patch that modifies hello.txt + wt, err := Create(repo) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + if err := os.WriteFile(filepath.Join(wt.Dir, "hello.txt"), []byte("from-worktree"), 0644); err != nil { + t.Fatal(err) + } + patch, err := wt.CapturePatch() + if err != nil { + t.Fatal(err) + } + wt.Close() + + // Now modify hello.txt in the original repo to create a conflict + if err := os.WriteFile(filepath.Join(repo, "hello.txt"), []byte("conflicting-change"), 0644); err != nil { + t.Fatal(err) + } + + // CheckPatch should fail + if err := CheckPatch(repo, patch); err == nil { + t.Fatal("CheckPatch should fail when patch conflicts with 
working tree") + } +} + +func TestApplyPatchConflictFails(t *testing.T) { + repo := setupGitRepo(t) + + wt, err := Create(repo) + if err != nil { + t.Fatalf("Create failed: %v", err) + } + if err := os.WriteFile(filepath.Join(wt.Dir, "hello.txt"), []byte("from-worktree"), 0644); err != nil { + t.Fatal(err) + } + patch, err := wt.CapturePatch() + if err != nil { + t.Fatal(err) + } + wt.Close() + + // Create a conflict + if err := os.WriteFile(filepath.Join(repo, "hello.txt"), []byte("different"), 0644); err != nil { + t.Fatal(err) + } + + // ApplyPatch should fail + if err := ApplyPatch(repo, patch); err == nil { + t.Fatal("ApplyPatch should fail when patch conflicts") + } +} + +func TestSubmoduleRequiresFileProtocol(t *testing.T) { + tpl := `[submodule "test"] + path = test + %s = %s +` + tests := []struct { + name string + key string + url string + expected bool + }{ + {name: "file-scheme", key: "url", url: "file:///tmp/repo", expected: true}, + {name: "file-scheme-quoted", key: "url", url: `"file:///tmp/repo"`, expected: true}, + {name: "file-scheme-mixed-case-key", key: "URL", url: "file:///tmp/repo", expected: true}, + {name: "file-single-slash", key: "url", url: "file:/tmp/repo", expected: true}, + {name: "unix-absolute", key: "url", url: "/tmp/repo", expected: true}, + {name: "relative-dot", key: "url", url: "./repo", expected: true}, + {name: "relative-dotdot", key: "url", url: "../repo", expected: true}, + {name: "windows-drive-slash", key: "url", url: "C:/repo", expected: true}, + {name: "windows-drive-backslash", key: "url", url: `C:\repo`, expected: true}, + {name: "windows-unc", key: "url", url: `\\server\share\repo`, expected: true}, + {name: "https", key: "url", url: "https://example.com/repo.git", expected: false}, + } + + for _, tc := range tests { + t.Run(tc.name, func(t *testing.T) { + dir := t.TempDir() + gitmodules := filepath.Join(dir, ".gitmodules") + if err := os.WriteFile(gitmodules, fmt.Appendf(nil, tpl, tc.key, tc.url), 0644); err != nil { + t.Fatalf("write .gitmodules: %v", err) + } + if got := submoduleRequiresFileProtocol(dir); got != tc.expected { + t.Fatalf("expected %v, got %v", tc.expected, got) + } + }) + } +} + +func TestSubmoduleRequiresFileProtocolNested(t *testing.T) { + tpl := `[submodule "test"] + path = test + url = %s +` + dir := t.TempDir() + if err := os.WriteFile(filepath.Join(dir, ".gitmodules"), fmt.Appendf(nil, tpl, "https://example.com/repo.git"), 0644); err != nil { + t.Fatalf("write root .gitmodules: %v", err) + } + nestedPath := filepath.Join(dir, "sub", ".gitmodules") + if err := os.MkdirAll(filepath.Dir(nestedPath), 0755); err != nil { + t.Fatalf("mkdir nested: %v", err) + } + if err := os.WriteFile(nestedPath, fmt.Appendf(nil, tpl, "file:///tmp/repo"), 0644); err != nil { + t.Fatalf("write nested .gitmodules: %v", err) + } + + if !submoduleRequiresFileProtocol(dir) { + t.Fatalf("expected nested file URL to require file protocol") + } +}