diff --git a/.github/workflows/rust-release-prepare.yml b/.github/workflows/rust-release-prepare.yml index 0c4656c0b7a..b62a8550552 100644 --- a/.github/workflows/rust-release-prepare.yml +++ b/.github/workflows/rust-release-prepare.yml @@ -41,7 +41,7 @@ jobs: curl --http1.1 --fail --show-error --location "${headers[@]}" "${url}" | jq '.' > codex-rs/core/models.json - name: Open pull request (if changed) - uses: peter-evans/create-pull-request@v7 + uses: peter-evans/create-pull-request@v8 with: commit-message: "Update models.json" title: "Update models.json" diff --git a/.gitignore b/.gitignore index efd2f78dcc1..1a0154576ea 100644 --- a/.gitignore +++ b/.gitignore @@ -12,6 +12,7 @@ dist/ build/ out/ storybook-static/ +/codex-rs/**/target-test/ # ignore README for publishing codex-cli/README.md @@ -68,6 +69,9 @@ coverage/ personal/ .codexel/ +# lsp smoke files (temporary) +tmp/lsp-smoke/ + # os .DS_Store Thumbs.db diff --git a/AGENTS.md b/AGENTS.md index 2cefd008443..b270ccba818 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -23,6 +23,12 @@ In the `codex-rs` folder where the Rust code lives: - When cutting a release, update the release and upstream baseline SHAs in `CHANGELOG.md`, then rerun the generator. +## Releases (Codexel vs codex-rs) + +- Codexel release tags that trigger the publishing workflows use `codexel-vX.Y.Z` (for example `codexel-v0.1.4`). +- The upstream `codex-rs` Rust release workflow uses `rust-vX.Y.Z` tags. +- A plain `vX.Y.Z` tag exists historically in this repo, but does not match those workflow triggers. + ## Formatting, lint, tests Run `just fmt` (in `codex-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. @@ -105,6 +111,12 @@ If you don’t have the tool: - Prefer deep equals comparisons whenever possible. Perform `assert_eq!()` on entire objects, rather than individual fields. - Avoid mutating process environment in tests; prefer passing environment-derived flags or dependencies from above. 
+### Spawning workspace binaries in tests (Cargo vs Buck2) + +- Prefer `codex_utils_cargo_bin::cargo_bin("...")` over `assert_cmd::Command::cargo_bin(...)` or `escargot` when tests need to spawn first-party binaries. + - Under Buck2, `CARGO_BIN_EXE_*` may be project-relative (e.g. `buck-out/...`), which breaks if a test changes its working directory. `codex_utils_cargo_bin::cargo_bin` resolves to an absolute path first. +- When locating fixture files under Buck2, avoid `env!("CARGO_MANIFEST_DIR")` (Buck codegen sets it to `"."`). Prefer deriving paths from `codex_utils_cargo_bin::buck_project_root()` when needed. + ### Integration tests (core) - Prefer the utilities in `core_test_support::responses` when writing end-to-end Codex tests. diff --git a/CHANGELOG.md b/CHANGELOG.md index cc8dcdf6caa..24501e2fc99 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -11,14 +11,110 @@ edited between the markers. ### Highlights -- _No fork-only changes yet._ +- Resume: add turn-resume with persisted interrupted history, plus a guided prompt and usable options on small terminals. +- Agents: add `/subagent-model`, prefer mini subagents for repo exploration, and cancel spawned subagents on abort. +- Plan Mode: add `plan_explore` subagent exploration and allow `/plan` web search (when enabled). +- Plan Mode: switch `/plan` exploration to mini subagents; add `mini_subagent_*` config and deprecate `explore_model*`. +- TUI: remove `/explore-model`; add `/experimental` to TUI2. +- Core: strengthen `ask_user_question` guidance and inject SpawnSubagent guidance for execution. +- Experimental: add LSP diagnostics + navigation (autodetecting common servers like `rust-analyzer`, `gopls`, `pyright-langserver`, and `typescript-language-server`). +- Packaging/CI: add Homebrew cask workflow + macOS artifacts, and harden workflow conditionals/YAML formatting. 
### Details -_No fork-only changes yet._ +#### Features + +- feat: add experimental LSP diagnostics and navigation + +#### Fixes + +- Fix YAML heredoc indentation +- Fix rust-ci job if expressions +- Fix workflows: avoid secrets in if +- Fix app-server OverrideTurnContext init +- Fix subagent badges +- fix(lsp): respect `max_tool_diagnostics` and avoid leaked servers + +#### Documentation + +- docs: keep README ASCII-only +- docs: prettier +- docs: note Codexel release tag format + +#### TUI + +- tui: update status snapshots for v0.1.4 +- tui: prompt to continue on interrupted resume +- tui: keep resume options visible on small terminals +- tui: show LSP toggle in /experimental +- tui: surface lsp tool calls in history +- TUI: render `apply_patch` diagnostics as styled card +- TUI: avoid false interrupted prompt on resume +- tui: drop /explore-model; add /experimental to tui2 +- tui: default AskUserQuestion review selection to Submit +- tui: parse LSP diagnostics entries with colons +- tui: make /diagnostics open changed files +- tui: remove /diagnostics + +#### Core + +- core: strengthen `ask_user_question` guidance +- core: add web UI quality bar instructions +- core: inject SpawnSubagent guidance for exec +- core: enable LSP prompt diagnostics by default + +#### Plan Mode + +- plan: refine plan mode + variants prompts +- plan: allow web search in /plan +- Plan mode: add `plan_explore` subagent exploration +- Plan exploration: mini subagents + `mini_subagent_*` config + +#### Branding & Packaging +- codex-cli: bump codexel npm version to 0.1.3 +- Merge upstream/main into Codexel + +#### Chores + +- chore: update changelog +- chore: make changelog generator prettier-stable +- chore(app-server): drop unused `os_info` dev-dependency +- chore: regenerate changelog + +#### Other + +- changelog: update 0.1.3 for upstream merge +- Add Homebrew cask workflow and macOS artifacts +- Use macos-15-intel for `x86_64` builds +- release: bump versions to 0.1.4 +- changelog: 
update unreleased +- checkpoint: pre explore-model + cancelled subagents +- Add /explore-model and cancel subagents on abort +- Persist interrupted turn history on resume +- changelog: update unreleased for v0.1.4 +- Add /subagent-model override +- Add mini/explorer subagents and UI badges +- codex-lsp: autodetect .cmd language servers on Windows +- codex-core: prefer LSP tools over rg via developer instructions +- lsp: normalize paths and clarify tool docs +- lsp: prewarm workspace and configured servers +- lsp: prewarm autodetected servers +- LSP: enable publishDiagnostics and show `apply_patch` diagnostics in TUI +- Prefer mini subagents for repo exploration +- Tighten mini subagent exploration guidance +- changelog: update unreleased +- changelog: update unreleased +- lsp: autodetect PHP and Perl servers +- changelog: update unreleased +- Ignore tmp/lsp-smoke LSP test files +- Add LSP status command and improve /lsp UI +- codex-lsp: add C# language support +- lsp: handle server requests and pull diagnostics +- lsp: seed diagnostics by opening a source file +- lsp: wait for diagnostics during warmup; show pull-diags capability ## [0.1.3] - 2025-12-20 @@ -51,7 +147,7 @@ Release commit: 44f8df17aa11051fcf3919a9c16fe3b9c3296d66 #### Documentation - docs: clarify Codexel fork positioning -- docs: move What's different up and mention ask_user_question +- docs: move What's different up and mention `ask_user_question` #### TUI @@ -85,14 +181,14 @@ Release commit: 44f8df17aa11051fcf3919a9c16fe3b9c3296d66 - Skip macOS rust-ci jobs on PRs - Skip upstream npm staging in CI for forks - Format markdown and workflow files -- Add spawn_subagent tool -- Show spawn_subagent tool calls in history +- Add `spawn_subagent` tool +- Show `spawn_subagent` tool calls in history - subagent: stream token counts - release: bump workspace version to 0.1.3 - changelog: cut 0.1.3 - changelog: update unreleased - changelog: update -- Require spawn_subagent description and refresh snapshots +- 
Require `spawn_subagent` description and refresh snapshots - changelog: fix 0.1.3 release ranges - Merge upstream/main into v0.1.3 - changelog: filter upstream commits in generator @@ -191,7 +287,7 @@ Release commit: 3e57f558eff5b400292a6ad3c9df2721648aed6f #### Fixes -- fix(tui2): drop disabled_reason from ask_user_question rows +- fix(tui2): drop `disabled_reason` from `ask_user_question` rows #### Documentation @@ -206,7 +302,7 @@ Release commit: 3e57f558eff5b400292a6ad3c9df2721648aed6f - tui: auto-execute approved plans - tui: polish plan-variants progress - tui: fix /plan cursor position -- tui: add review step for ask_user_question +- tui: add review step for `ask_user_question` - tui: taller plan approval overlay and wrapped summary - tui: make Plan Mode placeholder generic @@ -244,5 +340,5 @@ Release commit: 3e57f558eff5b400292a6ad3c9df2721648aed6f #### Other -- Add ask_user_question tool +- Add `ask_user_question` tool diff --git a/codex-cli/package-lock.json b/codex-cli/package-lock.json index 71b51750366..b2073e46e9d 100644 --- a/codex-cli/package-lock.json +++ b/codex-cli/package-lock.json @@ -1,11 +1,11 @@ { "name": "@ixe1/codexel", - "version": "0.1.3", + "version": "0.1.4", "lockfileVersion": 3, "packages": { "": { "name": "@ixe1/codexel", - "version": "0.1.3", + "version": "0.1.4", "license": "Apache-2.0", "bin": { "codexel": "bin/codexel.js" diff --git a/codex-cli/package.json b/codex-cli/package.json index 4ea62322815..332dec7003d 100644 --- a/codex-cli/package.json +++ b/codex-cli/package.json @@ -1,6 +1,6 @@ { "name": "@ixe1/codexel", - "version": "0.1.3", + "version": "0.1.4", "license": "Apache-2.0", "bin": { "codexel": "bin/codexel.js" diff --git a/codex-cli/scripts/install_native_deps.py b/codex-cli/scripts/install_native_deps.py index 5e52bf85bd6..c54423db2c4 100755 --- a/codex-cli/scripts/install_native_deps.py +++ b/codex-cli/scripts/install_native_deps.py @@ -2,6 +2,7 @@ """Install Codex native binaries (Rust CLI plus ripgrep 
helpers).""" import argparse +from contextlib import contextmanager import json import os import shutil @@ -12,6 +13,7 @@ from dataclasses import dataclass from concurrent.futures import ThreadPoolExecutor, as_completed from pathlib import Path +import sys from typing import Iterable, Sequence from urllib.parse import urlparse from urllib.request import urlopen @@ -78,6 +80,45 @@ class BinaryComponent: RG_TARGET_TO_PLATFORM = {target: platform for target, platform in RG_TARGET_PLATFORM_PAIRS} DEFAULT_RG_TARGETS = [target for target, _ in RG_TARGET_PLATFORM_PAIRS] +# urllib.request.urlopen() defaults to no timeout (can hang indefinitely), which is painful in CI. +DOWNLOAD_TIMEOUT_SECS = 60 + + +def _gha_enabled() -> bool: + # GitHub Actions supports "workflow commands" (e.g. ::group:: / ::error::) that make logs + # much easier to scan: groups collapse noisy sections and error annotations surface the + # failure in the UI without changing the actual exception/traceback output. + return os.environ.get("GITHUB_ACTIONS") == "true" + + +def _gha_escape(value: str) -> str: + # Workflow commands require percent/newline escaping. + return value.replace("%", "%25").replace("\r", "%0D").replace("\n", "%0A") + + +def _gha_error(*, title: str, message: str) -> None: + # Emit a GitHub Actions error annotation. This does not replace stdout/stderr logs; it just + # adds a prominent summary line to the job UI so the root cause is easier to spot. + if not _gha_enabled(): + return + print( + f"::error title={_gha_escape(title)}::{_gha_escape(message)}", + flush=True, + ) + + +@contextmanager +def _gha_group(title: str): + # Wrap a block in a collapsible log group on GitHub Actions. Outside of GHA this is a no-op + # so local output remains unchanged. 
+ if _gha_enabled(): + print(f"::group::{_gha_escape(title)}", flush=True) + try: + yield + finally: + if _gha_enabled(): + print("::endgroup::", flush=True) + def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser(description="Install native Codex binaries.") @@ -139,18 +180,20 @@ def main() -> int: workflow_id = workflow_url.rstrip("/").split("/")[-1] print(f"Downloading native artifacts from workflow {workflow_id}...") - with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str: - artifacts_dir = Path(artifacts_dir_str) - _download_artifacts(workflow_id, artifacts_dir, repo=args.repo) - install_binary_components( - artifacts_dir, - vendor_dir, - [BINARY_COMPONENTS[name] for name in components if name in BINARY_COMPONENTS], - ) + with _gha_group(f"Download native artifacts from workflow {workflow_id}"): + with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str: + artifacts_dir = Path(artifacts_dir_str) + _download_artifacts(workflow_id, artifacts_dir, repo=args.repo) + install_binary_components( + artifacts_dir, + vendor_dir, + [BINARY_COMPONENTS[name] for name in components if name in BINARY_COMPONENTS], + ) if "rg" in components: - print("Fetching ripgrep binaries...") - fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST) + with _gha_group("Fetch ripgrep binaries"): + print("Fetching ripgrep binaries...") + fetch_rg(vendor_dir, DEFAULT_RG_TARGETS, manifest_path=RG_MANIFEST) print(f"Installed native dependencies into {vendor_dir}") return 0 @@ -211,7 +254,14 @@ def fetch_rg( for future in as_completed(future_map): target = future_map[future] - results[target] = future.result() + try: + results[target] = future.result() + except Exception as exc: + _gha_error( + title="ripgrep install failed", + message=f"target={target} error={exc!r}", + ) + raise RuntimeError(f"Failed to install ripgrep for target {target}.") from exc print(f" installed ripgrep for {target}") return 
[results[target] for target in targets] @@ -309,6 +359,8 @@ def _fetch_single_rg( url = providers[0]["url"] archive_format = platform_info.get("format", "zst") archive_member = platform_info.get("path") + digest = platform_info.get("digest") + expected_size = platform_info.get("size") dest_dir = vendor_dir / target / "path" dest_dir.mkdir(parents=True, exist_ok=True) @@ -321,10 +373,32 @@ def _fetch_single_rg( tmp_dir = Path(tmp_dir_str) archive_filename = os.path.basename(urlparse(url).path) download_path = tmp_dir / archive_filename - _download_file(url, download_path) + print( + f" downloading ripgrep for {target} ({platform_key}) from {url}", + flush=True, + ) + try: + _download_file(url, download_path) + except Exception as exc: + _gha_error( + title="ripgrep download failed", + message=f"target={target} platform={platform_key} url={url} error={exc!r}", + ) + raise RuntimeError( + "Failed to download ripgrep " + f"(target={target}, platform={platform_key}, format={archive_format}, " + f"expected_size={expected_size!r}, digest={digest!r}, url={url}, dest={download_path})." + ) from exc dest.unlink(missing_ok=True) - extract_archive(download_path, archive_format, archive_member, dest) + try: + extract_archive(download_path, archive_format, archive_member, dest) + except Exception as exc: + raise RuntimeError( + "Failed to extract ripgrep " + f"(target={target}, platform={platform_key}, format={archive_format}, " + f"member={archive_member!r}, url={url}, archive={download_path})." 
+ ) from exc if not is_windows: dest.chmod(0o755) @@ -334,7 +408,9 @@ def _fetch_single_rg( def _download_file(url: str, dest: Path) -> None: dest.parent.mkdir(parents=True, exist_ok=True) - with urlopen(url) as response, open(dest, "wb") as out: + dest.unlink(missing_ok=True) + + with urlopen(url, timeout=DOWNLOAD_TIMEOUT_SECS) as response, open(dest, "wb") as out: shutil.copyfileobj(response, out) diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock index 5b49e49ce9a..b7e6e6c0ce8 100644 --- a/codex-rs/Cargo.lock +++ b/codex-rs/Cargo.lock @@ -326,15 +326,15 @@ checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "app_test_support" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", - "assert_cmd", "base64", "chrono", "codex-app-server-protocol", "codex-core", "codex-protocol", + "codex-utils-cargo-bin", "core_test_support", "serde", "serde_json", @@ -365,6 +365,12 @@ dependencies = [ "x11rb", ] +[[package]] +name = "arc-swap" +version = "1.7.1" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "69f7f8c3906b62b754cd5326047894316021dcfe5a194c8ea52bdd94934a3457" + [[package]] name = "arrayvec" version = "0.7.6" @@ -883,9 +889,9 @@ dependencies = [ [[package]] name = "clap" -version = "4.5.47" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "7eac00902d9d136acd712710d71823fb8ac8004ca445a89e73a41d45aa712931" +checksum = "c9e340e012a1bf4935f5282ed1436d1489548e8f72308207ea5df0e23d2d03f8" dependencies = [ "clap_builder", "clap_derive", @@ -893,9 +899,9 @@ dependencies = [ [[package]] name = "clap_builder" -version = "4.5.47" +version = "4.5.53" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2ad9bbf750e73b5884fb8a211a9424a1906c1e156724260fdae972f31d70e1d6" +checksum = "d76b5d13eaa18c901fd2f7fca939fefe3a0727a953561fefdf3b2922b8569d00" dependencies = [ "anstream", "anstyle", @@ -915,9 +921,9 @@ dependencies = [ 
[[package]] name = "clap_derive" -version = "4.5.47" +version = "4.5.49" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "bbfd7eae0b0f1a6e63d4b13c9c478de77c2eb546fba158ad50b4203dc24b9f9c" +checksum = "2a0b5487afeab2deb2ff4e03a807ad1a03ac532ff5a2cee5d86884440c7f7671" dependencies = [ "heck", "proc-macro2", @@ -948,7 +954,7 @@ checksum = "e9b18233253483ce2f65329a24072ec414db782531bdbb7d0bbc4bd2ce6b7e21" [[package]] name = "codex-ansi-escape" -version = "0.1.3" +version = "0.1.4" dependencies = [ "ansi-to-tui", "ratatui", @@ -957,7 +963,7 @@ dependencies = [ [[package]] name = "codex-api" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "assert_matches", @@ -983,11 +989,10 @@ dependencies = [ [[package]] name = "codex-app-server" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "app_test_support", - "assert_cmd", "base64", "chrono", "codex-app-server-protocol", @@ -998,13 +1003,13 @@ dependencies = [ "codex-feedback", "codex-file-search", "codex-login", + "codex-lsp", "codex-protocol", "codex-rmcp-client", "codex-utils-absolute-path", "codex-utils-json-to-toml", "core_test_support", "mcp-types", - "os_info", "pretty_assertions", "serde", "serde_json", @@ -1021,7 +1026,7 @@ dependencies = [ [[package]] name = "codex-app-server-protocol" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "clap", @@ -1040,7 +1045,7 @@ dependencies = [ [[package]] name = "codex-app-server-test-client" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "clap", @@ -1053,11 +1058,12 @@ dependencies = [ [[package]] name = "codex-apply-patch" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "assert_cmd", "assert_matches", + "codex-utils-cargo-bin", "pretty_assertions", "similar", "tempfile", @@ -1068,7 +1074,7 @@ dependencies = [ [[package]] name = "codex-arg0" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "codex-apply-patch", @@ -1081,7 +1087,7 @@ dependencies = [ 
[[package]] name = "codex-async-utils" -version = "0.1.3" +version = "0.1.4" dependencies = [ "async-trait", "pretty_assertions", @@ -1091,7 +1097,7 @@ dependencies = [ [[package]] name = "codex-backend-client" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "codex-backend-openapi-models", @@ -1105,7 +1111,7 @@ dependencies = [ [[package]] name = "codex-backend-openapi-models" -version = "0.1.3" +version = "0.1.4" dependencies = [ "serde", "serde_json", @@ -1114,7 +1120,7 @@ dependencies = [ [[package]] name = "codex-chatgpt" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "clap", @@ -1129,7 +1135,7 @@ dependencies = [ [[package]] name = "codex-cli" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "assert_cmd", @@ -1146,6 +1152,7 @@ dependencies = [ "codex-exec", "codex-execpolicy", "codex-login", + "codex-lsp", "codex-mcp-server", "codex-process-hardening", "codex-protocol", @@ -1155,6 +1162,7 @@ dependencies = [ "codex-tui", "codex-tui2", "codex-utils-absolute-path", + "codex-utils-cargo-bin", "codex-windows-sandbox", "ctor 0.5.0", "libc", @@ -1172,7 +1180,7 @@ dependencies = [ [[package]] name = "codex-client" -version = "0.1.3" +version = "0.1.4" dependencies = [ "async-trait", "bytes", @@ -1194,7 +1202,7 @@ dependencies = [ [[package]] name = "codex-cloud-tasks" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "async-trait", @@ -1223,7 +1231,7 @@ dependencies = [ [[package]] name = "codex-cloud-tasks-client" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "async-trait", @@ -1238,7 +1246,7 @@ dependencies = [ [[package]] name = "codex-common" -version = "0.1.3" +version = "0.1.4" dependencies = [ "clap", "codex-core", @@ -1253,9 +1261,10 @@ dependencies = [ [[package]] name = "codex-core" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", + "arc-swap", "assert_cmd", "assert_matches", "async-channel", @@ -1274,10 +1283,12 @@ dependencies = [ "codex-file-search", 
"codex-git", "codex-keyring-store", + "codex-lsp", "codex-otel", "codex-protocol", "codex-rmcp-client", "codex-utils-absolute-path", + "codex-utils-cargo-bin", "codex-utils-pty", "codex-utils-readiness", "codex-utils-string", @@ -1327,7 +1338,7 @@ dependencies = [ "tokio", "tokio-util", "toml 0.9.5", - "toml_edit", + "toml_edit 0.24.0+spec-1.1.0", "tracing", "tracing-subscriber", "tracing-test", @@ -1343,7 +1354,7 @@ dependencies = [ [[package]] name = "codex-exec" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "assert_cmd", @@ -1353,6 +1364,7 @@ dependencies = [ "codex-core", "codex-protocol", "codex-utils-absolute-path", + "codex-utils-cargo-bin", "core_test_support", "libc", "mcp-types", @@ -1375,14 +1387,14 @@ dependencies = [ [[package]] name = "codex-exec-server" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", - "assert_cmd", "async-trait", "clap", "codex-core", "codex-execpolicy", + "codex-utils-cargo-bin", "exec_server_test_support", "libc", "maplit", @@ -1402,7 +1414,7 @@ dependencies = [ [[package]] name = "codex-execpolicy" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "clap", @@ -1418,7 +1430,7 @@ dependencies = [ [[package]] name = "codex-execpolicy-legacy" -version = "0.1.3" +version = "0.1.4" dependencies = [ "allocative", "anyhow", @@ -1438,7 +1450,7 @@ dependencies = [ [[package]] name = "codex-feedback" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "codex-protocol", @@ -1449,7 +1461,7 @@ dependencies = [ [[package]] name = "codex-file-search" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "clap", @@ -1463,7 +1475,7 @@ dependencies = [ [[package]] name = "codex-git" -version = "0.1.3" +version = "0.1.4" dependencies = [ "assert_matches", "once_cell", @@ -1479,7 +1491,7 @@ dependencies = [ [[package]] name = "codex-keyring-store" -version = "0.1.3" +version = "0.1.4" dependencies = [ "keyring", "tracing", @@ -1487,7 +1499,7 @@ dependencies = [ [[package]] name 
= "codex-linux-sandbox" -version = "0.1.3" +version = "0.1.4" dependencies = [ "clap", "codex-core", @@ -1501,7 +1513,7 @@ dependencies = [ [[package]] name = "codex-lmstudio" -version = "0.1.3" +version = "0.1.4" dependencies = [ "codex-core", "reqwest", @@ -1514,7 +1526,7 @@ dependencies = [ [[package]] name = "codex-login" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "base64", @@ -1536,12 +1548,31 @@ dependencies = [ "wiremock", ] +[[package]] +name = "codex-lsp" +version = "0.1.4" +dependencies = [ + "anyhow", + "dunce", + "ignore", + "notify", + "pretty_assertions", + "serde", + "serde_json", + "tempfile", + "thiserror 2.0.17", + "tokio", + "tracing", + "url", + "which", + "wildmatch", +] + [[package]] name = "codex-mcp-server" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", - "assert_cmd", "codex-arg0", "codex-common", "codex-core", @@ -1565,7 +1596,7 @@ dependencies = [ [[package]] name = "codex-ollama" -version = "0.1.3" +version = "0.1.4" dependencies = [ "assert_matches", "async-stream", @@ -1581,7 +1612,7 @@ dependencies = [ [[package]] name = "codex-otel" -version = "0.1.3" +version = "0.1.4" dependencies = [ "chrono", "codex-api", @@ -1608,7 +1639,7 @@ dependencies = [ [[package]] name = "codex-process-hardening" -version = "0.1.3" +version = "0.1.4" dependencies = [ "libc", "pretty_assertions", @@ -1616,7 +1647,7 @@ dependencies = [ [[package]] name = "codex-protocol" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "codex-git", @@ -1643,7 +1674,7 @@ dependencies = [ [[package]] name = "codex-responses-api-proxy" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "clap", @@ -1659,14 +1690,14 @@ dependencies = [ [[package]] name = "codex-rmcp-client" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "axum", "codex-keyring-store", "codex-protocol", + "codex-utils-cargo-bin", "dirs", - "escargot", "futures", "keyring", "mcp-types", @@ -1689,10 +1720,11 @@ dependencies = [ 
[[package]] name = "codex-stdio-to-uds" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "assert_cmd", + "codex-utils-cargo-bin", "pretty_assertions", "tempfile", "uds_windows", @@ -1700,7 +1732,7 @@ dependencies = [ [[package]] name = "codex-tui" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "arboard", @@ -1717,6 +1749,7 @@ dependencies = [ "codex-feedback", "codex-file-search", "codex-login", + "codex-lsp", "codex-protocol", "codex-utils-absolute-path", "codex-windows-sandbox", @@ -1749,6 +1782,7 @@ dependencies = [ "supports-color 3.0.2", "tempfile", "textwrap 0.16.2", + "thiserror 2.0.17", "tokio", "tokio-stream", "tokio-util", @@ -1763,11 +1797,14 @@ dependencies = [ "url", "uuid", "vt100", + "which", + "windows-sys 0.52.0", + "winsplit", ] [[package]] name = "codex-tui2" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "arboard", @@ -1785,6 +1822,7 @@ dependencies = [ "codex-feedback", "codex-file-search", "codex-login", + "codex-lsp", "codex-protocol", "codex-tui", "codex-utils-absolute-path", @@ -1836,7 +1874,7 @@ dependencies = [ [[package]] name = "codex-utils-absolute-path" -version = "0.1.3" +version = "0.1.4" dependencies = [ "path-absolutize", "schemars 0.8.22", @@ -1848,16 +1886,24 @@ dependencies = [ [[package]] name = "codex-utils-cache" -version = "0.1.3" +version = "0.1.4" dependencies = [ "lru 0.16.2", "sha1", "tokio", ] +[[package]] +name = "codex-utils-cargo-bin" +version = "0.1.4" +dependencies = [ + "assert_cmd", + "thiserror 2.0.17", +] + [[package]] name = "codex-utils-image" -version = "0.1.3" +version = "0.1.4" dependencies = [ "base64", "codex-utils-cache", @@ -1869,7 +1915,7 @@ dependencies = [ [[package]] name = "codex-utils-json-to-toml" -version = "0.1.3" +version = "0.1.4" dependencies = [ "pretty_assertions", "serde_json", @@ -1878,7 +1924,7 @@ dependencies = [ [[package]] name = "codex-utils-pty" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "filedescriptor", 
@@ -1892,7 +1938,7 @@ dependencies = [ [[package]] name = "codex-utils-readiness" -version = "0.1.3" +version = "0.1.4" dependencies = [ "assert_matches", "async-trait", @@ -1903,11 +1949,11 @@ dependencies = [ [[package]] name = "codex-utils-string" -version = "0.1.3" +version = "0.1.4" [[package]] name = "codex-windows-sandbox" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "base64", @@ -2050,7 +2096,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_test_support" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", "assert_cmd", @@ -2058,6 +2104,7 @@ dependencies = [ "codex-core", "codex-protocol", "codex-utils-absolute-path", + "codex-utils-cargo-bin", "notify", "pretty_assertions", "regex-lite", @@ -2774,11 +2821,11 @@ dependencies = [ [[package]] name = "exec_server_test_support" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", - "assert_cmd", "codex-core", + "codex-utils-cargo-bin", "rmcp", "serde_json", "tokio", @@ -3744,17 +3791,6 @@ dependencies = [ "rustversion", ] -[[package]] -name = "io-uring" -version = "0.7.9" -source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "d93587f37623a1a17d94ef2bc9ada592f5465fe7732084ab7beefabe5c77c0c4" -dependencies = [ - "bitflags 2.10.0", - "cfg-if", - "libc", -] - [[package]] name = "ipnet" version = "2.11.0" @@ -3954,9 +3990,9 @@ dependencies = [ [[package]] name = "landlock" -version = "0.4.2" +version = "0.4.4" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "b3d2ef408b88e913bfc6594f5e693d57676f6463ded7d8bf994175364320c706" +checksum = "49fefd6652c57d68aaa32544a4c0e642929725bdc1fd929367cdeb673ab81088" dependencies = [ "enumflags2", "libc", @@ -4133,7 +4169,7 @@ checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] name = "mcp-types" -version = "0.1.3" +version = "0.1.4" dependencies = [ "schemars 0.8.22", "serde", @@ -4143,12 
+4179,12 @@ dependencies = [ [[package]] name = "mcp_test_support" -version = "0.1.3" +version = "0.1.4" dependencies = [ "anyhow", - "assert_cmd", "codex-core", "codex-mcp-server", + "codex-utils-cargo-bin", "core_test_support", "mcp-types", "os_info", @@ -4636,9 +4672,9 @@ dependencies = [ [[package]] name = "openssl-sys" -version = "0.9.109" +version = "0.9.111" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "90096e2e47630d78b7d1c20952dc621f957103f8bc2c8359ec81290d75238571" +checksum = "82cab2d520aa75e3c58898289429321eb788c3106963d0dc886ec7a5f4adc321" dependencies = [ "cc", "libc", @@ -5095,7 +5131,7 @@ version = "3.4.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "219cb19e96be00ab2e37d6e299658a0cfa83e52429179969b0f0121b4ac46983" dependencies = [ - "toml_edit", + "toml_edit 0.23.10+spec-1.0.0", ] [[package]] @@ -5440,9 +5476,9 @@ dependencies = [ [[package]] name = "regex-lite" -version = "0.1.7" +version = "0.1.8" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "943f41321c63ef1c92fd763bfe054d2668f7f225a5c29f0105903dc2fc04ba30" +checksum = "8d942b98df5e658f56f20d592c7f868833fe38115e65c33003d8cd224b0155da" [[package]] name = "regex-syntax" @@ -6725,9 +6761,9 @@ dependencies = [ [[package]] name = "test-log" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "1e33b98a582ea0be1168eba097538ee8dd4bbe0f2b01b22ac92ea30054e5be7b" +checksum = "37d53ac171c92a39e4769491c4b4dde7022c60042254b5fc044ae409d34a24d4" dependencies = [ "env_logger", "test-log-macros", @@ -6736,9 +6772,9 @@ dependencies = [ [[package]] name = "test-log-macros" -version = "0.2.18" +version = "0.2.19" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "451b374529930d7601b1eef8d32bc79ae870b6079b069401709c2a8bf9e75f36" +checksum = "be35209fd0781c5401458ab66e4f98accf63553e8fae7425503e92fdd319783b" dependencies = [ 
"proc-macro2", "quote", @@ -6909,29 +6945,26 @@ checksum = "1f3ccbac311fea05f86f61904b462b55fb3df8837a366dfc601a0161d0532f20" [[package]] name = "tokio" -version = "1.47.1" +version = "1.48.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "89e49afdadebb872d3145a5638b59eb0691ea23e46ca484037cfab3b76b95038" +checksum = "ff360e02eab121e0bc37a2d3b4d4dc622e6eda3a8e5253d5435ecf5bd4c68408" dependencies = [ - "backtrace", "bytes", - "io-uring", "libc", "mio", "parking_lot", "pin-project-lite", "signal-hook-registry", - "slab", "socket2 0.6.1", "tokio-macros", - "windows-sys 0.59.0", + "windows-sys 0.61.1", ] [[package]] name = "tokio-macros" -version = "2.5.0" +version = "2.6.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6e06d43f1345a3bcd39f6a56dbb7dcab2ba47e68e8ac134855e7e2bdbaf8cab8" +checksum = "af407857209536a95c8e56f8231ef2c2e2aff839b22e07a1ffcbc617e9db9fa5" dependencies = [ "proc-macro2", "quote", @@ -7024,18 +7057,30 @@ dependencies = [ [[package]] name = "toml_datetime" -version = "0.7.3" +version = "0.7.5+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "f2cdb639ebbc97961c51720f858597f7f24c4fc295327923af55b74c3c724533" +checksum = "92e1cfed4a3038bc5a127e35a2d360f145e1f4b971b551a2ba5fd7aedf7e1347" dependencies = [ "serde_core", ] [[package]] name = "toml_edit" -version = "0.23.7" +version = "0.23.10+spec-1.0.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "84c8b9f757e028cee9fa244aea147aab2a9ec09d5325a9b01e0a49730c2b5269" +dependencies = [ + "indexmap 2.12.0", + "toml_datetime", + "toml_parser", + "winnow", +] + +[[package]] +name = "toml_edit" +version = "0.24.0+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "6485ef6d0d9b5d0ec17244ff7eb05310113c3f316f2d14200d4de56b3cb98f8d" +checksum = "8c740b185920170a6d9191122cafef7010bd6270a3824594bff6784c04d7f09e" dependencies = [ "indexmap 2.12.0", 
"toml_datetime", @@ -7046,18 +7091,18 @@ dependencies = [ [[package]] name = "toml_parser" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "c0cbe268d35bdb4bb5a56a2de88d0ad0eb70af5384a99d648cd4b3d04039800e" +checksum = "a3198b4b0a8e11f09dd03e133c0280504d0801269e9afa46362ffde1cbeebf44" dependencies = [ "winnow", ] [[package]] name = "toml_writer" -version = "1.0.4" +version = "1.0.6+spec-1.1.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "df8b2b54733674ad286d16267dcfc7a71ed5c776e4ac7aa3c3e2561f7c637bf2" +checksum = "ab16f14aed21ee8bfd8ec22513f7287cd4a91aa92e44edfe2c17ddd004e92607" [[package]] name = "tonic" @@ -7225,9 +7270,9 @@ dependencies = [ [[package]] name = "tracing-subscriber" -version = "0.3.20" +version = "0.3.22" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "2054a14f5307d601f88daf0553e1cbf472acc4f2c51afab632431cdcd72124d5" +checksum = "2f30143827ddab0d256fd843b7a66d164e9f271cfa0dde49142c5ca0ca291f1e" dependencies = [ "matchers", "nu-ansi-term", @@ -8372,6 +8417,12 @@ version = "0.0.19" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" +[[package]] +name = "winsplit" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "3ab703352da6a72f35c39a533526393725640575bb211f61987a2748323ad956" + [[package]] name = "wiremock" version = "0.6.5" diff --git a/codex-rs/Cargo.toml b/codex-rs/Cargo.toml index f275c162e6d..bcdf576a941 100644 --- a/codex-rs/Cargo.toml +++ b/codex-rs/Cargo.toml @@ -21,6 +21,7 @@ members = [ "execpolicy-legacy", "keyring-store", "file-search", + "lsp", "linux-sandbox", "lmstudio", "login", @@ -36,6 +37,7 @@ members = [ "tui", "tui2", "utils/absolute-path", + "utils/cargo-bin", "utils/git", "utils/cache", "utils/image", @@ -49,7 +51,7 @@ members = [ resolver = "2" 
[workspace.package] -version = "0.1.3" +version = "0.1.4" # Track the edition for all workspace crates in one place. Individual # crates can still override this value, but keeping it here means new # crates created with `cargo new -w ...` automatically inherit the 2024 @@ -79,6 +81,7 @@ codex-file-search = { path = "file-search" } codex-git = { path = "utils/git" } codex-keyring-store = { path = "keyring-store" } codex-linux-sandbox = { path = "linux-sandbox" } +codex-lsp = { path = "lsp" } codex-lmstudio = { path = "lmstudio" } codex-login = { path = "login" } codex-mcp-server = { path = "mcp-server" } @@ -93,6 +96,7 @@ codex-tui = { path = "tui" } codex-tui2 = { path = "tui2" } codex-utils-absolute-path = { path = "utils/absolute-path" } codex-utils-cache = { path = "utils/cache" } +codex-utils-cargo-bin = { path = "utils/cargo-bin" } codex-utils-image = { path = "utils/image" } codex-utils-json-to-toml = { path = "utils/json-to-toml" } codex-utils-pty = { path = "utils/pty" } @@ -146,7 +150,7 @@ indexmap = "2.12.0" insta = "1.44.3" itertools = "0.14.0" keyring = { version = "3.6", default-features = false } -landlock = "0.4.1" +landlock = "0.4.4" lazy_static = "1" libc = "0.2.177" log = "0.4" @@ -176,7 +180,7 @@ rand = "0.9" ratatui = "0.29.0" ratatui-macros = "0.6.0" regex = "1.12.2" -regex-lite = "0.1.7" +regex-lite = "0.1.8" reqwest = "0.12" rmcp = { version = "0.12.0", default-features = false } schemars = "0.8.22" @@ -198,7 +202,7 @@ strum_macros = "0.27.2" supports-color = "3.0.2" sys-locale = "0.3.2" tempfile = "3.23.0" -test-log = "0.2.18" +test-log = "0.2.19" textwrap = "0.16.2" thiserror = "2.0.17" time = "0.3" @@ -208,11 +212,11 @@ tokio-stream = "0.1.17" tokio-test = "0.4" tokio-util = "0.7.16" toml = "0.9.5" -toml_edit = "0.23.5" +toml_edit = "0.24.0" tonic = "0.13.1" tracing = "0.1.43" tracing-appender = "0.2.3" -tracing-subscriber = "0.3.20" +tracing-subscriber = "0.3.22" tracing-test = "0.2.5" tree-sitter = "0.25.10" tree-sitter-bash = "0.25" 
diff --git a/codex-rs/app-server-protocol/src/protocol/common.rs b/codex-rs/app-server-protocol/src/protocol/common.rs index 83fa53b9973..d1590308b26 100644 --- a/codex-rs/app-server-protocol/src/protocol/common.rs +++ b/codex-rs/app-server-protocol/src/protocol/common.rs @@ -287,6 +287,22 @@ client_request_definitions! { params: FuzzyFileSearchParams, response: FuzzyFileSearchResponse, }, + LspDiagnosticsGet => "lsp/diagnostics/get" { + params: v2::LspDiagnosticsGetParams, + response: v2::LspDiagnosticsGetResponse, + }, + LspDefinition => "lsp/definition" { + params: v2::LspDefinitionParams, + response: v2::LspDefinitionResponse, + }, + LspReferences => "lsp/references" { + params: v2::LspReferencesParams, + response: v2::LspReferencesResponse, + }, + LspDocumentSymbols => "lsp/documentSymbols" { + params: v2::LspDocumentSymbolsParams, + response: v2::LspDocumentSymbolsResponse, + }, /// Execute a command (argv vector) under the server's sandbox. ExecOneOffCommand { params: v1::ExecOneOffCommandParams, @@ -522,6 +538,7 @@ server_notification_definitions! 
{ TurnStarted => "turn/started" (v2::TurnStartedNotification), TurnCompleted => "turn/completed" (v2::TurnCompletedNotification), TurnDiffUpdated => "turn/diff/updated" (v2::TurnDiffUpdatedNotification), + LspDiagnosticsUpdated => "lsp/diagnostics/updated" (v2::LspDiagnosticsUpdatedNotification), TurnPlanUpdated => "turn/plan/updated" (v2::TurnPlanUpdatedNotification), ItemStarted => "item/started" (v2::ItemStartedNotification), ItemCompleted => "item/completed" (v2::ItemCompletedNotification), diff --git a/codex-rs/app-server-protocol/src/protocol/thread_history.rs b/codex-rs/app-server-protocol/src/protocol/thread_history.rs index ba1e6261cc6..19937ee133d 100644 --- a/codex-rs/app-server-protocol/src/protocol/thread_history.rs +++ b/codex-rs/app-server-protocol/src/protocol/thread_history.rs @@ -1,13 +1,38 @@ +use crate::protocol::v2::CommandAction; +use crate::protocol::v2::CommandExecutionStatus; +use crate::protocol::v2::FileUpdateChange; +use crate::protocol::v2::McpToolCallError; +use crate::protocol::v2::McpToolCallResult; +use crate::protocol::v2::McpToolCallStatus; +use crate::protocol::v2::PatchApplyStatus; +use crate::protocol::v2::PatchChangeKind; use crate::protocol::v2::ThreadItem; use crate::protocol::v2::Turn; use crate::protocol::v2::TurnError; use crate::protocol::v2::TurnStatus; use crate::protocol::v2::UserInput; +use codex_protocol::protocol::AgentMessageDeltaEvent; +use codex_protocol::protocol::AgentReasoningDeltaEvent; use codex_protocol::protocol::AgentReasoningEvent; +use codex_protocol::protocol::AgentReasoningRawContentDeltaEvent; use codex_protocol::protocol::AgentReasoningRawContentEvent; use codex_protocol::protocol::EventMsg; +use codex_protocol::protocol::ExecCommandBeginEvent; +use codex_protocol::protocol::ExecCommandEndEvent; +use codex_protocol::protocol::ExecCommandOutputDeltaEvent; +use codex_protocol::protocol::FileChange; +use codex_protocol::protocol::McpToolCallBeginEvent; +use 
codex_protocol::protocol::McpToolCallEndEvent; +use codex_protocol::protocol::PatchApplyBeginEvent; +use codex_protocol::protocol::PatchApplyEndEvent; use codex_protocol::protocol::TurnAbortedEvent; use codex_protocol::protocol::UserMessageEvent; +use codex_protocol::protocol::ViewImageToolCallEvent; +use codex_protocol::protocol::WebSearchBeginEvent; +use codex_protocol::protocol::WebSearchEndEvent; +use serde_json::Value as JsonValue; +use std::collections::HashMap; +use std::time::Duration; /// Convert persisted [`EventMsg`] entries into a sequence of [`Turn`] values. /// @@ -27,6 +52,13 @@ struct ThreadHistoryBuilder { current_turn: Option, next_turn_index: i64, next_item_index: i64, + streaming_agent_message: bool, + streaming_reasoning_summary: bool, + streaming_reasoning_raw: bool, + exec_items: HashMap, + exec_output: HashMap, + patch_items: HashMap, + mcp_items: HashMap, } impl ThreadHistoryBuilder { @@ -36,6 +68,13 @@ impl ThreadHistoryBuilder { current_turn: None, next_turn_index: 1, next_item_index: 1, + streaming_agent_message: false, + streaming_reasoning_summary: false, + streaming_reasoning_raw: false, + exec_items: HashMap::new(), + exec_output: HashMap::new(), + patch_items: HashMap::new(), + mcp_items: HashMap::new(), } } @@ -50,10 +89,27 @@ impl ThreadHistoryBuilder { match event { EventMsg::UserMessage(payload) => self.handle_user_message(payload), EventMsg::AgentMessage(payload) => self.handle_agent_message(payload.message.clone()), + EventMsg::AgentMessageDelta(payload) => self.handle_agent_message_delta(payload), EventMsg::AgentReasoning(payload) => self.handle_agent_reasoning(payload), + EventMsg::AgentReasoningDelta(payload) => self.handle_agent_reasoning_delta(payload), EventMsg::AgentReasoningRawContent(payload) => { self.handle_agent_reasoning_raw_content(payload) } + EventMsg::AgentReasoningRawContentDelta(payload) => { + self.handle_agent_reasoning_raw_content_delta(payload) + } + EventMsg::ExecCommandBegin(payload) => 
self.handle_exec_command_begin(payload), + EventMsg::ExecCommandOutputDelta(payload) => { + self.handle_exec_command_output_delta(payload) + } + EventMsg::ExecCommandEnd(payload) => self.handle_exec_command_end(payload), + EventMsg::PatchApplyBegin(payload) => self.handle_patch_apply_begin(payload), + EventMsg::PatchApplyEnd(payload) => self.handle_patch_apply_end(payload), + EventMsg::McpToolCallBegin(payload) => self.handle_mcp_tool_call_begin(payload), + EventMsg::McpToolCallEnd(payload) => self.handle_mcp_tool_call_end(payload), + EventMsg::WebSearchBegin(payload) => self.handle_web_search_begin(payload), + EventMsg::WebSearchEnd(payload) => self.handle_web_search_end(payload), + EventMsg::ViewImageToolCall(payload) => self.handle_view_image_tool_call(payload), EventMsg::TokenCount(_) => {} EventMsg::EnteredReviewMode(_) => {} EventMsg::ExitedReviewMode(_) => {} @@ -77,24 +133,64 @@ impl ThreadHistoryBuilder { return; } + if self.streaming_agent_message + && let Some(ThreadItem::AgentMessage { text: existing, .. }) = + self.ensure_turn().items.last_mut() + { + *existing = text; + self.streaming_agent_message = false; + return; + } + + self.streaming_agent_message = false; let id = self.next_item_id(); self.ensure_turn() .items .push(ThreadItem::AgentMessage { id, text }); } + fn handle_agent_message_delta(&mut self, payload: &AgentMessageDeltaEvent) { + if payload.delta.is_empty() { + return; + } + + if self.streaming_agent_message + && let Some(ThreadItem::AgentMessage { text, .. 
}) = self.ensure_turn().items.last_mut() + { + text.push_str(&payload.delta); + return; + } + + let id = self.next_item_id(); + self.ensure_turn().items.push(ThreadItem::AgentMessage { + id, + text: payload.delta.clone(), + }); + self.streaming_agent_message = true; + } + fn handle_agent_reasoning(&mut self, payload: &AgentReasoningEvent) { if payload.text.is_empty() { + self.streaming_reasoning_summary = false; return; } + let was_streaming = self.streaming_reasoning_summary; // If the last item is a reasoning item, add the new text to the summary. if let Some(ThreadItem::Reasoning { summary, .. }) = self.ensure_turn().items.last_mut() { - summary.push(payload.text.clone()); + if was_streaming && !summary.is_empty() { + if let Some(last) = summary.last_mut() { + *last = payload.text.clone(); + } + } else { + summary.push(payload.text.clone()); + } + self.streaming_reasoning_summary = false; return; } // Otherwise, create a new reasoning item. + self.streaming_reasoning_summary = false; let id = self.next_item_id(); self.ensure_turn().items.push(ThreadItem::Reasoning { id, @@ -103,18 +199,53 @@ impl ThreadHistoryBuilder { }); } + fn handle_agent_reasoning_delta(&mut self, payload: &AgentReasoningDeltaEvent) { + if payload.delta.is_empty() { + return; + } + + if self.streaming_reasoning_summary + && let Some(ThreadItem::Reasoning { summary, .. 
}) = self.ensure_turn().items.last_mut() + { + if let Some(last) = summary.last_mut() { + last.push_str(&payload.delta); + } else { + summary.push(payload.delta.clone()); + } + return; + } + + self.streaming_reasoning_summary = true; + let id = self.next_item_id(); + self.ensure_turn().items.push(ThreadItem::Reasoning { + id, + summary: vec![payload.delta.clone()], + content: Vec::new(), + }); + } + fn handle_agent_reasoning_raw_content(&mut self, payload: &AgentReasoningRawContentEvent) { if payload.text.is_empty() { + self.streaming_reasoning_raw = false; return; } + let was_streaming = self.streaming_reasoning_raw; // If the last item is a reasoning item, add the new text to the content. if let Some(ThreadItem::Reasoning { content, .. }) = self.ensure_turn().items.last_mut() { - content.push(payload.text.clone()); + if was_streaming && !content.is_empty() { + if let Some(last) = content.last_mut() { + *last = payload.text.clone(); + } + } else { + content.push(payload.text.clone()); + } + self.streaming_reasoning_raw = false; return; } // Otherwise, create a new reasoning item. + self.streaming_reasoning_raw = false; let id = self.next_item_id(); self.ensure_turn().items.push(ThreadItem::Reasoning { id, @@ -123,6 +254,295 @@ impl ThreadHistoryBuilder { }); } + fn handle_agent_reasoning_raw_content_delta( + &mut self, + payload: &AgentReasoningRawContentDeltaEvent, + ) { + if payload.delta.is_empty() { + return; + } + + if self.streaming_reasoning_raw + && let Some(ThreadItem::Reasoning { content, .. 
}) = self.ensure_turn().items.last_mut() + { + if let Some(last) = content.last_mut() { + last.push_str(&payload.delta); + } else { + content.push(payload.delta.clone()); + } + return; + } + + self.streaming_reasoning_raw = true; + let id = self.next_item_id(); + self.ensure_turn().items.push(ThreadItem::Reasoning { + id, + summary: Vec::new(), + content: vec![payload.delta.clone()], + }); + } + + fn handle_exec_command_begin(&mut self, payload: &ExecCommandBeginEvent) { + let id = payload.call_id.clone(); + let command = payload.command.join(" "); + let command_actions = payload + .parsed_cmd + .iter() + .cloned() + .map(CommandAction::from) + .collect(); + + let item = ThreadItem::CommandExecution { + id: id.clone(), + command, + cwd: payload.cwd.clone(), + process_id: payload.process_id.clone(), + status: CommandExecutionStatus::InProgress, + command_actions, + aggregated_output: None, + exit_code: None, + duration_ms: None, + }; + + let turn = self.ensure_turn(); + let index = turn.items.len(); + turn.items.push(item); + self.exec_items.insert(id.clone(), index); + self.exec_output.insert(id, String::new()); + } + + fn handle_exec_command_output_delta(&mut self, payload: &ExecCommandOutputDeltaEvent) { + let Some(index) = self.exec_items.get(&payload.call_id).copied() else { + return; + }; + + let text = String::from_utf8_lossy(&payload.chunk).to_string(); + let output = { + let output = self.exec_output.entry(payload.call_id.clone()).or_default(); + output.push_str(&text); + output.clone() + }; + + let Some(ThreadItem::CommandExecution { + aggregated_output, .. 
+ }) = self.ensure_turn().items.get_mut(index) + else { + return; + }; + + *aggregated_output = Some(output); + } + + fn handle_exec_command_end(&mut self, payload: &ExecCommandEndEvent) { + let output = self + .exec_output + .remove(&payload.call_id) + .unwrap_or_default(); + let best_output = if !payload.aggregated_output.is_empty() { + payload.aggregated_output.clone() + } else if !output.is_empty() { + output + } else if !payload.stdout.is_empty() || !payload.stderr.is_empty() { + format!( + "{stdout}{stderr}", + stdout = payload.stdout, + stderr = payload.stderr + ) + } else { + String::new() + }; + + let duration_ms = duration_to_ms(payload.duration); + let status = if payload.exit_code == 0 { + CommandExecutionStatus::Completed + } else { + CommandExecutionStatus::Failed + }; + + let Some(index) = self.exec_items.get(&payload.call_id).copied() else { + let item = ThreadItem::CommandExecution { + id: payload.call_id.clone(), + command: payload.command.join(" "), + cwd: payload.cwd.clone(), + process_id: payload.process_id.clone(), + status, + command_actions: payload + .parsed_cmd + .iter() + .cloned() + .map(CommandAction::from) + .collect(), + aggregated_output: Some(best_output), + exit_code: Some(payload.exit_code), + duration_ms, + }; + self.ensure_turn().items.push(item); + return; + }; + + let Some(ThreadItem::CommandExecution { + status: existing_status, + aggregated_output, + exit_code, + duration_ms: existing_duration_ms, + .. 
+ }) = self.ensure_turn().items.get_mut(index) + else { + return; + }; + + *existing_status = status; + *aggregated_output = Some(best_output); + *exit_code = Some(payload.exit_code); + *existing_duration_ms = duration_ms; + } + + fn handle_patch_apply_begin(&mut self, payload: &PatchApplyBeginEvent) { + let id = payload.call_id.clone(); + let changes = build_file_updates(&payload.changes); + let item = ThreadItem::FileChange { + id: id.clone(), + changes, + status: PatchApplyStatus::InProgress, + }; + + let turn = self.ensure_turn(); + let index = turn.items.len(); + turn.items.push(item); + self.patch_items.insert(id, index); + } + + fn handle_patch_apply_end(&mut self, payload: &PatchApplyEndEvent) { + let status = if payload.success { + PatchApplyStatus::Completed + } else { + PatchApplyStatus::Failed + }; + let changes = build_file_updates(&payload.changes); + + let Some(index) = self.patch_items.get(&payload.call_id).copied() else { + self.ensure_turn().items.push(ThreadItem::FileChange { + id: payload.call_id.clone(), + changes, + status, + }); + return; + }; + + let Some(ThreadItem::FileChange { + status: existing_status, + changes: existing_changes, + .. 
+ }) = self.ensure_turn().items.get_mut(index) + else { + return; + }; + + *existing_status = status; + *existing_changes = changes; + } + + fn handle_mcp_tool_call_begin(&mut self, payload: &McpToolCallBeginEvent) { + let id = payload.call_id.clone(); + let invocation = &payload.invocation; + let arguments = invocation.arguments.clone().unwrap_or(JsonValue::Null); + + let item = ThreadItem::McpToolCall { + id: id.clone(), + server: invocation.server.clone(), + tool: invocation.tool.clone(), + status: McpToolCallStatus::InProgress, + arguments, + result: None, + error: None, + duration_ms: None, + }; + + let turn = self.ensure_turn(); + let index = turn.items.len(); + turn.items.push(item); + self.mcp_items.insert(id, index); + } + + fn handle_mcp_tool_call_end(&mut self, payload: &McpToolCallEndEvent) { + let duration_ms = duration_to_ms(payload.duration); + let invocation = &payload.invocation; + let arguments = invocation.arguments.clone().unwrap_or(JsonValue::Null); + + let (status, result, error) = match &payload.result { + Ok(result) => { + let is_error = result.is_error.unwrap_or(false); + ( + if is_error { + McpToolCallStatus::Failed + } else { + McpToolCallStatus::Completed + }, + Some(McpToolCallResult { + content: result.content.clone(), + structured_content: result.structured_content.clone(), + }), + None, + ) + } + Err(message) => ( + McpToolCallStatus::Failed, + None, + Some(McpToolCallError { + message: message.clone(), + }), + ), + }; + + let Some(index) = self.mcp_items.get(&payload.call_id).copied() else { + self.ensure_turn().items.push(ThreadItem::McpToolCall { + id: payload.call_id.clone(), + server: invocation.server.clone(), + tool: invocation.tool.clone(), + status, + arguments, + result, + error, + duration_ms, + }); + return; + }; + + let Some(ThreadItem::McpToolCall { + status: existing_status, + arguments: existing_arguments, + result: existing_result, + error: existing_error, + duration_ms: existing_duration_ms, + .. 
+ }) = self.ensure_turn().items.get_mut(index) + else { + return; + }; + + *existing_status = status; + *existing_arguments = arguments; + *existing_result = result; + *existing_error = error; + *existing_duration_ms = duration_ms; + } + + fn handle_web_search_begin(&mut self, _payload: &WebSearchBeginEvent) {} + + fn handle_web_search_end(&mut self, payload: &WebSearchEndEvent) { + self.ensure_turn().items.push(ThreadItem::WebSearch { + id: payload.call_id.clone(), + query: payload.query.clone(), + }); + } + + fn handle_view_image_tool_call(&mut self, payload: &ViewImageToolCallEvent) { + self.ensure_turn().items.push(ThreadItem::ImageView { + id: payload.call_id.clone(), + path: payload.path.to_string_lossy().to_string(), + }); + } + fn handle_turn_aborted(&mut self, _payload: &TurnAbortedEvent) { let Some(turn) = self.current_turn.as_mut() else { return; @@ -137,6 +557,13 @@ impl ThreadHistoryBuilder { } self.turns.push(turn.into()); } + self.exec_items.clear(); + self.exec_output.clear(); + self.patch_items.clear(); + self.mcp_items.clear(); + self.streaming_agent_message = false; + self.streaming_reasoning_summary = false; + self.streaming_reasoning_raw = false; } fn new_turn(&mut self) -> PendingTurn { @@ -207,16 +634,69 @@ impl From for Turn { } } +fn duration_to_ms(duration: Duration) -> Option { + i64::try_from(duration.as_millis()).ok() +} + +fn build_file_updates(changes: &HashMap) -> Vec { + let mut items: Vec<_> = changes.iter().collect(); + items.sort_by_key(|(path, _)| path.to_string_lossy().to_string()); + + items + .into_iter() + .map(|(path, change)| { + let (kind, diff) = match change { + FileChange::Add { content } => (PatchChangeKind::Add, content.clone()), + FileChange::Delete { content } => (PatchChangeKind::Delete, content.clone()), + FileChange::Update { + unified_diff, + move_path, + } => ( + PatchChangeKind::Update { + move_path: move_path.clone(), + }, + unified_diff.clone(), + ), + }; + FileUpdateChange { + path: 
path.to_string_lossy().to_string(), + kind, + diff, + } + }) + .collect() +} + #[cfg(test)] mod tests { use super::*; + use codex_protocol::protocol::AgentMessageDeltaEvent; use codex_protocol::protocol::AgentMessageEvent; + use codex_protocol::protocol::AgentReasoningEvent; + use codex_protocol::protocol::AgentReasoningRawContentEvent; + use codex_protocol::protocol::ExecCommandBeginEvent; + + use codex_protocol::protocol::ExecCommandOutputDeltaEvent; + use codex_protocol::protocol::ExecCommandSource; + use codex_protocol::protocol::ExecOutputStream; + use codex_protocol::protocol::McpInvocation; + use codex_protocol::protocol::McpToolCallBeginEvent; + use codex_protocol::protocol::McpToolCallEndEvent; + use codex_protocol::protocol::PatchApplyBeginEvent; + use codex_protocol::protocol::PatchApplyEndEvent; use codex_protocol::protocol::TurnAbortReason; use codex_protocol::protocol::TurnAbortedEvent; use codex_protocol::protocol::UserMessageEvent; + use codex_protocol::protocol::ViewImageToolCallEvent; + use codex_protocol::protocol::WebSearchEndEvent; + use mcp_types::CallToolResult; use pretty_assertions::assert_eq; + use serde_json::json; + use std::collections::HashMap; + use std::path::PathBuf; + use std::time::Duration; #[test] fn builds_multiple_turns_with_reasoning_items() { @@ -410,4 +890,188 @@ mod tests { } ); } + + #[test] + fn replays_partial_assistant_message_and_in_progress_exec_output() { + let events = vec![ + EventMsg::UserMessage(UserMessageEvent { + message: "Do the thing".into(), + images: None, + }), + EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { + delta: "Working".into(), + }), + EventMsg::ExecCommandBegin(ExecCommandBeginEvent { + call_id: "call-1".into(), + process_id: Some("pid-1".into()), + turn_id: "turn-1".into(), + command: vec!["echo".into(), "hi".into()], + cwd: PathBuf::from("/tmp"), + parsed_cmd: Vec::new(), + source: ExecCommandSource::Agent, + interaction_input: None, + }), + 
EventMsg::ExecCommandOutputDelta(ExecCommandOutputDeltaEvent { + call_id: "call-1".into(), + stream: ExecOutputStream::Stdout, + chunk: b"hi\n".to_vec(), + }), + EventMsg::TurnAborted(TurnAbortedEvent { + reason: TurnAbortReason::Interrupted, + }), + ]; + + let turns = build_turns_from_event_msgs(&events); + assert_eq!(turns.len(), 1); + let turn = &turns[0]; + assert_eq!(turn.status, TurnStatus::Interrupted); + assert_eq!(turn.items.len(), 3); + + assert_eq!( + turn.items[1], + ThreadItem::AgentMessage { + id: "item-2".into(), + text: "Working".into(), + } + ); + assert_eq!( + turn.items[2], + ThreadItem::CommandExecution { + id: "call-1".into(), + command: "echo hi".into(), + cwd: PathBuf::from("/tmp"), + process_id: Some("pid-1".into()), + status: CommandExecutionStatus::InProgress, + command_actions: Vec::new(), + aggregated_output: Some("hi\n".into()), + exit_code: None, + duration_ms: None, + } + ); + } + + #[test] + fn updates_mcp_tool_call_status_on_end() { + let events = vec![ + EventMsg::UserMessage(UserMessageEvent { + message: "Search".into(), + images: None, + }), + EventMsg::McpToolCallBegin(McpToolCallBeginEvent { + call_id: "mcp-1".into(), + invocation: McpInvocation { + server: "srv".into(), + tool: "tool".into(), + arguments: Some(json!({"q":"hi"})), + }, + }), + EventMsg::McpToolCallEnd(McpToolCallEndEvent { + call_id: "mcp-1".into(), + invocation: McpInvocation { + server: "srv".into(), + tool: "tool".into(), + arguments: Some(json!({"q":"hi"})), + }, + duration: Duration::from_millis(12), + result: Ok(CallToolResult { + content: Vec::new(), + structured_content: None, + is_error: Some(false), + }), + }), + ]; + + let turns = build_turns_from_event_msgs(&events); + assert_eq!(turns.len(), 1); + let turn = &turns[0]; + assert_eq!(turn.items.len(), 2); + assert_eq!( + turn.items[1], + ThreadItem::McpToolCall { + id: "mcp-1".into(), + server: "srv".into(), + tool: "tool".into(), + status: McpToolCallStatus::Completed, + arguments: 
json!({"q":"hi"}), + result: Some(McpToolCallResult { + content: Vec::new(), + structured_content: None, + }), + error: None, + duration_ms: Some(12), + } + ); + } + + #[test] + fn replays_patch_and_web_search_and_image_view() { + let mut changes = HashMap::new(); + changes.insert( + PathBuf::from("file.txt"), + codex_protocol::protocol::FileChange::Add { + content: "hello".into(), + }, + ); + + let events = vec![ + EventMsg::UserMessage(UserMessageEvent { + message: "Do work".into(), + images: None, + }), + EventMsg::PatchApplyBegin(PatchApplyBeginEvent { + call_id: "patch-1".into(), + turn_id: "turn-1".into(), + auto_approved: true, + changes: changes.clone(), + }), + EventMsg::PatchApplyEnd(PatchApplyEndEvent { + call_id: "patch-1".into(), + turn_id: "turn-1".into(), + stdout: "".into(), + stderr: "".into(), + success: true, + changes, + }), + EventMsg::WebSearchEnd(WebSearchEndEvent { + call_id: "ws-1".into(), + query: "hello".into(), + }), + EventMsg::ViewImageToolCall(ViewImageToolCallEvent { + call_id: "img-1".into(), + path: PathBuf::from("image.png"), + }), + ]; + + let turns = build_turns_from_event_msgs(&events); + assert_eq!(turns.len(), 1); + let turn = &turns[0]; + assert_eq!(turn.items.len(), 4); + + assert_eq!( + turn.items[1], + ThreadItem::FileChange { + id: "patch-1".into(), + changes: vec![FileUpdateChange { + path: "file.txt".into(), + kind: PatchChangeKind::Add, + diff: "hello".into(), + }], + status: PatchApplyStatus::Completed, + } + ); + assert_eq!( + turn.items[2], + ThreadItem::WebSearch { + id: "ws-1".into(), + query: "hello".into(), + } + ); + assert_eq!( + turn.items[3], + ThreadItem::ImageView { + id: "img-1".into(), + path: "image.png".into(), + } + ); + } } diff --git a/codex-rs/app-server-protocol/src/protocol/v2.rs b/codex-rs/app-server-protocol/src/protocol/v2.rs index 0aec959b9a4..422d0b83408 100644 --- a/codex-rs/app-server-protocol/src/protocol/v2.rs +++ b/codex-rs/app-server-protocol/src/protocol/v2.rs @@ -227,6 +227,8 @@ pub 
enum ConfigLayerSource { #[serde(rename_all = "camelCase")] #[ts(rename_all = "camelCase")] System { + /// This is the path to the system config.toml file, though it is not + /// guaranteed to exist. file: AbsolutePathBuf, }, @@ -237,9 +239,19 @@ pub enum ConfigLayerSource { #[serde(rename_all = "camelCase")] #[ts(rename_all = "camelCase")] User { + /// This is the path to the user's config.toml file, though it is not + /// guaranteed to exist. file: AbsolutePathBuf, }, + /// Path to a .codex/ folder within a project. There could be multiple of + /// these between `cwd` and the project/repo root. + #[serde(rename_all = "camelCase")] + #[ts(rename_all = "camelCase")] + Project { + dot_codex_folder: AbsolutePathBuf, + }, + /// Session-layer overrides supplied via `-c`/`--config`. SessionFlags, @@ -247,6 +259,8 @@ pub enum ConfigLayerSource { /// as the last layer on top of everything else. This scheme did not quite /// work out as intended, but we keep this variant as a "best effort" while /// we phase out `managed_config.toml` in favor of `requirements.toml`. + #[serde(rename_all = "camelCase")] + #[ts(rename_all = "camelCase")] LegacyManagedConfigTomlFromFile { file: AbsolutePathBuf, }, @@ -262,6 +276,7 @@ impl ConfigLayerSource { ConfigLayerSource::Mdm { .. } => 0, ConfigLayerSource::System { .. } => 10, ConfigLayerSource::User { .. } => 20, + ConfigLayerSource::Project { .. } => 25, ConfigLayerSource::SessionFlags => 30, ConfigLayerSource::LegacyManagedConfigTomlFromFile { .. 
} => 40, ConfigLayerSource::LegacyManagedConfigTomlFromMdm => 50, @@ -1259,6 +1274,8 @@ pub struct Turn { pub struct TurnError { pub message: String, pub codex_error_info: Option, + #[serde(default)] + pub additional_details: Option, } #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] @@ -1846,6 +1863,147 @@ pub struct FileChangeRequestApprovalParams { pub grant_root: Option, } +// --- LSP --- + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub enum LspDiagnosticSeverity { + Error, + Warning, + Information, + Hint, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct LspPosition { + /// 1-based line number. + pub line: u32, + /// 1-based UTF-16 character offset. + pub character: u32, +} + +#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct LspRange { + pub start: LspPosition, + pub end: LspPosition, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct LspDiagnostic { + pub path: PathBuf, + pub range: LspRange, + pub severity: Option, + pub code: Option, + pub source: Option, + pub message: String, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct LspLocation { + pub path: PathBuf, + pub range: LspRange, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct LspDocumentSymbol { + pub name: String, + pub kind: u32, + pub range: LspRange, + pub selection_range: LspRange, + #[serde(default, skip_serializing_if = 
"Vec::is_empty")] + pub children: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct LspDiagnosticsGetParams { + pub root: PathBuf, + #[serde(default)] + pub path: Option, + #[serde(default)] + pub max_results: Option, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct LspDiagnosticsGetResponse { + pub diagnostics: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct LspDiagnosticsUpdatedNotification { + pub root: PathBuf, + pub path: PathBuf, + pub errors: usize, + pub warnings: usize, + pub infos: usize, + pub hints: usize, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct LspDefinitionParams { + pub root: PathBuf, + pub file_path: PathBuf, + pub position: LspPosition, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct LspDefinitionResponse { + pub locations: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct LspReferencesParams { + pub root: PathBuf, + pub file_path: PathBuf, + pub position: LspPosition, + #[serde(default)] + pub include_declaration: bool, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct LspReferencesResponse { + pub locations: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct 
LspDocumentSymbolsParams { + pub root: PathBuf, + pub file_path: PathBuf, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +#[ts(export_to = "v2/")] +pub struct LspDocumentSymbolsResponse { + pub symbols: Vec, +} + #[derive(Serialize, Deserialize, Debug, Clone, PartialEq, JsonSchema, TS)] #[ts(export_to = "v2/")] pub struct FileChangeRequestApprovalResponse { diff --git a/codex-rs/app-server/Cargo.toml b/codex-rs/app-server/Cargo.toml index ca7fac30ced..4cd6ad2daf7 100644 --- a/codex-rs/app-server/Cargo.toml +++ b/codex-rs/app-server/Cargo.toml @@ -26,6 +26,7 @@ codex-login = { workspace = true } codex-protocol = { workspace = true } codex-app-server-protocol = { workspace = true } codex-feedback = { workspace = true } +codex-lsp = { workspace = true } codex-rmcp-client = { workspace = true } codex-utils-absolute-path = { workspace = true } codex-utils-json-to-toml = { workspace = true } @@ -48,11 +49,9 @@ uuid = { workspace = true, features = ["serde", "v7"] } [dev-dependencies] app_test_support = { workspace = true } -assert_cmd = { workspace = true } base64 = { workspace = true } core_test_support = { workspace = true } mcp-types = { workspace = true } -os_info = { workspace = true } pretty_assertions = { workspace = true } serial_test = { workspace = true } wiremock = { workspace = true } diff --git a/codex-rs/app-server/README.md b/codex-rs/app-server/README.md index 8e249776e84..049b9050b98 100644 --- a/codex-rs/app-server/README.md +++ b/codex-rs/app-server/README.md @@ -302,7 +302,7 @@ Event notifications are the server-initiated event stream for thread lifecycles, The app-server streams JSON-RPC notifications while a turn is running. Each turn starts with `turn/started` (initial `turn`) and ends with `turn/completed` (final `turn` status). Token usage events stream separately via `thread/tokenUsage/updated`. 
Clients subscribe to the events they care about, rendering each item incrementally as updates arrive. The per-item lifecycle is always: `item/started` → zero or more item-specific deltas → `item/completed`. - `turn/started` — `{ turn }` with the turn id, empty `items`, and `status: "inProgress"`. -- `turn/completed` — `{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo? } }`. +- `turn/completed` — `{ turn }` where `turn.status` is `completed`, `interrupted`, or `failed`; failures carry `{ error: { message, codexErrorInfo?, additionalDetails? } }`. - `turn/diff/updated` — `{ threadId, turnId, diff }` represents the up-to-date snapshot of the turn-level unified diff, emitted after every FileChange item. `diff` is the latest aggregated unified diff across every file change in the turn. UIs can render this to show the full "what changed" view without stitching individual `fileChange` items. - `turn/plan/updated` — `{ turnId, explanation?, plan }` whenever the agent shares or changes its plan; each `plan` entry is `{ step, status }` with `status` in `pending`, `inProgress`, or `completed`. @@ -352,7 +352,7 @@ There are additional item-specific events: ### Errors -`error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). Carries the same `{ error: { message, codexErrorInfo? } }` payload as `turn.status: "failed"` and may precede that terminal notification. +`error` event is emitted whenever the server hits an error mid-turn (for example, upstream model errors or quota limits). Carries the same `{ error: { message, codexErrorInfo?, additionalDetails? } }` payload as `turn.status: "failed"` and may precede that terminal notification. `codexErrorInfo` maps to the `CodexErrorInfo` enum. 
Common values: diff --git a/codex-rs/app-server/src/bespoke_event_handling.rs b/codex-rs/app-server/src/bespoke_event_handling.rs index f7e4f709ee3..ad0455a0587 100644 --- a/codex-rs/app-server/src/bespoke_event_handling.rs +++ b/codex-rs/app-server/src/bespoke_event_handling.rs @@ -340,6 +340,7 @@ pub(crate) async fn apply_bespoke_event_handling( let turn_error = TurnError { message: ev.message, codex_error_info: ev.codex_error_info.map(V2CodexErrorInfo::from), + additional_details: None, }; handle_error(conversation_id, turn_error.clone(), &turn_summary_store).await; outgoing @@ -357,6 +358,7 @@ pub(crate) async fn apply_bespoke_event_handling( let turn_error = TurnError { message: ev.message, codex_error_info: ev.codex_error_info.map(V2CodexErrorInfo::from), + additional_details: ev.additional_details, }; outgoing .send_server_notification(ServerNotification::Error(ErrorNotification { @@ -1340,6 +1342,7 @@ mod tests { TurnError { message: "boom".to_string(), codex_error_info: Some(V2CodexErrorInfo::InternalServerError), + additional_details: None, }, &turn_summary_store, ) @@ -1351,6 +1354,7 @@ mod tests { Some(TurnError { message: "boom".to_string(), codex_error_info: Some(V2CodexErrorInfo::InternalServerError), + additional_details: None, }) ); Ok(()) @@ -1398,6 +1402,7 @@ mod tests { TurnError { message: "oops".to_string(), codex_error_info: None, + additional_details: None, }, &turn_summary_store, ) @@ -1439,6 +1444,7 @@ mod tests { TurnError { message: "bad".to_string(), codex_error_info: Some(V2CodexErrorInfo::Other), + additional_details: None, }, &turn_summary_store, ) @@ -1467,6 +1473,7 @@ mod tests { Some(TurnError { message: "bad".to_string(), codex_error_info: Some(V2CodexErrorInfo::Other), + additional_details: None, }) ); } @@ -1691,6 +1698,7 @@ mod tests { TurnError { message: "a1".to_string(), codex_error_info: Some(V2CodexErrorInfo::BadRequest), + additional_details: None, }, &turn_summary_store, ) @@ -1710,6 +1718,7 @@ mod tests { TurnError { 
message: "b1".to_string(), codex_error_info: None, + additional_details: None, }, &turn_summary_store, ) @@ -1746,6 +1755,7 @@ mod tests { Some(TurnError { message: "a1".to_string(), codex_error_info: Some(V2CodexErrorInfo::BadRequest), + additional_details: None, }) ); } @@ -1766,6 +1776,7 @@ mod tests { Some(TurnError { message: "b1".to_string(), codex_error_info: None, + additional_details: None, }) ); } diff --git a/codex-rs/app-server/src/codex_message_processor.rs b/codex-rs/app-server/src/codex_message_processor.rs index c6e5004681f..cdf432084d4 100644 --- a/codex-rs/app-server/src/codex_message_processor.rs +++ b/codex-rs/app-server/src/codex_message_processor.rs @@ -55,6 +55,15 @@ use codex_app_server_protocol::LoginChatGptCompleteNotification; use codex_app_server_protocol::LoginChatGptResponse; use codex_app_server_protocol::LogoutAccountResponse; use codex_app_server_protocol::LogoutChatGptResponse; +use codex_app_server_protocol::LspDefinitionParams; +use codex_app_server_protocol::LspDefinitionResponse; +use codex_app_server_protocol::LspDiagnosticsGetParams; +use codex_app_server_protocol::LspDiagnosticsGetResponse; +use codex_app_server_protocol::LspDiagnosticsUpdatedNotification; +use codex_app_server_protocol::LspDocumentSymbolsParams; +use codex_app_server_protocol::LspDocumentSymbolsResponse; +use codex_app_server_protocol::LspReferencesParams; +use codex_app_server_protocol::LspReferencesResponse; use codex_app_server_protocol::McpServerOauthLoginCompletedNotification; use codex_app_server_protocol::McpServerOauthLoginParams; use codex_app_server_protocol::McpServerOauthLoginResponse; @@ -223,6 +232,8 @@ pub(crate) struct CodexMessageProcessor { turn_summary_store: TurnSummaryStore, pending_fuzzy_searches: Arc>>>, feedback: CodexFeedback, + lsp_manager: codex_lsp::LspManager, + lsp_subscribed_roots: HashSet, } #[derive(Clone, Copy, Debug)] @@ -265,6 +276,7 @@ impl CodexMessageProcessor { cli_overrides: Vec<(String, TomlValue)>, feedback: 
CodexFeedback, ) -> Self { + let lsp_manager = conversation_manager.lsp_manager(); Self { auth_manager, conversation_manager, @@ -278,6 +290,8 @@ impl CodexMessageProcessor { turn_summary_store: Arc::new(Mutex::new(HashMap::new())), pending_fuzzy_searches: Arc::new(Mutex::new(HashMap::new())), feedback, + lsp_manager, + lsp_subscribed_roots: HashSet::new(), } } @@ -492,6 +506,18 @@ impl CodexMessageProcessor { ClientRequest::FuzzyFileSearch { request_id, params } => { self.fuzzy_file_search(request_id, params).await; } + ClientRequest::LspDiagnosticsGet { request_id, params } => { + self.lsp_diagnostics_get(request_id, params).await; + } + ClientRequest::LspDefinition { request_id, params } => { + self.lsp_definition(request_id, params).await; + } + ClientRequest::LspReferences { request_id, params } => { + self.lsp_references(request_id, params).await; + } + ClientRequest::LspDocumentSymbols { request_id, params } => { + self.lsp_document_symbols(request_id, params).await; + } ClientRequest::OneOffCommandExec { request_id, params } => { self.exec_one_off_command(request_id, params).await; } @@ -1992,16 +2018,6 @@ impl CodexMessageProcessor { } }; - if !config.features.enabled(Feature::RmcpClient) { - let error = JSONRPCErrorError { - code: INVALID_REQUEST_ERROR_CODE, - message: "OAuth login is only supported when [features].rmcp_client is true in config.toml".to_string(), - data: None, - }; - self.outgoing.send_error(request_id, error).await; - return; - } - let McpServerOauthLoginParams { name, scopes, @@ -2742,8 +2758,14 @@ impl CodexMessageProcessor { sandbox_policy: params.sandbox_policy.map(|p| p.to_core()), model: params.model, plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: params.effort.map(Some), plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: params.summary, }) .await; @@ -3323,6 +3345,356 @@ impl CodexMessageProcessor { Err(_) => None, } } + 
+ async fn lsp_diagnostics_get( + &mut self, + request_id: RequestId, + params: LspDiagnosticsGetParams, + ) { + let config = match self.load_latest_config().await { + Ok(config) => config, + Err(error) => { + self.outgoing.send_error(request_id, error).await; + return; + } + }; + + let mut lsp_config = config.lsp.manager.clone(); + lsp_config.enabled = config.features.enabled(Feature::Lsp); + self.lsp_manager.set_config(lsp_config).await; + + if !config.features.enabled(Feature::Lsp) { + let error = JSONRPCErrorError { + code: INVALID_REQUEST_ERROR_CODE, + message: "LSP is disabled. Enable `[features].lsp = true` in config.toml." + .to_string(), + data: None, + }; + self.outgoing.send_error(request_id, error).await; + return; + } + + let root = resolve_root(&config.cwd, params.root); + self.ensure_lsp_diagnostics_subscription(&root).await; + let path = params.path.map(|p| resolve_under_root(&root, p)); + let max_results = params.max_results.unwrap_or(200); + + let diags = match self + .lsp_manager + .diagnostics(&root, path.as_deref(), max_results) + .await + { + Ok(diags) => diags, + Err(err) => { + let error = JSONRPCErrorError { + code: INTERNAL_ERROR_CODE, + message: format!("LSP diagnostics failed: {err:#}"), + data: None, + }; + self.outgoing.send_error(request_id, error).await; + return; + } + }; + + let response = LspDiagnosticsGetResponse { + diagnostics: diags.into_iter().map(to_api_diagnostic).collect(), + }; + self.outgoing.send_response(request_id, response).await; + } + + async fn lsp_definition(&mut self, request_id: RequestId, params: LspDefinitionParams) { + let config = match self.load_latest_config().await { + Ok(config) => config, + Err(error) => { + self.outgoing.send_error(request_id, error).await; + return; + } + }; + + let mut lsp_config = config.lsp.manager.clone(); + lsp_config.enabled = config.features.enabled(Feature::Lsp); + self.lsp_manager.set_config(lsp_config).await; + + if !config.features.enabled(Feature::Lsp) { + let error = 
JSONRPCErrorError { + code: INVALID_REQUEST_ERROR_CODE, + message: "LSP is disabled. Enable `[features].lsp = true` in config.toml." + .to_string(), + data: None, + }; + self.outgoing.send_error(request_id, error).await; + return; + } + + let root = resolve_root(&config.cwd, params.root); + self.ensure_lsp_diagnostics_subscription(&root).await; + let path = resolve_under_root(&root, params.file_path); + let locations = match self + .lsp_manager + .definition( + &root, + &path, + codex_lsp::Position { + line: params.position.line, + character: params.position.character, + }, + ) + .await + { + Ok(locations) => locations, + Err(err) => { + let error = JSONRPCErrorError { + code: INTERNAL_ERROR_CODE, + message: format!("LSP definition failed: {err:#}"), + data: None, + }; + self.outgoing.send_error(request_id, error).await; + return; + } + }; + + let response = LspDefinitionResponse { + locations: locations.into_iter().map(to_api_location).collect(), + }; + self.outgoing.send_response(request_id, response).await; + } + + async fn lsp_references(&mut self, request_id: RequestId, params: LspReferencesParams) { + let config = match self.load_latest_config().await { + Ok(config) => config, + Err(error) => { + self.outgoing.send_error(request_id, error).await; + return; + } + }; + + let mut lsp_config = config.lsp.manager.clone(); + lsp_config.enabled = config.features.enabled(Feature::Lsp); + self.lsp_manager.set_config(lsp_config).await; + + if !config.features.enabled(Feature::Lsp) { + let error = JSONRPCErrorError { + code: INVALID_REQUEST_ERROR_CODE, + message: "LSP is disabled. Enable `[features].lsp = true` in config.toml." 
+ .to_string(), + data: None, + }; + self.outgoing.send_error(request_id, error).await; + return; + } + + let root = resolve_root(&config.cwd, params.root); + self.ensure_lsp_diagnostics_subscription(&root).await; + let path = resolve_under_root(&root, params.file_path); + let locations = match self + .lsp_manager + .references( + &root, + &path, + codex_lsp::Position { + line: params.position.line, + character: params.position.character, + }, + params.include_declaration, + ) + .await + { + Ok(locations) => locations, + Err(err) => { + let error = JSONRPCErrorError { + code: INTERNAL_ERROR_CODE, + message: format!("LSP references failed: {err:#}"), + data: None, + }; + self.outgoing.send_error(request_id, error).await; + return; + } + }; + + let response = LspReferencesResponse { + locations: locations.into_iter().map(to_api_location).collect(), + }; + self.outgoing.send_response(request_id, response).await; + } + + async fn lsp_document_symbols( + &mut self, + request_id: RequestId, + params: LspDocumentSymbolsParams, + ) { + let config = match self.load_latest_config().await { + Ok(config) => config, + Err(error) => { + self.outgoing.send_error(request_id, error).await; + return; + } + }; + + let mut lsp_config = config.lsp.manager.clone(); + lsp_config.enabled = config.features.enabled(Feature::Lsp); + self.lsp_manager.set_config(lsp_config).await; + + if !config.features.enabled(Feature::Lsp) { + let error = JSONRPCErrorError { + code: INVALID_REQUEST_ERROR_CODE, + message: "LSP is disabled. Enable `[features].lsp = true` in config.toml." 
+ .to_string(), + data: None, + }; + self.outgoing.send_error(request_id, error).await; + return; + } + + let root = resolve_root(&config.cwd, params.root); + self.ensure_lsp_diagnostics_subscription(&root).await; + let path = resolve_under_root(&root, params.file_path); + let symbols = match self.lsp_manager.document_symbols(&root, &path).await { + Ok(symbols) => symbols, + Err(err) => { + let error = JSONRPCErrorError { + code: INTERNAL_ERROR_CODE, + message: format!("LSP document symbols failed: {err:#}"), + data: None, + }; + self.outgoing.send_error(request_id, error).await; + return; + } + }; + + let response = LspDocumentSymbolsResponse { + symbols: symbols.into_iter().map(to_api_symbol).collect(), + }; + self.outgoing.send_response(request_id, response).await; + } + + async fn ensure_lsp_diagnostics_subscription(&mut self, root: &PathBuf) { + if self.lsp_subscribed_roots.contains(root) { + return; + } + self.lsp_subscribed_roots.insert(root.clone()); + + let mut rx = match self.lsp_manager.subscribe_diagnostics(root).await { + Ok(rx) => rx, + Err(err) => { + tracing::warn!("failed to subscribe to LSP diagnostics: {err:#}"); + return; + } + }; + let outgoing = Arc::clone(&self.outgoing); + let root = root.clone(); + tokio::spawn(async move { + loop { + let update = match rx.recv().await { + Ok(update) => update, + Err(tokio::sync::broadcast::error::RecvError::Lagged(_)) => continue, + Err(tokio::sync::broadcast::error::RecvError::Closed) => break, + }; + let mut errors = 0usize; + let mut warnings = 0usize; + let mut infos = 0usize; + let mut hints = 0usize; + for d in &update.diagnostics { + match d.severity { + Some(codex_lsp::DiagnosticSeverity::Error) => errors += 1, + Some(codex_lsp::DiagnosticSeverity::Warning) => warnings += 1, + Some(codex_lsp::DiagnosticSeverity::Information) => infos += 1, + Some(codex_lsp::DiagnosticSeverity::Hint) => hints += 1, + None => {} + } + } + + let notification = + 
ServerNotification::LspDiagnosticsUpdated(LspDiagnosticsUpdatedNotification { + root: root.clone(), + path: PathBuf::from(&update.path), + errors, + warnings, + infos, + hints, + }); + outgoing.send_server_notification(notification).await; + } + }); + } +} + +fn resolve_root(base: &Path, root: PathBuf) -> PathBuf { + if root.is_absolute() { + root + } else { + base.join(root) + } +} + +fn resolve_under_root(root: &Path, path: PathBuf) -> PathBuf { + if path.is_absolute() { + path + } else { + root.join(path) + } +} + +fn to_api_severity( + severity: Option, +) -> Option { + severity.map(|sev| match sev { + codex_lsp::DiagnosticSeverity::Error => { + codex_app_server_protocol::LspDiagnosticSeverity::Error + } + codex_lsp::DiagnosticSeverity::Warning => { + codex_app_server_protocol::LspDiagnosticSeverity::Warning + } + codex_lsp::DiagnosticSeverity::Information => { + codex_app_server_protocol::LspDiagnosticSeverity::Information + } + codex_lsp::DiagnosticSeverity::Hint => { + codex_app_server_protocol::LspDiagnosticSeverity::Hint + } + }) +} + +fn to_api_position(pos: codex_lsp::Position) -> codex_app_server_protocol::LspPosition { + codex_app_server_protocol::LspPosition { + line: pos.line, + character: pos.character, + } +} + +fn to_api_range(range: codex_lsp::Range) -> codex_app_server_protocol::LspRange { + codex_app_server_protocol::LspRange { + start: to_api_position(range.start), + end: to_api_position(range.end), + } +} + +fn to_api_diagnostic(diag: codex_lsp::Diagnostic) -> codex_app_server_protocol::LspDiagnostic { + codex_app_server_protocol::LspDiagnostic { + path: PathBuf::from(diag.path), + range: to_api_range(diag.range), + severity: to_api_severity(diag.severity), + code: diag.code, + source: diag.source, + message: diag.message, + } +} + +fn to_api_location(loc: codex_lsp::Location) -> codex_app_server_protocol::LspLocation { + codex_app_server_protocol::LspLocation { + path: PathBuf::from(loc.path), + range: to_api_range(loc.range), + } +} + +fn 
to_api_symbol( + symbol: codex_lsp::DocumentSymbol, +) -> codex_app_server_protocol::LspDocumentSymbol { + codex_app_server_protocol::LspDocumentSymbol { + name: symbol.name, + kind: symbol.kind, + range: to_api_range(symbol.range), + selection_range: to_api_range(symbol.selection_range), + children: symbol.children.into_iter().map(to_api_symbol).collect(), + } } fn skills_to_info( diff --git a/codex-rs/app-server/tests/common/Cargo.toml b/codex-rs/app-server/tests/common/Cargo.toml index 67ceeae4f4f..d91ed4e675d 100644 --- a/codex-rs/app-server/tests/common/Cargo.toml +++ b/codex-rs/app-server/tests/common/Cargo.toml @@ -9,12 +9,12 @@ path = "lib.rs" [dependencies] anyhow = { workspace = true } -assert_cmd = { workspace = true } base64 = { workspace = true } chrono = { workspace = true } codex-app-server-protocol = { workspace = true } codex-core = { workspace = true, features = ["test-support"] } codex-protocol = { workspace = true } +codex-utils-cargo-bin = { workspace = true } serde = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true, features = [ diff --git a/codex-rs/app-server/tests/common/mcp_process.rs b/codex-rs/app-server/tests/common/mcp_process.rs index e2da40bf2d8..98b2cabaaa0 100644 --- a/codex-rs/app-server/tests/common/mcp_process.rs +++ b/codex-rs/app-server/tests/common/mcp_process.rs @@ -11,7 +11,6 @@ use tokio::process::ChildStdin; use tokio::process::ChildStdout; use anyhow::Context; -use assert_cmd::prelude::*; use codex_app_server_protocol::AddConversationListenerParams; use codex_app_server_protocol::ArchiveConversationParams; use codex_app_server_protocol::CancelLoginAccountParams; @@ -49,7 +48,6 @@ use codex_app_server_protocol::ThreadResumeParams; use codex_app_server_protocol::ThreadStartParams; use codex_app_server_protocol::TurnInterruptParams; use codex_app_server_protocol::TurnStartParams; -use std::process::Command as StdCommand; use tokio::process::Command; pub struct McpProcess { @@ -78,12 +76,8 @@ 
impl McpProcess { codex_home: &Path, env_overrides: &[(&str, Option<&str>)], ) -> anyhow::Result { - // Use assert_cmd to locate the binary path and then switch to tokio::process::Command - let std_cmd = StdCommand::cargo_bin("codex-app-server") - .context("should find binary for codex-mcp-server")?; - - let program = std_cmd.get_program().to_owned(); - + let program = codex_utils_cargo_bin::cargo_bin("codex-app-server") + .context("should find binary for codex-app-server")?; let mut cmd = Command::new(program); cmd.stdin(Stdio::piped()); diff --git a/codex-rs/app-server/tests/common/models_cache.rs b/codex-rs/app-server/tests/common/models_cache.rs index acc04e58dfa..0a977aa7c28 100644 --- a/codex-rs/app-server/tests/common/models_cache.rs +++ b/codex-rs/app-server/tests/common/models_cache.rs @@ -1,12 +1,10 @@ use chrono::DateTime; use chrono::Utc; use codex_core::models_manager::model_presets::all_model_presets; -use codex_protocol::openai_models::ClientVersion; use codex_protocol::openai_models::ConfigShellToolType; use codex_protocol::openai_models::ModelInfo; use codex_protocol::openai_models::ModelPreset; use codex_protocol::openai_models::ModelVisibility; -use codex_protocol::openai_models::ReasoningSummaryFormat; use codex_protocol::openai_models::TruncationPolicyConfig; use serde_json::json; use std::path::Path; @@ -25,7 +23,6 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo { } else { ModelVisibility::Hide }, - minimal_client_version: ClientVersion(0, 1, 0), supported_in_api: true, priority, upgrade: preset.upgrade.as_ref().map(|u| u.id.clone()), @@ -37,7 +34,6 @@ fn preset_to_info(preset: &ModelPreset, priority: i32) -> ModelInfo { truncation_policy: TruncationPolicyConfig::bytes(10_000), supports_parallel_tool_calls: false, context_window: None, - reasoning_summary_format: ReasoningSummaryFormat::None, experimental_supported_tools: Vec::new(), } } diff --git a/codex-rs/app-server/tests/suite/v2/config_rpc.rs 
b/codex-rs/app-server/tests/suite/v2/config_rpc.rs index 805601d5992..c0be58f50c7 100644 --- a/codex-rs/app-server/tests/suite/v2/config_rpc.rs +++ b/codex-rs/app-server/tests/suite/v2/config_rpc.rs @@ -18,6 +18,7 @@ use codex_app_server_protocol::RequestId; use codex_app_server_protocol::SandboxMode; use codex_app_server_protocol::ToolsV2; use codex_app_server_protocol::WriteStatus; +use codex_core::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX; use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use serde_json::json; @@ -73,8 +74,7 @@ sandbox_mode = "workspace-write" } ); let layers = layers.expect("layers present"); - assert_eq!(layers.len(), 1); - assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file }); + assert_layers_user_then_optional_system(&layers, user_file)?; Ok(()) } @@ -136,8 +136,7 @@ view_image = false ); let layers = layers.expect("layers present"); - assert_eq!(layers.len(), 1); - assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file }); + assert_layers_user_then_optional_system(&layers, user_file)?; Ok(()) } @@ -257,12 +256,7 @@ writable_roots = [{}] ); let layers = layers.expect("layers present"); - assert_eq!(layers.len(), 2); - assert_eq!( - layers[0].name, - ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file } - ); - assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file }); + assert_layers_managed_user_then_optional_system(&layers, managed_file, user_file)?; Ok(()) } @@ -433,3 +427,50 @@ async fn config_batch_write_applies_multiple_edits() -> Result<()> { Ok(()) } + +fn assert_layers_user_then_optional_system( + layers: &[codex_app_server_protocol::ConfigLayer], + user_file: AbsolutePathBuf, +) -> Result<()> { + if cfg!(unix) { + let system_file = AbsolutePathBuf::from_absolute_path(SYSTEM_CONFIG_TOML_FILE_UNIX)?; + assert_eq!(layers.len(), 2); + assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file }); + assert_eq!( + layers[1].name, 
+ ConfigLayerSource::System { file: system_file } + ); + } else { + assert_eq!(layers.len(), 1); + assert_eq!(layers[0].name, ConfigLayerSource::User { file: user_file }); + } + Ok(()) +} + +fn assert_layers_managed_user_then_optional_system( + layers: &[codex_app_server_protocol::ConfigLayer], + managed_file: AbsolutePathBuf, + user_file: AbsolutePathBuf, +) -> Result<()> { + if cfg!(unix) { + let system_file = AbsolutePathBuf::from_absolute_path(SYSTEM_CONFIG_TOML_FILE_UNIX)?; + assert_eq!(layers.len(), 3); + assert_eq!( + layers[0].name, + ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file } + ); + assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file }); + assert_eq!( + layers[2].name, + ConfigLayerSource::System { file: system_file } + ); + } else { + assert_eq!(layers.len(), 2); + assert_eq!( + layers[0].name, + ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file } + ); + assert_eq!(layers[1].name, ConfigLayerSource::User { file: user_file }); + } + Ok(()) +} diff --git a/codex-rs/apply-patch/Cargo.toml b/codex-rs/apply-patch/Cargo.toml index 1a918ce93b5..2fad46c2af2 100644 --- a/codex-rs/apply-patch/Cargo.toml +++ b/codex-rs/apply-patch/Cargo.toml @@ -25,5 +25,6 @@ tree-sitter-bash = { workspace = true } [dev-dependencies] assert_cmd = { workspace = true } assert_matches = { workspace = true } +codex-utils-cargo-bin = { workspace = true } pretty_assertions = { workspace = true } tempfile = { workspace = true } diff --git a/codex-rs/apply-patch/tests/suite/cli.rs b/codex-rs/apply-patch/tests/suite/cli.rs index ed95aba17c7..c982c7aa864 100644 --- a/codex-rs/apply-patch/tests/suite/cli.rs +++ b/codex-rs/apply-patch/tests/suite/cli.rs @@ -1,8 +1,13 @@ -use assert_cmd::prelude::*; +use assert_cmd::Command; use std::fs; -use std::process::Command; use tempfile::tempdir; +fn apply_patch_command() -> anyhow::Result { + Ok(Command::new(codex_utils_cargo_bin::cargo_bin( + "apply_patch", + )?)) +} + #[test] fn 
test_apply_patch_cli_add_and_update() -> anyhow::Result<()> { let tmp = tempdir()?; @@ -16,8 +21,7 @@ fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> { +hello *** End Patch"# ); - Command::cargo_bin("apply_patch") - .expect("should find apply_patch binary") + apply_patch_command()? .arg(add_patch) .current_dir(tmp.path()) .assert() @@ -34,8 +38,7 @@ fn test_apply_patch_cli_add_and_update() -> anyhow::Result<()> { +world *** End Patch"# ); - Command::cargo_bin("apply_patch") - .expect("should find apply_patch binary") + apply_patch_command()? .arg(update_patch) .current_dir(tmp.path()) .assert() @@ -59,10 +62,9 @@ fn test_apply_patch_cli_stdin_add_and_update() -> anyhow::Result<()> { +hello *** End Patch"# ); - let mut cmd = - assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary"); - cmd.current_dir(tmp.path()); - cmd.write_stdin(add_patch) + apply_patch_command()? + .current_dir(tmp.path()) + .write_stdin(add_patch) .assert() .success() .stdout(format!("Success. Updated the following files:\nA {file}\n")); @@ -77,10 +79,9 @@ fn test_apply_patch_cli_stdin_add_and_update() -> anyhow::Result<()> { +world *** End Patch"# ); - let mut cmd = - assert_cmd::Command::cargo_bin("apply_patch").expect("should find apply_patch binary"); - cmd.current_dir(tmp.path()); - cmd.write_stdin(update_patch) + apply_patch_command()? + .current_dir(tmp.path()) + .write_stdin(update_patch) .assert() .success() .stdout(format!("Success. 
Updated the following files:\nM {file}\n")); diff --git a/codex-rs/apply-patch/tests/suite/scenarios.rs b/codex-rs/apply-patch/tests/suite/scenarios.rs index 4b3eb3c84a4..0e21a7bc0a1 100644 --- a/codex-rs/apply-patch/tests/suite/scenarios.rs +++ b/codex-rs/apply-patch/tests/suite/scenarios.rs @@ -1,4 +1,3 @@ -use assert_cmd::prelude::*; use pretty_assertions::assert_eq; use std::collections::BTreeMap; use std::fs; @@ -9,7 +8,8 @@ use tempfile::tempdir; #[test] fn test_apply_patch_scenarios() -> anyhow::Result<()> { - for scenario in fs::read_dir("tests/fixtures/scenarios")? { + let scenarios_dir = Path::new(env!("CARGO_MANIFEST_DIR")).join("tests/fixtures/scenarios"); + for scenario in fs::read_dir(scenarios_dir)? { let scenario = scenario?; let path = scenario.path(); if path.is_dir() { @@ -36,7 +36,7 @@ fn run_apply_patch_scenario(dir: &Path) -> anyhow::Result<()> { // Run apply_patch in the temporary directory. We intentionally do not assert // on the exit status here; the scenarios are specified purely in terms of // final filesystem state, which we compare below. - Command::cargo_bin("apply_patch")? + Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?) .arg(patch) .current_dir(tmp.path()) .output()?; @@ -82,11 +82,15 @@ fn snapshot_dir_recursive( continue; }; let rel = stripped.to_path_buf(); - let file_type = entry.file_type()?; - if file_type.is_dir() { + + // Under Buck2, files in `__srcs` are often materialized as symlinks. + // Use `metadata()` (follows symlinks) so our fixture snapshots work + // under both Cargo and Buck2. + let metadata = fs::metadata(&path)?; + if metadata.is_dir() { entries.insert(rel.clone(), Entry::Dir); snapshot_dir_recursive(base, &path, entries)?; - } else if file_type.is_file() { + } else if metadata.is_file() { let contents = fs::read(&path)?; entries.insert(rel, Entry::File(contents)); } @@ -98,12 +102,14 @@ fn copy_dir_recursive(src: &Path, dst: &Path) -> anyhow::Result<()> { for entry in fs::read_dir(src)? 
{ let entry = entry?; let path = entry.path(); - let file_type = entry.file_type()?; let dest_path = dst.join(entry.file_name()); - if file_type.is_dir() { + + // See note in `snapshot_dir_recursive` about Buck2 symlink trees. + let metadata = fs::metadata(&path)?; + if metadata.is_dir() { fs::create_dir_all(&dest_path)?; copy_dir_recursive(&path, &dest_path)?; - } else if file_type.is_file() { + } else if metadata.is_file() { if let Some(parent) = dest_path.parent() { fs::create_dir_all(parent)?; } diff --git a/codex-rs/apply-patch/tests/suite/tool.rs b/codex-rs/apply-patch/tests/suite/tool.rs index c937719da00..56cd6e57a74 100644 --- a/codex-rs/apply-patch/tests/suite/tool.rs +++ b/codex-rs/apply-patch/tests/suite/tool.rs @@ -5,13 +5,13 @@ use std::path::Path; use tempfile::tempdir; fn run_apply_patch_in_dir(dir: &Path, patch: &str) -> anyhow::Result { - let mut cmd = Command::cargo_bin("apply_patch")?; + let mut cmd = Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?); cmd.current_dir(dir); Ok(cmd.arg(patch).assert()) } fn apply_patch_command(dir: &Path) -> anyhow::Result { - let mut cmd = Command::cargo_bin("apply_patch")?; + let mut cmd = Command::new(codex_utils_cargo_bin::cargo_bin("apply_patch")?); cmd.current_dir(dir); Ok(cmd) } diff --git a/codex-rs/cli/Cargo.toml b/codex-rs/cli/Cargo.toml index 812ddb18a58..d9f53c0c51f 100644 --- a/codex-rs/cli/Cargo.toml +++ b/codex-rs/cli/Cargo.toml @@ -29,6 +29,7 @@ codex-core = { workspace = true } codex-exec = { workspace = true } codex-execpolicy = { workspace = true } codex-login = { workspace = true } +codex-lsp = { workspace = true } codex-mcp-server = { workspace = true } codex-process-hardening = { workspace = true } codex-protocol = { workspace = true } @@ -60,6 +61,7 @@ codex_windows_sandbox = { package = "codex-windows-sandbox", path = "../windows- [dev-dependencies] assert_cmd = { workspace = true } assert_matches = { workspace = true } +codex-utils-cargo-bin = { workspace = true } predicates = 
{ workspace = true } pretty_assertions = { workspace = true } tempfile = { workspace = true } diff --git a/codex-rs/cli/src/main.rs b/codex-rs/cli/src/main.rs index bda75007d0b..1b24f76bfc6 100644 --- a/codex-rs/cli/src/main.rs +++ b/codex-rs/cli/src/main.rs @@ -131,6 +131,9 @@ enum Subcommand { /// Inspect feature flags. Features(FeaturesCli), + + /// Inspect Language Server Protocol (LSP) configuration and server discovery. + Lsp(LspCli), } #[derive(Debug, Parser)] @@ -239,6 +242,20 @@ struct LogoutCommand { config_overrides: CliConfigOverrides, } +#[derive(Debug, Parser)] +struct LspCli { + /// Optional working directory to inspect (defaults to current directory). + #[arg(long, value_name = "DIR")] + cwd: Option, + + /// Optional config profile to use. + #[arg(long = "profile", value_name = "PROFILE")] + config_profile: Option, + + #[clap(flatten)] + config_overrides: CliConfigOverrides, +} + #[derive(Debug, Parser)] struct AppServerCommand { /// Omit to run the app server; specify a subcommand for tooling. @@ -645,6 +662,37 @@ async fn cli_main(codex_linux_sandbox_exe: Option) -> anyhow::Result<() } } }, + Some(Subcommand::Lsp(mut lsp_cli)) => { + prepend_config_flags(&mut lsp_cli.config_overrides, root_config_overrides.clone()); + + let cli_kv_overrides = lsp_cli + .config_overrides + .parse_overrides() + .map_err(anyhow::Error::msg)?; + + let overrides = ConfigOverrides { + cwd: lsp_cli.cwd.take(), + config_profile: lsp_cli.config_profile.take(), + ..Default::default() + }; + + let config = + Config::load_with_cli_overrides_and_harness_overrides(cli_kv_overrides, overrides) + .await?; + + if !config.features.enabled(Feature::Lsp) { + println!( + "LSP is disabled. Enable `[features].lsp = true` in config.toml to use LSP." 
+ ); + return Ok(()); + } + + let mut lsp_config = config.lsp.manager.clone(); + lsp_config.enabled = true; + let lsp = codex_lsp::LspManager::new(lsp_config); + let status = lsp.status(&config.cwd).await?; + println!("{}", codex_lsp::render_lsp_status(&status)); + } } Ok(()) diff --git a/codex-rs/cli/src/mcp_cmd.rs b/codex-rs/cli/src/mcp_cmd.rs index 1b6f5ffcbb9..73d21847090 100644 --- a/codex-rs/cli/src/mcp_cmd.rs +++ b/codex-rs/cli/src/mcp_cmd.rs @@ -13,15 +13,12 @@ use codex_core::config::find_codex_home; use codex_core::config::load_global_mcp_servers; use codex_core::config::types::McpServerConfig; use codex_core::config::types::McpServerTransportConfig; -use codex_core::features::Feature; use codex_core::mcp::auth::compute_auth_statuses; use codex_core::protocol::McpAuthStatus; use codex_rmcp_client::delete_oauth_tokens; use codex_rmcp_client::perform_oauth_login; use codex_rmcp_client::supports_oauth_login; -/// [experimental] Launch Codexel as an MCP server or manage configured MCP servers. -/// /// Subcommands: /// - `serve` — run the MCP server on stdio /// - `list` — list configured servers (with `--json`) @@ -39,24 +36,11 @@ pub struct McpCli { #[derive(Debug, clap::Subcommand)] pub enum McpSubcommand { - /// [experimental] List configured MCP servers. List(ListArgs), - - /// [experimental] Show details for a configured MCP server. Get(GetArgs), - - /// [experimental] Add a global MCP server entry. Add(AddArgs), - - /// [experimental] Remove a global MCP server entry. Remove(RemoveArgs), - - /// [experimental] Authenticate with a configured MCP server via OAuth. - /// Requires features.rmcp_client = true in config.toml. Login(LoginArgs), - - /// [experimental] Remove stored OAuth credentials for a server. - /// Requires features.rmcp_client = true in config.toml. 
Logout(LogoutArgs), } @@ -210,7 +194,7 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re validate_server_name(&name)?; - let codex_home = find_codex_home().context("failed to resolve Codexel home directory")?; + let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?; let mut servers = load_global_mcp_servers(&codex_home) .await .with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?; @@ -282,24 +266,17 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re { match supports_oauth_login(&url).await { Ok(true) => { - if !config.features.enabled(Feature::RmcpClient) { - println!( - "MCP server supports login. Add `features.rmcp_client = true` \ - to your config.toml and run `codexel mcp login {name}` to login." - ); - } else { - println!("Detected OAuth support. Starting OAuth flow…"); - perform_oauth_login( - &name, - &url, - config.mcp_oauth_credentials_store_mode, - http_headers.clone(), - env_http_headers.clone(), - &Vec::new(), - ) - .await?; - println!("Successfully logged in."); - } + println!("Detected OAuth support. Starting OAuth flow…"); + perform_oauth_login( + &name, + &url, + config.mcp_oauth_credentials_store_mode, + http_headers.clone(), + env_http_headers.clone(), + &Vec::new(), + ) + .await?; + println!("Successfully logged in."); } Ok(false) => {} Err(_) => println!( @@ -352,12 +329,6 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs) .await .context("failed to load configuration")?; - if !config.features.enabled(Feature::RmcpClient) { - bail!( - "OAuth login is only supported when [features].rmcp_client is true in config.toml. See docs/config.md#feature-flags for details." 
- ); - } - let LoginArgs { name, scopes } = login_args; let Some(server) = config.mcp_servers.get(&name) else { @@ -491,7 +462,7 @@ async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> } if entries.is_empty() { - println!("No MCP servers configured yet. Try `codexel mcp add my-tool -- my-command`."); + println!("No MCP servers configured yet. Try `codex mcp add my-tool -- my-command`."); return Ok(()); } diff --git a/codex-rs/cli/tests/execpolicy.rs b/codex-rs/cli/tests/execpolicy.rs index 4b64de53161..40384ecee77 100644 --- a/codex-rs/cli/tests/execpolicy.rs +++ b/codex-rs/cli/tests/execpolicy.rs @@ -24,7 +24,7 @@ prefix_rule( "#, )?; - let output = Command::cargo_bin("codexel")? + let output = Command::new(codex_utils_cargo_bin::cargo_bin("codexel")?) .env("CODEX_HOME", codex_home.path()) .args([ "execpolicy", diff --git a/codex-rs/cli/tests/mcp_add_remove.rs b/codex-rs/cli/tests/mcp_add_remove.rs index feabd9e1453..29c3443c118 100644 --- a/codex-rs/cli/tests/mcp_add_remove.rs +++ b/codex-rs/cli/tests/mcp_add_remove.rs @@ -8,7 +8,7 @@ use pretty_assertions::assert_eq; use tempfile::TempDir; fn codex_command(codex_home: &Path) -> Result { - let mut cmd = assert_cmd::Command::cargo_bin("codexel")?; + let mut cmd = assert_cmd::Command::new(codex_utils_cargo_bin::cargo_bin("codexel")?); cmd.env("CODEX_HOME", codex_home); Ok(cmd) } diff --git a/codex-rs/cli/tests/mcp_list.rs b/codex-rs/cli/tests/mcp_list.rs index 5e7f3f2d974..56855ec6e71 100644 --- a/codex-rs/cli/tests/mcp_list.rs +++ b/codex-rs/cli/tests/mcp_list.rs @@ -12,7 +12,7 @@ use serde_json::json; use tempfile::TempDir; fn codex_command(codex_home: &Path) -> Result { - let mut cmd = assert_cmd::Command::cargo_bin("codexel")?; + let mut cmd = assert_cmd::Command::new(codex_utils_cargo_bin::cargo_bin("codexel")?); cmd.env("CODEX_HOME", codex_home); Ok(cmd) } diff --git a/codex-rs/codex-api/src/endpoint/models.rs b/codex-rs/codex-api/src/endpoint/models.rs index 
b15f07fca2a..cb43f4ff5b2 100644 --- a/codex-rs/codex-api/src/endpoint/models.rs +++ b/codex-rs/codex-api/src/endpoint/models.rs @@ -227,7 +227,6 @@ mod tests { "truncation_policy": {"mode": "bytes", "limit": 10_000}, "supports_parallel_tool_calls": false, "context_window": null, - "reasoning_summary_format": "none", "experimental_supported_tools": [], })) .unwrap(), diff --git a/codex-rs/codex-api/tests/models_integration.rs b/codex-rs/codex-api/tests/models_integration.rs index 93baffd3560..f197e806fd9 100644 --- a/codex-rs/codex-api/tests/models_integration.rs +++ b/codex-rs/codex-api/tests/models_integration.rs @@ -4,14 +4,12 @@ use codex_api::provider::Provider; use codex_api::provider::RetryConfig; use codex_api::provider::WireApi; use codex_client::ReqwestTransport; -use codex_protocol::openai_models::ClientVersion; use codex_protocol::openai_models::ConfigShellToolType; use codex_protocol::openai_models::ModelInfo; use codex_protocol::openai_models::ModelVisibility; use codex_protocol::openai_models::ModelsResponse; use codex_protocol::openai_models::ReasoningEffort; use codex_protocol::openai_models::ReasoningEffortPreset; -use codex_protocol::openai_models::ReasoningSummaryFormat; use codex_protocol::openai_models::TruncationPolicyConfig; use http::HeaderMap; use http::Method; @@ -75,7 +73,6 @@ async fn models_client_hits_models_endpoint() { ], shell_type: ConfigShellToolType::ShellCommand, visibility: ModelVisibility::List, - minimal_client_version: ClientVersion(0, 1, 0), supported_in_api: true, priority: 1, upgrade: None, @@ -87,7 +84,6 @@ async fn models_client_hits_models_endpoint() { truncation_policy: TruncationPolicyConfig::bytes(10_000), supports_parallel_tool_calls: false, context_window: None, - reasoning_summary_format: ReasoningSummaryFormat::None, experimental_supported_tools: Vec::new(), }], etag: String::new(), diff --git a/codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_payload.rs 
b/codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_payload.rs index 0f5caf52ff5..daf8e6af39a 100644 --- a/codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_payload.rs +++ b/codex-rs/codex-backend-openapi-models/src/models/rate_limit_status_payload.rs @@ -42,9 +42,12 @@ impl RateLimitStatusPayload { } } -#[derive(Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize)] +#[derive( + Clone, Copy, Debug, Eq, PartialEq, Ord, PartialOrd, Hash, Serialize, Deserialize, Default, +)] pub enum PlanType { #[serde(rename = "guest")] + #[default] Guest, #[serde(rename = "free")] Free, @@ -71,9 +74,3 @@ pub enum PlanType { #[serde(rename = "edu")] Edu, } - -impl Default for PlanType { - fn default() -> PlanType { - Self::Guest - } -} diff --git a/codex-rs/common/src/config_summary.rs b/codex-rs/common/src/config_summary.rs index e5b2dbec945..fe21a609a9e 100644 --- a/codex-rs/common/src/config_summary.rs +++ b/codex-rs/common/src/config_summary.rs @@ -18,6 +18,13 @@ pub fn create_config_summary_entries(config: &Config, model: &str) -> Vec<(&'sta if let Some(plan_model) = config.plan_model.as_deref() { entries.push(("plan model", plan_model.to_string())); } + if let Some(mini_subagent_model) = config + .mini_subagent_model + .as_deref() + .or(config.explore_model.as_deref()) + { + entries.push(("mini subagent model", mini_subagent_model.to_string())); + } if config.model_provider.wire_api == WireApi::Responses { let reasoning_effort = config .model_reasoning_effort @@ -35,6 +42,20 @@ pub fn create_config_summary_entries(config: &Config, model: &str) -> Vec<(&'sta plan_effort.unwrap_or_else(|| "none".to_string()), )); } + if config.mini_subagent_model.is_some() + || config.mini_subagent_model_reasoning_effort.is_some() + || config.explore_model.is_some() + || config.explore_model_reasoning_effort.is_some() + { + let explore_effort = config + .mini_subagent_model_reasoning_effort + .or(config.explore_model_reasoning_effort) 
+ .map(|effort| effort.to_string()); + entries.push(( + "mini subagent reasoning effort", + explore_effort.unwrap_or_else(|| "none".to_string()), + )); + } entries.push(( "reasoning summaries", config.model_reasoning_summary.to_string(), diff --git a/codex-rs/core/Cargo.toml b/codex-rs/core/Cargo.toml index 7cb0eb67032..9d3c3af620a 100644 --- a/codex-rs/core/Cargo.toml +++ b/codex-rs/core/Cargo.toml @@ -16,6 +16,7 @@ workspace = true anyhow = { workspace = true } async-channel = { workspace = true } async-trait = { workspace = true } +arc-swap = "1.7.1" base64 = { workspace = true } chardetng = { workspace = true } chrono = { workspace = true, features = ["serde"] } @@ -28,6 +29,7 @@ codex-execpolicy = { workspace = true } codex-file-search = { workspace = true } codex-git = { workspace = true } codex-keyring-store = { workspace = true } +codex-lsp = { workspace = true } codex-otel = { workspace = true } codex-protocol = { workspace = true } codex-rmcp-client = { workspace = true } @@ -122,6 +124,7 @@ assert_cmd = { workspace = true } assert_matches = { workspace = true } codex-arg0 = { workspace = true } codex-core = { path = ".", features = ["deterministic_process_ids"] } +codex-utils-cargo-bin = { workspace = true } core_test_support = { workspace = true } ctor = { workspace = true } escargot = { workspace = true } diff --git a/codex-rs/core/models.json b/codex-rs/core/models.json index 00226fb3eac..42fd6574d27 100644 --- a/codex-rs/core/models.json +++ b/codex-rs/core/models.json @@ -36,11 +36,7 @@ ], "shell_type": "shell_command", "visibility": "list", - "minimal_client_version": [ - 0, - 62, - 0 - ], + "minimal_client_version": "0.62.0", "supported_in_api": true, "upgrade": "gpt-5.2-codex", "priority": 1, @@ -79,11 +75,7 @@ ], "shell_type": "shell_command", "visibility": "hide", - "minimal_client_version": [ - 0, - 60, - 0 - ], + "minimal_client_version": "0.60.0", "supported_in_api": true, "upgrade": "gpt-5.2-codex", "priority": 2, @@ -118,11 +110,7 @@ ], 
"shell_type": "shell_command", "visibility": "list", - "minimal_client_version": [ - 0, - 60, - 0 - ], + "minimal_client_version": "0.60.0", "supported_in_api": true, "upgrade": "gpt-5.2-codex", "priority": 3, @@ -165,11 +153,7 @@ ], "shell_type": "shell_command", "visibility": "list", - "minimal_client_version": [ - 0, - 60, - 0 - ], + "minimal_client_version": "0.60.0", "supported_in_api": true, "upgrade": "gpt-5.2-codex", "priority": 4, @@ -208,11 +192,7 @@ ], "shell_type": "shell_command", "visibility": "hide", - "minimal_client_version": [ - 0, - 60, - 0 - ], + "minimal_client_version": "0.60.0", "supported_in_api": true, "upgrade": "gpt-5.2-codex", "priority": 5, @@ -251,11 +231,7 @@ ], "shell_type": "shell_command", "visibility": "hide", - "minimal_client_version": [ - 0, - 60, - 0 - ], + "minimal_client_version": "0.60.0", "supported_in_api": true, "upgrade": "gpt-5.2-codex", "priority": 6, @@ -298,11 +274,7 @@ ], "shell_type": "default", "visibility": "hide", - "minimal_client_version": [ - 0, - 60, - 0 - ], + "minimal_client_version": "0.60.0", "supported_in_api": true, "upgrade": "gpt-5.2-codex", "priority": 7, @@ -337,11 +309,7 @@ ], "shell_type": "shell_command", "visibility": "hide", - "minimal_client_version": [ - 0, - 60, - 0 - ], + "minimal_client_version": "0.60.0", "supported_in_api": true, "upgrade": "gpt-5.2-codex", "priority": 8, @@ -380,11 +348,7 @@ ], "shell_type": "local", "visibility": "hide", - "minimal_client_version": [ - 0, - 60, - 0 - ], + "minimal_client_version": "0.60.0", "supported_in_api": true, "upgrade": null, "priority": 9, @@ -427,11 +391,7 @@ ], "shell_type": "shell_command", "visibility": "hide", - "minimal_client_version": [ - 0, - 60, - 0 - ], + "minimal_client_version": "0.60.0", "supported_in_api": true, "upgrade": null, "priority": 10, @@ -474,11 +434,7 @@ ], "shell_type": "shell_command", "visibility": "hide", - "minimal_client_version": [ - 0, - 60, - 0 - ], + "minimal_client_version": "0.60.0", "supported_in_api": 
true, "upgrade": null, "priority": 11, diff --git a/codex-rs/core/src/bash.rs b/codex-rs/core/src/bash.rs index cb8248ec1f6..372dcdaf951 100644 --- a/codex-rs/core/src/bash.rs +++ b/codex-rs/core/src/bash.rs @@ -46,6 +46,7 @@ pub fn try_parse_word_only_commands_sequence(tree: &Tree, src: &str) -> Option Option { + // Handle concatenated arguments like -g"*.py" + let mut concatenated = String::new(); + let mut concat_cursor = child.walk(); + for part in child.named_children(&mut concat_cursor) { + match part.kind() { + "word" | "number" => { + concatenated + .push_str(part.utf8_text(src.as_bytes()).ok()?.to_owned().as_str()); + } + "string" => { + if part.child_count() == 3 + && part.child(0)?.kind() == "\"" + && part.child(1)?.kind() == "string_content" + && part.child(2)?.kind() == "\"" + { + concatenated.push_str( + part.child(1)? + .utf8_text(src.as_bytes()) + .ok()? + .to_owned() + .as_str(), + ); + } else { + return None; + } + } + "raw_string" => { + let raw_string = part.utf8_text(src.as_bytes()).ok()?; + let stripped = raw_string + .strip_prefix('\'') + .and_then(|s| s.strip_suffix('\''))?; + concatenated.push_str(stripped); + } + _ => return None, + } + } + if concatenated.is_empty() { + return None; + } + words.push(concatenated); + } _ => return None, } } @@ -256,4 +299,47 @@ mod tests { let parsed = parse_shell_lc_plain_commands(&command).unwrap(); assert_eq!(parsed, vec![vec!["ls".to_string()]]); } + + #[test] + fn accepts_concatenated_flag_and_value() { + // Test case: -g"*.py" (flag directly concatenated with quoted value) + let cmds = parse_seq("rg -n \"foo\" -g\"*.py\"").unwrap(); + assert_eq!( + cmds, + vec![vec![ + "rg".to_string(), + "-n".to_string(), + "foo".to_string(), + "-g*.py".to_string(), + ]] + ); + } + + #[test] + fn accepts_concatenated_flag_with_single_quotes() { + let cmds = parse_seq("grep -n 'pattern' -g'*.txt'").unwrap(); + assert_eq!( + cmds, + vec![vec![ + "grep".to_string(), + "-n".to_string(), + "pattern".to_string(), + 
"-g*.txt".to_string(), + ]] + ); + } + + #[test] + fn rejects_concatenation_with_variable_substitution() { + // Environment variables in concatenated strings should be rejected + assert!(parse_seq("rg -g\"$VAR\" pattern").is_none()); + assert!(parse_seq("rg -g\"${VAR}\" pattern").is_none()); + } + + #[test] + fn rejects_concatenation_with_command_substitution() { + // Command substitution in concatenated strings should be rejected + assert!(parse_seq("rg -g\"$(pwd)\" pattern").is_none()); + assert!(parse_seq("rg -g\"$(echo '*.py')\" pattern").is_none()); + } } diff --git a/codex-rs/core/src/codex.rs b/codex-rs/core/src/codex.rs index 9f2caab26ee..0c9f505c151 100644 --- a/codex-rs/core/src/codex.rs +++ b/codex-rs/core/src/codex.rs @@ -5,6 +5,7 @@ use std::sync::Arc; use std::sync::atomic::AtomicBool; use std::sync::atomic::AtomicU64; use std::sync::atomic::Ordering; +use std::time::Instant; use crate::AuthManager; use crate::SandboxState; @@ -13,7 +14,7 @@ use crate::compact; use crate::compact::run_inline_auto_compact_task; use crate::compact::should_use_remote_compact_task; use crate::compact_remote::run_inline_remote_auto_compact_task; -use crate::exec_policy::load_exec_policy_for_features; +use crate::exec_policy::ExecPolicyManager; use crate::features::Feature; use crate::features::Features; use crate::models_manager::manager::ModelsManager; @@ -157,7 +158,6 @@ use crate::user_instructions::UserInstructions; use crate::user_notification::UserNotification; use crate::util::backoff; use codex_async_utils::OrCancelExt; -use codex_execpolicy::Policy as ExecPolicy; use codex_otel::otel_manager::OtelManager; use codex_protocol::config_types::ReasoningSummary as ReasoningSummaryConfig; use codex_protocol::models::ContentItem; @@ -221,6 +221,7 @@ impl Codex { auth_manager: Arc, models_manager: Arc, skills_manager: Arc, + lsp_manager: codex_lsp::LspManager, conversation_history: InitialHistory, session_source: SessionSource, ) -> CodexResult { @@ -250,10 +251,9 @@ impl 
Codex { ) .await; - let exec_policy = load_exec_policy_for_features(&config.features, &config.codex_home) + let exec_policy = ExecPolicyManager::load(&config.features, &config.config_layer_stack) .await .map_err(|err| CodexErr::Fatal(format!("failed to load execpolicy: {err}")))?; - let exec_policy = Arc::new(RwLock::new(exec_policy)); let config = Arc::new(config); if config.features.enabled(Feature::RemoteModels) @@ -269,6 +269,12 @@ impl Codex { model_reasoning_summary: config.model_reasoning_summary, plan_model: config.plan_model.clone(), plan_model_reasoning_effort: config.plan_model_reasoning_effort, + explore_model: config.explore_model.clone(), + explore_model_reasoning_effort: config.explore_model_reasoning_effort, + mini_subagent_model: config.mini_subagent_model.clone(), + mini_subagent_model_reasoning_effort: config.mini_subagent_model_reasoning_effort, + subagent_model: config.subagent_model.clone(), + subagent_model_reasoning_effort: config.subagent_model_reasoning_effort, developer_instructions: config.developer_instructions.clone(), user_instructions, base_instructions: config.base_instructions.clone(), @@ -277,7 +283,6 @@ impl Codex { sandbox_policy: config.sandbox_policy.clone(), cwd: config.cwd.clone(), original_config_do_not_use: Arc::clone(&config), - exec_policy, session_source, }; @@ -289,9 +294,11 @@ impl Codex { config.clone(), auth_manager.clone(), models_manager.clone(), + exec_policy, tx_event.clone(), conversation_history, session_source_clone, + lsp_manager, skills_manager, ) .await @@ -359,6 +366,13 @@ pub(crate) struct Session { pub(crate) active_turn: Mutex>, pub(crate) services: SessionServices, next_internal_sub_id: AtomicU64, + active_subagent_tool_calls: Mutex>>, +} + +#[derive(Debug, Clone)] +struct ActiveSubAgentToolCall { + invocation: crate::protocol::SubAgentInvocation, + started_at: Instant, } /// The context needed for a single turn of the conversation. 
@@ -368,6 +382,12 @@ pub(crate) struct TurnContext { pub(crate) client: ModelClient, pub(crate) plan_model: Option, pub(crate) plan_reasoning_effort: Option, + pub(crate) explore_model: Option, + pub(crate) explore_reasoning_effort: Option, + pub(crate) mini_subagent_model: Option, + pub(crate) mini_subagent_reasoning_effort: Option, + pub(crate) subagent_model: Option, + pub(crate) subagent_reasoning_effort: Option, /// The session's current working directory. All relative paths provided by /// the model as well as sandbox policies are resolved against this path /// instead of `std::env::current_dir()`. @@ -384,7 +404,6 @@ pub(crate) struct TurnContext { pub(crate) final_output_json_schema: Option, pub(crate) codex_linux_sandbox_exe: Option, pub(crate) tool_call_gate: Arc, - pub(crate) exec_policy: Arc>, pub(crate) truncation_policy: TruncationPolicy, } @@ -417,6 +436,18 @@ pub(crate) struct SessionConfiguration { plan_model: Option, plan_model_reasoning_effort: Option, + /// Optional model slug override used for exploration flows (e.g. `/plan` exploration subagents). + explore_model: Option, + explore_model_reasoning_effort: Option, + + /// Optional model slug override used for mini subagents (the `spawn_mini_subagent` tool flow). + mini_subagent_model: Option, + mini_subagent_model_reasoning_effort: Option, + + /// Optional model slug override used for ordinary spawned subagents (the `spawn_subagent` tool flow). + subagent_model: Option, + subagent_model_reasoning_effort: Option, + /// Developer instructions that supplement the base instructions. developer_instructions: Option, @@ -443,9 +474,6 @@ pub(crate) struct SessionConfiguration { /// operate deterministically. cwd: PathBuf, - /// Execpolicy policy, applied only when enabled by feature flag. - exec_policy: Arc>, - // TODO(pakrym): Remove config from here original_config_do_not_use: Arc, /// Source of the session (cli, vscode, exec, mcp, ...) 
@@ -461,12 +489,30 @@ impl SessionConfiguration { if let Some(plan_model) = updates.plan_model.clone() { next_configuration.plan_model = Some(plan_model); } + if let Some(explore_model) = updates.explore_model.clone() { + next_configuration.explore_model = Some(explore_model); + } + if let Some(mini_subagent_model) = updates.mini_subagent_model.clone() { + next_configuration.mini_subagent_model = Some(mini_subagent_model); + } + if let Some(subagent_model) = updates.subagent_model.clone() { + next_configuration.subagent_model = Some(subagent_model); + } if let Some(effort) = updates.reasoning_effort { next_configuration.model_reasoning_effort = effort; } if let Some(effort) = updates.plan_reasoning_effort { next_configuration.plan_model_reasoning_effort = effort; } + if let Some(effort) = updates.explore_reasoning_effort { + next_configuration.explore_model_reasoning_effort = effort; + } + if let Some(effort) = updates.mini_subagent_reasoning_effort { + next_configuration.mini_subagent_model_reasoning_effort = effort; + } + if let Some(effort) = updates.subagent_reasoning_effort { + next_configuration.subagent_model_reasoning_effort = effort; + } if let Some(summary) = updates.reasoning_summary { next_configuration.model_reasoning_summary = summary; } @@ -490,8 +536,14 @@ pub(crate) struct SessionSettingsUpdate { pub(crate) sandbox_policy: Option, pub(crate) model: Option, pub(crate) plan_model: Option, + pub(crate) explore_model: Option, + pub(crate) mini_subagent_model: Option, + pub(crate) subagent_model: Option, pub(crate) reasoning_effort: Option>, pub(crate) plan_reasoning_effort: Option>, + pub(crate) explore_reasoning_effort: Option>, + pub(crate) mini_subagent_reasoning_effort: Option>, + pub(crate) subagent_reasoning_effort: Option>, pub(crate) reasoning_summary: Option, pub(crate) final_output_json_schema: Option>, } @@ -548,21 +600,68 @@ impl Session { client, plan_model: session_configuration.plan_model.clone(), plan_reasoning_effort: 
session_configuration.plan_model_reasoning_effort, + explore_model: session_configuration.explore_model.clone(), + explore_reasoning_effort: session_configuration.explore_model_reasoning_effort, + mini_subagent_model: session_configuration.mini_subagent_model.clone(), + mini_subagent_reasoning_effort: session_configuration + .mini_subagent_model_reasoning_effort, + subagent_model: session_configuration.subagent_model.clone(), + subagent_reasoning_effort: session_configuration.subagent_model_reasoning_effort, cwd: session_configuration.cwd.clone(), developer_instructions: match session_configuration.session_source { SessionSource::Cli | SessionSource::VSCode => { + let developer_instructions = + crate::tools::spec::prepend_web_ui_quality_bar_developer_instructions( + session_configuration.developer_instructions.clone(), + ); + let developer_instructions = + crate::tools::spec::prepend_lsp_navigation_developer_instructions( + developer_instructions, + ); let developer_instructions = crate::tools::spec::prepend_ask_user_question_developer_instructions( + developer_instructions, + ); + let developer_instructions = + crate::tools::spec::prepend_clarification_policy_developer_instructions( + developer_instructions, + ); + let developer_instructions = if per_turn_config + .features + .enabled(crate::features::Feature::MiniSubagents) + { + crate::tools::spec::prepend_spawn_mini_subagent_developer_instructions( + developer_instructions, + ) + } else { + developer_instructions + }; + crate::tools::spec::prepend_spawn_subagent_developer_instructions( + developer_instructions, + ) + } + SessionSource::Exec => { + let developer_instructions = + crate::tools::spec::prepend_lsp_navigation_developer_instructions( session_configuration.developer_instructions.clone(), ); + let developer_instructions = if per_turn_config + .features + .enabled(crate::features::Feature::MiniSubagents) + { + crate::tools::spec::prepend_spawn_mini_subagent_developer_instructions( + 
developer_instructions, + ) + } else { + developer_instructions + }; crate::tools::spec::prepend_spawn_subagent_developer_instructions( developer_instructions, ) } - SessionSource::Exec - | SessionSource::Mcp - | SessionSource::SubAgent(_) - | SessionSource::Unknown => session_configuration.developer_instructions.clone(), + SessionSource::Mcp | SessionSource::SubAgent(_) | SessionSource::Unknown => { + session_configuration.developer_instructions.clone() + } }, base_instructions: session_configuration.base_instructions.clone(), compact_prompt: session_configuration.compact_prompt.clone(), @@ -575,7 +674,6 @@ impl Session { final_output_json_schema: None, codex_linux_sandbox_exe: per_turn_config.codex_linux_sandbox_exe.clone(), tool_call_gate: Arc::new(ReadinessFlag::new()), - exec_policy: session_configuration.exec_policy.clone(), truncation_policy: TruncationPolicy::new( per_turn_config.as_ref(), model_family.truncation_policy, @@ -589,9 +687,11 @@ impl Session { config: Arc, auth_manager: Arc, models_manager: Arc, + exec_policy: ExecPolicyManager, tx_event: Sender, initial_history: InitialHistory, session_source: SessionSource, + lsp_manager: codex_lsp::LspManager, skills_manager: Arc, ) -> anyhow::Result> { debug!( @@ -700,6 +800,10 @@ impl Session { } let state = SessionState::new(session_configuration.clone()); + let mut lsp_manager_config = config.lsp.manager.clone(); + lsp_manager_config.enabled = config.features.enabled(Feature::Lsp); + lsp_manager.set_config(lsp_manager_config).await; + let services = SessionServices { mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::default())), mcp_startup_cancellation_token: CancellationToken::new(), @@ -708,11 +812,13 @@ impl Session { rollout: Mutex::new(Some(rollout_recorder)), user_shell: Arc::new(default_shell), show_raw_agent_reasoning: config.show_raw_agent_reasoning, + exec_policy, auth_manager: Arc::clone(&auth_manager), otel_manager, models_manager: Arc::clone(&models_manager), 
tool_approvals: Mutex::new(ApprovalStore::default()), skills_manager, + lsp_manager, }; let sess = Arc::new(Session { @@ -723,6 +829,7 @@ impl Session { active_turn: Mutex::new(None), services, next_internal_sub_id: AtomicU64::new(0), + active_subagent_tool_calls: Mutex::new(HashMap::new()), }); // Dispatch the SessionConfiguredEvent first and then report any errors. @@ -749,6 +856,16 @@ impl Session { sess.send_event_raw(event).await; } + if config.features.enabled(Feature::Lsp) { + let lsp = sess.services.lsp_manager.clone(); + let root = session_configuration.cwd.clone(); + tokio::spawn(async move { + if let Err(err) = lsp.prewarm(&root).await { + warn!("lsp prewarm failed: {err:#}"); + } + }); + } + // Construct sandbox_state before initialize() so it can be sent to each // MCP server immediately after it becomes ready (avoiding blocking). let sandbox_state = SandboxState { @@ -1004,6 +1121,35 @@ impl Session { None => prelude, }); } + if self.enabled(Feature::Lsp) + && session_configuration + .original_config_do_not_use + .lsp + .prompt_diagnostics + { + let max = session_configuration + .original_config_do_not_use + .lsp + .max_prompt_diagnostics; + let cwd = turn_context.cwd.clone(); + if let Ok(diags) = self.services.lsp_manager.diagnostics(&cwd, None, max).await + && !diags.is_empty() + { + let summary = crate::lsp_prompt::render_diagnostics_summary(&diags); + turn_context.developer_instructions = + Some(match turn_context.developer_instructions.take() { + Some(existing) => { + let existing = existing.trim(); + if existing.is_empty() { + summary + } else { + format!("{existing}\n\n{summary}") + } + } + None => summary, + }); + } + } if let Some(final_schema) = final_output_json_schema { turn_context.final_output_json_schema = final_schema; } @@ -1046,6 +1192,8 @@ impl Session { /// Persist the event to rollout and send it to clients. 
pub(crate) async fn send_event(&self, turn_context: &TurnContext, msg: EventMsg) { + self.track_subagent_tool_calls(&turn_context.sub_id, &msg) + .await; let legacy_source = msg.clone(); let event = Event { id: turn_context.sub_id.clone(), @@ -1107,29 +1255,24 @@ impl Session { amendment: &ExecPolicyAmendment, ) -> Result<(), ExecPolicyUpdateError> { let features = self.features.clone(); - let (codex_home, current_policy) = { - let state = self.state.lock().await; - ( - state - .session_configuration - .original_config_do_not_use - .codex_home - .clone(), - state.session_configuration.exec_policy.clone(), - ) - }; + let codex_home = self + .state + .lock() + .await + .session_configuration + .original_config_do_not_use + .codex_home + .clone(); if !features.enabled(Feature::ExecPolicy) { error!("attempted to append execpolicy rule while execpolicy feature is disabled"); return Err(ExecPolicyUpdateError::FeatureDisabled); } - crate::exec_policy::append_execpolicy_amendment_and_update( - &codex_home, - ¤t_policy, - &amendment.command, - ) - .await?; + self.services + .exec_policy + .append_amendment_and_update(&codex_home, amendment) + .await?; Ok(()) } @@ -1646,12 +1789,14 @@ impl Session { message: impl Into, codex_error: CodexErr, ) { + let additional_details = codex_error.to_string(); let codex_error_info = CodexErrorInfo::ResponseStreamDisconnected { http_status_code: codex_error.http_status_code_value(), }; let event = EventMsg::StreamError(StreamErrorEvent { message: message.into(), codex_error_info: Some(codex_error_info), + additional_details: Some(additional_details), }); self.send_event(turn_context, event).await; } @@ -1803,6 +1948,56 @@ impl Session { async fn cancel_mcp_startup(&self) { self.services.mcp_startup_cancellation_token.cancel(); } + + async fn track_subagent_tool_calls(&self, turn_id: &str, msg: &EventMsg) { + match msg { + EventMsg::SubAgentToolCallBegin(ev) => { + let mut calls_by_turn = self.active_subagent_tool_calls.lock().await; + let 
calls = calls_by_turn.entry(turn_id.to_string()).or_default(); + calls.insert( + ev.call_id.clone(), + ActiveSubAgentToolCall { + invocation: ev.invocation.clone(), + started_at: Instant::now(), + }, + ); + } + EventMsg::SubAgentToolCallEnd(ev) => { + let mut calls_by_turn = self.active_subagent_tool_calls.lock().await; + if let Some(calls) = calls_by_turn.get_mut(turn_id) { + calls.remove(&ev.call_id); + if calls.is_empty() { + calls_by_turn.remove(turn_id); + } + } + } + _ => {} + } + } + + pub(crate) async fn cancel_active_subagent_tool_calls(&self, turn_context: &TurnContext) { + let calls = { + let mut calls_by_turn = self.active_subagent_tool_calls.lock().await; + calls_by_turn + .remove(&turn_context.sub_id) + .unwrap_or_default() + }; + + for (call_id, call) in calls { + self.send_event( + turn_context, + EventMsg::SubAgentToolCallEnd(crate::protocol::SubAgentToolCallEndEvent { + call_id, + invocation: call.invocation, + duration: call.started_at.elapsed(), + tokens: None, + outcome: Some(crate::protocol::SubAgentToolCallOutcome::Cancelled), + result: Err("cancelled".to_string()), + }), + ) + .await; + } + } } async fn submission_loop(sess: Arc, config: Arc, rx_sub: Receiver) { @@ -1822,8 +2017,14 @@ async fn submission_loop(sess: Arc, config: Arc, rx_sub: Receiv sandbox_policy, model, plan_model, + explore_model, + mini_subagent_model, + subagent_model, effort, plan_effort, + explore_effort, + mini_subagent_effort, + subagent_effort, summary, } => { handlers::override_turn_context( @@ -1835,8 +2036,14 @@ async fn submission_loop(sess: Arc, config: Arc, rx_sub: Receiv sandbox_policy, model, plan_model, + explore_model, + mini_subagent_model, + subagent_model, reasoning_effort: effort, plan_reasoning_effort: plan_effort, + explore_reasoning_effort: explore_effort, + mini_subagent_reasoning_effort: mini_subagent_effort, + subagent_reasoning_effort: subagent_effort, reasoning_summary: summary, ..Default::default() }, @@ -2002,8 +2209,14 @@ mod handlers { 
sandbox_policy: Some(sandbox_policy), model: Some(model), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, reasoning_effort: Some(effort), plan_reasoning_effort: None, + explore_reasoning_effort: None, + mini_subagent_reasoning_effort: None, + subagent_reasoning_effort: None, reasoning_summary: Some(summary), final_output_json_schema: Some(final_output_json_schema), }, @@ -2425,6 +2638,12 @@ async fn spawn_review_thread( client, plan_model: parent_turn_context.plan_model.clone(), plan_reasoning_effort: parent_turn_context.plan_reasoning_effort, + explore_model: parent_turn_context.explore_model.clone(), + explore_reasoning_effort: parent_turn_context.explore_reasoning_effort, + mini_subagent_model: parent_turn_context.mini_subagent_model.clone(), + mini_subagent_reasoning_effort: parent_turn_context.mini_subagent_reasoning_effort, + subagent_model: parent_turn_context.subagent_model.clone(), + subagent_reasoning_effort: parent_turn_context.subagent_reasoning_effort, tools_config, ghost_snapshot: parent_turn_context.ghost_snapshot.clone(), developer_instructions: None, @@ -2438,7 +2657,6 @@ async fn spawn_review_thread( final_output_json_schema: None, codex_linux_sandbox_exe: parent_turn_context.codex_linux_sandbox_exe.clone(), tool_call_gate: Arc::new(ReadinessFlag::new()), - exec_policy: parent_turn_context.exec_policy.clone(), truncation_policy: TruncationPolicy::new(&per_turn_config, model_family.truncation_policy), }; @@ -2809,6 +3027,11 @@ async fn try_run_turn( model: turn_context.client.get_model(), effort: turn_context.client.get_reasoning_effort(), summary: turn_context.client.get_reasoning_summary(), + base_instructions: turn_context.base_instructions.clone(), + user_instructions: turn_context.user_instructions.clone(), + developer_instructions: turn_context.developer_instructions.clone(), + final_output_json_schema: turn_context.final_output_json_schema.clone(), + truncation_policy: 
Some(turn_context.truncation_policy.into()), }); sess.persist_rollout_items(&[rollout_item]).await; @@ -3032,6 +3255,7 @@ mod tests { use crate::function_tool::FunctionCallError; use crate::shell::default_user_shell; use crate::tools::format_exec_output_str; + use codex_protocol::models::FunctionCallOutputPayload; use codex_protocol::plan_tool::PlanItemArg; use codex_protocol::plan_tool::StepStatus; @@ -3111,6 +3335,51 @@ mod tests { assert_eq!(expected, actual); } + #[tokio::test] + async fn cancel_active_subagent_tool_calls_emits_cancelled_end_event() { + let (session, turn_context, rx_event) = make_session_and_context_with_rx().await; + + let invocation = crate::protocol::SubAgentInvocation { + description: "Test subagent".to_string(), + label: "test_subagent".to_string(), + prompt: "noop".to_string(), + model: None, + }; + session + .send_event( + turn_context.as_ref(), + EventMsg::SubAgentToolCallBegin(crate::protocol::SubAgentToolCallBeginEvent { + call_id: "subagent-call-1".to_string(), + invocation, + }), + ) + .await; + + session + .cancel_active_subagent_tool_calls(turn_context.as_ref()) + .await; + + let end_event = tokio::time::timeout(StdDuration::from_secs(2), async { + loop { + let event = rx_event + .recv() + .await + .expect("receive subagent tool call end event"); + if let EventMsg::SubAgentToolCallEnd(end) = event.msg { + break end; + } + } + }) + .await + .expect("timed out waiting for subagent tool call end event"); + + assert_eq!( + end_event.outcome, + Some(crate::protocol::SubAgentToolCallOutcome::Cancelled) + ); + assert_eq!(end_event.call_id, "subagent-call-1"); + } + #[tokio::test] async fn set_rate_limits_retains_previous_credits() { let codex_home = tempfile::tempdir().expect("create temp dir"); @@ -3124,6 +3393,12 @@ mod tests { model_reasoning_summary: config.model_reasoning_summary, plan_model: None, plan_model_reasoning_effort: None, + explore_model: None, + explore_model_reasoning_effort: None, + mini_subagent_model: None, + 
mini_subagent_model_reasoning_effort: None, + subagent_model: None, + subagent_model_reasoning_effort: None, developer_instructions: config.developer_instructions.clone(), user_instructions: config.user_instructions.clone(), base_instructions: config.base_instructions.clone(), @@ -3132,7 +3407,6 @@ mod tests { sandbox_policy: config.sandbox_policy.clone(), cwd: config.cwd.clone(), original_config_do_not_use: Arc::clone(&config), - exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())), session_source: SessionSource::Exec, }; @@ -3193,6 +3467,12 @@ mod tests { model_reasoning_summary: config.model_reasoning_summary, plan_model: None, plan_model_reasoning_effort: None, + explore_model: None, + explore_model_reasoning_effort: None, + mini_subagent_model: None, + mini_subagent_model_reasoning_effort: None, + subagent_model: None, + subagent_model_reasoning_effort: None, developer_instructions: config.developer_instructions.clone(), user_instructions: config.user_instructions.clone(), base_instructions: config.base_instructions.clone(), @@ -3201,7 +3481,6 @@ mod tests { sandbox_policy: config.sandbox_policy.clone(), cwd: config.cwd.clone(), original_config_do_not_use: Arc::clone(&config), - exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())), session_source: SessionSource::Exec, }; @@ -3394,6 +3673,7 @@ mod tests { let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key")); let models_manager = Arc::new(ModelsManager::new(auth_manager.clone())); + let exec_policy = ExecPolicyManager::default(); let model = ModelsManager::get_model_offline(config.model.as_deref()); let session_configuration = SessionConfiguration { provider: config.model_provider.clone(), @@ -3402,6 +3682,12 @@ mod tests { model_reasoning_summary: config.model_reasoning_summary, plan_model: None, plan_model_reasoning_effort: None, + explore_model: None, + explore_model_reasoning_effort: None, + mini_subagent_model: None, + mini_subagent_model_reasoning_effort: 
None, + subagent_model: None, + subagent_model_reasoning_effort: None, developer_instructions: config.developer_instructions.clone(), user_instructions: config.user_instructions.clone(), base_instructions: config.base_instructions.clone(), @@ -3410,7 +3696,6 @@ mod tests { sandbox_policy: config.sandbox_policy.clone(), cwd: config.cwd.clone(), original_config_do_not_use: Arc::clone(&config), - exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())), session_source: SessionSource::Exec, }; let per_turn_config = Session::build_per_turn_config(&session_configuration); @@ -3428,6 +3713,10 @@ mod tests { let state = SessionState::new(session_configuration.clone()); let skills_manager = Arc::new(SkillsManager::new(config.codex_home.clone())); + let mut lsp_manager_config = config.lsp.manager.clone(); + lsp_manager_config.enabled = config.features.enabled(Feature::Lsp); + let lsp_manager = codex_lsp::LspManager::new(lsp_manager_config); + let services = SessionServices { mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::default())), mcp_startup_cancellation_token: CancellationToken::new(), @@ -3436,11 +3725,13 @@ mod tests { rollout: Mutex::new(None), user_shell: Arc::new(default_user_shell()), show_raw_agent_reasoning: config.show_raw_agent_reasoning, + exec_policy, auth_manager: auth_manager.clone(), otel_manager: otel_manager.clone(), models_manager, tool_approvals: Mutex::new(ApprovalStore::default()), skills_manager, + lsp_manager, }; let turn_context = Session::make_turn_context( @@ -3462,6 +3753,7 @@ mod tests { active_turn: Mutex::new(None), services, next_internal_sub_id: AtomicU64::new(0), + active_subagent_tool_calls: Mutex::new(HashMap::new()), }; (session, turn_context) @@ -3482,6 +3774,7 @@ mod tests { let auth_manager = AuthManager::from_auth_for_testing(CodexAuth::from_api_key("Test API Key")); let models_manager = Arc::new(ModelsManager::new(auth_manager.clone())); + let exec_policy = ExecPolicyManager::default(); let model = 
ModelsManager::get_model_offline(config.model.as_deref()); let session_configuration = SessionConfiguration { provider: config.model_provider.clone(), @@ -3490,6 +3783,12 @@ mod tests { model_reasoning_summary: config.model_reasoning_summary, plan_model: None, plan_model_reasoning_effort: None, + explore_model: None, + explore_model_reasoning_effort: None, + mini_subagent_model: None, + mini_subagent_model_reasoning_effort: None, + subagent_model: None, + subagent_model_reasoning_effort: None, developer_instructions: config.developer_instructions.clone(), user_instructions: config.user_instructions.clone(), base_instructions: config.base_instructions.clone(), @@ -3498,7 +3797,6 @@ mod tests { sandbox_policy: config.sandbox_policy.clone(), cwd: config.cwd.clone(), original_config_do_not_use: Arc::clone(&config), - exec_policy: Arc::new(RwLock::new(ExecPolicy::empty())), session_source: SessionSource::Exec, }; let per_turn_config = Session::build_per_turn_config(&session_configuration); @@ -3516,6 +3814,10 @@ mod tests { let state = SessionState::new(session_configuration.clone()); let skills_manager = Arc::new(SkillsManager::new(config.codex_home.clone())); + let mut lsp_manager_config = config.lsp.manager.clone(); + lsp_manager_config.enabled = config.features.enabled(Feature::Lsp); + let lsp_manager = codex_lsp::LspManager::new(lsp_manager_config); + let services = SessionServices { mcp_connection_manager: Arc::new(RwLock::new(McpConnectionManager::default())), mcp_startup_cancellation_token: CancellationToken::new(), @@ -3524,11 +3826,13 @@ mod tests { rollout: Mutex::new(None), user_shell: Arc::new(default_user_shell()), show_raw_agent_reasoning: config.show_raw_agent_reasoning, + exec_policy, auth_manager: Arc::clone(&auth_manager), otel_manager: otel_manager.clone(), models_manager, tool_approvals: Mutex::new(ApprovalStore::default()), skills_manager, + lsp_manager, }; let turn_context = Arc::new(Session::make_turn_context( @@ -3550,6 +3854,7 @@ mod tests { 
active_turn: Mutex::new(None), services, next_internal_sub_id: AtomicU64::new(0), + active_subagent_tool_calls: Mutex::new(HashMap::new()), }); (session, turn_context, rx_event) diff --git a/codex-rs/core/src/codex_delegate.rs b/codex-rs/core/src/codex_delegate.rs index b6e3094e625..5aa80f8d69b 100644 --- a/codex-rs/core/src/codex_delegate.rs +++ b/codex-rs/core/src/codex_delegate.rs @@ -56,6 +56,7 @@ pub(crate) async fn run_codex_conversation_interactive( auth_manager, models_manager, Arc::clone(&parent_session.services.skills_manager), + parent_session.services.lsp_manager.clone(), initial_history.unwrap_or(InitialHistory::New), SessionSource::SubAgent(sub_agent_source), ) diff --git a/codex-rs/core/src/compact.rs b/codex-rs/core/src/compact.rs index 1a90b7b223f..77b9303e920 100644 --- a/codex-rs/core/src/compact.rs +++ b/codex-rs/core/src/compact.rs @@ -86,6 +86,11 @@ async fn run_compact_task_inner( model: turn_context.client.get_model(), effort: turn_context.client.get_reasoning_effort(), summary: turn_context.client.get_reasoning_summary(), + base_instructions: turn_context.base_instructions.clone(), + user_instructions: turn_context.user_instructions.clone(), + developer_instructions: turn_context.developer_instructions.clone(), + final_output_json_schema: turn_context.final_output_json_schema.clone(), + truncation_policy: Some(turn_context.truncation_policy.into()), }); sess.persist_rollout_items(&[rollout_item]).await; diff --git a/codex-rs/core/src/config/edit.rs b/codex-rs/core/src/config/edit.rs index 17799b497c3..a27cf026544 100644 --- a/codex-rs/core/src/config/edit.rs +++ b/codex-rs/core/src/config/edit.rs @@ -27,6 +27,21 @@ pub enum ConfigEdit { model: Option, effort: Option, }, + /// Update the active (or default) explore model selection and optional reasoning effort. + SetExploreModel { + model: Option, + effort: Option, + }, + /// Update the active (or default) mini subagent model selection and optional reasoning effort. 
+ SetMiniSubagentModel { + model: Option, + effort: Option, + }, + /// Update the active (or default) subagent model selection and optional reasoning effort. + SetSubagentModel { + model: Option, + effort: Option, + }, /// Toggle the acknowledgement flag under `[notice]`. SetNoticeHideFullAccessWarning(bool), /// Toggle the Windows world-writable directories warning acknowledgement flag. @@ -282,6 +297,42 @@ impl ConfigDocument { ); mutated }), + ConfigEdit::SetExploreModel { model, effort } => Ok({ + let mut mutated = false; + mutated |= self.write_profile_value( + &["explore_model"], + model.as_ref().map(|model_value| value(model_value.clone())), + ); + mutated |= self.write_profile_value( + &["explore_model_reasoning_effort"], + effort.map(|effort| value(effort.to_string())), + ); + mutated + }), + ConfigEdit::SetMiniSubagentModel { model, effort } => Ok({ + let mut mutated = false; + mutated |= self.write_profile_value( + &["mini_subagent_model"], + model.as_ref().map(|model_value| value(model_value.clone())), + ); + mutated |= self.write_profile_value( + &["mini_subagent_model_reasoning_effort"], + effort.map(|effort| value(effort.to_string())), + ); + mutated + }), + ConfigEdit::SetSubagentModel { model, effort } => Ok({ + let mut mutated = false; + mutated |= self.write_profile_value( + &["subagent_model"], + model.as_ref().map(|model_value| value(model_value.clone())), + ); + mutated |= self.write_profile_value( + &["subagent_model_reasoning_effort"], + effort.map(|effort| value(effort.to_string())), + ); + mutated + }), ConfigEdit::SetNoticeHideFullAccessWarning(acknowledged) => Ok(self.write_value( Scope::Global, &[Notice::TABLE_KEY, "hide_full_access_warning"], @@ -621,6 +672,42 @@ impl ConfigEditsBuilder { self } + pub fn set_explore_model( + mut self, + model: Option<&str>, + effort: Option, + ) -> Self { + self.edits.push(ConfigEdit::SetExploreModel { + model: model.map(ToOwned::to_owned), + effort, + }); + self + } + + pub fn set_mini_subagent_model( 
+ mut self, + model: Option<&str>, + effort: Option, + ) -> Self { + self.edits.push(ConfigEdit::SetMiniSubagentModel { + model: model.map(ToOwned::to_owned), + effort, + }); + self + } + + pub fn set_subagent_model( + mut self, + model: Option<&str>, + effort: Option, + ) -> Self { + self.edits.push(ConfigEdit::SetSubagentModel { + model: model.map(ToOwned::to_owned), + effort, + }); + self + } + pub fn set_hide_full_access_warning(mut self, acknowledged: bool) -> Self { self.edits .push(ConfigEdit::SetNoticeHideFullAccessWarning(acknowledged)); diff --git a/codex-rs/core/src/config/mod.rs b/codex-rs/core/src/config/mod.rs index 47585d4af86..18e27a79fa1 100644 --- a/codex-rs/core/src/config/mod.rs +++ b/codex-rs/core/src/config/mod.rs @@ -8,10 +8,12 @@ use crate::config::types::OtelConfig; use crate::config::types::OtelConfigToml; use crate::config::types::OtelExporterKind; use crate::config::types::SandboxWorkspaceWrite; +use crate::config::types::ScrollInputMode; use crate::config::types::ShellEnvironmentPolicy; use crate::config::types::ShellEnvironmentPolicyToml; use crate::config::types::Tui; use crate::config::types::UriBasedFileOpener; +use crate::config_loader::ConfigLayerStack; use crate::config_loader::ConfigRequirements; use crate::config_loader::LoaderOverrides; use crate::config_loader::load_config_layers_state; @@ -36,12 +38,12 @@ use codex_protocol::config_types::SandboxMode; use codex_protocol::config_types::TrustLevel; use codex_protocol::config_types::Verbosity; use codex_protocol::openai_models::ReasoningEffort; -use codex_protocol::openai_models::ReasoningSummaryFormat; use codex_rmcp_client::OAuthCredentialsStoreMode; use codex_utils_absolute_path::AbsolutePathBuf; use codex_utils_absolute_path::AbsolutePathBufGuard; use dirs::home_dir; use serde::Deserialize; +use serde::Serialize; use similar::DiffableStr; use std::collections::BTreeMap; use std::collections::HashMap; @@ -92,6 +94,10 @@ pub(crate) fn test_config() -> Config { /// Application 
configuration loaded from disk and merged with overrides. #[derive(Debug, Clone, PartialEq)] pub struct Config { + /// Provenance for how this [`Config`] was derived (merged layers + enforced + /// requirements). + pub config_layer_stack: ConfigLayerStack, + /// Optional override of model selection. pub model: Option, @@ -100,6 +106,21 @@ pub struct Config { /// When unset, planning flows use the active `model`. pub plan_model: Option, + /// Optional override of model selection used for exploration flows (e.g. `/plan` exploration subagents). + /// + /// When unset, exploration flows inherit the active model for that turn. + pub explore_model: Option, + + /// Optional override of model selection used for mini subagents (the `spawn_mini_subagent` tool flow). + /// + /// When unset, mini subagents use their configured default model. + pub mini_subagent_model: Option, + + /// Optional override of model selection used for ordinary spawned subagents (the `spawn_subagent` tool flow). + /// + /// When unset, spawned subagents inherit the active model for that turn. + pub subagent_model: Option, + /// Model used specifically for review sessions. Defaults to "gpt-5.1-codex-max". pub review_model: String, @@ -183,6 +204,58 @@ pub struct Config { /// Show startup tooltips in the TUI welcome screen. pub show_tooltips: bool, + /// Override the events-per-wheel-tick factor for TUI2 scroll normalization. + /// + /// This is the same `tui.scroll_events_per_tick` value from `config.toml`, plumbed through the + /// merged [`Config`] object (see [`Tui`]) so TUI2 can normalize scroll event density per + /// terminal. + pub tui_scroll_events_per_tick: Option, + + /// Override the number of lines applied per wheel tick in TUI2. + /// + /// This is the same `tui.scroll_wheel_lines` value from `config.toml` (see [`Tui`]). TUI2 + /// applies it to wheel-like scroll streams. Trackpad-like scrolling uses a separate + /// `tui.scroll_trackpad_lines` setting. 
+ pub tui_scroll_wheel_lines: Option, + + /// Override the number of lines per tick-equivalent used for trackpad scrolling in TUI2. + /// + /// This is the same `tui.scroll_trackpad_lines` value from `config.toml` (see [`Tui`]). + pub tui_scroll_trackpad_lines: Option, + + /// Trackpad acceleration: approximate number of events required to gain +1x speed in TUI2. + /// + /// This is the same `tui.scroll_trackpad_accel_events` value from `config.toml` (see [`Tui`]). + pub tui_scroll_trackpad_accel_events: Option, + + /// Trackpad acceleration: maximum multiplier applied to trackpad-like streams in TUI2. + /// + /// This is the same `tui.scroll_trackpad_accel_max` value from `config.toml` (see [`Tui`]). + pub tui_scroll_trackpad_accel_max: Option, + + /// Control how TUI2 interprets mouse scroll input (wheel vs trackpad). + /// + /// This is the same `tui.scroll_mode` value from `config.toml` (see [`Tui`]). + pub tui_scroll_mode: ScrollInputMode, + + /// Override the wheel tick detection threshold (ms) for TUI2 auto scroll mode. + /// + /// This is the same `tui.scroll_wheel_tick_detect_max_ms` value from `config.toml` (see + /// [`Tui`]). + pub tui_scroll_wheel_tick_detect_max_ms: Option, + + /// Override the wheel-like end-of-stream threshold (ms) for TUI2 auto scroll mode. + /// + /// This is the same `tui.scroll_wheel_like_max_duration_ms` value from `config.toml` (see + /// [`Tui`]). + pub tui_scroll_wheel_like_max_duration_ms: Option, + + /// Invert mouse scroll direction for TUI2. + /// + /// This is the same `tui.scroll_invert` value from `config.toml` (see [`Tui`]) and is applied + /// consistently to both mouse wheels and trackpads. + pub tui_scroll_invert: bool, + /// The directory that should be treated as the current working directory /// for the session. All relative paths inside the business-logic layer are /// resolved against this path. @@ -248,6 +321,21 @@ pub struct Config { /// When unset, planning flows use `model_reasoning_effort`. 
pub plan_model_reasoning_effort: Option, + /// Value to use for `reasoning.effort` in exploration flows (e.g. `/plan` exploration subagents). + /// + /// When unset, exploration flows inherit the active reasoning effort for that turn. + pub explore_model_reasoning_effort: Option, + + /// Value to use for `reasoning.effort` for mini subagents (the `spawn_mini_subagent` tool flow). + /// + /// When unset, mini subagents use their configured default reasoning effort. + pub mini_subagent_model_reasoning_effort: Option, + + /// Value to use for `reasoning.effort` for ordinary spawned subagents (the `spawn_subagent` tool flow). + /// + /// When unset, spawned subagents inherit the active reasoning effort for that turn. + pub subagent_model_reasoning_effort: Option, + /// If not "none", the value to use for `reasoning.summary` when making a /// request using the Responses API. pub model_reasoning_summary: ReasoningSummary, @@ -255,9 +343,6 @@ pub struct Config { /// Optional override to force-enable reasoning summaries for the configured model. pub model_supports_reasoning_summaries: Option, - /// Optional override to force reasoning summary format for the configured model. - pub model_reasoning_summary_format: Option, - /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`). pub model_verbosity: Option, @@ -280,10 +365,6 @@ pub struct Config { /// If set to `true`, used only the experimental unified exec tool. pub use_experimental_unified_exec_tool: bool, - /// If set to `true`, use the experimental official Rust MCP client. - /// https://github.com/modelcontextprotocol/rust-sdk - pub use_experimental_use_rmcp_client: bool, - /// Settings for ghost snapshots (used for undo). pub ghost_snapshot: GhostSnapshotConfig, @@ -315,6 +396,9 @@ pub struct Config { /// OTEL configuration (exporter type, endpoint, headers, etc.). pub otel: crate::config::types::OtelConfig, + + /// Language Server Protocol (LSP) settings. 
+ pub lsp: crate::config::types::LspConfig, } #[derive(Debug, Clone, Default)] @@ -373,11 +457,11 @@ impl ConfigBuilder { let config_toml: ConfigToml = merged_toml .try_into() .map_err(|e| std::io::Error::new(std::io::ErrorKind::InvalidData, e))?; - Config::load_config_with_requirements( + Config::load_config_with_layer_stack( config_toml, harness_overrides, codex_home, - config_layer_stack.requirements().clone(), + config_layer_stack, ) } } @@ -626,12 +710,18 @@ pub fn set_default_oss_provider(codex_home: &Path, provider: &str) -> std::io::R } /// Base config deserialized from ~/.codexel/config.toml. -#[derive(Deserialize, Debug, Clone, Default, PartialEq)] +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] pub struct ConfigToml { /// Optional override of model selection. pub model: Option, /// Optional override of model selection used for planning flows (e.g. `/plan` mode). pub plan_model: Option, + /// Optional override of model selection used for exploration flows (e.g. `/plan` exploration subagents). + pub explore_model: Option, + /// Optional override of model selection used for mini subagents (the `spawn_mini_subagent` tool flow). + pub mini_subagent_model: Option, + /// Optional override of model selection used for ordinary spawned subagents (the `spawn_subagent` tool flow). + pub subagent_model: Option, /// Review model override used by the `/review` feature. pub review_model: Option, @@ -728,6 +818,10 @@ pub struct ConfigToml { /// Collection of settings that are specific to the TUI. pub tui: Option, + /// LSP settings (diagnostics + navigation). + #[serde(default)] + pub lsp: Option, + /// When set to `true`, `AgentReasoning` events will be hidden from the /// UI/output. Defaults to `false`. 
pub hide_agent_reasoning: Option, @@ -738,6 +832,9 @@ pub struct ConfigToml { pub model_reasoning_effort: Option, pub plan_model_reasoning_effort: Option, + pub explore_model_reasoning_effort: Option, + pub mini_subagent_model_reasoning_effort: Option, + pub subagent_model_reasoning_effort: Option, pub model_reasoning_summary: Option, /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`). pub model_verbosity: Option, @@ -745,9 +842,6 @@ pub struct ConfigToml { /// Override to force-enable reasoning summaries for the configured model. pub model_supports_reasoning_summaries: Option, - /// Override to force reasoning summary format for the configured model. - pub model_reasoning_summary_format: Option, - /// Base URL for requests to ChatGPT (as opposed to the OpenAI API). pub chatgpt_base_url: Option, @@ -764,6 +858,11 @@ pub struct ConfigToml { #[serde(default)] pub ghost_snapshot: Option, + /// Markers used to detect the project root when searching parent + /// directories for `.codex` folders. Defaults to [".git"] when unset. + #[serde(default)] + pub project_root_markers: Option>, + /// When `true`, checks for Codex updates on startup and surfaces update prompts. /// Set to `false` only if your Codex updates are centrally managed. /// Defaults to `true`. @@ -788,7 +887,6 @@ pub struct ConfigToml { pub experimental_instructions_file: Option, pub experimental_compact_prompt_file: Option, pub experimental_use_unified_exec_tool: Option, - pub experimental_use_rmcp_client: Option, pub experimental_use_freeform_apply_patch: Option, /// Preferred OSS provider for local models, e.g. "lmstudio" or "ollama". 
pub oss_provider: Option, @@ -819,7 +917,7 @@ impl From for UserSavedConfig { } } -#[derive(Deserialize, Debug, Clone, PartialEq, Eq)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] pub struct ProjectConfig { pub trust_level: Option, } @@ -834,7 +932,7 @@ impl ProjectConfig { } } -#[derive(Deserialize, Debug, Clone, Default, PartialEq)] +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] pub struct ToolsToml { #[serde(default, alias = "web_search_request")] pub web_search: Option, @@ -853,7 +951,7 @@ impl From for Tools { } } -#[derive(Deserialize, Debug, Clone, Default, PartialEq, Eq)] +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq, Eq)] pub struct GhostSnapshotToml { /// Exclude untracked files larger than this many bytes from ghost snapshots. #[serde(alias = "ignore_untracked_files_over_bytes")] @@ -1028,16 +1126,17 @@ impl Config { codex_home: PathBuf, ) -> std::io::Result { // Note this ignores requirements.toml enforcement for tests. - let requirements = ConfigRequirements::default(); - Self::load_config_with_requirements(cfg, overrides, codex_home, requirements) + let config_layer_stack = ConfigLayerStack::default(); + Self::load_config_with_layer_stack(cfg, overrides, codex_home, config_layer_stack) } - fn load_config_with_requirements( + fn load_config_with_layer_stack( cfg: ConfigToml, overrides: ConfigOverrides, codex_home: PathBuf, - requirements: ConfigRequirements, + config_layer_stack: ConfigLayerStack, ) -> std::io::Result { + let requirements = config_layer_stack.requirements().clone(); let user_instructions = Self::load_instructions(Some(&codex_home)); // Destructure ConfigOverrides fully to ensure all overrides are applied. 
@@ -1204,7 +1303,6 @@ impl Config { let include_apply_patch_tool_flag = features.enabled(Feature::ApplyPatchFreeform); let tools_web_search_request = features.enabled(Feature::WebSearchRequest); let use_experimental_unified_exec_tool = features.enabled(Feature::UnifiedExec); - let use_experimental_use_rmcp_client = features.enabled(Feature::RmcpClient); let forced_chatgpt_workspace_id = cfg.forced_chatgpt_workspace_id.as_ref().and_then(|value| { @@ -1220,6 +1318,19 @@ impl Config { let model = model.or(config_profile.model).or(cfg.model); let plan_model = config_profile.plan_model.or(cfg.plan_model); + let explore_model = config_profile.explore_model.or(cfg.explore_model); + let mini_subagent_model = config_profile + .mini_subagent_model + .or(cfg.mini_subagent_model) + .or_else(|| { + if explore_model.is_some() { + tracing::warn!( + "config keys `explore_model`/`explore_model_reasoning_effort` are deprecated; use `mini_subagent_model`/`mini_subagent_model_reasoning_effort` instead" + ); + } + explore_model.clone() + }); + let subagent_model = config_profile.subagent_model.or(cfg.subagent_model); let compact_prompt = compact_prompt.or(cfg.compact_prompt).and_then(|value| { let trimmed = value.trim(); @@ -1278,6 +1389,9 @@ impl Config { let config = Self { model, plan_model, + explore_model, + mini_subagent_model, + subagent_model, review_model, model_context_window: cfg.model_context_window, model_auto_compact_token_limit: cfg.model_auto_compact_token_limit, @@ -1318,6 +1432,7 @@ impl Config { .collect(), tool_output_token_limit: cfg.tool_output_token_limit, codex_home, + config_layer_stack, history, file_opener: cfg.file_opener.unwrap_or(UriBasedFileOpener::VsCode), codex_linux_sandbox_exe, @@ -1333,12 +1448,22 @@ impl Config { plan_model_reasoning_effort: config_profile .plan_model_reasoning_effort .or(cfg.plan_model_reasoning_effort), + explore_model_reasoning_effort: config_profile + .explore_model_reasoning_effort + .or(cfg.explore_model_reasoning_effort), + 
mini_subagent_model_reasoning_effort: config_profile + .mini_subagent_model_reasoning_effort + .or(cfg.mini_subagent_model_reasoning_effort) + .or(config_profile.explore_model_reasoning_effort) + .or(cfg.explore_model_reasoning_effort), + subagent_model_reasoning_effort: config_profile + .subagent_model_reasoning_effort + .or(cfg.subagent_model_reasoning_effort), model_reasoning_summary: config_profile .model_reasoning_summary .or(cfg.model_reasoning_summary) .unwrap_or_default(), model_supports_reasoning_summaries: cfg.model_supports_reasoning_summaries, - model_reasoning_summary_format: cfg.model_reasoning_summary_format.clone(), model_verbosity: config_profile.model_verbosity.or(cfg.model_verbosity), chatgpt_base_url: config_profile .chatgpt_base_url @@ -1349,7 +1474,6 @@ impl Config { include_apply_patch_tool: include_apply_patch_tool_flag, tools_web_search_request, use_experimental_unified_exec_tool, - use_experimental_use_rmcp_client, ghost_snapshot, features, active_profile: active_profile_name, @@ -1365,6 +1489,27 @@ impl Config { .unwrap_or_default(), animations: cfg.tui.as_ref().map(|t| t.animations).unwrap_or(true), show_tooltips: cfg.tui.as_ref().map(|t| t.show_tooltips).unwrap_or(true), + tui_scroll_events_per_tick: cfg.tui.as_ref().and_then(|t| t.scroll_events_per_tick), + tui_scroll_wheel_lines: cfg.tui.as_ref().and_then(|t| t.scroll_wheel_lines), + tui_scroll_trackpad_lines: cfg.tui.as_ref().and_then(|t| t.scroll_trackpad_lines), + tui_scroll_trackpad_accel_events: cfg + .tui + .as_ref() + .and_then(|t| t.scroll_trackpad_accel_events), + tui_scroll_trackpad_accel_max: cfg + .tui + .as_ref() + .and_then(|t| t.scroll_trackpad_accel_max), + tui_scroll_mode: cfg.tui.as_ref().map(|t| t.scroll_mode).unwrap_or_default(), + tui_scroll_wheel_tick_detect_max_ms: cfg + .tui + .as_ref() + .and_then(|t| t.scroll_wheel_tick_detect_max_ms), + tui_scroll_wheel_like_max_duration_ms: cfg + .tui + .as_ref() + .and_then(|t| t.scroll_wheel_like_max_duration_ms), + 
tui_scroll_invert: cfg.tui.as_ref().map(|t| t.scroll_invert).unwrap_or(false), otel: { let t: OtelConfigToml = cfg.otel.unwrap_or_default(); let log_user_prompt = t.log_user_prompt.unwrap_or(false); @@ -1380,6 +1525,7 @@ impl Config { trace_exporter, } }, + lsp: cfg.lsp.unwrap_or_default().into(), }; Ok(config) } @@ -1546,8 +1692,23 @@ persistence = "none" .expect("TUI config without notifications should succeed"); let tui = parsed.tui.expect("config should include tui section"); - assert_eq!(tui.notifications, Notifications::Enabled(true)); - assert!(tui.show_tooltips); + assert_eq!( + tui, + Tui { + notifications: Notifications::Enabled(true), + animations: true, + show_tooltips: true, + scroll_events_per_tick: None, + scroll_wheel_lines: None, + scroll_trackpad_lines: None, + scroll_trackpad_accel_events: None, + scroll_trackpad_accel_max: None, + scroll_mode: ScrollInputMode::Auto, + scroll_wheel_tick_detect_max_ms: None, + scroll_wheel_like_max_duration_ms: None, + scroll_invert: false, + } + ); } #[test] @@ -1936,7 +2097,6 @@ trust_level = "trusted" let codex_home = TempDir::new()?; let cfg = ConfigToml { experimental_use_unified_exec_tool: Some(true), - experimental_use_rmcp_client: Some(true), experimental_use_freeform_apply_patch: Some(true), ..Default::default() }; @@ -1949,12 +2109,10 @@ trust_level = "trusted" assert!(config.features.enabled(Feature::ApplyPatchFreeform)); assert!(config.features.enabled(Feature::UnifiedExec)); - assert!(config.features.enabled(Feature::RmcpClient)); assert!(config.include_apply_patch_tool); assert!(config.use_experimental_unified_exec_tool); - assert!(config.use_experimental_use_rmcp_client); Ok(()) } @@ -3096,6 +3254,9 @@ model_verbosity = "high" Config { model: Some("o3".to_string()), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(), model_context_window: None, model_auto_compact_token_limit: None, @@ -3117,6 +3278,7 @@ 
model_verbosity = "high" project_doc_fallback_filenames: Vec::new(), tool_output_token_limit: None, codex_home: fixture.codex_home(), + config_layer_stack: Default::default(), history: History::default(), file_opener: UriBasedFileOpener::VsCode, codex_linux_sandbox_exe: None, @@ -3124,9 +3286,11 @@ model_verbosity = "high" show_raw_agent_reasoning: false, model_reasoning_effort: Some(ReasoningEffort::High), plan_model_reasoning_effort: None, + explore_model_reasoning_effort: None, + mini_subagent_model_reasoning_effort: None, + subagent_model_reasoning_effort: None, model_reasoning_summary: ReasoningSummary::Detailed, model_supports_reasoning_summaries: None, - model_reasoning_summary_format: None, model_verbosity: None, chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), base_instructions: None, @@ -3137,7 +3301,6 @@ model_verbosity = "high" include_apply_patch_tool: false, tools_web_search_request: false, use_experimental_unified_exec_tool: false, - use_experimental_use_rmcp_client: false, ghost_snapshot: GhostSnapshotConfig::default(), features: Features::with_defaults(), active_profile: Some("o3".to_string()), @@ -3149,7 +3312,17 @@ model_verbosity = "high" tui_notifications: Default::default(), animations: true, show_tooltips: true, + tui_scroll_events_per_tick: None, + tui_scroll_wheel_lines: None, + tui_scroll_trackpad_lines: None, + tui_scroll_trackpad_accel_events: None, + tui_scroll_trackpad_accel_max: None, + tui_scroll_mode: ScrollInputMode::Auto, + tui_scroll_wheel_tick_detect_max_ms: None, + tui_scroll_wheel_like_max_duration_ms: None, + tui_scroll_invert: false, otel: OtelConfig::default(), + lsp: crate::config::types::LspConfig::default(), }, o3_profile_config ); @@ -3173,6 +3346,9 @@ model_verbosity = "high" let expected_gpt3_profile_config = Config { model: Some("gpt-3.5-turbo".to_string()), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, review_model: 
OPENAI_DEFAULT_REVIEW_MODEL.to_string(), model_context_window: None, model_auto_compact_token_limit: None, @@ -3194,6 +3370,7 @@ model_verbosity = "high" project_doc_fallback_filenames: Vec::new(), tool_output_token_limit: None, codex_home: fixture.codex_home(), + config_layer_stack: Default::default(), history: History::default(), file_opener: UriBasedFileOpener::VsCode, codex_linux_sandbox_exe: None, @@ -3201,9 +3378,11 @@ model_verbosity = "high" show_raw_agent_reasoning: false, model_reasoning_effort: None, plan_model_reasoning_effort: None, + explore_model_reasoning_effort: None, + mini_subagent_model_reasoning_effort: None, + subagent_model_reasoning_effort: None, model_reasoning_summary: ReasoningSummary::default(), model_supports_reasoning_summaries: None, - model_reasoning_summary_format: None, model_verbosity: None, chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), base_instructions: None, @@ -3214,7 +3393,6 @@ model_verbosity = "high" include_apply_patch_tool: false, tools_web_search_request: false, use_experimental_unified_exec_tool: false, - use_experimental_use_rmcp_client: false, ghost_snapshot: GhostSnapshotConfig::default(), features: Features::with_defaults(), active_profile: Some("gpt3".to_string()), @@ -3226,7 +3404,17 @@ model_verbosity = "high" tui_notifications: Default::default(), animations: true, show_tooltips: true, + tui_scroll_events_per_tick: None, + tui_scroll_wheel_lines: None, + tui_scroll_trackpad_lines: None, + tui_scroll_trackpad_accel_events: None, + tui_scroll_trackpad_accel_max: None, + tui_scroll_mode: ScrollInputMode::Auto, + tui_scroll_wheel_tick_detect_max_ms: None, + tui_scroll_wheel_like_max_duration_ms: None, + tui_scroll_invert: false, otel: OtelConfig::default(), + lsp: crate::config::types::LspConfig::default(), }; assert_eq!(expected_gpt3_profile_config, gpt3_profile_config); @@ -3265,6 +3453,9 @@ model_verbosity = "high" let expected_zdr_profile_config = Config { model: Some("o3".to_string()), 
plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(), model_context_window: None, model_auto_compact_token_limit: None, @@ -3286,6 +3477,7 @@ model_verbosity = "high" project_doc_fallback_filenames: Vec::new(), tool_output_token_limit: None, codex_home: fixture.codex_home(), + config_layer_stack: Default::default(), history: History::default(), file_opener: UriBasedFileOpener::VsCode, codex_linux_sandbox_exe: None, @@ -3293,9 +3485,11 @@ model_verbosity = "high" show_raw_agent_reasoning: false, model_reasoning_effort: None, plan_model_reasoning_effort: None, + explore_model_reasoning_effort: None, + mini_subagent_model_reasoning_effort: None, + subagent_model_reasoning_effort: None, model_reasoning_summary: ReasoningSummary::default(), model_supports_reasoning_summaries: None, - model_reasoning_summary_format: None, model_verbosity: None, chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), base_instructions: None, @@ -3306,7 +3500,6 @@ model_verbosity = "high" include_apply_patch_tool: false, tools_web_search_request: false, use_experimental_unified_exec_tool: false, - use_experimental_use_rmcp_client: false, ghost_snapshot: GhostSnapshotConfig::default(), features: Features::with_defaults(), active_profile: Some("zdr".to_string()), @@ -3318,7 +3511,17 @@ model_verbosity = "high" tui_notifications: Default::default(), animations: true, show_tooltips: true, + tui_scroll_events_per_tick: None, + tui_scroll_wheel_lines: None, + tui_scroll_trackpad_lines: None, + tui_scroll_trackpad_accel_events: None, + tui_scroll_trackpad_accel_max: None, + tui_scroll_mode: ScrollInputMode::Auto, + tui_scroll_wheel_tick_detect_max_ms: None, + tui_scroll_wheel_like_max_duration_ms: None, + tui_scroll_invert: false, otel: OtelConfig::default(), + lsp: crate::config::types::LspConfig::default(), }; assert_eq!(expected_zdr_profile_config, zdr_profile_config); @@ -3343,6 +3546,9 @@ 
model_verbosity = "high" let expected_gpt5_profile_config = Config { model: Some("gpt-5.1".to_string()), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(), model_context_window: None, model_auto_compact_token_limit: None, @@ -3364,6 +3570,7 @@ model_verbosity = "high" project_doc_fallback_filenames: Vec::new(), tool_output_token_limit: None, codex_home: fixture.codex_home(), + config_layer_stack: Default::default(), history: History::default(), file_opener: UriBasedFileOpener::VsCode, codex_linux_sandbox_exe: None, @@ -3371,9 +3578,11 @@ model_verbosity = "high" show_raw_agent_reasoning: false, model_reasoning_effort: Some(ReasoningEffort::High), plan_model_reasoning_effort: None, + explore_model_reasoning_effort: None, + mini_subagent_model_reasoning_effort: None, + subagent_model_reasoning_effort: None, model_reasoning_summary: ReasoningSummary::Detailed, model_supports_reasoning_summaries: None, - model_reasoning_summary_format: None, model_verbosity: Some(Verbosity::High), chatgpt_base_url: "https://chatgpt.com/backend-api/".to_string(), base_instructions: None, @@ -3384,7 +3593,6 @@ model_verbosity = "high" include_apply_patch_tool: false, tools_web_search_request: false, use_experimental_unified_exec_tool: false, - use_experimental_use_rmcp_client: false, ghost_snapshot: GhostSnapshotConfig::default(), features: Features::with_defaults(), active_profile: Some("gpt5".to_string()), @@ -3396,7 +3604,17 @@ model_verbosity = "high" tui_notifications: Default::default(), animations: true, show_tooltips: true, + tui_scroll_events_per_tick: None, + tui_scroll_wheel_lines: None, + tui_scroll_trackpad_lines: None, + tui_scroll_trackpad_accel_events: None, + tui_scroll_trackpad_accel_max: None, + tui_scroll_mode: ScrollInputMode::Auto, + tui_scroll_wheel_tick_detect_max_ms: None, + tui_scroll_wheel_like_max_duration_ms: None, + tui_scroll_invert: false, otel: 
OtelConfig::default(), + lsp: crate::config::types::LspConfig::default(), }; assert_eq!(expected_gpt5_profile_config, gpt5_profile_config); diff --git a/codex-rs/core/src/config/profile.rs b/codex-rs/core/src/config/profile.rs index 401625f7bf6..f9de930e5e0 100644 --- a/codex-rs/core/src/config/profile.rs +++ b/codex-rs/core/src/config/profile.rs @@ -1,5 +1,6 @@ use codex_utils_absolute_path::AbsolutePathBuf; use serde::Deserialize; +use serde::Serialize; use crate::protocol::AskForApproval; use codex_protocol::config_types::ReasoningSummary; @@ -9,10 +10,13 @@ use codex_protocol::openai_models::ReasoningEffort; /// Collection of common configuration options that a user can define as a unit /// in `config.toml`. -#[derive(Debug, Clone, Default, PartialEq, Deserialize)] +#[derive(Debug, Clone, Default, PartialEq, Serialize, Deserialize)] pub struct ConfigProfile { pub model: Option, pub plan_model: Option, + pub explore_model: Option, + pub mini_subagent_model: Option, + pub subagent_model: Option, /// The key in the `model_providers` map identifying the /// [`ModelProviderInfo`] to use. 
pub model_provider: Option, @@ -20,6 +24,9 @@ pub struct ConfigProfile { pub sandbox_mode: Option, pub model_reasoning_effort: Option, pub plan_model_reasoning_effort: Option, + pub explore_model_reasoning_effort: Option, + pub mini_subagent_model_reasoning_effort: Option, + pub subagent_model_reasoning_effort: Option, pub model_reasoning_summary: Option, pub model_verbosity: Option, pub chatgpt_base_url: Option, @@ -27,7 +34,6 @@ pub struct ConfigProfile { pub experimental_compact_prompt_file: Option, pub include_apply_patch_tool: Option, pub experimental_use_unified_exec_tool: Option, - pub experimental_use_rmcp_client: Option, pub experimental_use_freeform_apply_patch: Option, pub tools_web_search: Option, pub tools_view_image: Option, diff --git a/codex-rs/core/src/config/service.rs b/codex-rs/core/src/config/service.rs index 27785ff0f90..bc6d96bcb84 100644 --- a/codex-rs/core/src/config/service.rs +++ b/codex-rs/core/src/config/service.rs @@ -556,6 +556,10 @@ fn override_message(layer: &ConfigLayerSource) -> String { ConfigLayerSource::System { file } => { format!("Overridden by managed config (system): {}", file.display()) } + ConfigLayerSource::Project { dot_codex_folder } => format!( + "Overridden by project config: {}/{CONFIG_TOML_FILE}", + dot_codex_folder.display(), + ), ConfigLayerSource::SessionFlags => "Overridden by session flags".to_string(), ConfigLayerSource::User { file } => { format!("Overridden by user config: {}", file.display()) @@ -774,15 +778,41 @@ remote_compaction = true }, ); let layers = response.layers.expect("layers present"); - assert_eq!(layers.len(), 2, "expected two layers"); - assert_eq!( - layers.first().unwrap().name, - ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: managed_file } - ); - assert_eq!( - layers.get(1).unwrap().name, - ConfigLayerSource::User { file: user_file } - ); + if cfg!(unix) { + let system_file = AbsolutePathBuf::from_absolute_path( + crate::config_loader::SYSTEM_CONFIG_TOML_FILE_UNIX, + ) + 
.expect("system file"); + assert_eq!(layers.len(), 3, "expected three layers on unix"); + assert_eq!( + layers.first().unwrap().name, + ConfigLayerSource::LegacyManagedConfigTomlFromFile { + file: managed_file.clone() + } + ); + assert_eq!( + layers.get(1).unwrap().name, + ConfigLayerSource::User { + file: user_file.clone() + } + ); + assert_eq!( + layers.get(2).unwrap().name, + ConfigLayerSource::System { file: system_file } + ); + } else { + assert_eq!(layers.len(), 2, "expected two layers"); + assert_eq!( + layers.first().unwrap().name, + ConfigLayerSource::LegacyManagedConfigTomlFromFile { + file: managed_file.clone() + } + ); + assert_eq!( + layers.get(1).unwrap().name, + ConfigLayerSource::User { file: user_file } + ); + } } #[tokio::test] diff --git a/codex-rs/core/src/config/types.rs b/codex-rs/core/src/config/types.rs index 974f62c827f..cbc52a64c2c 100644 --- a/codex-rs/core/src/config/types.rs +++ b/codex-rs/core/src/config/types.rs @@ -17,6 +17,141 @@ use serde::de::Error as SerdeError; pub const DEFAULT_OTEL_ENVIRONMENT: &str = "dev"; +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[serde(deny_unknown_fields)] +pub struct LspServerConfigToml { + pub command: String, + #[serde(default)] + pub args: Vec, +} + +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Eq)] +#[serde(deny_unknown_fields)] +pub struct LspConfigToml { + /// Whether to include a diagnostics summary in the model prompt each turn. + #[serde(default = "default_lsp_prompt_diagnostics")] + pub prompt_diagnostics: bool, + + /// Maximum number of diagnostics to include in the prompt summary. + #[serde(default = "default_lsp_max_prompt_diagnostics")] + pub max_prompt_diagnostics: usize, + + /// Maximum number of diagnostics returned by `lsp_diagnostics` by default. 
+ #[serde(default = "default_lsp_max_tool_diagnostics")] + pub max_tool_diagnostics: usize, + + /// How long `lsp_diagnostics` should wait (in ms) for the first diagnostics update when the + /// language server is still warming up. Set to `0` to disable waiting. + #[serde(default = "default_lsp_tool_diagnostics_wait_ms")] + pub tool_diagnostics_wait_ms: usize, + + /// Max file size in bytes to send to the language server. + #[serde(default = "default_lsp_max_file_bytes")] + pub max_file_bytes: usize, + + /// Globs to ignore when watching files for updates (wildmatch syntax). + #[serde(default)] + pub ignored_globs: Vec, + + /// Optional per-language server overrides keyed by LSP language id (e.g. "rust", "go"). + #[serde(default)] + pub servers: HashMap, +} + +impl Default for LspConfigToml { + fn default() -> Self { + Self { + prompt_diagnostics: default_lsp_prompt_diagnostics(), + max_prompt_diagnostics: default_lsp_max_prompt_diagnostics(), + max_tool_diagnostics: default_lsp_max_tool_diagnostics(), + tool_diagnostics_wait_ms: default_lsp_tool_diagnostics_wait_ms(), + max_file_bytes: default_lsp_max_file_bytes(), + ignored_globs: Vec::new(), + servers: HashMap::new(), + } + } +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct LspConfig { + pub prompt_diagnostics: bool, + pub max_prompt_diagnostics: usize, + pub max_tool_diagnostics: usize, + pub tool_diagnostics_wait_ms: usize, + pub manager: codex_lsp::LspManagerConfig, +} + +impl Default for LspConfig { + fn default() -> Self { + let manager = codex_lsp::LspManagerConfig { + // Manager defaults enabled=false; we gate enablement via `[features].lsp`. 
+ enabled: false, + ..Default::default() + }; + Self { + prompt_diagnostics: default_lsp_prompt_diagnostics(), + max_prompt_diagnostics: default_lsp_max_prompt_diagnostics(), + max_tool_diagnostics: default_lsp_max_tool_diagnostics(), + tool_diagnostics_wait_ms: default_lsp_tool_diagnostics_wait_ms(), + manager, + } + } +} + +impl From for LspConfig { + fn from(toml: LspConfigToml) -> Self { + let mut manager = codex_lsp::LspManagerConfig { + enabled: false, + max_file_bytes: toml.max_file_bytes, + ..Default::default() + }; + if !toml.ignored_globs.is_empty() { + manager.ignored_globs = toml.ignored_globs.clone(); + } + manager.servers = toml + .servers + .into_iter() + .map(|(k, v)| { + ( + k, + codex_lsp::ServerConfig { + command: v.command, + args: v.args, + }, + ) + }) + .collect(); + + Self { + prompt_diagnostics: toml.prompt_diagnostics, + max_prompt_diagnostics: toml.max_prompt_diagnostics, + max_tool_diagnostics: toml.max_tool_diagnostics, + tool_diagnostics_wait_ms: toml.tool_diagnostics_wait_ms, + manager, + } + } +} + +fn default_lsp_prompt_diagnostics() -> bool { + true +} + +fn default_lsp_max_prompt_diagnostics() -> usize { + 10 +} + +fn default_lsp_max_tool_diagnostics() -> usize { + 200 +} + +fn default_lsp_tool_diagnostics_wait_ms() -> usize { + 2000 +} + +fn default_lsp_max_file_bytes() -> usize { + 512 * 1024 +} + #[derive(Serialize, Debug, Clone, PartialEq)] pub struct McpServerConfig { #[serde(flatten)] @@ -221,7 +356,7 @@ mod option_duration_secs { } } -#[derive(Deserialize, Debug, Copy, Clone, PartialEq)] +#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq)] pub enum UriBasedFileOpener { #[serde(rename = "vscode")] VsCode, @@ -253,7 +388,7 @@ impl UriBasedFileOpener { } /// Settings that govern if and what will be written to `~/.codexel/history.jsonl`. 
-#[derive(Deserialize, Debug, Clone, PartialEq, Default)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] pub struct History { /// If true, history entries will not be written to disk. pub persistence: HistoryPersistence, @@ -263,7 +398,7 @@ pub struct History { pub max_bytes: Option, } -#[derive(Deserialize, Debug, Copy, Clone, PartialEq, Default)] +#[derive(Serialize, Deserialize, Debug, Copy, Clone, PartialEq, Default)] #[serde(rename_all = "kebab-case")] pub enum HistoryPersistence { /// Save all history entries to disk. @@ -275,7 +410,7 @@ pub enum HistoryPersistence { // ===== OTEL configuration ===== -#[derive(Deserialize, Debug, Clone, PartialEq)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] pub enum OtelHttpProtocol { /// Binary payload @@ -284,7 +419,7 @@ pub enum OtelHttpProtocol { Json, } -#[derive(Deserialize, Debug, Clone, PartialEq, Default)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] #[serde(rename_all = "kebab-case")] pub struct OtelTlsConfig { pub ca_certificate: Option, @@ -293,7 +428,7 @@ pub struct OtelTlsConfig { } /// Which OTEL exporter to use. -#[derive(Deserialize, Debug, Clone, PartialEq)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq)] #[serde(rename_all = "kebab-case")] pub enum OtelExporterKind { None, @@ -315,7 +450,7 @@ pub enum OtelExporterKind { } /// OTEL settings loaded from config.toml. Fields are optional so we can apply defaults. 
-#[derive(Deserialize, Debug, Clone, PartialEq, Default)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] pub struct OtelConfigToml { /// Log user prompt in traces pub log_user_prompt: Option, @@ -350,7 +485,7 @@ impl Default for OtelConfig { } } -#[derive(Debug, Clone, PartialEq, Eq, Deserialize)] +#[derive(Serialize, Debug, Clone, PartialEq, Eq, Deserialize)] #[serde(untagged)] pub enum Notifications { Enabled(bool), @@ -363,8 +498,26 @@ impl Default for Notifications { } } +/// How TUI2 should interpret mouse scroll events. +/// +/// Terminals generally encode both mouse wheels and trackpads as the same "scroll up/down" mouse +/// button events, without a magnitude. This setting controls whether Codex uses a heuristic to +/// infer wheel vs trackpad per stream, or forces a specific behavior. +#[derive(Serialize, Deserialize, Debug, Clone, Copy, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +#[derive(Default)] +pub enum ScrollInputMode { + /// Infer wheel vs trackpad behavior per scroll stream. + #[default] + Auto, + /// Always treat scroll events as mouse-wheel input (fixed lines per tick). + Wheel, + /// Always treat scroll events as trackpad input (fractional accumulation). + Trackpad, +} + /// Collection of settings that are specific to the TUI. -#[derive(Deserialize, Debug, Clone, PartialEq, Default)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] pub struct Tui { /// Enable desktop notifications from the TUI when the terminal is unfocused. /// Defaults to `true`. @@ -380,6 +533,109 @@ pub struct Tui { /// Defaults to `true`. #[serde(default = "default_true")] pub show_tooltips: bool, + + /// Override the *wheel* event density used to normalize TUI2 scrolling. + /// + /// Terminals generally deliver both mouse wheels and trackpads as discrete `scroll up/down` + /// mouse events with direction but no magnitude. 
Unfortunately, the *number* of raw events + /// per physical wheel notch varies by terminal (commonly 1, 3, or 9+). TUI2 uses this value + /// to normalize that raw event density into consistent "wheel tick" behavior. + /// + /// Wheel math (conceptually): + /// + /// - A single event contributes `1 / scroll_events_per_tick` tick-equivalents. + /// - Wheel-like streams then scale that by `scroll_wheel_lines` so one physical notch scrolls + /// a fixed number of lines. + /// + /// Trackpad math is intentionally *not* fully tied to this value: in trackpad-like mode, TUI2 + /// uses `min(scroll_events_per_tick, 3)` as the divisor so terminals with dense wheel ticks + /// (e.g. 9 events per notch) do not make trackpads feel artificially slow. + /// + /// Defaults are derived per terminal from [`crate::terminal::TerminalInfo`] when TUI2 starts. + /// See `codex-rs/tui2/docs/scroll_input_model.md` for the probe data and rationale. + pub scroll_events_per_tick: Option, + + /// Override how many transcript lines one physical *wheel notch* should scroll in TUI2. + /// + /// This is the "classic feel" knob. Defaults to 3. + /// + /// Wheel-like per-event contribution is `scroll_wheel_lines / scroll_events_per_tick`. For + /// example, in a terminal that emits 9 events per notch, the default `3 / 9` yields 1/3 of a + /// line per event and totals 3 lines once the full notch burst arrives. + /// + /// See `codex-rs/tui2/docs/scroll_input_model.md` for details on the stream model and the + /// wheel/trackpad heuristic. + pub scroll_wheel_lines: Option, + + /// Override baseline trackpad scroll sensitivity in TUI2. + /// + /// Trackpads do not have discrete notches, but terminals still emit discrete `scroll up/down` + /// events. In trackpad-like mode, TUI2 accumulates fractional scroll and only applies whole + /// lines to the viewport. 
+ /// + /// Trackpad per-event contribution is: + /// + /// - `scroll_trackpad_lines / min(scroll_events_per_tick, 3)` + /// + /// (plus optional bounded acceleration; see `scroll_trackpad_accel_*`). The `min(..., 3)` + /// divisor is deliberate: `scroll_events_per_tick` is calibrated from *wheel* behavior and + /// can be much larger than trackpad event density, which would otherwise make trackpads feel + /// too slow in dense-wheel terminals. + /// + /// Defaults to 1, meaning one tick-equivalent maps to one transcript line. + pub scroll_trackpad_lines: Option, + + /// Trackpad acceleration: approximate number of events required to gain +1x speed in TUI2. + /// + /// This keeps small swipes precise while allowing large/faster swipes to cover more content. + /// Defaults are chosen to address terminals where trackpad event density is comparatively low. + /// + /// Concretely, TUI2 computes an acceleration multiplier for trackpad-like streams: + /// + /// - `multiplier = clamp(1 + abs(events) / scroll_trackpad_accel_events, 1..scroll_trackpad_accel_max)` + /// + /// The multiplier is applied to the stream’s computed line delta (including any carried + /// fractional remainder). + pub scroll_trackpad_accel_events: Option, + + /// Trackpad acceleration: maximum multiplier applied to trackpad-like streams. + /// + /// Set to 1 to effectively disable trackpad acceleration. + /// + /// See [`Tui::scroll_trackpad_accel_events`] for the exact multiplier formula. + pub scroll_trackpad_accel_max: Option, + + /// Select how TUI2 interprets mouse scroll input. + /// + /// - `auto` (default): infer wheel vs trackpad per scroll stream. + /// - `wheel`: always use wheel behavior (fixed lines per wheel notch). + /// - `trackpad`: always use trackpad behavior (fractional accumulation; wheel may feel slow). + #[serde(default)] + pub scroll_mode: ScrollInputMode, + + /// Auto-mode threshold: maximum time (ms) for the first tick-worth of events to arrive. 
+ /// + /// In `scroll_mode = "auto"`, TUI2 starts a stream as trackpad-like (to avoid overshoot) and + /// promotes it to wheel-like if `scroll_events_per_tick` events arrive "quickly enough". This + /// threshold controls what "quickly enough" means. + /// + /// Most users should leave this unset; it is primarily for terminals that emit wheel ticks + /// batched over longer time spans. + pub scroll_wheel_tick_detect_max_ms: Option, + + /// Auto-mode fallback: maximum duration (ms) that a very small stream is still treated as wheel-like. + /// + /// This is only used when `scroll_events_per_tick` is effectively 1 (one event per wheel + /// notch). In that case, we cannot observe a "tick completion time", so TUI2 treats a + /// short-lived, small stream (<= 2 events) as wheel-like to preserve classic wheel behavior. + pub scroll_wheel_like_max_duration_ms: Option, + + /// Invert mouse scroll direction in TUI2. + /// + /// This flips the scroll sign after terminal detection. It is applied consistently to both + /// wheel and trackpad input. + #[serde(default)] + pub scroll_invert: bool, } const fn default_true() -> bool { @@ -389,7 +645,7 @@ const fn default_true() -> bool { /// Settings for notices we display to users via the tui and app-server clients /// (primarily the Codex IDE extension). NOTE: these are different from /// notifications - notices are warnings, NUX screens, acknowledgements, etc. -#[derive(Deserialize, Debug, Clone, PartialEq, Default)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] pub struct Notice { /// Tracks whether the user has acknowledged the full access warning prompt. 
pub hide_full_access_warning: Option, @@ -412,7 +668,7 @@ impl Notice { pub(crate) const TABLE_KEY: &'static str = "notice"; } -#[derive(Deserialize, Debug, Clone, PartialEq, Default)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] pub struct SandboxWorkspaceWrite { #[serde(default)] pub writable_roots: Vec, @@ -435,7 +691,7 @@ impl From for codex_app_server_protocol::SandboxSettings } } -#[derive(Deserialize, Debug, Clone, PartialEq, Default)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] #[serde(rename_all = "kebab-case")] pub enum ShellEnvironmentPolicyInherit { /// "Core" environment variables for the platform. On UNIX, this would @@ -452,7 +708,7 @@ pub enum ShellEnvironmentPolicyInherit { /// Policy for building the `env` when spawning a process via either the /// `shell` or `local_shell` tool. -#[derive(Deserialize, Debug, Clone, PartialEq, Default)] +#[derive(Serialize, Deserialize, Debug, Clone, PartialEq, Default)] pub struct ShellEnvironmentPolicyToml { pub inherit: Option, diff --git a/codex-rs/core/src/config_loader/mod.rs b/codex-rs/core/src/config_loader/mod.rs index c05825db894..73624c83c7a 100644 --- a/codex-rs/core/src/config_loader/mod.rs +++ b/codex-rs/core/src/config_loader/mod.rs @@ -11,12 +11,14 @@ mod state; mod tests; use crate::config::CONFIG_TOML_FILE; +use crate::config::ConfigToml; use crate::config_loader::config_requirements::ConfigRequirementsToml; use crate::config_loader::layer_io::LoadedConfigLayers; use codex_app_server_protocol::ConfigLayerSource; use codex_protocol::config_types::SandboxMode; use codex_protocol::protocol::AskForApproval; use codex_utils_absolute_path::AbsolutePathBuf; +use codex_utils_absolute_path::AbsolutePathBufGuard; use serde::Deserialize; use std::io; use std::path::Path; @@ -26,11 +28,19 @@ pub use config_requirements::ConfigRequirements; pub use merge::merge_toml_values; pub use state::ConfigLayerEntry; pub use state::ConfigLayerStack; +pub use 
state::ConfigLayerStackOrdering; pub use state::LoaderOverrides; /// On Unix systems, load requirements from this file path, if present. const DEFAULT_REQUIREMENTS_TOML_FILE_UNIX: &str = "/etc/codex/requirements.toml"; +/// On Unix systems, load default settings from this file path, if present. +/// Note that /etc/codex/ is treated as a "config folder," so subfolders such +/// as skills/ and rules/ will also be honored. +pub const SYSTEM_CONFIG_TOML_FILE_UNIX: &str = "/etc/codex/config.toml"; + +const DEFAULT_PROJECT_ROOT_MARKERS: &[&str] = &[".git"]; + /// To build up the set of admin-enforced constraints, we build up from multiple /// configuration layers in the following order, but a constraint defined in an /// earlier layer cannot be overridden by a later layer: @@ -68,7 +78,7 @@ pub async fn load_config_layers_state( ) -> io::Result { let mut config_requirements_toml = ConfigRequirementsToml::default(); - // TODO(mbolin): Support an entry in MDM for config requirements and use it + // TODO(gt): Support an entry in MDM for config requirements and use it // with `config_requirements_toml.merge_unset_fields(...)`, if present. // Honor /etc/codex/requirements.toml. @@ -91,44 +101,59 @@ pub async fn load_config_layers_state( let mut layers = Vec::::new(); - // TODO(mbolin): Honor managed preferences (macOS only). - // TODO(mbolin): Honor /etc/codex/config.toml. + // TODO(gt): Honor managed preferences (macOS only). + + // Include an entry for the "system" config folder, loading its config.toml, + // if it exists. + let system_config_toml_file = if cfg!(unix) { + Some(AbsolutePathBuf::from_absolute_path( + SYSTEM_CONFIG_TOML_FILE_UNIX, + )?) + } else { + // TODO(gt): Determine the path to load on Windows. 
+ None + }; + if let Some(system_config_toml_file) = system_config_toml_file { + let system_layer = + load_config_toml_for_required_layer(&system_config_toml_file, |config_toml| { + ConfigLayerEntry::new( + ConfigLayerSource::System { + file: system_config_toml_file.clone(), + }, + config_toml, + ) + }) + .await?; + layers.push(system_layer); + } // Add a layer for $CODEX_HOME/config.toml if it exists. Note if the file // exists, but is malformed, then this error should be propagated to the // user. let user_file = AbsolutePathBuf::resolve_path_against_base(CONFIG_TOML_FILE, codex_home)?; - match tokio::fs::read_to_string(&user_file).await { - Ok(contents) => { - let user_config: TomlValue = toml::from_str(&contents).map_err(|e| { - io::Error::new( - io::ErrorKind::InvalidData, - format!( - "Error parsing user config file {}: {e}", - user_file.as_path().display(), - ), - ) - })?; - layers.push(ConfigLayerEntry::new( - ConfigLayerSource::User { file: user_file }, - user_config, - )); - } - Err(e) => { - if e.kind() != io::ErrorKind::NotFound { - return Err(io::Error::new( - e.kind(), - format!( - "Failed to read user config file {}: {e}", - user_file.as_path().display(), - ), - )); - } + let user_layer = load_config_toml_for_required_layer(&user_file, |config_toml| { + ConfigLayerEntry::new( + ConfigLayerSource::User { + file: user_file.clone(), + }, + config_toml, + ) + }) + .await?; + layers.push(user_layer); + + if let Some(cwd) = cwd { + let mut merged_so_far = TomlValue::Table(toml::map::Map::new()); + for layer in &layers { + merge_toml_values(&mut merged_so_far, &layer.config); } - } + let project_root_markers = project_root_markers_from_config(&merged_so_far)? + .unwrap_or_else(default_project_root_markers); - // TODO(mbolin): Add layers for cwd, tree, and repo config files. 
- let _ = cwd; + let project_root = find_project_root(&cwd, &project_root_markers).await?; + let project_layers = load_project_layers(&cwd, &project_root).await?; + layers.extend(project_layers); + } // Add a layer for runtime overrides from the CLI or UI, if any exist. if !cli_overrides.is_empty() { @@ -149,11 +174,20 @@ pub async fn load_config_layers_state( managed_config_from_mdm, } = loaded_config_layers; if let Some(config) = managed_config { + let managed_parent = config.file.as_path().parent().ok_or_else(|| { + io::Error::new( + io::ErrorKind::InvalidData, + format!( + "Managed config file {} has no parent directory", + config.file.as_path().display() + ), + ) + })?; + let managed_config = + resolve_relative_paths_in_config_toml(config.managed_config, managed_parent)?; layers.push(ConfigLayerEntry::new( - ConfigLayerSource::LegacyManagedConfigTomlFromFile { - file: config.file.clone(), - }, - config.managed_config, + ConfigLayerSource::LegacyManagedConfigTomlFromFile { file: config.file }, + managed_config, )); } if let Some(config) = managed_config_from_mdm { @@ -166,6 +200,52 @@ pub async fn load_config_layers_state( ConfigLayerStack::new(layers, config_requirements_toml.try_into()?) } +/// Attempts to load a config.toml file from `config_toml`. +/// - If the file exists and is valid TOML, passes the parsed `toml::Value` to +/// `create_entry` and returns the resulting layer entry. +/// - If the file does not exist, uses an empty `Table` with `create_entry` and +/// returns the resulting layer entry. +/// - If there is an error reading the file or parsing the TOML, returns an +/// error. 
+async fn load_config_toml_for_required_layer( + config_toml: impl AsRef, + create_entry: impl FnOnce(TomlValue) -> ConfigLayerEntry, +) -> io::Result { + let toml_file = config_toml.as_ref(); + let toml_value = match tokio::fs::read_to_string(toml_file).await { + Ok(contents) => { + let config: TomlValue = toml::from_str(&contents).map_err(|e| { + io::Error::new( + io::ErrorKind::InvalidData, + format!("Error parsing config file {}: {e}", toml_file.display()), + ) + })?; + let config_parent = toml_file.parent().ok_or_else(|| { + io::Error::new( + io::ErrorKind::InvalidData, + format!( + "Config file {} has no parent directory", + toml_file.display() + ), + ) + })?; + resolve_relative_paths_in_config_toml(config, config_parent) + } + Err(e) => { + if e.kind() == io::ErrorKind::NotFound { + Ok(TomlValue::Table(toml::map::Map::new())) + } else { + Err(io::Error::new( + e.kind(), + format!("Failed to read config file {}: {e}", toml_file.display()), + )) + } + } + }?; + + Ok(create_entry(toml_value)) +} + /// If available, apply requirements from `/etc/codex/requirements.toml` to /// `config_requirements_toml` by filling in any unset fields. async fn load_requirements_toml( @@ -235,6 +315,217 @@ async fn load_requirements_from_legacy_scheme( Ok(()) } +/// Reads `project_root_markers` from the [toml::Value] produced by merging +/// `config.toml` from the config layers in the stack preceding +/// [ConfigLayerSource::Project]. +/// +/// Invariants: +/// - If `project_root_markers` is not specified, returns `Ok(None)`. +/// - If `project_root_markers` is specified, returns `Ok(Some(markers))` where +/// `markers` is a `Vec` (including `Ok(Some(Vec::new()))` for an +/// empty array, which indicates that root detection should be disabled). +/// - Returns an error if `project_root_markers` is specified but is not an +/// array of strings. 
+fn project_root_markers_from_config(config: &TomlValue) -> io::Result>> { + let Some(table) = config.as_table() else { + return Ok(None); + }; + let Some(markers_value) = table.get("project_root_markers") else { + return Ok(None); + }; + let TomlValue::Array(entries) = markers_value else { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "project_root_markers must be an array of strings", + )); + }; + if entries.is_empty() { + return Ok(Some(Vec::new())); + } + let mut markers = Vec::new(); + for entry in entries { + let Some(marker) = entry.as_str() else { + return Err(io::Error::new( + io::ErrorKind::InvalidData, + "project_root_markers must be an array of strings", + )); + }; + markers.push(marker.to_string()); + } + Ok(Some(markers)) +} + +fn default_project_root_markers() -> Vec { + DEFAULT_PROJECT_ROOT_MARKERS + .iter() + .map(ToString::to_string) + .collect() +} + +/// Takes a `toml::Value` parsed from a config.toml file and walks through it, +/// resolving any `AbsolutePathBuf` fields against `base_dir`, returning a new +/// `toml::Value` with the same shape but with paths resolved. +/// +/// This ensures that multiple config layers can be merged together correctly +/// even if they were loaded from different directories. 
+fn resolve_relative_paths_in_config_toml( + value_from_config_toml: TomlValue, + base_dir: &Path, +) -> io::Result { + // Use the serialize/deserialize round-trip to convert the + // `toml::Value` into a `ConfigToml` with `AbsolutePathBuf` fields resolved. + let _guard = AbsolutePathBufGuard::new(base_dir); + let Ok(resolved) = value_from_config_toml.clone().try_into::() else { + return Ok(value_from_config_toml); + }; + drop(_guard); + + let resolved_value = TomlValue::try_from(resolved).map_err(|e| { + io::Error::new( + io::ErrorKind::InvalidData, + format!("Failed to serialize resolved config: {e}"), + ) + })?; + + Ok(copy_shape_from_original( + &value_from_config_toml, + &resolved_value, + )) +} + +/// Ensure that every field in `original` is present in the returned +/// `toml::Value`, taking the value from `resolved` where possible. This ensures +/// the fields that we "removed" during the serialize/deserialize round-trip in +/// `resolve_relative_paths_in_config_toml` are preserved, out of an abundance of caution. +fn copy_shape_from_original(original: &TomlValue, resolved: &TomlValue) -> TomlValue { + match (original, resolved) { + (TomlValue::Table(original_table), TomlValue::Table(resolved_table)) => { + let mut table = toml::map::Map::new(); + for (key, original_value) in original_table { + let resolved_value = resolved_table.get(key).unwrap_or(original_value); + table.insert( + key.clone(), + copy_shape_from_original(original_value, resolved_value), + ); + } + TomlValue::Table(table) + } + (TomlValue::Array(original_array), TomlValue::Array(resolved_array)) => { + let mut items = Vec::new(); + for (index, original_value) in original_array.iter().enumerate() { + let resolved_value = resolved_array.get(index).unwrap_or(original_value); + items.push(copy_shape_from_original(original_value, resolved_value)); + } + TomlValue::Array(items) + } + (_, resolved_value) => resolved_value.clone(), + } +} + +async fn find_project_root( + cwd: &AbsolutePathBuf, + project_root_markers: &[String], +) ->
io::Result { + if project_root_markers.is_empty() { + return Ok(cwd.clone()); + } + + for ancestor in cwd.as_path().ancestors() { + for marker in project_root_markers { + let marker_path = ancestor.join(marker); + if tokio::fs::metadata(&marker_path).await.is_ok() { + return AbsolutePathBuf::from_absolute_path(ancestor); + } + } + } + Ok(cwd.clone()) +} + +/// Return the appropriate list of layers (each with +/// [ConfigLayerSource::Project] as the source) between `cwd` and +/// `project_root`, inclusive. The list is ordered in _increasing_ precedence, +/// starting from folders closest to `project_root` (which is the lowest +/// precedence) to those closest to `cwd` (which is the highest precedence). +async fn load_project_layers( + cwd: &AbsolutePathBuf, + project_root: &AbsolutePathBuf, +) -> io::Result> { + let mut dirs = cwd + .as_path() + .ancestors() + .scan(false, |done, a| { + if *done { + None + } else { + if a == project_root.as_path() { + *done = true; + } + Some(a) + } + }) + .collect::>(); + dirs.reverse(); + + let mut layers = Vec::new(); + for dir in dirs { + let dot_codex = dir.join(".codex"); + if !tokio::fs::metadata(&dot_codex) + .await + .map(|meta| meta.is_dir()) + .unwrap_or(false) + { + continue; + } + + let dot_codex_abs = AbsolutePathBuf::from_absolute_path(&dot_codex)?; + let config_file = dot_codex_abs.join(CONFIG_TOML_FILE)?; + match tokio::fs::read_to_string(&config_file).await { + Ok(contents) => { + let config: TomlValue = toml::from_str(&contents).map_err(|e| { + io::Error::new( + io::ErrorKind::InvalidData, + format!( + "Error parsing project config file {}: {e}", + config_file.as_path().display(), + ), + ) + })?; + let config = + resolve_relative_paths_in_config_toml(config, dot_codex_abs.as_path())?; + layers.push(ConfigLayerEntry::new( + ConfigLayerSource::Project { + dot_codex_folder: dot_codex_abs, + }, + config, + )); + } + Err(err) => { + if err.kind() == io::ErrorKind::NotFound { + // If there is no config.toml file, record
an empty entry + // for this project layer, as this may still have subfolders + // that are significant in the overall ConfigLayerStack. + layers.push(ConfigLayerEntry::new( + ConfigLayerSource::Project { + dot_codex_folder: dot_codex_abs, + }, + TomlValue::Table(toml::map::Map::new()), + )); + } else { + return Err(io::Error::new( + err.kind(), + format!( + "Failed to read project config file {}: {err}", + config_file.as_path().display(), + ), + )); + } + } + } + } + + Ok(layers) +} + /// The legacy mechanism for specifying admin-enforced configuration is to read /// from a file like `/etc/codex/managed_config.toml` that has the same /// structure as `config.toml` where fields like `approval_policy` can specify @@ -266,3 +557,47 @@ impl From for ConfigRequirementsToml { config_requirements_toml } } + +// Cannot name this `mod tests` because of tests.rs in this folder. +#[cfg(test)] +mod unit_tests { + use super::*; + use tempfile::tempdir; + + #[test] + fn ensure_resolve_relative_paths_in_config_toml_preserves_all_fields() -> anyhow::Result<()> { + let tmp = tempdir()?; + let base_dir = tmp.path(); + let contents = r#" +# This is a field recognized by config.toml that is an AbsolutePathBuf in +# the ConfigToml struct. +experimental_instructions_file = "./some_file.md" + +# This is a field recognized by config.toml. +model = "gpt-1000" + +# This is a field not recognized by config.toml. +foo = "xyzzy" +"#; + let user_config: TomlValue = toml::from_str(contents)?; + + let normalized_toml_value = resolve_relative_paths_in_config_toml(user_config, base_dir)?; + let mut expected_toml_value = toml::map::Map::new(); + expected_toml_value.insert( + "experimental_instructions_file".to_string(), + TomlValue::String( + AbsolutePathBuf::resolve_path_against_base("./some_file.md", base_dir)? 
+ .as_path() + .to_string_lossy() + .to_string(), + ), + ); + expected_toml_value.insert( + "model".to_string(), + TomlValue::String("gpt-1000".to_string()), + ); + expected_toml_value.insert("foo".to_string(), TomlValue::String("xyzzy".to_string())); + assert_eq!(normalized_toml_value, TomlValue::Table(expected_toml_value)); + Ok(()) + } +} diff --git a/codex-rs/core/src/config_loader/state.rs b/codex-rs/core/src/config_loader/state.rs index 864a9c7ed73..efb33dfac5b 100644 --- a/codex-rs/core/src/config_loader/state.rs +++ b/codex-rs/core/src/config_loader/state.rs @@ -19,7 +19,7 @@ pub struct LoaderOverrides { pub managed_preferences_base64: Option, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, PartialEq)] pub struct ConfigLayerEntry { pub name: ConfigLayerSource, pub config: TomlValue, @@ -50,9 +50,28 @@ impl ConfigLayerEntry { config: serde_json::to_value(&self.config).unwrap_or(JsonValue::Null), } } + + // Get the `.codex/` folder associated with this config layer, if any. + pub fn config_folder(&self) -> Option { + match &self.name { + ConfigLayerSource::Mdm { .. } => None, + ConfigLayerSource::System { file } => file.parent(), + ConfigLayerSource::User { file } => file.parent(), + ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder.clone()), + ConfigLayerSource::SessionFlags => None, + ConfigLayerSource::LegacyManagedConfigTomlFromFile { .. } => None, + ConfigLayerSource::LegacyManagedConfigTomlFromMdm => None, + } + } +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ConfigLayerStackOrdering { + LowestPrecedenceFirst, + HighestPrecedenceFirst, } -#[derive(Debug, Clone)] +#[derive(Debug, Clone, Default, PartialEq)] pub struct ConfigLayerStack { /// Layers are listed from lowest precedence (base) to highest (top), so /// later entries in the Vec override earlier ones. 
@@ -156,7 +175,16 @@ impl ConfigLayerStack { /// Returns the highest-precedence to lowest-precedence layers, so /// `ConfigLayerSource::SessionFlags` would be first, if present. pub fn layers_high_to_low(&self) -> Vec<&ConfigLayerEntry> { - self.layers.iter().rev().collect() + self.get_layers(ConfigLayerStackOrdering::HighestPrecedenceFirst) + } + + /// Returns the layers in the requested precedence ordering: with + /// `HighestPrecedenceFirst`, `ConfigLayerSource::SessionFlags` would be first, if present. + pub fn get_layers(&self, ordering: ConfigLayerStackOrdering) -> Vec<&ConfigLayerEntry> { + match ordering { + ConfigLayerStackOrdering::HighestPrecedenceFirst => self.layers.iter().rev().collect(), + ConfigLayerStackOrdering::LowestPrecedenceFirst => self.layers.iter().collect(), + } + } } @@ -170,7 +198,12 @@ fn verify_layer_ordering(layers: &[ConfigLayerEntry]) -> std::io::Result = None; + let mut previous_project_dot_codex_folder: Option<&AbsolutePathBuf> = None; for (index, layer) in layers.iter().enumerate() { if matches!(layer.name, ConfigLayerSource::User { ..
}) { if user_layer_index.is_some() { @@ -181,6 +214,32 @@ fn verify_layer_ordering(layers: &[ConfigLayerEntry]) -> std::io::Result std::io::Result<()> { + let tmp = tempdir()?; + let project_root = tmp.path().join("project"); + let nested = project_root.join("child"); + tokio::fs::create_dir_all(nested.join(".codex")).await?; + tokio::fs::create_dir_all(project_root.join(".codex")).await?; + tokio::fs::write(project_root.join(".git"), "gitdir: here").await?; + + tokio::fs::write( + project_root.join(".codex").join(CONFIG_TOML_FILE), + "foo = \"root\"\n", + ) + .await?; + tokio::fs::write( + nested.join(".codex").join(CONFIG_TOML_FILE), + "foo = \"child\"\n", + ) + .await?; + + let codex_home = tmp.path().join("home"); + tokio::fs::create_dir_all(&codex_home).await?; + let cwd = AbsolutePathBuf::from_absolute_path(&nested)?; + let layers = load_config_layers_state( + &codex_home, + Some(cwd), + &[] as &[(String, TomlValue)], + LoaderOverrides::default(), + ) + .await?; + + let project_layers: Vec<_> = layers + .layers_high_to_low() + .into_iter() + .filter_map(|layer| match &layer.name { + super::ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder), + _ => None, + }) + .collect(); + assert_eq!(project_layers.len(), 2); + assert_eq!(project_layers[0].as_path(), nested.join(".codex").as_path()); + assert_eq!( + project_layers[1].as_path(), + project_root.join(".codex").as_path() + ); + + let config = layers.effective_config(); + let foo = config + .get("foo") + .and_then(TomlValue::as_str) + .expect("foo entry"); + assert_eq!(foo, "child"); + Ok(()) +} + +#[tokio::test] +async fn project_paths_resolve_relative_to_dot_codex_and_override_in_order() -> std::io::Result<()> +{ + let tmp = tempdir()?; + let project_root = tmp.path().join("project"); + let nested = project_root.join("child"); + tokio::fs::create_dir_all(project_root.join(".codex")).await?; + tokio::fs::create_dir_all(nested.join(".codex")).await?; + 
tokio::fs::write(project_root.join(".git"), "gitdir: here").await?; + + let root_cfg = r#" +experimental_instructions_file = "root.txt" +"#; + let nested_cfg = r#" +experimental_instructions_file = "child.txt" +"#; + tokio::fs::write(project_root.join(".codex").join(CONFIG_TOML_FILE), root_cfg).await?; + tokio::fs::write(nested.join(".codex").join(CONFIG_TOML_FILE), nested_cfg).await?; + tokio::fs::write( + project_root.join(".codex").join("root.txt"), + "root instructions", + ) + .await?; + tokio::fs::write( + nested.join(".codex").join("child.txt"), + "child instructions", + ) + .await?; + + let codex_home = tmp.path().join("home"); + tokio::fs::create_dir_all(&codex_home).await?; + + let config = ConfigBuilder::default() + .codex_home(codex_home) + .harness_overrides(ConfigOverrides { + cwd: Some(nested.clone()), + ..ConfigOverrides::default() + }) + .build() + .await?; + + assert_eq!( + config.base_instructions.as_deref(), + Some("child instructions") + ); + + Ok(()) +} + +#[tokio::test] +async fn project_layer_is_added_when_dot_codex_exists_without_config_toml() -> std::io::Result<()> { + let tmp = tempdir()?; + let project_root = tmp.path().join("project"); + let nested = project_root.join("child"); + tokio::fs::create_dir_all(&nested).await?; + tokio::fs::create_dir_all(project_root.join(".codex")).await?; + tokio::fs::write(project_root.join(".git"), "gitdir: here").await?; + + let codex_home = tmp.path().join("home"); + tokio::fs::create_dir_all(&codex_home).await?; + let cwd = AbsolutePathBuf::from_absolute_path(&nested)?; + let layers = load_config_layers_state( + &codex_home, + Some(cwd), + &[] as &[(String, TomlValue)], + LoaderOverrides::default(), + ) + .await?; + + let project_layers: Vec<_> = layers + .layers_high_to_low() + .into_iter() + .filter(|layer| matches!(layer.name, super::ConfigLayerSource::Project { .. 
})) + .collect(); + assert_eq!( + vec![&ConfigLayerEntry { + name: super::ConfigLayerSource::Project { + dot_codex_folder: AbsolutePathBuf::from_absolute_path(project_root.join(".codex"))?, + }, + config: TomlValue::Table(toml::map::Map::new()), + version: version_for_toml(&TomlValue::Table(toml::map::Map::new())), + }], + project_layers + ); + + Ok(()) +} + +#[tokio::test] +async fn project_root_markers_supports_alternate_markers() -> std::io::Result<()> { + let tmp = tempdir()?; + let project_root = tmp.path().join("project"); + let nested = project_root.join("child"); + tokio::fs::create_dir_all(project_root.join(".codex")).await?; + tokio::fs::create_dir_all(nested.join(".codex")).await?; + tokio::fs::write(project_root.join(".hg"), "hg").await?; + tokio::fs::write( + project_root.join(".codex").join(CONFIG_TOML_FILE), + "foo = \"root\"\n", + ) + .await?; + tokio::fs::write( + nested.join(".codex").join(CONFIG_TOML_FILE), + "foo = \"child\"\n", + ) + .await?; + + let codex_home = tmp.path().join("home"); + tokio::fs::create_dir_all(&codex_home).await?; + tokio::fs::write( + codex_home.join(CONFIG_TOML_FILE), + r#" +project_root_markers = [".hg"] +"#, + ) + .await?; + + let cwd = AbsolutePathBuf::from_absolute_path(&nested)?; + let layers = load_config_layers_state( + &codex_home, + Some(cwd), + &[] as &[(String, TomlValue)], + LoaderOverrides::default(), + ) + .await?; + + let project_layers: Vec<_> = layers + .layers_high_to_low() + .into_iter() + .filter_map(|layer| match &layer.name { + super::ConfigLayerSource::Project { dot_codex_folder } => Some(dot_codex_folder), + _ => None, + }) + .collect(); + assert_eq!(project_layers.len(), 2); + assert_eq!(project_layers[0].as_path(), nested.join(".codex").as_path()); + assert_eq!( + project_layers[1].as_path(), + project_root.join(".codex").as_path() + ); + + let merged = layers.effective_config(); + let foo = merged + .get("foo") + .and_then(TomlValue::as_str) + .expect("foo entry"); + assert_eq!(foo, "child"); + 
+ Ok(()) +} diff --git a/codex-rs/core/src/conversation_manager.rs b/codex-rs/core/src/conversation_manager.rs index 5093e03c60f..8cd48c846c3 100644 --- a/codex-rs/core/src/conversation_manager.rs +++ b/codex-rs/core/src/conversation_manager.rs @@ -16,6 +16,8 @@ use crate::protocol::EventMsg; use crate::protocol::SessionConfiguredEvent; use crate::rollout::RolloutRecorder; use crate::skills::SkillsManager; +use codex_lsp::LspManager; +use codex_lsp::LspManagerConfig; use codex_protocol::ConversationId; use codex_protocol::items::TurnItem; use codex_protocol::models::ResponseItem; @@ -45,6 +47,7 @@ pub struct ConversationManager { auth_manager: Arc, models_manager: Arc, skills_manager: Arc, + lsp_manager: LspManager, session_source: SessionSource, #[cfg(any(test, feature = "test-support"))] _test_codex_home_guard: Option, @@ -59,6 +62,7 @@ impl ConversationManager { session_source, models_manager: Arc::new(ModelsManager::new(auth_manager)), skills_manager, + lsp_manager: LspManager::new(LspManagerConfig::default()), #[cfg(any(test, feature = "test-support"))] _test_codex_home_guard: None, } @@ -91,6 +95,7 @@ impl ConversationManager { session_source: SessionSource::Exec, models_manager: Arc::new(ModelsManager::with_provider(auth_manager, provider)), skills_manager, + lsp_manager: LspManager::new(LspManagerConfig::default()), _test_codex_home_guard: None, } } @@ -103,6 +108,10 @@ impl ConversationManager { self.skills_manager.clone() } + pub fn lsp_manager(&self) -> LspManager { + self.lsp_manager.clone() + } + pub async fn new_conversation(&self, config: Config) -> CodexResult { self.spawn_conversation( config, @@ -126,6 +135,7 @@ impl ConversationManager { auth_manager, models_manager, self.skills_manager.clone(), + self.lsp_manager.clone(), InitialHistory::New, self.session_source.clone(), ) @@ -204,6 +214,7 @@ impl ConversationManager { auth_manager, self.models_manager.clone(), self.skills_manager.clone(), + self.lsp_manager.clone(), initial_history, 
self.session_source.clone(), ) @@ -246,6 +257,7 @@ impl ConversationManager { auth_manager, self.models_manager.clone(), self.skills_manager.clone(), + self.lsp_manager.clone(), history, self.session_source.clone(), ) @@ -401,6 +413,7 @@ mod tests { RolloutItem::ResponseItem(items[0].clone()), RolloutItem::ResponseItem(items[1].clone()), RolloutItem::ResponseItem(items[2].clone()), + RolloutItem::ResponseItem(items[3].clone()), ]; assert_eq!( diff --git a/codex-rs/core/src/exec_policy.rs b/codex-rs/core/src/exec_policy.rs index 8917610ffc1..5caf7d9b82d 100644 --- a/codex-rs/core/src/exec_policy.rs +++ b/codex-rs/core/src/exec_policy.rs @@ -3,7 +3,11 @@ use std::path::Path; use std::path::PathBuf; use std::sync::Arc; +use arc_swap::ArcSwap; + use crate::command_safety::is_dangerous_command::requires_initial_appoval; +use crate::config_loader::ConfigLayerStack; +use crate::config_loader::ConfigLayerStackOrdering; use codex_execpolicy::AmendError; use codex_execpolicy::Decision; use codex_execpolicy::Error as ExecPolicyRuleError; @@ -17,7 +21,6 @@ use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::SandboxPolicy; use thiserror::Error; use tokio::fs; -use tokio::sync::RwLock; use tokio::task::spawn_blocking; use crate::bash::parse_shell_lc_plain_commands; @@ -80,20 +83,141 @@ pub enum ExecPolicyUpdateError { FeatureDisabled, } -pub(crate) async fn load_exec_policy_for_features( +pub(crate) struct ExecPolicyManager { + policy: ArcSwap, +} + +impl ExecPolicyManager { + pub(crate) fn new(policy: Arc) -> Self { + Self { + policy: ArcSwap::from(policy), + } + } + + pub(crate) async fn load( + features: &Features, + config_stack: &ConfigLayerStack, + ) -> Result { + let policy = load_exec_policy_for_features(features, config_stack).await?; + Ok(Self::new(Arc::new(policy))) + } + + pub(crate) fn current(&self) -> Arc { + self.policy.load_full() + } + + pub(crate) async fn create_exec_approval_requirement_for_command( + &self, + features: &Features, + 
command: &[String], + approval_policy: AskForApproval, + sandbox_policy: &SandboxPolicy, + sandbox_permissions: SandboxPermissions, + ) -> ExecApprovalRequirement { + let exec_policy = self.current(); + let commands = + parse_shell_lc_plain_commands(command).unwrap_or_else(|| vec![command.to_vec()]); + let heuristics_fallback = |cmd: &[String]| { + if requires_initial_appoval(approval_policy, sandbox_policy, cmd, sandbox_permissions) { + Decision::Prompt + } else { + Decision::Allow + } + }; + let evaluation = exec_policy.check_multiple(commands.iter(), &heuristics_fallback); + + match evaluation.decision { + Decision::Forbidden => ExecApprovalRequirement::Forbidden { + reason: FORBIDDEN_REASON.to_string(), + }, + Decision::Prompt => { + if matches!(approval_policy, AskForApproval::Never) { + ExecApprovalRequirement::Forbidden { + reason: PROMPT_CONFLICT_REASON.to_string(), + } + } else { + ExecApprovalRequirement::NeedsApproval { + reason: derive_prompt_reason(&evaluation), + proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) { + try_derive_execpolicy_amendment_for_prompt_rules( + &evaluation.matched_rules, + ) + } else { + None + }, + } + } + } + Decision::Allow => ExecApprovalRequirement::Skip { + // Bypass sandbox if execpolicy allows the command + bypass_sandbox: evaluation.matched_rules.iter().any(|rule_match| { + is_policy_match(rule_match) && rule_match.decision() == Decision::Allow + }), + proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) { + try_derive_execpolicy_amendment_for_allow_rules(&evaluation.matched_rules) + } else { + None + }, + }, + } + } + + pub(crate) async fn append_amendment_and_update( + &self, + codex_home: &Path, + amendment: &ExecPolicyAmendment, + ) -> Result<(), ExecPolicyUpdateError> { + let policy_path = default_policy_path(codex_home); + let prefix = amendment.command.clone(); + spawn_blocking({ + let policy_path = policy_path.clone(); + let prefix = prefix.clone(); + move || 
blocking_append_allow_prefix_rule(&policy_path, &prefix) + }) + .await + .map_err(|source| ExecPolicyUpdateError::JoinBlockingTask { source })? + .map_err(|source| ExecPolicyUpdateError::AppendRule { + path: policy_path, + source, + })?; + + let mut updated_policy = self.current().as_ref().clone(); + updated_policy.add_prefix_rule(&prefix, Decision::Allow)?; + self.policy.store(Arc::new(updated_policy)); + Ok(()) + } +} + +impl Default for ExecPolicyManager { + fn default() -> Self { + Self::new(Arc::new(Policy::empty())) + } +} + +async fn load_exec_policy_for_features( features: &Features, - codex_home: &Path, + config_stack: &ConfigLayerStack, ) -> Result { if !features.enabled(Feature::ExecPolicy) { Ok(Policy::empty()) } else { - load_exec_policy(codex_home).await + load_exec_policy(config_stack).await } } -pub async fn load_exec_policy(codex_home: &Path) -> Result { - let policy_dir = codex_home.join(RULES_DIR_NAME); - let policy_paths = collect_policy_files(&policy_dir).await?; +pub async fn load_exec_policy(config_stack: &ConfigLayerStack) -> Result { + // Iterate the layers in increasing order of precedence, adding the *.rules + // from each layer, so that higher-precedence layers can override + // rules defined in lower-precedence ones. 
+ let mut policy_paths = Vec::new(); + for layer in config_stack.get_layers(ConfigLayerStackOrdering::LowestPrecedenceFirst) { + if let Some(config_folder) = layer.config_folder() { + #[expect(clippy::expect_used)] + let policy_dir = config_folder.join(RULES_DIR_NAME).expect("safe join"); + let layer_policy_paths = collect_policy_files(&policy_dir).await?; + policy_paths.extend(layer_policy_paths); + } + } let mut parser = PolicyParser::new(); for policy_path in &policy_paths { @@ -114,46 +238,15 @@ pub async fn load_exec_policy(codex_home: &Path) -> Result PathBuf { +fn default_policy_path(codex_home: &Path) -> PathBuf { codex_home.join(RULES_DIR_NAME).join(DEFAULT_POLICY_FILE) } -pub(crate) async fn append_execpolicy_amendment_and_update( - codex_home: &Path, - current_policy: &Arc>, - prefix: &[String], -) -> Result<(), ExecPolicyUpdateError> { - let policy_path = default_policy_path(codex_home); - let prefix = prefix.to_vec(); - spawn_blocking({ - let policy_path = policy_path.clone(); - let prefix = prefix.clone(); - move || blocking_append_allow_prefix_rule(&policy_path, &prefix) - }) - .await - .map_err(|source| ExecPolicyUpdateError::JoinBlockingTask { source })? - .map_err(|source| ExecPolicyUpdateError::AppendRule { - path: policy_path, - source, - })?; - - current_policy - .write() - .await - .add_prefix_rule(&prefix, Decision::Allow)?; - - Ok(()) -} - /// Derive a proposed execpolicy amendment when a command requires user approval /// - If any execpolicy rule prompts, return None, because an amendment would not skip that policy requirement. /// - Otherwise return the first heuristics Prompt. 
@@ -217,60 +310,8 @@ fn derive_prompt_reason(evaluation: &Evaluation) -> Option { }) } -pub(crate) async fn create_exec_approval_requirement_for_command( - exec_policy: &Arc>, - features: &Features, - command: &[String], - approval_policy: AskForApproval, - sandbox_policy: &SandboxPolicy, - sandbox_permissions: SandboxPermissions, -) -> ExecApprovalRequirement { - let commands = parse_shell_lc_plain_commands(command).unwrap_or_else(|| vec![command.to_vec()]); - let heuristics_fallback = |cmd: &[String]| { - if requires_initial_appoval(approval_policy, sandbox_policy, cmd, sandbox_permissions) { - Decision::Prompt - } else { - Decision::Allow - } - }; - let policy = exec_policy.read().await; - let evaluation = policy.check_multiple(commands.iter(), &heuristics_fallback); - - match evaluation.decision { - Decision::Forbidden => ExecApprovalRequirement::Forbidden { - reason: FORBIDDEN_REASON.to_string(), - }, - Decision::Prompt => { - if matches!(approval_policy, AskForApproval::Never) { - ExecApprovalRequirement::Forbidden { - reason: PROMPT_CONFLICT_REASON.to_string(), - } - } else { - ExecApprovalRequirement::NeedsApproval { - reason: derive_prompt_reason(&evaluation), - proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) { - try_derive_execpolicy_amendment_for_prompt_rules(&evaluation.matched_rules) - } else { - None - }, - } - } - } - Decision::Allow => ExecApprovalRequirement::Skip { - // Bypass sandbox if execpolicy allows the command - bypass_sandbox: evaluation.matched_rules.iter().any(|rule_match| { - is_policy_match(rule_match) && rule_match.decision() == Decision::Allow - }), - proposed_execpolicy_amendment: if features.enabled(Feature::ExecPolicy) { - try_derive_execpolicy_amendment_for_allow_rules(&evaluation.matched_rules) - } else { - None - }, - }, - } -} - -async fn collect_policy_files(dir: &Path) -> Result, ExecPolicyError> { +async fn collect_policy_files(dir: impl AsRef) -> Result, ExecPolicyError> { + let dir = dir.as_ref(); 
let mut read_dir = match fs::read_dir(dir).await { Ok(read_dir) => read_dir, Err(err) if err.kind() == ErrorKind::NotFound => return Ok(Vec::new()), @@ -313,30 +354,54 @@ async fn collect_policy_files(dir: &Path) -> Result, ExecPolicyErro policy_paths.sort(); + tracing::debug!( + "loaded {} .rules files in {}", + policy_paths.len(), + dir.display() + ); Ok(policy_paths) } #[cfg(test)] mod tests { use super::*; + use crate::config_loader::ConfigLayerEntry; + use crate::config_loader::ConfigLayerStack; + use crate::config_loader::ConfigRequirements; use crate::features::Feature; use crate::features::Features; + use codex_app_server_protocol::ConfigLayerSource; use codex_protocol::protocol::AskForApproval; use codex_protocol::protocol::SandboxPolicy; + use codex_utils_absolute_path::AbsolutePathBuf; use pretty_assertions::assert_eq; use std::fs; + use std::path::Path; use std::sync::Arc; use tempfile::tempdir; + use toml::Value as TomlValue; + + fn config_stack_for_dot_codex_folder(dot_codex_folder: &Path) -> ConfigLayerStack { + let dot_codex_folder = AbsolutePathBuf::from_absolute_path(dot_codex_folder) + .expect("absolute dot_codex_folder"); + let layer = ConfigLayerEntry::new( + ConfigLayerSource::Project { dot_codex_folder }, + TomlValue::Table(Default::default()), + ); + ConfigLayerStack::new(vec![layer], ConfigRequirements::default()).expect("ConfigLayerStack") + } #[tokio::test] async fn returns_empty_policy_when_feature_disabled() { let mut features = Features::with_defaults(); features.disable(Feature::ExecPolicy); let temp_dir = tempdir().expect("create temp dir"); + let config_stack = config_stack_for_dot_codex_folder(temp_dir.path()); - let policy = load_exec_policy_for_features(&features, temp_dir.path()) + let manager = ExecPolicyManager::load(&features, &config_stack) .await - .expect("policy result"); + .expect("manager result"); + let policy = manager.current(); let commands = [vec!["rm".to_string()]]; assert_eq!( @@ -367,6 +432,7 @@ mod tests { 
#[tokio::test] async fn loads_policies_from_policy_subdirectory() { let temp_dir = tempdir().expect("create temp dir"); + let config_stack = config_stack_for_dot_codex_folder(temp_dir.path()); let policy_dir = temp_dir.path().join(RULES_DIR_NAME); fs::create_dir_all(&policy_dir).expect("create policy dir"); fs::write( @@ -375,7 +441,7 @@ mod tests { ) .expect("write policy file"); - let policy = load_exec_policy(temp_dir.path()) + let policy = load_exec_policy(&config_stack) .await .expect("policy result"); let command = [vec!["rm".to_string()]]; @@ -394,13 +460,14 @@ mod tests { #[tokio::test] async fn ignores_policies_outside_policy_dir() { let temp_dir = tempdir().expect("create temp dir"); + let config_stack = config_stack_for_dot_codex_folder(temp_dir.path()); fs::write( temp_dir.path().join("root.rules"), r#"prefix_rule(pattern=["ls"], decision="prompt")"#, ) .expect("write policy file"); - let policy = load_exec_policy(temp_dir.path()) + let policy = load_exec_policy(&config_stack) .await .expect("policy result"); let command = [vec!["ls".to_string()]]; @@ -416,6 +483,69 @@ mod tests { ); } + #[tokio::test] + async fn loads_policies_from_multiple_config_layers() -> anyhow::Result<()> { + let user_dir = tempdir()?; + let project_dir = tempdir()?; + + let user_policy_dir = user_dir.path().join(RULES_DIR_NAME); + fs::create_dir_all(&user_policy_dir)?; + fs::write( + user_policy_dir.join("user.rules"), + r#"prefix_rule(pattern=["rm"], decision="forbidden")"#, + )?; + + let project_policy_dir = project_dir.path().join(RULES_DIR_NAME); + fs::create_dir_all(&project_policy_dir)?; + fs::write( + project_policy_dir.join("project.rules"), + r#"prefix_rule(pattern=["ls"], decision="prompt")"#, + )?; + + let user_config_toml = + AbsolutePathBuf::from_absolute_path(user_dir.path().join("config.toml"))?; + let project_dot_codex_folder = AbsolutePathBuf::from_absolute_path(project_dir.path())?; + let layers = vec![ + ConfigLayerEntry::new( + ConfigLayerSource::User { + 
file: user_config_toml, + }, + TomlValue::Table(Default::default()), + ), + ConfigLayerEntry::new( + ConfigLayerSource::Project { + dot_codex_folder: project_dot_codex_folder, + }, + TomlValue::Table(Default::default()), + ), + ]; + let config_stack = ConfigLayerStack::new(layers, ConfigRequirements::default())?; + + let policy = load_exec_policy(&config_stack).await?; + + assert_eq!( + Evaluation { + decision: Decision::Forbidden, + matched_rules: vec![RuleMatch::PrefixRuleMatch { + matched_prefix: vec!["rm".to_string()], + decision: Decision::Forbidden + }], + }, + policy.check_multiple([vec!["rm".to_string()]].iter(), &|_| Decision::Allow) + ); + assert_eq!( + Evaluation { + decision: Decision::Prompt, + matched_rules: vec![RuleMatch::PrefixRuleMatch { + matched_prefix: vec!["ls".to_string()], + decision: Decision::Prompt + }], + }, + policy.check_multiple([vec!["ls".to_string()]].iter(), &|_| Decision::Allow) + ); + Ok(()) + } + #[tokio::test] async fn evaluates_bash_lc_inner_commands() { let policy_src = r#" @@ -425,7 +555,7 @@ prefix_rule(pattern=["rm"], decision="forbidden") parser .parse("test.rules", policy_src) .expect("parse policy"); - let policy = Arc::new(RwLock::new(parser.build())); + let policy = Arc::new(parser.build()); let forbidden_script = vec![ "bash".to_string(), @@ -433,15 +563,16 @@ prefix_rule(pattern=["rm"], decision="forbidden") "rm -rf /tmp".to_string(), ]; - let requirement = create_exec_approval_requirement_for_command( - &policy, - &Features::with_defaults(), - &forbidden_script, - AskForApproval::OnRequest, - &SandboxPolicy::DangerFullAccess, - SandboxPermissions::UseDefault, - ) - .await; + let manager = ExecPolicyManager::new(policy); + let requirement = manager + .create_exec_approval_requirement_for_command( + &Features::with_defaults(), + &forbidden_script, + AskForApproval::OnRequest, + &SandboxPolicy::DangerFullAccess, + SandboxPermissions::UseDefault, + ) + .await; assert_eq!( requirement, @@ -458,18 +589,19 @@ 
prefix_rule(pattern=["rm"], decision="forbidden") parser .parse("test.rules", policy_src) .expect("parse policy"); - let policy = Arc::new(RwLock::new(parser.build())); + let policy = Arc::new(parser.build()); let command = vec!["rm".to_string()]; - let requirement = create_exec_approval_requirement_for_command( - &policy, - &Features::with_defaults(), - &command, - AskForApproval::OnRequest, - &SandboxPolicy::DangerFullAccess, - SandboxPermissions::UseDefault, - ) - .await; + let manager = ExecPolicyManager::new(policy); + let requirement = manager + .create_exec_approval_requirement_for_command( + &Features::with_defaults(), + &command, + AskForApproval::OnRequest, + &SandboxPolicy::DangerFullAccess, + SandboxPermissions::UseDefault, + ) + .await; assert_eq!( requirement, @@ -487,18 +619,19 @@ prefix_rule(pattern=["rm"], decision="forbidden") parser .parse("test.rules", policy_src) .expect("parse policy"); - let policy = Arc::new(RwLock::new(parser.build())); + let policy = Arc::new(parser.build()); let command = vec!["rm".to_string()]; - let requirement = create_exec_approval_requirement_for_command( - &policy, - &Features::with_defaults(), - &command, - AskForApproval::Never, - &SandboxPolicy::DangerFullAccess, - SandboxPermissions::UseDefault, - ) - .await; + let manager = ExecPolicyManager::new(policy); + let requirement = manager + .create_exec_approval_requirement_for_command( + &Features::with_defaults(), + &command, + AskForApproval::Never, + &SandboxPolicy::DangerFullAccess, + SandboxPermissions::UseDefault, + ) + .await; assert_eq!( requirement, @@ -512,16 +645,16 @@ prefix_rule(pattern=["rm"], decision="forbidden") async fn exec_approval_requirement_falls_back_to_heuristics() { let command = vec!["cargo".to_string(), "build".to_string()]; - let empty_policy = Arc::new(RwLock::new(Policy::empty())); - let requirement = create_exec_approval_requirement_for_command( - &empty_policy, - &Features::with_defaults(), - &command, - 
AskForApproval::UnlessTrusted, - &SandboxPolicy::ReadOnly, - SandboxPermissions::UseDefault, - ) - .await; + let manager = ExecPolicyManager::default(); + let requirement = manager + .create_exec_approval_requirement_for_command( + &Features::with_defaults(), + &command, + AskForApproval::UnlessTrusted, + &SandboxPolicy::ReadOnly, + SandboxPermissions::UseDefault, + ) + .await; assert_eq!( requirement, @@ -539,7 +672,7 @@ prefix_rule(pattern=["rm"], decision="forbidden") parser .parse("test.rules", policy_src) .expect("parse policy"); - let policy = Arc::new(RwLock::new(parser.build())); + let policy = Arc::new(parser.build()); let command = vec![ "bash".to_string(), "-lc".to_string(), @@ -547,15 +680,15 @@ prefix_rule(pattern=["rm"], decision="forbidden") ]; assert_eq!( - create_exec_approval_requirement_for_command( - &policy, - &Features::with_defaults(), - &command, - AskForApproval::UnlessTrusted, - &SandboxPolicy::DangerFullAccess, - SandboxPermissions::UseDefault, - ) - .await, + ExecPolicyManager::new(policy) + .create_exec_approval_requirement_for_command( + &Features::with_defaults(), + &command, + AskForApproval::UnlessTrusted, + &SandboxPolicy::DangerFullAccess, + SandboxPermissions::UseDefault, + ) + .await, ExecApprovalRequirement::NeedsApproval { reason: None, proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(vec![ @@ -568,14 +701,16 @@ prefix_rule(pattern=["rm"], decision="forbidden") #[tokio::test] async fn append_execpolicy_amendment_updates_policy_and_file() { let codex_home = tempdir().expect("create temp dir"); - let current_policy = Arc::new(RwLock::new(Policy::empty())); let prefix = vec!["echo".to_string(), "hello".to_string()]; + let manager = ExecPolicyManager::default(); - append_execpolicy_amendment_and_update(codex_home.path(), ¤t_policy, &prefix) + manager + .append_amendment_and_update(codex_home.path(), &ExecPolicyAmendment::from(prefix)) .await .expect("update policy"); + let updated_policy = manager.current(); - let 
evaluation = current_policy.read().await.check( + let evaluation = updated_policy.check( &["echo".to_string(), "hello".to_string(), "world".to_string()], &|_| Decision::Allow, ); @@ -599,10 +734,11 @@ prefix_rule(pattern=["rm"], decision="forbidden") #[tokio::test] async fn append_execpolicy_amendment_rejects_empty_prefix() { let codex_home = tempdir().expect("create temp dir"); - let current_policy = Arc::new(RwLock::new(Policy::empty())); + let manager = ExecPolicyManager::default(); - let result = - append_execpolicy_amendment_and_update(codex_home.path(), ¤t_policy, &[]).await; + let result = manager + .append_amendment_and_update(codex_home.path(), &ExecPolicyAmendment::from(vec![])) + .await; assert!(matches!( result, @@ -617,16 +753,16 @@ prefix_rule(pattern=["rm"], decision="forbidden") async fn proposed_execpolicy_amendment_is_present_for_single_command_without_policy_match() { let command = vec!["cargo".to_string(), "build".to_string()]; - let empty_policy = Arc::new(RwLock::new(Policy::empty())); - let requirement = create_exec_approval_requirement_for_command( - &empty_policy, - &Features::with_defaults(), - &command, - AskForApproval::UnlessTrusted, - &SandboxPolicy::ReadOnly, - SandboxPermissions::UseDefault, - ) - .await; + let manager = ExecPolicyManager::default(); + let requirement = manager + .create_exec_approval_requirement_for_command( + &Features::with_defaults(), + &command, + AskForApproval::UnlessTrusted, + &SandboxPolicy::ReadOnly, + SandboxPermissions::UseDefault, + ) + .await; assert_eq!( requirement, @@ -644,15 +780,16 @@ prefix_rule(pattern=["rm"], decision="forbidden") let mut features = Features::with_defaults(); features.disable(Feature::ExecPolicy); - let requirement = create_exec_approval_requirement_for_command( - &Arc::new(RwLock::new(Policy::empty())), - &features, - &command, - AskForApproval::UnlessTrusted, - &SandboxPolicy::ReadOnly, - SandboxPermissions::UseDefault, - ) - .await; + let manager = 
ExecPolicyManager::default(); + let requirement = manager + .create_exec_approval_requirement_for_command( + &features, + &command, + AskForApproval::UnlessTrusted, + &SandboxPolicy::ReadOnly, + SandboxPermissions::UseDefault, + ) + .await; assert_eq!( requirement, @@ -670,18 +807,19 @@ prefix_rule(pattern=["rm"], decision="forbidden") parser .parse("test.rules", policy_src) .expect("parse policy"); - let policy = Arc::new(RwLock::new(parser.build())); + let policy = Arc::new(parser.build()); let command = vec!["rm".to_string()]; - let requirement = create_exec_approval_requirement_for_command( - &policy, - &Features::with_defaults(), - &command, - AskForApproval::OnRequest, - &SandboxPolicy::DangerFullAccess, - SandboxPermissions::UseDefault, - ) - .await; + let manager = ExecPolicyManager::new(policy); + let requirement = manager + .create_exec_approval_requirement_for_command( + &Features::with_defaults(), + &command, + AskForApproval::OnRequest, + &SandboxPolicy::DangerFullAccess, + SandboxPermissions::UseDefault, + ) + .await; assert_eq!( requirement, @@ -699,15 +837,16 @@ prefix_rule(pattern=["rm"], decision="forbidden") "-lc".to_string(), "cargo build && echo ok".to_string(), ]; - let requirement = create_exec_approval_requirement_for_command( - &Arc::new(RwLock::new(Policy::empty())), - &Features::with_defaults(), - &command, - AskForApproval::UnlessTrusted, - &SandboxPolicy::ReadOnly, - SandboxPermissions::UseDefault, - ) - .await; + let manager = ExecPolicyManager::default(); + let requirement = manager + .create_exec_approval_requirement_for_command( + &Features::with_defaults(), + &command, + AskForApproval::UnlessTrusted, + &SandboxPolicy::ReadOnly, + SandboxPermissions::UseDefault, + ) + .await; assert_eq!( requirement, @@ -728,7 +867,7 @@ prefix_rule(pattern=["rm"], decision="forbidden") parser .parse("test.rules", policy_src) .expect("parse policy"); - let policy = Arc::new(RwLock::new(parser.build())); + let policy = Arc::new(parser.build()); let 
command = vec![ "bash".to_string(), @@ -737,15 +876,15 @@ prefix_rule(pattern=["rm"], decision="forbidden") ]; assert_eq!( - create_exec_approval_requirement_for_command( - &policy, - &Features::with_defaults(), - &command, - AskForApproval::UnlessTrusted, - &SandboxPolicy::ReadOnly, - SandboxPermissions::UseDefault, - ) - .await, + ExecPolicyManager::new(policy) + .create_exec_approval_requirement_for_command( + &Features::with_defaults(), + &command, + AskForApproval::UnlessTrusted, + &SandboxPolicy::ReadOnly, + SandboxPermissions::UseDefault, + ) + .await, ExecApprovalRequirement::NeedsApproval { reason: None, proposed_execpolicy_amendment: Some(ExecPolicyAmendment::new(vec![ @@ -759,15 +898,16 @@ prefix_rule(pattern=["rm"], decision="forbidden") async fn proposed_execpolicy_amendment_is_present_when_heuristics_allow() { let command = vec!["echo".to_string(), "safe".to_string()]; - let requirement = create_exec_approval_requirement_for_command( - &Arc::new(RwLock::new(Policy::empty())), - &Features::with_defaults(), - &command, - AskForApproval::OnRequest, - &SandboxPolicy::ReadOnly, - SandboxPermissions::UseDefault, - ) - .await; + let manager = ExecPolicyManager::default(); + let requirement = manager + .create_exec_approval_requirement_for_command( + &Features::with_defaults(), + &command, + AskForApproval::OnRequest, + &SandboxPolicy::ReadOnly, + SandboxPermissions::UseDefault, + ) + .await; assert_eq!( requirement, @@ -785,18 +925,19 @@ prefix_rule(pattern=["rm"], decision="forbidden") parser .parse("test.rules", policy_src) .expect("parse policy"); - let policy = Arc::new(RwLock::new(parser.build())); + let policy = Arc::new(parser.build()); let command = vec!["echo".to_string(), "safe".to_string()]; - let requirement = create_exec_approval_requirement_for_command( - &policy, - &Features::with_defaults(), - &command, - AskForApproval::OnRequest, - &SandboxPolicy::ReadOnly, - SandboxPermissions::UseDefault, - ) - .await; + let manager = 
ExecPolicyManager::new(policy); + let requirement = manager + .create_exec_approval_requirement_for_command( + &Features::with_defaults(), + &command, + AskForApproval::OnRequest, + &SandboxPolicy::ReadOnly, + SandboxPermissions::UseDefault, + ) + .await; assert_eq!( requirement, diff --git a/codex-rs/core/src/features.rs b/codex-rs/core/src/features.rs index 22fd310b992..9168b7d697d 100644 --- a/codex-rs/core/src/features.rs +++ b/codex-rs/core/src/features.rs @@ -8,6 +8,7 @@ use crate::config::ConfigToml; use crate::config::profile::ConfigProfile; use serde::Deserialize; +use serde::Serialize; use std::collections::BTreeMap; use std::collections::BTreeSet; @@ -69,8 +70,6 @@ pub enum Feature { // Experimental /// Use the single unified PTY-backed exec tool. UnifiedExec, - /// Enable experimental RMCP features such as OAuth login. - RmcpClient, /// Include the freeform apply_patch tool. ApplyPatchFreeform, /// Allow the model to request web searches. @@ -93,6 +92,12 @@ pub enum Feature { Tui2, /// Enable discovery and injection of skills. Skills, + /// Enable Language Server Protocol integration (diagnostics + navigation). + Lsp, + /// Allow the agent to spawn a read-only mini-model subagent. + MiniSubagents, + /// Enforce UTF8 output in Powershell. 
+ PowershellUtf8, } impl Feature { @@ -226,7 +231,6 @@ impl Features { let base_legacy = LegacyFeatureToggles { experimental_use_freeform_apply_patch: cfg.experimental_use_freeform_apply_patch, experimental_use_unified_exec_tool: cfg.experimental_use_unified_exec_tool, - experimental_use_rmcp_client: cfg.experimental_use_rmcp_client, tools_web_search: cfg.tools.as_ref().and_then(|t| t.web_search), tools_view_image: cfg.tools.as_ref().and_then(|t| t.view_image), ..Default::default() @@ -243,7 +247,6 @@ impl Features { .experimental_use_freeform_apply_patch, experimental_use_unified_exec_tool: config_profile.experimental_use_unified_exec_tool, - experimental_use_rmcp_client: config_profile.experimental_use_rmcp_client, tools_web_search: config_profile.tools_web_search, tools_view_image: config_profile.tools_view_image, }; @@ -274,7 +277,7 @@ pub fn is_known_feature_key(key: &str) -> bool { } /// Deserializable features table for TOML. -#[derive(Deserialize, Debug, Clone, Default, PartialEq)] +#[derive(Serialize, Deserialize, Debug, Clone, Default, PartialEq)] pub struct FeaturesToml { #[serde(flatten)] pub entries: BTreeMap, @@ -295,7 +298,7 @@ pub const FEATURES: &[FeatureSpec] = &[ id: Feature::GhostCommit, key: "undo", stage: Stage::Stable, - default_enabled: true, + default_enabled: false, }, FeatureSpec { id: Feature::ParallelToolCalls, @@ -348,12 +351,15 @@ pub const FEATURES: &[FeatureSpec] = &[ }, default_enabled: false, }, - // Unstable features. FeatureSpec { - id: Feature::RmcpClient, - key: "rmcp_client", - stage: Stage::Experimental, - default_enabled: false, + id: Feature::MiniSubagents, + key: "mini_subagents", + stage: Stage::Beta { + name: "Mini subagents", + menu_description: "Enable a dedicated read-only subagent tool that always runs on gpt-5.1-codex-mini.", + announcement: "NEW! Try mini subagents for faster, cheaper repo exploration. 
Enable in /experimental!", + }, + default_enabled: true, }, FeatureSpec { id: Feature::ApplyPatchFreeform, @@ -397,6 +403,22 @@ pub const FEATURES: &[FeatureSpec] = &[ stage: Stage::Experimental, default_enabled: true, }, + FeatureSpec { + id: Feature::Lsp, + key: "lsp", + stage: Stage::Beta { + name: "LSP", + menu_description: "Enable Language Server Protocol diagnostics + navigation (/diagnostics, lsp_* tools).", + announcement: "NEW! Try LSP diagnostics and navigation. Enable in /experimental!", + }, + default_enabled: false, + }, + FeatureSpec { + id: Feature::PowershellUtf8, + key: "powershell_utf8", + stage: Stage::Experimental, + default_enabled: cfg!(windows), + }, FeatureSpec { id: Feature::Tui2, key: "tui2", diff --git a/codex-rs/core/src/features/legacy.rs b/codex-rs/core/src/features/legacy.rs index 19210c67591..09a982569f6 100644 --- a/codex-rs/core/src/features/legacy.rs +++ b/codex-rs/core/src/features/legacy.rs @@ -17,10 +17,6 @@ const ALIASES: &[Alias] = &[ legacy_key: "experimental_use_unified_exec_tool", feature: Feature::UnifiedExec, }, - Alias { - legacy_key: "experimental_use_rmcp_client", - feature: Feature::RmcpClient, - }, Alias { legacy_key: "experimental_use_freeform_apply_patch", feature: Feature::ApplyPatchFreeform, @@ -50,7 +46,6 @@ pub struct LegacyFeatureToggles { pub include_apply_patch_tool: Option, pub experimental_use_freeform_apply_patch: Option, pub experimental_use_unified_exec_tool: Option, - pub experimental_use_rmcp_client: Option, pub tools_web_search: Option, pub tools_view_image: Option, } @@ -75,12 +70,6 @@ impl LegacyFeatureToggles { self.experimental_use_unified_exec_tool, "experimental_use_unified_exec_tool", ); - set_if_some( - features, - Feature::RmcpClient, - self.experimental_use_rmcp_client, - "experimental_use_rmcp_client", - ); set_if_some( features, Feature::WebSearchRequest, diff --git a/codex-rs/core/src/lib.rs b/codex-rs/core/src/lib.rs index c59a347bb52..0e055a81246 100644 --- a/codex-rs/core/src/lib.rs 
+++ b/codex-rs/core/src/lib.rs @@ -31,6 +31,7 @@ pub mod features; mod flags; pub mod git_info; pub mod landlock; +mod lsp_prompt; pub mod mcp; mod mcp_connection_manager; pub mod models_manager; diff --git a/codex-rs/core/src/lsp_prompt.rs b/codex-rs/core/src/lsp_prompt.rs new file mode 100644 index 00000000000..67ec7e4ec3c --- /dev/null +++ b/codex-rs/core/src/lsp_prompt.rs @@ -0,0 +1,36 @@ +use codex_lsp::Diagnostic; +use codex_lsp::DiagnosticSeverity; + +pub(crate) fn render_diagnostics_summary(diags: &[Diagnostic]) -> String { + let mut errors = 0usize; + let mut warnings = 0usize; + let mut infos = 0usize; + let mut hints = 0usize; + for d in diags { + match d.severity { + Some(DiagnosticSeverity::Error) => errors += 1, + Some(DiagnosticSeverity::Warning) => warnings += 1, + Some(DiagnosticSeverity::Information) => infos += 1, + Some(DiagnosticSeverity::Hint) => hints += 1, + None => {} + } + } + + let mut lines = Vec::new(); + lines.push("## LSP diagnostics".to_string()); + lines.push(format!( + "Errors: {errors}, Warnings: {warnings}, Info: {infos}, Hint: {hints}" + )); + lines.push("Diagnostics use 1-based line/character positions.".to_string()); + lines.push("".to_string()); + for d in diags { + lines.push(format!( + "- {path}:{line}:{character} {message}", + path = d.path, + line = d.range.start.line, + character = d.range.start.character, + message = d.message + )); + } + lines.join("\n") +} diff --git a/codex-rs/core/src/models_manager/manager.rs b/codex-rs/core/src/models_manager/manager.rs index 315380ade10..5c53f5fc166 100644 --- a/codex-rs/core/src/models_manager/manager.rs +++ b/codex-rs/core/src/models_manager/manager.rs @@ -354,7 +354,6 @@ mod tests { "truncation_policy": {"mode": "bytes", "limit": 10_000}, "supports_parallel_tool_calls": false, "context_window": null, - "reasoning_summary_format": "none", "experimental_supported_tools": [], })) .expect("valid model") diff --git a/codex-rs/core/src/models_manager/model_family.rs 
b/codex-rs/core/src/models_manager/model_family.rs index 21e20bcc043..9a6904cea1e 100644 --- a/codex-rs/core/src/models_manager/model_family.rs +++ b/codex-rs/core/src/models_manager/model_family.rs @@ -3,7 +3,6 @@ use codex_protocol::openai_models::ApplyPatchToolType; use codex_protocol::openai_models::ConfigShellToolType; use codex_protocol::openai_models::ModelInfo; use codex_protocol::openai_models::ReasoningEffort; -use codex_protocol::openai_models::ReasoningSummaryFormat; use crate::config::Config; use crate::truncate::TruncationPolicy; @@ -48,9 +47,6 @@ pub struct ModelFamily { // The reasoning effort to use for this model family when none is explicitly chosen. pub default_reasoning_effort: Option, - // Define if we need a special handling of reasoning summary - pub reasoning_summary_format: ReasoningSummaryFormat, - /// Whether this model supports parallel tool calls when using the /// Responses API. pub supports_parallel_tool_calls: bool, @@ -88,9 +84,6 @@ impl ModelFamily { if let Some(supports_reasoning_summaries) = config.model_supports_reasoning_summaries { self.supports_reasoning_summaries = supports_reasoning_summaries; } - if let Some(reasoning_summary_format) = config.model_reasoning_summary_format.as_ref() { - self.reasoning_summary_format = reasoning_summary_format.clone(); - } if let Some(context_window) = config.model_context_window { self.context_window = Some(context_window); } @@ -117,7 +110,6 @@ impl ModelFamily { supported_reasoning_levels: _, shell_type, visibility: _, - minimal_client_version: _, supported_in_api: _, priority: _, upgrade: _, @@ -129,7 +121,6 @@ impl ModelFamily { truncation_policy, supports_parallel_tool_calls, context_window, - reasoning_summary_format, experimental_supported_tools, } = model; @@ -145,7 +136,6 @@ impl ModelFamily { self.truncation_policy = truncation_policy.into(); self.supports_parallel_tool_calls = supports_parallel_tool_calls; self.context_window = context_window; - self.reasoning_summary_format = 
reasoning_summary_format; self.experimental_supported_tools = experimental_supported_tools; } @@ -176,7 +166,6 @@ macro_rules! model_family { context_window: Some(CONTEXT_WINDOW_272K), auto_compact_token_limit: None, supports_reasoning_summaries: false, - reasoning_summary_format: ReasoningSummaryFormat::None, supports_parallel_tool_calls: false, apply_patch_tool_type: None, base_instructions: BASE_INSTRUCTIONS.to_string(), @@ -251,7 +240,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily { model_family!( slug, slug, supports_reasoning_summaries: true, - reasoning_summary_format: ReasoningSummaryFormat::Experimental, base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(), experimental_supported_tools: vec![ "grep_files".to_string(), @@ -271,7 +259,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily { model_family!( slug, slug, supports_reasoning_summaries: true, - reasoning_summary_format: ReasoningSummaryFormat::Experimental, base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(), apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), shell_type: ConfigShellToolType::ShellCommand, @@ -300,7 +287,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily { model_family!( slug, slug, supports_reasoning_summaries: true, - reasoning_summary_format: ReasoningSummaryFormat::Experimental, base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(), apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), shell_type: ConfigShellToolType::ShellCommand, @@ -313,7 +299,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily { model_family!( slug, slug, supports_reasoning_summaries: true, - reasoning_summary_format: ReasoningSummaryFormat::Experimental, base_instructions: GPT_5_2_CODEX_INSTRUCTIONS.to_string(), apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), shell_type: ConfigShellToolType::ShellCommand, @@ -326,7 +311,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily { model_family!( 
slug, slug, supports_reasoning_summaries: true, - reasoning_summary_format: ReasoningSummaryFormat::Experimental, base_instructions: GPT_5_1_CODEX_MAX_INSTRUCTIONS.to_string(), apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), shell_type: ConfigShellToolType::ShellCommand, @@ -342,7 +326,6 @@ pub(super) fn find_family_for_model(slug: &str) -> ModelFamily { model_family!( slug, slug, supports_reasoning_summaries: true, - reasoning_summary_format: ReasoningSummaryFormat::Experimental, base_instructions: GPT_5_CODEX_INSTRUCTIONS.to_string(), apply_patch_tool_type: Some(ApplyPatchToolType::Freeform), shell_type: ConfigShellToolType::ShellCommand, @@ -417,7 +400,6 @@ fn derive_default_model_family(model: &str) -> ModelFamily { context_window: None, auto_compact_token_limit: None, supports_reasoning_summaries: false, - reasoning_summary_format: ReasoningSummaryFormat::None, supports_parallel_tool_calls: false, apply_patch_tool_type: None, base_instructions: BASE_INSTRUCTIONS.to_string(), @@ -434,7 +416,6 @@ fn derive_default_model_family(model: &str) -> ModelFamily { #[cfg(test)] mod tests { use super::*; - use codex_protocol::openai_models::ClientVersion; use codex_protocol::openai_models::ModelVisibility; use codex_protocol::openai_models::ReasoningEffortPreset; use codex_protocol::openai_models::TruncationPolicyConfig; @@ -451,7 +432,6 @@ mod tests { }], shell_type: shell, visibility: ModelVisibility::List, - minimal_client_version: ClientVersion(0, 1, 0), supported_in_api: true, priority: 1, upgrade: None, @@ -463,7 +443,6 @@ mod tests { truncation_policy: TruncationPolicyConfig::bytes(10_000), supports_parallel_tool_calls: false, context_window: None, - reasoning_summary_format: ReasoningSummaryFormat::None, experimental_supported_tools: Vec::new(), } } @@ -527,7 +506,6 @@ mod tests { experimental_supported_tools: vec!["local".to_string()], truncation_policy: TruncationPolicy::Bytes(10_000), context_window: Some(100), - reasoning_summary_format: 
ReasoningSummaryFormat::None, ); let updated = family.with_remote_overrides(vec![ModelInfo { @@ -541,7 +519,6 @@ mod tests { }], shell_type: ConfigShellToolType::ShellCommand, visibility: ModelVisibility::List, - minimal_client_version: ClientVersion(0, 1, 0), supported_in_api: true, priority: 10, upgrade: None, @@ -553,7 +530,6 @@ mod tests { truncation_policy: TruncationPolicyConfig::tokens(2_000), supports_parallel_tool_calls: true, context_window: Some(400_000), - reasoning_summary_format: ReasoningSummaryFormat::Experimental, experimental_supported_tools: vec!["alpha".to_string(), "beta".to_string()], }]); @@ -572,10 +548,6 @@ mod tests { assert_eq!(updated.truncation_policy, TruncationPolicy::Tokens(2_000)); assert!(updated.supports_parallel_tool_calls); assert_eq!(updated.context_window, Some(400_000)); - assert_eq!( - updated.reasoning_summary_format, - ReasoningSummaryFormat::Experimental - ); assert_eq!( updated.experimental_supported_tools, vec!["alpha".to_string(), "beta".to_string()] diff --git a/codex-rs/core/src/powershell.rs b/codex-rs/core/src/powershell.rs index 3863ff401be..148605b4b3f 100644 --- a/codex-rs/core/src/powershell.rs +++ b/codex-rs/core/src/powershell.rs @@ -8,6 +8,30 @@ use crate::shell::detect_shell_type; const POWERSHELL_FLAGS: &[&str] = &["-nologo", "-noprofile", "-command", "-c"]; +/// Prefixed command for powershell shell calls to force UTF-8 console output. 
+pub(crate) const UTF8_OUTPUT_PREFIX: &str = + "[Console]::OutputEncoding=[System.Text.Encoding]::UTF8;\n"; + +pub(crate) fn prefix_powershell_script_with_utf8(command: &[String]) -> Vec { + let Some((_, script)) = extract_powershell_command(command) else { + return command.to_vec(); + }; + + let trimmed = script.trim_start(); + let script = if trimmed.starts_with(UTF8_OUTPUT_PREFIX) { + script.to_string() + } else { + format!("{UTF8_OUTPUT_PREFIX}{script}") + }; + + let mut command: Vec = command[..(command.len() - 1)] + .iter() + .map(std::string::ToString::to_string) + .collect(); + command.push(script); + command +} + /// Extract the PowerShell script body from an invocation such as: /// /// - ["pwsh", "-NoProfile", "-Command", "Get-ChildItem -Recurse | Select-String foo"] @@ -22,7 +46,10 @@ pub fn extract_powershell_command(command: &[String]) -> Option<(&str, &str)> { } let shell = &command[0]; - if detect_shell_type(&PathBuf::from(shell)) != Some(ShellType::PowerShell) { + if !matches!( + detect_shell_type(&PathBuf::from(shell)), + Some(ShellType::PowerShell) + ) { return None; } @@ -36,7 +63,7 @@ pub fn extract_powershell_command(command: &[String]) -> Option<(&str, &str)> { } if flag.eq_ignore_ascii_case("-Command") || flag.eq_ignore_ascii_case("-c") { let script = &command[i + 1]; - return Some((shell, script.as_str())); + return Some((shell, script)); } i += 1; } diff --git a/codex-rs/core/src/rollout/policy.rs b/codex-rs/core/src/rollout/policy.rs index 35b1a90f33e..90ab727c0d2 100644 --- a/codex-rs/core/src/rollout/policy.rs +++ b/codex-rs/core/src/rollout/policy.rs @@ -39,8 +39,12 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool { match ev { EventMsg::UserMessage(_) | EventMsg::AgentMessage(_) + | EventMsg::AgentMessageDelta(_) | EventMsg::AgentReasoning(_) + | EventMsg::AgentReasoningDelta(_) | EventMsg::AgentReasoningRawContent(_) + | EventMsg::AgentReasoningRawContentDelta(_) + | EventMsg::AgentReasoningSectionBreak(_) | 
EventMsg::TokenCount(_) | EventMsg::ContextCompacted(_) | EventMsg::EnteredReviewMode(_) @@ -48,17 +52,7 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool { | EventMsg::EnteredPlanMode(_) | EventMsg::ExitedPlanMode(_) | EventMsg::UndoCompleted(_) - | EventMsg::TurnAborted(_) => true, - EventMsg::Error(_) - | EventMsg::Warning(_) - | EventMsg::TaskStarted(_) - | EventMsg::TaskComplete(_) - | EventMsg::AgentMessageDelta(_) - | EventMsg::AgentReasoningDelta(_) - | EventMsg::AgentReasoningRawContentDelta(_) - | EventMsg::AgentReasoningSectionBreak(_) - | EventMsg::RawResponseItem(_) - | EventMsg::SessionConfigured(_) + | EventMsg::TurnAborted(_) | EventMsg::McpToolCallBegin(_) | EventMsg::McpToolCallEnd(_) | EventMsg::SubAgentToolCallBegin(_) @@ -71,6 +65,15 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool { | EventMsg::TerminalInteraction(_) | EventMsg::ExecCommandOutputDelta(_) | EventMsg::ExecCommandEnd(_) + | EventMsg::PatchApplyBegin(_) + | EventMsg::PatchApplyEnd(_) + | EventMsg::ViewImageToolCall(_) => true, + EventMsg::Error(_) + | EventMsg::Warning(_) + | EventMsg::TaskStarted(_) + | EventMsg::TaskComplete(_) + | EventMsg::RawResponseItem(_) + | EventMsg::SessionConfigured(_) | EventMsg::ExecApprovalRequest(_) | EventMsg::ElicitationRequest(_) | EventMsg::AskUserQuestionRequest(_) @@ -78,8 +81,6 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool { | EventMsg::ApplyPatchApprovalRequest(_) | EventMsg::BackgroundEvent(_) | EventMsg::StreamError(_) - | EventMsg::PatchApplyBegin(_) - | EventMsg::PatchApplyEnd(_) | EventMsg::TurnDiff(_) | EventMsg::GetHistoryEntryResponse(_) | EventMsg::UndoStarted(_) @@ -90,7 +91,6 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool { | EventMsg::ListSkillsResponse(_) | EventMsg::PlanUpdate(_) | EventMsg::ShutdownComplete - | EventMsg::ViewImageToolCall(_) | EventMsg::DeprecationNotice(_) | EventMsg::ItemStarted(_) | EventMsg::ItemCompleted(_) @@ -100,3 +100,108 @@ 
pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool { | EventMsg::SkillsUpdateAvailable => false, } } + +#[cfg(test)] +mod tests { + use super::*; + use crate::protocol::AgentMessageDeltaEvent; + use crate::protocol::ExecCommandBeginEvent; + use crate::protocol::ExecCommandOutputDeltaEvent; + use crate::protocol::ExecCommandSource; + use crate::protocol::ExecOutputStream; + use crate::protocol::McpInvocation; + use crate::protocol::McpToolCallBeginEvent; + use crate::protocol::PatchApplyBeginEvent; + use crate::protocol::SubAgentInvocation; + use crate::protocol::SubAgentToolCallBeginEvent; + use crate::protocol::TerminalInteractionEvent; + use crate::protocol::ViewImageToolCallEvent; + use crate::protocol::WebSearchEndEvent; + use codex_protocol::protocol::FileChange; + use std::collections::HashMap; + use std::path::PathBuf; + + #[test] + fn persists_streaming_deltas_and_tool_lifecycle_events() { + assert!(should_persist_event_msg(&EventMsg::AgentMessageDelta( + AgentMessageDeltaEvent { + delta: "hello".to_string(), + } + ))); + + assert!(should_persist_event_msg(&EventMsg::ExecCommandBegin( + ExecCommandBeginEvent { + call_id: "call-1".to_string(), + process_id: None, + turn_id: "turn-1".to_string(), + command: vec!["echo".to_string(), "hi".to_string()], + cwd: PathBuf::from("/tmp"), + parsed_cmd: Vec::new(), + source: ExecCommandSource::Agent, + interaction_input: None, + } + ))); + + assert!(should_persist_event_msg(&EventMsg::ExecCommandOutputDelta( + ExecCommandOutputDeltaEvent { + call_id: "call-1".to_string(), + stream: ExecOutputStream::Stdout, + chunk: b"hi".to_vec(), + } + ))); + + assert!(should_persist_event_msg(&EventMsg::TerminalInteraction( + TerminalInteractionEvent { + call_id: "call-1".to_string(), + process_id: "pid-1".to_string(), + stdin: "ls\n".to_string(), + } + ))); + + assert!(should_persist_event_msg(&EventMsg::McpToolCallBegin( + McpToolCallBeginEvent { + call_id: "call-2".to_string(), + invocation: McpInvocation { + server: 
"srv".to_string(), + tool: "tool".to_string(), + arguments: None, + }, + } + ))); + + assert!(should_persist_event_msg(&EventMsg::SubAgentToolCallBegin( + SubAgentToolCallBeginEvent { + call_id: "call-3".to_string(), + invocation: SubAgentInvocation { + description: "desc".to_string(), + label: "label".to_string(), + prompt: "prompt".to_string(), + model: None, + }, + } + ))); + + assert!(should_persist_event_msg(&EventMsg::WebSearchEnd( + WebSearchEndEvent { + call_id: "call-4".to_string(), + query: "query".to_string(), + } + ))); + + assert!(should_persist_event_msg(&EventMsg::PatchApplyBegin( + PatchApplyBeginEvent { + call_id: "call-5".to_string(), + turn_id: "turn-1".to_string(), + auto_approved: true, + changes: HashMap::::new(), + } + ))); + + assert!(should_persist_event_msg(&EventMsg::ViewImageToolCall( + ViewImageToolCallEvent { + call_id: "call-6".to_string(), + path: PathBuf::from("image.png"), + } + ))); + } +} diff --git a/codex-rs/core/src/skills/assets/samples/plan/LICENSE.txt b/codex-rs/core/src/skills/assets/samples/plan/LICENSE.txt deleted file mode 100644 index d6456956733..00000000000 --- a/codex-rs/core/src/skills/assets/samples/plan/LICENSE.txt +++ /dev/null @@ -1,202 +0,0 @@ - - Apache License - Version 2.0, January 2004 - http://www.apache.org/licenses/ - - TERMS AND CONDITIONS FOR USE, REPRODUCTION, AND DISTRIBUTION - - 1. Definitions. - - "License" shall mean the terms and conditions for use, reproduction, - and distribution as defined by Sections 1 through 9 of this document. - - "Licensor" shall mean the copyright owner or entity authorized by - the copyright owner that is granting the License. - - "Legal Entity" shall mean the union of the acting entity and all - other entities that control, are controlled by, or are under common - control with that entity. 
For the purposes of this definition, - "control" means (i) the power, direct or indirect, to cause the - direction or management of such entity, whether by contract or - otherwise, or (ii) ownership of fifty percent (50%) or more of the - outstanding shares, or (iii) beneficial ownership of such entity. - - "You" (or "Your") shall mean an individual or Legal Entity - exercising permissions granted by this License. - - "Source" form shall mean the preferred form for making modifications, - including but not limited to software source code, documentation - source, and configuration files. - - "Object" form shall mean any form resulting from mechanical - transformation or translation of a Source form, including but - not limited to compiled object code, generated documentation, - and conversions to other media types. - - "Work" shall mean the work of authorship, whether in Source or - Object form, made available under the License, as indicated by a - copyright notice that is included in or attached to the work - (an example is provided in the Appendix below). - - "Derivative Works" shall mean any work, whether in Source or Object - form, that is based on (or derived from) the Work and for which the - editorial revisions, annotations, elaborations, or other modifications - represent, as a whole, an original work of authorship. For the purposes - of this License, Derivative Works shall not include works that remain - separable from, or merely link (or bind by name) to the interfaces of, - the Work and Derivative Works thereof. - - "Contribution" shall mean any work of authorship, including - the original version of the Work and any modifications or additions - to that Work or Derivative Works thereof, that is intentionally - submitted to Licensor for inclusion in the Work by the copyright owner - or by an individual or Legal Entity authorized to submit on behalf of - the copyright owner. 
For the purposes of this definition, "submitted" - means any form of electronic, verbal, or written communication sent - to the Licensor or its representatives, including but not limited to - communication on electronic mailing lists, source code control systems, - and issue tracking systems that are managed by, or on behalf of, the - Licensor for the purpose of discussing and improving the Work, but - excluding communication that is conspicuously marked or otherwise - designated in writing by the copyright owner as "Not a Contribution." - - "Contributor" shall mean Licensor and any individual or Legal Entity - on behalf of whom a Contribution has been received by Licensor and - subsequently incorporated within the Work. - - 2. Grant of Copyright License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - copyright license to reproduce, prepare Derivative Works of, - publicly display, publicly perform, sublicense, and distribute the - Work and such Derivative Works in Source or Object form. - - 3. Grant of Patent License. Subject to the terms and conditions of - this License, each Contributor hereby grants to You a perpetual, - worldwide, non-exclusive, no-charge, royalty-free, irrevocable - (except as stated in this section) patent license to make, have made, - use, offer to sell, sell, import, and otherwise transfer the Work, - where such license applies only to those patent claims licensable - by such Contributor that are necessarily infringed by their - Contribution(s) alone or by combination of their Contribution(s) - with the Work to which such Contribution(s) was submitted. 
If You - institute patent litigation against any entity (including a - cross-claim or counterclaim in a lawsuit) alleging that the Work - or a Contribution incorporated within the Work constitutes direct - or contributory patent infringement, then any patent licenses - granted to You under this License for that Work shall terminate - as of the date such litigation is filed. - - 4. Redistribution. You may reproduce and distribute copies of the - Work or Derivative Works thereof in any medium, with or without - modifications, and in Source or Object form, provided that You - meet the following conditions: - - (a) You must give any other recipients of the Work or - Derivative Works a copy of this License; and - - (b) You must cause any modified files to carry prominent notices - stating that You changed the files; and - - (c) You must retain, in the Source form of any Derivative Works - that You distribute, all copyright, patent, trademark, and - attribution notices from the Source form of the Work, - excluding those notices that do not pertain to any part of - the Derivative Works; and - - (d) If the Work includes a "NOTICE" text file as part of its - distribution, then any Derivative Works that You distribute must - include a readable copy of the attribution notices contained - within such NOTICE file, excluding those notices that do not - pertain to any part of the Derivative Works, in at least one - of the following places: within a NOTICE text file distributed - as part of the Derivative Works; within the Source form or - documentation, if provided along with the Derivative Works; or, - within a display generated by the Derivative Works, if and - wherever such third-party notices normally appear. The contents - of the NOTICE file are for informational purposes only and - do not modify the License. 
You may add Your own attribution - notices within Derivative Works that You distribute, alongside - or as an addendum to the NOTICE text from the Work, provided - that such additional attribution notices cannot be construed - as modifying the License. - - You may add Your own copyright statement to Your modifications and - may provide additional or different license terms and conditions - for use, reproduction, or distribution of Your modifications, or - for any such Derivative Works as a whole, provided Your use, - reproduction, and distribution of the Work otherwise complies with - the conditions stated in this License. - - 5. Submission of Contributions. Unless You explicitly state otherwise, - any Contribution intentionally submitted for inclusion in the Work - by You to the Licensor shall be under the terms and conditions of - this License, without any additional terms or conditions. - Notwithstanding the above, nothing herein shall supersede or modify - the terms of any separate license agreement you may have executed - with Licensor regarding such Contributions. - - 6. Trademarks. This License does not grant permission to use the trade - names, trademarks, service marks, or product names of the Licensor, - except as required for reasonable and customary use in describing the - origin of the Work and reproducing the content of the NOTICE file. - - 7. Disclaimer of Warranty. Unless required by applicable law or - agreed to in writing, Licensor provides the Work (and each - Contributor provides its Contributions) on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or - implied, including, without limitation, any warranties or conditions - of TITLE, NON-INFRINGEMENT, MERCHANTABILITY, or FITNESS FOR A - PARTICULAR PURPOSE. You are solely responsible for determining the - appropriateness of using or redistributing the Work and assume any - risks associated with Your exercise of permissions under this License. - - 8. 
Limitation of Liability. In no event and under no legal theory, - whether in tort (including negligence), contract, or otherwise, - unless required by applicable law (such as deliberate and grossly - negligent acts) or agreed to in writing, shall any Contributor be - liable to You for damages, including any direct, indirect, special, - incidental, or consequential damages of any character arising as a - result of this License or out of the use or inability to use the - Work (including but not limited to damages for loss of goodwill, - work stoppage, computer failure or malfunction, or any and all - other commercial damages or losses), even if such Contributor - has been advised of the possibility of such damages. - - 9. Accepting Warranty or Additional Liability. While redistributing - the Work or Derivative Works thereof, You may choose to offer, - and charge a fee for, acceptance of support, warranty, indemnity, - or other liability obligations and/or rights consistent with this - License. However, in accepting such obligations, You may act only - on Your own behalf and on Your sole responsibility, not on behalf - of any other Contributor, and only if You agree to indemnify, - defend, and hold each Contributor harmless for any liability - incurred by, or claims asserted against, such Contributor by reason - of your accepting any such warranty or additional liability. - - END OF TERMS AND CONDITIONS - - APPENDIX: How to apply the Apache License to your work. - - To apply the Apache License to your work, attach the following - boilerplate notice, with the fields enclosed by brackets "[]" - replaced with your own identifying information. (Don't include - the brackets!) The text should be enclosed in the appropriate - comment syntax for the file format. We also recommend that a - file or class name and description of purpose be included on the - same "printed page" as the copyright notice for easier - identification within third-party archives. 
- - Copyright [yyyy] [name of copyright owner] - - Licensed under the Apache License, Version 2.0 (the "License"); - you may not use this file except in compliance with the License. - You may obtain a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, - WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. - See the License for the specific language governing permissions and - limitations under the License. diff --git a/codex-rs/core/src/skills/assets/samples/plan/SKILL.md b/codex-rs/core/src/skills/assets/samples/plan/SKILL.md deleted file mode 100644 index 5d49c33945a..00000000000 --- a/codex-rs/core/src/skills/assets/samples/plan/SKILL.md +++ /dev/null @@ -1,180 +0,0 @@ ---- -name: plan -description: Generate a plan for how an agent should accomplish a complex coding task. Use when a user asks for a plan, and optionally when they want to save, find, read, update, or delete plan files in $CODEX_HOME/plans (default ~/.codex/plans). -metadata: - short-description: Generate a plan for a complex task ---- - -# Plan - -## Overview - -Draft structured plans that clarify intent, scope, requirements, action items, testing/validation, and risks. - -Optionally, save plans to disk as markdown files with YAML frontmatter and free-form content. When drafting in chat, output only the plan body without frontmatter; add frontmatter only when saving to disk. Only write to the plans folder; do not modify the repository codebase. - -This skill can also be used to draft codebase or system overviews. - -## Core rules - -- Resolve the plans directory as `$CODEX_HOME/plans` or `~/.codex/plans` when `CODEX_HOME` is not set. -- Create the plans directory if it does not exist. -- Never write to the repo; only read files to understand context. 
-- Require frontmatter with **only** `name` and `description` (single-line values) for on-disk plans. -- When presenting a draft plan in chat, omit frontmatter and start at `# Plan`. -- Enforce naming rules: short, lower-case, hyphen-delimited; filename must equal `.md`. -- If a plan is not found, state it clearly and offer to create one. -- Allow overview-style plans that document flows, architecture, or context without a work checklist. - -## Decide the task - -1. **Find/list**: discover plans by frontmatter summary; confirm if multiple matches exist. -2. **Read/use**: validate frontmatter; present summary and full contents. -3. **Create**: inspect repo read-only; choose plan style (implementation vs overview); draft plan; write to plans directory only. -4. **Update**: load plan; revise content and/or description; preserve frontmatter keys; overwrite the plan file. -5. **Delete**: confirm intent, then remove the plan file if asked. - -## Plan discovery - -- Prefer `scripts/list_plans.py` for quick summaries. -- Use `scripts/read_plan_frontmatter.py` to validate a specific plan. -- If name mismatches filename or frontmatter is missing fields, call it out and ask whether to fix. - -## Plan creation workflow - -1. Scan context quickly: read README.md and obvious docs (docs/, CONTRIBUTING.md, ARCHITECTURE.md); skim likely touched files; identify constraints (language, frameworks, CI/test commands, deployment). -2. Ask follow-ups only if blocked: at most 1-2 questions, prefer multiple-choice. If unsure but not blocked, state assumptions and proceed. -3. Identify scope, constraints, and data model/API implications (or capture existing behavior for an overview). -4. Draft either an ordered implementation plan or a structured overview plan with diagrams/notes as needed. -5. Immediately output the plan body only (no frontmatter), then ask the user if they want to 1. Make changes, 2. Implement it, 3. Save it as per plan. -6. 
If the user wants to save it, prepend frontmatter and save the plan under the computed plans directory using `scripts/create_plan.py`. - - -## Plan update workflow - -- Re-read the plan and related code/docs before updating. -- Keep the plan name stable unless the user explicitly wants a rename. -- If renaming, update both frontmatter `name` and filename together. - -## Scripts (low-freedom helpers) - -Create a plan file (body only; frontmatter is written for you). Run from the plan skill directory: - -```bash -python ./scripts/create_plan.py \ - --name codex-rate-limit-overview \ - --description "Scope and update plan for Codex rate limiting" \ - --body-file /tmp/plan-body.md -``` - -Read frontmatter summary for a plan (run from the plan skill directory): - -```bash -python ./scripts/read_plan_frontmatter.py ~/.codex/plans/codex-rate-limit-overview.md -``` - -List plan summaries (optional filter; run from the plan skill directory): - -```bash -python ./scripts/list_plans.py --query "rate limit" -``` - -## Plan file format - -Use one of the structures below for the plan body. When drafting, output only the body (no frontmatter). When saving, prepend this frontmatter: - -```markdown ---- -name: -description: <1-line summary> ---- -``` - -### Implementation plan body template - -```markdown -# Plan - -<1-3 sentences: intent, scope, and approach.> - -## Requirements -- -- - -## Scope -- In: -- Out: - -## Files and entry points -- -- - -## Data model / API changes -- - -## Action items -[ ] -[ ] -[ ] -[ ] -[ ] -[ ] - -## Testing and validation -- - -## Risks and edge cases -- -- - -## Open questions -- -- -``` - -### Overview plan body template - -```markdown -# Plan - -<1-3 sentences: intent and scope of the overview.> - -## Overview - - -## Diagrams - - -## Key file references -- -- - -## Auth / routing / behavior notes -- - -## Current status -- - -## Action items -- None (overview only). - -## Testing and validation -- None (overview only). 
- -## Risks and edge cases -- None (overview only). - -## Open questions -- None. -``` - -## Writing guidance - -- Start with 1 short paragraph describing intent and approach. -- Keep action items ordered and atomic (discovery -> changes -> tests -> rollout); use verb-first phrasing. -- Scale action item count to complexity (simple: 1-2; complex: up to about 10). -- Include file/entry-point hints and concrete validation steps where useful. -- Always include testing/validation and risks/edge cases in implementation plans; include safe rollout/rollback when relevant. -- Use open questions only when necessary (max 3). -- Avoid vague steps, micro-steps, and code snippets; keep the plan implementation-agnostic. -- For overview plans, keep action items minimal and set non-applicable sections to "None." diff --git a/codex-rs/core/src/skills/assets/samples/plan/scripts/create_plan.py b/codex-rs/core/src/skills/assets/samples/plan/scripts/create_plan.py deleted file mode 100644 index 4cbfa8f5e59..00000000000 --- a/codex-rs/core/src/skills/assets/samples/plan/scripts/create_plan.py +++ /dev/null @@ -1,114 +0,0 @@ -#!/usr/bin/env python3 -"""Create or overwrite a plan markdown file in $CODEX_HOME/plans.""" - -from __future__ import annotations - -import argparse -import sys -from pathlib import Path - -from plan_utils import get_plans_dir, validate_plan_name - -DEFAULT_TEMPLATE = """# Plan - -<1-3 sentences: intent, scope, and approach.> - -## Requirements -- -- - -## Scope -- In: -- Out: - -## Files and entry points -- -- - -## Data model / API changes -- - -## Action items -[ ] -[ ] -[ ] -[ ] -[ ] -[ ] - -## Testing and validation -- - -## Risks and edge cases -- -- - -## Open questions -- -- -""" - - -def read_body(args: argparse.Namespace) -> str | None: - if args.template: - return DEFAULT_TEMPLATE - if args.body_file: - return Path(args.body_file).read_text(encoding="utf-8") - if not sys.stdin.isatty(): - return sys.stdin.read() - return None - - -def main() -> int: - 
parser = argparse.ArgumentParser( - description="Create a plan file under $CODEX_HOME/plans or ~/.codex/plans." - ) - parser.add_argument("--name", required=True, help="Plan name (lower-case, hyphen-delimited).") - parser.add_argument("--description", required=True, help="Short plan description.") - parser.add_argument( - "--body-file", - help="Path to markdown body (without frontmatter). If omitted, read from stdin.", - ) - parser.add_argument( - "--template", - action="store_true", - help="Write a template body instead of reading from stdin or --body-file.", - ) - parser.add_argument( - "--overwrite", - action="store_true", - help="Overwrite the plan file if it already exists.", - ) - args = parser.parse_args() - - name = args.name.strip() - description = args.description.strip() - validate_plan_name(name) - if not description or "\n" in description: - raise SystemExit("Description must be a single line.") - - body = read_body(args) - if body is None: - raise SystemExit("Provide --body-file, stdin, or --template to supply plan content.") - - body = body.strip() - if not body: - raise SystemExit("Plan body cannot be empty.") - if body.lstrip().startswith("---"): - raise SystemExit("Plan body should not include frontmatter.") - - plans_dir = get_plans_dir() - plans_dir.mkdir(parents=True, exist_ok=True) - plan_path = plans_dir / f"{name}.md" - - if plan_path.exists() and not args.overwrite: - raise SystemExit(f"Plan already exists: {plan_path}. 
Use --overwrite to replace.") - - content = f"---\nname: {name}\ndescription: {description}\n---\n\n{body}\n" - plan_path.write_text(content, encoding="utf-8") - print(str(plan_path)) - return 0 - - -if __name__ == "__main__": - raise SystemExit(main()) diff --git a/codex-rs/core/src/skills/assets/samples/plan/scripts/list_plans.py b/codex-rs/core/src/skills/assets/samples/plan/scripts/list_plans.py deleted file mode 100644 index db6c412dca7..00000000000 --- a/codex-rs/core/src/skills/assets/samples/plan/scripts/list_plans.py +++ /dev/null @@ -1,49 +0,0 @@ -#!/usr/bin/env python3 -"""List plan summaries by reading frontmatter only.""" - -from __future__ import annotations - -import argparse -import json - -from plan_utils import get_plans_dir, parse_frontmatter - - -def main() -> int: - parser = argparse.ArgumentParser(description="List plan summaries from $CODEX_HOME/plans.") - parser.add_argument("--query", help="Case-insensitive substring to filter name/description.") - parser.add_argument("--json", action="store_true", help="Emit JSON output.") - args = parser.parse_args() - - plans_dir = get_plans_dir() - if not plans_dir.exists(): - raise SystemExit(f"Plans directory not found: {plans_dir}") - - query = args.query.lower() if args.query else None - items = [] - for path in sorted(plans_dir.glob("*.md")): - try: - data = parse_frontmatter(path) - except ValueError: - continue - name = data.get("name") - description = data.get("description") - if not name or not description: - continue - if query: - haystack = f"{name} {description}".lower() - if query not in haystack: - continue - items.append({"name": name, "description": description, "path": str(path)}) - - if args.json: - print(json.dumps(items)) - else: - for item in items: - print(f"{item['name']}\t{item['description']}\t{item['path']}") - - return 0 - - -if __name__ == "__main__": - raise SystemExit(main()) diff --git a/codex-rs/core/src/skills/assets/samples/plan/scripts/plan_utils.py 
b/codex-rs/core/src/skills/assets/samples/plan/scripts/plan_utils.py deleted file mode 100644 index 1a36a4bf2b4..00000000000 --- a/codex-rs/core/src/skills/assets/samples/plan/scripts/plan_utils.py +++ /dev/null @@ -1,53 +0,0 @@ -#!/usr/bin/env python3 -"""Shared helpers for plan scripts.""" - -from __future__ import annotations - -import os -import re -from pathlib import Path - -_NAME_RE = re.compile(r"^[a-z0-9]+(-[a-z0-9]+)*$") - - -def get_codex_home() -> Path: - """Return CODEX_HOME if set, else ~/.codex.""" - return Path(os.environ.get("CODEX_HOME", "~/.codex")).expanduser() - - -def get_plans_dir() -> Path: - return get_codex_home() / "plans" - - -def validate_plan_name(name: str) -> None: - if not name or not _NAME_RE.match(name): - raise ValueError( - "Invalid plan name. Use short, lower-case, hyphen-delimited names " - "(e.g., codex-rate-limit-overview)." - ) - - -def parse_frontmatter(path: Path) -> dict: - """Parse YAML frontmatter from a markdown file without reading the body.""" - with path.open("r", encoding="utf-8") as handle: - first = handle.readline() - if first.strip() != "---": - raise ValueError("Frontmatter must start with '---'.") - - data: dict[str, str] = {} - for line in handle: - stripped = line.strip() - if stripped == "---": - return data - if not stripped or stripped.startswith("#"): - continue - if ":" not in line: - raise ValueError(f"Invalid frontmatter line: {line.rstrip()}") - key, value = line.split(":", 1) - key = key.strip() - value = value.strip() - if value and len(value) >= 2 and value[0] == value[-1] and value[0] in ('"', "'"): - value = value[1:-1] - data[key] = value - - raise ValueError("Frontmatter must end with '---'.") diff --git a/codex-rs/core/src/skills/assets/samples/plan/scripts/read_plan_frontmatter.py b/codex-rs/core/src/skills/assets/samples/plan/scripts/read_plan_frontmatter.py deleted file mode 100644 index 1c881ee01b0..00000000000 --- 
a/codex-rs/core/src/skills/assets/samples/plan/scripts/read_plan_frontmatter.py +++ /dev/null @@ -1,41 +0,0 @@ -#!/usr/bin/env python3 -"""Read plan frontmatter without loading the full markdown body.""" - -from __future__ import annotations - -import argparse -import json -from pathlib import Path - -from plan_utils import parse_frontmatter - - -def main() -> int: - parser = argparse.ArgumentParser(description="Read name/description from plan frontmatter.") - parser.add_argument("plan_path", help="Path to the plan markdown file.") - parser.add_argument("--json", action="store_true", help="Emit JSON output.") - args = parser.parse_args() - - path = Path(args.plan_path).expanduser() - if not path.exists(): - raise SystemExit(f"Plan not found: {path}") - - data = parse_frontmatter(path) - name = data.get("name") - description = data.get("description") - if not name or not description: - raise SystemExit("Frontmatter must include name and description.") - - payload = {"name": name, "description": description, "path": str(path)} - if args.json: - print(json.dumps(payload)) - else: - print(f"name: {name}") - print(f"description: {description}") - print(f"path: {path}") - - return 0 - - -if __name__ == "__main__": - raise SystemExit(main()) diff --git a/codex-rs/core/src/state/service.rs b/codex-rs/core/src/state/service.rs index 722c86274be..9fe8a4f7d8c 100644 --- a/codex-rs/core/src/state/service.rs +++ b/codex-rs/core/src/state/service.rs @@ -2,12 +2,14 @@ use std::sync::Arc; use crate::AuthManager; use crate::RolloutRecorder; +use crate::exec_policy::ExecPolicyManager; use crate::mcp_connection_manager::McpConnectionManager; use crate::models_manager::manager::ModelsManager; use crate::skills::SkillsManager; use crate::tools::sandboxing::ApprovalStore; use crate::unified_exec::UnifiedExecSessionManager; use crate::user_notification::UserNotifier; +use codex_lsp::LspManager; use codex_otel::otel_manager::OtelManager; use tokio::sync::Mutex; use tokio::sync::RwLock; @@ 
-21,9 +23,11 @@ pub(crate) struct SessionServices { pub(crate) rollout: Mutex>, pub(crate) user_shell: Arc, pub(crate) show_raw_agent_reasoning: bool, + pub(crate) exec_policy: ExecPolicyManager, pub(crate) auth_manager: Arc, pub(crate) models_manager: Arc, pub(crate) otel_manager: OtelManager, pub(crate) tool_approvals: Mutex, pub(crate) skills_manager: Arc, + pub(crate) lsp_manager: LspManager, } diff --git a/codex-rs/core/src/tasks/mod.rs b/codex-rs/core/src/tasks/mod.rs index 8c8b9ab0554..c85e1ab8857 100644 --- a/codex-rs/core/src/tasks/mod.rs +++ b/codex-rs/core/src/tasks/mod.rs @@ -33,6 +33,7 @@ use codex_protocol::user_input::UserInput; pub(crate) use compact::CompactTask; pub(crate) use ghost_snapshot::GhostSnapshotTask; pub(crate) use plan::PlanTask; +pub(crate) use plan::constrain_features_for_planning; pub(crate) use regular::RegularTask; pub(crate) use review::ReviewTask; pub(crate) use undo::UndoTask; @@ -237,8 +238,12 @@ impl Session { .abort(session_ctx, Arc::clone(&task.turn_context)) .await; + self.cancel_active_subagent_tool_calls(task.turn_context.as_ref()) + .await; + let event = EventMsg::TurnAborted(TurnAbortedEvent { reason }); self.send_event(task.turn_context.as_ref(), event).await; + self.flush_rollout().await; } } diff --git a/codex-rs/core/src/tasks/plan.rs b/codex-rs/core/src/tasks/plan.rs index 032dbf76e5b..cb883e4ec3c 100644 --- a/codex-rs/core/src/tasks/plan.rs +++ b/codex-rs/core/src/tasks/plan.rs @@ -43,16 +43,25 @@ You are planning only. Do not call `apply_patch` or execute mutating commands. Output quality bar: - The plan must be actionable by another engineer without extra back-and-forth. -- Prefer 8-16 steps. Each step should describe a concrete deliverable and, when helpful, name key files/components to touch. +- Scale the number of steps to the task. Avoid filler (small: 4-8; typical: 8-12; complex: 12-16). +- Each step should describe a concrete deliverable and, when helpful, name key files/components to touch. 
- Put detailed substeps, rationale, trade-offs, risks, and validation commands in `plan.explanation` (multi-paragraph is fine). - `plan.explanation` MUST be a practical runbook. Use clear section headings. Include ALL of: - Assumptions - Scope (in-scope + non-goals) - Touchpoints (files/modules/components to change, with what/why) - Approach (sequence notes; include a short "discovery checklist" of 2-6 read-only commands/files if the task is ambiguous) + - Optional: Web search (only if available; keep it minimal and tolerate failures) - Risks (failure modes + mitigations + rollback) - Acceptance criteria (observable outcomes; 3-8 bullets) - Validation (exact commands, and where to run them) + - Open questions (optional; write "None." if none) + - If the task involves UI (web, mobile, TUI), add an explicit "UI quality bar" subsection under Approach or Acceptance criteria that covers: + - Visual/UX goals (what should feel better, not just what changes) + - Design system plan (tokens, spacing, typography, component reuse) + - Responsiveness (breakpoints, layout constraints) and accessibility (keyboard, contrast, screen readers where applicable) + - Interaction polish (loading/empty/error states, animations/micro-interactions only if appropriate) + - Verification: how to manually validate the UX (exact steps) plus any automated UI tests to run Mini-example (illustrative; do not copy verbatim): - Step: "Add `--dry-run` flag to CLI" @@ -61,6 +70,9 @@ Mini-example (illustrative; do not copy verbatim): - Validation: "`cd mytool; cargo test -p mytool-cli`" Process: +- At the beginning of /plan, get repo grounding via `plan_explore` (unless the user provided explicit touchpoints/paths and you truly don't need discovery). +- If you learn new constraints (e.g. from clarification answers) that materially change touchpoints, you may call `plan_explore` again. 
+- Do not do repo exploration directly in the plan-mode main context unless touchpoints are already known or `plan_explore` fails. - Once you understand the goal, call `propose_plan_variants` to generate 3 alternative plans (at most once per draft). - Synthesize the final plan (do not just pick a variant verbatim). - Present the final plan via `approve_plan`. @@ -74,23 +86,33 @@ const PLAN_MODE_DEVELOPER_PREFIX: &str = r#"## Plan Mode (Slash Command) Goal: produce a clear, actionable implementation plan for the user's request without making code changes. Rules: -- You may explore the repo with read-only commands, but keep it minimal (2-6 targeted commands) and avoid dumping large files. +- Prefer `plan_explore` for repo grounding: do not run repo exploration commands directly unless touchpoints are already known (e.g. user specified files) or `plan_explore` fails. +- If you must explore directly, keep it minimal (2-6 targeted commands) and avoid dumping large files. +- If the `web_search` tool is available, you may use it sparingly for up-to-date or niche details; prefer repo-local sources and tolerate tool failures. - Do not attempt to edit files or run mutating commands (no installs, no git writes, no redirects/heredocs that write files). -- You may ask clarifying questions via AskUserQuestion when requirements are ambiguous or missing. -- Do not call `spawn_subagent` in plan mode (it is not available from this session type). +- When the goal is ambiguous in a way that would change the plan materially, ask clarifying questions via AskUserQuestion instead of guessing. Batch questions and avoid prolonged back-and-forth. +- Discovery order: if touchpoints are not already known, call `plan_explore` early (usually as your first action). Ask clarification questions after reading the reports. Re-run `plan_explore` if clarification answers change the likely touchpoints. - Use `propose_plan_variants` to generate 3 alternative plans as input (at most once per plan draft). 
If it fails, proceed without it. - When you have a final plan, call `approve_plan` with: - Title: short and specific. - Summary: 2-4 sentences with key approach + scope boundaries. - Steps: concise, ordered, and checkable. - - Explanation: use the required section headings (Assumptions; Scope; Touchpoints; Approach; Risks; Acceptance criteria; Validation) and make it a junior-executable runbook. + - Steps status: set every step status to `"pending"` (this is an approved plan, not execution progress). + - Explanation: use the required section headings (Assumptions; Scope; Touchpoints; Approach; Risks; Acceptance criteria; Validation; Open questions) and make it a junior-executable runbook. For headings that don't apply, write "None.". - If the user requests revisions, incorporate feedback and propose a revised plan (you may call `propose_plan_variants` again only if the plan materially changes or the user asks for alternatives). - If the user rejects, stop. When the plan is approved, your final assistant message MUST be ONLY valid JSON matching: { "title": string, "summary": string, "plan": { "explanation": string|null, "plan": [ { "step": string, "status": "pending"|"in_progress"|"completed" } ] } } +Do not wrap the JSON in markdown code fences. 
"#; +pub(crate) fn constrain_features_for_planning(features: &mut crate::features::Features) { + features + .disable(crate::features::Feature::ApplyPatchFreeform) + .disable(crate::features::Feature::ViewImageTool); +} + fn build_plan_mode_developer_instructions(existing: &str, ask: &str) -> String { let mut developer_instructions = String::new(); developer_instructions.push_str(PLAN_MODE_DEVELOPER_PREFIX); @@ -179,16 +201,20 @@ async fn start_plan_conversation( .developer_instructions .clone() .unwrap_or_default(); - sub_agent_config.developer_instructions = Some(build_plan_mode_developer_instructions( - existing.as_str(), - ask.as_str(), - )); + // Keep plan-mode guidance at the top; append the global clarification policy after it (as part + // of the inherited developer instructions) to avoid it taking precedence over plan-mode's + // discovery order. + let existing = + crate::tools::spec::prepend_clarification_policy_developer_instructions(Some(existing)) + .unwrap_or_default(); + let existing = + crate::tools::spec::prepend_lsp_navigation_developer_instructions(Some(existing)) + .unwrap_or_default(); + let developer_instructions = + build_plan_mode_developer_instructions(existing.as_str(), ask.as_str()); + sub_agent_config.developer_instructions = Some(developer_instructions); - sub_agent_config - .features - .disable(crate::features::Feature::ApplyPatchFreeform) - .disable(crate::features::Feature::WebSearchRequest) - .disable(crate::features::Feature::ViewImageTool); + constrain_features_for_planning(&mut sub_agent_config.features); sub_agent_config.approval_policy = crate::config::Constrained::allow_any(codex_protocol::protocol::AskForApproval::Never); @@ -352,9 +378,10 @@ mod tests { #[cfg(target_os = "linux")] { use assert_cmd::cargo::cargo_bin; - let mut overrides = crate::config::ConfigOverrides::default(); - overrides.codex_linux_sandbox_exe = Some(cargo_bin("codex-linux-sandbox")); - overrides + crate::config::ConfigOverrides { + 
codex_linux_sandbox_exe: Some(cargo_bin("codex-linux-sandbox")), + ..Default::default() + } } #[cfg(not(target_os = "linux"))] { @@ -376,17 +403,25 @@ mod tests { let existing_base = cfg.base_instructions.clone(); let existing = cfg.developer_instructions.clone().unwrap_or_default(); - cfg.developer_instructions = Some(build_plan_mode_developer_instructions( - existing.as_str(), - ask.as_str(), - )); + let developer_instructions = + build_plan_mode_developer_instructions(existing.as_str(), ask.as_str()); + cfg.developer_instructions = + crate::tools::spec::prepend_clarification_policy_developer_instructions(Some( + developer_instructions, + )); assert_eq!(cfg.base_instructions, existing_base); assert!( cfg.developer_instructions .as_deref() .unwrap_or_default() - .starts_with("## Plan Mode") + .contains("## Plan Mode") + ); + assert!( + cfg.developer_instructions + .as_deref() + .unwrap_or_default() + .contains("## Clarification Policy") ); assert!( cfg.developer_instructions @@ -420,6 +455,21 @@ mod tests { )); } + #[test] + fn planning_constraints_keep_web_search_if_enabled() { + let mut features = crate::features::Features::with_defaults(); + features + .enable(crate::features::Feature::ApplyPatchFreeform) + .enable(crate::features::Feature::ViewImageTool) + .enable(crate::features::Feature::WebSearchRequest); + + constrain_features_for_planning(&mut features); + + assert!(!features.enabled(crate::features::Feature::ApplyPatchFreeform)); + assert!(!features.enabled(crate::features::Feature::ViewImageTool)); + assert!(features.enabled(crate::features::Feature::WebSearchRequest)); + } + #[tokio::test] async fn persist_approved_plan_writes_plan_markdown() -> anyhow::Result<()> { let temp = TempDir::new().expect("tmp dir"); diff --git a/codex-rs/core/src/tools/handlers/apply_patch.rs b/codex-rs/core/src/tools/handlers/apply_patch.rs index 14a481f4eae..fc86ee7e18a 100644 --- a/codex-rs/core/src/tools/handlers/apply_patch.rs +++ 
b/codex-rs/core/src/tools/handlers/apply_patch.rs @@ -10,7 +10,9 @@ use crate::client_common::tools::ResponsesApiTool; use crate::client_common::tools::ToolSpec; use crate::codex::Session; use crate::codex::TurnContext; +use crate::features::Feature; use crate::function_tool::FunctionCallError; +use crate::lsp_prompt::render_diagnostics_summary; use crate::tools::context::SharedTurnDiffTracker; use crate::tools::context::ToolInvocation; use crate::tools::context::ToolOutput; @@ -26,6 +28,9 @@ use crate::tools::sandboxing::ToolCtx; use crate::tools::spec::ApplyPatchToolArgs; use crate::tools::spec::JsonSchema; use async_trait::async_trait; +use codex_apply_patch::ApplyPatchAction; +use codex_lsp::Diagnostic; +use codex_lsp::DiagnosticSeverity; pub struct ApplyPatchHandler; @@ -131,6 +136,13 @@ impl ToolHandler for ApplyPatchHandler { Some(&tracker), ); let content = emitter.finish(event_ctx, out).await?; + let content = append_post_apply_patch_diagnostics( + &content, + session.as_ref(), + turn.as_ref(), + &apply.action, + ) + .await; Ok(ToolOutput::Function { content, content_items: None, @@ -218,6 +230,9 @@ pub(crate) async fn intercept_apply_patch( let event_ctx = ToolEventCtx::new(session, turn, call_id, tracker.as_ref().copied()); let content = emitter.finish(event_ctx, out).await?; + let content = + append_post_apply_patch_diagnostics(&content, session, turn, &apply.action) + .await; Ok(Some(ToolOutput::Function { content, content_items: None, @@ -342,3 +357,78 @@ It is important to remember: }, }) } + +async fn append_post_apply_patch_diagnostics( + content: &str, + session: &Session, + turn: &TurnContext, + action: &ApplyPatchAction, +) -> String { + if !session.enabled(Feature::Lsp) || action.is_empty() { + return content.to_string(); + } + + let cwd = turn.cwd.clone(); + let mut changed_paths: Vec<_> = action + .changes() + .keys() + .filter(|path| path.starts_with(&cwd)) + .cloned() + .collect(); + // Avoid doing too much LSP work for very large patches. 
+ changed_paths.truncate(10); + if changed_paths.is_empty() { + return content.to_string(); + } + + let mut existing_paths = Vec::with_capacity(changed_paths.len()); + for path in changed_paths { + if tokio::fs::metadata(&path).await.is_ok() { + existing_paths.push(path); + } + } + if existing_paths.is_empty() { + return content.to_string(); + } + + let lsp = session.services.lsp_manager.clone(); + // Best-effort: "touch" changed files so servers see them and can publish diagnostics. + for path in &existing_paths { + let _ = lsp.document_symbols(&cwd, path).await; + } + + let max_per_file = 50usize; + let mut diags = Vec::::new(); + // LSP servers often publish diagnostics asynchronously, sometimes a few seconds after open/change. + // Bound the wait so apply_patch doesn't hang indefinitely. + for _ in 0..10 { + diags.clear(); + for path in &existing_paths { + if let Ok(mut file_diags) = lsp + .diagnostics(&cwd, Some(path.as_path()), max_per_file) + .await + { + diags.append(&mut file_diags); + } + } + + diags.retain(|d| { + matches!( + d.severity, + Some(DiagnosticSeverity::Error | DiagnosticSeverity::Warning) + ) + }); + if !diags.is_empty() { + break; + } + + tokio::time::sleep(std::time::Duration::from_millis(500)).await; + } + + if diags.is_empty() { + return content.to_string(); + } + + let summary = render_diagnostics_summary(&diags); + format!("{content}\n\n{summary}") +} diff --git a/codex-rs/core/src/tools/handlers/lsp.rs b/codex-rs/core/src/tools/handlers/lsp.rs new file mode 100644 index 00000000000..77bddf59bea --- /dev/null +++ b/codex-rs/core/src/tools/handlers/lsp.rs @@ -0,0 +1,216 @@ +use std::path::Path; +use std::path::PathBuf; +use std::time::Duration; + +use async_trait::async_trait; +use serde::Deserialize; + +use crate::features::Feature; +use crate::function_tool::FunctionCallError; +use crate::tools::context::ToolInvocation; +use crate::tools::context::ToolOutput; +use crate::tools::context::ToolPayload; +use 
crate::tools::registry::ToolHandler; +use crate::tools::registry::ToolKind; + +pub struct LspHandler; + +#[derive(Deserialize)] +struct LspDiagnosticsArgs { + #[serde(default)] + root: Option, + #[serde(default)] + path: Option, + #[serde(default)] + max_results: Option, +} + +#[derive(Deserialize)] +struct LspPositionArgs { + file_path: String, + line: u32, + character: u32, + #[serde(default)] + root: Option, +} + +#[derive(Deserialize)] +struct LspReferencesArgs { + file_path: String, + line: u32, + character: u32, + #[serde(default)] + include_declaration: bool, + #[serde(default)] + root: Option, +} + +#[derive(Deserialize)] +struct LspDocumentSymbolsArgs { + file_path: String, + #[serde(default)] + root: Option, +} + +#[async_trait] +impl ToolHandler for LspHandler { + fn kind(&self) -> ToolKind { + ToolKind::Function + } + + async fn handle(&self, invocation: ToolInvocation) -> Result { + let ToolInvocation { + payload, + session, + turn, + tool_name, + .. + } = invocation; + + if !session.enabled(Feature::Lsp) { + return Err(FunctionCallError::RespondToModel( + "LSP tools are disabled. Enable `[features].lsp = true` in config.toml." 
+ .to_string(), + )); + } + + let arguments = match payload { + ToolPayload::Function { arguments } => arguments, + _ => { + return Err(FunctionCallError::RespondToModel( + "lsp tool handler received unsupported payload".to_string(), + )); + } + }; + + let lsp = session.services.lsp_manager.clone(); + let max_default = turn.client.config().lsp.max_tool_diagnostics; + let wait_ms = turn.client.config().lsp.tool_diagnostics_wait_ms; + + match tool_name.as_str() { + "lsp_diagnostics" => { + let args: LspDiagnosticsArgs = serde_json::from_str(&arguments).map_err(|err| { + FunctionCallError::RespondToModel(format!( + "failed to parse function arguments: {err:?}", + )) + })?; + + let root = turn.resolve_path(args.root); + let path = args + .path + .as_deref() + .map(|p| resolve_path_under_root(&root, p)); + let max_results = args.max_results.unwrap_or(max_default); + + let diags = lsp + .diagnostics_wait( + &root, + path.as_deref(), + max_results, + Duration::from_millis(wait_ms as u64), + ) + .await + .map_err(|err| FunctionCallError::RespondToModel(format!("{err:#}")))?; + + let out = serde_json::json!({ + "root": root, + "diagnostics": diags, + }); + Ok(ToolOutput::Function { + content: serde_json::to_string_pretty(&out).unwrap_or_else(|_| out.to_string()), + content_items: None, + success: Some(true), + }) + } + "lsp_definition" => { + let args: LspPositionArgs = serde_json::from_str(&arguments).map_err(|err| { + FunctionCallError::RespondToModel(format!( + "failed to parse function arguments: {err:?}", + )) + })?; + let root = turn.resolve_path(args.root); + let path = resolve_path_under_root(&root, &args.file_path); + let locations = lsp + .definition( + &root, + &path, + codex_lsp::Position { + line: args.line, + character: args.character, + }, + ) + .await + .map_err(|err| FunctionCallError::RespondToModel(format!("{err:#}")))?; + + let out = serde_json::json!({ "locations": locations }); + Ok(ToolOutput::Function { + content: 
serde_json::to_string_pretty(&out).unwrap_or_else(|_| out.to_string()), + content_items: None, + success: Some(true), + // It's useful to return `success=false` if we got no locations. + }) + } + "lsp_references" => { + let args: LspReferencesArgs = serde_json::from_str(&arguments).map_err(|err| { + FunctionCallError::RespondToModel(format!( + "failed to parse function arguments: {err:?}", + )) + })?; + let root = turn.resolve_path(args.root); + let path = resolve_path_under_root(&root, &args.file_path); + let locations = lsp + .references( + &root, + &path, + codex_lsp::Position { + line: args.line, + character: args.character, + }, + args.include_declaration, + ) + .await + .map_err(|err| FunctionCallError::RespondToModel(format!("{err:#}")))?; + + let out = serde_json::json!({ "locations": locations }); + Ok(ToolOutput::Function { + content: serde_json::to_string_pretty(&out).unwrap_or_else(|_| out.to_string()), + content_items: None, + success: Some(true), + }) + } + "lsp_document_symbols" => { + let args: LspDocumentSymbolsArgs = + serde_json::from_str(&arguments).map_err(|err| { + FunctionCallError::RespondToModel(format!( + "failed to parse function arguments: {err:?}", + )) + })?; + let root = turn.resolve_path(args.root); + let path = resolve_path_under_root(&root, &args.file_path); + let symbols = lsp + .document_symbols(&root, &path) + .await + .map_err(|err| FunctionCallError::RespondToModel(format!("{err:#}")))?; + + let out = serde_json::json!({ "symbols": symbols }); + Ok(ToolOutput::Function { + content: serde_json::to_string_pretty(&out).unwrap_or_else(|_| out.to_string()), + content_items: None, + success: Some(true), + }) + } + _ => Err(FunctionCallError::RespondToModel(format!( + "unknown lsp tool: {tool_name}", + ))), + } + } +} + +fn resolve_path_under_root(root: &Path, input: &str) -> PathBuf { + let path = PathBuf::from(input); + if path.is_absolute() { + path + } else { + root.join(path) + } +} diff --git 
a/codex-rs/core/src/tools/handlers/mod.rs b/codex-rs/core/src/tools/handlers/mod.rs index 8e6e61fdf01..786f752ad9c 100644 --- a/codex-rs/core/src/tools/handlers/mod.rs +++ b/codex-rs/core/src/tools/handlers/mod.rs @@ -2,13 +2,16 @@ pub mod apply_patch; mod ask_user_question; mod grep_files; mod list_dir; +mod lsp; mod mcp; mod mcp_resource; mod plan; mod plan_approval; +mod plan_explore; mod plan_variants; mod read_file; mod shell; +mod spawn_mini_subagent; mod spawn_subagent; mod test_sync; mod unified_exec; @@ -21,16 +24,23 @@ pub(crate) use ask_user_question::ASK_USER_QUESTION_TOOL_NAME; pub use ask_user_question::AskUserQuestionHandler; pub use grep_files::GrepFilesHandler; pub use list_dir::ListDirHandler; +pub use lsp::LspHandler; pub use mcp::McpHandler; pub use mcp_resource::McpResourceHandler; pub use plan::PlanHandler; pub(crate) use plan_approval::APPROVE_PLAN_TOOL_NAME; pub use plan_approval::PlanApprovalHandler; +pub(crate) use plan_explore::PLAN_EXPLORE_TOOL_NAME; +pub use plan_explore::PlanExploreHandler; pub(crate) use plan_variants::PROPOSE_PLAN_VARIANTS_TOOL_NAME; pub use plan_variants::PlanVariantsHandler; pub use read_file::ReadFileHandler; pub use shell::ShellCommandHandler; pub use shell::ShellHandler; +pub(crate) use spawn_mini_subagent::DEFAULT_MINI_SUBAGENT_MODEL_SLUG; +pub(crate) use spawn_mini_subagent::SPAWN_MINI_SUBAGENT_LABEL_PREFIX; +pub(crate) use spawn_mini_subagent::SPAWN_MINI_SUBAGENT_TOOL_NAME; +pub use spawn_mini_subagent::SpawnMiniSubagentHandler; pub(crate) use spawn_subagent::SPAWN_SUBAGENT_LABEL_PREFIX; pub(crate) use spawn_subagent::SPAWN_SUBAGENT_TOOL_NAME; pub use spawn_subagent::SpawnSubagentHandler; diff --git a/codex-rs/core/src/tools/handlers/plan_explore.rs b/codex-rs/core/src/tools/handlers/plan_explore.rs new file mode 100644 index 00000000000..4c13c127eab --- /dev/null +++ b/codex-rs/core/src/tools/handlers/plan_explore.rs @@ -0,0 +1,282 @@ +use async_trait::async_trait; +use 
codex_protocol::protocol::SessionSource; +use codex_protocol::protocol::SubAgentInvocation; +use codex_protocol::protocol::SubAgentSource; +use codex_protocol::user_input::UserInput; +use serde::Deserialize; +use serde_json::json; +use std::sync::Arc; +use std::time::Instant; +use tokio::task::JoinSet; + +use crate::config::Config; +use crate::function_tool::FunctionCallError; +use crate::tools::context::ToolInvocation; +use crate::tools::context::ToolOutput; +use crate::tools::context::ToolPayload; +use crate::tools::handlers::DEFAULT_MINI_SUBAGENT_MODEL_SLUG; +use crate::tools::registry::ToolHandler; +use crate::tools::registry::ToolKind; + +pub(crate) const PLAN_EXPLORE_TOOL_NAME: &str = "plan_explore"; + +pub struct PlanExploreHandler; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +struct PlanExploreArgs { + goal: String, + explorers: Option>, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +struct ExplorerArgs { + focus: String, + description: Option, + prompt: Option, +} + +const DEFAULT_TOTAL_EXPLORERS: usize = 3; + +const PLAN_EXPLORE_PROMPT: &str = r#"You are a read-only exploration subagent helping another agent write an implementation plan. + +Hard rules: +- Do not ask the user questions. +- Do not propose or perform edits. Do not call apply_patch. +- Do not call propose_plan_variants. +- Do not call spawn_subagent. +- You may explore the repo with read-only commands, but keep it minimal (2-6 targeted commands) and avoid dumping large files. + +Output requirements: +- Return a concise plain-text report (aim for ~10-25 lines). +- Include a short "Key paths" list of the most relevant files/modules. +- Include a short "Entry points / call chain" if applicable. 
+- Include any gotchas (tests, platform constraints, config knobs) that affect the plan."#; + +fn explorer_focus(idx: usize) -> &'static str { + match idx { + 1 => "Repo map + likely touchpoints (where changes would land).", + 2 => "Entry points + control flow (where the behavior is wired).", + 3 => "Validation + risks (tests/commands, edge cases, rollback notes).", + _ => "General exploration.", + } +} + +async fn run_one_explorer( + call_id: String, + invocation: SubAgentInvocation, + base_config: Config, + idx: usize, + focus: String, + parent_session: Arc, + parent_ctx: Arc, +) -> String { + let mut cfg = base_config; + let mut invocation = invocation; + + // Keep this prompt focused and small; avoid inheriting large caller developer instructions. + let prompt = format!("{PLAN_EXPLORE_PROMPT}\n\nFocus:\n- {focus}\n"); + cfg.developer_instructions = + crate::tools::spec::prepend_lsp_navigation_developer_instructions(Some(prompt)); + + cfg.model = Some( + parent_ctx + .mini_subagent_model + .clone() + .unwrap_or_else(|| DEFAULT_MINI_SUBAGENT_MODEL_SLUG.to_string()), + ); + invocation.model = cfg.model.clone(); + cfg.model_reasoning_effort = parent_ctx + .mini_subagent_reasoning_effort + .or(Some(codex_protocol::openai_models::ReasoningEffort::Medium)); + cfg.model_reasoning_summary = parent_ctx.client.get_reasoning_summary(); + + let mut features = cfg.features.clone(); + crate::tasks::constrain_features_for_planning(&mut features); + cfg.features = features; + + cfg.approval_policy = + crate::config::Constrained::allow_any(codex_protocol::protocol::AskForApproval::Never); + cfg.sandbox_policy = + crate::config::Constrained::allow_any(codex_protocol::protocol::SandboxPolicy::ReadOnly); + + let input = vec![UserInput::Text { + text: invocation.prompt.clone(), + }]; + + match crate::tools::subagent_runner::run_subagent_tool_call( + parent_session, + parent_ctx, + call_id, + invocation, + cfg, + input, + SubAgentSource::Other(format!("plan_explore_{idx}")), + ) + 
.await + { + Ok(response) => response, + Err(err) => err, + } +} + +#[async_trait] +impl ToolHandler for PlanExploreHandler { + fn kind(&self) -> ToolKind { + ToolKind::Function + } + + async fn handle(&self, invocation: ToolInvocation) -> Result { + let ToolInvocation { + session, + turn, + call_id, + payload, + tool_name, + .. + } = invocation; + + let ToolPayload::Function { arguments } = payload else { + return Err(FunctionCallError::RespondToModel(format!( + "unsupported payload for {tool_name}" + ))); + }; + + let source = turn.client.get_session_source(); + let is_plan_mode = matches!( + source, + SessionSource::SubAgent(SubAgentSource::Other(label)) if label == "plan_mode" + ); + if !is_plan_mode { + return Err(FunctionCallError::RespondToModel( + "plan_explore is only available in /plan mode".to_string(), + )); + } + + let args: PlanExploreArgs = serde_json::from_str(&arguments).map_err(|e| { + FunctionCallError::RespondToModel(format!("failed to parse function arguments: {e:?}")) + })?; + + let goal = args.goal.trim(); + if goal.is_empty() { + return Err(FunctionCallError::RespondToModel( + "goal must be non-empty".to_string(), + )); + } + + let base_config = turn.client.config().as_ref().clone(); + let goal = goal.to_string(); + + let explorers = match args.explorers { + Some(explorers) => { + if explorers.is_empty() { + return Err(FunctionCallError::RespondToModel( + "explorers must be non-empty when provided".to_string(), + )); + } + explorers + } + None => (1..=DEFAULT_TOTAL_EXPLORERS) + .map(|idx| ExplorerArgs { + focus: explorer_focus(idx).to_string(), + description: None, + prompt: None, + }) + .collect(), + }; + + let total_explorers = explorers.len(); + + let started_at = Instant::now(); + let mut join_set = JoinSet::new(); + for (zero_idx, explorer) in explorers.into_iter().enumerate() { + let idx = zero_idx + 1; + let focus = explorer.focus.trim().to_string(); + if focus.is_empty() { + return Err(FunctionCallError::RespondToModel(format!( + 
"explorers[{zero_idx}] focus must be non-empty" + ))); + } + + let description = explorer.description.map(|d| d.trim().to_string()); + let description = if description.as_deref().is_some_and(str::is_empty) { + None + } else { + description + }; + let description = + description.unwrap_or_else(|| format!("{focus} ({idx}/{total_explorers})")); + + let prompt = explorer.prompt.map(|p| p.trim().to_string()); + let prompt = if prompt.as_deref().is_some_and(str::is_empty) { + None + } else { + prompt + }; + let prompt = prompt.unwrap_or_else(|| format!("Goal: {goal}\n\nFocus: {focus}")); + + let explorer_call_id = format!("{call_id}:plan_explore:{idx}"); + let explorer_invocation = SubAgentInvocation { + description, + label: format!("plan_explore_{idx}"), + prompt, + model: None, + }; + let base_config = base_config.clone(); + let session = Arc::clone(&session); + let turn = Arc::clone(&turn); + let focus = focus.clone(); + join_set.spawn(async move { + let out = run_one_explorer( + explorer_call_id, + explorer_invocation, + base_config, + idx, + focus.clone(), + session, + turn, + ) + .await; + (idx, focus, out) + }); + } + + let mut reports_by_idx = vec![String::new(); total_explorers]; + let mut focus_by_idx = vec![String::new(); total_explorers]; + while let Some(result) = join_set.join_next().await { + match result { + Ok((idx, focus, out)) => { + if idx > 0 && idx <= total_explorers { + reports_by_idx[idx - 1] = out; + focus_by_idx[idx - 1] = focus; + } + } + Err(err) => { + return Err(FunctionCallError::RespondToModel(format!( + "failed to join plan exploration subagent: {err:?}" + ))); + } + } + } + + Ok(ToolOutput::Function { + content: json!({ + "duration_ms": started_at.elapsed().as_millis(), + "reports": reports_by_idx + .into_iter() + .zip(focus_by_idx) + .enumerate() + .map(|(i, (text, focus))| json!({ + "idx": i + 1, + "focus": focus, + "text": text, + })) + .collect::>(), + }) + .to_string(), + content_items: None, + success: Some(true), + }) + } +} diff 
--git a/codex-rs/core/src/tools/handlers/plan_variants.rs b/codex-rs/core/src/tools/handlers/plan_variants.rs index 650d9b88350..efc0b5e5037 100644 --- a/codex-rs/core/src/tools/handlers/plan_variants.rs +++ b/codex-rs/core/src/tools/handlers/plan_variants.rs @@ -1,22 +1,16 @@ use async_trait::async_trait; use codex_protocol::plan_mode::PlanOutputEvent; use codex_protocol::plan_tool::UpdatePlanArgs; -use codex_protocol::protocol::Event; -use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::SessionSource; +use codex_protocol::protocol::SubAgentInvocation; use codex_protocol::protocol::SubAgentSource; use codex_protocol::user_input::UserInput; use serde::Deserialize; use serde_json::json; use std::sync::Arc; -use std::time::Duration; -use std::time::Instant; use tokio::task::JoinSet; -use tokio_util::sync::CancellationToken; -use crate::codex_delegate::run_codex_conversation_one_shot; use crate::config::Config; -use crate::features::Feature; use crate::function_tool::FunctionCallError; use crate::tools::context::ToolInvocation; use crate::tools::context::ToolOutput; @@ -41,12 +35,15 @@ Hard rules: - Do not propose or perform edits. Do not call apply_patch. - Do not call propose_plan_variants. - You may explore the repo with read-only commands, but keep it minimal (2-6 targeted commands) and avoid dumping large files. +- If the `web_search` tool is available, you may use it sparingly for up-to-date or niche details; prefer repo-local sources and tolerate tool failures. - Output ONLY valid JSON matching this shape: { "title": string, "summary": string, "plan": { "explanation": string|null, "plan": [ { "step": string, "status": "pending"|"in_progress"|"completed" } ] } } + Do not wrap the JSON in markdown code fences. Quality bar: -- Prefer 8-16 steps that are checkable and ordered. -- `plan.explanation` MUST be a practical runbook with clear section headings. Include ALL of: +- Scale the number of steps to the task. 
Avoid filler (small: 4-8; typical: 8-12; complex: 12-16). +- In `summary`, state when to choose this variant and the biggest trade-off. +- `plan.explanation` MUST be a practical runbook with clear section headings. Keep it concise and focus on what makes this variant distinct. Include ALL of: - Assumptions - Scope (in-scope + non-goals) - Touchpoints (files/modules/components to change, with what/why) @@ -54,6 +51,7 @@ Quality bar: - Risks (failure modes + mitigations + rollback) - Acceptance criteria (observable outcomes; 3-8 bullets) - Validation (exact commands, and where to run them) + - Open questions (optional; write "None." if none) - Make this variant meaningfully different from other plausible variants (trade-offs, sequencing, scope, risk posture). "#; @@ -85,19 +83,41 @@ fn variant_title(idx: usize, total: usize) -> String { fn plan_variant_focus(idx: usize) -> &'static str { match idx { 1 => { - "Variant 1 (Minimal): minimal-risk, minimal-diff path (pragmatic, incremental; avoid refactors). Title MUST be \"Minimal\"." + "Variant 1 (Minimal): minimal-risk, minimal-diff path (pragmatic, incremental; avoid refactors)." } 2 => { - "Variant 2 (Correctness): correctness-first path (tests, invariants, edge cases, careful validation/rollback). Title MUST be \"Correctness\"." + "Variant 2 (Correctness): correctness-first path (tests, invariants, edge cases, careful validation/rollback)." } 3 => { - "Variant 3 (DX): architecture/DX-first path (refactors that pay down tech debt, clearer abstractions, better ergonomics). Title MUST be \"DX\"." + "Variant 3 (DX): architecture/DX-first path (refactors that pay down tech debt, clearer abstractions, better ergonomics)." 
} _ => "Use a distinct angle and trade-offs.", } } +fn strip_clarification_policy(existing: &str) -> String { + const HEADER: &str = "## Clarification Policy"; + let Some(start) = existing.find(HEADER) else { + return existing.to_string(); + }; + + let after_header = &existing[start + HEADER.len()..]; + let end_rel = after_header.find("\n## ").unwrap_or(after_header.len()); + let end = start + HEADER.len() + end_rel; + + let before = existing[..start].trim_end(); + let after = existing[end..].trim_start(); + if before.is_empty() { + return after.to_string(); + } + if after.is_empty() { + return before.to_string(); + } + format!("{before}\n{after}") +} + fn build_plan_variant_developer_instructions(idx: usize, total: usize, existing: &str) -> String { + let existing = strip_clarification_policy(existing); let existing = existing.trim(); if existing.is_empty() { return format!( @@ -121,6 +141,7 @@ impl ToolHandler for PlanVariantsHandler { let ToolInvocation { session, turn, + call_id, payload, tool_name, .. 
@@ -157,49 +178,34 @@ impl ToolHandler for PlanVariantsHandler { let mut join_set = JoinSet::new(); for idx in 1..=TOTAL { let label = format!("plan_variant_{idx}"); + let variant_call_id = format!("{call_id}:plan_variant:{idx}"); let base_config = turn.client.config().as_ref().clone(); let goal = goal.to_string(); let session = Arc::clone(&session); let turn = Arc::clone(&turn); join_set.spawn(async move { - let started_at = Instant::now(); - - session - .notify_background_event( - turn.as_ref(), - format!("Plan variants: generating {idx}/{TOTAL}…"), - ) - .await; - - session - .notify_background_event( - turn.as_ref(), - format!("Plan variant {idx}/{TOTAL}: starting"), - ) - .await; - - let out = run_one_variant( + let invocation = SubAgentInvocation { + description: format!( + "Plan variant {idx}/{TOTAL}: {}", + variant_title(idx, TOTAL) + ), + label: format!("plan_variant_{idx}"), + prompt: format!("Goal: {goal}\n\nReturn plan variant #{idx}."), + model: None, + }; + let out = run_one_variant(PlanVariantRunArgs { + call_id: variant_call_id, + invocation, base_config, goal, idx, - TOTAL, + total: TOTAL, label, - Arc::clone(&session), - Arc::clone(&turn), - ) + parent_session: Arc::clone(&session), + parent_ctx: Arc::clone(&turn), + }) .await; - let elapsed = started_at.elapsed(); - session - .notify_background_event( - turn.as_ref(), - format!( - "Plan variants: finished {idx}/{TOTAL} ({})", - fmt_variant_duration(elapsed) - ), - ) - .await; - (idx, out) }); } @@ -243,99 +249,21 @@ impl ToolHandler for PlanVariantsHandler { } } -fn fmt_variant_duration(elapsed: Duration) -> String { - let secs = elapsed.as_secs_f64(); - if secs < 60.0 { - return format!("{secs:.1}s"); - } - - let whole_secs = elapsed.as_secs(); - let minutes = whole_secs / 60; - let seconds = whole_secs % 60; - format!("{minutes}m {seconds:02}s") -} - -fn fmt_exec_activity_command(command: &[String]) -> String { - if command.is_empty() { - return "shell".to_string(); - } - - let cmd = if let 
Some((_shell, script)) = crate::parse_command::extract_shell_command(command) { - let script = script.trim(); - if script.is_empty() { - "shell".to_string() - } else { - script - .lines() - .map(str::trim) - .filter(|line| !line.is_empty()) - .collect::>() - .join(" ") - } - } else { - crate::parse_command::shlex_join(command) - }; - - if cmd.is_empty() { - "shell".to_string() - } else { - cmd - } -} - -fn activity_for_event(msg: &EventMsg) -> Option { - match msg { - EventMsg::TaskStarted(_) => Some("starting".to_string()), - EventMsg::UserMessage(_) => Some("sending prompt".to_string()), - EventMsg::AgentReasoning(_) - | EventMsg::AgentReasoningDelta(_) - | EventMsg::AgentReasoningRawContent(_) - | EventMsg::AgentReasoningRawContentDelta(_) - | EventMsg::AgentReasoningSectionBreak(_) => Some("thinking".to_string()), - EventMsg::AgentMessage(_) | EventMsg::AgentMessageDelta(_) => Some("writing".to_string()), - EventMsg::ExecCommandBegin(ev) => Some(fmt_exec_activity_command(&ev.command)), - EventMsg::McpToolCallBegin(ev) => Some(format!( - "mcp {}/{}", - ev.invocation.server.trim(), - ev.invocation.tool.trim() - )), - EventMsg::WebSearchBegin(_) => Some("web_search".to_string()), - _ => None, - } -} - -fn fmt_variant_tokens(tokens: i64) -> Option { - if tokens <= 0 { - return None; - } - - let tokens_f = tokens as f64; - if tokens < 1_000 { - return Some(format!("{tokens}")); - } - if tokens < 100_000 { - return Some(format!("{:.1}k", tokens_f / 1_000.0)); - } - if tokens < 1_000_000 { - return Some(format!("{}k", tokens / 1_000)); - } - if tokens < 100_000_000 { - return Some(format!("{:.1}M", tokens_f / 1_000_000.0)); - } - - Some(format!("{}M", tokens / 1_000_000)) -} +async fn run_one_variant(args: PlanVariantRunArgs) -> PlanOutputEvent { + let PlanVariantRunArgs { + call_id, + invocation, + base_config, + goal, + idx, + total, + label, + parent_session, + parent_ctx, + } = args; -async fn run_one_variant( - base_config: Config, - goal: String, - idx: usize, - 
total: usize, - label: String, - parent_session: Arc, - parent_ctx: Arc, -) -> PlanOutputEvent { let mut cfg = base_config.clone(); + let mut invocation = invocation; // Do not override the base/system prompt; some environments restrict it to whitelisted prompts. // Put plan-variant guidance in developer instructions instead. @@ -353,16 +281,14 @@ async fn run_one_variant( .clone() .unwrap_or_else(|| parent_ctx.client.get_model()), ); + invocation.model = cfg.model.clone(); cfg.model_reasoning_effort = parent_ctx .plan_reasoning_effort .or(parent_ctx.client.get_reasoning_effort()); cfg.model_reasoning_summary = parent_ctx.client.get_reasoning_summary(); let mut features = cfg.features.clone(); - features - .disable(Feature::ApplyPatchFreeform) - .disable(Feature::WebSearchRequest) - .disable(Feature::ViewImageTool); + crate::tasks::constrain_features_for_planning(&mut features); cfg.features = features; cfg.approval_policy = crate::config::Constrained::allow_any(codex_protocol::protocol::AskForApproval::Never); @@ -373,22 +299,18 @@ async fn run_one_variant( text: format!("Goal: {goal}\n\nReturn plan variant #{idx}."), }]; - let cancel = CancellationToken::new(); - let session_for_events = Arc::clone(&parent_session); - let io = match run_codex_conversation_one_shot( - cfg, - Arc::clone(&parent_session.services.auth_manager), - Arc::clone(&parent_session.services.models_manager), - input, + let response = match crate::tools::subagent_runner::run_subagent_tool_call( parent_session, Arc::clone(&parent_ctx), - cancel, - None, + call_id, + invocation, + cfg, + input, SubAgentSource::Other(label), ) .await { - Ok(io) => io, + Ok(response) => response, Err(err) => { return PlanOutputEvent { title: variant_title(idx, total), @@ -401,61 +323,19 @@ async fn run_one_variant( } }; - let mut last_agent_message: Option = None; - let mut last_activity: Option = None; - let mut last_reported_tokens: Option = None; - let mut last_token_update_at: Option = None; - while let 
Ok(Event { msg, .. }) = io.rx_event.recv().await { - if let EventMsg::TokenCount(ev) = &msg - && let Some(info) = &ev.info - { - let tokens = info.total_token_usage.blended_total(); - let now = Instant::now(); - let should_report = match (last_reported_tokens, last_token_update_at) { - (Some(prev), Some(prev_at)) => { - tokens > prev - && (tokens - prev >= 250 || now.duration_since(prev_at).as_secs() >= 2) - } - (Some(prev), None) => tokens > prev, - (None, _) => tokens > 0, - }; - - if should_report && let Some(formatted) = fmt_variant_tokens(tokens) { - session_for_events - .notify_background_event( - parent_ctx.as_ref(), - format!("Plan variant {idx}/{total}: tokens {formatted}"), - ) - .await; - last_reported_tokens = Some(tokens); - last_token_update_at = Some(now); - } - } - - if let Some(activity) = activity_for_event(&msg) - && last_activity.as_deref() != Some(activity.as_str()) - { - session_for_events - .notify_background_event( - parent_ctx.as_ref(), - format!("Plan variant {idx}/{total}: {activity}"), - ) - .await; - last_activity = Some(activity); - } - - match msg { - EventMsg::TaskComplete(ev) => { - last_agent_message = ev.last_agent_message; - break; - } - EventMsg::TurnAborted(_) => break, - _ => {} - } - } + parse_plan_output_event(idx, total, response.as_str()) +} - let text = last_agent_message.unwrap_or_default(); - parse_plan_output_event(idx, total, text.as_str()) +struct PlanVariantRunArgs { + call_id: String, + invocation: SubAgentInvocation, + base_config: Config, + goal: String, + idx: usize, + total: usize, + label: String, + parent_session: Arc, + parent_ctx: Arc, } fn parse_plan_output_event(idx: usize, total: usize, text: &str) -> PlanOutputEvent { @@ -485,32 +365,6 @@ fn parse_plan_output_event(idx: usize, total: usize, text: &str) -> PlanOutputEv mod tests { use super::*; - #[test] - fn exec_activity_command_strips_powershell_wrapper() { - let shell = if cfg!(windows) { - 
"C:\\windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe" - } else { - "/usr/local/bin/powershell.exe" - }; - let cmd = vec![ - shell.to_string(), - "-NoProfile".to_string(), - "-Command".to_string(), - "rg --version".to_string(), - ]; - assert_eq!(fmt_exec_activity_command(&cmd), "rg --version"); - } - - #[test] - fn exec_activity_command_strips_bash_lc_wrapper() { - let cmd = vec![ - "bash".to_string(), - "-lc".to_string(), - "rg --version".to_string(), - ]; - assert_eq!(fmt_exec_activity_command(&cmd), "rg --version"); - } - #[test] fn plan_variant_titles_are_stable() { assert_eq!(variant_title(1, 3), "Minimal"); @@ -520,6 +374,16 @@ mod tests { assert_eq!(variant_title(1, 2), "Variant 1/2"); } + #[test] + fn plan_variants_strip_clarification_policy_from_existing_instructions() { + let existing = + "## Clarification Policy\n- Ask questions\n\n## Something Else\nKeep this.\n"; + let out = build_plan_variant_developer_instructions(1, 3, existing); + assert!(!out.contains("## Clarification Policy")); + assert!(out.contains("## Something Else")); + assert!(out.contains("Keep this.")); + } + #[test] fn plan_variant_output_titles_are_normalized() { let ev = parse_plan_output_event( @@ -537,9 +401,10 @@ mod tests { #[cfg(target_os = "linux")] { use assert_cmd::cargo::cargo_bin; - let mut overrides = crate::config::ConfigOverrides::default(); - overrides.codex_linux_sandbox_exe = Some(cargo_bin("codex-linux-sandbox")); - overrides + crate::config::ConfigOverrides { + codex_linux_sandbox_exe: Some(cargo_bin("codex-linux-sandbox")), + ..Default::default() + } } #[cfg(not(target_os = "linux"))] { diff --git a/codex-rs/core/src/tools/handlers/shell.rs b/codex-rs/core/src/tools/handlers/shell.rs index 624094a5adc..72036177116 100644 --- a/codex-rs/core/src/tools/handlers/shell.rs +++ b/codex-rs/core/src/tools/handlers/shell.rs @@ -6,7 +6,6 @@ use std::sync::Arc; use crate::codex::TurnContext; use crate::exec::ExecParams; use crate::exec_env::create_env; -use 
crate::exec_policy::create_exec_approval_requirement_for_command; use crate::function_tool::FunctionCallError; use crate::is_safe_command::is_known_safe_command; use crate::protocol::ExecCommandSource; @@ -252,15 +251,17 @@ impl ShellHandler { emitter.begin(event_ctx).await; let features = session.features(); - let exec_approval_requirement = create_exec_approval_requirement_for_command( - &turn.exec_policy, - &features, - &exec_params.command, - turn.approval_policy, - &turn.sandbox_policy, - exec_params.sandbox_permissions, - ) - .await; + let exec_approval_requirement = session + .services + .exec_policy + .create_exec_approval_requirement_for_command( + &features, + &exec_params.command, + turn.approval_policy, + &turn.sandbox_policy, + exec_params.sandbox_permissions, + ) + .await; let req = ShellRequest { command: exec_params.command.clone(), diff --git a/codex-rs/core/src/tools/handlers/spawn_mini_subagent.rs b/codex-rs/core/src/tools/handlers/spawn_mini_subagent.rs new file mode 100644 index 00000000000..393afa3014b --- /dev/null +++ b/codex-rs/core/src/tools/handlers/spawn_mini_subagent.rs @@ -0,0 +1,134 @@ +use async_trait::async_trait; +use codex_protocol::protocol::SessionSource; +use codex_protocol::protocol::SubAgentSource; +use codex_protocol::user_input::UserInput; +use serde_json::json; +use std::sync::Arc; + +use crate::features::Feature; +use crate::function_tool::FunctionCallError; +use crate::tools::context::ToolInvocation; +use crate::tools::context::ToolOutput; +use crate::tools::context::ToolPayload; +use crate::tools::handlers::parse_spawn_subagent_invocation; +use crate::tools::registry::ToolHandler; +use crate::tools::registry::ToolKind; + +pub(crate) const SPAWN_MINI_SUBAGENT_TOOL_NAME: &str = "spawn_mini_subagent"; +pub(crate) const SPAWN_MINI_SUBAGENT_LABEL_PREFIX: &str = "spawn_mini_subagent"; + +pub(crate) const DEFAULT_MINI_SUBAGENT_MODEL_SLUG: &str = "gpt-5.1-codex-mini"; + +const MINI_SUBAGENT_DEVELOPER_PROMPT: &str = r#"You are a 
read-only subagent running on a smaller/cheaper model. + +Hard rules: +- Do not ask the user questions. +- Do not propose or perform edits. Do not call apply_patch. +- Do not call spawn_subagent or spawn_mini_subagent. +- You may explore the repo with read-only commands, but keep it minimal and avoid dumping large files. + +Role: +You are a read-only subagent for Codex. Given the user's prompt, use the available tools to research and report back. Do what was asked; nothing more, nothing less. + +Guidelines: +- Prefer fast, low-risk work: quick repo search, entry-point mapping, and pattern matching. +- If the correct answer requires deep reasoning or large-scale redesign judgment, say so plainly and suggest using a stronger model. +- Keep your report concise and concrete: include relevant file paths and small snippets. Prefer workspace-relative paths."#; + +pub struct SpawnMiniSubagentHandler; + +#[async_trait] +impl ToolHandler for SpawnMiniSubagentHandler { + fn kind(&self) -> ToolKind { + ToolKind::Function + } + + async fn handle(&self, invocation: ToolInvocation) -> Result { + let ToolInvocation { + session, + turn, + call_id, + payload, + tool_name, + .. 
+ } = invocation; + + let ToolPayload::Function { arguments } = payload else { + return Err(FunctionCallError::RespondToModel(format!( + "unsupported payload for {tool_name}" + ))); + }; + + let source = turn.client.get_session_source(); + if let SessionSource::SubAgent(_) = source { + return Err(FunctionCallError::RespondToModel( + "spawn_mini_subagent is not supported inside subagents".to_string(), + )); + } + + let mut invocation = parse_spawn_subagent_invocation(&arguments) + .map_err(FunctionCallError::RespondToModel)?; + + let label = invocation.label.clone(); + let subagent_label = format!("{SPAWN_MINI_SUBAGENT_LABEL_PREFIX}_{label}"); + + let model = turn + .mini_subagent_model + .clone() + .unwrap_or_else(|| DEFAULT_MINI_SUBAGENT_MODEL_SLUG.to_string()); + invocation.model = Some(model.clone()); + + let mut cfg = turn.client.config().as_ref().clone(); + cfg.developer_instructions = Some(build_mini_subagent_developer_instructions( + cfg.developer_instructions.as_deref().unwrap_or_default(), + )); + cfg.model = Some(model); + cfg.model_reasoning_effort = turn + .mini_subagent_reasoning_effort + .or(Some(codex_protocol::openai_models::ReasoningEffort::Medium)); + cfg.model_reasoning_summary = turn.client.get_reasoning_summary(); + + let mut features = cfg.features.clone(); + features.disable(Feature::ApplyPatchFreeform); + cfg.features = features; + cfg.approval_policy = + crate::config::Constrained::allow_any(codex_protocol::protocol::AskForApproval::Never); + cfg.sandbox_policy = crate::config::Constrained::allow_any( + codex_protocol::protocol::SandboxPolicy::ReadOnly, + ); + + let input = vec![UserInput::Text { + text: invocation.prompt.clone(), + }]; + + let response = crate::tools::subagent_runner::run_subagent_tool_call( + Arc::clone(&session), + Arc::clone(&turn), + call_id, + invocation, + cfg, + input, + SubAgentSource::Other(subagent_label), + ) + .await + .map_err(FunctionCallError::RespondToModel)?; + + Ok(ToolOutput::Function { + content: 
json!({ + "label": label, + "response": response, + }) + .to_string(), + content_items: None, + success: Some(true), + }) + } +} + +fn build_mini_subagent_developer_instructions(existing: &str) -> String { + let existing = existing.trim(); + if existing.is_empty() { + return MINI_SUBAGENT_DEVELOPER_PROMPT.to_string(); + } + format!("{MINI_SUBAGENT_DEVELOPER_PROMPT}\n\n{existing}") +} diff --git a/codex-rs/core/src/tools/handlers/spawn_subagent.rs b/codex-rs/core/src/tools/handlers/spawn_subagent.rs index d0ea5b0803c..91947cb3aaa 100644 --- a/codex-rs/core/src/tools/handlers/spawn_subagent.rs +++ b/codex-rs/core/src/tools/handlers/spawn_subagent.rs @@ -1,22 +1,12 @@ use async_trait::async_trait; -use codex_protocol::protocol::Event; -use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::SubAgentInvocation; use codex_protocol::protocol::SubAgentSource; -use codex_protocol::protocol::SubAgentToolCallActivityEvent; -use codex_protocol::protocol::SubAgentToolCallBeginEvent; -use codex_protocol::protocol::SubAgentToolCallEndEvent; -use codex_protocol::protocol::SubAgentToolCallTokensEvent; -use codex_protocol::protocol::TokenCountEvent; use codex_protocol::user_input::UserInput; use serde::Deserialize; use serde_json::json; use std::sync::Arc; -use std::time::Instant; -use tokio_util::sync::CancellationToken; -use crate::codex_delegate::run_codex_conversation_one_shot; use crate::features::Feature; use crate::function_tool::FunctionCallError; use crate::tools::context::ToolInvocation; @@ -82,6 +72,7 @@ pub(crate) fn parse_spawn_subagent_invocation( description, label, prompt: prompt.to_string(), + model: None, }) } @@ -116,7 +107,7 @@ impl ToolHandler for SpawnSubagentHandler { )); } - let invocation = parse_spawn_subagent_invocation(&arguments) + let mut invocation = parse_spawn_subagent_invocation(&arguments) .map_err(FunctionCallError::RespondToModel)?; let label = invocation.label.clone(); let 
subagent_label = format!("{SPAWN_SUBAGENT_LABEL_PREFIX}_{label}"); @@ -125,8 +116,15 @@ impl ToolHandler for SpawnSubagentHandler { cfg.developer_instructions = Some(build_subagent_developer_instructions( cfg.developer_instructions.as_deref().unwrap_or_default(), )); - cfg.model = Some(turn.client.get_model()); - cfg.model_reasoning_effort = turn.client.get_reasoning_effort(); + cfg.model = Some( + turn.subagent_model + .clone() + .unwrap_or_else(|| turn.client.get_model()), + ); + invocation.model = cfg.model.clone(); + cfg.model_reasoning_effort = turn + .subagent_reasoning_effort + .or(turn.client.get_reasoning_effort()); cfg.model_reasoning_summary = turn.client.get_reasoning_summary(); let mut features = cfg.features.clone(); @@ -138,142 +136,21 @@ impl ToolHandler for SpawnSubagentHandler { codex_protocol::protocol::SandboxPolicy::ReadOnly, ); - session - .send_event( - turn.as_ref(), - EventMsg::SubAgentToolCallBegin(SubAgentToolCallBeginEvent { - call_id: call_id.clone(), - invocation: invocation.clone(), - }), - ) - .await; - session - .send_event( - turn.as_ref(), - EventMsg::SubAgentToolCallActivity(SubAgentToolCallActivityEvent { - call_id: call_id.clone(), - activity: "starting".to_string(), - }), - ) - .await; - - let started_at = Instant::now(); - let cancel = session - .turn_cancellation_token(&turn.sub_id) - .await - .map_or_else(CancellationToken::new, |token| token.child_token()); - let _cancel_guard = CancelOnDrop::new(cancel.clone()); - let input = vec![UserInput::Text { text: invocation.prompt.clone(), }]; - let io = match run_codex_conversation_one_shot( - cfg, - Arc::clone(&session.services.auth_manager), - Arc::clone(&session.services.models_manager), - input, + let response = crate::tools::subagent_runner::run_subagent_tool_call( Arc::clone(&session), Arc::clone(&turn), - cancel, - None, + call_id, + invocation, + cfg, + input, SubAgentSource::Other(subagent_label), ) .await - { - Ok(io) => io, - Err(err) => { - let message = 
format!("failed to start subagent: {err}"); - session - .send_event( - turn.as_ref(), - EventMsg::SubAgentToolCallEnd(SubAgentToolCallEndEvent { - call_id: call_id.clone(), - invocation: invocation.clone(), - duration: started_at.elapsed(), - tokens: None, - result: Err(message.clone()), - }), - ) - .await; - return Err(FunctionCallError::RespondToModel(message)); - } - }; - - let mut last_agent_message: Option = None; - let mut last_activity: Option = None; - let mut tokens: i64 = 0; - let mut last_reported_tokens: Option = None; - let mut last_reported_at = Instant::now(); - while let Ok(event) = io.rx_event.recv().await { - let Event { id: _, msg } = event; - - if let Some(activity) = activity_for_event(&msg) - && last_activity.as_deref() != Some(activity.as_str()) - { - last_activity = Some(activity.clone()); - session - .send_event( - turn.as_ref(), - EventMsg::SubAgentToolCallActivity(SubAgentToolCallActivityEvent { - call_id: call_id.clone(), - activity, - }), - ) - .await; - } - - match msg { - EventMsg::TaskComplete(ev) => { - last_agent_message = ev.last_agent_message; - break; - } - EventMsg::TurnAborted(_) => break, - EventMsg::TokenCount(TokenCountEvent { - info: Some(info), .. 
- }) => { - tokens = tokens.saturating_add(info.last_token_usage.total_tokens.max(0)); - let now = Instant::now(); - let should_report = - match (last_reported_tokens, last_reported_at.elapsed().as_secs()) { - (Some(prev), secs) => { - tokens > prev && (tokens - prev >= 250 || secs >= 2) - } - (None, _) => tokens > 0, - }; - if should_report { - session - .send_event( - turn.as_ref(), - EventMsg::SubAgentToolCallTokens(SubAgentToolCallTokensEvent { - call_id: call_id.clone(), - tokens, - }), - ) - .await; - last_reported_tokens = Some(tokens); - last_reported_at = now; - } - } - _ => {} - } - } - - let response = last_agent_message.unwrap_or_default().trim().to_string(); - let tokens = if tokens > 0 { Some(tokens) } else { None }; - let result = Ok(response.clone()); - session - .send_event( - turn.as_ref(), - EventMsg::SubAgentToolCallEnd(SubAgentToolCallEndEvent { - call_id, - invocation, - duration: started_at.elapsed(), - tokens, - result: result.clone(), - }), - ) - .await; + .map_err(FunctionCallError::RespondToModel)?; Ok(ToolOutput::Function { content: json!({ @@ -287,55 +164,6 @@ impl ToolHandler for SpawnSubagentHandler { } } -fn fmt_exec_activity_command(command: &[String]) -> String { - if command.is_empty() { - return "shell".to_string(); - } - - let cmd = if let Some((_shell, script)) = crate::parse_command::extract_shell_command(command) { - let script = script.trim(); - if script.is_empty() { - "shell".to_string() - } else { - script - .lines() - .map(str::trim) - .filter(|line| !line.is_empty()) - .collect::>() - .join(" ") - } - } else { - crate::parse_command::shlex_join(command) - }; - - if cmd.is_empty() { - "shell".to_string() - } else { - cmd - } -} - -fn activity_for_event(msg: &EventMsg) -> Option { - match msg { - EventMsg::TaskStarted(_) => Some("starting".to_string()), - EventMsg::UserMessage(_) => Some("sending prompt".to_string()), - EventMsg::AgentReasoning(_) - | EventMsg::AgentReasoningDelta(_) - | 
EventMsg::AgentReasoningRawContent(_) - | EventMsg::AgentReasoningRawContentDelta(_) - | EventMsg::AgentReasoningSectionBreak(_) => Some("thinking".to_string()), - EventMsg::AgentMessage(_) | EventMsg::AgentMessageDelta(_) => Some("writing".to_string()), - EventMsg::ExecCommandBegin(ev) => Some(fmt_exec_activity_command(&ev.command)), - EventMsg::McpToolCallBegin(ev) => Some(format!( - "mcp {}/{}", - ev.invocation.server.trim(), - ev.invocation.tool.trim() - )), - EventMsg::WebSearchBegin(_) => Some("web_search".to_string()), - _ => None, - } -} - fn build_subagent_developer_instructions(existing: &str) -> String { let existing = existing.trim(); if existing.is_empty() { @@ -373,22 +201,6 @@ fn normalize_description(description: &str) -> String { .to_string() } -struct CancelOnDrop { - token: CancellationToken, -} - -impl CancelOnDrop { - fn new(token: CancellationToken) -> Self { - Self { token } - } -} - -impl Drop for CancelOnDrop { - fn drop(&mut self) { - self.token.cancel(); - } -} - #[cfg(test)] mod tests { use super::*; diff --git a/codex-rs/core/src/tools/mod.rs b/codex-rs/core/src/tools/mod.rs index 9536eedafe6..5d87a0460eb 100644 --- a/codex-rs/core/src/tools/mod.rs +++ b/codex-rs/core/src/tools/mod.rs @@ -8,6 +8,7 @@ pub mod router; pub mod runtimes; pub mod sandboxing; pub mod spec; +pub(crate) mod subagent_runner; use crate::exec::ExecToolCallOutput; use crate::truncate::TruncationPolicy; diff --git a/codex-rs/core/src/tools/parallel.rs b/codex-rs/core/src/tools/parallel.rs index 293f6ed2557..92dd8e09fae 100644 --- a/codex-rs/core/src/tools/parallel.rs +++ b/codex-rs/core/src/tools/parallel.rs @@ -16,6 +16,7 @@ use crate::error::CodexErr; use crate::function_tool::FunctionCallError; use crate::protocol::EventMsg; use crate::protocol::SubAgentToolCallEndEvent; +use crate::protocol::SubAgentToolCallOutcome; use crate::tools::context::SharedTurnDiffTracker; use crate::tools::context::ToolPayload; use crate::tools::handlers::SPAWN_SUBAGENT_TOOL_NAME; @@ 
-100,6 +101,7 @@ impl ToolCallRuntime { invocation, duration: elapsed, tokens: None, + outcome: Some(SubAgentToolCallOutcome::Cancelled), result: Err(message), }), ) diff --git a/codex-rs/core/src/tools/runtimes/shell.rs b/codex-rs/core/src/tools/runtimes/shell.rs index 2c6c6cd29c4..528efad7409 100644 --- a/codex-rs/core/src/tools/runtimes/shell.rs +++ b/codex-rs/core/src/tools/runtimes/shell.rs @@ -5,8 +5,11 @@ Executes shell requests under the orchestrator: asks for approval when needed, builds a CommandSpec, and runs it under the current SandboxAttempt. */ use crate::exec::ExecToolCallOutput; +use crate::features::Feature; +use crate::powershell::prefix_powershell_script_with_utf8; use crate::sandboxing::SandboxPermissions; use crate::sandboxing::execute_env; +use crate::shell::ShellType; use crate::tools::runtimes::build_command_spec; use crate::tools::runtimes::maybe_wrap_shell_lc_with_snapshot; use crate::tools::sandboxing::Approvable; @@ -144,6 +147,13 @@ impl ToolRuntime for ShellRuntime { let base_command = &req.command; let session_shell = ctx.session.user_shell(); let command = maybe_wrap_shell_lc_with_snapshot(base_command, session_shell.as_ref()); + let command = if matches!(session_shell.shell_type, ShellType::PowerShell) + && ctx.session.features().enabled(Feature::PowershellUtf8) + { + prefix_powershell_script_with_utf8(&command) + } else { + command + }; let spec = build_command_spec( &command, diff --git a/codex-rs/core/src/tools/runtimes/unified_exec.rs b/codex-rs/core/src/tools/runtimes/unified_exec.rs index a38ac9bc3a5..da1f518e8fb 100644 --- a/codex-rs/core/src/tools/runtimes/unified_exec.rs +++ b/codex-rs/core/src/tools/runtimes/unified_exec.rs @@ -7,7 +7,10 @@ the session manager to spawn PTYs once an ExecEnv is prepared. 
use crate::error::CodexErr; use crate::error::SandboxErr; use crate::exec::ExecExpiration; +use crate::features::Feature; +use crate::powershell::prefix_powershell_script_with_utf8; use crate::sandboxing::SandboxPermissions; +use crate::shell::ShellType; use crate::tools::runtimes::build_command_spec; use crate::tools::runtimes::maybe_wrap_shell_lc_with_snapshot; use crate::tools::sandboxing::Approvable; @@ -165,6 +168,13 @@ impl<'a> ToolRuntime for UnifiedExecRunt let base_command = &req.command; let session_shell = ctx.session.user_shell(); let command = maybe_wrap_shell_lc_with_snapshot(base_command, session_shell.as_ref()); + let command = if matches!(session_shell.shell_type, ShellType::PowerShell) + && ctx.session.features().enabled(Feature::PowershellUtf8) + { + prefix_powershell_script_with_utf8(&command) + } else { + command + }; let spec = build_command_spec( &command, diff --git a/codex-rs/core/src/tools/spec.rs b/codex-rs/core/src/tools/spec.rs index 34495128ddf..5834b0bf5b2 100644 --- a/codex-rs/core/src/tools/spec.rs +++ b/codex-rs/core/src/tools/spec.rs @@ -7,6 +7,8 @@ use crate::tools::handlers::APPROVE_PLAN_TOOL_NAME; use crate::tools::handlers::ASK_USER_QUESTION_TOOL_NAME; use crate::tools::handlers::PLAN_TOOL; use crate::tools::handlers::PROPOSE_PLAN_VARIANTS_TOOL_NAME; +use crate::tools::handlers::SPAWN_MINI_SUBAGENT_LABEL_PREFIX; +use crate::tools::handlers::SPAWN_MINI_SUBAGENT_TOOL_NAME; use crate::tools::handlers::SPAWN_SUBAGENT_LABEL_PREFIX; use crate::tools::handlers::SPAWN_SUBAGENT_TOOL_NAME; use crate::tools::handlers::apply_patch::create_apply_patch_freeform_tool; @@ -24,12 +26,17 @@ use std::collections::BTreeMap; use std::collections::HashMap; pub(crate) const ASK_USER_QUESTION_DEVELOPER_INSTRUCTIONS: &str = r#"## AskUserQuestion -Use `ask_user_question` when you need user input to proceed during execution. This helps you: +Use `ask_user_question` whenever you need user input to proceed during execution. This helps you: 1. 
Gather preferences or requirements (e.g., scope, trade-offs). 2. Clarify ambiguous instructions. 3. Get a decision on implementation choices as you work. 4. Offer a small set of clear options when multiple directions are reasonable. +Decision rule: +- If you're about to ask the user a question (e.g., "should I...?", "which one...?", "can you...?"), do not ask it in plain text. Call `ask_user_question` and wait for the tool result. +- If there are multiple reasonable interpretations or choices that would materially change the output, side effects, or time/cost, pause and ask via this tool instead of guessing. +- If the user explicitly delegates (e.g., "use your best judgment") or a safe default is clearly implied, proceed and state your assumption. + Usage notes: - Do not ask questions in plain text; call `ask_user_question` and wait for the tool result. - If you have multiple questions, include them in a single call (up to 4). @@ -38,12 +45,43 @@ Usage notes: - If you recommend an option, make it the first option and add "(Recommended)" to the label. - Do not include numbering in option labels (e.g. "1:", "2.", "A)"); the UI provides numbering. +Tool constraints: +- 1-4 questions per call. +- Each question: `header` max 12 chars; `question` must end with a '?'. +- Each question must include 2-4 `options`; do not include an "Other" option (the UI provides it automatically). +- Each option: `label` is 1-5 words; include a `description` with trade-offs. + Example: Call `ask_user_question` with a single question and a few options, then wait for the answer and proceed. "#; +pub(crate) const CLARIFICATION_POLICY_DEVELOPER_INSTRUCTIONS: &str = r#"## Clarification Policy +When a request is underspecified, avoid guessing. Ask clarifying questions before producing a plan or taking actions that depend on missing details. 
+ +Rules: +- If there are multiple reasonable interpretations that would materially change the outcome (scope, approach, output, trade-offs, time/cost, risk), pause and ask questions instead of assuming. +- Ask as many questions as needed to remove material ambiguity. `ask_user_question` supports 1-4 questions per call; if more are needed, ask another set. +- Prefer multiple-choice options with clear trade-offs, and keep a free-text escape hatch (the UI provides an "Other" option automatically). +- If the user explicitly delegates (e.g., "use your best judgment" / "surprise me"), proceed and state the assumptions you chose, plus the main knobs the user can tweak. +"#; + +pub(crate) const LSP_NAVIGATION_DEVELOPER_INSTRUCTIONS: &str = r#"## LSP-first navigation +When LSP tools are available and the file/language is supported: +- Prefer symbol-aware navigation via `lsp_definition` and `lsp_references` over `rg` for definition/usage questions. +- Use `lsp_diagnostics` for current errors/warnings. +- Use `rg` for broad text search (log strings, config keys) and as a fallback if LSP returns errors or empty results unexpectedly. +- Treat `lsp_document_symbols` as best-effort (it may return empty in some environments). +- Prefer absolute paths for `root`, `file_path`, and `path`. If a path is relative, it is resolved relative to `root` (which defaults to the session working directory). + +For broad repo exploration (mapping an unfamiliar area, finding entry points, summarizing how something works), prefer `spawn_mini_subagent` before doing multiple sequential `lsp_*` calls or reaching for `rg`. +"#; + pub(crate) const SPAWN_SUBAGENT_DEVELOPER_INSTRUCTIONS: &str = r#"## SpawnSubagent -Use `spawn_subagent` to delegate short, read-only research tasks. Subagents cannot edit files, cannot ask the user questions, and should return a concise plain-text response. 
+Use `spawn_subagent` to delegate short, read-only research tasks (repo exploration, tracing control flow, summarizing how something works). Subagents cannot edit files, cannot ask the user questions, and should return a concise plain-text response. + +Default behavior: +- Prefer `spawn_mini_subagent` for quick/cheap repo exploration and mapping. +- Use `spawn_subagent` when a mini subagent is likely insufficient (complex architecture decisions, subtle concurrency issues, high-stakes security work, or if mini results are missing key context). When to use it: - Broad context gathering (you don't know the entry point yet). @@ -51,7 +89,7 @@ When to use it: - Focused research tasks (e.g. “find where X is configured”, “summarize how Y works”). When not to use it: -- Needle queries where you already know the file/symbol, or you're only checking 1–3 files (do a direct `rg` / targeted read instead). +- Needle queries where you already know the file/symbol, or you're only checking 1–3 files (do a direct `lsp_definition`/`lsp_references` or `rg` / targeted read instead). - Anything that requires writing code or asking the user a question. Requirements: @@ -62,9 +100,12 @@ Requirements: Prompt tips: - Ask for specific outputs (e.g. “list the relevant files and explain the control flow”). - Prefer small, targeted file reads over dumping large files. +- If you need a control-flow answer, ask for a short “entry point → key calls → result” trace. +- If you need multiple answers, assign one question per subagent (avoid “do everything” prompts). Parallelism: - If you have multiple independent research questions, prefer launching multiple subagents in parallel rather than running them serially. +- Prefer 2–3 small, differently-scoped prompts over 1 big prompt (faster convergence and fewer blind spots). Using results: - The subagent response is input for you. Summarize the relevant findings back to the user (include key file paths and small snippets when helpful). 
@@ -78,6 +119,51 @@ Example tool call: Example (parallel): `spawn_subagent({ "description": "Locate config schema for auth", "prompt": "Find where auth config is defined and how it is loaded. Return files + key functions.", "label": "auth_cfg" })` `spawn_subagent({ "description": "Trace token usage in requests", "prompt": "Find where tokens are attached to outbound requests. Return files + key call sites.", "label": "auth_use" })` + +Example (control flow): +`spawn_subagent({ "description": "Trace request path for /responses", "prompt": "Trace the control flow from the public API entry point to the outbound HTTP request for /responses. Return files + key functions + a short call chain.", "label": "responses_flow" })` +"#; + +pub(crate) const SPAWN_MINI_SUBAGENT_DEVELOPER_INSTRUCTIONS: &str = r#"## SpawnMiniSubagent +Use `spawn_mini_subagent` for the same kind of short, read-only research as `spawn_subagent`, but when you explicitly want a fast/cheap subagent. + +Key properties: +- This tool defaults to `gpt-5.1-codex-mini` (configurable via `mini_subagent_model`). +- Mini subagents cannot edit files and cannot ask the user questions. + +Default behavior (strongly preferred): +- For repo exploration (quick mapping, finding relevant files, entry points, or call chains), treat mini subagents as your first move. +- If you expect to do 2+ repo-search/read steps (multiple `lsp_*` calls or `rg` searches, multiple file opens), spawn 2–3 mini subagents first, unless you already know the exact file/symbol to read. + +When to use it: +- Quick repo mapping (find relevant files, entry points, call chains). +- Mechanical migrations where you mostly need search + pattern matching. +- Lint/snapshot churn where verification is easy. + +When NOT to use it: +- Complex architecture decisions, subtle concurrency issues, or high-stakes security work. +- Situations where you need deep reasoning and correctness guarantees beyond what a small model can provide. 
+ +Usage guidance: +- Provide a self-contained prompt with explicit output requirements. +- Prefer 1 question per mini subagent; spawn multiple in parallel if needed. +"#; + +pub(crate) const WEB_UI_QUALITY_BAR_DEVELOPER_INSTRUCTIONS: &str = r#"## Web UI Quality Bar (only when building/changing web UI) +When the user's request involves a web UI, raise the quality bar beyond "it works". + +Principles: +- Prioritize usability and clarity: clear hierarchy, predictable behavior, good defaults, minimal surprise. +- Prefer a small design system over ad-hoc styles: define tokens (CSS variables) for color, spacing, radius, shadow, typography; reuse them consistently. +- Make states first-class: loading, empty, error, disabled, slow network, long text, and small screens should look intentional. +- Accessibility is required: semantic HTML, keyboard navigation, visible focus, sufficient contrast, sensible ARIA only when needed. +- Responsive by default: mobile-first layout that scales up; avoid hard-coded widths and fragile pixel-perfect positioning. +- Polish with restraint: subtle transitions, hover/pressed feedback, and micro-interactions only when they reduce confusion. + +Deliverables to include in the plan/output: +- "UX goals" paragraph (what should feel better, not just what changes). +- A short "Design system" section (tokens + reusable components; avoid one-off styling). +- Verification: exact manual steps (mouse + keyboard) and any automated checks/tests (e.g. unit/e2e/lint). 
"#; pub(crate) fn prepend_ask_user_question_developer_instructions( @@ -97,11 +183,63 @@ pub(crate) fn prepend_ask_user_question_developer_instructions( } } +pub(crate) fn prepend_clarification_policy_developer_instructions( + developer_instructions: Option, +) -> Option { + if let Some(existing) = developer_instructions.as_deref() + && existing.contains("## Clarification Policy") + { + return developer_instructions; + } + + match developer_instructions { + Some(existing) => Some(format!( + "{CLARIFICATION_POLICY_DEVELOPER_INSTRUCTIONS}\n{existing}" + )), + None => Some(CLARIFICATION_POLICY_DEVELOPER_INSTRUCTIONS.to_string()), + } +} + +pub(crate) fn prepend_lsp_navigation_developer_instructions( + developer_instructions: Option, +) -> Option { + if let Some(existing) = developer_instructions.as_deref() + && existing.contains("## LSP-first navigation") + { + return developer_instructions; + } + + match developer_instructions { + Some(existing) => Some(format!( + "{LSP_NAVIGATION_DEVELOPER_INSTRUCTIONS}\n{existing}" + )), + None => Some(LSP_NAVIGATION_DEVELOPER_INSTRUCTIONS.to_string()), + } +} + +pub(crate) fn prepend_web_ui_quality_bar_developer_instructions( + developer_instructions: Option, +) -> Option { + if let Some(existing) = developer_instructions.as_deref() + && (existing.contains("## Web UI Quality Bar") + || existing.contains("Web UI Quality Bar (only when building/changing web UI)")) + { + return developer_instructions; + } + + match developer_instructions { + Some(existing) => Some(format!( + "{WEB_UI_QUALITY_BAR_DEVELOPER_INSTRUCTIONS}\n{existing}" + )), + None => Some(WEB_UI_QUALITY_BAR_DEVELOPER_INSTRUCTIONS.to_string()), + } +} + pub(crate) fn prepend_spawn_subagent_developer_instructions( developer_instructions: Option, ) -> Option { if let Some(existing) = developer_instructions.as_deref() - && (existing.contains(SPAWN_SUBAGENT_TOOL_NAME) || existing.contains("SpawnSubagent")) + && existing.contains("## SpawnSubagent") { return 
developer_instructions; } @@ -114,13 +252,32 @@ pub(crate) fn prepend_spawn_subagent_developer_instructions( } } +pub(crate) fn prepend_spawn_mini_subagent_developer_instructions( + developer_instructions: Option, +) -> Option { + if let Some(existing) = developer_instructions.as_deref() + && existing.contains("## SpawnMiniSubagent") + { + return developer_instructions; + } + + match developer_instructions { + Some(existing) => Some(format!( + "{SPAWN_MINI_SUBAGENT_DEVELOPER_INSTRUCTIONS}\n{existing}" + )), + None => Some(SPAWN_MINI_SUBAGENT_DEVELOPER_INSTRUCTIONS.to_string()), + } +} + #[derive(Debug, Clone)] pub(crate) struct ToolsConfig { pub shell_type: ConfigShellToolType, pub apply_patch_tool_type: Option, pub web_search_request: bool, pub include_view_image_tool: bool, + pub include_plan_explore_tool: bool, pub include_spawn_subagent_tool: bool, + pub include_spawn_mini_subagent_tool: bool, pub experimental_supported_tools: Vec, } @@ -141,12 +298,31 @@ impl ToolsConfig { session_source, SessionSource::SubAgent(SubAgentSource::Other(label)) if label.starts_with(SPAWN_SUBAGENT_LABEL_PREFIX) + || label.starts_with(SPAWN_MINI_SUBAGENT_LABEL_PREFIX) ); let allow_apply_patch_tool = !disable_apply_patch_tool; let include_apply_patch_tool = allow_apply_patch_tool && features.enabled(Feature::ApplyPatchFreeform); let include_web_search_request = features.enabled(Feature::WebSearchRequest); let include_view_image_tool = features.enabled(Feature::ViewImageTool); + let include_plan_explore_tool = matches!( + session_source, + SessionSource::SubAgent(SubAgentSource::Other(label)) if label == "plan_mode" + ); + let allow_subagent_tools = !matches!(session_source, SessionSource::SubAgent(_)); + let mut experimental_supported_tools = model_family.experimental_supported_tools.clone(); + if features.enabled(Feature::Lsp) { + for name in [ + "lsp_diagnostics", + "lsp_definition", + "lsp_references", + "lsp_document_symbols", + ] { + if 
!experimental_supported_tools.iter().any(|t| t == name) { + experimental_supported_tools.push(name.to_string()); + } + } + } let shell_type = if !features.enabled(Feature::ShellTool) { ConfigShellToolType::Disabled @@ -182,8 +358,11 @@ impl ToolsConfig { apply_patch_tool_type, web_search_request: include_web_search_request, include_view_image_tool, - include_spawn_subagent_tool: !matches!(session_source, SessionSource::SubAgent(_)), - experimental_supported_tools: model_family.experimental_supported_tools.clone(), + include_plan_explore_tool, + include_spawn_subagent_tool: allow_subagent_tools, + include_spawn_mini_subagent_tool: allow_subagent_tools + && features.enabled(Feature::MiniSubagents), + experimental_supported_tools, } } } @@ -446,7 +625,7 @@ fn create_ask_user_question_tool() -> ToolSpec { ToolSpec::Function(ResponsesApiTool { name: ASK_USER_QUESTION_TOOL_NAME.to_string(), - description: "Ask the user 1-4 multiple-choice questions during execution to clarify requirements. Do not ask these questions in plain text; call this tool to pause and wait. The UI always provides an 'Other' choice for custom text input." + description: "Ask the user 1-4 multiple-choice questions during execution to clarify requirements. Use this tool whenever you'd otherwise ask the user a question. Do not ask these questions in plain text; call this tool to pause and wait. The UI always provides an 'Other' choice for custom text input." .to_string(), strict: false, parameters: JsonSchema::Object { @@ -561,6 +740,66 @@ fn create_propose_plan_variants_tool() -> ToolSpec { }) } +fn create_plan_explore_tool() -> ToolSpec { + let mut root_props = BTreeMap::new(); + root_props.insert( + "goal".to_string(), + JsonSchema::String { + description: Some("The user's goal to plan for.".to_string()), + }, + ); + let mut explorer_props = BTreeMap::new(); + explorer_props.insert( + "focus".to_string(), + JsonSchema::String { + description: Some( + "Required. 
One-sentence focus for this explorer (what to look for).".to_string(), + ), + }, + ); + explorer_props.insert( + "description".to_string(), + JsonSchema::String { + description: Some( + "Optional. Human-friendly label shown in history; defaults to focus.".to_string(), + ), + }, + ); + explorer_props.insert( + "prompt".to_string(), + JsonSchema::String { + description: Some( + "Optional. Prompt to send to the subagent; defaults to goal + focus.".to_string(), + ), + }, + ); + root_props.insert( + "explorers".to_string(), + JsonSchema::Array { + items: Box::new(JsonSchema::Object { + properties: explorer_props, + required: Some(vec!["focus".to_string()]), + additional_properties: Some(false.into()), + }), + description: Some( + "Optional. Override the default 3 exploration subagents (order matters)." + .to_string(), + ), + }, + ); + + ToolSpec::Function(ResponsesApiTool { + name: crate::tools::handlers::PLAN_EXPLORE_TOOL_NAME.to_string(), + description: "Run a short, read-only repo exploration pass (via a few exploration subagents) to ground /plan in concrete file paths and entry points.".to_string(), + strict: false, + parameters: JsonSchema::Object { + properties: root_props, + required: Some(vec!["goal".to_string()]), + additional_properties: Some(false.into()), + }, + }) +} + fn create_spawn_subagent_tool() -> ToolSpec { let mut root_props = BTreeMap::new(); root_props.insert( @@ -574,7 +813,10 @@ fn create_spawn_subagent_tool() -> ToolSpec { root_props.insert( "prompt".to_string(), JsonSchema::String { - description: Some("Prompt to send to the read-only subagent.".to_string()), + description: Some( + "Self-contained prompt to send to the read-only subagent; ask for specific outputs (files, symbols, short control-flow traces). Use one question per subagent when parallelizing." 
+ .to_string(), + ), }, ); root_props.insert( @@ -589,9 +831,49 @@ fn create_spawn_subagent_tool() -> ToolSpec { ToolSpec::Function(ResponsesApiTool { name: SPAWN_SUBAGENT_TOOL_NAME.to_string(), - description: - "Spawn a read-only subagent to handle a focused prompt and return its response." - .to_string(), + description: "Spawn a read-only subagent for focused repo research (no edits, no user questions) and return its response.".to_string(), + strict: false, + parameters: JsonSchema::Object { + properties: root_props, + required: Some(vec!["description".to_string(), "prompt".to_string()]), + additional_properties: Some(false.into()), + }, + }) +} + +fn create_spawn_mini_subagent_tool() -> ToolSpec { + let mut root_props = BTreeMap::new(); + root_props.insert( + "description".to_string(), + JsonSchema::String { + description: Some( + "Required one-sentence, human-friendly description shown in history.".to_string(), + ), + }, + ); + root_props.insert( + "prompt".to_string(), + JsonSchema::String { + description: Some( + "Self-contained prompt to send to the read-only mini subagent; ask for specific outputs (files, symbols, short control-flow traces). Use one question per subagent when parallelizing." + .to_string(), + ), + }, + ); + root_props.insert( + "label".to_string(), + JsonSchema::String { + description: Some( + "Optional short label for the subagent session (letters, numbers, _ or -)." + .to_string(), + ), + }, + ); + + ToolSpec::Function(ResponsesApiTool { + name: SPAWN_MINI_SUBAGENT_TOOL_NAME.to_string(), + description: "Spawn a read-only mini subagent (always runs on gpt-5.1-codex-mini) for fast repo research; no edits, no user questions." 
+ .to_string(), strict: false, parameters: JsonSchema::Object { properties: root_props, @@ -971,6 +1253,188 @@ fn create_read_file_tool() -> ToolSpec { }) } +fn create_lsp_diagnostics_tool() -> ToolSpec { + let mut properties = BTreeMap::new(); + properties.insert( + "root".to_string(), + JsonSchema::String { + description: Some( + "Workspace root directory (absolute path preferred). Defaults to the session working directory." + .to_string(), + ), + }, + ); + properties.insert( + "path".to_string(), + JsonSchema::String { + description: Some( + "Optional file path (absolute path preferred). If relative, it is resolved relative to root. When omitted, returns diagnostics for all tracked files." + .to_string(), + ), + }, + ); + properties.insert( + "max_results".to_string(), + JsonSchema::Number { + description: Some( + "Maximum number of diagnostics to return (defaults to 200).".to_string(), + ), + }, + ); + + ToolSpec::Function(ResponsesApiTool { + name: "lsp_diagnostics".to_string(), + description: + "Returns current Language Server Protocol diagnostics for a workspace or file." + .to_string(), + strict: false, + parameters: JsonSchema::Object { + properties, + required: None, + additional_properties: Some(false.into()), + }, + }) +} + +fn create_lsp_definition_tool() -> ToolSpec { + let mut properties = BTreeMap::new(); + properties.insert( + "root".to_string(), + JsonSchema::String { + description: Some( + "Workspace root directory (absolute path preferred). Defaults to the session working directory." + .to_string(), + ), + }, + ); + properties.insert( + "file_path".to_string(), + JsonSchema::String { + description: Some( + "File path (absolute path preferred). If relative, it is resolved relative to root." 
+ .to_string(), + ), + }, + ); + properties.insert( + "line".to_string(), + JsonSchema::Number { + description: Some("1-based line number.".to_string()), + }, + ); + properties.insert( + "character".to_string(), + JsonSchema::Number { + description: Some("1-based character offset (UTF-16).".to_string()), + }, + ); + + ToolSpec::Function(ResponsesApiTool { + name: "lsp_definition".to_string(), + description: "Finds the definition location for the symbol at a position.".to_string(), + strict: false, + parameters: JsonSchema::Object { + properties, + required: Some(vec![ + "file_path".to_string(), + "line".to_string(), + "character".to_string(), + ]), + additional_properties: Some(false.into()), + }, + }) +} + +fn create_lsp_references_tool() -> ToolSpec { + let mut properties = BTreeMap::new(); + properties.insert( + "root".to_string(), + JsonSchema::String { + description: Some( + "Workspace root directory (absolute path preferred). Defaults to the session working directory." + .to_string(), + ), + }, + ); + properties.insert( + "file_path".to_string(), + JsonSchema::String { + description: Some( + "File path (absolute path preferred). If relative, it is resolved relative to root." 
+ .to_string(), + ), + }, + ); + properties.insert( + "line".to_string(), + JsonSchema::Number { + description: Some("1-based line number.".to_string()), + }, + ); + properties.insert( + "character".to_string(), + JsonSchema::Number { + description: Some("1-based character offset (UTF-16).".to_string()), + }, + ); + properties.insert( + "include_declaration".to_string(), + JsonSchema::Boolean { + description: Some( + "Whether to include the symbol's declaration in the results.".to_string(), + ), + }, + ); + + ToolSpec::Function(ResponsesApiTool { + name: "lsp_references".to_string(), + description: "Finds references to the symbol at a position.".to_string(), + strict: false, + parameters: JsonSchema::Object { + properties, + required: Some(vec![ + "file_path".to_string(), + "line".to_string(), + "character".to_string(), + ]), + additional_properties: Some(false.into()), + }, + }) +} + +fn create_lsp_document_symbols_tool() -> ToolSpec { + let mut properties = BTreeMap::new(); + properties.insert( + "root".to_string(), + JsonSchema::String { + description: Some( + "Workspace root directory (absolute path preferred). Defaults to the session working directory." + .to_string(), + ), + }, + ); + properties.insert( + "file_path".to_string(), + JsonSchema::String { + description: Some( + "File path (absolute path preferred). If relative, it is resolved relative to root." 
+ .to_string(), + ), + }, + ); + + ToolSpec::Function(ResponsesApiTool { + name: "lsp_document_symbols".to_string(), + description: "Returns document symbols for a file (outline).".to_string(), + strict: false, + parameters: JsonSchema::Object { + properties, + required: Some(vec!["file_path".to_string()]), + additional_properties: Some(false.into()), + }, + }) +} + fn create_list_dir_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( @@ -1329,14 +1793,17 @@ pub(crate) fn build_specs( use crate::tools::handlers::AskUserQuestionHandler; use crate::tools::handlers::GrepFilesHandler; use crate::tools::handlers::ListDirHandler; + use crate::tools::handlers::LspHandler; use crate::tools::handlers::McpHandler; use crate::tools::handlers::McpResourceHandler; use crate::tools::handlers::PlanApprovalHandler; + use crate::tools::handlers::PlanExploreHandler; use crate::tools::handlers::PlanHandler; use crate::tools::handlers::PlanVariantsHandler; use crate::tools::handlers::ReadFileHandler; use crate::tools::handlers::ShellCommandHandler; use crate::tools::handlers::ShellHandler; + use crate::tools::handlers::SpawnMiniSubagentHandler; use crate::tools::handlers::SpawnSubagentHandler; use crate::tools::handlers::TestSyncHandler; use crate::tools::handlers::UnifiedExecHandler; @@ -1349,11 +1816,13 @@ pub(crate) fn build_specs( let unified_exec_handler = Arc::new(UnifiedExecHandler); let plan_handler = Arc::new(PlanHandler); let plan_approval_handler = Arc::new(PlanApprovalHandler); + let plan_explore_handler = Arc::new(PlanExploreHandler); let plan_variants_handler = Arc::new(PlanVariantsHandler); let apply_patch_handler = Arc::new(ApplyPatchHandler); let view_image_handler = Arc::new(ViewImageHandler); let ask_user_question_handler = Arc::new(AskUserQuestionHandler); let spawn_subagent_handler = Arc::new(SpawnSubagentHandler); + let spawn_mini_subagent_handler = Arc::new(SpawnMiniSubagentHandler); let mcp_handler = Arc::new(McpHandler); let 
mcp_resource_handler = Arc::new(McpResourceHandler); let shell_command_handler = Arc::new(ShellCommandHandler); @@ -1406,11 +1875,24 @@ pub(crate) fn build_specs( builder.push_spec(create_propose_plan_variants_tool()); builder.register_handler(PROPOSE_PLAN_VARIANTS_TOOL_NAME, plan_variants_handler); + if config.include_plan_explore_tool { + builder.push_spec(create_plan_explore_tool()); + builder.register_handler( + crate::tools::handlers::PLAN_EXPLORE_TOOL_NAME, + plan_explore_handler, + ); + } + if config.include_spawn_subagent_tool { builder.push_spec_with_parallel_support(create_spawn_subagent_tool(), true); builder.register_handler(SPAWN_SUBAGENT_TOOL_NAME, spawn_subagent_handler); } + if config.include_spawn_mini_subagent_tool { + builder.push_spec_with_parallel_support(create_spawn_mini_subagent_tool(), true); + builder.register_handler(SPAWN_MINI_SUBAGENT_TOOL_NAME, spawn_mini_subagent_handler); + } + if let Some(apply_patch_tool_type) = &config.apply_patch_tool_type { match apply_patch_tool_type { ApplyPatchToolType::Freeform => { @@ -1432,6 +1914,46 @@ pub(crate) fn build_specs( builder.register_handler("grep_files", grep_files_handler); } + if config + .experimental_supported_tools + .iter() + .any(|tool| tool.starts_with("lsp_")) + { + let lsp_handler = Arc::new(LspHandler); + if config + .experimental_supported_tools + .iter() + .any(|tool| tool == "lsp_diagnostics") + { + builder.push_spec_with_parallel_support(create_lsp_diagnostics_tool(), true); + builder.register_handler("lsp_diagnostics", lsp_handler.clone()); + } + if config + .experimental_supported_tools + .iter() + .any(|tool| tool == "lsp_definition") + { + builder.push_spec_with_parallel_support(create_lsp_definition_tool(), true); + builder.register_handler("lsp_definition", lsp_handler.clone()); + } + if config + .experimental_supported_tools + .iter() + .any(|tool| tool == "lsp_references") + { + builder.push_spec_with_parallel_support(create_lsp_references_tool(), true); + 
builder.register_handler("lsp_references", lsp_handler.clone()); + } + if config + .experimental_supported_tools + .iter() + .any(|tool| tool == "lsp_document_symbols") + { + builder.push_spec_with_parallel_support(create_lsp_document_symbols_tool(), true); + builder.register_handler("lsp_document_symbols", lsp_handler); + } + } + if config .experimental_supported_tools .contains(&"read_file".to_string()) @@ -1597,6 +2119,48 @@ mod tests { }) } + #[test] + fn test_ask_user_question_developer_instructions_regression() { + let instructions = ASK_USER_QUESTION_DEVELOPER_INSTRUCTIONS; + assert!( + instructions.contains("Do not ask questions in plain text"), + "expected the plain-text prohibition to be present" + ); + assert!( + instructions.contains("If you're about to ask the user a question"), + "expected the decision rule to be present" + ); + assert!( + instructions.contains("1-4 questions per call"), + "expected the question-count constraint to be present" + ); + assert!( + instructions.contains("2-4 `options`"), + "expected the option-count constraint to be present" + ); + assert!( + instructions.contains("must end with a '?'"), + "expected the question punctuation constraint to be present" + ); + } + + #[test] + fn test_clarification_policy_developer_instructions_regression() { + let instructions = CLARIFICATION_POLICY_DEVELOPER_INSTRUCTIONS; + assert!( + instructions.contains("If there are multiple reasonable interpretations"), + "expected the ambiguity decision rule to be present" + ); + assert!( + instructions.contains("1-4 questions per call"), + "expected the ask_user_question per-call limit to be present" + ); + assert!( + instructions.contains("use your best judgment"), + "expected the explicit delegation escape hatch to be present" + ); + } + #[test] fn test_full_toolset_specs_for_gpt5_codex_unified_exec_web_search() { let config = test_config(); @@ -1637,6 +2201,7 @@ mod tests { create_approve_plan_tool(), create_propose_plan_variants_tool(), 
create_spawn_subagent_tool(), + create_spawn_mini_subagent_tool(), create_apply_patch_freeform_tool(), ToolSpec::WebSearch {}, create_view_image_tool(), @@ -1683,6 +2248,7 @@ mod tests { "approve_plan", "propose_plan_variants", "spawn_subagent", + "spawn_mini_subagent", "apply_patch", "view_image", ], @@ -1704,6 +2270,7 @@ mod tests { "approve_plan", "propose_plan_variants", "spawn_subagent", + "spawn_mini_subagent", "apply_patch", "view_image", ], @@ -1728,6 +2295,7 @@ mod tests { "approve_plan", "propose_plan_variants", "spawn_subagent", + "spawn_mini_subagent", "apply_patch", "web_search", "view_image", @@ -1753,6 +2321,7 @@ mod tests { "approve_plan", "propose_plan_variants", "spawn_subagent", + "spawn_mini_subagent", "apply_patch", "web_search", "view_image", @@ -1775,6 +2344,7 @@ mod tests { "approve_plan", "propose_plan_variants", "spawn_subagent", + "spawn_mini_subagent", "view_image", ], ); @@ -1795,6 +2365,7 @@ mod tests { "approve_plan", "propose_plan_variants", "spawn_subagent", + "spawn_mini_subagent", "apply_patch", "view_image", ], @@ -1816,6 +2387,7 @@ mod tests { "approve_plan", "propose_plan_variants", "spawn_subagent", + "spawn_mini_subagent", "view_image", ], ); @@ -1836,6 +2408,7 @@ mod tests { "approve_plan", "propose_plan_variants", "spawn_subagent", + "spawn_mini_subagent", "apply_patch", "view_image", ], @@ -1858,6 +2431,7 @@ mod tests { let (tools, _) = build_specs(&tools_config, None).build(); let tool_names = tools.iter().map(|t| t.spec.name()).collect::>(); assert!(!tool_names.contains(&"spawn_subagent")); + assert!(!tool_names.contains(&"spawn_mini_subagent")); assert!(!tool_names.contains(&"apply_patch")); } @@ -1877,6 +2451,7 @@ mod tests { "approve_plan", "propose_plan_variants", "spawn_subagent", + "spawn_mini_subagent", "apply_patch", "view_image", ], @@ -1901,6 +2476,7 @@ mod tests { "approve_plan", "propose_plan_variants", "spawn_subagent", + "spawn_mini_subagent", "web_search", "view_image", ], diff --git 
a/codex-rs/core/src/tools/subagent_runner.rs b/codex-rs/core/src/tools/subagent_runner.rs new file mode 100644 index 00000000000..3af618e2467 --- /dev/null +++ b/codex-rs/core/src/tools/subagent_runner.rs @@ -0,0 +1,282 @@ +use codex_protocol::protocol::Event; +use codex_protocol::protocol::EventMsg; +use codex_protocol::protocol::SubAgentInvocation; +use codex_protocol::protocol::SubAgentSource; +use codex_protocol::protocol::SubAgentToolCallActivityEvent; +use codex_protocol::protocol::SubAgentToolCallBeginEvent; +use codex_protocol::protocol::SubAgentToolCallEndEvent; +use codex_protocol::protocol::SubAgentToolCallOutcome; +use codex_protocol::protocol::SubAgentToolCallTokensEvent; +use codex_protocol::protocol::TokenCountEvent; +use codex_protocol::user_input::UserInput; +use std::sync::Arc; +use std::time::Instant; +use tokio_util::sync::CancellationToken; + +use crate::codex::Session; +use crate::codex::TurnContext; +use crate::codex_delegate::run_codex_conversation_one_shot; +use crate::config::Config; + +pub(crate) async fn run_subagent_tool_call( + session: Arc, + turn: Arc, + call_id: String, + invocation: SubAgentInvocation, + cfg: Config, + input: Vec, + source: SubAgentSource, +) -> Result { + session + .send_event( + turn.as_ref(), + EventMsg::SubAgentToolCallBegin(SubAgentToolCallBeginEvent { + call_id: call_id.clone(), + invocation: invocation.clone(), + }), + ) + .await; + session + .send_event( + turn.as_ref(), + EventMsg::SubAgentToolCallActivity(SubAgentToolCallActivityEvent { + call_id: call_id.clone(), + activity: "starting".to_string(), + }), + ) + .await; + + let started_at = Instant::now(); + let cancel = session + .turn_cancellation_token(&turn.sub_id) + .await + .map_or_else(CancellationToken::new, |token| token.child_token()); + let _cancel_guard = CancelOnDrop::new(cancel.clone()); + + let io = match run_codex_conversation_one_shot( + cfg, + Arc::clone(&session.services.auth_manager), + Arc::clone(&session.services.models_manager), + 
input, + Arc::clone(&session), + Arc::clone(&turn), + cancel, + None, + source, + ) + .await + { + Ok(io) => io, + Err(err) => { + let message = format!("failed to start subagent: {err}"); + session + .send_event( + turn.as_ref(), + EventMsg::SubAgentToolCallEnd(SubAgentToolCallEndEvent { + call_id, + invocation, + duration: started_at.elapsed(), + tokens: None, + outcome: Some(SubAgentToolCallOutcome::Completed), + result: Err(message.clone()), + }), + ) + .await; + return Err(message); + } + }; + + let mut last_agent_message: Option = None; + let mut last_activity: Option = None; + let mut tokens: i64 = 0; + let mut last_reported_tokens: Option = None; + let mut last_reported_at = Instant::now(); + let mut aborted = false; + + while let Ok(Event { msg, .. }) = io.rx_event.recv().await { + if let Some(activity) = activity_for_event(&msg) + && last_activity.as_deref() != Some(activity.as_str()) + { + last_activity = Some(activity.clone()); + session + .send_event( + turn.as_ref(), + EventMsg::SubAgentToolCallActivity(SubAgentToolCallActivityEvent { + call_id: call_id.clone(), + activity, + }), + ) + .await; + } + + match msg { + EventMsg::TaskComplete(ev) => { + last_agent_message = ev.last_agent_message; + break; + } + EventMsg::TurnAborted(_) => { + aborted = true; + break; + } + EventMsg::TokenCount(TokenCountEvent { + info: Some(info), .. 
+ }) => { + tokens = tokens.saturating_add(info.last_token_usage.total_tokens.max(0)); + let now = Instant::now(); + let should_report = + match (last_reported_tokens, last_reported_at.elapsed().as_secs()) { + (Some(prev), secs) => tokens > prev && (tokens - prev >= 250 || secs >= 2), + (None, _) => tokens > 0, + }; + if should_report { + session + .send_event( + turn.as_ref(), + EventMsg::SubAgentToolCallTokens(SubAgentToolCallTokensEvent { + call_id: call_id.clone(), + tokens, + }), + ) + .await; + last_reported_tokens = Some(tokens); + last_reported_at = now; + } + } + _ => {} + } + } + + let response = last_agent_message.unwrap_or_default().trim().to_string(); + let tokens = if tokens > 0 { Some(tokens) } else { None }; + + if aborted { + let message = "cancelled".to_string(); + session + .send_event( + turn.as_ref(), + EventMsg::SubAgentToolCallEnd(SubAgentToolCallEndEvent { + call_id, + invocation, + duration: started_at.elapsed(), + tokens: None, + outcome: Some(SubAgentToolCallOutcome::Cancelled), + result: Err(message.clone()), + }), + ) + .await; + return Err(message); + } + + session + .send_event( + turn.as_ref(), + EventMsg::SubAgentToolCallEnd(SubAgentToolCallEndEvent { + call_id, + invocation, + duration: started_at.elapsed(), + tokens, + outcome: Some(SubAgentToolCallOutcome::Completed), + result: Ok(response.clone()), + }), + ) + .await; + + Ok(response) +} + +fn fmt_exec_activity_command(command: &[String]) -> String { + if command.is_empty() { + return "shell".to_string(); + } + + let cmd = if let Some((_shell, script)) = crate::parse_command::extract_shell_command(command) { + let script = script.trim(); + if script.is_empty() { + "shell".to_string() + } else { + script + .lines() + .map(str::trim) + .filter(|line| !line.is_empty()) + .collect::>() + .join(" ") + } + } else { + crate::parse_command::shlex_join(command) + }; + + if cmd.is_empty() { + "shell".to_string() + } else { + cmd + } +} + +fn activity_for_event(msg: &EventMsg) -> Option { 
+ match msg { + EventMsg::TaskStarted(_) => Some("starting".to_string()), + EventMsg::UserMessage(_) => Some("sending prompt".to_string()), + EventMsg::AgentReasoning(_) + | EventMsg::AgentReasoningDelta(_) + | EventMsg::AgentReasoningRawContent(_) + | EventMsg::AgentReasoningRawContentDelta(_) + | EventMsg::AgentReasoningSectionBreak(_) => Some("thinking".to_string()), + EventMsg::AgentMessage(_) | EventMsg::AgentMessageDelta(_) => Some("writing".to_string()), + EventMsg::ExecCommandBegin(ev) => Some(fmt_exec_activity_command(&ev.command)), + EventMsg::McpToolCallBegin(ev) => Some(format!( + "mcp {}/{}", + ev.invocation.server.trim(), + ev.invocation.tool.trim() + )), + EventMsg::WebSearchBegin(_) => Some("web_search".to_string()), + _ => None, + } +} + +struct CancelOnDrop { + token: CancellationToken, +} + +impl CancelOnDrop { + fn new(token: CancellationToken) -> Self { + Self { token } + } +} + +impl Drop for CancelOnDrop { + fn drop(&mut self) { + self.token.cancel(); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use pretty_assertions::assert_eq; + + #[test] + fn exec_activity_command_strips_powershell_wrapper() { + let shell = if cfg!(windows) { + "C:\\windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe" + } else { + "/usr/local/bin/powershell.exe" + }; + let cmd = vec![ + shell.to_string(), + "-NoProfile".to_string(), + "-Command".to_string(), + "rg --version".to_string(), + ]; + assert_eq!(fmt_exec_activity_command(&cmd), "rg --version"); + } + + #[test] + fn exec_activity_command_strips_bash_lc_wrapper() { + let cmd = vec![ + "bash".to_string(), + "-lc".to_string(), + "rg --version".to_string(), + ]; + assert_eq!(fmt_exec_activity_command(&cmd), "rg --version"); + } +} diff --git a/codex-rs/core/src/truncate.rs b/codex-rs/core/src/truncate.rs index 9f0672363e3..7cced9970a0 100644 --- a/codex-rs/core/src/truncate.rs +++ b/codex-rs/core/src/truncate.rs @@ -6,6 +6,7 @@ use crate::config::Config; use 
codex_protocol::models::FunctionCallOutputContentItem; use codex_protocol::openai_models::TruncationMode; use codex_protocol::openai_models::TruncationPolicyConfig; +use codex_protocol::protocol::TruncationPolicy as ProtocolTruncationPolicy; const APPROX_BYTES_PER_TOKEN: usize = 4; @@ -15,6 +16,15 @@ pub enum TruncationPolicy { Tokens(usize), } +impl From for ProtocolTruncationPolicy { + fn from(value: TruncationPolicy) -> Self { + match value { + TruncationPolicy::Bytes(bytes) => Self::Bytes(bytes), + TruncationPolicy::Tokens(tokens) => Self::Tokens(tokens), + } + } +} + impl From for TruncationPolicy { fn from(config: TruncationPolicyConfig) -> Self { match config.mode { diff --git a/codex-rs/core/src/unified_exec/async_watcher.rs b/codex-rs/core/src/unified_exec/async_watcher.rs index 19d91dbccf2..53b6b7c77d4 100644 --- a/codex-rs/core/src/unified_exec/async_watcher.rs +++ b/codex-rs/core/src/unified_exec/async_watcher.rs @@ -10,6 +10,7 @@ use tokio::time::Sleep; use crate::codex::Session; use crate::codex::TurnContext; use crate::exec::ExecToolCallOutput; +use crate::exec::MAX_EXEC_OUTPUT_DELTAS_PER_CALL; use crate::exec::StreamOutput; use crate::protocol::EventMsg; use crate::protocol::ExecCommandOutputDeltaEvent; @@ -25,6 +26,14 @@ use super::session::UnifiedExecSession; pub(crate) const TRAILING_OUTPUT_GRACE: Duration = Duration::from_millis(100); +/// Upper bound for a single ExecCommandOutputDelta chunk emitted by unified exec. +/// +/// The unified exec output buffer already caps *retained* output (see +/// `UNIFIED_EXEC_OUTPUT_MAX_BYTES`), but we also cap per-event payload size so +/// downstream event consumers (especially app-server JSON-RPC) don't have to +/// process arbitrarily large delta payloads. +const UNIFIED_EXEC_OUTPUT_DELTA_MAX_BYTES: usize = 8192; + /// Spawn a background task that continuously reads from the PTY, appends to the /// shared transcript, and emits ExecCommandOutputDelta events on UTF‑8 /// boundaries. 
@@ -45,6 +54,7 @@ pub(crate) fn start_streaming_output( use tokio::sync::broadcast::error::RecvError; let mut pending = Vec::::new(); + let mut emitted_deltas: usize = 0; let mut grace_sleep: Option>> = None; @@ -82,6 +92,7 @@ pub(crate) fn start_streaming_output( &call_id, &session_ref, &turn_ref, + &mut emitted_deltas, chunk, ).await; } @@ -135,6 +146,7 @@ async fn process_chunk( call_id: &str, session_ref: &Arc, turn_ref: &Arc, + emitted_deltas: &mut usize, chunk: Vec, ) { pending.extend_from_slice(&chunk); @@ -144,6 +156,10 @@ async fn process_chunk( guard.append(&prefix); } + if *emitted_deltas >= MAX_EXEC_OUTPUT_DELTAS_PER_CALL { + continue; + } + let event = ExecCommandOutputDeltaEvent { call_id: call_id.to_string(), stream: ExecOutputStream::Stdout, @@ -152,6 +168,7 @@ async fn process_chunk( session_ref .send_event(turn_ref.as_ref(), EventMsg::ExecCommandOutputDelta(event)) .await; + *emitted_deltas += 1; } } @@ -193,12 +210,16 @@ pub(crate) async fn emit_exec_end_for_unified_exec( } fn split_valid_utf8_prefix(buffer: &mut Vec) -> Option> { + split_valid_utf8_prefix_with_max(buffer, UNIFIED_EXEC_OUTPUT_DELTA_MAX_BYTES) +} + +fn split_valid_utf8_prefix_with_max(buffer: &mut Vec, max_bytes: usize) -> Option> { if buffer.is_empty() { return None; } - let len = buffer.len(); - let mut split = len; + let max_len = buffer.len().min(max_bytes); + let mut split = max_len; while split > 0 { if std::str::from_utf8(&buffer[..split]).is_ok() { let prefix = buffer[..split].to_vec(); @@ -206,7 +227,7 @@ fn split_valid_utf8_prefix(buffer: &mut Vec) -> Option> { return Some(prefix); } - if len - split > 4 { + if max_len - split > 4 { break; } split -= 1; @@ -229,3 +250,42 @@ async fn resolve_aggregated_output( String::from_utf8_lossy(&guard.data).to_string() } + +#[cfg(test)] +mod tests { + use super::split_valid_utf8_prefix_with_max; + + use pretty_assertions::assert_eq; + + #[test] + fn split_valid_utf8_prefix_respects_max_bytes_for_ascii() { + let mut buf = b"hello 
word!".to_vec(); + + let first = split_valid_utf8_prefix_with_max(&mut buf, 5).expect("expected prefix"); + assert_eq!(first, b"hello".to_vec()); + assert_eq!(buf, b" word!".to_vec()); + + let second = split_valid_utf8_prefix_with_max(&mut buf, 5).expect("expected prefix"); + assert_eq!(second, b" word".to_vec()); + assert_eq!(buf, b"!".to_vec()); + } + + #[test] + fn split_valid_utf8_prefix_avoids_splitting_utf8_codepoints() { + // "é" is 2 bytes in UTF-8. With a max of 3 bytes, we should only emit 1 char (2 bytes). + let mut buf = "ééé".as_bytes().to_vec(); + + let first = split_valid_utf8_prefix_with_max(&mut buf, 3).expect("expected prefix"); + assert_eq!(std::str::from_utf8(&first).unwrap(), "é"); + assert_eq!(buf, "éé".as_bytes().to_vec()); + } + + #[test] + fn split_valid_utf8_prefix_makes_progress_on_invalid_utf8() { + let mut buf = vec![0xff, b'a', b'b']; + + let first = split_valid_utf8_prefix_with_max(&mut buf, 2).expect("expected prefix"); + assert_eq!(first, vec![0xff]); + assert_eq!(buf, b"ab".to_vec()); + } +} diff --git a/codex-rs/core/src/unified_exec/session_manager.rs b/codex-rs/core/src/unified_exec/session_manager.rs index 17d5ef671a0..c97820e4643 100644 --- a/codex-rs/core/src/unified_exec/session_manager.rs +++ b/codex-rs/core/src/unified_exec/session_manager.rs @@ -14,7 +14,6 @@ use crate::bash::extract_bash_command; use crate::codex::Session; use crate::codex::TurnContext; use crate::exec_env::create_env; -use crate::exec_policy::create_exec_approval_requirement_for_command; use crate::protocol::BackgroundEventEvent; use crate::protocol::EventMsg; use crate::sandboxing::ExecEnv; @@ -484,15 +483,18 @@ impl UnifiedExecSessionManager { let features = context.session.features(); let mut orchestrator = ToolOrchestrator::new(); let mut runtime = UnifiedExecRuntime::new(self); - let exec_approval_requirement = create_exec_approval_requirement_for_command( - &context.turn.exec_policy, - &features, - command, - context.turn.approval_policy, - 
&context.turn.sandbox_policy, - sandbox_permissions, - ) - .await; + let exec_approval_requirement = context + .session + .services + .exec_policy + .create_exec_approval_requirement_for_command( + &features, + command, + context.turn.approval_policy, + &context.turn.sandbox_policy, + sandbox_permissions, + ) + .await; let req = UnifiedExecToolRequest::new( command.to_vec(), cwd, diff --git a/codex-rs/core/src/util.rs b/codex-rs/core/src/util.rs index 5304a89ac9f..db2d0e74ebb 100644 --- a/codex-rs/core/src/util.rs +++ b/codex-rs/core/src/util.rs @@ -17,7 +17,7 @@ pub(crate) fn backoff(attempt: u64) -> Duration { } pub(crate) fn error_or_panic(message: impl std::string::ToString) { - if cfg!(debug_assertions) || env!("CARGO_PKG_VERSION").contains("alpha") { + if cfg!(debug_assertions) { panic!("{}", message.to_string()); } else { error!("{}", message.to_string()); diff --git a/codex-rs/core/tests/common/Cargo.toml b/codex-rs/core/tests/common/Cargo.toml index 0f0abbb3cb2..c61a0956862 100644 --- a/codex-rs/core/tests/common/Cargo.toml +++ b/codex-rs/core/tests/common/Cargo.toml @@ -14,6 +14,7 @@ base64 = { workspace = true } codex-core = { workspace = true, features = ["test-support"] } codex-protocol = { workspace = true } codex-utils-absolute-path = { workspace = true } +codex-utils-cargo-bin = { workspace = true } notify = { workspace = true } regex-lite = { workspace = true } serde_json = { workspace = true } diff --git a/codex-rs/core/tests/common/lib.rs b/codex-rs/core/tests/common/lib.rs index 63791127bc0..9568ec2786c 100644 --- a/codex-rs/core/tests/common/lib.rs +++ b/codex-rs/core/tests/common/lib.rs @@ -10,9 +10,6 @@ use codex_utils_absolute_path::AbsolutePathBuf; use regex_lite::Regex; use std::path::PathBuf; -#[cfg(target_os = "linux")] -use assert_cmd::cargo::cargo_bin; - pub mod process; pub mod responses; pub mod streaming_sse; @@ -87,7 +84,10 @@ pub async fn load_default_config_for_test(codex_home: &TempDir) -> Config { #[cfg(target_os = "linux")] fn 
default_test_overrides() -> ConfigOverrides { ConfigOverrides { - codex_linux_sandbox_exe: Some(cargo_bin("codex-linux-sandbox")), + codex_linux_sandbox_exe: Some( + codex_utils_cargo_bin::cargo_bin("codex-linux-sandbox") + .expect("should find binary for codex-linux-sandbox"), + ), ..ConfigOverrides::default() } } diff --git a/codex-rs/core/tests/common/test_codex.rs b/codex-rs/core/tests/common/test_codex.rs index e4a806a8652..27116021998 100644 --- a/codex-rs/core/tests/common/test_codex.rs +++ b/codex-rs/core/tests/common/test_codex.rs @@ -184,8 +184,8 @@ impl TestCodexBuilder { for hook in self.pre_build_hooks.drain(..) { hook(home.path()); } - if let Ok(cmd) = assert_cmd::Command::cargo_bin("codexel") { - config.codex_linux_sandbox_exe = Some(PathBuf::from(cmd.get_program().to_os_string())); + if let Ok(path) = codex_utils_cargo_bin::cargo_bin("codexel") { + config.codex_linux_sandbox_exe = Some(path); } let mut mutators = vec![]; diff --git a/codex-rs/core/tests/common/test_codex_exec.rs b/codex-rs/core/tests/common/test_codex_exec.rs index 478d0c57f4b..5f3ea0d5ace 100644 --- a/codex-rs/core/tests/common/test_codex_exec.rs +++ b/codex-rs/core/tests/common/test_codex_exec.rs @@ -11,8 +11,10 @@ pub struct TestCodexExecBuilder { impl TestCodexExecBuilder { pub fn cmd(&self) -> assert_cmd::Command { - let mut cmd = assert_cmd::Command::cargo_bin("codex-exec") - .expect("should find binary for codex-exec"); + let mut cmd = assert_cmd::Command::new( + codex_utils_cargo_bin::cargo_bin("codex-exec") + .expect("should find binary for codex-exec"), + ); cmd.current_dir(self.cwd.path()) .env("CODEX_HOME", self.home.path()) .env(CODEX_API_KEY_ENV_VAR, "dummy"); diff --git a/codex-rs/core/tests/responses_headers.rs b/codex-rs/core/tests/responses_headers.rs index c406fdbc879..3b0ffd2983c 100644 --- a/codex-rs/core/tests/responses_headers.rs +++ b/codex-rs/core/tests/responses_headers.rs @@ -14,7 +14,6 @@ use codex_core::models_manager::manager::ModelsManager; use 
codex_otel::otel_manager::OtelManager; use codex_protocol::ConversationId; use codex_protocol::config_types::ReasoningSummary; -use codex_protocol::openai_models::ReasoningSummaryFormat; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::SubAgentSource; use core_test_support::load_default_config_for_test; @@ -246,7 +245,6 @@ async fn responses_respects_model_family_overrides_from_config() { config.model_provider_id = provider.name.clone(); config.model_provider = provider.clone(); config.model_supports_reasoning_summaries = Some(true); - config.model_reasoning_summary_format = Some(ReasoningSummaryFormat::Experimental); config.model_reasoning_summary = ReasoningSummary::Detailed; let effort = config.model_reasoning_effort; let summary = config.model_reasoning_summary; diff --git a/codex-rs/core/tests/suite/apply_patch_cli.rs b/codex-rs/core/tests/suite/apply_patch_cli.rs index 880e74d9592..1f99c846afd 100644 --- a/codex-rs/core/tests/suite/apply_patch_cli.rs +++ b/codex-rs/core/tests/suite/apply_patch_cli.rs @@ -2,10 +2,13 @@ use anyhow::Result; use core_test_support::responses::ev_apply_patch_call; +use core_test_support::responses::ev_apply_patch_custom_tool_call; use core_test_support::responses::ev_shell_command_call; use core_test_support::test_codex::ApplyPatchModelOutput; use pretty_assertions::assert_eq; use std::fs; +use std::sync::atomic::AtomicI32; +use std::sync::atomic::Ordering; use codex_core::features::Feature; use codex_core::protocol::AskForApproval; @@ -29,6 +32,11 @@ use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use serde_json::json; use test_case::test_case; +use wiremock::Mock; +use wiremock::Respond; +use wiremock::ResponseTemplate; +use wiremock::matchers::method; +use wiremock::matchers::path_regex; pub async fn apply_patch_harness() -> Result { apply_patch_harness_with(|builder| builder).await @@ -718,6 +726,132 @@ async fn 
apply_patch_shell_command_heredoc_with_cd_updates_relative_workdir() -> Ok(()) } +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn apply_patch_cli_can_use_shell_command_output_as_patch_input() -> Result<()> { + skip_if_no_network!(Ok(())); + + let harness = apply_patch_harness_with(|builder| builder.with_model("gpt-5.1")).await?; + + let source_contents = "line1\nnaïve café\nline3\n"; + let source_path = harness.path("source.txt"); + fs::write(&source_path, source_contents)?; + + let read_call_id = "read-source"; + let apply_call_id = "apply-from-read"; + + fn stdout_from_shell_output(output: &str) -> String { + let normalized = output.replace("\r\n", "\n").replace('\r', "\n"); + normalized + .split_once("Output:\n") + .map(|x| x.1) + .unwrap_or("") + .trim_end_matches('\n') + .to_string() + } + + fn function_call_output_text(body: &serde_json::Value, call_id: &str) -> String { + body.get("input") + .and_then(serde_json::Value::as_array) + .and_then(|items| { + items.iter().find(|item| { + item.get("type").and_then(serde_json::Value::as_str) + == Some("function_call_output") + && item.get("call_id").and_then(serde_json::Value::as_str) == Some(call_id) + }) + }) + .and_then(|item| item.get("output").and_then(serde_json::Value::as_str)) + .expect("function_call_output output string") + .to_string() + } + + struct DynamicApplyFromRead { + num_calls: AtomicI32, + read_call_id: String, + apply_call_id: String, + } + + impl Respond for DynamicApplyFromRead { + fn respond(&self, request: &wiremock::Request) -> ResponseTemplate { + let call_num = self.num_calls.fetch_add(1, Ordering::SeqCst); + match call_num { + 0 => { + let command = if cfg!(windows) { + "Get-Content -Encoding utf8 source.txt" + } else { + "cat source.txt" + }; + let body = sse(vec![ + ev_response_created("resp-1"), + ev_shell_command_call(&self.read_call_id, command), + ev_completed("resp-1"), + ]); + ResponseTemplate::new(200) + .insert_header("content-type", "text/event-stream") + 
.set_body_string(body) + } + 1 => { + let body_json: serde_json::Value = + request.body_json().expect("request body should be json"); + let read_output = function_call_output_text(&body_json, &self.read_call_id); + eprintln!("read_output: \n{read_output}"); + let stdout = stdout_from_shell_output(&read_output); + eprintln!("stdout: \n{stdout}"); + let patch_lines = stdout + .lines() + .map(|line| format!("+{line}")) + .collect::>() + .join("\n"); + let patch = format!( + "*** Begin Patch\n*** Add File: target.txt\n{patch_lines}\n*** End Patch" + ); + + eprintln!("patch: \n{patch}"); + + let body = sse(vec![ + ev_response_created("resp-2"), + ev_apply_patch_custom_tool_call(&self.apply_call_id, &patch), + ev_completed("resp-2"), + ]); + ResponseTemplate::new(200) + .insert_header("content-type", "text/event-stream") + .set_body_string(body) + } + 2 => { + let body = sse(vec![ + ev_assistant_message("msg-1", "ok"), + ev_completed("resp-3"), + ]); + ResponseTemplate::new(200) + .insert_header("content-type", "text/event-stream") + .set_body_string(body) + } + _ => panic!("no response for call {call_num}"), + } + } + } + + let responder = DynamicApplyFromRead { + num_calls: AtomicI32::new(0), + read_call_id: read_call_id.to_string(), + apply_call_id: apply_call_id.to_string(), + }; + Mock::given(method("POST")) + .and(path_regex(".*/responses$")) + .respond_with(responder) + .expect(3) + .mount(harness.server()) + .await; + + harness + .submit("read source.txt, then apply it to target.txt") + .await?; + + let target_contents = fs::read_to_string(harness.path("target.txt"))?; + assert_eq!(target_contents, source_contents); + + Ok(()) +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn apply_patch_shell_command_heredoc_with_cd_emits_turn_diff() -> Result<()> { skip_if_no_network!(Ok(())); diff --git a/codex-rs/core/tests/suite/cli_stream.rs b/codex-rs/core/tests/suite/cli_stream.rs index 316e6e3fc42..7388d1a63d4 100644 --- 
a/codex-rs/core/tests/suite/cli_stream.rs +++ b/codex-rs/core/tests/suite/cli_stream.rs @@ -1,12 +1,9 @@ use assert_cmd::Command as AssertCommand; -use assert_cmd::cargo::cargo_bin; use codex_core::RolloutRecorder; use codex_core::protocol::GitInfo; use core_test_support::fs_wait; use core_test_support::skip_if_no_network; -use escargot::CargoBuild; use std::path::PathBuf; -use std::sync::OnceLock; use std::time::Duration; use tempfile::TempDir; use uuid::Uuid; @@ -16,25 +13,9 @@ use wiremock::ResponseTemplate; use wiremock::matchers::method; use wiremock::matchers::path; -static CODEX_CLI_BIN: OnceLock = OnceLock::new(); - fn codex_cli_bin() -> PathBuf { - CODEX_CLI_BIN - .get_or_init(|| { - let candidate = cargo_bin("codexel"); - if candidate.is_file() { - return candidate; - } - - CargoBuild::new() - .package("codex-cli") - .bin("codexel") - .run() - .unwrap_or_else(|err| panic!("failed to build codexel binary: {err}")) - .path() - .to_path_buf() - }) - .clone() + codex_utils_cargo_bin::cargo_bin("codexel") + .unwrap_or_else(|err| panic!("failed to locate codexel binary: {err}")) } /// Tests streaming chat completions through the CLI using a mock server. 
diff --git a/codex-rs/core/tests/suite/client.rs b/codex-rs/core/tests/suite/client.rs index 6d728b62aa9..d2834086c58 100644 --- a/codex-rs/core/tests/suite/client.rs +++ b/codex-rs/core/tests/suite/client.rs @@ -625,17 +625,19 @@ async fn includes_user_instructions_message_in_request() { .unwrap() .contains("be nice") ); - assert_message_role(&request_body["input"][0], "user"); - assert_message_starts_with(&request_body["input"][0], "# AGENTS.md instructions for "); - assert_message_ends_with(&request_body["input"][0], ""); - let ui_text = request_body["input"][0]["content"][0]["text"] + assert_message_role(&request_body["input"][0], "developer"); + assert_message_starts_with(&request_body["input"][0], "## SpawnSubagent"); + assert_message_role(&request_body["input"][1], "user"); + assert_message_starts_with(&request_body["input"][1], "# AGENTS.md instructions for "); + assert_message_ends_with(&request_body["input"][1], ""); + let ui_text = request_body["input"][1]["content"][0]["text"] .as_str() .expect("invalid message content"); assert!(ui_text.contains("")); assert!(ui_text.contains("be nice")); - assert_message_role(&request_body["input"][1], "user"); - assert_message_starts_with(&request_body["input"][1], ""); - assert_message_ends_with(&request_body["input"][1], ""); + assert_message_role(&request_body["input"][2], "user"); + assert_message_starts_with(&request_body["input"][2], ""); + assert_message_ends_with(&request_body["input"][2], ""); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] @@ -689,8 +691,10 @@ async fn skills_append_to_instructions() { let request = resp_mock.single_request(); let request_body = request.body_json(); - assert_message_role(&request_body["input"][0], "user"); - let instructions_text = request_body["input"][0]["content"][0]["text"] + assert_message_role(&request_body["input"][0], "developer"); + assert_message_starts_with(&request_body["input"][0], "## SpawnSubagent"); + 
assert_message_role(&request_body["input"][1], "user"); + let instructions_text = request_body["input"][1]["content"][0]["text"] .as_str() .expect("instructions text"); assert!( @@ -1054,7 +1058,11 @@ async fn includes_developer_instructions_message_in_request() { .contains("be nice") ); assert_message_role(&request_body["input"][0], "developer"); - assert_message_starts_with(&request_body["input"][0], "be useful"); + assert_message_starts_with(&request_body["input"][0], "## SpawnSubagent"); + let developer_text = request_body["input"][0]["content"][0]["text"] + .as_str() + .expect("invalid message content"); + assert!(developer_text.contains("## SpawnSubagent")); assert_message_ends_with(&request_body["input"][0], "be useful"); assert_message_role(&request_body["input"][1], "user"); assert_message_starts_with(&request_body["input"][1], "# AGENTS.md instructions for "); diff --git a/codex-rs/core/tests/suite/compact.rs b/codex-rs/core/tests/suite/compact.rs index 4f57330a28f..f8b960af6a1 100644 --- a/codex-rs/core/tests/suite/compact.rs +++ b/codex-rs/core/tests/suite/compact.rs @@ -21,7 +21,6 @@ use core_test_support::skip_if_no_network; use core_test_support::test_codex::test_codex; use core_test_support::wait_for_event; use core_test_support::wait_for_event_match; -use std::collections::VecDeque; use tempfile::TempDir; use core_test_support::responses::ev_assistant_message; @@ -69,23 +68,6 @@ fn summary_with_prefix(summary: &str) -> String { format!("{SUMMARY_PREFIX}\n{summary}") } -fn drop_call_id(value: &mut serde_json::Value) { - match value { - serde_json::Value::Object(obj) => { - obj.retain(|k, _| k != "call_id"); - for v in obj.values_mut() { - drop_call_id(v); - } - } - serde_json::Value::Array(arr) => { - for v in arr { - drop_call_id(v); - } - } - _ => {} - } -} - fn set_test_compact_prompt(config: &mut Config) { config.compact_prompt = Some(SUMMARIZATION_PROMPT.to_string()); } @@ -594,6 +576,15 @@ async fn 
multiple_auto_compact_per_task_runs_after_token_limit_hit() { values .iter() .filter(|value| { + if value + .get("type") + .and_then(|ty| ty.as_str()) + .is_some_and(|ty| ty == "message") + && value.get("role").and_then(|role| role.as_str()) == Some("developer") + { + return false; + } + if value .get("type") .and_then(|ty| ty.as_str()) @@ -1639,58 +1630,47 @@ async fn manual_compact_twice_preserves_latest_user_messages() { "compact requests should consistently include or omit the summarization prompt" ); - let mut final_output = requests + let final_input = requests .last() .unwrap_or_else(|| panic!("final turn request missing for {final_user_message}")) - .input() - .into_iter() - .collect::>(); - - // System prompt - final_output.pop_front(); - // Developer instructions - final_output.pop_front(); - - let _ = final_output - .iter_mut() - .map(drop_call_id) - .collect::>(); - - let expected = vec![ - json!({ - "content": vec![json!({ - "text": first_user_message, - "type": "input_text", - })], - "role": "user", - "type": "message", - }), - json!({ - "content": vec![json!({ - "text": second_user_message, - "type": "input_text", - })], - "role": "user", - "type": "message", - }), - json!({ - "content": vec![json!({ - "text": expected_second_summary, - "type": "input_text", - })], - "role": "user", - "type": "message", - }), - json!({ - "content": vec![json!({ - "text": final_user_message, - "type": "input_text", - })], - "role": "user", - "type": "message", - }), - ]; - assert_eq!(final_output, expected); + .input(); + + let relevant_user_texts: Vec<&str> = final_input + .iter() + .filter_map(|item| { + if item.get("type").and_then(|v| v.as_str()) != Some("message") + || item.get("role").and_then(|v| v.as_str()) != Some("user") + { + return None; + } + + let text = item + .get("content") + .and_then(|v| v.as_array()) + .and_then(|arr| arr.first()) + .and_then(|entry| entry.get("text")) + .and_then(|v| v.as_str())?; + + let trimmed = text.trim_start(); + if 
trimmed.starts_with("# AGENTS.md instructions for ") + || trimmed.starts_with("") + { + return None; + } + + Some(text) + }) + .collect(); + + assert_eq!( + relevant_user_texts, + vec![ + first_user_message, + second_user_message, + expected_second_summary.as_str(), + final_user_message, + ] + ); } #[tokio::test(flavor = "multi_thread", worker_threads = 2)] diff --git a/codex-rs/core/tests/suite/compact_resume_fork.rs b/codex-rs/core/tests/suite/compact_resume_fork.rs index 75468ae145c..14972e0fd27 100644 --- a/codex-rs/core/tests/suite/compact_resume_fork.rs +++ b/codex-rs/core/tests/suite/compact_resume_fork.rs @@ -137,6 +137,30 @@ fn normalize_compact_prompts(requests: &mut [Value]) { } } +fn prepend_developer_message_to_requests(expected: &mut Value, developer_text: &str) { + let developer_message = json!({ + "type": "message", + "role": "developer", + "content": [ + { + "type": "input_text", + "text": developer_text, + } + ], + }); + + let Some(expected_arr) = expected.as_array_mut() else { + return; + }; + + for request in expected_arr { + let Some(input) = request.get_mut("input").and_then(Value::as_array_mut) else { + continue; + }; + input.insert(0, developer_message.clone()); + } +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] /// Scenario: compact an initial conversation, resume it, fork one turn back, and /// ensure the model-visible history matches expectations at each request. 
@@ -216,11 +240,15 @@ async fn compact_resume_and_fork_preserve_model_history_view() { .as_str() .unwrap_or_default() .to_string(); - let user_instructions = requests[0]["input"][0]["content"][0]["text"] + let developer_instructions = requests[0]["input"][0]["content"][0]["text"] .as_str() .unwrap_or_default() .to_string(); - let environment_context = requests[0]["input"][1]["content"][0]["text"] + let user_instructions = requests[0]["input"][1]["content"][0]["text"] + .as_str() + .unwrap_or_default() + .to_string(); + let environment_context = requests[0]["input"][2]["content"][0]["text"] .as_str() .unwrap_or_default() .to_string(); @@ -581,6 +609,7 @@ async fn compact_resume_and_fork_preserve_model_history_view() { usert_turn_3_after_resume, user_turn_3_after_fork ]); + prepend_developer_message_to_requests(&mut expected, &developer_instructions); normalize_line_endings(&mut expected); if let Some(arr) = expected.as_array_mut() { normalize_compact_prompts(arr); @@ -664,11 +693,15 @@ async fn compact_resume_after_second_compaction_preserves_history() { .as_str() .unwrap_or_default() .to_string(); - let user_instructions = requests[0]["input"][0]["content"][0]["text"] + let developer_instructions = requests[0]["input"][0]["content"][0]["text"] + .as_str() + .unwrap_or_default() + .to_string(); + let user_instructions = requests[0]["input"][1]["content"][0]["text"] .as_str() .unwrap_or_default() .to_string(); - let environment_instructions = requests[0]["input"][1]["content"][0]["text"] + let environment_instructions = requests[0]["input"][2]["content"][0]["text"] .as_str() .unwrap_or_default() .to_string(); @@ -736,6 +769,7 @@ async fn compact_resume_after_second_compaction_preserves_history() { ], } ]); + prepend_developer_message_to_requests(&mut expected, &developer_instructions); normalize_line_endings(&mut expected); let mut last_request_after_2_compacts = json!([{ "instructions": requests[requests.len() -1]["instructions"], diff --git 
a/codex-rs/core/tests/suite/live_cli.rs b/codex-rs/core/tests/suite/live_cli.rs index e77fe6998e1..c5c26a1c8e7 100644 --- a/codex-rs/core/tests/suite/live_cli.rs +++ b/codex-rs/core/tests/suite/live_cli.rs @@ -30,7 +30,7 @@ fn run_live(prompt: &str) -> (assert_cmd::assert::Assert, TempDir) { // implementation). Instead we configure the std `Command` ourselves, then later hand the // resulting `Output` to `assert_cmd` for the familiar assertions. - let mut cmd = Command::cargo_bin("codex-rs").unwrap(); + let mut cmd = Command::new(codex_utils_cargo_bin::cargo_bin("codex-rs").unwrap()); cmd.current_dir(dir.path()); cmd.env("OPENAI_API_KEY", require_api_key()); diff --git a/codex-rs/core/tests/suite/model_overrides.rs b/codex-rs/core/tests/suite/model_overrides.rs index 55e09d71417..39c41dad4bc 100644 --- a/codex-rs/core/tests/suite/model_overrides.rs +++ b/codex-rs/core/tests/suite/model_overrides.rs @@ -39,8 +39,14 @@ async fn override_turn_context_does_not_persist_when_config_exists() { sandbox_policy: None, model: Some("o3".to_string()), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: Some(Some(ReasoningEffort::High)), plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, }) .await @@ -83,8 +89,14 @@ async fn override_turn_context_does_not_create_config_file() { sandbox_policy: None, model: Some("o3".to_string()), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: Some(Some(ReasoningEffort::Medium)), plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, }) .await diff --git a/codex-rs/core/tests/suite/model_tools.rs b/codex-rs/core/tests/suite/model_tools.rs index 3efb557d799..37087ef4587 100644 --- a/codex-rs/core/tests/suite/model_tools.rs +++ b/codex-rs/core/tests/suite/model_tools.rs @@ -62,6 +62,7 @@ async fn model_selects_expected_tools() 
{ "approve_plan".to_string(), "propose_plan_variants".to_string(), "spawn_subagent".to_string(), + "spawn_mini_subagent".to_string(), "view_image".to_string() ], "codex-mini-latest should expose the local shell tool", @@ -80,6 +81,7 @@ async fn model_selects_expected_tools() { "approve_plan".to_string(), "propose_plan_variants".to_string(), "spawn_subagent".to_string(), + "spawn_mini_subagent".to_string(), "apply_patch".to_string(), "view_image".to_string() ], @@ -99,6 +101,7 @@ async fn model_selects_expected_tools() { "approve_plan".to_string(), "propose_plan_variants".to_string(), "spawn_subagent".to_string(), + "spawn_mini_subagent".to_string(), "apply_patch".to_string(), "view_image".to_string() ], @@ -118,6 +121,7 @@ async fn model_selects_expected_tools() { "approve_plan".to_string(), "propose_plan_variants".to_string(), "spawn_subagent".to_string(), + "spawn_mini_subagent".to_string(), "view_image".to_string() ], "gpt-5 should expose the apply_patch tool", @@ -136,6 +140,7 @@ async fn model_selects_expected_tools() { "approve_plan".to_string(), "propose_plan_variants".to_string(), "spawn_subagent".to_string(), + "spawn_mini_subagent".to_string(), "apply_patch".to_string(), "view_image".to_string() ], @@ -155,6 +160,7 @@ async fn model_selects_expected_tools() { "approve_plan".to_string(), "propose_plan_variants".to_string(), "spawn_subagent".to_string(), + "spawn_mini_subagent".to_string(), "apply_patch".to_string(), "view_image".to_string() ], diff --git a/codex-rs/core/tests/suite/prompt_caching.rs b/codex-rs/core/tests/suite/prompt_caching.rs index 453efdc47ff..c0f761242ac 100644 --- a/codex-rs/core/tests/suite/prompt_caching.rs +++ b/codex-rs/core/tests/suite/prompt_caching.rs @@ -125,6 +125,7 @@ async fn prompt_tools_are_consistent_across_requests() -> anyhow::Result<()> { "approve_plan", "propose_plan_variants", "spawn_subagent", + "spawn_mini_subagent", "apply_patch", "view_image", ]; @@ -257,9 +258,9 @@ async fn 
prefixes_context_and_instructions_once_and_consistently_across_requests let body1 = req1.single_request().body_json(); let input1 = body1["input"].as_array().expect("input array"); - assert_eq!(input1.len(), 3, "expected cached prefix + env + user msg"); + assert_eq!(input1.len(), 4, "expected cached prefix + env + user msg"); - let ui_text = input1[0]["content"][0]["text"] + let ui_text = input1[1]["content"][0]["text"] .as_str() .expect("ui message text"); assert!( @@ -271,11 +272,11 @@ async fn prefixes_context_and_instructions_once_and_consistently_across_requests let cwd_str = config.cwd.to_string_lossy(); let expected_env_text = default_env_context_str(&cwd_str, &shell); assert_eq!( - input1[1], + input1[2], text_user_input(expected_env_text), "expected environment context after UI message" ); - assert_eq!(input1[2], text_user_input("hello 1".to_string())); + assert_eq!(input1[3], text_user_input("hello 1".to_string())); let body2 = req2.single_request().body_json(); let input2 = body2["input"].as_array().expect("input array"); @@ -328,8 +329,14 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() -> an }), model: Some("o3".to_string()), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: Some(Some(ReasoningEffort::High)), plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: Some(ReasoningSummary::Detailed), }) .await?; @@ -409,8 +416,14 @@ async fn override_before_first_turn_emits_environment_context() -> anyhow::Resul sandbox_policy: None, model: None, plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: None, plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, }) .await?; @@ -653,7 +666,8 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a let body1 = req1.single_request().body_json(); let 
body2 = req2.single_request().body_json(); - let expected_ui_msg = body1["input"][0].clone(); + let expected_developer_msg = body1["input"][0].clone(); + let expected_ui_msg = body1["input"][1].clone(); let shell = default_user_shell(); let default_cwd_lossy = default_cwd.to_string_lossy(); @@ -662,6 +676,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a let expected_user_message_1 = text_user_input("hello 1".to_string()); let expected_input_1 = serde_json::Value::Array(vec![ + expected_developer_msg.clone(), expected_ui_msg.clone(), expected_env_msg_1.clone(), expected_user_message_1.clone(), @@ -670,6 +685,7 @@ async fn send_user_turn_with_no_changes_does_not_send_environment_context() -> a let expected_user_message_2 = text_user_input("hello 2".to_string()); let expected_input_2 = serde_json::Value::Array(vec![ + expected_developer_msg, expected_ui_msg, expected_env_msg_1, expected_user_message_1, @@ -743,13 +759,15 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu let body1 = req1.single_request().body_json(); let body2 = req2.single_request().body_json(); - let expected_ui_msg = body1["input"][0].clone(); + let expected_developer_msg = body1["input"][0].clone(); + let expected_ui_msg = body1["input"][1].clone(); let shell = default_user_shell(); let expected_env_text_1 = default_env_context_str(&default_cwd.to_string_lossy(), &shell); let expected_env_msg_1 = text_user_input(expected_env_text_1); let expected_user_message_1 = text_user_input("hello 1".to_string()); let expected_input_1 = serde_json::Value::Array(vec![ + expected_developer_msg.clone(), expected_ui_msg.clone(), expected_env_msg_1.clone(), expected_user_message_1.clone(), @@ -767,6 +785,7 @@ async fn send_user_turn_with_changes_sends_environment_context() -> anyhow::Resu )); let expected_user_message_2 = text_user_input("hello 2".to_string()); let expected_input_2 = serde_json::Value::Array(vec![ + expected_developer_msg, 
expected_ui_msg, expected_env_msg_1, expected_user_message_1, diff --git a/codex-rs/core/tests/suite/remote_models.rs b/codex-rs/core/tests/suite/remote_models.rs index 816b1990ce0..0b758f64884 100644 --- a/codex-rs/core/tests/suite/remote_models.rs +++ b/codex-rs/core/tests/suite/remote_models.rs @@ -17,7 +17,6 @@ use codex_core::protocol::ExecCommandSource; use codex_core::protocol::Op; use codex_core::protocol::SandboxPolicy; use codex_protocol::config_types::ReasoningSummary; -use codex_protocol::openai_models::ClientVersion; use codex_protocol::openai_models::ConfigShellToolType; use codex_protocol::openai_models::ModelInfo; use codex_protocol::openai_models::ModelPreset; @@ -25,7 +24,6 @@ use codex_protocol::openai_models::ModelVisibility; use codex_protocol::openai_models::ModelsResponse; use codex_protocol::openai_models::ReasoningEffort; use codex_protocol::openai_models::ReasoningEffortPreset; -use codex_protocol::openai_models::ReasoningSummaryFormat; use codex_protocol::openai_models::TruncationPolicyConfig; use codex_protocol::user_input::UserInput; use core_test_support::load_default_config_for_test; @@ -73,7 +71,6 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> { }], shell_type: ConfigShellToolType::UnifiedExec, visibility: ModelVisibility::List, - minimal_client_version: ClientVersion(0, 1, 0), supported_in_api: true, priority: 1, upgrade: None, @@ -85,7 +82,6 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> { truncation_policy: TruncationPolicyConfig::bytes(10_000), supports_parallel_tool_calls: false, context_window: None, - reasoning_summary_format: ReasoningSummaryFormat::None, experimental_supported_tools: Vec::new(), }; @@ -138,8 +134,14 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> { sandbox_policy: None, model: Some(REMOTE_MODEL_SLUG.to_string()), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: None, plan_effort: 
None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, }) .await?; @@ -215,7 +217,6 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> { }], shell_type: ConfigShellToolType::ShellCommand, visibility: ModelVisibility::List, - minimal_client_version: ClientVersion(0, 1, 0), supported_in_api: true, priority: 1, upgrade: None, @@ -227,7 +228,6 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> { truncation_policy: TruncationPolicyConfig::bytes(10_000), supports_parallel_tool_calls: false, context_window: None, - reasoning_summary_format: ReasoningSummaryFormat::None, experimental_supported_tools: Vec::new(), }; mount_models_once( @@ -273,8 +273,14 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> { sandbox_policy: None, model: Some(model.to_string()), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: None, plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, }) .await?; @@ -482,7 +488,6 @@ fn test_remote_model(slug: &str, visibility: ModelVisibility, priority: i32) -> }], shell_type: ConfigShellToolType::ShellCommand, visibility, - minimal_client_version: ClientVersion(0, 1, 0), supported_in_api: true, priority, upgrade: None, @@ -494,7 +499,6 @@ fn test_remote_model(slug: &str, visibility: ModelVisibility, priority: i32) -> truncation_policy: TruncationPolicyConfig::bytes(10_000), supports_parallel_tool_calls: false, context_window: None, - reasoning_summary_format: ReasoningSummaryFormat::None, experimental_supported_tools: Vec::new(), } } diff --git a/codex-rs/core/tests/suite/resume_warning.rs b/codex-rs/core/tests/suite/resume_warning.rs index 5369398a313..92acf7de0f0 100644 --- a/codex-rs/core/tests/suite/resume_warning.rs +++ b/codex-rs/core/tests/suite/resume_warning.rs @@ -28,6 +28,11 @@ fn resume_history( model: previous_model.to_string(), 
effort: config.model_reasoning_effort, summary: config.model_reasoning_summary, + base_instructions: None, + user_instructions: None, + developer_instructions: None, + final_output_json_schema: None, + truncation_policy: None, }; InitialHistory::Resumed(ResumedHistory { diff --git a/codex-rs/core/tests/suite/rmcp_client.rs b/codex-rs/core/tests/suite/rmcp_client.rs index ef2fc16ede0..617b3b8a212 100644 --- a/codex-rs/core/tests/suite/rmcp_client.rs +++ b/codex-rs/core/tests/suite/rmcp_client.rs @@ -10,7 +10,6 @@ use std::time::UNIX_EPOCH; use codex_core::config::types::McpServerConfig; use codex_core::config::types::McpServerTransportConfig; -use codex_core::features::Feature; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; @@ -79,7 +78,6 @@ async fn stdio_server_round_trip() -> anyhow::Result<()> { let fixture = test_codex() .with_config(move |config| { - config.features.enable(Feature::RmcpClient); config.mcp_servers.insert( server_name.to_string(), McpServerConfig { @@ -217,7 +215,6 @@ async fn stdio_image_responses_round_trip() -> anyhow::Result<()> { let fixture = test_codex() .with_config(move |config| { - config.features.enable(Feature::RmcpClient); config.mcp_servers.insert( server_name.to_string(), McpServerConfig { @@ -413,7 +410,6 @@ async fn stdio_image_completions_round_trip() -> anyhow::Result<()> { let fixture = test_codex() .with_config(move |config| { config.model_provider.wire_api = codex_core::WireApi::Chat; - config.features.enable(Feature::RmcpClient); config.mcp_servers.insert( server_name.to_string(), McpServerConfig { @@ -560,7 +556,6 @@ async fn stdio_server_propagates_whitelisted_env_vars() -> anyhow::Result<()> { let fixture = test_codex() .with_config(move |config| { - config.features.enable(Feature::RmcpClient); config.mcp_servers.insert( server_name.to_string(), McpServerConfig { @@ -710,7 +705,6 @@ async fn streamable_http_tool_call_round_trip() -> anyhow::Result<()> { let fixture = test_codex() 
.with_config(move |config| { - config.features.enable(Feature::RmcpClient); config.mcp_servers.insert( server_name.to_string(), McpServerConfig { @@ -891,7 +885,6 @@ async fn streamable_http_with_oauth_round_trip() -> anyhow::Result<()> { let fixture = test_codex() .with_config(move |config| { - config.features.enable(Feature::RmcpClient); config.mcp_servers.insert( server_name.to_string(), McpServerConfig { diff --git a/codex-rs/core/tests/suite/shell_command.rs b/codex-rs/core/tests/suite/shell_command.rs index 34124b35dc3..6aa0da2d347 100644 --- a/codex-rs/core/tests/suite/shell_command.rs +++ b/codex-rs/core/tests/suite/shell_command.rs @@ -1,4 +1,5 @@ use anyhow::Result; +use codex_core::features::Feature; use core_test_support::assert_regex_match; use core_test_support::responses::ev_assistant_message; use core_test_support::responses::ev_completed; @@ -12,6 +13,7 @@ use core_test_support::test_codex::TestCodexBuilder; use core_test_support::test_codex::TestCodexHarness; use core_test_support::test_codex::test_codex; use serde_json::json; +use test_case::test_case; fn shell_responses_with_timeout( call_id: &str, @@ -201,7 +203,6 @@ async fn shell_command_times_out_with_timeout_ms() -> anyhow::Result<()> { skip_if_no_network!(Ok(())); let harness = shell_command_harness_with(|builder| builder.with_model("gpt-5.1")).await?; - let call_id = "shell-command-timeout"; let command = if cfg!(windows) { "timeout /t 5" @@ -224,3 +225,61 @@ async fn shell_command_times_out_with_timeout_ms() -> anyhow::Result<()> { Ok(()) } + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +#[test_case(true ; "with_login")] +#[test_case(false ; "without_login")] +async fn unicode_output(login: bool) -> anyhow::Result<()> { + skip_if_no_network!(Ok(())); + + let harness = shell_command_harness_with(|builder| { + builder.with_model("gpt-5.2").with_config(|config| { + config.features.enable(Feature::PowershellUtf8); + }) + }) + .await?; + + let call_id = "unicode_output"; + 
mount_shell_responses( + &harness, + call_id, + "git -c alias.say='!printf \"%s\" \"naïve_café\"' say", + Some(login), + ) + .await; + harness.submit("run the command without login").await?; + + let output = harness.function_call_stdout(call_id).await; + assert_shell_command_output(&output, "naïve_café")?; + + Ok(()) +} + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +#[test_case(true ; "with_login")] +#[test_case(false ; "without_login")] +async fn unicode_output_with_newlines(login: bool) -> anyhow::Result<()> { + skip_if_no_network!(Ok(())); + + let harness = shell_command_harness_with(|builder| { + builder.with_model("gpt-5.2").with_config(|config| { + config.features.enable(Feature::PowershellUtf8); + }) + }) + .await?; + + let call_id = "unicode_output"; + mount_shell_responses( + &harness, + call_id, + "echo 'line1\nnaïve café\nline3'", + Some(login), + ) + .await; + harness.submit("run the command without login").await?; + + let output = harness.function_call_stdout(call_id).await; + assert_shell_command_output(&output, "line1\\nnaïve café\\nline3")?; + + Ok(()) +} diff --git a/codex-rs/core/tests/suite/skills.rs b/codex-rs/core/tests/suite/skills.rs index 35576a4bafb..e64b5db3e79 100644 --- a/codex-rs/core/tests/suite/skills.rs +++ b/codex-rs/core/tests/suite/skills.rs @@ -27,6 +27,14 @@ fn write_skill(home: &Path, name: &str, description: &str, body: &str) -> std::p path } +fn system_skill_md_path(home: impl AsRef, name: &str) -> std::path::PathBuf { + home.as_ref() + .join("skills") + .join(".system") + .join(name) + .join("SKILL.md") +} + #[tokio::test(flavor = "multi_thread", worker_threads = 2)] async fn user_turn_includes_skill_instructions() -> Result<()> { skip_if_no_network!(Ok(())); @@ -158,13 +166,15 @@ async fn skill_load_errors_surface_in_session_configured() -> Result<()> { async fn list_skills_includes_system_cache_entries() -> Result<()> { skip_if_no_network!(Ok(())); + const SYSTEM_SKILL_NAME: &str = "skill-creator"; + let 
server = start_mock_server().await; let mut builder = test_codex() .with_config(|config| { config.features.enable(Feature::Skills); }) .with_pre_build_hook(|home| { - let system_skill_path = home.join("skills/.system/plan/SKILL.md"); + let system_skill_path = system_skill_md_path(home, SYSTEM_SKILL_NAME); assert!( !system_skill_path.exists(), "expected embedded system skills not yet installed, but {system_skill_path:?} exists" @@ -172,14 +182,15 @@ async fn list_skills_includes_system_cache_entries() -> Result<()> { }); let test = builder.build(&server).await?; - let system_skill_path = test.codex_home_path().join("skills/.system/plan/SKILL.md"); + let system_skill_path = system_skill_md_path(test.codex_home_path(), SYSTEM_SKILL_NAME); assert!( system_skill_path.exists(), "expected embedded system skills installed to {system_skill_path:?}" ); let system_skill_contents = fs::read_to_string(&system_skill_path)?; + let expected_name_line = format!("name: {SYSTEM_SKILL_NAME}"); assert!( - system_skill_contents.contains("name: plan"), + system_skill_contents.contains(&expected_name_line), "expected embedded system skill file, got:\n{system_skill_contents}" ); @@ -206,12 +217,13 @@ async fn list_skills_includes_system_cache_entries() -> Result<()> { let skill = skills .iter() - .find(|skill| skill.name == "plan") + .find(|skill| skill.name == SYSTEM_SKILL_NAME) .expect("expected system skill to be present"); assert_eq!(skill.scope, codex_protocol::protocol::SkillScope::System); let path_str = skill.path.to_string_lossy().replace('\\', "/"); + let expected_path_suffix = format!("/skills/.system/{SYSTEM_SKILL_NAME}/SKILL.md"); assert!( - path_str.ends_with("/skills/.system/plan/SKILL.md"), + path_str.ends_with(&expected_path_suffix), "unexpected skill path: {path_str}" ); diff --git a/codex-rs/core/tests/suite/truncation.rs b/codex-rs/core/tests/suite/truncation.rs index e5ab28a5ebb..4b916e51b49 100644 --- a/codex-rs/core/tests/suite/truncation.rs +++ 
b/codex-rs/core/tests/suite/truncation.rs @@ -5,7 +5,6 @@ use anyhow::Context; use anyhow::Result; use codex_core::config::types::McpServerConfig; use codex_core::config::types::McpServerTransportConfig; -use codex_core::features::Feature; use codex_core::protocol::AskForApproval; use codex_core::protocol::EventMsg; use codex_core::protocol::Op; @@ -421,7 +420,6 @@ async fn mcp_tool_call_output_exceeds_limit_truncated_for_model() -> Result<()> .into_owned(); let mut builder = test_codex().with_config(move |config| { - config.features.enable(Feature::RmcpClient); config.mcp_servers.insert( server_name.to_string(), codex_core::config::types::McpServerConfig { @@ -511,7 +509,6 @@ async fn mcp_image_output_preserves_image_and_no_text_summary() -> Result<()> { let openai_png = "data:image/png;base64,iVBORw0KGgoAAAANSUhEUgAAAAEAAAABCAQAAAC1HAwCAAAAC0lEQVR42mP8/x8AAwMB/ee9bQAAAABJRU5ErkJggg=="; let mut builder = test_codex().with_config(move |config| { - config.features.enable(Feature::RmcpClient); config.mcp_servers.insert( server_name.to_string(), McpServerConfig { @@ -774,7 +771,6 @@ async fn mcp_tool_call_output_not_truncated_with_custom_limit() -> Result<()> { .into_owned(); let mut builder = test_codex().with_config(move |config| { - config.features.enable(Feature::RmcpClient); config.tool_output_token_limit = Some(50_000); config.mcp_servers.insert( server_name.to_string(), diff --git a/codex-rs/exec-server/Cargo.toml b/codex-rs/exec-server/Cargo.toml index 4f8df05dbef..9346db63c1f 100644 --- a/codex-rs/exec-server/Cargo.toml +++ b/codex-rs/exec-server/Cargo.toml @@ -56,7 +56,7 @@ tracing = { workspace = true } tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] } [dev-dependencies] -assert_cmd = { workspace = true } +codex-utils-cargo-bin = { workspace = true } exec_server_test_support = { workspace = true } maplit = { workspace = true } pretty_assertions = { workspace = true } diff --git a/codex-rs/exec-server/src/posix.rs 
b/codex-rs/exec-server/src/posix.rs index ba481264e2f..12d0055cdde 100644 --- a/codex-rs/exec-server/src/posix.rs +++ b/codex-rs/exec-server/src/posix.rs @@ -230,7 +230,21 @@ fn format_program_name(path: &Path, preserve_program_paths: bool) -> Option anyhow::Result { let codex_home = find_codex_home().context("failed to resolve codex_home for execpolicy")?; - codex_core::load_exec_policy(&codex_home) + + // TODO(mbolin): At a minimum, `cwd` should be configurable via + // `codex/sandbox-state/update` or some other custom MCP call. + let cwd = None; + let cli_overrides = Vec::new(); + let overrides = codex_core::config_loader::LoaderOverrides::default(); + let config_layer_stack = codex_core::config_loader::load_config_layers_state( + &codex_home, + cwd, + &cli_overrides, + overrides, + ) + .await?; + + codex_core::load_exec_policy(&config_layer_stack) .await .map_err(anyhow::Error::from) } diff --git a/codex-rs/exec-server/tests/common/Cargo.toml b/codex-rs/exec-server/tests/common/Cargo.toml index ba7d2af0a17..6444b61f97e 100644 --- a/codex-rs/exec-server/tests/common/Cargo.toml +++ b/codex-rs/exec-server/tests/common/Cargo.toml @@ -8,9 +8,9 @@ license.workspace = true path = "lib.rs" [dependencies] -assert_cmd = { workspace = true } anyhow = { workspace = true } codex-core = { workspace = true } +codex-utils-cargo-bin = { workspace = true } rmcp = { workspace = true } serde_json = { workspace = true } tokio = { workspace = true } diff --git a/codex-rs/exec-server/tests/common/lib.rs b/codex-rs/exec-server/tests/common/lib.rs index c2202a168a8..a5a6a3cac58 100644 --- a/codex-rs/exec-server/tests/common/lib.rs +++ b/codex-rs/exec-server/tests/common/lib.rs @@ -32,14 +32,18 @@ pub async fn create_transport

( where P: AsRef, { - let mcp_executable = assert_cmd::Command::cargo_bin("codex-exec-mcp-server")?; - let execve_wrapper = assert_cmd::Command::cargo_bin("codex-execve-wrapper")?; - let bash = Path::new(env!("CARGO_MANIFEST_DIR")) - .join("..") - .join("..") - .join("tests") - .join("suite") - .join("bash"); + let mcp_executable = codex_utils_cargo_bin::cargo_bin("codex-exec-mcp-server")?; + let execve_wrapper = codex_utils_cargo_bin::cargo_bin("codex-execve-wrapper")?; + // `bash` requires a special lookup when running under Buck because it is a + // _resource_ rather than a binary target. + let bash = if let Some(root) = codex_utils_cargo_bin::buck_project_root()? { + root.join("codex-rs/exec-server/tests/suite/bash") + } else { + Path::new(env!("CARGO_MANIFEST_DIR")) + .join("..") + .join("suite") + .join("bash") + }; // Need to ensure the artifact associated with the bash DotSlash file is // available before it is run in a read-only sandbox. @@ -52,20 +56,19 @@ where .await?; assert!(status.success(), "dotslash fetch failed: {status:?}"); - let transport = - TokioChildProcess::new(Command::new(mcp_executable.get_program()).configure(|cmd| { - cmd.arg("--bash").arg(bash); - cmd.arg("--execve").arg(execve_wrapper.get_program()); - cmd.env("CODEX_HOME", codex_home.as_ref()); - cmd.env("DOTSLASH_CACHE", dotslash_cache.as_ref()); + let transport = TokioChildProcess::new(Command::new(&mcp_executable).configure(|cmd| { + cmd.arg("--bash").arg(bash); + cmd.arg("--execve").arg(&execve_wrapper); + cmd.env("CODEX_HOME", codex_home.as_ref()); + cmd.env("DOTSLASH_CACHE", dotslash_cache.as_ref()); - // Important: pipe stdio so rmcp can speak JSON-RPC over stdin/stdout - cmd.stdin(Stdio::piped()); - cmd.stdout(Stdio::piped()); + // Important: pipe stdio so rmcp can speak JSON-RPC over stdin/stdout + cmd.stdin(Stdio::piped()); + cmd.stdout(Stdio::piped()); - // Optional but very helpful while debugging: - cmd.stderr(Stdio::inherit()); - }))?; + // Optional but very helpful 
while debugging: + cmd.stderr(Stdio::inherit()); + }))?; Ok(transport) } diff --git a/codex-rs/exec-server/tests/suite/accept_elicitation.rs b/codex-rs/exec-server/tests/suite/accept_elicitation.rs index 491c4bcee4e..84af90879ea 100644 --- a/codex-rs/exec-server/tests/suite/accept_elicitation.rs +++ b/codex-rs/exec-server/tests/suite/accept_elicitation.rs @@ -142,11 +142,7 @@ prefix_rule( } fn ensure_codex_cli() -> Result { - let codex_cli = PathBuf::from( - assert_cmd::Command::cargo_bin("codexel")? - .get_program() - .to_os_string(), - ); + let codex_cli = codex_utils_cargo_bin::cargo_bin("codexel")?; let metadata = codex_cli.metadata().with_context(|| { format!( diff --git a/codex-rs/exec/Cargo.toml b/codex-rs/exec/Cargo.toml index c2052154217..9156c22ea3c 100644 --- a/codex-rs/exec/Cargo.toml +++ b/codex-rs/exec/Cargo.toml @@ -51,6 +51,7 @@ ts-rs = { workspace = true, features = [ [dev-dependencies] assert_cmd = { workspace = true } +codex-utils-cargo-bin = { workspace = true } core_test_support = { workspace = true } libc = { workspace = true } mcp-types = { workspace = true } diff --git a/codex-rs/exec/src/event_processor_with_human_output.rs b/codex-rs/exec/src/event_processor_with_human_output.rs index 462e5963a56..f1fe37e8849 100644 --- a/codex-rs/exec/src/event_processor_with_human_output.rs +++ b/codex-rs/exec/src/event_processor_with_human_output.rs @@ -218,7 +218,15 @@ impl EventProcessor for EventProcessorWithHumanOutput { EventMsg::BackgroundEvent(BackgroundEventEvent { message }) => { ts_msg!(self, "{}", message.style(self.dimmed)); } - EventMsg::StreamError(StreamErrorEvent { message, .. }) => { + EventMsg::StreamError(StreamErrorEvent { + message, + additional_details, + .. 
+ }) => { + let message = match additional_details { + Some(details) if !details.trim().is_empty() => format!("{message} ({details})"), + _ => message, + }; ts_msg!(self, "{}", message.style(self.dimmed)); } EventMsg::TaskStarted(_) => { diff --git a/codex-rs/exec/src/event_processor_with_jsonl_output.rs b/codex-rs/exec/src/event_processor_with_jsonl_output.rs index 03c51662b14..0b2df544559 100644 --- a/codex-rs/exec/src/event_processor_with_jsonl_output.rs +++ b/codex-rs/exec/src/event_processor_with_jsonl_output.rs @@ -145,9 +145,15 @@ impl EventProcessorWithJsonOutput { }; vec![ThreadEvent::ItemCompleted(ItemCompletedEvent { item })] } - EventMsg::StreamError(ev) => vec![ThreadEvent::Error(ThreadErrorEvent { - message: ev.message.clone(), - })], + EventMsg::StreamError(ev) => { + let message = match &ev.additional_details { + Some(details) if !details.trim().is_empty() => { + format!("{} ({})", ev.message, details) + } + _ => ev.message.clone(), + }; + vec![ThreadEvent::Error(ThreadErrorEvent { message })] + } EventMsg::PlanUpdate(ev) => self.handle_plan_update(ev), _ => Vec::new(), } diff --git a/codex-rs/exec/tests/event_processor_with_json_output.rs b/codex-rs/exec/tests/event_processor_with_json_output.rs index 2b3673f5a6d..d288f568e8b 100644 --- a/codex-rs/exec/tests/event_processor_with_json_output.rs +++ b/codex-rs/exec/tests/event_processor_with_json_output.rs @@ -583,6 +583,7 @@ fn stream_error_event_produces_error() { EventMsg::StreamError(codex_core::protocol::StreamErrorEvent { message: "retrying".to_string(), codex_error_info: Some(CodexErrorInfo::Other), + additional_details: None, }), )); assert_eq!( diff --git a/codex-rs/exec/tests/suite/apply_patch.rs b/codex-rs/exec/tests/suite/apply_patch.rs index a32f83ba5eb..fdc5415dc6c 100644 --- a/codex-rs/exec/tests/suite/apply_patch.rs +++ b/codex-rs/exec/tests/suite/apply_patch.rs @@ -23,8 +23,7 @@ fn test_standalone_exec_cli_can_use_apply_patch() -> anyhow::Result<()> { let absolute_path = 
tmp.path().join(relative_path); fs::write(&absolute_path, "original content\n")?; - Command::cargo_bin("codex-exec") - .context("should find binary for codex-exec")? + Command::new(codex_utils_cargo_bin::cargo_bin("codex-exec")?) .arg(CODEX_APPLY_PATCH_ARG1) .arg( r#"*** Begin Patch diff --git a/codex-rs/exec/tests/suite/sandbox.rs b/codex-rs/exec/tests/suite/sandbox.rs index d995d910308..303023b158b 100644 --- a/codex-rs/exec/tests/suite/sandbox.rs +++ b/codex-rs/exec/tests/suite/sandbox.rs @@ -42,7 +42,8 @@ async fn spawn_command_under_sandbox( env: HashMap, ) -> std::io::Result { use codex_core::landlock::spawn_command_under_linux_sandbox; - let codex_linux_sandbox_exe = assert_cmd::cargo::cargo_bin("codex-exec"); + let codex_linux_sandbox_exe = codex_utils_cargo_bin::cargo_bin("codex-exec") + .map_err(|err| io::Error::new(io::ErrorKind::NotFound, err))?; spawn_command_under_linux_sandbox( codex_linux_sandbox_exe, command, diff --git a/codex-rs/lsp/Cargo.toml b/codex-rs/lsp/Cargo.toml new file mode 100644 index 00000000000..cd5d589e531 --- /dev/null +++ b/codex-rs/lsp/Cargo.toml @@ -0,0 +1,27 @@ +[package] +name = "codex-lsp" +version.workspace = true +edition.workspace = true +license.workspace = true + +[lints] +workspace = true + +[dependencies] +anyhow.workspace = true +dunce.workspace = true +ignore.workspace = true +notify.workspace = true +serde = { workspace = true, features = ["derive"] } +serde_json.workspace = true +thiserror.workspace = true +tokio = { workspace = true, features = ["fs", "io-util", "macros", "process", "rt", "sync", "time"] } +tracing.workspace = true +url.workspace = true +which.workspace = true +wildmatch.workspace = true + +[dev-dependencies] +pretty_assertions.workspace = true +tempfile.workspace = true +tokio = { workspace = true, features = ["macros", "rt"] } diff --git a/codex-rs/lsp/src/jsonrpc.rs b/codex-rs/lsp/src/jsonrpc.rs new file mode 100644 index 00000000000..d10567d04df --- /dev/null +++ b/codex-rs/lsp/src/jsonrpc.rs 
use std::collections::HashMap;
use std::io;
use std::sync::Arc;

use serde::Serialize;
use serde_json::Value;
use tokio::io::AsyncRead;
use tokio::io::AsyncReadExt;
use tokio::io::AsyncWrite;
use tokio::io::AsyncWriteExt;
use tokio::sync::Mutex;
use tokio::sync::oneshot;

/// Errors surfaced by this minimal LSP JSON-RPC transport.
#[derive(Debug, thiserror::Error)]
pub enum JsonRpcError {
    #[error("io error: {0}")]
    Io(#[from] io::Error),
    #[error("json error: {0}")]
    Json(#[from] serde_json::Error),
    #[error("protocol error: {0}")]
    Protocol(String),
    #[error("request cancelled")]
    Cancelled,
    #[error("server error: {0}")]
    Server(String),
}

/// Outgoing request frame. `params` is omitted entirely when `None`.
#[derive(Debug, Clone, Serialize)]
struct JsonRpcRequest<'a, T> {
    jsonrpc: &'static str,
    id: u64,
    method: &'a str,
    #[serde(skip_serializing_if = "Option::is_none")]
    params: Option<T>,
}

/// Outgoing notification frame (no `id`, no response expected).
#[derive(Debug, Clone, Serialize)]
struct JsonRpcNotification<'a, T> {
    jsonrpc: &'static str,
    method: &'a str,
    #[serde(skip_serializing_if = "Option::is_none")]
    params: Option<T>,
}

/// Successful response we send for a server-initiated request.
#[derive(Debug, Clone, Serialize)]
struct JsonRpcOutgoingResponse {
    jsonrpc: &'static str,
    id: u64,
    result: Value,
}

#[derive(Debug, Clone, Serialize)]
struct JsonRpcOutgoingError {
    code: i64,
    message: String,
}

#[derive(Debug, Clone, Serialize)]
struct JsonRpcOutgoingErrorResponse {
    jsonrpc: &'static str,
    id: u64,
    error: JsonRpcOutgoingError,
}

/// Incoming response frame. `deny_unknown_fields` keeps the untagged
/// `IncomingMessage` from matching request/notification frames (they carry
/// a `method` key, which is unknown here).
#[derive(Debug, Clone, serde::Deserialize)]
#[serde(deny_unknown_fields)]
pub(crate) struct JsonRpcResponse {
    #[serde(rename = "jsonrpc")]
    #[serde(default)]
    _jsonrpc: Option<String>,
    id: u64,
    #[serde(default)]
    result: Value,
    #[serde(default)]
    error: Option<JsonRpcResponseError>,
}

#[derive(Debug, Clone, serde::Deserialize)]
pub(crate) struct JsonRpcResponseError {
    #[serde(default)]
    _code: Option<i64>,
    message: String,
}

/// Any frame a language server can send us. Untagged: serde tries the
/// variants in order, so `Request` (has `id` + `method`) is tried before
/// `Notification` (has `method` only) and `Response` (strict fields).
#[derive(Debug, Clone, serde::Deserialize)]
#[serde(untagged)]
pub enum IncomingMessage {
    Request {
        #[serde(default)]
        _jsonrpc: Option<String>,
        #[serde(rename = "id")]
        id: u64,
        #[serde(rename = "method")]
        method: String,
        #[serde(default)]
        #[serde(rename = "params")]
        params: Option<Value>,
    },
    Notification {
        #[serde(default)]
        _jsonrpc: Option<String>,
        method: String,
        #[serde(default)]
        params: Option<Value>,
    },
    Response(JsonRpcResponse),
}

/// Client half of the JSON-RPC connection: serializes outgoing frames and
/// matches incoming responses to pending request ids.
pub(crate) struct JsonRpcClient {
    next_id: Mutex<u64>,
    pending: Mutex<HashMap<u64, oneshot::Sender<Result<Value, JsonRpcError>>>>,
    writer: Mutex<Box<dyn AsyncWrite + Unpin + Send>>,
}

impl JsonRpcClient {
    pub(crate) fn new(writer: Box<dyn AsyncWrite + Unpin + Send>) -> Self {
        Self {
            next_id: Mutex::new(1),
            pending: Mutex::new(HashMap::new()),
            writer: Mutex::new(writer),
        }
    }

    /// Send a request and wait for the matching response.
    ///
    /// Returns `Cancelled` if the connection is torn down before the server
    /// answers, and `Server` if the server replies with an error object.
    pub(crate) async fn request<T: Serialize>(
        &self,
        method: &str,
        params: Option<T>,
    ) -> Result<Value, JsonRpcError> {
        let id = {
            let mut guard = self.next_id.lock().await;
            let id = *guard;
            *guard = guard.saturating_add(1);
            id
        };

        let (tx, rx) = oneshot::channel();
        self.pending.lock().await.insert(id, tx);

        let msg = JsonRpcRequest {
            jsonrpc: "2.0",
            id,
            method,
            params,
        };
        // Fix: if the write fails, remove the pending entry we just inserted.
        // Previously the sender leaked in the map on write errors, since no
        // response for this id can ever arrive.
        if let Err(err) = self.write_message(&msg).await {
            self.pending.lock().await.remove(&id);
            return Err(err);
        }

        match rx.await {
            Ok(res) => res,
            Err(_) => Err(JsonRpcError::Cancelled),
        }
    }

    /// Send a fire-and-forget notification.
    pub(crate) async fn notify<T: Serialize>(
        &self,
        method: &str,
        params: Option<T>,
    ) -> Result<(), JsonRpcError> {
        let msg = JsonRpcNotification {
            jsonrpc: "2.0",
            method,
            params,
        };
        self.write_message(&msg).await
    }

    /// Answer a server-initiated request with a result.
    pub(crate) async fn respond(&self, id: u64, result: Value) -> Result<(), JsonRpcError> {
        let msg = JsonRpcOutgoingResponse {
            jsonrpc: "2.0",
            id,
            result,
        };
        self.write_message(&msg).await
    }

    /// Answer a server-initiated request with JSON-RPC error -32601.
    pub(crate) async fn respond_method_not_found(
        &self,
        id: u64,
        method: &str,
    ) -> Result<(), JsonRpcError> {
        let msg = JsonRpcOutgoingErrorResponse {
            jsonrpc: "2.0",
            id,
            error: JsonRpcOutgoingError {
                code: -32601,
                message: format!("method not found: {method}"),
            },
        };
        self.write_message(&msg).await
    }

    /// Serialize `msg` and write it with LSP base-protocol framing
    /// (`Content-Length: N\r\n\r\n` followed by the JSON payload).
    async fn write_message<T: Serialize>(&self, msg: &T) -> Result<(), JsonRpcError> {
        let json = serde_json::to_vec(msg)?;
        let mut writer = self.writer.lock().await;
        writer
            .write_all(format!("Content-Length: {}\r\n\r\n", json.len()).as_bytes())
            .await?;
        writer.write_all(&json).await?;
        writer.flush().await?;
        Ok(())
    }

    /// Route an incoming frame: responses complete the pending oneshot for
    /// their id; requests and notifications are handled by the pump, not here.
    pub(crate) async fn handle_incoming(&self, msg: IncomingMessage) -> Result<(), JsonRpcError> {
        match msg {
            IncomingMessage::Response(resp) => {
                let tx = self.pending.lock().await.remove(&resp.id);
                if let Some(tx) = tx {
                    let result = match resp.error {
                        Some(err) => Err(JsonRpcError::Server(err.message)),
                        None => Ok(resp.result),
                    };
                    let _ = tx.send(result);
                }
            }
            IncomingMessage::Notification { .. } | IncomingMessage::Request { .. } => {}
        }
        Ok(())
    }
}

/// Read one framed message: headers, then exactly `Content-Length` bytes.
pub(crate) async fn read_message<R: AsyncRead + Unpin>(
    reader: &mut R,
) -> Result<IncomingMessage, JsonRpcError> {
    let content_len = read_content_length(reader).await?;
    let mut buf = vec![0u8; content_len];
    reader.read_exact(&mut buf).await?;
    let msg = serde_json::from_slice::<IncomingMessage>(&buf)?;
    Ok(msg)
}

/// Consume the header block (terminated by an empty line) and return the
/// parsed `Content-Length` value. Caps at 128 headers to bound memory.
async fn read_content_length<R: AsyncRead + Unpin>(reader: &mut R) -> Result<usize, JsonRpcError> {
    let mut header = Vec::new();
    loop {
        let line = read_header_line(reader).await?;
        if line.is_empty() {
            break;
        }
        header.push(line);
        if header.len() > 128 {
            return Err(JsonRpcError::Protocol("too many headers".to_string()));
        }
    }

    for line in header {
        let mut parts = line.splitn(2, ':');
        let key = parts.next().unwrap_or_default().trim().to_ascii_lowercase();
        let value = parts.next().unwrap_or_default().trim();
        if key == "content-length" {
            return value
                .parse::<usize>()
                .map_err(|_| JsonRpcError::Protocol(format!("invalid Content-Length: {value}")));
        }
    }

    Err(JsonRpcError::Protocol(
        "missing Content-Length header".to_string(),
    ))
}

/// Read a single `\n`-terminated header line (CR/LF stripped), byte by byte.
/// Caps line length at 16 KiB to bound memory against misbehaving servers.
async fn read_header_line<R: AsyncRead + Unpin>(reader: &mut R) -> Result<String, JsonRpcError> {
    let mut buf = Vec::new();
    loop {
        let b = reader.read_u8().await?;
        buf.push(b);
        if buf.len() > 16 * 1024 {
            return Err(JsonRpcError::Protocol("header line too long".to_string()));
        }
        if buf.ends_with(b"\n") {
            break;
        }
    }
    let line = String::from_utf8_lossy(&buf);
    Ok(line.trim_end_matches(['\r', '\n']).to_string())
}

/// Convert an absolute path to a `file://` URI for LSP messages.
pub(crate) fn file_uri(path: &std::path::Path) -> Result<url::Url, JsonRpcError> {
    url::Url::from_file_path(path).map_err(|_| {
        JsonRpcError::Protocol(format!("failed to create file uri for {}", path.display()))
    })
}

/// Reader-side pump: forwards server notifications to a channel and answers
/// the handful of server-to-client requests that would otherwise hang
/// startup for common language servers.
pub(crate) struct JsonRpcPump {
    client: Arc<JsonRpcClient>,
    root_uri: String,
}

impl JsonRpcPump {
    pub(crate) fn new(client: Arc<JsonRpcClient>, root_uri: String) -> Self {
        Self { client, root_uri }
    }

    pub(crate) async fn run<R: AsyncRead + Unpin>(
        self,
        mut reader: R,
        notification_tx: tokio::sync::mpsc::UnboundedSender<(String, Option<Value>)>,
    ) -> Result<(), JsonRpcError> {
        loop {
            let msg = read_message(&mut reader).await?;
            match &msg {
                IncomingMessage::Notification { method, params, .. } => {
                    let _ = notification_tx.send((method.clone(), params.clone()));
                }
                IncomingMessage::Request {
                    id, method, params, ..
                } => {
                    tracing::debug!("lsp server request: method={method}");
                    match method.as_str() {
                        // Commonly used by LSP servers for dynamic registration.
                        // If the client doesn't respond, many servers will hang during startup.
                        "client/registerCapability" | "client/unregisterCapability" => {
                            self.client.respond(*id, Value::Null).await?;
                        }
                        // Many servers query config via this request.
                        // Return `null` for each requested section if we don't have a value.
                        "workspace/configuration" => {
                            let items_len = params
                                .as_ref()
                                .and_then(|v| v.get("items"))
                                .and_then(Value::as_array)
                                .map_or(0, std::vec::Vec::len);
                            tracing::debug!("lsp workspace/configuration: items_len={items_len}");
                            self.client
                                .respond(
                                    *id,
                                    Value::Array(
                                        std::iter::repeat_n(Value::Null, items_len).collect(),
                                    ),
                                )
                                .await?;
                        }
                        // Some servers ask for the workspace folders even if they got rootUri.
                        "workspace/workspaceFolders" => {
                            self.client
                                .respond(
                                    *id,
                                    serde_json::json!([{
                                        "uri": self.root_uri,
                                        "name": "root",
                                    }]),
                                )
                                .await?;
                        }
                        // Make this a no-op instead of hanging the server.
                        "window/showMessageRequest" => {
                            self.client.respond(*id, Value::Null).await?;
                        }
                        // Some servers use this to create progress notifications; respond OK even
                        // though we don't render progress.
                        "window/workDoneProgress/create" => {
                            self.client.respond(*id, Value::Null).await?;
                        }
                        other => {
                            self.client.respond_method_not_found(*id, other).await?;
                        }
                    }
                }
                IncomingMessage::Response(_) => {}
            }
            self.client.handle_incoming(msg).await?;
        }
    }
}

#[cfg(test)]
mod tests {
    use super::*;
    use pretty_assertions::assert_eq;
    use std::io::Cursor;
    use std::time::Duration;
    use tokio::io::AsyncWriteExt;
    use tokio::io::BufReader;

    #[tokio::test]
    async fn reads_single_message() {
        let payload = br#"{"jsonrpc":"2.0","method":"foo","params":{"x":1}}"#;
        let mut bytes = Vec::new();
        bytes.extend_from_slice(format!("Content-Length: {}\r\n\r\n", payload.len()).as_bytes());
        bytes.extend_from_slice(payload);
        let mut reader = BufReader::new(Cursor::new(bytes));
        let msg = read_message(&mut reader).await.unwrap();
        match msg {
            IncomingMessage::Notification { method, params, .. } => {
                assert_eq!(method, "foo");
                assert_eq!(params.unwrap()["x"], 1);
            }
            other => panic!("unexpected: {other:?}"),
        }
    }

    #[tokio::test]
    async fn responds_to_register_capability_request() {
        let (mut server_write, pump_read) = tokio::io::duplex(8 * 1024);
        let (client_write, mut server_read) = tokio::io::duplex(8 * 1024);

        let client = Arc::new(JsonRpcClient::new(Box::new(client_write)));
        let pump = JsonRpcPump::new(Arc::clone(&client), "file:///tmp".to_string());

        let (notif_tx, _notif_rx) = tokio::sync::mpsc::unbounded_channel();
        let pump_task = tokio::spawn(async move { pump.run(pump_read, notif_tx).await });

        let req = br#"{"jsonrpc":"2.0","id":7,"method":"client/registerCapability","params":{"registrations":[]}}"#;
        server_write
            .write_all(format!("Content-Length: {}\r\n\r\n", req.len()).as_bytes())
            .await
            .unwrap();
        server_write.write_all(req).await.unwrap();
        server_write.shutdown().await.unwrap();
        drop(server_write);

        let resp = tokio::time::timeout(Duration::from_secs(5), read_message(&mut server_read))
.await + .unwrap() + .unwrap(); + match resp { + IncomingMessage::Response(resp) => { + assert_eq!(resp.id, 7); + assert_eq!(resp.result, Value::Null); + } + other => panic!("unexpected: {other:?}"), + } + + pump_task.abort(); + } +} diff --git a/codex-rs/lsp/src/lib.rs b/codex-rs/lsp/src/lib.rs new file mode 100644 index 00000000000..239526431e7 --- /dev/null +++ b/codex-rs/lsp/src/lib.rs @@ -0,0 +1,21 @@ +mod jsonrpc; +mod lsp; +mod normalize; +mod servers; +mod status; +mod watcher; + +pub use crate::lsp::LspManager; +pub use crate::lsp::LspManagerConfig; +pub use crate::normalize::Diagnostic; +pub use crate::normalize::DiagnosticSeverity; +pub use crate::normalize::DiagnosticsUpdate; +pub use crate::normalize::DocumentSymbol; +pub use crate::normalize::Location; +pub use crate::normalize::Position; +pub use crate::normalize::Range; +pub use crate::servers::ServerConfig; +pub use crate::status::LspLanguageStatus; +pub use crate::status::LspServerSource; +pub use crate::status::LspStatus; +pub use crate::status::render_lsp_status; diff --git a/codex-rs/lsp/src/lsp.rs b/codex-rs/lsp/src/lsp.rs new file mode 100644 index 00000000000..80b0ce5eeb7 --- /dev/null +++ b/codex-rs/lsp/src/lsp.rs @@ -0,0 +1,1511 @@ +use std::collections::BTreeMap; +use std::collections::HashMap; +use std::path::Path; +use std::path::PathBuf; +use std::sync::Arc; +use std::time::Duration; + +use anyhow::Context; +use dunce::canonicalize as normalize_path; +use ignore::Match as GitignoreMatch; +use ignore::gitignore::Gitignore; +use ignore::gitignore::GitignoreBuilder; +use serde_json::Value; +use tokio::io::AsyncBufReadExt; +use tokio::process::Child; +use tokio::process::Command; +use tokio::sync::Mutex; +use tokio::sync::RwLock; +use tokio::sync::mpsc; +use tracing::warn; +use wildmatch::WildMatchPattern; + +use crate::jsonrpc::JsonRpcClient; +use crate::jsonrpc::JsonRpcPump; +use crate::jsonrpc::file_uri; +use crate::normalize::Diagnostic; +use crate::normalize::DiagnosticSeverity; +use 
crate::normalize::DiagnosticsUpdate;
use crate::normalize::DocumentSymbol;
use crate::normalize::Location;
use crate::normalize::Position;
use crate::normalize::Range;
use crate::servers::ServerConfig;
use crate::servers::autodetect_server;
use crate::servers::language_id_for_path;
use crate::status::LspStatus;
use crate::status::effective_server_config;
use crate::status::normalize_status_root;
use crate::status::status_language_ids;
use crate::watcher::ChangeKind;
use crate::watcher::FileChange;
use crate::watcher::start_watcher;

/// Language ids worth starting a server for eagerly at session start.
fn is_prewarm_language_id(language_id: &str) -> bool {
    matches!(
        language_id,
        "rust"
            | "go"
            | "csharp"
            | "python"
            | "typescript"
            | "typescriptreact"
            | "javascript"
            | "javascriptreact"
            | "php"
            | "perl"
    )
}

/// Build a gitignore matcher from `<root>/.gitignore` and
/// `<root>/.git/info/exclude`. Returns `None` when neither file contributed,
/// so callers can skip matching entirely.
fn build_root_gitignore(root: &Path) -> Option<Gitignore> {
    let mut builder = GitignoreBuilder::new(root);
    let mut any_added = false;

    let gitignore_path = root.join(".gitignore");
    if gitignore_path.is_file() {
        // `GitignoreBuilder::add` returns `Some(err)` on failure.
        if let Some(err) = builder.add(&gitignore_path) {
            warn!("failed to add {}: {err}", gitignore_path.display());
        } else {
            any_added = true;
        }
    }

    let git_exclude_path = root.join(".git").join("info").join("exclude");
    if git_exclude_path.is_file() {
        if let Some(err) = builder.add(&git_exclude_path) {
            warn!("failed to add {}: {err}", git_exclude_path.display());
        } else {
            any_added = true;
        }
    }

    if !any_added {
        return None;
    }

    match builder.build() {
        Ok(gitignore) => Some(gitignore),
        Err(err) => {
            warn!("failed to build gitignore matcher: {err}");
            None
        }
    }
}

/// True when `path` (inside `root`) is excluded by the gitignore matcher,
/// either directly or via an ignored parent. Paths outside `root` are never
/// considered ignored.
fn is_gitignored(gitignore: &Gitignore, root: &Path, path: &Path, is_dir: bool) -> bool {
    if !path.starts_with(root) {
        return false;
    }
    matches!(
        gitignore.matched_path_or_any_parents(path, is_dir),
        GitignoreMatch::Ignore(_)
    )
}

/// Bounded breadth-limited scan of `root` that collects the prewarm-eligible
/// language ids present in the tree. Honors `ignored_globs`, gitignore rules,
/// and hard caps on entries/depth so it stays cheap on huge repos.
fn detect_language_ids_for_root(
    root: &Path,
    ignored_globs: &[String],
) -> std::collections::BTreeSet<String> {
    const MAX_ENTRIES: usize = 10_000;
    const MAX_DEPTH: usize = 6;

    let ignored: Vec<WildMatchPattern<'*', '?'>> = ignored_globs
        .iter()
        .map(|s| WildMatchPattern::new_case_insensitive(s))
        .collect();
    let gitignore = build_root_gitignore(root);

    let mut found: std::collections::BTreeSet<String> = std::collections::BTreeSet::new();
    let mut queue = vec![(root.to_path_buf(), 0usize)];
    let mut seen = 0usize;

    while let Some((dir, depth)) = queue.pop() {
        if seen >= MAX_ENTRIES || depth > MAX_DEPTH {
            continue;
        }

        if ignored.iter().any(|p| p.matches(&dir.to_string_lossy())) {
            continue;
        }
        if gitignore
            .as_ref()
            .is_some_and(|gitignore| is_gitignored(gitignore, root, &dir, true))
        {
            continue;
        }

        let Ok(rd) = std::fs::read_dir(&dir) else {
            continue;
        };

        for entry in rd.flatten() {
            if seen >= MAX_ENTRIES {
                break;
            }
            seen += 1;

            let path = entry.path();
            let ft = match entry.file_type() {
                Ok(ft) => ft,
                Err(_) => continue,
            };

            if ft.is_dir() {
                let name = path
                    .file_name()
                    .and_then(|n| n.to_str())
                    .unwrap_or_default();
                // Hard-coded skip list for directories that are never useful.
                if matches!(name, ".git" | "node_modules" | "target") {
                    continue;
                }
                if ignored.iter().any(|p| p.matches(&path.to_string_lossy())) {
                    continue;
                }
                if gitignore
                    .as_ref()
                    .is_some_and(|gitignore| is_gitignored(gitignore, root, &path, true))
                {
                    continue;
                }
                queue.push((path, depth.saturating_add(1)));
                continue;
            }

            if !ft.is_file() {
                continue;
            }

            if ignored.iter().any(|p| p.matches(&path.to_string_lossy())) {
                continue;
            }
            if gitignore
                .as_ref()
                .is_some_and(|gitignore| is_gitignored(gitignore, root, &path, false))
            {
                continue;
            }

            if let Some(language_id) = language_id_for_path(&path)
                && is_prewarm_language_id(language_id)
            {
                found.insert(language_id.to_string());
                // NOTE(review): early-exit at 9 even though 10 language ids are
                // prewarm-eligible, so the 10th can never be detected — confirm
                // whether this cap is intentional.
                if found.len() >= 9 {
                    return found;
                }
            }
        }
    }

    found
}

/// Absolutize `root` against the current directory if needed, then normalize
/// (via `dunce`) to a canonical form; fall back to the joined path on error.
pub(crate) fn normalize_root_path(root: &Path) -> PathBuf {
    let root = if root.is_absolute() {
        root.to_path_buf()
    } else if let Ok(cwd) =
std::env::current_dir() {
        cwd.join(root)
    } else {
        root.to_path_buf()
    };
    normalize_path(&root).unwrap_or(root)
}

/// Absolutize `path` against `root` if relative, then normalize; fall back
/// to the joined path when normalization fails (e.g. file does not exist).
fn normalize_file_path(root: &Path, path: &Path) -> PathBuf {
    let path = if path.is_absolute() {
        path.to_path_buf()
    } else {
        root.join(path)
    };
    normalize_path(&path).unwrap_or(path)
}

/// Configuration for [`LspManager`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct LspManagerConfig {
    pub enabled: bool,
    pub servers: HashMap<String, ServerConfig>,
    pub ignored_globs: Vec<String>,
    pub max_file_bytes: usize,
}

impl Default for LspManagerConfig {
    fn default() -> Self {
        Self {
            enabled: false,
            servers: HashMap::new(),
            ignored_globs: vec![
                "**/.git/**".to_string(),
                "**/node_modules/**".to_string(),
                "**/target/**".to_string(),
            ],
            // 512 KiB: files larger than this are not synced to servers.
            max_file_bytes: 512 * 1024,
        }
    }
}

#[derive(Default)]
struct LspState {
    workspaces: HashMap<PathBuf, WorkspaceState>,
}

/// Cheaply cloneable handle to the shared LSP state.
#[derive(Clone)]
pub struct LspManager {
    inner: Arc<Inner>,
}

struct Inner {
    config: RwLock<LspManagerConfig>,
    state: Mutex<LspState>,
}

impl LspManager {
    pub fn new(config: LspManagerConfig) -> Self {
        Self {
            inner: Arc::new(Inner {
                config: RwLock::new(config),
                state: Mutex::new(LspState::default()),
            }),
        }
    }

    /// Replace the manager configuration wholesale.
    pub async fn set_config(&self, config: LspManagerConfig) {
        *self.inner.config.write().await = config;
    }

    pub async fn enabled(&self) -> bool {
        self.inner.config.read().await.enabled
    }

    /// Idempotently set up workspace state for `root`: glob/gitignore
    /// matchers, a filesystem watcher, and the change-processing task.
    /// No-op when LSP support is disabled or the workspace already exists.
    pub async fn ensure_workspace(&self, root: &Path) -> anyhow::Result<()> {
        let root = normalize_root_path(root);
        let config = self.inner.config.read().await.clone();
        if !config.enabled {
            return Ok(());
        }

        let mut state = self.inner.state.lock().await;
        if state.workspaces.contains_key(&root) {
            return Ok(());
        }

        let ignored = config
            .ignored_globs
            .iter()
            .map(|s| WildMatchPattern::new_case_insensitive(s))
            .collect();
        let gitignore = build_root_gitignore(&root);

        let (tx, rx) = mpsc::unbounded_channel();
        let watcher = start_watcher(&root, tx).context("start filesystem watcher")?;

        state.workspaces.insert(
            root.clone(),
            WorkspaceState {
                root: root.clone(),
                _watcher: watcher,
                ignored,
                gitignore,
                servers: HashMap::new(),
                open_docs: HashMap::new(),
                diagnostics: BTreeMap::new(),
                diag_updates: tokio::sync::broadcast::channel(512).0,
            },
        );

        let manager = self.clone();
        let root = root.clone();
        tokio::spawn(async move {
            manager.run_change_loop(root, rx).await;
        });

        Ok(())
    }

    /// Best-effort background warmup for the LSP manager and configured language servers.
    ///
    /// Intended to run at session start so the first user-triggered LSP request doesn’t pay the
    /// full server spawn + initialize + indexing cost.
    pub async fn prewarm(&self, root: &Path) -> anyhow::Result<()> {
        let root = normalize_root_path(root);
        let config = self.inner.config.read().await.clone();
        if !config.enabled {
            return Ok(());
        }

        self.ensure_workspace(&root).await?;

        // Detection walks the filesystem; run it off the async runtime.
        let detected = tokio::task::spawn_blocking({
            let root = root.clone();
            let ignored_globs = config.ignored_globs.clone();
            move || detect_language_ids_for_root(&root, &ignored_globs)
        })
        .await
        .unwrap_or_default();

        // Union of explicitly configured prewarm languages and detected ones.
        let mut language_ids: std::collections::BTreeSet<String> = config
            .servers
            .keys()
            .filter(|id| is_prewarm_language_id(id))
            .cloned()
            .collect();
        language_ids.extend(detected);

        for language_id in language_ids {
            {
                let state = self.inner.state.lock().await;
                let Some(ws) = state.workspaces.get(&root) else {
                    continue;
                };
                if ws.servers.contains_key(&language_id) {
                    continue;
                }
            }

            if !config.servers.contains_key(&language_id)
                && autodetect_server(&language_id).is_none()
            {
                continue;
            }

            match start_server(&root, &language_id, &config).await {
                Ok(server) => {
                    // Re-check under the lock: another task may have raced us.
                    let mut inserted = false;
                    {
                        let mut state = self.inner.state.lock().await;
                        let Some(ws) = state.workspaces.get_mut(&root) else {
                            continue;
                        };
                        if !ws.servers.contains_key(&language_id) {
                            ws.servers
                                .insert(language_id.to_string(), Arc::clone(&server));
                            inserted = true;
                        }
                    }
                    if inserted {
                        self.spawn_notification_loop(root.clone(), server);
                    } else {
                        shutdown_unused_server(&language_id, server).await;
                    }
                }
                Err(err) => {
                    warn!("lsp prewarm failed for {language_id}: {err:#}");
                }
            }
        }

        Ok(())
    }

    /// Consume watcher events for `root`, debounce per-path (75ms), and push
    /// file contents to the relevant language server.
    async fn run_change_loop(&self, root: PathBuf, mut rx: mpsc::UnboundedReceiver<FileChange>) {
        // NOTE(review): entries are never evicted, so this map grows with the
        // set of touched paths for the life of the workspace — confirm this is
        // an acceptable bound.
        let mut last_seen: HashMap<PathBuf, tokio::time::Instant> = HashMap::new();
        while let Some(change) = rx.recv().await {
            if change.kind == ChangeKind::Remove {
                continue;
            }
            if let Some(prev) = last_seen.get(&change.path)
                && prev.elapsed() < Duration::from_millis(75)
            {
                continue;
            }
            last_seen.insert(change.path.clone(), tokio::time::Instant::now());

            let language_id = match language_id_for_path(&change.path) {
                Some(id) => id,
                None => continue,
            };

            let bytes = match tokio::fs::read(&change.path).await {
                Ok(bytes) => bytes,
                Err(_) => continue,
            };
            if let Err(err) = self
                .open_or_update_with_text(&root, &change.path, language_id, bytes)
                .await
            {
                warn!("lsp sync failed: {err:#}");
            }
        }
    }

    /// Snapshot current diagnostics, optionally filtered to one file and
    /// capped at `max_results`. Returns an empty list when LSP is disabled.
    pub async fn diagnostics(
        &self,
        root: &Path,
        path: Option<&Path>,
        max_results: usize,
    ) -> anyhow::Result<Vec<Diagnostic>> {
        let root = normalize_root_path(root);
        let config = self.inner.config.read().await.clone();
        if !config.enabled {
            return Ok(Vec::new());
        }
        self.ensure_workspace(&root).await?;
        let path = path.map(|path| normalize_file_path(&root, path));

        // Best-effort: make sure we have something to report for explicit queries.
        // Many servers don't proactively publish diagnostics unless a document is opened/updated.
+ if let Some(path) = path.as_deref() { + let _ = self.open_or_update(&root, path).await; + } else { + let _ = self.seed_diagnostics(&root).await; + } + + let state = self.inner.state.lock().await; + let ws = state + .workspaces + .get(&root) + .context("workspace not initialized")?; + Ok(ws.collect_diagnostics(path.as_deref(), max_results)) + } + + pub async fn diagnostics_wait( + &self, + root: &Path, + path: Option<&Path>, + max_results: usize, + timeout: Duration, + ) -> anyhow::Result> { + let root = normalize_root_path(root); + let config = self.inner.config.read().await.clone(); + if !config.enabled { + return Ok(Vec::new()); + } + + self.ensure_workspace(&root).await?; + let path = path.map(|path| normalize_file_path(&root, path)); + + // Subscribe before triggering open/update so we don't miss a fast publishDiagnostics. + let mut updates = self.subscribe_diagnostics(&root).await?; + + // Best-effort: make sure we have something to report for explicit queries. + // Many servers don't proactively publish diagnostics unless a document is opened/updated. + if let Some(path) = path.as_deref() { + let _ = self.open_or_update(&root, path).await; + + // For pull-diagnostics servers, try again with a caller-provided timeout. + // Some servers can take longer than the default per-request timeout while warming up. 
+ if timeout > Duration::from_secs(3) + && let Ok(server) = self.server_for_path(&root, path).await + { + let _ = self + .refresh_pull_diagnostics_with_timeout( + root.clone(), + path.to_path_buf(), + server, + timeout, + ) + .await; + } + } else { + let _ = self.seed_diagnostics(&root).await; + } + + if timeout.is_zero() { + return self.diagnostics(&root, path.as_deref(), max_results).await; + } + + let start = tokio::time::Instant::now(); + let deadline = start.checked_add(timeout).unwrap_or(start); + let target_path = path.map(|p| normalize_path(&p).unwrap_or(p)); + + loop { + let diags = self + .diagnostics(&root, target_path.as_deref(), max_results) + .await?; + if !diags.is_empty() { + return Ok(diags); + } + + let now = tokio::time::Instant::now(); + let remaining = deadline.saturating_duration_since(now); + if remaining.is_zero() { + return Ok(diags); + } + + let update = match tokio::time::timeout(remaining, updates.recv()).await { + Ok(Ok(update)) => update, + Ok(Err(tokio::sync::broadcast::error::RecvError::Lagged(_))) => continue, + Ok(Err(tokio::sync::broadcast::error::RecvError::Closed)) => return Ok(diags), + Err(_) => return Ok(diags), + }; + + if let Some(target_path) = &target_path { + let update_path = PathBuf::from(&update.path); + if update_path != *target_path { + continue; + } + } + + // We saw an update relevant to this request. Return whatever we have now (which may be + // empty if the server explicitly cleared diagnostics). 
+ return self + .diagnostics(&root, target_path.as_deref(), max_results) + .await; + } + } + + async fn seed_diagnostics(&self, root: &Path) -> anyhow::Result<()> { + let root = normalize_root_path(root); + let config = self.inner.config.read().await.clone(); + if !config.enabled { + return Ok(()); + } + + let ignored: Vec> = config + .ignored_globs + .iter() + .map(|s| WildMatchPattern::new_case_insensitive(s)) + .collect(); + let gitignore = build_root_gitignore(&root); + + // Only do work if we have no diagnostics yet. + { + let state = self.inner.state.lock().await; + if let Some(ws) = state.workspaces.get(&root) + && (!ws.diagnostics.is_empty() || !ws.open_docs.is_empty()) + { + return Ok(()); + } + } + + let mut queue = vec![(root.clone(), 0usize)]; + let mut seen = 0usize; + while let Some((dir, depth)) = queue.pop() { + if depth > 4 || seen > 2000 { + break; + } + if ignored.iter().any(|p| p.matches(&dir.to_string_lossy())) { + continue; + } + if gitignore + .as_ref() + .is_some_and(|gitignore| is_gitignored(gitignore, &root, &dir, true)) + { + continue; + } + let Ok(rd) = std::fs::read_dir(&dir) else { + continue; + }; + for entry in rd.flatten() { + if seen > 2000 { + break; + } + seen += 1; + + let path = entry.path(); + let file_type = match entry.file_type() { + Ok(t) => t, + Err(_) => continue, + }; + if file_type.is_dir() { + let name = path + .file_name() + .and_then(|s| s.to_str()) + .map(str::to_ascii_lowercase) + .unwrap_or_default(); + if matches!( + name.as_str(), + ".git" | "node_modules" | "target" | "bin" | "obj" + ) { + continue; + } + if ignored.iter().any(|p| p.matches(&path.to_string_lossy())) { + continue; + } + if gitignore + .as_ref() + .is_some_and(|gitignore| is_gitignored(gitignore, &root, &path, true)) + { + continue; + } + queue.push((path, depth.saturating_add(1))); + continue; + } + + if !file_type.is_file() { + continue; + } + + if ignored.iter().any(|p| p.matches(&path.to_string_lossy())) { + continue; + } + if gitignore + 
.as_ref() + .is_some_and(|gitignore| is_gitignored(gitignore, &root, &path, false)) + { + continue; + } + + if let Some(language_id) = language_id_for_path(&path) + && (is_prewarm_language_id(language_id) + || config.servers.contains_key(language_id)) + { + self.open_or_update(&root, &path).await?; + return Ok(()); + } + } + } + + Ok(()) + } + + pub async fn status(&self, root: &Path) -> anyhow::Result { + let root = normalize_status_root(root); + let config = self.inner.config.read().await.clone(); + + if config.enabled { + self.ensure_workspace(&root).await?; + } + + let (workspace_initialized, running_servers, pull_diagnostics) = { + let state = self.inner.state.lock().await; + match state.workspaces.get(&root) { + Some(ws) => ( + true, + ws.servers.keys().cloned().collect::>(), + ws.servers + .iter() + .map(|(language_id, server)| { + (language_id.clone(), server.supports_pull_diagnostics) + }) + .collect::>(), + ), + None => (false, Vec::new(), HashMap::new()), + } + }; + + let language_ids = status_language_ids( + config.servers.keys().cloned(), + running_servers.clone().into_iter(), + ); + + let mut languages = Vec::new(); + for language_id in language_ids { + let running = running_servers.contains(&language_id); + let supports_pull_diagnostics = if running { + pull_diagnostics.get(&language_id).copied() + } else { + None + }; + let configured = config.servers.get(&language_id); + let (configured, autodetected, effective) = + effective_server_config(&language_id, configured); + + languages.push(crate::status::LspLanguageStatus { + language_id, + configured, + autodetected, + effective, + running, + supports_pull_diagnostics, + }); + } + + Ok(LspStatus { + enabled: config.enabled, + root, + ignored_globs: config.ignored_globs, + max_file_bytes: config.max_file_bytes, + workspace_initialized, + languages, + }) + } + + pub async fn subscribe_diagnostics( + &self, + root: &Path, + ) -> anyhow::Result> { + let root = normalize_root_path(root); + 
self.ensure_workspace(&root).await?; + let state = self.inner.state.lock().await; + let ws = state + .workspaces + .get(&root) + .context("workspace not initialized")?; + Ok(ws.diag_updates.subscribe()) + } + + pub async fn definition( + &self, + root: &Path, + path: &Path, + position: Position, + ) -> anyhow::Result> { + let root = normalize_root_path(root); + let path = normalize_file_path(&root, path); + self.ensure_workspace(&root).await?; + self.open_or_update(&root, &path).await?; + let server = self.server_for_path(&root, &path).await?; + server.definition(&path, position).await + } + + pub async fn references( + &self, + root: &Path, + path: &Path, + position: Position, + include_declaration: bool, + ) -> anyhow::Result> { + let root = normalize_root_path(root); + let path = normalize_file_path(&root, path); + self.ensure_workspace(&root).await?; + self.open_or_update(&root, &path).await?; + let server = self.server_for_path(&root, &path).await?; + server + .references(&path, position, include_declaration) + .await + } + + pub async fn document_symbols( + &self, + root: &Path, + path: &Path, + ) -> anyhow::Result> { + let root = normalize_root_path(root); + let path = normalize_file_path(&root, path); + self.ensure_workspace(&root).await?; + self.open_or_update(&root, &path).await?; + let server = self.server_for_path(&root, &path).await?; + server.document_symbols(&path).await + } + + async fn server_for_path(&self, root: &Path, path: &Path) -> anyhow::Result> { + let language_id = language_id_for_path(path).context("unknown language id for file")?; + let state = self.inner.state.lock().await; + let ws = state + .workspaces + .get(root) + .context("workspace not initialized")?; + ws.servers + .get(language_id) + .cloned() + .with_context(|| format!("language server not started for '{language_id}'")) + } + + async fn open_or_update(&self, root: &Path, path: &Path) -> anyhow::Result<()> { + let bytes = tokio::fs::read(path) + .await + .with_context(|| 
format!("read {}", path.display()))?;
        let language_id = language_id_for_path(path).context("unknown language id for file")?;
        self.open_or_update_with_text(root, path, language_id, bytes)
            .await
    }

    /// Sync `bytes` for `path` to the language server for `language_id`,
    /// starting the server on demand. First sync sends `didOpen`, subsequent
    /// syncs send `didChange`; finishes with a best-effort pull-diagnostics
    /// refresh.
    async fn open_or_update_with_text(
        &self,
        root: &Path,
        path: &Path,
        language_id: &str,
        bytes: Vec<u8>,
    ) -> anyhow::Result<()> {
        let root = normalize_root_path(root);
        let path = normalize_file_path(&root, path);
        let config = self.inner.config.read().await.clone();
        if !config.enabled {
            return Ok(());
        }

        // Fix: apply the size cap before decoding. Previously the whole file
        // was lossily copied into a String even when it was about to be
        // skipped for exceeding `max_file_bytes`.
        if config.max_file_bytes > 0 && bytes.len() > config.max_file_bytes {
            return Ok(());
        }
        let text = String::from_utf8_lossy(&bytes).to_string();

        let (server, version, is_open) = {
            let mut state = self.inner.state.lock().await;
            let ws = state
                .workspaces
                .get_mut(&root)
                .context("workspace not initialized")?;

            if ws.is_ignored(&path) {
                return Ok(());
            }

            let server = ws.servers.get(language_id).cloned();

            // Bump the per-document version; version 1 means "first open".
            let doc = ws
                .open_docs
                .entry(path.clone())
                .or_insert(OpenDocState { version: 0 });
            doc.version = doc.version.saturating_add(1);

            let version = doc.version;
            (server, version, version == 1)
        };

        let server = match server {
            Some(server) => server,
            None => {
                // Start outside the lock, then re-check for a racing insert.
                let spawned = start_server(&root, language_id, &config).await?;
                let mut inserted = false;
                let effective = {
                    let mut state = self.inner.state.lock().await;
                    let ws = state
                        .workspaces
                        .get_mut(&root)
                        .context("workspace not initialized")?;
                    if ws.is_ignored(&path) {
                        return Ok(());
                    }
                    if let Some(existing) = ws.servers.get(language_id).cloned() {
                        existing
                    } else {
                        ws.servers
                            .insert(language_id.to_string(), Arc::clone(&spawned));
                        inserted = true;
                        Arc::clone(&spawned)
                    }
                };
                if inserted {
                    self.spawn_notification_loop(root.clone(), Arc::clone(&effective));
                } else {
                    shutdown_unused_server(language_id, spawned).await;
                }
                effective
            }
        };

        if is_open {
            server.did_open(&path, language_id, version, &text).await?;
        } else {
            server.did_change(&path, version, &text).await?;
        }

        if let Err(err) = self
            .refresh_pull_diagnostics(root.clone(), path.clone(), Arc::clone(&server))
            .await
        {
            warn!("lsp pull diagnostics failed: {err:#}");
        }
        Ok(())
    }

    /// Pull-diagnostics refresh with the default 3s per-request timeout.
    async fn refresh_pull_diagnostics(
        &self,
        root: PathBuf,
        path: PathBuf,
        server: Arc<ServerState>,
    ) -> anyhow::Result<()> {
        self.refresh_pull_diagnostics_with_timeout(root, path, server, Duration::from_secs(3))
            .await
    }

    /// Request diagnostics for `path` via the pull model (if the server
    /// supports it), store the result, and broadcast an update.
    async fn refresh_pull_diagnostics_with_timeout(
        &self,
        root: PathBuf,
        path: PathBuf,
        server: Arc<ServerState>,
        timeout: Duration,
    ) -> anyhow::Result<()> {
        // `None` means the server has no pull-diagnostics support.
        let Some(diags) = server.pull_diagnostics_with_timeout(&path, timeout).await? else {
            return Ok(());
        };

        let path = normalize_path(&path).unwrap_or(path);
        let update = DiagnosticsUpdate {
            path: path.to_string_lossy().into_owned(),
            diagnostics: diags.clone(),
        };
        // Take the sender while holding the lock, send after releasing it.
        let update_sender = {
            let mut state = self.inner.state.lock().await;
            let Some(ws) = state.workspaces.get_mut(&root) else {
                return Ok(());
            };
            ws.diagnostics.insert(path, diags);
            ws.diag_updates.clone()
        };
        let _ = update_sender.send(update);
        Ok(())
    }

    /// Background task: translate `textDocument/publishDiagnostics`
    /// notifications from `server` into stored diagnostics + broadcasts.
    fn spawn_notification_loop(&self, root: PathBuf, server: Arc<ServerState>) {
        let manager = self.clone();
        let mut rx = server.notifications.subscribe();
        tokio::spawn(async move {
            while let Ok((method, params)) = rx.recv().await {
                if method != "textDocument/publishDiagnostics" {
                    continue;
                }
                let Some(params) = params else { continue };
                let Some((path, diags)) = parse_publish_diagnostics(&params) else {
                    continue;
                };
                let path = normalize_path(&path).unwrap_or(path);
                let update = DiagnosticsUpdate {
                    path: path.to_string_lossy().into_owned(),
                    diagnostics: diags.clone(),
                };
                let update_sender = {
                    let mut state = manager.inner.state.lock().await;
                    let Some(ws) = state.workspaces.get_mut(&root) else {
                        continue;
                    };
                    ws.diagnostics.insert(path, diags);
ws.diag_updates.clone() + }; + let _ = update_sender.send(update); + } + }); + } +} + +struct WorkspaceState { + root: PathBuf, + _watcher: notify::RecommendedWatcher, + ignored: Vec>, + gitignore: Option, + servers: HashMap>, + open_docs: HashMap, + diagnostics: BTreeMap>, + diag_updates: tokio::sync::broadcast::Sender, +} + +impl WorkspaceState { + fn is_ignored(&self, path: &Path) -> bool { + let text = path.to_string_lossy(); + if self.ignored.iter().any(|p| p.matches(&text)) { + return true; + } + let Some(gitignore) = &self.gitignore else { + return false; + }; + is_gitignored(gitignore, &self.root, path, false) + } + + fn collect_diagnostics(&self, path: Option<&Path>, max_results: usize) -> Vec { + let mut out = Vec::new(); + if let Some(path) = path { + if let Some(diags) = self.diagnostics.get(path) { + out.extend(diags.iter().cloned()); + } + } else { + for diags in self.diagnostics.values() { + out.extend(diags.iter().cloned()); + } + } + out.sort_by(|a, b| { + ( + severity_rank(a.severity), + &a.path, + a.range.start.line, + a.range.start.character, + ) + .cmp(&( + severity_rank(b.severity), + &b.path, + b.range.start.line, + b.range.start.character, + )) + }); + if max_results > 0 && out.len() > max_results { + out.truncate(max_results); + } + out + } +} + +fn severity_rank(sev: Option) -> u8 { + match sev { + Some(DiagnosticSeverity::Error) => 0, + Some(DiagnosticSeverity::Warning) => 1, + Some(DiagnosticSeverity::Information) => 2, + Some(DiagnosticSeverity::Hint) => 3, + None => 4, + } +} + +#[derive(Debug, Clone)] +struct OpenDocState { + version: i32, +} + +struct ServerState { + client: Arc, + _child: Mutex, + notifications: tokio::sync::broadcast::Sender<(String, Option)>, + supports_pull_diagnostics: bool, +} + +impl Drop for ServerState { + fn drop(&mut self) { + if let Ok(mut child) = self._child.try_lock() { + let _ = child.start_kill(); + } + } +} + +impl ServerState { + async fn did_open( + &self, + path: &Path, + language_id: &str, + 
version: i32, + text: &str, + ) -> anyhow::Result<()> { + let uri = file_uri(path)?.to_string(); + tracing::debug!( + "lsp didOpen: uri={} language_id={} version={} bytes={}", + uri, + language_id, + version, + text.len() + ); + let params = serde_json::json!({ + "textDocument": { + "uri": uri, + "languageId": language_id, + "version": version, + "text": text, + } + }); + self.client + .notify("textDocument/didOpen", Some(params)) + .await + .map_err(anyhow::Error::from)?; + Ok(()) + } + + async fn did_change(&self, path: &Path, version: i32, text: &str) -> anyhow::Result<()> { + let uri = file_uri(path)?.to_string(); + let params = serde_json::json!({ + "textDocument": { "uri": uri, "version": version }, + "contentChanges": [{ "text": text }] + }); + self.client + .notify("textDocument/didChange", Some(params)) + .await + .map_err(anyhow::Error::from)?; + Ok(()) + } + + async fn definition(&self, path: &Path, position: Position) -> anyhow::Result> { + let uri = file_uri(path)?.to_string(); + let params = serde_json::json!({ + "textDocument": { "uri": uri }, + "position": { "line": position.line.saturating_sub(1), "character": position.character.saturating_sub(1) } + }); + let value = self + .client + .request("textDocument/definition", Some(params)) + .await + .map_err(anyhow::Error::from)?; + Ok(parse_locations(value)) + } + + async fn references( + &self, + path: &Path, + position: Position, + include_declaration: bool, + ) -> anyhow::Result> { + let uri = file_uri(path)?.to_string(); + let params = serde_json::json!({ + "textDocument": { "uri": uri }, + "position": { "line": position.line.saturating_sub(1), "character": position.character.saturating_sub(1) }, + "context": { "includeDeclaration": include_declaration } + }); + let value = self + .client + .request("textDocument/references", Some(params)) + .await + .map_err(anyhow::Error::from)?; + Ok(parse_locations(value)) + } + + async fn document_symbols(&self, path: &Path) -> anyhow::Result> { + let uri = 
file_uri(path)?.to_string(); + let params = serde_json::json!({ "textDocument": { "uri": uri }}); + let value = self + .client + .request("textDocument/documentSymbol", Some(params)) + .await + .map_err(anyhow::Error::from)?; + Ok(parse_document_symbols(value)) + } + + async fn pull_diagnostics_with_timeout( + &self, + path: &Path, + timeout: Duration, + ) -> anyhow::Result>> { + if !self.supports_pull_diagnostics { + return Ok(None); + } + + let uri = file_uri(path)?.to_string(); + tracing::debug!("lsp pull diagnostics: uri={uri}"); + let params = serde_json::json!({ "textDocument": { "uri": uri }}); + let value = tokio::time::timeout( + timeout, + self.client + .request("textDocument/diagnostic", Some(params.clone())), + ) + .await + .with_context(|| { + format!( + "LSP textDocument/diagnostic timed out after {}ms", + timeout.as_millis() + ) + })? + .map_err(anyhow::Error::from)?; + + if value.get("kind").and_then(Value::as_str) == Some("unchanged") { + return Ok(None); + } + + let diags = parse_text_document_diagnostic(path, &value).unwrap_or_default(); + tracing::debug!("lsp pull diagnostics: count={}", diags.len()); + Ok(Some(diags)) + } +} + +async fn start_server( + root: &Path, + language_id: &str, + config: &LspManagerConfig, +) -> anyhow::Result> { + let server_config = config + .servers + .get(language_id) + .cloned() + .or_else(|| autodetect_server(language_id)) + .with_context(|| format!("no language server configured for '{language_id}'"))?; + + let mut args = server_config.args.clone(); + + // `csharp-ls` supports `--solution ` and needs a workspace to provide project-level + // diagnostics. If we can unambiguously pick a solution file at the workspace root, pass it. 
+ if language_id == "csharp" && args.is_empty() { + let looks_like_csharp_ls = std::path::Path::new(&server_config.command) + .file_name() + .and_then(|s| s.to_str()) + .map(|s| s.to_ascii_lowercase().starts_with("csharp-ls")) + .unwrap_or(false); + + if looks_like_csharp_ls { + let mut slns = Vec::new(); + if let Ok(rd) = std::fs::read_dir(root) { + for entry in rd.flatten() { + let path = entry.path(); + if !path.is_file() { + continue; + } + let ext = path + .extension() + .and_then(|s| s.to_str()) + .map(str::to_ascii_lowercase); + if matches!(ext.as_deref(), Some("sln" | "slnx")) + && let Some(name) = path.file_name().and_then(|s| s.to_str()) + { + slns.push(name.to_string()); + } + } + } + slns.sort(); + slns.dedup(); + if slns.len() == 1 { + args.push("--solution".to_string()); + args.push(slns[0].clone()); + } + } + } + + let mut cmd = Command::new(&server_config.command); + cmd.args(&args) + .current_dir(root) + .stdin(std::process::Stdio::piped()) + .stdout(std::process::Stdio::piped()) + .stderr(std::process::Stdio::piped()); + let mut child = cmd.spawn().with_context(|| { + format!( + "spawn language server {} {}", + server_config.command, + server_config.args.join(" ") + ) + })?; + + let stdin = child + .stdin + .take() + .context("missing language server stdin")?; + let stdout = child + .stdout + .take() + .context("missing language server stdout")?; + let stderr = child + .stderr + .take() + .context("missing language server stderr")?; + + let client = Arc::new(JsonRpcClient::new(Box::new(stdin))); + + let (tx, _) = tokio::sync::broadcast::channel(512); + let pump_tx = tx.clone(); + let root_uri = file_uri(root)?.to_string(); + let pump = JsonRpcPump::new(Arc::clone(&client), root_uri); + tokio::spawn(async move { + let (notif_tx, mut notif_rx) = mpsc::unbounded_channel(); + tokio::spawn(async move { + while let Some((method, params)) = notif_rx.recv().await { + let _ = pump_tx.send((method, params)); + } + }); + if let Err(err) = pump.run(stdout, 
notif_tx).await { + warn!("jsonrpc pump exited: {err}"); + } + }); + + tokio::spawn(async move { + let mut reader = tokio::io::BufReader::new(stderr); + let mut line = String::new(); + loop { + line.clear(); + match reader.read_line(&mut line).await { + Ok(0) => break, + Ok(_) => tracing::debug!("lsp stderr: {}", line.trim_end()), + Err(_) => break, + } + } + }); + + let supports_pull_diagnostics = initialize(&client, root).await?; + + Ok(Arc::new(ServerState { + client, + _child: Mutex::new(child), + notifications: tx, + supports_pull_diagnostics, + })) +} + +async fn shutdown_unused_server(language_id: &str, server: Arc) { + let mut child = server._child.lock().await; + if let Err(err) = child.kill().await { + tracing::debug!("failed to kill unused lsp server {language_id}: {err}"); + } +} + +async fn initialize(client: &JsonRpcClient, root: &Path) -> anyhow::Result { + let root_uri = file_uri(root)?.to_string(); + let root_uri_folder = root_uri.clone(); + let process_id = std::process::id(); + let params = serde_json::json!({ + "processId": process_id, + "rootUri": root_uri, + "workspaceFolders": [{ "uri": root_uri_folder, "name": "root" }], + "capabilities": { + "textDocument": { + "definition": { "dynamicRegistration": false }, + "references": { "dynamicRegistration": false }, + "documentSymbol": { "dynamicRegistration": false }, + // Some servers (notably `typescript-language-server`) only publish diagnostics if + // the client advertises `textDocument.publishDiagnostics` support. + "publishDiagnostics": { "relatedInformation": true }, + // Some servers (notably `csharp-ls`) support pull diagnostics via + // `textDocument/diagnostic` and require the client to advertise this capability. 
+ "diagnostic": { "dynamicRegistration": false, "relatedDocumentSupport": true }, + "synchronization": { "didSave": true } + }, + "workspace": { "workspaceFolders": true } + }, + "clientInfo": { "name": "codexel", "version": env!("CARGO_PKG_VERSION") } + }); + let init_result = tokio::time::timeout( + Duration::from_secs(30), + client.request::("initialize", Some(params)), + ) + .await + .context("LSP initialize timed out")??; + client + .notify::("initialized", Some(serde_json::json!({}))) + .await?; + + let supports_pull_diagnostics = init_result + .get("capabilities") + .and_then(|caps| caps.get("diagnosticProvider")) + .is_some(); + + Ok(supports_pull_diagnostics) +} + +fn parse_publish_diagnostics(params: &Value) -> Option<(PathBuf, Vec)> { + let uri = params.get("uri")?.as_str()?; + let path = url::Url::parse(uri) + .ok()? + .to_file_path() + .ok() + .map(|path| normalize_path(&path).unwrap_or(path))?; + let raw = params.get("diagnostics")?.as_array()?; + let mut out = Vec::with_capacity(raw.len()); + for d in raw { + if let Some(diag) = parse_diagnostic(&path, d) { + out.push(diag); + } + } + Some((path, out)) +} + +fn parse_text_document_diagnostic(path: &Path, value: &Value) -> Option> { + let Value::Object(map) = value else { + return None; + }; + if map.get("kind").and_then(Value::as_str) == Some("unchanged") { + return None; + } + let items = map.get("items")?.as_array()?; + + let mut out = Vec::with_capacity(items.len()); + for item in items { + if let Some(diag) = parse_diagnostic(path, item) { + out.push(diag); + } + } + Some(out) +} + +fn parse_diagnostic(path: &Path, v: &Value) -> Option { + let message = v.get("message")?.as_str()?.to_string(); + let range = parse_range(v.get("range")?)?; + let severity = v + .get("severity") + .and_then(Value::as_u64) + .and_then(|n| match n { + 1 => Some(DiagnosticSeverity::Error), + 2 => Some(DiagnosticSeverity::Warning), + 3 => Some(DiagnosticSeverity::Information), + 4 => Some(DiagnosticSeverity::Hint), + _ 
=> None, + }); + let source = v + .get("source") + .and_then(Value::as_str) + .map(ToString::to_string); + let code = match v.get("code") { + Some(Value::String(s)) => Some(s.clone()), + Some(Value::Number(n)) => Some(n.to_string()), + _ => None, + }; + Some(Diagnostic { + path: path.to_string_lossy().into_owned(), + range, + severity, + code, + source, + message, + }) +} + +fn parse_range(v: &Value) -> Option { + let start = parse_position(v.get("start")?)?; + let end = parse_position(v.get("end")?)?; + Some(Range { start, end }) +} + +fn parse_position(v: &Value) -> Option { + let line = v.get("line")?.as_u64()? as u32; + let character = v.get("character")?.as_u64()? as u32; + Some(Position { + line: line.saturating_add(1), + character: character.saturating_add(1), + }) +} + +fn parse_locations(value: Value) -> Vec { + match value { + Value::Null => Vec::new(), + Value::Array(arr) => arr.into_iter().flat_map(parse_location_like).collect(), + other => parse_location_like(other), + } +} + +fn parse_location_like(value: Value) -> Vec { + // Location + if let Some(uri) = value.get("uri").and_then(Value::as_str) + && let Some(range_value) = value.get("range") + && let Some(range) = parse_range(range_value) + && let Some(path) = url::Url::parse(uri) + .ok() + .and_then(|u| u.to_file_path().ok()) + { + let path = normalize_path(&path).unwrap_or(path); + return vec![Location { + path: path.to_string_lossy().into_owned(), + range, + }]; + } + + // LocationLink + if let Some(target_uri) = value.get("targetUri").and_then(Value::as_str) + && let Some(range_value) = value.get("targetRange") + && let Some(range) = parse_range(range_value) + && let Some(path) = url::Url::parse(target_uri) + .ok() + .and_then(|u| u.to_file_path().ok()) + { + let path = normalize_path(&path).unwrap_or(path); + return vec![Location { + path: path.to_string_lossy().into_owned(), + range, + }]; + } + + Vec::new() +} + +fn parse_document_symbols(value: Value) -> Vec { + match value { + 
Value::Array(arr) => arr.into_iter().filter_map(parse_document_symbol).collect(), + _ => Vec::new(), + } +} + +fn parse_document_symbol(value: Value) -> Option { + let name = value.get("name")?.as_str()?.to_string(); + let kind = value.get("kind")?.as_u64()? as u32; + let range = parse_range(value.get("range")?)?; + let selection_range = parse_range(value.get("selectionRange")?)?; + let children = value + .get("children") + .and_then(Value::as_array) + .map(|arr| { + arr.iter() + .cloned() + .filter_map(parse_document_symbol) + .collect() + }) + .unwrap_or_default(); + Some(DocumentSymbol { + name, + kind, + range, + selection_range, + children, + }) +} + +#[cfg(test)] +mod tests { + use std::fs; + + use pretty_assertions::assert_eq; + + use super::detect_language_ids_for_root; + + #[test] + fn prewarm_detection_respects_gitignore() { + let dir = tempfile::tempdir().unwrap(); + + fs::write(dir.path().join(".gitignore"), "tmp/lsp-smoke/\n").unwrap(); + fs::create_dir_all(dir.path().join("tmp/lsp-smoke")).unwrap(); + fs::write( + dir.path().join("tmp/lsp-smoke/php_lsp_smoke.php"), + ", + pub code: Option, + pub source: Option, + pub message: String, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct DiagnosticsUpdate { + pub path: String, + pub diagnostics: Vec, +} + +#[derive(Debug, Clone, PartialEq, Eq, Serialize, Deserialize)] +pub struct DocumentSymbol { + pub name: String, + pub kind: u32, + pub range: Range, + pub selection_range: Range, + #[serde(default, skip_serializing_if = "Vec::is_empty")] + pub children: Vec, +} diff --git a/codex-rs/lsp/src/servers.rs b/codex-rs/lsp/src/servers.rs new file mode 100644 index 00000000000..892cea4306e --- /dev/null +++ b/codex-rs/lsp/src/servers.rs @@ -0,0 +1,410 @@ +use std::path::Path; +use std::path::PathBuf; + +use which::which; + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ServerConfig { + pub command: String, + pub args: Vec, +} + +#[cfg(windows)] +const PYRIGHT_CANDIDATES: 
[&str; 3] = [ + "pyright-langserver.cmd", + "pyright-langserver.exe", + "pyright-langserver", +]; +#[cfg(not(windows))] +const PYRIGHT_CANDIDATES: [&str; 1] = ["pyright-langserver"]; + +#[cfg(windows)] +const TYPESCRIPT_CANDIDATES: [&str; 3] = [ + "typescript-language-server.cmd", + "typescript-language-server.exe", + "typescript-language-server", +]; +#[cfg(not(windows))] +const TYPESCRIPT_CANDIDATES: [&str; 1] = ["typescript-language-server"]; + +#[cfg(windows)] +const INTELEPHENSE_CANDIDATES: [&str; 3] = ["intelephense.cmd", "intelephense.exe", "intelephense"]; +#[cfg(not(windows))] +const INTELEPHENSE_CANDIDATES: [&str; 1] = ["intelephense"]; + +#[cfg(windows)] +const CSHARP_LS_CANDIDATES: [&str; 3] = ["csharp-ls.cmd", "csharp-ls.exe", "csharp-ls"]; +#[cfg(not(windows))] +const CSHARP_LS_CANDIDATES: [&str; 1] = ["csharp-ls"]; + +#[cfg(windows)] +const OMNISHARP_CANDIDATES: [&str; 6] = [ + "OmniSharp.cmd", + "OmniSharp.exe", + "OmniSharp", + "omnisharp.cmd", + "omnisharp.exe", + "omnisharp", +]; +#[cfg(not(windows))] +const OMNISHARP_CANDIDATES: [&str; 2] = ["omnisharp", "OmniSharp"]; + +#[cfg(windows)] +const PERL_NAVIGATOR_CANDIDATES: [&str; 6] = [ + "perlnavigator.cmd", + "perlnavigator.exe", + "perlnavigator", + "perl-navigator.cmd", + "perl-navigator.exe", + "perl-navigator", +]; +#[cfg(not(windows))] +const PERL_NAVIGATOR_CANDIDATES: [&str; 2] = ["perlnavigator", "perl-navigator"]; + +fn which_best(candidates: &[&str]) -> Option { + candidates + .iter() + .find_map(|candidate| which(candidate).ok()) +} + +#[cfg(test)] +fn which_best_in(candidates: &[&str], path_list: &std::ffi::OsStr, cwd: &Path) -> Option { + candidates + .iter() + .find_map(|candidate| which::which_in(candidate, Some(path_list), cwd).ok()) +} + +fn autodetect_server_with( + language_id: &str, + which_best_fn: impl Fn(&[&str]) -> Option, +) -> Option { + match language_id { + "rust" => which_best_fn(&["rust-analyzer", "rust-analyzer.exe"]).map(|path| ServerConfig { + command: 
path.to_string_lossy().to_string(), + args: Vec::new(), + }), + "go" => which_best_fn(&["gopls", "gopls.exe"]).map(|path| ServerConfig { + command: path.to_string_lossy().to_string(), + args: Vec::new(), + }), + "python" => which_best_fn(&PYRIGHT_CANDIDATES).map(|path| ServerConfig { + command: path.to_string_lossy().to_string(), + args: vec!["--stdio".to_string()], + }), + "typescript" | "javascript" | "typescriptreact" | "javascriptreact" => { + which_best_fn(&TYPESCRIPT_CANDIDATES).map(|path| ServerConfig { + command: path.to_string_lossy().to_string(), + args: vec!["--stdio".to_string()], + }) + } + "php" => which_best_fn(&INTELEPHENSE_CANDIDATES).map(|path| ServerConfig { + command: path.to_string_lossy().to_string(), + args: vec!["--stdio".to_string()], + }), + "csharp" => which_best_fn(&CSHARP_LS_CANDIDATES) + .map(|path| ServerConfig { + command: path.to_string_lossy().to_string(), + // `csharp-ls` uses stdio by default and does not accept `--stdio`. + args: Vec::new(), + }) + .or_else(|| { + which_best_fn(&OMNISHARP_CANDIDATES).map(|path| ServerConfig { + command: path.to_string_lossy().to_string(), + args: vec!["--languageserver".to_string()], + }) + }), + "perl" => which_best_fn(&PERL_NAVIGATOR_CANDIDATES).map(|path| ServerConfig { + command: path.to_string_lossy().to_string(), + args: Vec::new(), + }), + _ => None, + } +} + +pub(crate) fn autodetect_server(language_id: &str) -> Option { + autodetect_server_with(language_id, which_best) +} + +pub(crate) fn language_id_for_path(path: &Path) -> Option<&'static str> { + let ext = path.extension()?.to_string_lossy().to_ascii_lowercase(); + match ext.as_str() { + "rs" => Some("rust"), + "go" => Some("go"), + "py" => Some("python"), + "ts" => Some("typescript"), + "tsx" => Some("typescriptreact"), + "js" => Some("javascript"), + "jsx" => Some("javascriptreact"), + "php" | "phtml" | "php5" | "php7" | "phps" => Some("php"), + "cs" | "csproj" | "sln" => Some("csharp"), + "pl" | "pm" | "t" | "psgi" => 
Some("perl"), + "json" => Some("json"), + "toml" => Some("toml"), + _ => None, + } +} + +#[cfg(test)] +mod tests { + use std::ffi::OsString; + use std::fs; + use std::path::Path; + use std::time::SystemTime; + use std::time::UNIX_EPOCH; + + use pretty_assertions::assert_eq; + + use super::ServerConfig; + use super::autodetect_server_with; + use super::language_id_for_path; + use super::which_best_in; + + fn unique_temp_dir() -> std::path::PathBuf { + let now = SystemTime::now() + .duration_since(UNIX_EPOCH) + .unwrap_or_default() + .as_nanos(); + let pid = std::process::id(); + std::env::temp_dir().join(format!("codex-lsp-which-test-{pid}-{now}")) + } + + #[cfg(unix)] + fn make_executable(path: &std::path::Path) -> anyhow::Result<()> { + use std::os::unix::fs::PermissionsExt; + + let mut perms = fs::metadata(path)?.permissions(); + perms.set_mode(0o755); + fs::set_permissions(path, perms)?; + Ok(()) + } + + #[cfg(windows)] + fn path_list_for(dir: &std::path::Path) -> OsString { + OsString::from(dir) + } + + #[cfg(unix)] + fn path_list_for(dir: &std::path::Path) -> OsString { + OsString::from(dir) + } + + #[cfg(windows)] + #[test] + fn which_best_prefers_cmd_over_bare() -> anyhow::Result<()> { + // `npm` commonly creates `.cmd` shims on Windows; ensure we prefer those. 
+ let dir = unique_temp_dir(); + fs::create_dir_all(&dir)?; + + let cmd = dir.join("typescript-language-server.cmd"); + let bare = dir.join("typescript-language-server"); + fs::write(&cmd, "@echo off\r\necho ok\r\n")?; + fs::write(&bare, "not used")?; + + let best = which_best_in( + &[ + "typescript-language-server.cmd", + "typescript-language-server", + ], + &path_list_for(&dir), + &dir, + ) + .unwrap(); + assert_eq!(best, cmd); + + fs::remove_dir_all(&dir)?; + Ok(()) + } + + #[cfg(unix)] + #[test] + fn which_best_finds_executable_in_custom_path() -> anyhow::Result<()> { + let dir = unique_temp_dir(); + fs::create_dir_all(&dir)?; + + let server = dir.join("typescript-language-server"); + fs::write(&server, "#!/bin/sh\nexit 0\n")?; + make_executable(&server)?; + + let found = + which_best_in(&["typescript-language-server"], &path_list_for(&dir), &dir).unwrap(); + assert_eq!(found, server); + + fs::remove_dir_all(&dir)?; + Ok(()) + } + + #[test] + fn language_id_for_path_includes_csharp_php_and_perl() { + let cases = [ + ("Program.cs", Some("csharp")), + ("Project.csproj", Some("csharp")), + ("Solution.sln", Some("csharp")), + ("main.php", Some("php")), + ("view.PHTML", Some("php")), + ("legacy.php5", Some("php")), + ("modern.php7", Some("php")), + ("source.PHPS", Some("php")), + ("script.pl", Some("perl")), + ("lib.pm", Some("perl")), + ("basic.t", Some("perl")), + ("app.psgi", Some("perl")), + ("nope.txt", None), + ]; + + for (path, expected) in cases { + assert_eq!( + language_id_for_path(Path::new(path)), + expected, + "path={path}" + ); + } + } + + #[cfg(windows)] + #[test] + fn autodetect_server_includes_csharp_php_and_perl() -> anyhow::Result<()> { + let dir = unique_temp_dir(); + fs::create_dir_all(&dir)?; + + let csharp_ls = dir.join("csharp-ls.cmd"); + fs::write(&csharp_ls, "@echo off\r\necho ok\r\n")?; + + let omnisharp = dir.join("OmniSharp.cmd"); + fs::write(&omnisharp, "@echo off\r\necho ok\r\n")?; + + let php = dir.join("intelephense.cmd"); + 
fs::write(&php, "@echo off\r\necho ok\r\n")?; + + let perl = dir.join("perlnavigator.cmd"); + fs::write(&perl, "@echo off\r\necho ok\r\n")?; + + let path_list = path_list_for(&dir); + let csharp_cfg = autodetect_server_with("csharp", |candidates| { + which_best_in(candidates, &path_list, &dir) + }) + .unwrap(); + assert_eq!( + csharp_cfg, + ServerConfig { + command: csharp_ls.to_string_lossy().to_string(), + args: Vec::new(), + } + ); + + fs::remove_file(&csharp_ls)?; + let csharp_cfg = autodetect_server_with("csharp", |candidates| { + which_best_in(candidates, &path_list, &dir) + }) + .unwrap(); + assert_eq!( + csharp_cfg, + ServerConfig { + command: omnisharp.to_string_lossy().to_string(), + args: vec!["--languageserver".to_string()], + } + ); + + let php_cfg = autodetect_server_with("php", |candidates| { + which_best_in(candidates, &path_list, &dir) + }) + .unwrap(); + assert_eq!( + php_cfg, + ServerConfig { + command: php.to_string_lossy().to_string(), + args: vec!["--stdio".to_string()], + } + ); + + let perl_cfg = autodetect_server_with("perl", |candidates| { + which_best_in(candidates, &path_list, &dir) + }) + .unwrap(); + assert_eq!( + perl_cfg, + ServerConfig { + command: perl.to_string_lossy().to_string(), + args: Vec::new(), + } + ); + + fs::remove_dir_all(&dir)?; + Ok(()) + } + + #[cfg(unix)] + #[test] + fn autodetect_server_includes_csharp_php_and_perl() -> anyhow::Result<()> { + let dir = unique_temp_dir(); + fs::create_dir_all(&dir)?; + + let csharp_ls = dir.join("csharp-ls"); + fs::write(&csharp_ls, "#!/bin/sh\nexit 0\n")?; + make_executable(&csharp_ls)?; + + let omnisharp = dir.join("omnisharp"); + fs::write(&omnisharp, "#!/bin/sh\nexit 0\n")?; + make_executable(&omnisharp)?; + + let php = dir.join("intelephense"); + fs::write(&php, "#!/bin/sh\nexit 0\n")?; + make_executable(&php)?; + + let perl = dir.join("perlnavigator"); + fs::write(&perl, "#!/bin/sh\nexit 0\n")?; + make_executable(&perl)?; + + let path_list = path_list_for(&dir); + let 
csharp_cfg = autodetect_server_with("csharp", |candidates| { + which_best_in(candidates, &path_list, &dir) + }) + .unwrap(); + assert_eq!( + csharp_cfg, + ServerConfig { + command: csharp_ls.to_string_lossy().to_string(), + args: Vec::new(), + } + ); + + fs::remove_file(&csharp_ls)?; + let csharp_cfg = autodetect_server_with("csharp", |candidates| { + which_best_in(candidates, &path_list, &dir) + }) + .unwrap(); + assert_eq!( + csharp_cfg, + ServerConfig { + command: omnisharp.to_string_lossy().to_string(), + args: vec!["--languageserver".to_string()], + } + ); + + let php_cfg = autodetect_server_with("php", |candidates| { + which_best_in(candidates, &path_list, &dir) + }) + .unwrap(); + assert_eq!( + php_cfg, + ServerConfig { + command: php.to_string_lossy().to_string(), + args: vec!["--stdio".to_string()], + } + ); + + let perl_cfg = autodetect_server_with("perl", |candidates| { + which_best_in(candidates, &path_list, &dir) + }) + .unwrap(); + assert_eq!( + perl_cfg, + ServerConfig { + command: perl.to_string_lossy().to_string(), + args: Vec::new(), + } + ); + + fs::remove_dir_all(&dir)?; + Ok(()) + } +} diff --git a/codex-rs/lsp/src/status.rs b/codex-rs/lsp/src/status.rs new file mode 100644 index 00000000000..1f179a58375 --- /dev/null +++ b/codex-rs/lsp/src/status.rs @@ -0,0 +1,169 @@ +use std::collections::BTreeSet; +use std::path::Path; +use std::path::PathBuf; + +use crate::ServerConfig; +use crate::lsp::normalize_root_path; +use crate::servers::autodetect_server; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum LspServerSource { + Config, + Autodetect, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct LspLanguageStatus { + pub language_id: String, + pub configured: Option, + pub autodetected: Option, + pub effective: Option<(ServerConfig, LspServerSource)>, + pub running: bool, + /// Whether the running server advertises `textDocument/diagnostic` (LSP pull diagnostics). + /// + /// `None` when the server isn't running. 
+ pub supports_pull_diagnostics: Option, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct LspStatus { + pub enabled: bool, + pub root: PathBuf, + pub ignored_globs: Vec, + pub max_file_bytes: usize, + pub workspace_initialized: bool, + pub languages: Vec, +} + +pub(crate) fn status_language_ids( + configured: impl Iterator, + running: impl Iterator, +) -> Vec { + // Keep this list small and aligned with codex-lsp's autodetect map. + const BUILTIN: [&str; 9] = [ + "rust", + "go", + "csharp", + "python", + "typescript", + "javascript", + "typescriptreact", + "javascriptreact", + "php", + // "perl" is intentionally appended last; it may depend on a Perl runtime being installed. + ]; + + let mut out: BTreeSet = BUILTIN.into_iter().map(str::to_string).collect(); + out.insert("perl".to_string()); + out.extend(configured); + out.extend(running); + out.into_iter().collect() +} + +pub(crate) fn normalize_status_root(root: &Path) -> PathBuf { + normalize_root_path(root) +} + +pub fn render_lsp_status(status: &LspStatus) -> String { + let enabled = status.enabled; + let root = status.root.display(); + + let mut lines = Vec::new(); + lines.push("## LSP status".to_string()); + lines.push(format!("- enabled: {enabled}")); + lines.push(format!("- root: {root}")); + lines.push(format!( + "- workspace: {}", + if status.workspace_initialized { + "initialized" + } else { + "not initialized" + } + )); + + if !enabled { + lines.push("- note: enable `[features].lsp = true` in config.toml".to_string()); + return lines.join("\n"); + } + + if !status.ignored_globs.is_empty() { + let ignored = status + .ignored_globs + .iter() + .map(|g| format!("`{g}`")) + .collect::>() + .join(", "); + lines.push(format!("- ignored: {ignored}")); + } + + let max_file_bytes = status.max_file_bytes; + lines.push(format!("- max_file_bytes: {max_file_bytes}")); + + lines.push("- servers:".to_string()); + for lang in &status.languages { + let language_id = &lang.language_id; + let running = if 
lang.running { "yes" } else { "no" }; + let pull = match lang.supports_pull_diagnostics { + Some(true) => Some("pull-diags"), + _ => None, + }; + + let configured = lang.configured.as_ref().map(server_cfg_display); + let autodetected = lang.autodetected.as_ref().map(server_cfg_display); + let effective = lang.effective.as_ref().map(|(cfg, source)| { + let source = match source { + LspServerSource::Config => "config", + LspServerSource::Autodetect => "autodetect", + }; + format!("{} ({source})", server_cfg_display(cfg)) + }); + + let configured = configured.as_deref().unwrap_or("(none)"); + let autodetected = autodetected.as_deref().unwrap_or("(not found on PATH)"); + let effective = effective.as_deref().unwrap_or("(none)"); + + let caps = pull.into_iter().collect::>().join(","); + let caps = if caps.is_empty() { + String::new() + } else { + format!(", caps={caps}") + }; + + lines.push(format!( + " - {language_id}: running={running}{caps}, effective={effective}, configured={configured}, detected={autodetected}", + )); + } + + lines.join("\n") +} + +fn server_cfg_display(cfg: &ServerConfig) -> String { + let command = cfg.command.trim(); + if cfg.args.is_empty() { + return format!("`{command}`"); + } + let args = cfg.args.join(" "); + format!("`{command}` `{args}`") +} + +pub(crate) fn effective_server_config( + language_id: &str, + configured: Option<&ServerConfig>, +) -> ( + Option, + Option, + Option<(ServerConfig, LspServerSource)>, +) { + let autodetected = autodetect_server(language_id); + let configured = configured.cloned(); + + let effective = if let Some(cfg) = &configured { + Some((cfg.clone(), LspServerSource::Config)) + } else { + autodetected + .clone() + .map(|cfg| (cfg, LspServerSource::Autodetect)) + }; + + (configured, autodetected, effective) +} diff --git a/codex-rs/lsp/src/watcher.rs b/codex-rs/lsp/src/watcher.rs new file mode 100644 index 00000000000..f4769c717b9 --- /dev/null +++ b/codex-rs/lsp/src/watcher.rs @@ -0,0 +1,44 @@ +use 
std::path::Path; +use std::path::PathBuf; + +use notify::EventKind; +use notify::RecommendedWatcher; +use notify::RecursiveMode; +use notify::Watcher; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum ChangeKind { + CreateOrModify, + Remove, +} + +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct FileChange { + pub path: PathBuf, + pub kind: ChangeKind, +} + +pub(crate) fn start_watcher( + root: &Path, + tx: tokio::sync::mpsc::UnboundedSender, +) -> notify::Result { + let root = root.to_path_buf(); + let root_for_filter = root.clone(); + let mut watcher = notify::recommended_watcher(move |res: notify::Result| { + if let Ok(event) = res { + let kind = match event.kind { + EventKind::Create(_) | EventKind::Modify(_) => ChangeKind::CreateOrModify, + EventKind::Remove(_) => ChangeKind::Remove, + _ => return, + }; + for path in event.paths { + if path.starts_with(&root_for_filter) { + let _ = tx.send(FileChange { path, kind }); + } + } + } + })?; + + watcher.watch(root.as_path(), RecursiveMode::Recursive)?; + Ok(watcher) +} diff --git a/codex-rs/mcp-server/Cargo.toml b/codex-rs/mcp-server/Cargo.toml index 00a22045786..6236384c958 100644 --- a/codex-rs/mcp-server/Cargo.toml +++ b/codex-rs/mcp-server/Cargo.toml @@ -38,7 +38,6 @@ tracing = { workspace = true, features = ["log"] } tracing-subscriber = { workspace = true, features = ["env-filter", "fmt"] } [dev-dependencies] -assert_cmd = { workspace = true } core_test_support = { workspace = true } mcp_test_support = { workspace = true } os_info = { workspace = true } diff --git a/codex-rs/mcp-server/tests/common/Cargo.toml b/codex-rs/mcp-server/tests/common/Cargo.toml index c549a03fc95..aba984edabc 100644 --- a/codex-rs/mcp-server/tests/common/Cargo.toml +++ b/codex-rs/mcp-server/tests/common/Cargo.toml @@ -9,9 +9,9 @@ path = "lib.rs" [dependencies] anyhow = { workspace = true } -assert_cmd = { workspace = true } codex-core = { workspace = true } codex-mcp-server = { workspace = true } +codex-utils-cargo-bin = { 
workspace = true } mcp-types = { workspace = true } os_info = { workspace = true } pretty_assertions = { workspace = true } diff --git a/codex-rs/mcp-server/tests/common/mcp_process.rs b/codex-rs/mcp-server/tests/common/mcp_process.rs index a924ac10b01..93d271ee169 100644 --- a/codex-rs/mcp-server/tests/common/mcp_process.rs +++ b/codex-rs/mcp-server/tests/common/mcp_process.rs @@ -10,7 +10,6 @@ use tokio::process::ChildStdin; use tokio::process::ChildStdout; use anyhow::Context; -use assert_cmd::prelude::*; use codex_mcp_server::CodexToolCallParam; use mcp_types::CallToolRequestParams; @@ -27,7 +26,6 @@ use mcp_types::ModelContextProtocolRequest; use mcp_types::RequestId; use pretty_assertions::assert_eq; use serde_json::json; -use std::process::Command as StdCommand; use tokio::process::Command; pub struct McpProcess { @@ -55,12 +53,8 @@ impl McpProcess { codex_home: &Path, env_overrides: &[(&str, Option<&str>)], ) -> anyhow::Result { - // Use assert_cmd to locate the binary path and then switch to tokio::process::Command - let std_cmd = StdCommand::cargo_bin("codex-mcp-server") + let program = codex_utils_cargo_bin::cargo_bin("codex-mcp-server") .context("should find binary for codex-mcp-server")?; - - let program = std_cmd.get_program().to_owned(); - let mut cmd = Command::new(program); cmd.stdin(Stdio::piped()); diff --git a/codex-rs/protocol/src/openai_models.rs b/codex-rs/protocol/src/openai_models.rs index 28b25bb604e..f2fc08fcdb2 100644 --- a/codex-rs/protocol/src/openai_models.rs +++ b/codex-rs/protocol/src/openai_models.rs @@ -125,14 +125,6 @@ pub enum ApplyPatchToolType { Function, } -#[derive(Deserialize, Debug, Clone, PartialEq, Eq, Default, Hash, TS, JsonSchema, Serialize)] -#[serde(rename_all = "snake_case")] -pub enum ReasoningSummaryFormat { - #[default] - None, - Experimental, -} - /// Server-provided truncation policy metadata for a model. 
#[derive(Debug, Serialize, Deserialize, Clone, Copy, PartialEq, Eq, TS, JsonSchema)] #[serde(rename_all = "snake_case")] @@ -177,7 +169,6 @@ pub struct ModelInfo { pub supported_reasoning_levels: Vec, pub shell_type: ConfigShellToolType, pub visibility: ModelVisibility, - pub minimal_client_version: ClientVersion, pub supported_in_api: bool, pub priority: i32, pub upgrade: Option, @@ -189,7 +180,6 @@ pub struct ModelInfo { pub truncation_policy: TruncationPolicyConfig, pub supports_parallel_tool_calls: bool, pub context_window: Option, - pub reasoning_summary_format: ReasoningSummaryFormat, pub experimental_supported_tools: Vec, } diff --git a/codex-rs/protocol/src/protocol.rs b/codex-rs/protocol/src/protocol.rs index ee5539e989d..44999ee3c01 100644 --- a/codex-rs/protocol/src/protocol.rs +++ b/codex-rs/protocol/src/protocol.rs @@ -146,6 +146,24 @@ pub enum Op { #[serde(skip_serializing_if = "Option::is_none")] plan_model: Option, + /// Updated model slug used for exploration flows (e.g. `/plan` exploration subagents). + /// + /// When omitted, exploration flows use the active model for that turn. + #[serde(skip_serializing_if = "Option::is_none")] + explore_model: Option, + + /// Updated model slug used for mini subagents (the `spawn_mini_subagent` tool flow). + /// + /// When omitted, mini subagents use their configured default model. + #[serde(skip_serializing_if = "Option::is_none")] + mini_subagent_model: Option, + + /// Updated model slug used for ordinary spawned subagents (the `spawn_subagent` tool flow). + /// + /// When omitted, spawned subagents use the active model for that turn. + #[serde(skip_serializing_if = "Option::is_none")] + subagent_model: Option, + /// Updated reasoning effort (honored only for reasoning-capable models). 
/// /// Use `Some(Some(_))` to set a specific effort, `Some(None)` to clear @@ -160,6 +178,27 @@ pub enum Op { #[serde(skip_serializing_if = "Option::is_none")] plan_effort: Option>, + /// Updated reasoning effort for exploration flows (honored only for reasoning-capable models). + /// + /// Use `Some(Some(_))` to set a specific effort, `Some(None)` to clear + /// the effort, or `None` to leave the existing value unchanged. + #[serde(skip_serializing_if = "Option::is_none")] + explore_effort: Option>, + + /// Updated reasoning effort for mini subagents (honored only for reasoning-capable models). + /// + /// Use `Some(Some(_))` to set a specific effort, `Some(None)` to clear + /// the effort, or `None` to leave the existing value unchanged. + #[serde(skip_serializing_if = "Option::is_none")] + mini_subagent_effort: Option>, + + /// Updated reasoning effort for ordinary spawned subagents (honored only for reasoning-capable models). + /// + /// Use `Some(Some(_))` to set a specific effort, `Some(None)` to clear + /// the effort, or `None` to leave the existing value unchanged. + #[serde(skip_serializing_if = "Option::is_none")] + subagent_effort: Option>, + /// Updated reasoning summary preference (honored only for reasoning-capable models). #[serde(skip_serializing_if = "Option::is_none")] summary: Option, @@ -1182,6 +1221,17 @@ pub struct SubAgentInvocation { pub label: String, /// Prompt sent to the subagent. pub prompt: String, + /// Model slug used for this subagent invocation (when known). 
+ #[serde(default, skip_serializing_if = "Option::is_none")] + #[ts(optional)] + pub model: Option, +} + +#[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS, PartialEq, Eq)] +#[serde(rename_all = "snake_case")] +pub enum SubAgentToolCallOutcome { + Completed, + Cancelled, } #[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS, PartialEq)] @@ -1245,6 +1295,10 @@ pub struct SubAgentToolCallEndEvent { #[serde(default, skip_serializing_if = "Option::is_none")] #[ts(optional)] pub tokens: Option, + /// Outcome of the subagent run. + #[serde(default, skip_serializing_if = "Option::is_none")] + #[ts(optional)] + pub outcome: Option, /// Result of the subagent call. Note this could be an error. pub result: Result, } @@ -1437,6 +1491,23 @@ pub struct TurnContextItem { #[serde(skip_serializing_if = "Option::is_none")] pub effort: Option, pub summary: ReasoningSummaryConfig, + #[serde(skip_serializing_if = "Option::is_none")] + pub base_instructions: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub user_instructions: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub developer_instructions: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub final_output_json_schema: Option, + #[serde(skip_serializing_if = "Option::is_none")] + pub truncation_policy: Option, +} + +#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)] +#[serde(tag = "mode", content = "limit", rename_all = "snake_case")] +pub enum TruncationPolicy { + Bytes(usize), + Tokens(usize), } #[derive(Serialize, Deserialize, Clone, JsonSchema)] @@ -1692,6 +1763,11 @@ pub struct StreamErrorEvent { pub message: String, #[serde(default)] pub codex_error_info: Option, + /// Optional details about the underlying stream failure (often the same + /// human-readable message that is surfaced as the terminal error if retries + /// are exhausted). 
+ #[serde(default)] + pub additional_details: Option, } #[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)] diff --git a/codex-rs/rmcp-client/Cargo.toml b/codex-rs/rmcp-client/Cargo.toml index f2525c18d32..efcea2d805b 100644 --- a/codex-rs/rmcp-client/Cargo.toml +++ b/codex-rs/rmcp-client/Cargo.toml @@ -55,7 +55,7 @@ webbrowser = { workspace = true } which = { workspace = true } [dev-dependencies] -escargot = { workspace = true } +codex-utils-cargo-bin = { workspace = true } pretty_assertions = { workspace = true } serial_test = { workspace = true } tempfile = { workspace = true } diff --git a/codex-rs/rmcp-client/tests/resources.rs b/codex-rs/rmcp-client/tests/resources.rs index fda21d14e21..3d627ebbf4a 100644 --- a/codex-rs/rmcp-client/tests/resources.rs +++ b/codex-rs/rmcp-client/tests/resources.rs @@ -5,7 +5,7 @@ use std::time::Duration; use codex_rmcp_client::ElicitationAction; use codex_rmcp_client::ElicitationResponse; use codex_rmcp_client::RmcpClient; -use escargot::CargoBuild; +use codex_utils_cargo_bin::CargoBinError; use futures::FutureExt as _; use mcp_types::ClientCapabilities; use mcp_types::Implementation; @@ -20,12 +20,8 @@ use serde_json::json; const RESOURCE_URI: &str = "memo://codex/example-note"; -fn stdio_server_bin() -> anyhow::Result { - let build = CargoBuild::new() - .package("codex-rmcp-client") - .bin("test_stdio_server") - .run()?; - Ok(build.path().to_path_buf()) +fn stdio_server_bin() -> Result { + codex_utils_cargo_bin::cargo_bin("test_stdio_server") } fn init_params() -> InitializeRequestParams { diff --git a/codex-rs/stdio-to-uds/Cargo.toml b/codex-rs/stdio-to-uds/Cargo.toml index 71ed3722c34..20e3ade7205 100644 --- a/codex-rs/stdio-to-uds/Cargo.toml +++ b/codex-rs/stdio-to-uds/Cargo.toml @@ -23,5 +23,6 @@ uds_windows = { workspace = true } [dev-dependencies] assert_cmd = { workspace = true } +codex-utils-cargo-bin = { workspace = true } pretty_assertions = { workspace = true } tempfile = { workspace = true } diff --git 
a/codex-rs/stdio-to-uds/tests/stdio_to_uds.rs b/codex-rs/stdio-to-uds/tests/stdio_to_uds.rs index 11888a81344..c6062d50dd4 100644 --- a/codex-rs/stdio-to-uds/tests/stdio_to_uds.rs +++ b/codex-rs/stdio-to-uds/tests/stdio_to_uds.rs @@ -47,7 +47,7 @@ fn pipes_stdin_and_stdout_through_socket() -> anyhow::Result<()> { Ok(()) }); - Command::cargo_bin("codex-stdio-to-uds")? + Command::new(codex_utils_cargo_bin::cargo_bin("codex-stdio-to-uds")?) .arg(&socket_path) .write_stdin("request") .assert() diff --git a/codex-rs/tui/Cargo.toml b/codex-rs/tui/Cargo.toml index 06167c973a3..3e948b5e68f 100644 --- a/codex-rs/tui/Cargo.toml +++ b/codex-rs/tui/Cargo.toml @@ -39,6 +39,7 @@ codex-core = { workspace = true } codex-feedback = { workspace = true } codex-file-search = { workspace = true } codex-login = { workspace = true } +codex-lsp = { workspace = true } codex-protocol = { workspace = true } codex-utils-absolute-path = { workspace = true } color-eyre = { workspace = true } @@ -71,7 +72,9 @@ strum_macros = { workspace = true } supports-color = { workspace = true } tempfile = { workspace = true } textwrap = { workspace = true } +thiserror = { workspace = true } tokio = { workspace = true, features = [ + "fs", "io-std", "macros", "process", @@ -97,6 +100,14 @@ tokio-util = { workspace = true, features = ["time"] } [target.'cfg(unix)'.dependencies] libc = { workspace = true } +[target.'cfg(windows)'.dependencies] +which = { workspace = true } +windows-sys = { version = "0.52", features = [ + "Win32_Foundation", + "Win32_System_Console", +] } +winsplit = "0.1" + # Clipboard support via `arboard` is not available on Android/Termux. # Only include it for non-Android targets so the crate builds on Android. 
[target.'cfg(not(target_os = "android"))'.dependencies] diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs index bdff204106c..cec207e2d34 100644 --- a/codex-rs/tui/src/app.rs +++ b/codex-rs/tui/src/app.rs @@ -3,9 +3,12 @@ use crate::app_event::AppEvent; use crate::app_event_sender::AppEventSender; use crate::bottom_pane::ApprovalRequest; use crate::chatwidget::ChatWidget; +use crate::chatwidget::ExternalEditorState; use crate::diff_render::DiffSummary; use crate::exec_command::strip_bash_lc_and_escape; +use crate::external_editor; use crate::file_search::FileSearchManager; +use crate::history_cell; use crate::history_cell::HistoryCell; use crate::model_migration::ModelMigrationOutcome; use crate::model_migration::migration_copy_for_models; @@ -62,6 +65,8 @@ use tokio::sync::mpsc::unbounded_channel; #[cfg(not(debug_assertions))] use crate::history_cell::UpdateAvailableHistoryCell; +const EXTERNAL_EDITOR_HINT: &str = "Save and close external editor to continue."; + #[derive(Debug, Clone)] pub struct AppExitInfo { pub token_usage: TokenUsage, @@ -414,6 +419,7 @@ impl App { }; ChatWidget::new_from_existing( init, + conversation_manager.clone(), resumed.conversation, resumed.session_configured, ) @@ -542,6 +548,11 @@ impl App { } }, )?; + if self.chat_widget.external_editor_state() == ExternalEditorState::Requested { + self.chat_widget + .set_external_editor_state(ExternalEditorState::Active); + self.app_event_tx.send(AppEvent::LaunchExternalEditor); + } } } } @@ -626,6 +637,7 @@ impl App { }; self.chat_widget = ChatWidget::new_from_existing( init, + self.server.clone(), resumed.conversation, resumed.session_configured, ); @@ -726,6 +738,7 @@ impl App { return Ok(false); } AppEvent::CodexOp(op) => self.chat_widget.submit_op(op), + AppEvent::QueueUserText(text) => self.chat_widget.queue_user_text(text), AppEvent::DiffResult(text) => { // Clear the in-progress state in the bottom pane self.chat_widget.on_diff_complete(); @@ -742,6 +755,15 @@ impl App { )); 
tui.frame_requester().schedule_frame(); } + AppEvent::LspStatusLoaded(status) => { + self.chat_widget + .add_plain_history_lines(crate::lsp_status::render(&status)); + tui.frame_requester().schedule_frame(); + } + AppEvent::LspStatusLoadFailed(message) => { + self.chat_widget.add_error_message(message); + tui.frame_requester().schedule_frame(); + } AppEvent::StartFileSearch(query) => { if !query.is_empty() { self.file_search.on_user_query(query); @@ -773,6 +795,14 @@ impl App { self.config.plan_model_reasoning_effort = effort; self.chat_widget.set_plan_reasoning_effort(effort); } + AppEvent::UpdateSubagentModel(model) => { + self.config.subagent_model = Some(model.clone()); + self.chat_widget.set_subagent_model(&model); + } + AppEvent::UpdateSubagentReasoningEffort(effort) => { + self.config.subagent_model_reasoning_effort = effort; + self.chat_widget.set_subagent_reasoning_effort(effort); + } AppEvent::OpenReasoningPopup { model, target } => { self.chat_widget.open_reasoning_popup(target, model); } @@ -804,6 +834,11 @@ impl App { AppEvent::OpenFeedbackConsent { category } => { self.chat_widget.open_feedback_consent(category); } + AppEvent::LaunchExternalEditor => { + if self.chat_widget.external_editor_state() == ExternalEditorState::Active { + self.launch_external_editor(tui).await; + } + } AppEvent::OpenWindowsSandboxEnablePrompt { preset } => { self.chat_widget.open_windows_sandbox_enable_prompt(preset); } @@ -840,8 +875,14 @@ impl App { sandbox_policy: Some(preset.sandbox.clone()), model: None, plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: None, plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, }, )); @@ -947,6 +988,45 @@ impl App { } } } + AppEvent::PersistSubagentModelSelection { model, effort } => { + let profile = self.active_profile.as_deref(); + match ConfigEditsBuilder::new(&self.config.codex_home) + .with_profile(profile) + 
.set_subagent_model(Some(model.as_str()), effort) + .apply() + .await + { + Ok(()) => { + let mut message = format!("Subagent model changed to {model}"); + if let Some(label) = Self::reasoning_label_for(&model, effort) { + message.push(' '); + message.push_str(label); + } + message.push_str(" (used for spawned subagents)"); + if let Some(profile) = profile { + message.push_str(" for "); + message.push_str(profile); + message.push_str(" profile"); + } + self.chat_widget.add_info_message(message, None); + } + Err(err) => { + tracing::error!( + error = %err, + "failed to persist subagent model selection" + ); + if let Some(profile) = profile { + self.chat_widget.add_error_message(format!( + "Failed to save subagent model for profile `{profile}`: {err}" + )); + } else { + self.chat_widget.add_error_message(format!( + "Failed to save default subagent model: {err}" + )); + } + } + } + } AppEvent::UpdateAskForApprovalPolicy(policy) => { self.chat_widget.set_approval_policy(policy); } @@ -1197,6 +1277,68 @@ impl App { self.config.model_reasoning_effort = effort; } + async fn launch_external_editor(&mut self, tui: &mut tui::Tui) { + let editor_cmd = match external_editor::resolve_editor_command() { + Ok(cmd) => cmd, + Err(external_editor::EditorError::MissingEditor) => { + self.chat_widget + .add_to_history(history_cell::new_error_event( + "Cannot open external editor: set $VISUAL or $EDITOR".to_string(), + )); + self.reset_external_editor_state(tui); + return; + } + Err(err) => { + self.chat_widget + .add_to_history(history_cell::new_error_event(format!( + "Failed to open editor: {err}", + ))); + self.reset_external_editor_state(tui); + return; + } + }; + + let seed = self.chat_widget.composer_text_with_pending(); + let editor_result = tui + .with_restored(tui::RestoreMode::KeepRaw, || async { + external_editor::run_editor(&seed, &editor_cmd).await + }) + .await; + self.reset_external_editor_state(tui); + + match editor_result { + Ok(new_text) => { + // Trim trailing 
whitespace + let cleaned = new_text.trim_end().to_string(); + self.chat_widget.apply_external_edit(cleaned); + } + Err(err) => { + self.chat_widget + .add_to_history(history_cell::new_error_event(format!( + "Failed to open editor: {err}", + ))); + } + } + tui.frame_requester().schedule_frame(); + } + + fn request_external_editor_launch(&mut self, tui: &mut tui::Tui) { + self.chat_widget + .set_external_editor_state(ExternalEditorState::Requested); + self.chat_widget.set_footer_hint_override(Some(vec![( + EXTERNAL_EDITOR_HINT.to_string(), + String::new(), + )])); + tui.frame_requester().schedule_frame(); + } + + fn reset_external_editor_state(&mut self, tui: &mut tui::Tui) { + self.chat_widget + .set_external_editor_state(ExternalEditorState::Closed); + self.chat_widget.set_footer_hint_override(None); + tui.frame_requester().schedule_frame(); + } + async fn handle_key_event(&mut self, tui: &mut tui::Tui, key_event: KeyEvent) { match key_event { KeyEvent { @@ -1210,6 +1352,21 @@ impl App { self.overlay = Some(Overlay::new_transcript(self.transcript_cells.clone())); tui.frame_requester().schedule_frame(); } + KeyEvent { + code: KeyCode::Char('g'), + modifiers: crossterm::event::KeyModifiers::CONTROL, + kind: KeyEventKind::Press, + .. + } => { + // Only launch the external editor if there is no overlay and the bottom pane is not in use. + // Note that it can be launched while a task is running to enable editing while the previous turn is ongoing. + if self.overlay.is_none() + && self.chat_widget.can_launch_external_editor() + && self.chat_widget.external_editor_state() == ExternalEditorState::Closed + { + self.request_external_editor_launch(tui); + } + } // Esc primes/advances backtracking only in normal (not working) mode // with the composer focused and empty. In any other state, forward // Esc so the active UI (e.g. 
status indicator, modals, popups) diff --git a/codex-rs/tui/src/app_backtrack.rs b/codex-rs/tui/src/app_backtrack.rs index 671702d3082..e8c0231b461 100644 --- a/codex-rs/tui/src/app_backtrack.rs +++ b/codex-rs/tui/src/app_backtrack.rs @@ -352,8 +352,12 @@ impl App { feedback: self.feedback.clone(), is_first_run: false, }; - self.chat_widget = - crate::chatwidget::ChatWidget::new_from_existing(init, conv, session_configured); + self.chat_widget = crate::chatwidget::ChatWidget::new_from_existing( + init, + self.server.clone(), + conv, + session_configured, + ); self.current_model = model_family.get_model_slug().to_string(); // Trim transcript up to the selected user message and re-render it. self.trim_transcript_for_backtrack(nth_user_message); diff --git a/codex-rs/tui/src/app_event.rs b/codex-rs/tui/src/app_event.rs index 5e6bd7cf829..2e8452a0e61 100644 --- a/codex-rs/tui/src/app_event.rs +++ b/codex-rs/tui/src/app_event.rs @@ -5,6 +5,7 @@ use codex_core::protocol::ConversationPathResponseEvent; use codex_core::protocol::Event; use codex_core::protocol::RateLimitSnapshot; use codex_file_search::FileMatch; +use codex_lsp::LspStatus; use codex_protocol::openai_models::ModelPreset; use crate::bottom_pane::ApprovalRequest; @@ -19,6 +20,7 @@ use codex_protocol::openai_models::ReasoningEffort; pub(crate) enum ModelPickerTarget { Chat, Plan, + Subagent, } #[allow(clippy::large_enum_variant)] @@ -39,6 +41,10 @@ pub(crate) enum AppEvent { /// bubbling channels through layers of widgets. CodexOp(codex_core::protocol::Op), + /// Queue a synthetic user message (e.g. resume helpers) through the normal + /// ChatWidget path so it is persisted to cross-session message history. + QueueUserText(String), + /// Kick off an asynchronous file search for the given query (text after /// the `@`). Previous searches may be cancelled by the app layer so there /// is at most one in-flight search. @@ -58,6 +64,12 @@ pub(crate) enum AppEvent { /// Result of computing a `/diff` command. 
DiffResult(String), + /// Result of fetching current LSP server status for a workspace. + LspStatusLoaded(LspStatus), + + /// Fetching LSP status failed. + LspStatusLoadFailed(String), + InsertHistoryCell(Box), StartCommitAnimation, @@ -76,6 +88,12 @@ pub(crate) enum AppEvent { /// Update the current plan reasoning effort in the running app and widget. UpdatePlanReasoningEffort(Option), + /// Update the current subagent model slug in the running app and widget. + UpdateSubagentModel(String), + + /// Update the current subagent reasoning effort in the running app and widget. + UpdateSubagentReasoningEffort(Option), + /// Persist the selected model and reasoning effort to the appropriate config. PersistModelSelection { model: String, @@ -88,6 +106,12 @@ pub(crate) enum AppEvent { effort: Option, }, + /// Persist the selected subagent model and reasoning effort to the appropriate config. + PersistSubagentModelSelection { + model: String, + effort: Option, + }, + /// Open the reasoning selection popup after picking a model. OpenReasoningPopup { model: ModelPreset, @@ -201,6 +225,9 @@ pub(crate) enum AppEvent { OpenFeedbackConsent { category: FeedbackCategory, }, + + /// Launch the external editor after a normal draw has completed. 
+ LaunchExternalEditor, } #[derive(Debug, Clone, Copy, PartialEq, Eq)] diff --git a/codex-rs/tui/src/bottom_pane/ask_user_question_overlay.rs b/codex-rs/tui/src/bottom_pane/ask_user_question_overlay.rs index dcefb69ad6d..f4826f47aaa 100644 --- a/codex-rs/tui/src/bottom_pane/ask_user_question_overlay.rs +++ b/codex-rs/tui/src/bottom_pane/ask_user_question_overlay.rs @@ -392,7 +392,9 @@ impl AskUserQuestionOverlay { self.mode = Mode::Review; self.error = None; self.state.reset(); - self.state.selected_idx = Some(0); + self.state.selected_idx = Some(self.questions.len()); + self.state + .ensure_visible(self.rows_len(), self.max_visible_rows()); self.return_to_review = true; } @@ -1050,10 +1052,9 @@ mod tests { overlay.handle_key_event(KeyEvent::new(KeyCode::Char('1'), KeyModifiers::NONE)); assert_eq!(overlay.mode, Mode::Review); + assert_eq!(overlay.state.selected_idx, Some(overlay.questions.len())); assert!(rx.try_recv().is_err()); - // Down to "Submit" row. - overlay.handle_key_event(KeyEvent::new(KeyCode::Down, KeyModifiers::NONE)); overlay.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); let AppEvent::CodexOp(op) = rx.try_recv().expect("submit op") else { @@ -1099,18 +1100,17 @@ mod tests { overlay.handle_key_event(KeyEvent::new(KeyCode::Char('1'), KeyModifiers::NONE)); overlay.handle_key_event(KeyEvent::new(KeyCode::Char('2'), KeyModifiers::NONE)); assert_eq!(overlay.mode, Mode::Review); + assert_eq!(overlay.state.selected_idx, Some(overlay.questions.len())); assert!(rx.try_recv().is_err()); // Edit Q1 from review. - overlay.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('1'), KeyModifiers::NONE)); assert_eq!(overlay.current_idx, 0); overlay.handle_key_event(KeyEvent::new(KeyCode::Char('2'), KeyModifiers::NONE)); assert_eq!(overlay.mode, Mode::Review); // Submit. 
- overlay.handle_key_event(KeyEvent::new(KeyCode::Down, KeyModifiers::NONE)); // Q2 - overlay.handle_key_event(KeyEvent::new(KeyCode::Down, KeyModifiers::NONE)); // Submit overlay.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); let AppEvent::CodexOp(op) = rx.try_recv().expect("submit op") else { diff --git a/codex-rs/tui/src/bottom_pane/chat_composer.rs b/codex-rs/tui/src/bottom_pane/chat_composer.rs index 17877e4bfab..d6d01045b4b 100644 --- a/codex-rs/tui/src/bottom_pane/chat_composer.rs +++ b/codex-rs/tui/src/bottom_pane/chat_composer.rs @@ -282,6 +282,85 @@ impl ChatComposer { } } + /// Replace the composer content with text from an external editor. + /// Clears pending paste placeholders and keeps only attachments whose + /// placeholder labels still appear in the new text. Cursor is placed at + /// the end after rebuilding elements. + pub(crate) fn apply_external_edit(&mut self, text: String) { + self.pending_pastes.clear(); + + // Count placeholder occurrences in the new text. + let mut placeholder_counts: HashMap = HashMap::new(); + for placeholder in self.attached_images.iter().map(|img| &img.placeholder) { + if placeholder_counts.contains_key(placeholder) { + continue; + } + let count = text.match_indices(placeholder).count(); + if count > 0 { + placeholder_counts.insert(placeholder.clone(), count); + } + } + + // Keep attachments only while we have matching occurrences left. + let mut kept_images = Vec::new(); + for img in self.attached_images.drain(..) { + if let Some(count) = placeholder_counts.get_mut(&img.placeholder) + && *count > 0 + { + *count -= 1; + kept_images.push(img); + } + } + self.attached_images = kept_images; + + // Rebuild textarea so placeholders become elements again. 
+ self.textarea.set_text(""); + let mut remaining: HashMap<&str, usize> = HashMap::new(); + for img in &self.attached_images { + *remaining.entry(img.placeholder.as_str()).or_insert(0) += 1; + } + + let mut occurrences: Vec<(usize, &str)> = Vec::new(); + for placeholder in remaining.keys() { + for (pos, _) in text.match_indices(placeholder) { + occurrences.push((pos, *placeholder)); + } + } + occurrences.sort_unstable_by_key(|(pos, _)| *pos); + + let mut idx = 0usize; + for (pos, ph) in occurrences { + let Some(count) = remaining.get_mut(ph) else { + continue; + }; + if *count == 0 { + continue; + } + if pos > idx { + self.textarea.insert_str(&text[idx..pos]); + } + self.textarea.insert_element(ph); + *count -= 1; + idx = pos + ph.len(); + } + if idx < text.len() { + self.textarea.insert_str(&text[idx..]); + } + + self.textarea.set_cursor(self.textarea.text().len()); + self.sync_popups(); + } + + pub(crate) fn current_text_with_pending(&self) -> String { + let mut text = self.textarea.text().to_string(); + for (placeholder, actual) in &self.pending_pastes { + if text.contains(placeholder) { + text = text.replace(placeholder, actual); + } + } + text + } + /// Override the footer hint items displayed beneath the composer. Passing /// `None` restores the default shortcut footer. pub(crate) fn set_footer_hint_override(&mut self, items: Option>) { @@ -321,7 +400,8 @@ impl ChatComposer { .file_name() .map(|name| name.to_string_lossy().into_owned()) .unwrap_or_else(|| "image".to_string()); - let placeholder = format!("[{file_label} {width}x{height}]"); + let base_placeholder = format!("{file_label} {width}x{height}"); + let placeholder = self.next_image_placeholder(&base_placeholder); // Insert as an element to match large paste placeholder behavior: // styled distinctly and treated atomically for cursor/mutations. 
self.textarea.insert_element(&placeholder); @@ -384,6 +464,22 @@ impl ChatComposer { } } + fn next_image_placeholder(&mut self, base: &str) -> String { + let text = self.textarea.text(); + let mut suffix = 1; + loop { + let placeholder = if suffix == 1 { + format!("[{base}]") + } else { + format!("[{base} #{suffix}]") + }; + if !text.contains(&placeholder) { + return placeholder; + } + suffix += 1; + } + } + pub(crate) fn insert_str(&mut self, text: &str) { self.textarea.insert_str(text); self.sync_popups(); @@ -3180,6 +3276,35 @@ mod tests { assert!(composer.attached_images.is_empty()); } + #[test] + fn duplicate_image_placeholders_get_suffix() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + let path = PathBuf::from("/tmp/image_dup.png"); + composer.attach_image(path.clone(), 10, 5, "PNG"); + composer.handle_paste(" ".into()); + composer.attach_image(path, 10, 5, "PNG"); + + let text = composer.textarea.text().to_string(); + assert!(text.contains("[image_dup.png 10x5]")); + assert!(text.contains("[image_dup.png 10x5 #2]")); + assert_eq!( + composer.attached_images[0].placeholder, + "[image_dup.png 10x5]" + ); + assert_eq!( + composer.attached_images[1].placeholder, + "[image_dup.png 10x5 #2]" + ); + } + #[test] fn image_placeholder_backspace_behaves_like_text_placeholder() { let (tx, _rx) = unbounded_channel::(); @@ -4029,4 +4154,116 @@ mod tests { "'/zzz' should not activate slash popup because it is not a prefix of any built-in command" ); } + + #[test] + fn apply_external_edit_rebuilds_text_and_attachments() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + + let placeholder = "[image 10x10]".to_string(); + 
composer.textarea.insert_element(&placeholder); + composer.attached_images.push(AttachedImage { + placeholder: placeholder.clone(), + path: PathBuf::from("img.png"), + }); + composer + .pending_pastes + .push(("[Pasted]".to_string(), "data".to_string())); + + composer.apply_external_edit(format!("Edited {placeholder} text")); + + assert_eq!( + composer.current_text(), + format!("Edited {placeholder} text") + ); + assert!(composer.pending_pastes.is_empty()); + assert_eq!(composer.attached_images.len(), 1); + assert_eq!(composer.attached_images[0].placeholder, placeholder); + assert_eq!(composer.textarea.cursor(), composer.current_text().len()); + } + + #[test] + fn apply_external_edit_drops_missing_attachments() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + + let placeholder = "[image 10x10]".to_string(); + composer.textarea.insert_element(&placeholder); + composer.attached_images.push(AttachedImage { + placeholder: placeholder.clone(), + path: PathBuf::from("img.png"), + }); + + composer.apply_external_edit("No images here".to_string()); + + assert_eq!(composer.current_text(), "No images here".to_string()); + assert!(composer.attached_images.is_empty()); + } + + #[test] + fn current_text_with_pending_expands_placeholders() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + + let placeholder = "[Pasted Content 5 chars]".to_string(); + composer.textarea.insert_element(&placeholder); + composer + .pending_pastes + .push((placeholder.clone(), "hello".to_string())); + + assert_eq!( + composer.current_text_with_pending(), + "hello".to_string(), + "placeholder should expand to actual text" + ); + } + + #[test] + fn 
apply_external_edit_limits_duplicates_to_occurrences() { + let (tx, _rx) = unbounded_channel::(); + let sender = AppEventSender::new(tx); + let mut composer = ChatComposer::new( + true, + sender, + false, + "Ask Codex to do anything".to_string(), + false, + ); + + let placeholder = "[image 10x10]".to_string(); + composer.textarea.insert_element(&placeholder); + composer.attached_images.push(AttachedImage { + placeholder: placeholder.clone(), + path: PathBuf::from("img.png"), + }); + + composer.apply_external_edit(format!("{placeholder} extra {placeholder}")); + + assert_eq!( + composer.current_text(), + format!("{placeholder} extra {placeholder}") + ); + assert_eq!(composer.attached_images.len(), 1); + } } diff --git a/codex-rs/tui/src/bottom_pane/footer.rs b/codex-rs/tui/src/bottom_pane/footer.rs index d47ffec98b6..c3f2da0e350 100644 --- a/codex-rs/tui/src/bottom_pane/footer.rs +++ b/codex-rs/tui/src/bottom_pane/footer.rs @@ -162,6 +162,7 @@ fn shortcut_overlay_lines(state: ShortcutsState) -> Vec> { let mut newline = Line::from(""); let mut file_paths = Line::from(""); let mut paste_image = Line::from(""); + let mut external_editor = Line::from(""); let mut edit_previous = Line::from(""); let mut quit = Line::from(""); let mut show_transcript = Line::from(""); @@ -173,6 +174,7 @@ fn shortcut_overlay_lines(state: ShortcutsState) -> Vec> { ShortcutId::InsertNewline => newline = text, ShortcutId::FilePaths => file_paths = text, ShortcutId::PasteImage => paste_image = text, + ShortcutId::ExternalEditor => external_editor = text, ShortcutId::EditPrevious => edit_previous = text, ShortcutId::Quit => quit = text, ShortcutId::ShowTranscript => show_transcript = text, @@ -185,6 +187,7 @@ fn shortcut_overlay_lines(state: ShortcutsState) -> Vec> { newline, file_paths, paste_image, + external_editor, edit_previous, quit, Line::from(""), @@ -261,6 +264,7 @@ enum ShortcutId { InsertNewline, FilePaths, PasteImage, + ExternalEditor, EditPrevious, Quit, ShowTranscript, @@ -381,6 
+385,15 @@ const SHORTCUTS: &[ShortcutDescriptor] = &[ prefix: "", label: " to paste images", }, + ShortcutDescriptor { + id: ShortcutId::ExternalEditor, + bindings: &[ShortcutBinding { + key: key_hint::ctrl(KeyCode::Char('g')), + condition: DisplayCondition::Always, + }], + prefix: "", + label: " to edit in external editor", + }, ShortcutDescriptor { id: ShortcutId::EditPrevious, bindings: &[ShortcutBinding { diff --git a/codex-rs/tui/src/bottom_pane/mod.rs b/codex-rs/tui/src/bottom_pane/mod.rs index 21556463ce8..f719010a623 100644 --- a/codex-rs/tui/src/bottom_pane/mod.rs +++ b/codex-rs/tui/src/bottom_pane/mod.rs @@ -34,6 +34,7 @@ mod list_selection_view; mod plan_approval_overlay; mod plan_request_overlay; mod prompt_args; +mod resume_prompt_overlay; mod skill_popup; pub(crate) use list_selection_view::SelectionViewParams; mod feedback_view; @@ -66,6 +67,7 @@ pub(crate) use list_selection_view::SelectionAction; pub(crate) use list_selection_view::SelectionItem; pub(crate) use plan_approval_overlay::PlanApprovalOverlay; pub(crate) use plan_request_overlay::PlanRequestOverlay; +pub(crate) use resume_prompt_overlay::ResumePromptOverlay; /// Pane displayed in the lower half of the chat UI. pub(crate) struct BottomPane { @@ -280,12 +282,27 @@ impl BottomPane { self.composer.current_text() } - /// Update the animated header shown to the left of the brackets in the - /// status indicator (defaults to "Working"). No-ops if the status - /// indicator is not active. 
- pub(crate) fn update_status_header(&mut self, header: String) { + pub(crate) fn composer_text_with_pending(&self) -> String { + self.composer.current_text_with_pending() + } + + pub(crate) fn apply_external_edit(&mut self, text: String) { + self.composer.apply_external_edit(text); + self.request_redraw(); + } + + pub(crate) fn set_footer_hint_override(&mut self, items: Option>) { + self.composer.set_footer_hint_override(items); + self.request_redraw(); + } + + /// Update the status indicator header (defaults to "Working") and details below it. + /// + /// Passing `None` clears any existing details. No-ops if the status indicator is not active. + pub(crate) fn update_status(&mut self, header: String, details: Option) { if let Some(status) = self.status.as_mut() { status.update_header(header); + status.update_details(details); self.request_redraw(); } } @@ -451,6 +468,11 @@ impl BottomPane { !self.is_task_running && self.view_stack.is_empty() && !self.composer.popup_active() } + /// Return true when no popups or modal views are active, regardless of task state. 
+ pub(crate) fn can_launch_external_editor(&self) -> bool { + self.view_stack.is_empty() && !self.composer.popup_active() + } + pub(crate) fn show_view(&mut self, view: Box) { self.push_view(view); } diff --git a/codex-rs/tui/src/bottom_pane/resume_prompt_overlay.rs b/codex-rs/tui/src/bottom_pane/resume_prompt_overlay.rs new file mode 100644 index 00000000000..65d1e519633 --- /dev/null +++ b/codex-rs/tui/src/bottom_pane/resume_prompt_overlay.rs @@ -0,0 +1,194 @@ +use crossterm::event::KeyCode; +use crossterm::event::KeyEvent; +use crossterm::event::KeyEventKind; +use crossterm::event::KeyModifiers; +use ratatui::buffer::Buffer; +use ratatui::layout::Constraint; +use ratatui::layout::Layout; +use ratatui::layout::Rect; +use ratatui::style::Stylize as _; +use ratatui::text::Line; +use ratatui::widgets::Block; +use ratatui::widgets::Clear; +use ratatui::widgets::Widget as _; + +use crate::app_event::AppEvent; +use crate::app_event_sender::AppEventSender; +use crate::render::Insets; +use crate::render::RectExt as _; +use crate::selection_list::selection_option_row; + +use super::CancellationEvent; +use super::bottom_pane_view::BottomPaneView; + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum ResumeSelection { + ResumePaused, + Continue, +} + +impl ResumeSelection { + fn next(self) -> Self { + match self { + ResumeSelection::ResumePaused => ResumeSelection::Continue, + ResumeSelection::Continue => ResumeSelection::ResumePaused, + } + } + + fn prev(self) -> Self { + match self { + ResumeSelection::ResumePaused => ResumeSelection::Continue, + ResumeSelection::Continue => ResumeSelection::ResumePaused, + } + } +} + +pub(crate) struct ResumePromptOverlay { + app_event_tx: AppEventSender, + highlighted: ResumeSelection, + selection: Option, + had_partial_output: bool, + had_in_progress_tools: bool, +} + +impl ResumePromptOverlay { + pub(crate) fn new( + app_event_tx: AppEventSender, + had_partial_output: bool, + had_in_progress_tools: bool, + ) -> Self { + Self { + 
app_event_tx, + highlighted: ResumeSelection::ResumePaused, + selection: None, + had_partial_output, + had_in_progress_tools, + } + } + + fn select(&mut self, selection: ResumeSelection) { + self.highlighted = selection; + self.selection = Some(selection); + if selection == ResumeSelection::Continue { + self.app_event_tx + .send(AppEvent::QueueUserText(continue_prompt())); + } + } +} + +impl BottomPaneView for ResumePromptOverlay { + fn handle_key_event(&mut self, key_event: KeyEvent) { + if key_event.kind == KeyEventKind::Release { + return; + } + if key_event.modifiers.contains(KeyModifiers::CONTROL) + && matches!(key_event.code, KeyCode::Char('c') | KeyCode::Char('d')) + { + self.select(ResumeSelection::ResumePaused); + return; + } + + match key_event.code { + KeyCode::Up | KeyCode::Char('k') => self.highlighted = self.highlighted.prev(), + KeyCode::Down | KeyCode::Char('j') => self.highlighted = self.highlighted.next(), + KeyCode::Char('1') => self.select(ResumeSelection::ResumePaused), + KeyCode::Char('2') => self.select(ResumeSelection::Continue), + KeyCode::Enter => self.select(self.highlighted), + KeyCode::Esc => self.select(ResumeSelection::ResumePaused), + _ => {} + } + } + + fn is_complete(&self) -> bool { + self.selection.is_some() + } + + fn on_ctrl_c(&mut self) -> CancellationEvent { + self.select(ResumeSelection::ResumePaused); + CancellationEvent::Handled + } +} + +impl crate::render::renderable::Renderable for ResumePromptOverlay { + fn render(&self, area: Rect, buf: &mut Buffer) { + Clear.render(area, buf); + let block = Block::bordered().title("Resume".bold()); + let inner = block.inner(area); + block.render(area, buf); + + let inset = inner.inset(Insets::vh(1, 2)); + let option_height: u16 = 1; + let options_needed = option_height.saturating_mul(2); + let header_height = inset.height.saturating_sub(options_needed); + let [header_area, opt0_area, opt1_area] = Layout::vertical([ + Constraint::Length(header_height), + 
Constraint::Length(option_height), + Constraint::Length(option_height), + ]) + .areas(inset); + + let mut header = Vec::new(); + header.push(Line::from(vec![ + "Previous turn was interrupted.".bold(), + " ".into(), + "Nothing will run until you choose.".dim(), + ])); + if self.had_partial_output || self.had_in_progress_tools { + let mut details = Vec::new(); + if self.had_partial_output { + details.push("partial assistant output".to_string()); + } + if self.had_in_progress_tools { + details.push("in-progress tool calls".to_string()); + } + header.push(Line::from(vec![ + "Detected: ".dim(), + details.join(", ").into(), + ])); + } + header.push(Line::from(vec![ + "Tip: ".dim(), + "Continue sends a new message; it does not auto-replay tool calls.".dim(), + ])); + + if header_area.height > 0 { + for (i, line) in header.into_iter().enumerate() { + let y = header_area.y.saturating_add(i as u16); + let bottom = header_area.y.saturating_add(header_area.height); + if y >= bottom { + break; + } + let line_area = Rect { + x: header_area.x, + y, + width: header_area.width, + height: 1, + }; + line.render(line_area, buf); + } + } + + let option_0 = selection_option_row( + 0, + "Resume paused (recommended)".to_string(), + self.highlighted == ResumeSelection::ResumePaused, + ); + option_0.render(opt0_area, buf); + + let option_1 = selection_option_row( + 1, + "Continue from where you left off".to_string(), + self.highlighted == ResumeSelection::Continue, + ); + option_1.render(opt1_area, buf); + } + + fn desired_height(&self, _width: u16) -> u16 { + 9 + } +} + +fn continue_prompt() -> String { + "Continue from where you left off. Do not re-run tool calls that already completed; use the outputs above. If you need to rerun a tool, ask first." 
+ .to_string() +} diff --git a/codex-rs/tui/src/bottom_pane/snapshots/codex_tui__bottom_pane__chat_composer__tests__footer_mode_shortcut_overlay.snap b/codex-rs/tui/src/bottom_pane/snapshots/codex_tui__bottom_pane__chat_composer__tests__footer_mode_shortcut_overlay.snap index 3b6782d06d6..7d05a922370 100644 --- a/codex-rs/tui/src/bottom_pane/snapshots/codex_tui__bottom_pane__chat_composer__tests__footer_mode_shortcut_overlay.snap +++ b/codex-rs/tui/src/bottom_pane/snapshots/codex_tui__bottom_pane__chat_composer__tests__footer_mode_shortcut_overlay.snap @@ -10,7 +10,8 @@ expression: terminal.backend() " " " " " " -" / for commands shift + enter for newline " -" @ for file paths ctrl + v to paste images " -" esc again to edit previous message ctrl + c to exit " -" ctrl + t to view transcript " +" / for commands shift + enter for newline " +" @ for file paths ctrl + v to paste images " +" ctrl + g to edit in external editor esc again to edit previous message " +" ctrl + c to exit " +" ctrl + t to view transcript " diff --git a/codex-rs/tui/src/bottom_pane/snapshots/codex_tui__bottom_pane__footer__tests__footer_shortcuts_shift_and_esc.snap b/codex-rs/tui/src/bottom_pane/snapshots/codex_tui__bottom_pane__footer__tests__footer_shortcuts_shift_and_esc.snap index 264515a6c2b..445fa44484c 100644 --- a/codex-rs/tui/src/bottom_pane/snapshots/codex_tui__bottom_pane__footer__tests__footer_shortcuts_shift_and_esc.snap +++ b/codex-rs/tui/src/bottom_pane/snapshots/codex_tui__bottom_pane__footer__tests__footer_shortcuts_shift_and_esc.snap @@ -2,7 +2,8 @@ source: tui/src/bottom_pane/footer.rs expression: terminal.backend() --- -" / for commands shift + enter for newline " -" @ for file paths ctrl + v to paste images " -" esc again to edit previous message ctrl + c to exit " -" ctrl + t to view transcript " +" / for commands shift + enter for newline " +" @ for file paths ctrl + v to paste images " +" ctrl + g to edit in external editor esc again to edit previous message " +" ctrl + 
c to exit " +" ctrl + t to view transcript " diff --git a/codex-rs/tui/src/chatwidget.rs b/codex-rs/tui/src/chatwidget.rs index 37efd595e15..d10807967f6 100644 --- a/codex-rs/tui/src/chatwidget.rs +++ b/codex-rs/tui/src/chatwidget.rs @@ -2,7 +2,6 @@ use std::collections::HashMap; use std::collections::HashSet; use std::collections::VecDeque; use std::path::PathBuf; -use std::str::FromStr; use std::sync::Arc; use std::time::Duration; @@ -51,6 +50,7 @@ use codex_core::protocol::PatchApplyBeginEvent; use codex_core::protocol::PlanApprovalRequestEvent; use codex_core::protocol::PlanRequest; use codex_core::protocol::RateLimitSnapshot; +use codex_core::protocol::RawResponseItemEvent; use codex_core::protocol::ReviewRequest; use codex_core::protocol::ReviewTarget; use codex_core::protocol::SkillsListEntry; @@ -76,6 +76,7 @@ use codex_core::skills::model::SkillMetadata; use codex_protocol::ConversationId; use codex_protocol::account::PlanType; use codex_protocol::approvals::ElicitationRequestEvent; +use codex_protocol::models::ResponseItem; use codex_protocol::parse_command::ParsedCommand; use codex_protocol::user_input::UserInput; use crossterm::event::KeyCode; @@ -106,6 +107,7 @@ use crate::bottom_pane::ExperimentalFeaturesView; use crate::bottom_pane::InputResult; use crate::bottom_pane::PlanApprovalOverlay; use crate::bottom_pane::PlanRequestOverlay; +use crate::bottom_pane::ResumePromptOverlay; use crate::bottom_pane::SelectionAction; use crate::bottom_pane::SelectionItem; use crate::bottom_pane::SelectionViewParams; @@ -136,6 +138,7 @@ use crate::status::RateLimitSnapshotDisplay; use crate::text_formatting::truncate_text; use crate::tui::FrameRequester; mod interrupts; +use self::interrupts::CompletedLspToolCall; use self::interrupts::InterruptManager; mod agent; use self::agent::spawn_agent; @@ -315,12 +318,21 @@ enum RateLimitSwitchPromptState { Shown, } +#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)] +pub(crate) enum ExternalEditorState { + #[default] + 
Closed, + Requested, + Active, +} + pub(crate) struct ChatWidget { app_event_tx: AppEventSender, codex_op_tx: UnboundedSender, bottom_pane: BottomPane, active_cell: Option>, config: Config, + lsp_manager: codex_lsp::LspManager, model_family: ModelFamily, auth_manager: Arc, models_manager: Arc, @@ -337,6 +349,8 @@ pub(crate) struct ChatWidget { stream_controller: Option, running_commands: HashMap, suppressed_exec_calls: HashSet, + pending_lsp_tool_calls: HashMap, + pending_apply_patch_calls: HashSet, last_unified_wait: Option, task_complete_pending: bool, unified_exec_sessions: Vec, @@ -351,7 +365,6 @@ pub(crate) struct ChatWidget { current_status_header: String, // Previous status header to restore after a transient stream retry. retry_status_header: Option, - plan_variants_progress: Option, conversation_id: Option, frame_requester: FrameRequester, // Whether to include the initial welcome banner on session configured @@ -359,6 +372,9 @@ pub(crate) struct ChatWidget { // When resuming an existing session (selected via resume picker), avoid an // immediate redraw on SessionConfigured to prevent a gratuitous UI flicker. 
suppress_session_configured_redraw: bool, + resume_last_turn_aborted: bool, + resume_had_partial_output: bool, + resume_had_in_progress_tools: bool, // User messages queued while a turn is in progress queued_user_messages: VecDeque, // Pending notification to show when unfocused on next Draw @@ -375,6 +391,13 @@ pub(crate) struct ChatWidget { feedback: codex_feedback::CodexFeedback, // Current session rollout path (if known) current_rollout_path: Option, + external_editor_state: ExternalEditorState, +} + +struct PendingLspToolCall { + tool_name: String, + arguments: String, + start_time: std::time::Instant, } struct UserMessage { @@ -382,127 +405,6 @@ struct UserMessage { image_paths: Vec, } -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum ProgressStatus { - Pending, - InProgress, - Completed, -} - -#[derive(Debug, Clone)] -struct PlanVariantsProgress { - total: usize, - steps: Vec, - durations: Vec>, - last_activity: Vec>, - tokens: Vec>, -} - -impl PlanVariantsProgress { - fn new(total: usize) -> Self { - Self { - total, - steps: vec![ProgressStatus::Pending; total], - durations: vec![None; total], - last_activity: vec![None; total], - tokens: vec![None; total], - } - } - - fn variant_label(&self, idx: usize) -> String { - if self.total == 3 { - match idx { - 0 => "Minimal".to_string(), - 1 => "Correctness".to_string(), - 2 => "DX".to_string(), - _ => format!("Variant {}/{}", idx + 1, self.total), - } - } else { - format!("Variant {}/{}", idx + 1, self.total) - } - } - - fn set_in_progress(&mut self, idx: usize) { - if idx < self.steps.len() { - self.steps[idx] = ProgressStatus::InProgress; - } - } - - fn set_completed(&mut self, idx: usize) { - if idx < self.steps.len() { - self.steps[idx] = ProgressStatus::Completed; - } - } - - fn set_duration(&mut self, idx: usize, duration: Option) { - if idx < self.durations.len() { - self.durations[idx] = duration; - } - } - - fn set_activity(&mut self, idx: usize, activity: Option) { - if idx < self.last_activity.len() 
{ - self.last_activity[idx] = activity; - } - } - - fn set_tokens(&mut self, idx: usize, tokens: Option) { - if idx < self.tokens.len() { - self.tokens[idx] = tokens; - } - } - - fn render_detail_lines(&self) -> Vec> { - use ratatui::style::Stylize; - let mut lines = Vec::with_capacity(self.total); - for (idx, status) in self.steps.iter().copied().enumerate() { - let label = self.variant_label(idx); - let status_span = match status { - ProgressStatus::Pending => "○".dim(), - ProgressStatus::InProgress => "●".cyan(), - ProgressStatus::Completed => "✓".green(), - }; - - let mut spans = vec![" ".into(), status_span, " ".into(), label.into()]; - let duration = self.durations.get(idx).and_then(|d| d.as_deref()); - let tokens = self.tokens.get(idx).and_then(|t| t.as_deref()); - if duration.is_some() || tokens.is_some() { - let mut meta = String::new(); - meta.push('('); - if let Some(duration) = duration { - meta.push_str(duration); - } - if let Some(tokens) = tokens { - if duration.is_some() { - meta.push_str(", "); - } - meta.push_str(tokens); - meta.push_str(" tok"); - } - meta.push(')'); - spans.push(" ".into()); - spans.push(meta.dim()); - } - if status == ProgressStatus::Completed { - spans.push(" ".into()); - spans.push("—".dim()); - spans.push(" ".into()); - spans.push("done".dim()); - } else if let Some(activity) = self.last_activity.get(idx).and_then(|a| a.as_deref()) { - let activity = activity.strip_prefix("shell ").unwrap_or(activity).trim(); - if !activity.is_empty() { - spans.push(" ".into()); - spans.push("—".dim()); - spans.push(" ".into()); - spans.push(activity.to_string().dim()); - } - } - lines.push(spans.into()); - } - lines - } -} - impl From for UserMessage { fn from(text: String) -> Self { Self { @@ -557,13 +459,18 @@ impl ChatWidget { } } - fn set_status_header(&mut self, header: String) { - if self.plan_variants_progress.is_some() && header != "Planning plan variants" { - self.plan_variants_progress = None; - self.clear_status_detail_lines(); - 
} + /// Update the status indicator header and details. + /// + /// Passing `None` clears any existing details. + fn set_status(&mut self, header: String, details: Option) { self.current_status_header = header.clone(); - self.bottom_pane.update_status_header(header); + self.bottom_pane.update_status(header, details); + } + + /// Convenience wrapper around [`Self::set_status`]; + /// updates the status indicator header and clears any existing details. + fn set_status_header(&mut self, header: String) { + self.set_status(header, None); } fn set_status_detail_lines(&mut self, lines: Vec>) { @@ -575,9 +482,7 @@ impl ChatWidget { } fn restore_retry_status_header_if_present(&mut self) { - if let Some(header) = self.retry_status_header.take() - && self.current_status_header != header - { + if let Some(header) = self.retry_status_header.take() { self.set_status_header(header); } } @@ -589,7 +494,11 @@ impl ChatWidget { self.set_skills(None); self.conversation_id = Some(event.session_id); self.current_rollout_path = Some(event.rollout_path.clone()); + self.resume_last_turn_aborted = false; + self.resume_had_partial_output = false; + self.resume_had_in_progress_tools = false; let initial_messages = event.initial_messages.clone(); + let is_resume_replay = initial_messages.is_some(); let model_for_header = event.model.clone(); self.session_header.set_model(&model_for_header); self.add_to_history(history_cell::new_session_info( @@ -601,6 +510,32 @@ impl ChatWidget { if let Some(messages) = initial_messages { self.replay_initial_messages(messages); } + if is_resume_replay { + if self.stream_controller.is_some() { + self.resume_had_partial_output = true; + self.flush_answer_stream_with_separator(); + } + + self.flush_completed_active_cell_if_needed(); + + if self.active_cell_is_in_progress() || !self.running_commands.is_empty() { + self.resume_had_in_progress_tools = true; + self.finalize_turn(); + } + + if self.initial_user_message.is_none() + && (self.resume_last_turn_aborted + 
|| self.resume_had_partial_output + || self.resume_had_in_progress_tools) + { + self.bottom_pane + .show_view(Box::new(ResumePromptOverlay::new( + self.app_event_tx.clone(), + self.resume_had_partial_output, + self.resume_had_in_progress_tools, + ))); + } + } // Ask codex-core to enumerate custom prompts for this session. self.submit_op(Op::ListCustomPrompts); self.submit_op(Op::ListSkills { @@ -657,6 +592,10 @@ impl ChatWidget { self.request_redraw(); } + pub(crate) fn queue_user_text(&mut self, text: String) { + self.queue_user_message(text.into()); + } + fn on_agent_message(&mut self, message: String) { // If we have a stream_controller, then the final agent message is redundant and will be a // duplicate of what has already been streamed. @@ -688,14 +627,11 @@ impl ChatWidget { } fn on_agent_reasoning_final(&mut self) { - let reasoning_summary_format = self.get_model_family().reasoning_summary_format; // At the end of a reasoning block, record transcript-only content. self.full_reasoning_buffer.push_str(&self.reasoning_buffer); if !self.full_reasoning_buffer.is_empty() { - let cell = history_cell::new_reasoning_summary_block( - self.full_reasoning_buffer.clone(), - reasoning_summary_format, - ); + let cell = + history_cell::new_reasoning_summary_block(self.full_reasoning_buffer.clone()); self.add_boxed_history(cell); } self.reasoning_buffer.clear(); @@ -716,7 +652,6 @@ impl ChatWidget { self.bottom_pane.clear_ctrl_c_quit_hint(); self.bottom_pane.set_task_running(true); self.retry_status_header = None; - self.plan_variants_progress = None; self.bottom_pane.set_interrupt_hint_visible(true); self.set_status_header(String::from("Working")); self.full_reasoning_buffer.clear(); @@ -728,6 +663,7 @@ impl ChatWidget { // If a stream is currently active, finalize it. self.flush_answer_stream_with_separator(); self.flush_wait_cell(); + self.flush_completed_active_cell_if_needed(); // Mark task stopped and request redraw now that all content is in history. 
self.bottom_pane.set_task_running(false); self.running_commands.clear(); @@ -745,6 +681,16 @@ impl ChatWidget { self.maybe_show_pending_rate_limit_prompt(); } + fn on_task_complete_replay(&mut self) { + self.flush_answer_stream_with_separator(); + self.flush_wait_cell(); + self.flush_completed_active_cell_if_needed(); + self.bottom_pane.set_task_running(false); + self.running_commands.clear(); + self.suppressed_exec_calls.clear(); + self.last_unified_wait = None; + } + pub(crate) fn set_token_info(&mut self, info: Option) { match info { Some(info) => self.apply_token_info(info), @@ -898,11 +844,11 @@ impl ChatWidget { self.bottom_pane.set_task_running(true); if let Some(current) = &self.mcp_startup_status { let total = current.len(); - let mut starting: Vec<_> = current + let mut starting: Vec = current .iter() .filter_map(|(name, state)| { if matches!(state, McpStartupStatus::Starting) { - Some(name) + Some(name.clone()) } else { None } @@ -929,6 +875,20 @@ impl ChatWidget { format!("Booting MCP server: {first}") }; self.set_status_header(header); + + let max_details = 4; + let mut detail_lines: Vec> = starting + .iter() + .take(max_details) + .map(|name| vec![" └ ".dim(), name.clone().into(), " starting".dim()].into()) + .collect(); + if starting.len() > max_details { + let extra = starting.len().saturating_sub(max_details); + detail_lines.push(vec![" └ ".dim(), format!("… +{extra} more").dim()].into()); + } + self.set_status_detail_lines(detail_lines); + } else { + self.clear_status_detail_lines(); } } self.request_redraw(); @@ -950,6 +910,7 @@ impl ChatWidget { self.on_warning(format!("MCP startup incomplete ({})", parts.join("; "))); } + self.clear_status_detail_lines(); self.mcp_startup_status = None; self.bottom_pane.set_task_running(false); self.maybe_send_next_queued_input(); @@ -994,6 +955,18 @@ impl ChatWidget { self.request_redraw(); } + fn on_interrupted_turn_replay(&mut self, _reason: TurnAbortReason) { + self.resume_last_turn_aborted = true; + 
self.resume_had_partial_output = + self.resume_had_partial_output || self.stream_controller.is_some(); + self.resume_had_in_progress_tools = self.resume_had_in_progress_tools + || self.active_cell.is_some() + || !self.running_commands.is_empty(); + + self.flush_answer_stream_with_separator(); + self.finalize_turn(); + } + fn on_plan_update(&mut self, update: UpdatePlanArgs) { let update_key = serde_json::to_string(&update).ok(); if let Some(key) = update_key.as_deref() @@ -1256,6 +1229,101 @@ impl ChatWidget { self.add_to_history(history_cell::new_web_search_call(ev.query)); } + fn on_raw_response_item(&mut self, ev: RawResponseItemEvent) { + match ev.item { + ResponseItem::CustomToolCall { name, call_id, .. } => { + if name == "apply_patch" { + self.pending_apply_patch_calls.insert(call_id); + } + } + ResponseItem::CustomToolCallOutput { call_id, output } => { + if self.pending_apply_patch_calls.remove(&call_id) + && let Some(idx) = output.find("## LSP diagnostics") + && let Some(cell) = history_cell::new_lsp_diagnostics_event( + output[idx..].to_string(), + self.config.cwd.clone(), + ) + { + self.flush_answer_stream_with_separator(); + self.flush_active_cell(); + self.add_boxed_history(Box::new(cell)); + self.request_redraw(); + } + } + ResponseItem::FunctionCall { + name, + arguments, + call_id, + .. 
+ } => { + if name == "apply_patch" { + self.pending_apply_patch_calls.insert(call_id); + return; + } + if !name.starts_with("lsp_") { + return; + } + self.pending_lsp_tool_calls.insert( + call_id, + PendingLspToolCall { + tool_name: name, + arguments, + start_time: std::time::Instant::now(), + }, + ); + } + ResponseItem::FunctionCallOutput { call_id, output } => { + if self.pending_apply_patch_calls.remove(&call_id) { + if let Some(idx) = output.content.find("## LSP diagnostics") + && let Some(cell) = history_cell::new_lsp_diagnostics_event( + output.content[idx..].to_string(), + self.config.cwd.clone(), + ) + { + self.flush_answer_stream_with_separator(); + self.flush_active_cell(); + self.add_boxed_history(Box::new(cell)); + self.request_redraw(); + } + return; + } + let Some(call) = self.pending_lsp_tool_calls.remove(&call_id) else { + return; + }; + if !call.tool_name.starts_with("lsp_") { + return; + } + + let completed = CompletedLspToolCall { + tool_name: call.tool_name, + arguments: call.arguments, + output: output.content, + success: output.success.unwrap_or(true), + duration: Some(call.start_time.elapsed()), + }; + let completed2 = completed.clone(); + self.defer_or_handle( + |q| q.push_lsp_tool_call(completed), + |s| s.handle_lsp_tool_call_now(completed2), + ); + } + _ => {} + } + } + + pub(crate) fn handle_lsp_tool_call_now(&mut self, ev: CompletedLspToolCall) { + self.flush_answer_stream_with_separator(); + self.flush_active_cell(); + self.add_boxed_history(Box::new(history_cell::FunctionToolCallCell::new( + ev.tool_name, + ev.arguments, + ev.output, + ev.success, + ev.duration, + ))); + self.request_redraw(); + } + fn on_get_history_entry_response( &mut self, event: codex_core::protocol::GetHistoryEntryResponseEvent, @@ -1287,121 +1355,10 @@ impl ChatWidget { debug!("BackgroundEvent: {message}"); self.bottom_pane.ensure_status_indicator(); self.bottom_pane.set_interrupt_hint_visible(true); - - if let Some(progress) = 
self.maybe_update_plan_variants_progress(message.as_str()) { - self.plan_variants_progress = Some(progress); - self.set_status_header("Planning plan variants".to_string()); - self.set_status_detail_lines( - self.plan_variants_progress - .as_ref() - .map(PlanVariantsProgress::render_detail_lines) - .unwrap_or_default(), - ); - return; - } - - self.plan_variants_progress = None; self.clear_status_detail_lines(); self.set_status_header(message); } - fn maybe_update_plan_variants_progress( - &mut self, - message: &str, - ) -> Option { - let message = message.trim(); - if message.starts_with("Plan variants:") { - // Expected shapes: - // - "Plan variants: generating 1/3…" - // - "Plan variants: finished 1/3 (12.3s)" - let tokens: Vec<&str> = message.split_whitespace().collect(); - if tokens.len() < 4 { - return None; - } - - let action = tokens.get(2).copied()?; - let fraction = tokens.get(3).copied()?; - let fraction = fraction.trim_end_matches('…'); - let (idx_str, total_str) = fraction.split_once('/')?; - let idx = usize::from_str(idx_str).ok()?.saturating_sub(1); - let total = usize::from_str(total_str).ok()?; - if total == 0 { - return None; - } - - let duration = message - .find('(') - .and_then(|start| message.rfind(')').map(|end| (start, end))) - .and_then(|(start, end)| { - if end > start + 1 { - Some(message[start + 1..end].to_string()) - } else { - None - } - }); - - let mut progress = self - .plan_variants_progress - .clone() - .filter(|p| p.total == total) - .unwrap_or_else(|| PlanVariantsProgress::new(total)); - - match action { - "generating" => { - progress.set_in_progress(idx); - progress.set_duration(idx, None); - } - "finished" => { - progress.set_completed(idx); - progress.set_duration(idx, duration); - progress.set_activity(idx, None); - } - _ => return None, - } - - return Some(progress); - } - - if let Some(rest) = message.strip_prefix("Plan variant ") { - // Expected shape: - // - "Plan variant 2/3: rg -n ..." 
- // - "Plan variant 2/3: shell rg -n ..." (legacy) - let (fraction, activity) = rest.split_once(':')?; - let fraction = fraction.trim(); - let (idx_str, total_str) = fraction.split_once('/')?; - let idx = usize::from_str(idx_str).ok()?.saturating_sub(1); - let total = usize::from_str(total_str).ok()?; - if total == 0 { - return None; - } - - let mut progress = self - .plan_variants_progress - .clone() - .filter(|p| p.total == total) - .unwrap_or_else(|| PlanVariantsProgress::new(total)); - - if idx < progress.steps.len() && progress.steps[idx] == ProgressStatus::Pending { - progress.set_in_progress(idx); - } - - let activity = activity.trim(); - if let Some(tokens) = activity.strip_prefix("tokens ") { - progress.set_tokens(idx, Some(tokens.trim().to_string())); - } else { - let activity = activity.strip_prefix("shell ").unwrap_or(activity).trim(); - if activity.is_empty() { - progress.set_activity(idx, None); - } else { - progress.set_activity(idx, Some(activity.to_string())); - } - } - return Some(progress); - } - - None - } - fn on_undo_started(&mut self, event: UndoStartedEvent) { self.bottom_pane.ensure_status_indicator(); self.bottom_pane.set_interrupt_hint_visible(false); @@ -1428,11 +1385,11 @@ impl ChatWidget { } } - fn on_stream_error(&mut self, message: String) { + fn on_stream_error(&mut self, message: String, additional_details: Option) { if self.retry_status_header.is_none() { self.retry_status_header = Some(self.current_status_header.clone()); } - self.set_status_header(message); + self.set_status(message, additional_details); } /// Periodic tick to commit at most one queued line to history with a small delay, @@ -1793,6 +1750,7 @@ impl ChatWidget { invocation, duration, tokens, + outcome, result, } = ev; @@ -1801,7 +1759,7 @@ impl ChatWidget { .downcast_mut::() }) && active.contains_call_id(&call_id) { - active.complete_call(&call_id, duration, tokens, result); + active.complete_call(&call_id, duration, tokens, outcome, result); if 
active.is_complete() { self.flush_active_cell(); } else { @@ -1813,7 +1771,7 @@ impl ChatWidget { self.flush_active_cell(); let mut cell = history_cell::new_active_subagent_tool_call_group(call_id.clone(), invocation); - cell.complete_call(&call_id, duration, tokens, result); + cell.complete_call(&call_id, duration, tokens, outcome, result); self.active_cell = Some(Box::new(cell)); self.flush_active_cell(); self.request_redraw(); @@ -1861,11 +1819,12 @@ impl ChatWidget { let model_slug = model_family.get_model_slug().to_string(); let mut config = config; config.model = Some(model_slug.clone()); + let lsp_manager = conversation_manager.lsp_manager(); let mut rng = rand::rng(); let placeholder = EXAMPLE_PROMPTS[rng.random_range(0..EXAMPLE_PROMPTS.len())].to_string(); let codex_op_tx = spawn_agent(config.clone(), app_event_tx.clone(), conversation_manager); - Self { + let mut widget = Self { app_event_tx: app_event_tx.clone(), frame_requester: frame_requester.clone(), codex_op_tx, @@ -1881,6 +1840,7 @@ impl ChatWidget { }), active_cell: None, config, + lsp_manager, model_family, auth_manager, models_manager, @@ -1899,6 +1859,8 @@ impl ChatWidget { stream_controller: None, running_commands: HashMap::new(), suppressed_exec_calls: HashSet::new(), + pending_lsp_tool_calls: HashMap::new(), + pending_apply_patch_calls: HashSet::new(), last_unified_wait: None, task_complete_pending: false, unified_exec_sessions: Vec::new(), @@ -1908,11 +1870,13 @@ impl ChatWidget { full_reasoning_buffer: String::new(), current_status_header: String::from("Ready"), retry_status_header: None, - plan_variants_progress: None, conversation_id: None, queued_user_messages: VecDeque::new(), show_welcome_banner: is_first_run, suppress_session_configured_redraw: false, + resume_last_turn_aborted: false, + resume_had_partial_output: false, + resume_had_in_progress_tools: false, pending_notification: None, is_review_mode: false, pre_review_token_info: None, @@ -1920,12 +1884,18 @@ impl ChatWidget { 
last_rendered_width: std::cell::Cell::new(None), feedback, current_rollout_path: None, - } + external_editor_state: ExternalEditorState::Closed, + }; + + widget.prefetch_rate_limits(); + + widget } /// Create a ChatWidget attached to an existing conversation (e.g., a fork). pub(crate) fn new_from_existing( common: ChatWidgetInit, + conversation_manager: Arc, conversation: std::sync::Arc, session_configured: codex_core::protocol::SessionConfiguredEvent, ) -> Self { @@ -1945,11 +1915,12 @@ impl ChatWidget { let model_slug = model_family.get_model_slug().to_string(); let mut rng = rand::rng(); let placeholder = EXAMPLE_PROMPTS[rng.random_range(0..EXAMPLE_PROMPTS.len())].to_string(); + let lsp_manager = conversation_manager.lsp_manager(); let codex_op_tx = spawn_agent_from_existing(conversation, session_configured, app_event_tx.clone()); - Self { + let mut widget = Self { app_event_tx: app_event_tx.clone(), frame_requester: frame_requester.clone(), codex_op_tx, @@ -1965,6 +1936,7 @@ impl ChatWidget { }), active_cell: None, config, + lsp_manager, model_family, auth_manager, models_manager, @@ -1983,6 +1955,8 @@ impl ChatWidget { stream_controller: None, running_commands: HashMap::new(), suppressed_exec_calls: HashSet::new(), + pending_lsp_tool_calls: HashMap::new(), + pending_apply_patch_calls: HashSet::new(), last_unified_wait: None, task_complete_pending: false, unified_exec_sessions: Vec::new(), @@ -1992,11 +1966,13 @@ impl ChatWidget { full_reasoning_buffer: String::new(), current_status_header: String::from("Ready"), retry_status_header: None, - plan_variants_progress: None, conversation_id: None, queued_user_messages: VecDeque::new(), show_welcome_banner: false, suppress_session_configured_redraw: true, + resume_last_turn_aborted: false, + resume_had_partial_output: false, + resume_had_in_progress_tools: false, pending_notification: None, is_review_mode: false, pre_review_token_info: None, @@ -2004,7 +1980,12 @@ impl ChatWidget { last_rendered_width: 
std::cell::Cell::new(None), feedback, current_rollout_path: None, - } + external_editor_state: ExternalEditorState::Closed, + }; + + widget.prefetch_rate_limits(); + + widget } pub(crate) fn handle_key_event(&mut self, key_event: KeyEvent) { @@ -2098,6 +2079,31 @@ impl ChatWidget { self.request_redraw(); } + pub(crate) fn composer_text_with_pending(&self) -> String { + self.bottom_pane.composer_text_with_pending() + } + + pub(crate) fn apply_external_edit(&mut self, text: String) { + self.bottom_pane.apply_external_edit(text); + self.request_redraw(); + } + + pub(crate) fn external_editor_state(&self) -> ExternalEditorState { + self.external_editor_state + } + + pub(crate) fn set_external_editor_state(&mut self, state: ExternalEditorState) { + self.external_editor_state = state; + } + + pub(crate) fn set_footer_hint_override(&mut self, items: Option>) { + self.bottom_pane.set_footer_hint_override(items); + } + + pub(crate) fn can_launch_external_editor(&self) -> bool { + self.bottom_pane.can_launch_external_editor() + } + fn dispatch_command(&mut self, cmd: SlashCommand) { if !cmd.available_during_task() && self.bottom_pane.is_task_running() { let message = format!( @@ -2152,6 +2158,9 @@ impl ChatWidget { SlashCommand::PlanModel => { self.open_plan_model_popup(); } + SlashCommand::SubagentModel => { + self.open_subagent_model_popup(); + } SlashCommand::Approvals => { self.open_approvals_popup(); } @@ -2170,9 +2179,9 @@ impl ChatWidget { } self.request_exit(); } - SlashCommand::Undo => { - self.app_event_tx.send(AppEvent::CodexOp(Op::Undo)); - } + // SlashCommand::Undo => { + // self.app_event_tx.send(AppEvent::CodexOp(Op::Undo)); + // } SlashCommand::Diff => { self.add_diff_in_progress(); let tx = self.app_event_tx.clone(); @@ -2199,6 +2208,9 @@ impl ChatWidget { SlashCommand::Status => { self.add_status_output(); } + SlashCommand::Lsp => { + self.open_lsp_status(); + } SlashCommand::Ps => { self.add_ps_output(); } @@ -2286,6 +2298,45 @@ impl ChatWidget { } } + fn 
active_cell_is_in_progress(&self) -> bool { + let Some(cell) = self.active_cell.as_ref() else { + return false; + }; + + if cell + .as_any() + .downcast_ref::() + .is_some() + { + return true; + } + if let Some(exec) = cell.as_any().downcast_ref::() { + return exec.is_active(); + } + if let Some(tool) = cell + .as_any() + .downcast_ref::() + { + return tool.is_active(); + } + if let Some(tool) = cell.as_any().downcast_ref::() { + return !tool.is_complete(); + } + + false + } + + fn flush_completed_active_cell_if_needed(&mut self) -> bool { + if self.active_cell.is_none() { + return false; + } + if self.active_cell_is_in_progress() { + return false; + } + self.flush_active_cell(); + true + } + // Only flush a live wait cell here; other active cells must finalize via their end events. fn flush_wait_cell(&mut self) { // Wait cells are transient: convert them into "(waited)" history entries if present. @@ -2307,7 +2358,7 @@ impl ChatWidget { .send(AppEvent::InsertHistoryCell(Box::new(cell))); } - fn add_to_history(&mut self, cell: impl HistoryCell + 'static) { + pub(crate) fn add_to_history(&mut self, cell: impl HistoryCell + 'static) { self.add_boxed_history(Box::new(cell)); } @@ -2461,10 +2512,14 @@ impl ChatWidget { } EventMsg::AgentReasoningSectionBreak(_) => self.on_reasoning_section_break(), EventMsg::TaskStarted(_) if !from_replay => self.on_task_started(), - EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }) if !from_replay => { - self.on_task_complete(last_agent_message) + EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }) => { + if from_replay { + self.on_task_complete_replay(); + } else { + self.on_task_complete(last_agent_message); + } } - EventMsg::TaskStarted(_) | EventMsg::TaskComplete(_) => {} + EventMsg::TaskStarted(_) => {} EventMsg::TokenCount(ev) => { self.set_token_info(ev.info); self.on_rate_limit_snapshot(ev.rate_limits); @@ -2473,17 +2528,23 @@ impl ChatWidget { EventMsg::Error(ErrorEvent { message, .. 
}) => self.on_error(message), EventMsg::McpStartupUpdate(ev) => self.on_mcp_startup_update(ev), EventMsg::McpStartupComplete(ev) => self.on_mcp_startup_complete(ev), - EventMsg::TurnAborted(ev) => match ev.reason { - TurnAbortReason::Interrupted => { - self.on_interrupted_turn(ev.reason); - } - TurnAbortReason::Replaced => { - self.on_error("Turn aborted: replaced by a new task".to_owned()) - } - TurnAbortReason::ReviewEnded => { - self.on_interrupted_turn(ev.reason); + EventMsg::TurnAborted(ev) => { + if from_replay { + self.on_interrupted_turn_replay(ev.reason); + } else { + match ev.reason { + TurnAbortReason::Interrupted => { + self.on_interrupted_turn(ev.reason); + } + TurnAbortReason::Replaced => { + self.on_error("Turn aborted: replaced by a new task".to_owned()) + } + TurnAbortReason::ReviewEnded => { + self.on_interrupted_turn(ev.reason); + } + } } - }, + } EventMsg::PlanUpdate(update) => self.on_plan_update(update), EventMsg::ExecApprovalRequest(ev) => { // For replayed events, synthesize an empty id (these should not occur). @@ -2534,9 +2595,11 @@ impl ChatWidget { } EventMsg::UndoStarted(ev) if !from_replay => self.on_undo_started(ev), EventMsg::UndoCompleted(ev) => self.on_undo_completed(ev), - EventMsg::StreamError(StreamErrorEvent { message, .. }) if !from_replay => { - self.on_stream_error(message) - } + EventMsg::StreamError(StreamErrorEvent { + message, + additional_details, + .. 
+ }) if !from_replay => self.on_stream_error(message, additional_details), EventMsg::BackgroundEvent(_) | EventMsg::UndoStarted(_) | EventMsg::StreamError(_) => {} EventMsg::UserMessage(ev) => { if from_replay { @@ -2556,8 +2619,8 @@ impl ChatWidget { } } EventMsg::ContextCompacted(_) => self.on_agent_message("Context compacted".to_owned()), - EventMsg::RawResponseItem(_) - | EventMsg::ItemStarted(_) + EventMsg::RawResponseItem(ev) => self.on_raw_response_item(ev), + EventMsg::ItemStarted(_) | EventMsg::ItemCompleted(_) | EventMsg::AgentMessageContentDelta(_) | EventMsg::ReasoningContentDelta(_) @@ -2844,8 +2907,14 @@ impl ChatWidget { sandbox_policy: None, model: Some(switch_model.clone()), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: Some(Some(default_effort)), plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, })); tx.send(AppEvent::UpdateModel(switch_model.clone())); @@ -2914,6 +2983,10 @@ impl ChatWidget { self.open_model_popup_for_target(crate::app_event::ModelPickerTarget::Plan); } + pub(crate) fn open_subagent_model_popup(&mut self) { + self.open_model_popup_for_target(crate::app_event::ModelPickerTarget::Subagent); + } + fn open_model_popup_for_target(&mut self, target: crate::app_event::ModelPickerTarget) { let chat_model = self.model_family.get_model_slug(); let current_model = match target { @@ -2923,6 +2996,11 @@ impl ChatWidget { .plan_model .clone() .unwrap_or_else(|| chat_model.to_string()), + crate::app_event::ModelPickerTarget::Subagent => self + .config + .subagent_model + .clone() + .unwrap_or_else(|| chat_model.to_string()), }; let presets: Vec = // todo(aibrahim): make this async function @@ -2999,6 +3077,11 @@ impl ChatWidget { "Choose a specific model and reasoning level for /plan (current: {current_label})" ) } + crate::app_event::ModelPickerTarget::Subagent => { + format!( + "Choose a specific model and reasoning level for 
spawned subagents (current: {current_label})" + ) + } }); items.push(SelectionItem { @@ -3015,6 +3098,9 @@ impl ChatWidget { title: Some(match target { crate::app_event::ModelPickerTarget::Chat => "Select Model".to_string(), crate::app_event::ModelPickerTarget::Plan => "Select Plan Model".to_string(), + crate::app_event::ModelPickerTarget::Subagent => { + "Select Subagent Model".to_string() + } }), subtitle: Some(match target { crate::app_event::ModelPickerTarget::Chat => { @@ -3023,6 +3109,9 @@ impl ChatWidget { crate::app_event::ModelPickerTarget::Plan => { "Pick a quick auto mode or browse all models for /plan.".to_string() } + crate::app_event::ModelPickerTarget::Subagent => { + "Pick a quick auto mode or browse all models for spawned subagents.".to_string() + } }), footer_hint: Some(standard_popup_hint_line()), items, @@ -3064,6 +3153,11 @@ impl ChatWidget { .plan_model .clone() .unwrap_or_else(|| chat_model.to_string()), + crate::app_event::ModelPickerTarget::Subagent => self + .config + .subagent_model + .clone() + .unwrap_or_else(|| chat_model.to_string()), }; let mut items: Vec = Vec::new(); for preset in presets.into_iter() { @@ -3096,6 +3190,9 @@ impl ChatWidget { crate::app_event::ModelPickerTarget::Plan => { "Select Plan Model and Effort".to_string() } + crate::app_event::ModelPickerTarget::Subagent => { + "Select Subagent Model and Effort".to_string() + } }), subtitle: Some( "Access legacy models by running codex -m or in your config.toml" @@ -3124,8 +3221,14 @@ impl ChatWidget { sandbox_policy: None, model: Some(model_for_action.clone()), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: Some(effort_for_action), plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, })); tx.send(AppEvent::UpdateModel(model_for_action.clone())); @@ -3147,8 +3250,14 @@ impl ChatWidget { sandbox_policy: None, model: None, plan_model: 
Some(model_for_action.clone()), + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: None, plan_effort: Some(effort_for_action), + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, })); tx.send(AppEvent::UpdatePlanModel(model_for_action.clone())); @@ -3163,6 +3272,35 @@ impl ChatWidget { effort_label ); } + crate::app_event::ModelPickerTarget::Subagent => { + tx.send(AppEvent::CodexOp(Op::OverrideTurnContext { + cwd: None, + approval_policy: None, + sandbox_policy: None, + model: None, + plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: Some(model_for_action.clone()), + effort: None, + plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: Some(effort_for_action), + summary: None, + })); + tx.send(AppEvent::UpdateSubagentModel(model_for_action.clone())); + tx.send(AppEvent::UpdateSubagentReasoningEffort(effort_for_action)); + tx.send(AppEvent::PersistSubagentModelSelection { + model: model_for_action.clone(), + effort: effort_for_action, + }); + tracing::info!( + "Selected subagent model: {}, Selected effort: {}", + model_for_action, + effort_label + ); + } } })] } @@ -3241,6 +3379,9 @@ impl ChatWidget { crate::app_event::ModelPickerTarget::Plan => { self.config.plan_model.as_deref().unwrap_or(chat_model) } + crate::app_event::ModelPickerTarget::Subagent => { + self.config.subagent_model.as_deref().unwrap_or(chat_model) + } }; let is_current_model = effective_current_model == preset.model; let highlight_choice = if is_current_model { @@ -3253,6 +3394,13 @@ impl ChatWidget { self.config.model_reasoning_effort } } + crate::app_event::ModelPickerTarget::Subagent => { + if self.config.subagent_model.as_deref() == Some(preset.model.as_str()) { + self.config.subagent_model_reasoning_effort + } else { + self.config.model_reasoning_effort + } + } } } else { default_choice @@ -3352,8 +3500,14 @@ impl ChatWidget { sandbox_policy: 
None, model: Some(model.clone()), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: Some(effort), plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, })); self.app_event_tx.send(AppEvent::UpdateModel(model.clone())); @@ -3377,8 +3531,14 @@ impl ChatWidget { sandbox_policy: None, model: None, plan_model: Some(model.clone()), + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: None, plan_effort: Some(effort), + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, })); self.app_event_tx @@ -3395,6 +3555,39 @@ impl ChatWidget { effort_label ); } + crate::app_event::ModelPickerTarget::Subagent => { + self.app_event_tx + .send(AppEvent::CodexOp(Op::OverrideTurnContext { + cwd: None, + approval_policy: None, + sandbox_policy: None, + model: None, + plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: Some(model.clone()), + effort: None, + plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: Some(effort), + summary: None, + })); + self.app_event_tx + .send(AppEvent::UpdateSubagentModel(model.clone())); + self.app_event_tx + .send(AppEvent::UpdateSubagentReasoningEffort(effort)); + self.app_event_tx + .send(AppEvent::PersistSubagentModelSelection { + model: model.clone(), + effort, + }); + tracing::info!( + "Selected subagent model: {}, Selected effort: {}", + model, + effort_label + ); + } } } @@ -3510,8 +3703,14 @@ impl ChatWidget { sandbox_policy: Some(sandbox_clone.clone()), model: None, plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: None, plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, })); tx.send(AppEvent::UpdateAskForApprovalPolicy(approval)); @@ -3885,6 +4084,11 @@ impl ChatWidget { 
self.config.plan_model_reasoning_effort = effort; } + /// Set the subagent reasoning effort in the widget's config copy. + pub(crate) fn set_subagent_reasoning_effort(&mut self, effort: Option) { + self.config.subagent_model_reasoning_effort = effort; + } + /// Set the model in the widget's config copy. pub(crate) fn set_model(&mut self, model: &str, model_family: ModelFamily) { self.session_header.set_model(model); @@ -3896,6 +4100,11 @@ impl ChatWidget { self.config.plan_model = Some(model.to_string()); } + /// Set the subagent model in the widget's config copy. + pub(crate) fn set_subagent_model(&mut self, model: &str) { + self.config.subagent_model = Some(model.to_string()); + } + pub(crate) fn add_info_message(&mut self, message: String, hint: Option) { self.add_to_history(history_cell::new_info_event(message, hint)); self.request_redraw(); @@ -4193,6 +4402,30 @@ impl ChatWidget { ); RenderableItem::Owned(Box::new(flex)) } + + fn open_lsp_status(&mut self) { + if !self.config.features.enabled(Feature::Lsp) { + self.add_error_message( + "LSP is disabled. 
Enable `[features].lsp = true` in config.toml.".to_string(), + ); + return; + }; + + let cwd = self.config.cwd.clone(); + let mut lsp_config = self.config.lsp.manager.clone(); + lsp_config.enabled = true; + let lsp = self.lsp_manager.clone(); + let tx = self.app_event_tx.clone(); + tokio::spawn(async move { + lsp.set_config(lsp_config).await; + match lsp.status(&cwd).await { + Ok(status) => tx.send(AppEvent::LspStatusLoaded(status)), + Err(err) => tx.send(AppEvent::LspStatusLoadFailed(format!( + "Failed to fetch LSP status: {err:#}", + ))), + } + }); + } } impl Drop for ChatWidget { diff --git a/codex-rs/tui/src/chatwidget/interrupts.rs b/codex-rs/tui/src/chatwidget/interrupts.rs index ab650ed7628..7f0fd0990a1 100644 --- a/codex-rs/tui/src/chatwidget/interrupts.rs +++ b/codex-rs/tui/src/chatwidget/interrupts.rs @@ -17,6 +17,15 @@ use codex_protocol::approvals::ElicitationRequestEvent; use super::ChatWidget; +#[derive(Debug, Clone)] +pub(crate) struct CompletedLspToolCall { + pub(crate) tool_name: String, + pub(crate) arguments: String, + pub(crate) output: String, + pub(crate) success: bool, + pub(crate) duration: Option, +} + #[derive(Debug)] pub(crate) enum QueuedInterrupt { ExecApproval(String, ExecApprovalRequestEvent), @@ -28,6 +37,7 @@ pub(crate) enum QueuedInterrupt { ExecEnd(ExecCommandEndEvent), McpBegin(McpToolCallBeginEvent), McpEnd(McpToolCallEndEvent), + LspToolCall(CompletedLspToolCall), SubAgentBegin(SubAgentToolCallBeginEvent), SubAgentActivity(SubAgentToolCallActivityEvent), SubAgentTokens(SubAgentToolCallTokensEvent), @@ -94,6 +104,10 @@ impl InterruptManager { self.queue.push_back(QueuedInterrupt::McpEnd(ev)); } + pub(crate) fn push_lsp_tool_call(&mut self, ev: CompletedLspToolCall) { + self.queue.push_back(QueuedInterrupt::LspToolCall(ev)); + } + pub(crate) fn push_subagent_begin(&mut self, ev: SubAgentToolCallBeginEvent) { self.queue.push_back(QueuedInterrupt::SubAgentBegin(ev)); } @@ -132,6 +146,7 @@ impl InterruptManager { 
QueuedInterrupt::ExecEnd(ev) => chat.handle_exec_end_now(ev), QueuedInterrupt::McpBegin(ev) => chat.handle_mcp_begin_now(ev), QueuedInterrupt::McpEnd(ev) => chat.handle_mcp_end_now(ev), + QueuedInterrupt::LspToolCall(ev) => chat.handle_lsp_tool_call_now(ev), QueuedInterrupt::SubAgentBegin(ev) => chat.handle_subagent_begin_now(ev), QueuedInterrupt::SubAgentActivity(ev) => chat.handle_subagent_activity_now(ev), QueuedInterrupt::SubAgentTokens(ev) => chat.handle_subagent_tokens_now(ev), diff --git a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__mcp_startup_header_booting.snap b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__mcp_startup_header_booting.snap index c6866c1b511..c01f99d11ae 100644 --- a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__mcp_startup_header_booting.snap +++ b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__mcp_startup_header_booting.snap @@ -4,6 +4,7 @@ expression: terminal.backend() --- " " "• Booting MCP server: alpha (0s • esc to interrupt) " +" └ alpha starting " " " " " "› Ask Codex to do anything " diff --git a/codex-rs/tui/src/chatwidget/tests.rs b/codex-rs/tui/src/chatwidget/tests.rs index 2286663fa9a..c1d692e5e43 100644 --- a/codex-rs/tui/src/chatwidget/tests.rs +++ b/codex-rs/tui/src/chatwidget/tests.rs @@ -241,6 +241,136 @@ async fn resumed_session_does_not_auto_execute_plan() { ); } +#[tokio::test] +async fn resumed_interrupted_session_prompts_before_continuing() { + let (mut chat, mut rx, _ops) = make_chatwidget_manual(None).await; + + let conversation_id = ConversationId::new(); + let rollout_file = NamedTempFile::new().unwrap(); + let configured = codex_core::protocol::SessionConfiguredEvent { + session_id: conversation_id, + model: "test-model".to_string(), + model_provider_id: "test-provider".to_string(), + approval_policy: AskForApproval::Never, + sandbox_policy: SandboxPolicy::ReadOnly, + cwd: PathBuf::from("/home/user/project"), + 
reasoning_effort: Some(ReasoningEffortConfig::default()), + history_log_id: 0, + history_entry_count: 0, + initial_messages: Some(vec![ + EventMsg::UserMessage(UserMessageEvent { + message: "hello".to_string(), + images: None, + }), + EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { + delta: "partial".to_string(), + }), + EventMsg::TurnAborted(codex_core::protocol::TurnAbortedEvent { + reason: codex_core::protocol::TurnAbortReason::Interrupted, + }), + ]), + rollout_path: rollout_file.path().to_path_buf(), + }; + + chat.handle_codex_event(Event { + id: "initial".into(), + msg: EventMsg::SessionConfigured(configured), + }); + + assert!( + !chat.is_normal_backtrack_mode(), + "expected resume prompt overlay to be active" + ); + + chat.handle_key_event(KeyEvent::new(KeyCode::Char('2'), KeyModifiers::NONE)); + + let mut continue_text = None; + while let Ok(ev) = rx.try_recv() { + if let AppEvent::QueueUserText(text) = ev { + continue_text = Some(text); + break; + } + } + + assert_eq!( + continue_text.expect("expected QueueUserText from resume prompt"), + "Continue from where you left off. Do not re-run tool calls that already completed; use the outputs above. If you need to rerun a tool, ask first." 
+ ); +} + +#[tokio::test] +async fn resumed_completed_exploring_commands_do_not_prompt() { + let (mut chat, _rx, _ops) = make_chatwidget_manual(None).await; + + let conversation_id = ConversationId::new(); + let rollout_file = NamedTempFile::new().unwrap(); + let configured = codex_core::protocol::SessionConfiguredEvent { + session_id: conversation_id, + model: "test-model".to_string(), + model_provider_id: "test-provider".to_string(), + approval_policy: AskForApproval::Never, + sandbox_policy: SandboxPolicy::ReadOnly, + cwd: PathBuf::from("/home/user/project"), + reasoning_effort: Some(ReasoningEffortConfig::default()), + history_log_id: 0, + history_entry_count: 0, + initial_messages: Some(vec![ + EventMsg::UserMessage(UserMessageEvent { + message: "hello".to_string(), + images: None, + }), + EventMsg::ExecCommandBegin(ExecCommandBeginEvent { + call_id: "call-1".to_string(), + process_id: None, + turn_id: "turn-1".to_string(), + command: vec!["cat".to_string(), "README.md".to_string()], + cwd: PathBuf::from("/home/user/project"), + parsed_cmd: vec![ParsedCommand::Read { + cmd: "cat README.md".to_string(), + name: "cat".to_string(), + path: PathBuf::from("/home/user/project/README.md"), + }], + source: ExecCommandSource::Agent, + interaction_input: None, + }), + EventMsg::ExecCommandEnd(ExecCommandEndEvent { + call_id: "call-1".to_string(), + process_id: None, + turn_id: "turn-1".to_string(), + command: vec!["cat".to_string(), "README.md".to_string()], + cwd: PathBuf::from("/home/user/project"), + parsed_cmd: vec![ParsedCommand::Read { + cmd: "cat README.md".to_string(), + name: "cat".to_string(), + path: PathBuf::from("/home/user/project/README.md"), + }], + source: ExecCommandSource::Agent, + interaction_input: None, + stdout: "contents".to_string(), + stderr: String::new(), + aggregated_output: "contents".to_string(), + exit_code: 0, + duration: std::time::Duration::from_millis(5), + formatted_output: "contents".to_string(), + }), + 
EventMsg::TaskComplete(TaskCompleteEvent { + last_agent_message: Some("done".to_string()), + }), + ]), + rollout_path: rollout_file.path().to_path_buf(), + }; + + chat.handle_codex_event(Event { + id: "initial".into(), + msg: EventMsg::SessionConfigured(configured), + }); + + assert!( + chat.is_normal_backtrack_mode(), + "expected resume replay to avoid showing interruption prompt overlay" + ); +} + /// Entering review mode uses the hint provided by the review request. #[tokio::test] async fn entered_review_mode_uses_request_hint() { @@ -494,6 +624,7 @@ async fn make_chatwidget_manual( bottom_pane: bottom, active_cell: None, config: cfg.clone(), + lsp_manager: codex_lsp::LspManager::new(codex_lsp::LspManagerConfig::default()), model_family: ModelsManager::construct_model_family_offline(&resolved_model, &cfg), auth_manager: auth_manager.clone(), models_manager: Arc::new(ModelsManager::new(auth_manager)), @@ -509,6 +640,8 @@ async fn make_chatwidget_manual( stream_controller: None, running_commands: HashMap::new(), suppressed_exec_calls: HashSet::new(), + pending_lsp_tool_calls: HashMap::new(), + pending_apply_patch_calls: HashSet::new(), last_unified_wait: None, task_complete_pending: false, unified_exec_sessions: Vec::new(), @@ -518,12 +651,14 @@ async fn make_chatwidget_manual( full_reasoning_buffer: String::new(), current_status_header: String::from("Ready"), retry_status_header: None, - plan_variants_progress: None, conversation_id: None, frame_requester: FrameRequester::test_dummy(), show_welcome_banner: true, queued_user_messages: VecDeque::new(), suppress_session_configured_redraw: false, + resume_last_turn_aborted: false, + resume_had_partial_output: false, + resume_had_in_progress_tools: false, pending_notification: None, is_review_mode: false, pre_review_token_info: None, @@ -531,6 +666,7 @@ async fn make_chatwidget_manual( last_rendered_width: std::cell::Cell::new(None), feedback: codex_feedback::CodexFeedback::new(), current_rollout_path: None, + 
external_editor_state: ExternalEditorState::Closed, }; (widget, rx, op_rx) } @@ -594,6 +730,55 @@ fn make_token_info(total_tokens: i64, context_window: i64) -> TokenUsageInfo { } } +#[tokio::test] +async fn lsp_tool_calls_render_in_history() { + let (mut chat, _app_event_tx, mut app_ev_rx, _op_rx) = + make_chatwidget_manual_with_sender().await; + + chat.dispatch_event_msg( + None, + EventMsg::RawResponseItem(codex_core::protocol::RawResponseItemEvent { + item: codex_protocol::models::ResponseItem::FunctionCall { + id: None, + name: "lsp_definition".to_string(), + arguments: r#"{"file_path":"C:\\repo\\src\\main.rs","line":1,"character":1}"# + .to_string(), + call_id: "call-1".to_string(), + }, + }), + false, + ); + + chat.dispatch_event_msg( + None, + EventMsg::RawResponseItem(codex_core::protocol::RawResponseItemEvent { + item: codex_protocol::models::ResponseItem::FunctionCallOutput { + call_id: "call-1".to_string(), + output: codex_protocol::models::FunctionCallOutputPayload { + content: r#"{"locations":[]}"#.to_string(), + success: Some(true), + ..Default::default() + }, + }, + }), + false, + ); + + let rendered: String = drain_insert_history(&mut app_ev_rx) + .into_iter() + .map(|lines| lines_to_single_string(&lines)) + .collect(); + + assert!( + rendered.contains("lsp_definition"), + "expected lsp tool call to be visible in history, got:\n{rendered}" + ); + assert!( + rendered.contains("\"locations\""), + "expected lsp tool output to be visible in history, got:\n{rendered}" + ); +} + #[tokio::test] async fn rate_limit_warnings_emit_thresholds() { let mut state = RateLimitWarningState::default(); @@ -1578,18 +1763,6 @@ async fn slash_resume_opens_picker() { assert_matches!(rx.try_recv(), Ok(AppEvent::OpenResumePicker)); } -#[tokio::test] -async fn slash_undo_sends_op() { - let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None).await; - - chat.dispatch_command(SlashCommand::Undo); - - match rx.try_recv() { - Ok(AppEvent::CodexOp(Op::Undo)) => {} - other 
=> panic!("expected AppEvent::CodexOp(Op::Undo), got {other:?}"), - } -} - #[tokio::test] async fn slash_rollout_displays_current_path() { let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None).await; @@ -1636,6 +1809,7 @@ async fn subagent_history_cell_keeps_updating_after_other_history_is_inserted() description: "Alpha task".to_string(), label: "alpha".to_string(), prompt: "Prompt".to_string(), + model: None, }, }); @@ -3425,11 +3599,13 @@ async fn stream_error_updates_status_indicator() { let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None).await; chat.bottom_pane.set_task_running(true); let msg = "Reconnecting... 2/5"; + let details = "Idle timeout waiting for SSE"; chat.handle_codex_event(Event { id: "sub-1".into(), msg: EventMsg::StreamError(StreamErrorEvent { message: msg.to_string(), codex_error_info: Some(CodexErrorInfo::Other), + additional_details: Some(details.to_string()), }), }); @@ -3443,6 +3619,7 @@ async fn stream_error_updates_status_indicator() { .status_widget() .expect("status indicator should be visible"); assert_eq!(status.header(), msg); + assert_eq!(status.details(), Some(details)); } #[tokio::test] @@ -3479,6 +3656,7 @@ async fn stream_recovery_restores_previous_status_header() { msg: EventMsg::StreamError(StreamErrorEvent { message: "Reconnecting... 
1/5".to_string(), codex_error_info: Some(CodexErrorInfo::Other), + additional_details: None, }), }); drain_insert_history(&mut rx); @@ -3494,6 +3672,7 @@ async fn stream_recovery_restores_previous_status_header() { .status_widget() .expect("status indicator should be visible"); assert_eq!(status.header(), "Working"); + assert_eq!(status.details(), None); assert!(chat.retry_status_header.is_none()); } diff --git a/codex-rs/tui/src/diff_render.rs b/codex-rs/tui/src/diff_render.rs index 24c5be597b7..ff0fb509b8a 100644 --- a/codex-rs/tui/src/diff_render.rs +++ b/codex-rs/tui/src/diff_render.rs @@ -557,23 +557,15 @@ mod tests { #[test] fn ui_snapshot_apply_delete_block() { - // Write a temporary file so the delete renderer can read original content - let tmp_path = PathBuf::from("tmp_delete_example.txt"); - std::fs::write(&tmp_path, "first\nsecond\nthird\n").expect("write tmp file"); - let mut changes: HashMap = HashMap::new(); changes.insert( - tmp_path.clone(), + PathBuf::from("tmp_delete_example.txt"), FileChange::Delete { content: "first\nsecond\nthird\n".to_string(), }, ); let lines = diff_summary_for_tests(&changes); - - // Cleanup best-effort; rendering has already read the file - let _ = std::fs::remove_file(&tmp_path); - snapshot_lines("apply_delete_block", lines, 80, 12); } diff --git a/codex-rs/tui/src/external_editor.rs b/codex-rs/tui/src/external_editor.rs new file mode 100644 index 00000000000..2818503d03b --- /dev/null +++ b/codex-rs/tui/src/external_editor.rs @@ -0,0 +1,171 @@ +use std::env; +use std::fs; +use std::process::Stdio; + +use color_eyre::eyre::Report; +use color_eyre::eyre::Result; +use tempfile::Builder; +use thiserror::Error; +use tokio::process::Command; + +#[derive(Debug, Error)] +pub(crate) enum EditorError { + #[error("neither VISUAL nor EDITOR is set")] + MissingEditor, + #[cfg(not(windows))] + #[error("failed to parse editor command")] + ParseFailed, + #[error("editor command is empty")] + EmptyCommand, +} + +/// Tries to resolve the 
full path to a Windows program, respecting PATH + PATHEXT. +/// Falls back to the original program name if resolution fails. +#[cfg(windows)] +fn resolve_windows_program(program: &str) -> std::path::PathBuf { + // On Windows, `Command::new("code")` will not resolve `code.cmd` shims on PATH. + // Use `which` so we respect PATH + PATHEXT (e.g., `code` -> `code.cmd`). + which::which(program).unwrap_or_else(|_| std::path::PathBuf::from(program)) +} + +/// Resolve the editor command from environment variables. +/// Prefers `VISUAL` over `EDITOR`. +pub(crate) fn resolve_editor_command() -> std::result::Result, EditorError> { + let raw = env::var("VISUAL") + .or_else(|_| env::var("EDITOR")) + .map_err(|_| EditorError::MissingEditor)?; + let parts = { + #[cfg(windows)] + { + winsplit::split(&raw) + } + #[cfg(not(windows))] + { + shlex::split(&raw).ok_or(EditorError::ParseFailed)? + } + }; + if parts.is_empty() { + return Err(EditorError::EmptyCommand); + } + Ok(parts) +} + +/// Write `seed` to a temp file, launch the editor command, and return the updated content. +pub(crate) async fn run_editor(seed: &str, editor_cmd: &[String]) -> Result { + if editor_cmd.is_empty() { + return Err(Report::msg("editor command is empty")); + } + + // Convert to TempPath immediately so no file handle stays open on Windows. 
+ let temp_path = Builder::new().suffix(".md").tempfile()?.into_temp_path(); + fs::write(&temp_path, seed)?; + + let mut cmd = { + #[cfg(windows)] + { + // handles .cmd/.bat shims + Command::new(resolve_windows_program(&editor_cmd[0])) + } + #[cfg(not(windows))] + { + Command::new(&editor_cmd[0]) + } + }; + if editor_cmd.len() > 1 { + cmd.args(&editor_cmd[1..]); + } + let status = cmd + .arg(&temp_path) + .stdin(Stdio::inherit()) + .stdout(Stdio::inherit()) + .stderr(Stdio::inherit()) + .status() + .await?; + + if !status.success() { + return Err(Report::msg(format!("editor exited with status {status}"))); + } + + let contents = fs::read_to_string(&temp_path)?; + Ok(contents) +} + +#[cfg(test)] +mod tests { + use super::*; + use pretty_assertions::assert_eq; + use serial_test::serial; + #[cfg(unix)] + use tempfile::tempdir; + + struct EnvGuard { + visual: Option, + editor: Option, + } + + impl EnvGuard { + fn new() -> Self { + Self { + visual: env::var("VISUAL").ok(), + editor: env::var("EDITOR").ok(), + } + } + } + + impl Drop for EnvGuard { + fn drop(&mut self) { + restore_env("VISUAL", self.visual.take()); + restore_env("EDITOR", self.editor.take()); + } + } + + fn restore_env(key: &str, value: Option) { + match value { + Some(val) => unsafe { env::set_var(key, val) }, + None => unsafe { env::remove_var(key) }, + } + } + + #[test] + #[serial] + fn resolve_editor_prefers_visual() { + let _guard = EnvGuard::new(); + unsafe { + env::set_var("VISUAL", "vis"); + env::set_var("EDITOR", "ed"); + } + let cmd = resolve_editor_command().unwrap(); + assert_eq!(cmd, vec!["vis".to_string()]); + } + + #[test] + #[serial] + fn resolve_editor_errors_when_unset() { + let _guard = EnvGuard::new(); + unsafe { + env::remove_var("VISUAL"); + env::remove_var("EDITOR"); + } + assert!(matches!( + resolve_editor_command(), + Err(EditorError::MissingEditor) + )); + } + + #[tokio::test] + #[cfg(unix)] + async fn run_editor_returns_updated_content() { + use 
std::os::unix::fs::PermissionsExt; + + let dir = tempdir().unwrap(); + let script_path = dir.path().join("edit.sh"); + fs::write(&script_path, "#!/bin/sh\nprintf \"edited\" > \"$1\"\n").unwrap(); + let mut perms = fs::metadata(&script_path).unwrap().permissions(); + perms.set_mode(0o755); + fs::set_permissions(&script_path, perms).unwrap(); + + let cmd = vec![script_path.to_string_lossy().to_string()]; + let result = run_editor("seed", &cmd).await.unwrap(); + assert_eq!(result, "edited".to_string()); + } +} diff --git a/codex-rs/tui/src/history_cell.rs b/codex-rs/tui/src/history_cell.rs index 7f07e8aff43..3fb489c88ca 100644 --- a/codex-rs/tui/src/history_cell.rs +++ b/codex-rs/tui/src/history_cell.rs @@ -33,8 +33,8 @@ use codex_core::protocol::McpAuthStatus; use codex_core::protocol::McpInvocation; use codex_core::protocol::SessionConfiguredEvent; use codex_core::protocol::SubAgentInvocation; +use codex_core::protocol::SubAgentToolCallOutcome; use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig; -use codex_protocol::openai_models::ReasoningSummaryFormat; use codex_protocol::plan_tool::PlanItemArg; use codex_protocol::plan_tool::StepStatus; use codex_protocol::plan_tool::UpdatePlanArgs; @@ -1097,6 +1097,103 @@ pub(crate) struct McpToolCallCell { animations_enabled: bool, } +#[derive(Debug)] +pub(crate) struct FunctionToolCallCell { + tool_name: String, + arguments: String, + output: String, + success: bool, + duration: Option, +} + +impl FunctionToolCallCell { + pub(crate) fn new( + tool_name: String, + arguments: String, + output: String, + success: bool, + duration: Option, + ) -> Self { + Self { + tool_name, + arguments, + output, + success, + duration, + } + } + + fn invocation_text(&self) -> String { + if self.arguments.trim().is_empty() { + return self.tool_name.clone(); + } + format!("{} {}", self.tool_name, self.arguments) + } +} + +impl HistoryCell for FunctionToolCallCell { + fn display_lines(&self, width: u16) -> Vec> { + let mut 
lines: Vec> = Vec::new(); + + let bullet = if self.success { + "•".green().bold() + } else { + "•".red().bold() + }; + let header_text = "Called".bold(); + + let mut header_spans = vec![bullet, " ".into(), header_text, " ".into()]; + if let Some(duration) = self.duration { + header_spans.push(format!("({})", fmt_subagent_duration(duration)).dim()); + header_spans.push(" ".into()); + } + + let invocation_line: Line<'static> = Line::from(self.invocation_text()); + let mut compact_header = Line::from(header_spans.clone()); + let reserved = compact_header.width(); + let inline_invocation = + invocation_line.width() <= (width as usize).saturating_sub(reserved); + + if inline_invocation { + compact_header.extend(invocation_line.spans.clone()); + lines.push(compact_header); + } else { + header_spans.pop(); // drop trailing space for standalone header + lines.push(Line::from(header_spans)); + + let opts = RtOptions::new((width as usize).saturating_sub(4)) + .initial_indent("".into()) + .subsequent_indent(" ".into()); + let wrapped = word_wrap_line(&invocation_line, opts); + let body_lines: Vec> = wrapped.iter().map(line_to_static).collect(); + lines.extend(prefix_lines(body_lines, " └ ".dim(), " ".into())); + } + + if !self.output.trim().is_empty() { + let detail_wrap_width = (width as usize).saturating_sub(4).max(1); + let text = format_and_truncate_tool_result( + &self.output, + TOOL_CALL_MAX_LINES, + detail_wrap_width, + ); + let mut detail_lines: Vec> = Vec::new(); + for segment in text.split('\n') { + let line = Line::from(segment.to_string().dim()); + let wrapped = word_wrap_line( + &line, + RtOptions::new(detail_wrap_width) + .initial_indent("".into()) + .subsequent_indent(" ".into()), + ); + detail_lines.extend(wrapped.iter().map(line_to_static)); + } + lines.extend(prefix_lines(detail_lines, " └ ".dim(), " ".into())); + } + + lines + } +} + impl McpToolCallCell { pub(crate) fn new( call_id: String, @@ -1143,6 +1240,10 @@ impl McpToolCallCell { self.result = 
Some(Err("interrupted".to_string())); } + pub(crate) fn is_active(&self) -> bool { + self.result.is_none() + } + fn render_content_block(block: &mcp_types::ContentBlock, width: usize) -> String { match block { mcp_types::ContentBlock::TextContent(text) => { @@ -1264,6 +1365,7 @@ pub(crate) struct SubAgentToolCallCell { duration: Option, activity: Option, tokens: Option, + outcome: Option, result: Option>, } @@ -1276,6 +1378,7 @@ impl SubAgentToolCallCell { duration: None, activity: None, tokens: None, + outcome: None, result: None, } } @@ -1288,10 +1391,12 @@ impl SubAgentToolCallCell { &mut self, duration: Duration, tokens: Option, + outcome: Option, result: Result, ) { self.duration = Some(duration); self.tokens = tokens; + self.outcome = outcome; self.result = Some(result); } @@ -1320,6 +1425,7 @@ impl SubAgentToolCallCell { let elapsed = self.start_time.elapsed(); self.duration = Some(elapsed); self.tokens = None; + self.outcome = Some(SubAgentToolCallOutcome::Cancelled); self.result = Some(Err("interrupted".to_string())); } } @@ -1328,6 +1434,7 @@ impl HistoryCell for SubAgentToolCallCell { fn display_lines(&self, width: u16) -> Vec> { let status = self.success(); let indicator = match status { + _ if self.outcome == Some(SubAgentToolCallOutcome::Cancelled) => "⏹".dim(), Some(true) => "✓".green(), Some(false) => "✗".red(), None => "●".cyan(), @@ -1337,13 +1444,21 @@ impl HistoryCell for SubAgentToolCallCell { let elapsed = self.duration.unwrap_or_else(|| self.start_time.elapsed()); let elapsed = fmt_subagent_duration(elapsed); - let mut header_spans: Vec> = vec![ - indicator, - " ".into(), - "Subagent:".bold(), - " ".into(), - summary.into(), - ]; + let is_mini = self.invocation.model.as_deref() == Some("gpt-5.1-codex-mini"); + let is_explorer = self.invocation.label.starts_with("plan_explore_"); + + let mut header_spans: Vec> = vec![indicator, " ".into(), "Subagent".bold()]; + if is_explorer && !is_mini { + header_spans.push(" ".into()); + 
header_spans.push("[Explorer]".cyan().bold()); + } + if is_mini { + header_spans.push(" ".into()); + header_spans.push("[Mini]".magenta().bold()); + } + header_spans.push(":".bold()); + header_spans.push(" ".into()); + header_spans.push(summary.into()); let mut meta = format!(" ({elapsed}"); if let Some(tokens) = self.tokens.and_then(fmt_subagent_tokens) { meta.push_str(&format!(", {tokens} tok")); @@ -1357,6 +1472,7 @@ impl HistoryCell for SubAgentToolCallCell { let activity = self.activity.as_deref().unwrap_or("working…"); let activity = activity.strip_prefix("shell ").unwrap_or(activity).trim(); let detail_span = match &self.result { + _ if self.outcome == Some(SubAgentToolCallOutcome::Cancelled) => "cancelled".dim(), Some(Ok(_)) => "done".dim(), Some(Err(_)) => "failed".red(), None => activity.to_string().dim(), @@ -1444,6 +1560,7 @@ impl SubAgentToolCallGroupCell { call_id: &str, duration: Duration, tokens: Option, + outcome: Option, result: Result, ) -> bool { match self @@ -1453,7 +1570,7 @@ impl SubAgentToolCallGroupCell { .find(|cell| cell.call_id() == call_id) { Some(cell) => { - cell.complete(duration, tokens, result); + cell.complete(duration, tokens, outcome, result); true } None => false, @@ -1774,6 +1891,202 @@ pub(crate) fn new_info_event(message: String, hint: Option) -> PlainHist PlainHistoryCell { lines } } +#[derive(Debug, PartialEq, Eq)] +struct LspDiagnosticsEntry { + path: PathBuf, + line: u32, + character: u32, + message: String, +} + +#[derive(Debug)] +struct LspDiagnosticsSummary { + errors: usize, + warnings: usize, + entries: Vec, +} + +pub(crate) fn new_lsp_diagnostics_event(text: String, cwd: PathBuf) -> Option { + let summary = parse_lsp_diagnostics_summary(&text)?; + if summary.errors == 0 && summary.warnings == 0 { + return None; + } + if summary.entries.is_empty() { + return None; + } + + let mut lines: Vec> = Vec::new(); + + let icon = if summary.errors > 0 { + "!".red().bold() + } else { + "!".magenta().bold() + }; + 
lines.push(vec![icon, " ".into(), "LSP diagnostics".bold()].into()); + + let errors = if summary.errors > 0 { + summary.errors.to_string().red().bold() + } else { + summary.errors.to_string().dim() + }; + let warnings = if summary.warnings > 0 { + summary.warnings.to_string().magenta().bold() + } else { + summary.warnings.to_string().dim() + }; + lines.push( + vec![ + " ".into(), + "Errors: ".dim(), + errors, + ", ".dim(), + "Warnings: ".dim(), + warnings, + ] + .into(), + ); + lines.push(Line::from("")); + + for entry in summary.entries { + let display_path = display_path_for(&entry.path, &cwd); + let loc = format!("{display_path}:{}:{}", entry.line, entry.character); + lines.push(vec![" - ".dim(), loc.dim(), " ".into(), entry.message.into()].into()); + } + + Some(PlainHistoryCell { lines }) +} + +fn parse_lsp_diagnostics_summary(text: &str) -> Option { + let mut lines = text.lines(); + let header = lines.next()?.trim(); + if header != "## LSP diagnostics" && header != "LSP diagnostics" { + return None; + } + let counts = lines.next()?.trim(); + let errors = parse_lsp_count(counts, "Errors")?; + let warnings = parse_lsp_count(counts, "Warnings")?; + + let mut entries = Vec::new(); + for line in lines { + let line = line.trim(); + let Some(stripped) = line.strip_prefix("- ") else { + continue; + }; + if let Some(entry) = parse_lsp_diagnostic_entry(stripped) { + entries.push(entry); + } + } + + Some(LspDiagnosticsSummary { + errors, + warnings, + entries, + }) +} + +fn parse_lsp_count(line: &str, key: &str) -> Option { + let needle = format!("{key}:"); + let start = line.find(&needle)? + needle.len(); + let rest = line.get(start..)?.trim_start(); + let digits: String = rest.chars().take_while(char::is_ascii_digit).collect(); + digits.parse().ok() +} + +fn parse_lsp_diagnostic_entry(text: &str) -> Option { + let text = text.trim(); + // Format: ":: " + // + // `` can contain ':' on Windows (drive letters), and `` can also contain ':'. 
+ // Parse from the right by looking for ": " (character) and a preceding ":" (line). + let colon_positions: Vec = text.match_indices(':').map(|(idx, _)| idx).collect(); + if colon_positions.len() < 2 { + return None; + } + + for idx in (1..colon_positions.len()).rev() { + let colon_character = colon_positions[idx]; + let after_character = text.get(colon_character + 1..)?; + + let digits_len = after_character + .chars() + .take_while(char::is_ascii_digit) + .map(char::len_utf8) + .sum::(); + if digits_len == 0 { + continue; + } + + let after_digits = after_character.get(digits_len..)?; + if !after_digits.starts_with(' ') { + continue; + } + + let character = after_character.get(..digits_len)?.parse::().ok()?; + let message = after_digits.trim_start().to_string(); + + let colon_line = colon_positions[idx - 1]; + let line_str = text.get(colon_line + 1..colon_character)?; + if line_str.is_empty() || !line_str.chars().all(|c| c.is_ascii_digit()) { + continue; + } + let line = line_str.parse::().ok()?; + + let path = PathBuf::from(text.get(..colon_line)?); + return Some(LspDiagnosticsEntry { + path, + line, + character, + message, + }); + } + + None +} + +#[cfg(test)] +mod lsp_diagnostics_tests { + use super::*; + use pretty_assertions::assert_eq; + + #[test] + fn parse_lsp_diagnostic_entry_handles_colons_in_path_and_message() { + let entry = parse_lsp_diagnostic_entry( + r#"F:\GitHub\codex\codex-rs\mcp-server\tests\common\mcp_process.rs:43:14 Syntax Error: expected expression"#, + ) + .expect("should parse entry"); + assert_eq!( + entry, + LspDiagnosticsEntry { + path: PathBuf::from( + r#"F:\GitHub\codex\codex-rs\mcp-server\tests\common\mcp_process.rs"#, + ), + line: 43, + character: 14, + message: "Syntax Error: expected expression".to_string(), + } + ); + } + + #[test] + fn parse_lsp_diagnostic_entry_handles_messages_without_colons() { + let entry = parse_lsp_diagnostic_entry( + r#"F:\GitHub\codex\codex-rs\mcp-server\tests\common\mcp_process.rs:75:17 Variable `None` 
should have snake_case name, e.g. `none`"#, + ) + .expect("should parse entry"); + assert_eq!( + entry, + LspDiagnosticsEntry { + path: PathBuf::from( + r#"F:\GitHub\codex\codex-rs\mcp-server\tests\common\mcp_process.rs"#, + ), + line: 75, + character: 17, + message: "Variable `None` should have snake_case name, e.g. `none`".to_string(), + } + ); + } +} + pub(crate) fn new_error_event(message: String) -> PlainHistoryCell { // Use a hair space (U+200A) to create a subtle, near-invisible separation // before the text. VS16 is intentionally omitted to keep spacing tighter @@ -1898,39 +2211,28 @@ pub(crate) fn new_view_image_tool_call(path: PathBuf, cwd: &Path) -> PlainHistor PlainHistoryCell { lines } } -pub(crate) fn new_reasoning_summary_block( - full_reasoning_buffer: String, - reasoning_summary_format: ReasoningSummaryFormat, -) -> Box { - if reasoning_summary_format == ReasoningSummaryFormat::Experimental { - // Experimental format is following: - // ** header ** - // - // reasoning summary - // - // So we need to strip header from reasoning summary - let full_reasoning_buffer = full_reasoning_buffer.trim(); - if let Some(open) = full_reasoning_buffer.find("**") { - let after_open = &full_reasoning_buffer[(open + 2)..]; - if let Some(close) = after_open.find("**") { - let after_close_idx = open + 2 + close + 2; - // if we don't have anything beyond `after_close_idx` - // then we don't have a summary to inject into history - if after_close_idx < full_reasoning_buffer.len() { - let header_buffer = full_reasoning_buffer[..after_close_idx].to_string(); - let summary_buffer = full_reasoning_buffer[after_close_idx..].to_string(); - return Box::new(ReasoningSummaryCell::new( - header_buffer, - summary_buffer, - false, - )); - } +pub(crate) fn new_reasoning_summary_block(full_reasoning_buffer: String) -> Box { + let full_reasoning_buffer = full_reasoning_buffer.trim(); + if let Some(open) = full_reasoning_buffer.find("**") { + let after_open = 
&full_reasoning_buffer[(open + 2)..]; + if let Some(close) = after_open.find("**") { + let after_close_idx = open + 2 + close + 2; + // if we don't have anything beyond `after_close_idx` + // then we don't have a summary to inject into history + if after_close_idx < full_reasoning_buffer.len() { + let header_buffer = full_reasoning_buffer[..after_close_idx].to_string(); + let summary_buffer = full_reasoning_buffer[after_close_idx..].to_string(); + return Box::new(ReasoningSummaryCell::new( + header_buffer, + summary_buffer, + false, + )); } } } Box::new(ReasoningSummaryCell::new( "".to_string(), - full_reasoning_buffer, + full_reasoning_buffer.to_string(), true, )) } @@ -2048,13 +2350,13 @@ mod tests { use codex_core::config::ConfigBuilder; use codex_core::config::types::McpServerConfig; use codex_core::config::types::McpServerTransportConfig; - use codex_core::models_manager::manager::ModelsManager; use codex_core::protocol::McpAuthStatus; use codex_protocol::parse_command::ParsedCommand; use dirs::home_dir; use pretty_assertions::assert_eq; use serde_json::json; use std::collections::HashMap; + use std::time::Duration; use codex_core::protocol::ExecCommandSource; use mcp_types::CallToolResult; @@ -2087,16 +2389,84 @@ mod tests { render_lines(&cell.transcript_lines(u16::MAX)) } + #[test] + fn function_tool_call_cell_renders_invocation_and_output() { + let cell = FunctionToolCallCell::new( + "lsp_definition".to_string(), + r#"{"file_path":"C:\\repo\\src\\main.rs","line":1,"character":1}"#.to_string(), + r#"{"locations":[]}"#.to_string(), + true, + Some(Duration::from_millis(120)), + ); + let rendered = render_transcript(&cell).join("\n"); + assert!(rendered.contains("Called")); + assert!(rendered.contains("lsp_definition")); + assert!(rendered.contains("\"locations\"")); + } + #[test] fn subagent_summary_prefers_description() { let invocation = SubAgentInvocation { description: "Summarize the auth flow".to_string(), label: "alpha".to_string(), prompt: 
"Prompt".to_string(), + model: None, }; assert_eq!(subagent_summary(&invocation), "Summarize the auth flow"); } + #[test] + fn subagent_cell_renders_mini_badge() { + let invocation = SubAgentInvocation { + description: "Trace control flow".to_string(), + label: "mini".to_string(), + prompt: "Prompt".to_string(), + model: Some("gpt-5.1-codex-mini".to_string()), + }; + let mut cell = SubAgentToolCallCell::new("call-1".to_string(), invocation); + cell.duration = Some(Duration::from_secs(0)); + let lines = render_lines(&cell.display_lines(200)); + assert!( + lines + .first() + .is_some_and(|line| line.contains("Subagent [Mini]:")) + ); + } + + #[test] + fn subagent_cell_renders_explorer_badge() { + let invocation = SubAgentInvocation { + description: "Repo map".to_string(), + label: "plan_explore_1".to_string(), + prompt: "Prompt".to_string(), + model: None, + }; + let mut cell = SubAgentToolCallCell::new("call-1".to_string(), invocation); + cell.duration = Some(Duration::from_secs(0)); + let lines = render_lines(&cell.display_lines(200)); + assert!( + lines + .first() + .is_some_and(|line| line.contains("Subagent [Explorer]:")) + ); + } + + #[test] + fn subagent_cell_renders_mini_badge_when_explorer_and_mini() { + let invocation = SubAgentInvocation { + description: "Repo map".to_string(), + label: "plan_explore_1".to_string(), + prompt: "Prompt".to_string(), + model: Some("gpt-5.1-codex-mini".to_string()), + }; + let mut cell = SubAgentToolCallCell::new("call-1".to_string(), invocation); + cell.duration = Some(Duration::from_secs(0)); + let lines = render_lines(&cell.display_lines(200)); + let first = lines.first(); + assert!(first.is_some_and(|line| line.contains("Subagent [Mini]:"))); + assert!(first.is_some_and(|line| !line.contains("[Explorer]"))); + } + #[test] fn unified_exec_interaction_cell_renders_input() { let cell = @@ -2971,10 +3341,8 @@ mod tests { } #[test] fn reasoning_summary_block() { - let reasoning_format = ReasoningSummaryFormat::Experimental; let 
cell = new_reasoning_summary_block( "**High level reasoning**\n\nDetailed reasoning goes here.".to_string(), - reasoning_format, ); let rendered_display = render_lines(&cell.display_lines(80)); @@ -2986,11 +3354,7 @@ mod tests { #[test] fn reasoning_summary_block_returns_reasoning_cell_when_feature_disabled() { - let reasoning_format = ReasoningSummaryFormat::Experimental; - let cell = new_reasoning_summary_block( - "Detailed reasoning goes here.".to_string(), - reasoning_format, - ); + let cell = new_reasoning_summary_block("Detailed reasoning goes here.".to_string()); let rendered = render_transcript(cell.as_ref()); assert_eq!(rendered, vec!["• Detailed reasoning goes here."]); @@ -3001,17 +3365,8 @@ mod tests { let mut config = test_config().await; config.model = Some("gpt-3.5-turbo".to_string()); config.model_supports_reasoning_summaries = Some(true); - config.model_reasoning_summary_format = Some(ReasoningSummaryFormat::Experimental); - let model_family = - ModelsManager::construct_model_family_offline(&config.model.clone().unwrap(), &config); - assert_eq!( - model_family.reasoning_summary_format, - ReasoningSummaryFormat::Experimental - ); - let cell = new_reasoning_summary_block( "**High level reasoning**\n\nDetailed reasoning goes here.".to_string(), - model_family.reasoning_summary_format, ); let rendered_display = render_lines(&cell.display_lines(80)); @@ -3020,11 +3375,8 @@ mod tests { #[test] fn reasoning_summary_block_falls_back_when_header_is_missing() { - let reasoning_format = ReasoningSummaryFormat::Experimental; - let cell = new_reasoning_summary_block( - "**High level reasoning without closing".to_string(), - reasoning_format, - ); + let cell = + new_reasoning_summary_block("**High level reasoning without closing".to_string()); let rendered = render_transcript(cell.as_ref()); assert_eq!(rendered, vec!["• **High level reasoning without closing"]); @@ -3032,18 +3384,14 @@ mod tests { #[test] fn 
reasoning_summary_block_falls_back_when_summary_is_missing() { - let reasoning_format = ReasoningSummaryFormat::Experimental; - let cell = new_reasoning_summary_block( - "**High level reasoning without closing**".to_string(), - reasoning_format.clone(), - ); + let cell = + new_reasoning_summary_block("**High level reasoning without closing**".to_string()); let rendered = render_transcript(cell.as_ref()); assert_eq!(rendered, vec!["• High level reasoning without closing"]); let cell = new_reasoning_summary_block( "**High level reasoning without closing**\n\n ".to_string(), - reasoning_format, ); let rendered = render_transcript(cell.as_ref()); @@ -3052,10 +3400,8 @@ mod tests { #[test] fn reasoning_summary_block_splits_header_and_summary_when_present() { - let reasoning_format = ReasoningSummaryFormat::Experimental; let cell = new_reasoning_summary_block( "**High level plan**\n\nWe should fix the bug next.".to_string(), - reasoning_format, ); let rendered_display = render_lines(&cell.display_lines(80)); diff --git a/codex-rs/tui/src/key_hint.rs b/codex-rs/tui/src/key_hint.rs index f277f073845..7147a03851a 100644 --- a/codex-rs/tui/src/key_hint.rs +++ b/codex-rs/tui/src/key_hint.rs @@ -75,8 +75,17 @@ impl From for Span<'static> { impl From<&KeyBinding> for Span<'static> { fn from(binding: &KeyBinding) -> Self { let KeyBinding { key, modifiers } = binding; - let modifiers = modifiers_to_string(*modifiers); - let key = match key { + // `KeyCode::BackTab` is effectively "shift + tab", but crossterm formats it as "back tab", + // which is confusing in our UI hints. 
+ let mut display_key = *key; + let mut display_modifiers = *modifiers; + if display_key == KeyCode::BackTab { + display_key = KeyCode::Tab; + display_modifiers |= KeyModifiers::SHIFT; + } + + let modifiers = modifiers_to_string(display_modifiers); + let key = match display_key { KeyCode::Enter => "enter".to_string(), KeyCode::Char(' ') => "space".to_string(), KeyCode::Up => "↑".to_string(), @@ -85,7 +94,7 @@ impl From<&KeyBinding> for Span<'static> { KeyCode::Right => "→".to_string(), KeyCode::PageUp => "pgup".to_string(), KeyCode::PageDown => "pgdn".to_string(), - _ => format!("{key}").to_ascii_lowercase(), + _ => format!("{display_key}").to_ascii_lowercase(), }; Span::styled(format!("{modifiers}{key}"), key_hint_style()) } diff --git a/codex-rs/tui/src/lib.rs b/codex-rs/tui/src/lib.rs index f1d16f1f72b..9ea56e88a1b 100644 --- a/codex-rs/tui/src/lib.rs +++ b/codex-rs/tui/src/lib.rs @@ -47,6 +47,7 @@ pub mod custom_terminal; mod diff_render; mod exec_cell; mod exec_command; +mod external_editor; mod file_search; mod frames; mod get_git_diff; @@ -54,6 +55,7 @@ mod history_cell; pub mod insert_history; mod key_hint; pub mod live_wrap; +mod lsp_status; mod markdown; mod markdown_render; mod markdown_stream; @@ -272,7 +274,10 @@ pub async fn run_main( let file_layer = tracing_subscriber::fmt::layer() .with_writer(non_blocking) - .with_target(false) + // `with_target(true)` is the default, but we previously disabled it for file output. + // Keep it enabled so we can selectively enable targets via `RUST_LOG=...` and then + // grep for a specific module/target while troubleshooting. 
+ .with_target(true) .with_ansi(false) .with_span_events(tracing_subscriber::fmt::format::FmtSpan::FULL) .with_filter(env_filter()); diff --git a/codex-rs/tui/src/lsp_status.rs b/codex-rs/tui/src/lsp_status.rs new file mode 100644 index 00000000000..43f0df0607b --- /dev/null +++ b/codex-rs/tui/src/lsp_status.rs @@ -0,0 +1,127 @@ +use codex_lsp::LspServerSource; +use codex_lsp::LspStatus; +use ratatui::style::Style; +use ratatui::style::Stylize as _; +use ratatui::text::Line; +use ratatui::text::Span; + +pub(crate) fn render(status: &LspStatus) -> Vec> { + let mut lines: Vec> = Vec::new(); + + lines.push(Line::from("LSP status".bold())); + + let enabled = status.enabled.to_string(); + lines.push(vec!["enabled: ".dim(), enabled.into()].into()); + + lines.push(vec!["root: ".dim(), status.root.display().to_string().into()].into()); + + let workspace = if status.workspace_initialized { + "initialized".to_string() + } else { + "not initialized".to_string() + }; + lines.push(vec!["workspace: ".dim(), workspace.into()].into()); + + if !status.ignored_globs.is_empty() { + lines.push(vec!["ignored: ".dim(), status.ignored_globs.join(", ").into()].into()); + } + + lines.push( + vec![ + "max_file_bytes: ".dim(), + status.max_file_bytes.to_string().into(), + ] + .into(), + ); + + lines.push(Line::from("")); + lines.push(Line::from("Servers".bold())); + + for lang in &status.languages { + let supports_pull_diagnostics = lang.supports_pull_diagnostics == Some(true); + let header = vec![ + "• ".into(), + Span::from(lang.language_id.clone()).bold(), + " ".into(), + "running=".dim(), + if lang.running { + "yes".green() + } else { + "no".dim() + }, + if supports_pull_diagnostics { + ", caps=pull-diags".dim() + } else { + Span::default() + }, + ]; + lines.push(header.into()); + + let effective = lang.effective.as_ref().map(|(cfg, source)| { + let source = match source { + LspServerSource::Config => "config", + LspServerSource::Autodetect => "autodetect", + }; + format!("{} 
({source})", cfg.command) + }); + lines.extend(render_kv( + "effective", + effective.as_deref().unwrap_or("(none)"), + 2, + )); + if let Some((cfg, _)) = &lang.effective + && !cfg.args.is_empty() + { + lines.extend(render_kv("args", &cfg.args.join(" "), 2)); + } + + if let Some(cfg) = &lang.configured { + lines.extend(render_kv("configured", &cfg.command, 2)); + if !cfg.args.is_empty() { + lines.extend(render_kv("args", &cfg.args.join(" "), 2)); + } + } else { + lines.extend(render_kv("configured", "(none)", 2)); + } + + match (&lang.autodetected, &lang.effective) { + (Some(det), Some((eff, LspServerSource::Autodetect))) + if det.command == eff.command && det.args == eff.args => + { + // Skip the redundant "detected" entry when it matches the effective autodetected + // server config. + } + (Some(det), _) => { + lines.extend(render_kv("detected", &det.command, 2)); + if !det.args.is_empty() { + lines.extend(render_kv("args", &det.args.join(" "), 2)); + } + } + (None, _) => { + lines.extend(render_kv("detected", "(not found on PATH)", 2)); + } + } + + lines.push(Line::from("")); + } + + while matches!(lines.last(), Some(line) if line.spans.is_empty()) { + lines.pop(); + } + + lines +} + +fn render_kv(key: &str, value: &str, indent: usize) -> Vec> { + let prefix = " ".repeat(indent); + let key = format!("{prefix}{key}: "); + + let key_span: Span<'static> = Span::styled(key, Style::new().dim()); + let value_span: Span<'static> = if value.starts_with('(') { + Span::styled(value.to_string(), Style::new().dim()) + } else { + Span::from(value.to_string()) + }; + + vec![vec![key_span, value_span].into()] +} diff --git a/codex-rs/tui/src/slash_command.rs b/codex-rs/tui/src/slash_command.rs index be6c5e9b9d8..bf8178ee95a 100644 --- a/codex-rs/tui/src/slash_command.rs +++ b/codex-rs/tui/src/slash_command.rs @@ -14,6 +14,7 @@ pub enum SlashCommand { // more frequently used commands should be listed first. 
Model, PlanModel, + SubagentModel, Approvals, Experimental, Skills, @@ -23,10 +24,11 @@ pub enum SlashCommand { Resume, Init, Compact, - Undo, + // Undo, Diff, Mention, Status, + Lsp, Mcp, Logout, Quit, @@ -48,15 +50,19 @@ impl SlashCommand { SlashCommand::Review => "review my current changes and find issues", SlashCommand::Plan => "plan a task before making changes", SlashCommand::Resume => "resume a saved chat", - SlashCommand::Undo => "ask Codex to undo a turn", + // SlashCommand::Undo => "ask Codex to undo a turn", SlashCommand::Quit | SlashCommand::Exit => "exit Codex", SlashCommand::Diff => "show git diff (including untracked files)", SlashCommand::Mention => "mention a file", SlashCommand::Skills => "use skills to improve how Codex performs specific tasks", SlashCommand::Status => "show current session configuration and token usage", + SlashCommand::Lsp => "show current LSP status (detected/configured/running)", SlashCommand::Ps => "list background terminals", SlashCommand::Model => "choose what model and reasoning effort to use", SlashCommand::PlanModel => "choose what model and reasoning effort to use for /plan", + SlashCommand::SubagentModel => { + "choose what model and reasoning effort to use for spawned subagents" + } SlashCommand::Approvals => "choose what Codex can do without approval", SlashCommand::Experimental => "toggle beta features", SlashCommand::Mcp => "list configured MCP tools", @@ -79,9 +85,10 @@ impl SlashCommand { | SlashCommand::Resume | SlashCommand::Init | SlashCommand::Compact - | SlashCommand::Undo + // | SlashCommand::Undo | SlashCommand::Model | SlashCommand::PlanModel + | SlashCommand::SubagentModel | SlashCommand::Approvals | SlashCommand::Experimental | SlashCommand::Review @@ -91,6 +98,7 @@ impl SlashCommand { | SlashCommand::Mention | SlashCommand::Skills | SlashCommand::Status + | SlashCommand::Lsp | SlashCommand::Ps | SlashCommand::Mcp | SlashCommand::Feedback diff --git 
a/codex-rs/tui/src/snapshots/codex_tui__status_indicator_widget__tests__renders_wrapped_details_panama_two_lines.snap b/codex-rs/tui/src/snapshots/codex_tui__status_indicator_widget__tests__renders_wrapped_details_panama_two_lines.snap new file mode 100644 index 00000000000..565d5451fff --- /dev/null +++ b/codex-rs/tui/src/snapshots/codex_tui__status_indicator_widget__tests__renders_wrapped_details_panama_two_lines.snap @@ -0,0 +1,7 @@ +--- +source: tui/src/status_indicator_widget.rs +expression: terminal.backend() +--- +"• Working (0s) " +" └ A man a plan a canal " +" panama " diff --git a/codex-rs/tui/src/status/rate_limits.rs b/codex-rs/tui/src/status/rate_limits.rs index e8dc689a628..3fae3ac295c 100644 --- a/codex-rs/tui/src/status/rate_limits.rs +++ b/codex-rs/tui/src/status/rate_limits.rs @@ -1,4 +1,5 @@ use crate::chatwidget::get_limits_duration; +use crate::text_formatting::capitalize_first; use super::helpers::format_reset_timestamp; use chrono::DateTime; @@ -221,15 +222,3 @@ fn format_credit_balance(raw: &str) -> Option { None } - -fn capitalize_first(label: &str) -> String { - let mut chars = label.chars(); - match chars.next() { - Some(first) => { - let mut capitalized = first.to_uppercase().collect::(); - capitalized.push_str(chars.as_str()); - capitalized - } - None => String::new(), - } -} diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap index a74aab3dd05..6cd3aedfcf5 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭─────────────────────────────────────────────────────────────────────╮ -│ >_ 
Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_credits_and_limits.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_credits_and_limits.snap index c841ebe6722..0e946b808ab 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_credits_and_limits.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_credits_and_limits.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap index b5de8b0efe3..393bc8431f8 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭────────────────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap index 83069005d12..a4abeeb7fe9 100644 --- 
a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap index 394f3b8eaf2..77a3c125169 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap index 394f3b8eaf2..77a3c125169 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit 
https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap index 8106bf393de..2c8425de966 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap index 581a5c84223..0e8e2bd880c 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭────────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui/src/status_indicator_widget.rs b/codex-rs/tui/src/status_indicator_widget.rs index f391b267ac9..ebf943b8194 100644 --- a/codex-rs/tui/src/status_indicator_widget.rs +++ b/codex-rs/tui/src/status_indicator_widget.rs @@ -10,7 +10,11 @@ use ratatui::buffer::Buffer; use ratatui::layout::Rect; use 
ratatui::style::Stylize; use ratatui::text::Line; +use ratatui::text::Span; +use ratatui::text::Text; +use ratatui::widgets::Paragraph; use ratatui::widgets::WidgetRef; +use unicode_width::UnicodeWidthStr; use crate::app_event::AppEvent; use crate::app_event_sender::AppEventSender; @@ -18,11 +22,18 @@ use crate::exec_cell::spinner; use crate::key_hint; use crate::render::renderable::Renderable; use crate::shimmer::shimmer_spans; +use crate::text_formatting::capitalize_first; use crate::tui::FrameRequester; +use crate::wrapping::RtOptions; +use crate::wrapping::word_wrap_lines; + +const DETAILS_MAX_LINES: usize = 3; +const DETAILS_PREFIX: &str = " └ "; pub(crate) struct StatusIndicatorWidget { /// Animated header text (defaults to "Working"). header: String, + details: Option, detail_lines: Vec>, show_interrupt_hint: bool, @@ -59,6 +70,7 @@ impl StatusIndicatorWidget { ) -> Self { Self { header: String::from("Working"), + details: None, detail_lines: Vec::new(), show_interrupt_hint: true, elapsed_running: Duration::ZERO, @@ -80,6 +92,13 @@ impl StatusIndicatorWidget { self.header = header; } + /// Update the details text shown below the header. + pub(crate) fn update_details(&mut self, details: Option) { + self.details = details + .filter(|details| !details.is_empty()) + .map(|details| capitalize_first(details.trim_start())); + } + pub(crate) fn set_detail_lines(&mut self, lines: Vec>) { self.detail_lines = lines; } @@ -93,6 +112,11 @@ impl StatusIndicatorWidget { &self.header } + #[cfg(test)] + pub(crate) fn details(&self) -> Option<&str> { + self.details.as_deref() + } + pub(crate) fn set_interrupt_hint_visible(&mut self, visible: bool) { self.show_interrupt_hint = visible; } @@ -142,11 +166,44 @@ impl StatusIndicatorWidget { pub fn elapsed_seconds(&self) -> u64 { self.elapsed_seconds_at(Instant::now()) } + + /// Wrap the details text into a fixed width and return the lines, truncating if necessary. 
+ fn wrapped_details_lines(&self, width: u16) -> Vec> { + let Some(details) = self.details.as_deref() else { + return Vec::new(); + }; + if width == 0 { + return Vec::new(); + } + + let prefix_width = UnicodeWidthStr::width(DETAILS_PREFIX); + let opts = RtOptions::new(usize::from(width)) + .initial_indent(Line::from(DETAILS_PREFIX.dim())) + .subsequent_indent(Line::from(Span::from(" ".repeat(prefix_width)).dim())) + .break_words(true); + + let mut out = word_wrap_lines(details.lines().map(|line| vec![line.dim()]), opts); + + if out.len() > DETAILS_MAX_LINES { + out.truncate(DETAILS_MAX_LINES); + let content_width = usize::from(width).saturating_sub(prefix_width).max(1); + let max_base_len = content_width.saturating_sub(1); + if let Some(last) = out.last_mut() + && let Some(span) = last.spans.last_mut() + { + let trimmed: String = span.content.as_ref().chars().take(max_base_len).collect(); + *span = format!("{trimmed}…").dim(); + } + } + + out + } } impl Renderable for StatusIndicatorWidget { - fn desired_height(&self, _width: u16) -> u16 { - 1u16.saturating_add(self.detail_lines.len().try_into().unwrap_or(u16::MAX)) + fn desired_height(&self, width: u16) -> u16 { + 1u16.saturating_add(u16::try_from(self.wrapped_details_lines(width).len()).unwrap_or(0)) + .saturating_add(u16::try_from(self.detail_lines.len()).unwrap_or(u16::MAX)) } fn render(&self, area: Rect, buf: &mut Buffer) { @@ -180,23 +237,21 @@ impl Renderable for StatusIndicatorWidget { spans.push(format!("({pretty_elapsed})").dim()); } - let mut row = area; - row.height = 1; - Line::from(spans).render_ref(row, buf); - - for (idx, line) in self.detail_lines.iter().enumerate() { - let y = area.y.saturating_add((idx as u16).saturating_add(1)); - if y >= area.y.saturating_add(area.height) { - break; - } - let detail_area = Rect { - x: area.x, - y, - width: area.width, - height: 1, - }; - line.render_ref(detail_area, buf); + let mut lines = Vec::new(); + lines.push(Line::from(spans)); + let mut remaining = 
usize::from(area.height.saturating_sub(1)); + if remaining > 0 { + // If there is enough space, add the details lines below the header. + let details = self.wrapped_details_lines(area.width); + let take = details.len().min(remaining); + lines.extend(details.into_iter().take(take)); + remaining = remaining.saturating_sub(take); } + if remaining > 0 { + lines.extend(self.detail_lines.iter().take(remaining).cloned()); + } + + Paragraph::new(Text::from(lines)).render_ref(area, buf); } } @@ -255,6 +310,27 @@ mod tests { insta::assert_snapshot!(terminal.backend()); } + #[test] + fn renders_wrapped_details_panama_two_lines() { + let (tx_raw, _rx) = unbounded_channel::(); + let tx = AppEventSender::new(tx_raw); + let mut w = StatusIndicatorWidget::new(tx, crate::tui::FrameRequester::test_dummy(), false); + w.update_details(Some("A man a plan a canal panama".to_string())); + w.set_interrupt_hint_visible(false); + + // Freeze time-dependent rendering (elapsed + spinner) to keep the snapshot stable. + w.is_paused = true; + w.elapsed_running = Duration::ZERO; + + // Prefix is 4 columns, so a width of 30 yields a content width of 26: one column + // short of fitting the whole phrase (27 cols), forcing exactly one wrap without ellipsis. 
+ let mut terminal = Terminal::new(TestBackend::new(30, 3)).expect("terminal"); + terminal + .draw(|f| w.render(f.area(), f.buffer_mut())) + .expect("draw"); + insta::assert_snapshot!(terminal.backend()); + } + #[test] fn timer_pauses_when_requested() { let (tx_raw, _rx) = unbounded_channel::(); @@ -276,4 +352,20 @@ mod tests { let after_resume = widget.elapsed_seconds_at(baseline + Duration::from_secs(13)); assert_eq!(after_resume, before_pause + 3); } + + #[test] + fn details_overflow_adds_ellipsis() { + let (tx_raw, _rx) = unbounded_channel::(); + let tx = AppEventSender::new(tx_raw); + let mut w = StatusIndicatorWidget::new(tx, crate::tui::FrameRequester::test_dummy(), true); + w.update_details(Some("abcd abcd abcd abcd".to_string())); + + let lines = w.wrapped_details_lines(6); + assert_eq!(lines.len(), DETAILS_MAX_LINES); + let last = lines.last().expect("expected last details line"); + assert!( + last.spans[1].content.as_ref().ends_with("…"), + "expected ellipsis in last line: {last:?}" + ); + } } diff --git a/codex-rs/tui/src/text_formatting.rs b/codex-rs/tui/src/text_formatting.rs index 91d1c84f22d..f747b2fef2a 100644 --- a/codex-rs/tui/src/text_formatting.rs +++ b/codex-rs/tui/src/text_formatting.rs @@ -2,6 +2,18 @@ use unicode_segmentation::UnicodeSegmentation; use unicode_width::UnicodeWidthChar; use unicode_width::UnicodeWidthStr; +pub(crate) fn capitalize_first(input: &str) -> String { + let mut chars = input.chars(); + match chars.next() { + Some(first) => { + let mut capitalized = first.to_uppercase().collect::(); + capitalized.push_str(chars.as_str()); + capitalized + } + None => String::new(), + } +} + /// Truncate a tool result to fit within the given height and width. If the text is valid JSON, we format it in a compact way before truncating. /// This is a best-effort approach that may not work perfectly for text where 1 grapheme is rendered as multiple terminal cells. 
pub(crate) fn format_and_truncate_tool_result( diff --git a/codex-rs/tui/src/tui.rs b/codex-rs/tui/src/tui.rs index 0770b76648a..ff9ce94d24f 100644 --- a/codex-rs/tui/src/tui.rs +++ b/codex-rs/tui/src/tui.rs @@ -1,4 +1,5 @@ use std::fmt; +use std::future::Future; use std::io::IsTerminal; use std::io::Result; use std::io::Stdout; @@ -46,6 +47,7 @@ use crate::tui::event_stream::TuiEventStream; use crate::tui::job_control::SuspendContext; mod event_stream; +mod frame_rate_limiter; mod frame_requester; #[cfg(unix)] mod job_control; @@ -118,18 +120,86 @@ impl Command for DisableAlternateScroll { } } -/// Restore the terminal to its original state. -/// Inverse of `set_modes`. -pub fn restore() -> Result<()> { +fn restore_common(should_disable_raw_mode: bool) -> Result<()> { // Pop may fail on platforms that didn't support the push; ignore errors. let _ = execute!(stdout(), PopKeyboardEnhancementFlags); execute!(stdout(), DisableBracketedPaste)?; let _ = execute!(stdout(), DisableFocusChange); - disable_raw_mode()?; + if should_disable_raw_mode { + disable_raw_mode()?; + } let _ = execute!(stdout(), crossterm::cursor::Show); Ok(()) } +/// Restore the terminal to its original state. +/// Inverse of `set_modes`. +pub fn restore() -> Result<()> { + let should_disable_raw_mode = true; + restore_common(should_disable_raw_mode) +} + +/// Restore the terminal to its original state, but keep raw mode enabled. +pub fn restore_keep_raw() -> Result<()> { + let should_disable_raw_mode = false; + restore_common(should_disable_raw_mode) +} + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum RestoreMode { + #[allow(dead_code)] + Full, // Fully restore the terminal (disables raw mode). + KeepRaw, // Restore the terminal but keep raw mode enabled. 
+} + +impl RestoreMode { + fn restore(self) -> Result<()> { + match self { + RestoreMode::Full => restore(), + RestoreMode::KeepRaw => restore_keep_raw(), + } + } +} + +/// Flush the underlying stdin buffer to clear any input that may be buffered at the terminal level. +/// For example, clears any user input that occurred while the crossterm EventStream was dropped. +#[cfg(unix)] +fn flush_terminal_input_buffer() { + // Safety: flushing the stdin queue is safe and does not move ownership. + let result = unsafe { libc::tcflush(libc::STDIN_FILENO, libc::TCIFLUSH) }; + if result != 0 { + let err = std::io::Error::last_os_error(); + tracing::warn!("failed to tcflush stdin: {err}"); + } +} + +/// Flush the underlying stdin buffer to clear any input that may be buffered at the terminal level. +/// For example, clears any user input that occurred while the crossterm EventStream was dropped. +#[cfg(windows)] +fn flush_terminal_input_buffer() { + use windows_sys::Win32::Foundation::GetLastError; + use windows_sys::Win32::Foundation::INVALID_HANDLE_VALUE; + use windows_sys::Win32::System::Console::FlushConsoleInputBuffer; + use windows_sys::Win32::System::Console::GetStdHandle; + use windows_sys::Win32::System::Console::STD_INPUT_HANDLE; + + let handle = unsafe { GetStdHandle(STD_INPUT_HANDLE) }; + if handle == INVALID_HANDLE_VALUE || handle == 0 { + let err = unsafe { GetLastError() }; + tracing::warn!("failed to get stdin handle for flush: error {err}"); + return; + } + + let result = unsafe { FlushConsoleInputBuffer(handle) }; + if result == 0 { + let err = unsafe { GetLastError() }; + tracing::warn!("failed to flush stdin buffer: error {err}"); + } +} + +#[cfg(not(any(unix, windows)))] +pub(crate) fn flush_terminal_input_buffer() {} + /// Initialize the terminal (inline viewport; history stays in normal scrollback) pub fn init() -> Result { if !stdin().is_terminal() { @@ -215,18 +285,60 @@ impl Tui { self.enhanced_keys_supported } - // todo(sayan) unused for now; intend 
to use to enable opening external editors - #[allow(unused)] + pub fn is_alt_screen_active(&self) -> bool { + self.alt_screen_active.load(Ordering::Relaxed) + } + + // Drop crossterm EventStream to avoid stdin conflicts with other processes. pub fn pause_events(&mut self) { self.event_broker.pause_events(); } - // todo(sayan) unused for now; intend to use to enable opening external editors - #[allow(unused)] + // Resume crossterm EventStream to resume stdin polling. + // Inverse of `pause_events`. pub fn resume_events(&mut self) { self.event_broker.resume_events(); } + /// Temporarily restore terminal state to run an external interactive program `f`. + /// + /// This pauses crossterm's stdin polling by dropping the underlying event stream, restores + /// terminal modes (optionally keeping raw mode enabled), then re-applies Codex TUI modes and + /// flushes pending stdin input before resuming events. + pub async fn with_restored(&mut self, mode: RestoreMode, f: F) -> R + where + F: FnOnce() -> Fut, + Fut: Future, + { + // Pause crossterm events to avoid stdin conflicts with external program `f`. + self.pause_events(); + + // Leave alt screen if active to avoid conflicts with external program `f`. + let was_alt_screen = self.is_alt_screen_active(); + if was_alt_screen { + let _ = self.leave_alt_screen(); + } + + if let Err(err) = mode.restore() { + tracing::warn!("failed to restore terminal modes before external program: {err}"); + } + + let output = f().await; + + if let Err(err) = set_modes() { + tracing::warn!("failed to re-enable terminal modes after external program: {err}"); + } + // After the external program `f` finishes, reset terminal state and flush any buffered keypresses. + flush_terminal_input_buffer(); + + if was_alt_screen { + let _ = self.enter_alt_screen(); + } + + self.resume_events(); + output + } + /// Emit a desktop notification now if the terminal is unfocused. /// Returns true if a notification was posted. 
pub fn notify(&mut self, message: impl AsRef) -> bool { diff --git a/codex-rs/tui/src/tui/frame_rate_limiter.rs b/codex-rs/tui/src/tui/frame_rate_limiter.rs new file mode 100644 index 00000000000..56dd752e643 --- /dev/null +++ b/codex-rs/tui/src/tui/frame_rate_limiter.rs @@ -0,0 +1,62 @@ +//! Limits how frequently frame draw notifications may be emitted. +//! +//! Widgets sometimes call `FrameRequester::schedule_frame()` more frequently than a user can +//! perceive. This limiter clamps draw notifications to a maximum of 60 FPS to avoid wasted work. +//! +//! This is intentionally a small, pure helper so it can be unit-tested in isolation and used by +//! the async frame scheduler without adding complexity to the app/event loop. + +use std::time::Duration; +use std::time::Instant; + +/// A 60 FPS minimum frame interval (≈16.67ms). +pub(super) const MIN_FRAME_INTERVAL: Duration = Duration::from_nanos(16_666_667); + +/// Remembers the most recent emitted draw, allowing deadlines to be clamped forward. +#[derive(Debug, Default)] +pub(super) struct FrameRateLimiter { + last_emitted_at: Option, +} + +impl FrameRateLimiter { + /// Returns `requested`, clamped forward if it would exceed the maximum frame rate. + pub(super) fn clamp_deadline(&self, requested: Instant) -> Instant { + let Some(last_emitted_at) = self.last_emitted_at else { + return requested; + }; + let min_allowed = last_emitted_at + .checked_add(MIN_FRAME_INTERVAL) + .unwrap_or(last_emitted_at); + requested.max(min_allowed) + } + + /// Records that a draw notification was emitted at `emitted_at`. 
+ pub(super) fn mark_emitted(&mut self, emitted_at: Instant) { + self.last_emitted_at = Some(emitted_at); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use pretty_assertions::assert_eq; + + #[test] + fn default_does_not_clamp() { + let t0 = Instant::now(); + let limiter = FrameRateLimiter::default(); + assert_eq!(limiter.clamp_deadline(t0), t0); + } + + #[test] + fn clamps_to_min_interval_since_last_emit() { + let t0 = Instant::now(); + let mut limiter = FrameRateLimiter::default(); + + assert_eq!(limiter.clamp_deadline(t0), t0); + limiter.mark_emitted(t0); + + let too_soon = t0 + Duration::from_millis(1); + assert_eq!(limiter.clamp_deadline(too_soon), t0 + MIN_FRAME_INTERVAL); + } +} diff --git a/codex-rs/tui/src/tui/frame_requester.rs b/codex-rs/tui/src/tui/frame_requester.rs index 4f7886aa22c..88db9d71f3c 100644 --- a/codex-rs/tui/src/tui/frame_requester.rs +++ b/codex-rs/tui/src/tui/frame_requester.rs @@ -18,6 +18,8 @@ use std::time::Instant; use tokio::sync::broadcast; use tokio::sync::mpsc; +use super::frame_rate_limiter::FrameRateLimiter; + /// A requester for scheduling future frame draws on the TUI event loop. /// /// This is the handler side of an actor/handler pair with `FrameScheduler`, which coalesces @@ -68,15 +70,23 @@ impl FrameRequester { /// A scheduler for coalescing frame draw requests and notifying the TUI event loop. /// /// This type is internal to `FrameRequester` and is spawned as a task to handle scheduling logic. +/// +/// To avoid wasted redraw work, draw notifications are clamped to a maximum of 60 FPS (see +/// [`FrameRateLimiter`]). struct FrameScheduler { receiver: mpsc::UnboundedReceiver, draw_tx: broadcast::Sender<()>, + rate_limiter: FrameRateLimiter, } impl FrameScheduler { /// Create a new FrameScheduler with the provided receiver and draw notification sender. 
fn new(receiver: mpsc::UnboundedReceiver, draw_tx: broadcast::Sender<()>) -> Self { - Self { receiver, draw_tx } + Self { + receiver, + draw_tx, + rate_limiter: FrameRateLimiter::default(), + } } /// Run the scheduling loop, coalescing frame requests and notifying the TUI event loop. @@ -97,6 +107,7 @@ impl FrameScheduler { // All senders dropped; exit the scheduler. break }; + let draw_at = self.rate_limiter.clamp_deadline(draw_at); next_deadline = Some(next_deadline.map_or(draw_at, |cur| cur.min(draw_at))); // Do not send a draw immediately here. By continuing the loop, @@ -107,6 +118,7 @@ impl FrameScheduler { _ = &mut deadline => { if next_deadline.is_some() { next_deadline = None; + self.rate_limiter.mark_emitted(target); let _ = self.draw_tx.send(()); } } @@ -116,6 +128,7 @@ impl FrameScheduler { } #[cfg(test)] mod tests { + use super::super::frame_rate_limiter::MIN_FRAME_INTERVAL; use super::*; use tokio::time; use tokio_util::time::FutureExt; @@ -218,6 +231,98 @@ mod tests { assert!(second.is_err(), "unexpected extra draw received"); } + #[tokio::test(flavor = "current_thread", start_paused = true)] + async fn test_limits_draw_notifications_to_60fps() { + let (draw_tx, mut draw_rx) = broadcast::channel(16); + let requester = FrameRequester::new(draw_tx); + + requester.schedule_frame(); + time::advance(Duration::from_millis(1)).await; + let first = draw_rx + .recv() + .timeout(Duration::from_millis(50)) + .await + .expect("timed out waiting for first draw"); + assert!(first.is_ok(), "broadcast closed unexpectedly"); + + requester.schedule_frame(); + time::advance(Duration::from_millis(1)).await; + let early = draw_rx.recv().timeout(Duration::from_millis(1)).await; + assert!( + early.is_err(), + "draw fired too early; expected max 60fps (min interval {MIN_FRAME_INTERVAL:?})" + ); + + time::advance(MIN_FRAME_INTERVAL).await; + let second = draw_rx + .recv() + .timeout(Duration::from_millis(50)) + .await + .expect("timed out waiting for second draw"); + 
assert!(second.is_ok(), "broadcast closed unexpectedly"); + } + + #[tokio::test(flavor = "current_thread", start_paused = true)] + async fn test_rate_limit_clamps_early_delayed_requests() { + let (draw_tx, mut draw_rx) = broadcast::channel(16); + let requester = FrameRequester::new(draw_tx); + + requester.schedule_frame(); + time::advance(Duration::from_millis(1)).await; + let first = draw_rx + .recv() + .timeout(Duration::from_millis(50)) + .await + .expect("timed out waiting for first draw"); + assert!(first.is_ok(), "broadcast closed unexpectedly"); + + requester.schedule_frame_in(Duration::from_millis(1)); + + time::advance(Duration::from_millis(10)).await; + let too_early = draw_rx.recv().timeout(Duration::from_millis(1)).await; + assert!( + too_early.is_err(), + "draw fired too early; expected max 60fps (min interval {MIN_FRAME_INTERVAL:?})" + ); + + time::advance(MIN_FRAME_INTERVAL).await; + let second = draw_rx + .recv() + .timeout(Duration::from_millis(50)) + .await + .expect("timed out waiting for clamped draw"); + assert!(second.is_ok(), "broadcast closed unexpectedly"); + } + + #[tokio::test(flavor = "current_thread", start_paused = true)] + async fn test_rate_limit_does_not_delay_future_draws() { + let (draw_tx, mut draw_rx) = broadcast::channel(16); + let requester = FrameRequester::new(draw_tx); + + requester.schedule_frame(); + time::advance(Duration::from_millis(1)).await; + let first = draw_rx + .recv() + .timeout(Duration::from_millis(50)) + .await + .expect("timed out waiting for first draw"); + assert!(first.is_ok(), "broadcast closed unexpectedly"); + + requester.schedule_frame_in(Duration::from_millis(50)); + + time::advance(Duration::from_millis(49)).await; + let early = draw_rx.recv().timeout(Duration::from_millis(1)).await; + assert!(early.is_err(), "draw fired too early"); + + time::advance(Duration::from_millis(1)).await; + let second = draw_rx + .recv() + .timeout(Duration::from_millis(50)) + .await + .expect("timed out waiting for 
delayed draw"); + assert!(second.is_ok(), "broadcast closed unexpectedly"); + } + #[tokio::test(flavor = "current_thread", start_paused = true)] async fn test_multiple_delayed_requests_coalesce_to_earliest() { let (draw_tx, mut draw_rx) = broadcast::channel(16); diff --git a/codex-rs/tui/tooltips.txt b/codex-rs/tui/tooltips.txt index 94ad487bbb0..f2fd1f55c72 100644 --- a/codex-rs/tui/tooltips.txt +++ b/codex-rs/tui/tooltips.txt @@ -1,6 +1,5 @@ Use /compact when the conversation gets long to summarize history and free up context. Start a fresh idea with /new; the previous session stays in history. -If a turn went sideways, /undo asks Codexel to revert the last changes. Use /feedback to send logs to the maintainers when something looks off. Switch models or reasoning effort quickly with /model. You can run any shell command from Codexel using `!` (e.g. `!ls`) diff --git a/codex-rs/tui2/Cargo.toml b/codex-rs/tui2/Cargo.toml index eb4e9cebde5..894b062e1d1 100644 --- a/codex-rs/tui2/Cargo.toml +++ b/codex-rs/tui2/Cargo.toml @@ -40,6 +40,7 @@ codex-core = { workspace = true } codex-feedback = { workspace = true } codex-file-search = { workspace = true } codex-login = { workspace = true } +codex-lsp = { workspace = true } codex-protocol = { workspace = true } codex-utils-absolute-path = { workspace = true } codex-tui = { workspace = true } @@ -74,6 +75,7 @@ supports-color = { workspace = true } tempfile = { workspace = true } textwrap = { workspace = true } tokio = { workspace = true, features = [ + "fs", "io-std", "macros", "process", diff --git a/codex-rs/tui2/docs/scroll_input_model.md b/codex-rs/tui2/docs/scroll_input_model.md new file mode 100644 index 00000000000..f6eef6a6cb6 --- /dev/null +++ b/codex-rs/tui2/docs/scroll_input_model.md @@ -0,0 +1,610 @@ +# TUI2 Scroll Input: Model and Implementation + +This is the single "scrolling doc of record" for TUI2. + +It describes what we implemented, why it works, and what we tried before this approach. 
+It also preserves the scroll-probe findings (see Appendix) that motivated the model. + +Code reference: `codex-rs/tui2/src/tui/scrolling/mouse.rs`. + +## Goals and constraints + +Goals: + +- Mouse wheel: scroll about **3 transcript lines per physical wheel tick** regardless of terminal + event density (classic feel). +- Trackpad: remain **higher fidelity**, meaning small movements can accumulate fractionally and + should not be forced into wheel behavior. +- Work across terminals where a single wheel tick may produce 1, 3, 9, or more raw events. + +Constraints: + +- Terminals typically encode both wheels and trackpads as the same "scroll up/down" mouse button + events without a magnitude. We cannot reliably observe device type directly. +- Timing alone is not a reliable discriminator (wheel and trackpad bursts overlap). + +## Current implementation (stream-based; data-driven) + +TUI2 uses a stream model: scroll events are grouped into short streams separated by silence. +Within a stream, we normalize by a per-terminal "events per tick" factor and then apply either +wheel-like (fixed lines per tick) or trackpad-like (fractional) semantics. + +### 1. Stream detection + +- A stream begins on the first scroll event. +- A stream ends when the gap since the last event exceeds `STREAM_GAP_MS` or when direction flips. +- Direction flips always close the current stream and start a new one, so we never blend "up" and + "down" into a single accumulator. + +This makes behavior stable across: + +- Dense bursts (Warp/Ghostty-style sub-ms intervals). +- Sparse bursts (single events separated by tens or hundreds of ms). +- Mixed wheel + trackpad input where direction changes quickly. + +### 2. Normalization: events-per-tick + +Different terminals emit different numbers of raw events per physical wheel notch. 
+We normalize by converting raw events into tick-equivalents: + +`tick_equivalents = raw_events / events_per_tick` + +Per-terminal defaults come from the probe logs (Appendix), and users can override them. + +Config key: `tui.scroll_events_per_tick`. + +### 3. Wheel vs trackpad behavior (and why it is heuristic) + +Because device type is not directly observable, the implementation provides a mode setting: + +- `tui.scroll_mode = "auto"` (default): infer wheel-like vs trackpad-like behavior per stream. +- `tui.scroll_mode = "wheel"`: always treat streams as wheel-like. +- `tui.scroll_mode = "trackpad"`: always treat streams as trackpad-like. + +In auto mode: + +- Streams start trackpad-like (safer: avoids overshoot when we guess wrong). +- Streams promote to wheel-like when the first tick-worth of events arrives quickly. +- For 1-event-per-tick terminals, "first tick completion time" is not observable, so there is a + conservative end-of-stream fallback for very small bursts. + +This design assumes that auto classification is a best-effort heuristic and must be overridable. + +### 4. Applying scroll: wheel-like streams + +Wheel-like streams target the "classic feel" requirement. + +- Each raw event contributes `tui.scroll_wheel_lines / events_per_tick` lines. +- Deltas flush immediately (not cadence-gated) so wheels feel snappy even on dense streams. +- Wheel-like streams apply a minimum +/- 1 line when events were received but rounding would yield 0. + +Defaults: + +- `tui.scroll_wheel_lines = 3` + +### 5. Applying scroll: trackpad-like streams + +Trackpad-like streams are designed for fidelity first. + +- Each raw event contributes `tui.scroll_trackpad_lines / trackpad_events_per_tick` lines. +- Fractional remainder is carried across streams, so tiny gestures accumulate instead of being lost. +- Trackpad deltas are cadence-gated to ~60 Hz (`REDRAW_CADENCE_MS`) to avoid redraw floods and to + reduce "stop lag" / overshoot. 
+- Trackpad streams intentionally do not apply a minimum +/- 1 line at stream end; if a gesture is + small enough to round to 0, it should feel like "no movement", not a forced jump. + +Dense wheel terminals (e.g. Ghostty/Warp) can emit trackpad streams with high event density. +Using a wheel-derived `events_per_tick = 9` for trackpad would make trackpads feel slow, so we use +a capped divisor for trackpad normalization: + +- `trackpad_events_per_tick = min(events_per_tick, 3)` + +Additionally, to keep small gestures precise while making large/fast swipes cover more content, +trackpad-like streams apply bounded acceleration based on event count: + +- `tui.scroll_trackpad_accel_events`: how many events correspond to +1x multiplier. +- `tui.scroll_trackpad_accel_max`: maximum multiplier. + +### 6. Guard rails and axis handling + +- Horizontal scroll events are ignored for vertical scrolling. +- Streams clamp event counts and accumulated line deltas to avoid floods. + +## Terminal defaults and per-terminal tuning + +Defaults are keyed by `TerminalName` (terminal family), not exact version. +Probe data is version-specific, so defaults should be revalidated as more logs arrive. + +Events-per-tick defaults derived from `wheel_single` medians: + +- AppleTerminal: 3 +- WarpTerminal: 9 +- WezTerm: 1 +- Alacritty: 3 +- Ghostty: 3 +- Iterm2: 1 +- VsCode: 1 +- Kitty: 3 +- Unknown: 3 + +Note: probe logs measured Ghostty at ~9 events per tick, but we default to 3 because an upstream +Ghostty change is expected to reduce wheel event density. Users can override with +`tui.scroll_events_per_tick`. + +Auto-mode wheel promotion thresholds can also be tuned per terminal if needed (see config below). + +## Configuration knobs (TUI2) + +These are user-facing knobs in `config.toml` under `[tui]`: + +In this repo, "tick" always refers to a physical mouse wheel notch. 
Trackpads do not have ticks, so +trackpad settings are expressed in terms of "tick-equivalents" (raw events normalized to a common +scale). + +The core normalization formulas are: + +- Wheel-like streams: + - `lines_per_event = scroll_wheel_lines / scroll_events_per_tick` +- Trackpad-like streams: + - `lines_per_event = scroll_trackpad_lines / min(scroll_events_per_tick, 3)` + - (plus bounded acceleration from `scroll_trackpad_accel_*` and fractional carry across streams) + +Keys: + +- `scroll_events_per_tick` (number): + - Raw vertical scroll events per physical wheel notch in your terminal (normalization input). + - Affects wheel-like scroll speed and auto-mode wheel promotion timing. + - Trackpad-like mode uses `min(..., 3)` as the divisor so dense wheel ticks (e.g. 9 events per + notch) do not make trackpads feel artificially slow. +- `scroll_wheel_lines` (number): + - Lines per physical wheel notch (default 3). + - Change this if you want "classic" wheel scrolling to be more/less aggressive globally. +- `scroll_trackpad_lines` (number): + - Baseline trackpad sensitivity in trackpad-like mode (default 1). + - Change this if your trackpad feels consistently too slow/fast for small motions. +- `scroll_trackpad_accel_events` (number): + - Trackpad acceleration tuning (default 30). Smaller values accelerate earlier. + - Trackpad-like streams compute a multiplier: + - `multiplier = clamp(1 + abs(events) / scroll_trackpad_accel_events, 1..scroll_trackpad_accel_max)` + - The multiplier is applied to the trackpad stream’s computed line delta (including any carried + fractional remainder). +- `scroll_trackpad_accel_max` (number): + - Trackpad acceleration cap (default 3). Set to 1 to effectively disable acceleration. +- `scroll_mode` (`auto` | `wheel` | `trackpad`): + - `auto` (default): infer wheel-like vs trackpad-like per stream. + - `wheel`: always wheel-like (good for wheel-only setups; trackpads will feel jumpy). 
+ - `trackpad`: always trackpad-like (good if auto misclassifies; wheels may feel slow). +- `scroll_wheel_tick_detect_max_ms` (number): + - Auto-mode promotion threshold: how quickly the first tick-worth of events must arrive to + consider the stream wheel-like. + - If wheel feels slow in a dense-wheel terminal, increasing this is usually better than changing + `scroll_events_per_tick`. +- `scroll_wheel_like_max_duration_ms` (number): + - Auto-mode fallback for 1-event-per-tick terminals (WezTerm/iTerm/VS Code). + - If wheel feels like trackpad (too slow) in those terminals, increasing this can help. +- `scroll_invert` (bool): + - Invert direction after terminal detection; applies consistently to wheel and trackpad. + +## Previous approaches tried (and why they were replaced) + +1. Cadence-based inference (rolling inter-event thresholds) + +- Approach: infer wheel vs trackpad using inter-event timing thresholds (burst vs frame cadence vs slow), + with terminal-specific tuning. +- Problem: terminals differ more in event density and batching than in timing; timing overlaps heavily + between wheel and trackpad. Small threshold changes had outsized, terminal-specific effects. + +2. Pure event-count or pure duration classification + +- Approach: classify wheel-like vs trackpad-like by event count <= N or duration <= M. +- Problem: burst length overlaps heavily across devices/terminals; duration is more separable but still + not strong enough to be authoritative. + +3. Why streams + normalization won + +- Streams give a stable unit ("what did the user do in one gesture?") that we can bound and reason about. +- Normalization directly addresses the main cross-terminal source of variation: raw event density. +- Classification remains heuristic, but is isolated and configurable. + +## Appendix A: Follow-up analysis (latest log per terminal; 2025-12-20) + +This section is derived from a "latest log per terminal" subset analysis. 
The exact event count is +not significant; it is included only as a note about which subset was used. + +Key takeaways: + +- Burst length overlaps heavily between wheel and trackpad. Simple "event count <= N" classifiers perform poorly. +- Burst span (duration) is more separable: wheel bursts typically complete in < ~180-200 ms, while trackpad + bursts are often hundreds of milliseconds. +- Conclusion: explicit wheel vs trackpad classification is inherently weak from these events; prefer a + stream model, plus a small heuristic and a config override (`tui.scroll_mode`) for edge cases. + +Data notes (latest per terminal label): + +- Logs used (one per terminal, by filename timestamp): + - mouse_scroll_log_Apple_Terminal_2025-12-19T19-53-54Z.jsonl + - mouse_scroll_log_WarpTerminal_2025-12-19T19-59-38Z.jsonl + - mouse_scroll_log_WezTerm_2025-12-19T20-00-36Z.jsonl + - mouse_scroll_log_alacritty_2025-12-19T19-56-45Z.jsonl + - mouse_scroll_log_ghostty_2025-12-19T19-52-44Z.jsonl + - mouse_scroll_log_iTerm_app_2025-12-19T19-55-08Z.jsonl + - mouse_scroll_log_vscode_2025-12-19T19-51-20Z.jsonl + - mouse_scroll_log_xterm-kitty_2025-12-19T19-58-19Z.jsonl + +Per-terminal burst separability (wheel vs trackpad), summarized as median and p90: + +- Apple Terminal: + - Wheel: length median 9.5 (p90 49), span median 94 ms (p90 136) + - Trackpad: length median 13.5 (p90 104), span median 238 ms (p90 616) +- Warp: + - Wheel: length median 43 (p90 169), span median 88 ms (p90 178) + - Trackpad: length median 60 (p90 82), span median 358 ms (p90 721) +- WezTerm: + - Wheel: length median 4 (p90 10), span median 91 ms (p90 156) + - Trackpad: length median 10.5 (p90 36), span median 270 ms (p90 348) +- alacritty: + - Wheel: length median 14 (p90 63), span median 109 ms (p90 158) + - Trackpad: length median 12.5 (p90 63), span median 372 ms (p90 883) +- ghostty: + - Wheel: length median 32.5 (p90 163), span median 99 ms (p90 157) + - Trackpad: length median 14.5 (p90 60), span median 366 ms 
(p90 719) +- iTerm: + - Wheel: length median 4 (p90 9), span median 91 ms (p90 230) + - Trackpad: length median 9 (p90 36), span median 223 ms (p90 540) +- VS Code: + - Wheel: length median 3 (p90 9), span median 94 ms (p90 120) + - Trackpad: length median 3 (p90 12), span median 192 ms (p90 468) +- Kitty: + - Wheel: length median 15.5 (p90 59), span median 87 ms (p90 233) + - Trackpad: length median 15.5 (p90 68), span median 292 ms (p90 563) + +Wheel_single medians (events per tick) in the latest logs: + +- Apple: 3 +- Warp: 9 +- WezTerm: 1 +- alacritty: 3 +- ghostty: 9 (measured); TUI2 defaults use 3 because an upstream Ghostty change is expected to + reduce wheel event density. If your Ghostty build still emits ~9 events per wheel tick, set + `tui.scroll_events_per_tick = 9`. +- iTerm: 1 +- VS Code: 1 +- Kitty: 3 + +## Appendix B: Scroll probe findings (authoritative; preserved verbatim) + +The remainder of this document is preserved from the original scroll-probe spec. +It is intentionally not rewritten so the data and rationale remain auditable. + +Note: the original text uses "events per line" terminology; the implementation treats this as an +events-per-wheel-tick normalization factor (see "Normalization: events-per-tick"). + +Note: the pseudocode in the preserved spec is not the exact current implementation; it is kept as +historical context for how the probe data originally mapped into an algorithm. The current +implementation is described in the sections above. + +## 1. TL;DR + +Analysis of 16 scroll-probe logs (13,734 events) across 8 terminals shows large per-terminal variation in how many raw events are emitted per physical wheel tick (1-9+ events). Timing alone does not distinguish wheel vs trackpad; event counts and burst duration are more reliable. 
The algorithm below treats scroll input as short streams separated by gaps, normalizes events into line deltas using a per-terminal events-per-line factor, coalesces redraws at 60 Hz, and applies a minimum 1-line delta for discrete bursts. This yields stable behavior across dense streams, sparse bursts, and terminals that emit horizontal events. + +## 2. Data overview + +- Logs analyzed: 16 +- Total events: 13,734 +- Terminals covered: + - Apple_Terminal 455.1 + - WarpTerminal v0.2025.12.17.17.stable_02 + - WezTerm 20240203-110809-5046fc22 + - alacritty + - ghostty 1.2.3 + - iTerm.app 3.6.6 + - vscode 1.107.1 + - xterm-kitty +- Scenarios captured: `wheel_single`, `wheel_small`, `wheel_long`, `trackpad_single`, `trackpad_slow`, `trackpad_fast` (directional up/down variants treated as distinct bursts). +- Legacy `wheel_scroll_*` logs are mapped to `wheel_small` in analysis. + +## 3. Cross-terminal comparison table + +| Terminal | Scenario | Median Dt (ms) | P95 Dt (ms) | Typical burst | Notes | +| --------------------------------------- | --------------- | -------------: | ----------: | ------------: | ----------- | +| Apple_Terminal 455.1 | wheel_single | 0.14 | 97.68 | 3 | +| Apple_Terminal 455.1 | wheel_small | 0.12 | 23.81 | 19 | +| Apple_Terminal 455.1 | wheel_long | 0.03 | 15.93 | 48 | +| Apple_Terminal 455.1 | trackpad_single | 92.35 | 213.15 | 2 | +| Apple_Terminal 455.1 | trackpad_slow | 11.30 | 75.46 | 14 | +| Apple_Terminal 455.1 | trackpad_fast | 0.13 | 8.92 | 96 | +| WarpTerminal v0.2025.12.17.17.stable_02 | wheel_single | 0.07 | 0.34 | 9 | +| WarpTerminal v0.2025.12.17.17.stable_02 | wheel_small | 0.05 | 5.04 | 65 | +| WarpTerminal v0.2025.12.17.17.stable_02 | wheel_long | 0.01 | 0.42 | 166 | +| WarpTerminal v0.2025.12.17.17.stable_02 | trackpad_single | 9.77 | 32.64 | 10 | +| WarpTerminal v0.2025.12.17.17.stable_02 | trackpad_slow | 7.93 | 16.44 | 74 | +| WarpTerminal v0.2025.12.17.17.stable_02 | trackpad_fast | 5.40 | 10.04 | 74 | +| WezTerm 
20240203-110809-5046fc22 | wheel_single | 416.07 | 719.64 | 1 | +| WezTerm 20240203-110809-5046fc22 | wheel_small | 19.41 | 50.19 | 6 | +| WezTerm 20240203-110809-5046fc22 | wheel_long | 13.19 | 29.96 | 10 | +| WezTerm 20240203-110809-5046fc22 | trackpad_single | 237.56 | 237.56 | 1 | +| WezTerm 20240203-110809-5046fc22 | trackpad_slow | 23.54 | 76.10 | 10 | 12.5% horiz | +| WezTerm 20240203-110809-5046fc22 | trackpad_fast | 7.10 | 24.86 | 32 | 12.6% horiz | +| alacritty | wheel_single | 0.09 | 0.33 | 3 | +| alacritty | wheel_small | 0.11 | 37.24 | 24 | +| alacritty | wheel_long | 0.01 | 15.96 | 56 | +| alacritty | trackpad_single | n/a | n/a | 1 | +| alacritty | trackpad_slow | 41.90 | 97.36 | 11 | +| alacritty | trackpad_fast | 3.07 | 25.13 | 62 | +| ghostty 1.2.3 | wheel_single | 0.05 | 0.20 | 9 | +| ghostty 1.2.3 | wheel_small | 0.05 | 7.18 | 52 | +| ghostty 1.2.3 | wheel_long | 0.02 | 1.16 | 146 | +| ghostty 1.2.3 | trackpad_single | 61.28 | 124.28 | 3 | 23.5% horiz | +| ghostty 1.2.3 | trackpad_slow | 23.10 | 76.30 | 14 | 34.7% horiz | +| ghostty 1.2.3 | trackpad_fast | 3.84 | 37.72 | 47 | 23.4% horiz | +| iTerm.app 3.6.6 | wheel_single | 74.96 | 80.61 | 1 | +| iTerm.app 3.6.6 | wheel_small | 20.79 | 84.83 | 6 | +| iTerm.app 3.6.6 | wheel_long | 16.70 | 50.91 | 9 | +| iTerm.app 3.6.6 | trackpad_single | n/a | n/a | 1 | +| iTerm.app 3.6.6 | trackpad_slow | 17.25 | 94.05 | 9 | +| iTerm.app 3.6.6 | trackpad_fast | 7.12 | 24.54 | 33 | +| vscode 1.107.1 | wheel_single | 58.01 | 58.01 | 1 | +| vscode 1.107.1 | wheel_small | 16.76 | 66.79 | 5 | +| vscode 1.107.1 | wheel_long | 9.86 | 32.12 | 8 | +| vscode 1.107.1 | trackpad_single | n/a | n/a | 1 | +| vscode 1.107.1 | trackpad_slow | 164.19 | 266.90 | 3 | +| vscode 1.107.1 | trackpad_fast | 16.78 | 61.05 | 11 | +| xterm-kitty | wheel_single | 0.16 | 51.74 | 3 | +| xterm-kitty | wheel_small | 0.10 | 24.12 | 26 | +| xterm-kitty | wheel_long | 0.01 | 16.10 | 56 | +| xterm-kitty | trackpad_single | 155.65 | 289.87 | 1 | 
12.5% horiz | +| xterm-kitty | trackpad_slow | 16.89 | 67.04 | 16 | 30.4% horiz | +| xterm-kitty | trackpad_fast | 0.23 | 16.37 | 78 | 20.6% horiz | + +## 4. Key findings + +- Raw wheel ticks vary by terminal: median events per tick are 1 (WezTerm/iTerm/vscode), 3 (Apple/alacritty/kitty), and 9 (Warp/ghostty). +- Trackpad bursts are longer than wheel ticks but overlap in timing; inter-event timing alone does not distinguish device type. +- Continuous streams have short gaps: overall inter-event p99 is 70.67 ms; trackpad_slow p95 is 66.98 ms. +- Horizontal events appear only in trackpad scenarios and only in WezTerm/ghostty/kitty; ignore horizontal events for vertical scrolling. +- Burst duration is a reliable discrete/continuous signal: + - wheel_single median 0.15 ms (p95 80.61 ms) + - trackpad_single median 0 ms (p95 237.56 ms) + - wheel_small median 96.88 ms (p95 182.90 ms) + - trackpad_slow median 320.69 ms (p95 812.10 ms) + +## 5. Scrolling model (authoritative) + +**Stream detection.** Treat scroll input as short streams separated by silence. A stream begins on the first scroll event and ends when the gap since the last event exceeds `STREAM_GAP_MS` or the direction flips. Direction flip immediately closes the current stream and starts a new one. + +**Normalization.** Convert raw events into line deltas using a per-terminal `EVENTS_PER_LINE` factor derived from the terminal's median `wheel_single` burst length. If no terminal override matches, use the global default (`3`). + +**Discrete vs continuous.** Classify the stream after it ends: + +- If `event_count <= DISCRETE_MAX_EVENTS` **and** `duration_ms <= DISCRETE_MAX_DURATION_MS`, treat as discrete. +- Otherwise treat as continuous. + +**Discrete streams.** Apply the accumulated line delta immediately. If the stream's accumulated lines rounds to 0 but events were received, apply a minimum +/-1 line (respecting direction). 
+ +**Continuous streams.** Accumulate fractional lines and coalesce redraws to `REDRAW_CADENCE_MS`. Flush any remaining fractional lines on stream end (with the same +/-1 minimum if the stream had events but rounded to 0). + +**Direction.** Always use the raw event direction. Provide a separate user-level invert option if needed; do not infer inversion from timing. + +**Horizontal events.** Ignore horizontal events in vertical scroll logic. + +## 6. Concrete constants (data-derived) + +```text +STREAM_GAP_MS = 80 +DISCRETE_MAX_EVENTS = 10 +DISCRETE_MAX_DURATION_MS = 250 +REDRAW_CADENCE_MS = 16 +DEFAULT_EVENTS_PER_LINE = 3 +MAX_EVENTS_PER_STREAM = 256 +MAX_ACCUMULATED_LINES = 256 +MIN_LINES_PER_DISCRETE_STREAM = 1 +DEFAULT_WHEEL_LINES_PER_TICK = 3 +``` + +Why these values: + +- `STREAM_GAP_MS=80`: overall p99 inter-event gap is 70.67 ms; trackpad_slow p95 is 66.98 ms. 80 ms ends streams without splitting most continuous input. +- `DISCRETE_MAX_EVENTS=10`: wheel_single p95 burst = 9; trackpad_single p95 burst = 10. +- `DISCRETE_MAX_DURATION_MS=250`: trackpad_single p95 duration = 237.56 ms. +- `REDRAW_CADENCE_MS=16`: coalesces dense streams to ~60 Hz; trackpad_fast p95 Dt = 19.83 ms. +- `DEFAULT_EVENTS_PER_LINE=3`: global median wheel_single burst length. +- `MAX_EVENTS_PER_STREAM=256` and `MAX_ACCUMULATED_LINES=256`: highest observed burst is 206; cap to avoid floods. +- `DEFAULT_WHEEL_LINES_PER_TICK=3`: restores classic wheel speed; this is a UX choice rather than a data-derived constant. + +## 7. Pseudocode (Rust-oriented) + +```rust +// This is intentionally a simplified sketch of the current implementation. +// For the authoritative behavior, see `codex-rs/tui2/src/tui/scrolling/mouse.rs`. 
+ +enum StreamKind { + Unknown, + Wheel, + Trackpad, +} + +struct Stream { + start: Instant, + last: Instant, + dir: i32, + event_count: usize, + accumulated_events: i32, + applied_lines: i32, + kind: StreamKind, + just_promoted: bool, +} + +struct State { + stream: Option, + carry_lines: f32, + last_redraw_at: Instant, + cfg: Config, +} + +struct Config { + events_per_tick: u16, + wheel_lines_per_tick: u16, + trackpad_lines_per_tick: u16, + trackpad_accel_events: u16, + trackpad_accel_max: u16, + wheel_tick_detect_max: Duration, +} + +fn on_scroll_event(dir: i32, now: Instant, st: &mut State) -> i32 { + // Close stream on idle gap or direction flip. + if let Some(stream) = st.stream.as_ref() { + let gap = now.duration_since(stream.last); + if gap > STREAM_GAP || stream.dir != dir { + finalize_stream(now, st); + st.stream = None; + } + } + + let stream = st.stream.get_or_insert_with(|| Stream { + start: now, + last: now, + dir, + event_count: 0, + accumulated_events: 0, + applied_lines: 0, + kind: StreamKind::Unknown, + just_promoted: false, + }); + + stream.last = now; + stream.dir = dir; + stream.event_count = (stream.event_count + 1).min(MAX_EVENTS_PER_STREAM); + stream.accumulated_events = + (stream.accumulated_events + dir).clamp(-(MAX_EVENTS_PER_STREAM as i32), MAX_EVENTS_PER_STREAM as i32); + + // Auto-mode promotion: promote to wheel-like when the first tick-worth of events arrives quickly. 
+ if matches!(stream.kind, StreamKind::Unknown) { + let ept = st.cfg.events_per_tick.max(1) as usize; + if ept >= 2 && stream.event_count >= ept && now.duration_since(stream.start) <= st.cfg.wheel_tick_detect_max { + stream.kind = StreamKind::Wheel; + stream.just_promoted = true; + } + } + + flush_lines(now, st) +} + +fn on_tick(now: Instant, st: &mut State) -> i32 { + if let Some(stream) = st.stream.as_ref() { + let gap = now.duration_since(stream.last); + if gap > STREAM_GAP { + return finalize_stream(now, st); + } + } + flush_lines(now, st) +} + +fn finalize_stream(now: Instant, st: &mut State) -> i32 { + // In auto mode, any stream that isn't wheel-like by promotion stays trackpad-like. + if let Some(stream) = st.stream.as_mut() { + if matches!(stream.kind, StreamKind::Unknown) { + stream.kind = StreamKind::Trackpad; + } + } + + let lines = flush_lines(now, st); + + // Carry fractional remainder across streams for trackpad-like input. + if let Some(stream) = st.stream.as_ref() { + if matches!(stream.kind, StreamKind::Trackpad) { + st.carry_lines = desired_lines_f32(st, stream) - stream.applied_lines as f32; + } else { + st.carry_lines = 0.0; + } + } + + lines +} + +fn flush_lines(now: Instant, st: &mut State) -> i32 { + let Some(stream) = st.stream.as_mut() else { return 0; }; + + let wheel_like = matches!(stream.kind, StreamKind::Wheel); + let cadence_elapsed = now.duration_since(st.last_redraw_at) >= REDRAW_CADENCE; + let should_flush = wheel_like || cadence_elapsed || stream.just_promoted; + if !should_flush { + return 0; + } + + let desired_total = desired_lines_f32(st, stream); + let mut desired_lines = desired_total.trunc() as i32; + + // Wheel guardrail: ensure we never produce a "dead tick" for non-zero input. 
+ if wheel_like && desired_lines == 0 && stream.accumulated_events != 0 { + desired_lines = stream.accumulated_events.signum() * MIN_LINES_PER_DISCRETE_STREAM; + } + + let mut delta = desired_lines - stream.applied_lines; + if delta == 0 { + return 0; + } + + delta = delta.clamp(-MAX_ACCUMULATED_LINES, MAX_ACCUMULATED_LINES); + stream.applied_lines += delta; + stream.just_promoted = false; + st.last_redraw_at = now; + delta +} + +fn desired_lines_f32(st: &State, stream: &Stream) -> f32 { + let wheel_like = matches!(stream.kind, StreamKind::Wheel); + + let events_per_tick = if wheel_like { + st.cfg.events_per_tick.max(1) as f32 + } else { + // Trackpad divisor is capped so dense wheel terminals don't feel slow for trackpads. + st.cfg.events_per_tick.clamp(1, DEFAULT_EVENTS_PER_LINE).max(1) as f32 + }; + + let lines_per_tick = if wheel_like { + st.cfg.wheel_lines_per_tick.max(1) as f32 + } else { + st.cfg.trackpad_lines_per_tick.max(1) as f32 + }; + + let mut total = (stream.accumulated_events as f32 * (lines_per_tick / events_per_tick)) + .clamp(-(MAX_ACCUMULATED_LINES as f32), MAX_ACCUMULATED_LINES as f32); + + if !wheel_like { + total = (total + st.carry_lines).clamp(-(MAX_ACCUMULATED_LINES as f32), MAX_ACCUMULATED_LINES as f32); + + // Bounded acceleration for large swipes (keep small swipes precise). + let event_count = stream.accumulated_events.abs() as f32; + let accel = (1.0 + (event_count / st.cfg.trackpad_accel_events.max(1) as f32)) + .clamp(1.0, st.cfg.trackpad_accel_max.max(1) as f32); + total = (total * accel).clamp(-(MAX_ACCUMULATED_LINES as f32), MAX_ACCUMULATED_LINES as f32); + } + + total +} +``` + +## 8. 
Terminal-specific adjustments (minimal) + +Use per-terminal `EVENTS_PER_LINE` overrides derived from median `wheel_single` bursts: + +```text +Apple_Terminal 455.1 = 3 +WarpTerminal v0.2025.12.17.17.stable_02 = 9 +WezTerm 20240203-110809-5046fc22 = 1 +alacritty = 3 +ghostty 1.2.3 = 9 +iTerm.app 3.6.6 = 1 +vscode 1.107.1 = 1 +xterm-kitty = 3 +``` + +If terminal is not matched, use `DEFAULT_EVENTS_PER_LINE = 3`. + +## 9. Known weird cases and guardrails + +- Extremely dense streams (sub-ms Dt) occur in Warp/ghostty/kitty; redraw coalescing is mandatory. +- Sparse bursts (hundreds of ms between events) occur in trackpad_single; do not merge them into long streams. +- Horizontal scroll events (12-35% of trackpad events in some terminals) must be ignored for vertical scrolling. +- Direction inversion is user-configurable in terminals; always use event direction and expose an application-level invert setting. +- Guard against floods: cap event counts and accumulated line deltas per stream. diff --git a/codex-rs/tui2/docs/tui_viewport_and_history.md b/codex-rs/tui2/docs/tui_viewport_and_history.md index 3f7733f1037..50d23ffa142 100644 --- a/codex-rs/tui2/docs/tui_viewport_and_history.md +++ b/codex-rs/tui2/docs/tui_viewport_and_history.md @@ -183,11 +183,18 @@ Mouse interaction is a first‑class part of the new design: that we use for bullets/prefixes. - **Copy.** - - When the user triggers copy, the TUI re‑renders just the transcript region off‑screen using the - same wrapping as the visible view. - - It then walks the selected lines and columns in that off‑screen buffer to reconstruct the exact - text region the user highlighted (including internal spaces and empty lines). - - That text is sent to the system clipboard and a status footer indicates success or failure. + - When the user triggers copy, the TUI reconstructs the wrapped transcript lines using the same + flattening/wrapping rules as the visible view. 
+ - It then reconstructs a high‑fidelity clipboard string from the selected logical lines: + - Preserves meaningful indentation (especially for code blocks). + - Treats soft-wrapped prose as a single logical line by joining wrap continuations instead of + inserting hard newlines. + - Emits Markdown source markers (e.g. backticks and fences) for copy/paste, even if the UI + chooses to render those constructs without showing the literal markers. + - Copy operates on the full selection range, even if the selection extends outside the current + viewport. + - The resulting text is sent to the system clipboard and a status footer indicates success or + failure. Because scrolling, selection, and copy all operate on the same flattened transcript representation, they remain consistent even as the viewport resizes or the chat composer grows/shrinks. Owning our @@ -195,6 +202,9 @@ own scrolling also means we must own mouse interactions end‑to‑end: if we le to the terminal, we could not reliably line up selections with transcript content or avoid accidentally copying gutter/margin characters instead of just the conversation text. +Scroll normalization details and the data behind it live in +`codex-rs/tui2/docs/scroll_input_model.md`. + --- ## 5. Printing History to Scrollback @@ -389,8 +399,8 @@ The main app struct (`codex-rs/tui2/src/app.rs`) tracks the transcript and viewp - `transcript_cells: Vec>` – the logical history. - `transcript_scroll: TranscriptScroll` – whether the viewport is pinned to the bottom or anchored at a specific cell/line pair. -- `transcript_selection: TranscriptSelection` – a selection expressed in screen coordinates over - the flattened transcript region. +- `transcript_selection: TranscriptSelection` – a selection expressed in content-relative + coordinates over the flattened, wrapped transcript (line index + column). 
- `transcript_view_top` / `transcript_total_lines` – the current viewport’s top line index and total number of wrapped lines for the inline transcript area. @@ -407,8 +417,9 @@ Streaming wrapping details live in `codex-rs/tui2/docs/streaming_wrapping_design Mouse handling lives in `App::handle_mouse_event`, keyboard scrolling in `App::handle_key_event`, selection rendering in `App::apply_transcript_selection`, and copy in -`App::copy_transcript_selection` plus `codex-rs/tui2/src/clipboard_copy.rs`. Scroll/selection UI -state is forwarded through `ChatWidget::set_transcript_ui_state`, +`App::copy_transcript_selection` plus `codex-rs/tui2/src/transcript_selection.rs` and +`codex-rs/tui2/src/clipboard_copy.rs`. Scroll/selection UI state is forwarded through +`ChatWidget::set_transcript_ui_state`, `BottomPane::set_transcript_ui_state`, and `ChatComposer::footer_props`, with footer text assembled in `codex-rs/tui2/src/bottom_pane/footer.rs`. @@ -428,9 +439,14 @@ feedback are already implemented: - Bottom pane positioning is pegged high with an empty transcript and moves down as the transcript fills (including on resume). -- Wheel-based transcript scrolling is enabled on top of the new scroll model. +- Wheel-based transcript scrolling uses the stream-based normalization model derived from scroll + probe data (see `codex-rs/tui2/docs/scroll_input_model.md`). - While a selection is active, streaming stops “follow latest output” so the selection remains stable, and follow mode resumes after the selection is cleared. +- Copy operates on the full selection range (including offscreen lines), using the same wrapping as + on-screen rendering. +- Copy selection uses `Ctrl+Shift+C` (VS Code uses `Ctrl+Y` because `Ctrl+Shift+C` is unavailable in + the terminal) and shows an on-screen “copy” affordance near the selection. ### 10.2 Roadmap (prioritized) @@ -440,13 +456,11 @@ Vim) behavior as we can while still owning the viewport. 
**P0 — must-have (usability/correctness):** -- **Scrolling behavior.** Default to small scroll increments (ideally 1 line per wheel tick) with +- **Scrolling behavior.** Default to a classic multi-line wheel tick (3 lines, configurable) with acceleration/velocity for faster navigation, and ensure we stop scrolling when the user stops input (avoid redraw/event-loop backlog that makes scrolling feel “janky”). - **Mouse event bounds.** Ignore mouse events outside the transcript region so clicks in the composer/footer don’t start or mutate transcript selection state. -- **Copy includes offscreen lines.** Make copy operate on the full selection range even when part (or - all) of the selection is outside the current viewport. - **Copy fidelity.** Preserve meaningful indentation (especially code blocks), treat soft-wrapped prose as a single logical line when copying, and copy markdown _source_ (including backticks and heading markers) even if we render it differently. @@ -455,9 +469,6 @@ Vim) behavior as we can while still owning the viewport. - **Streaming wrapping polish.** Ensure all streaming paths use display-time wrapping only, and add tests that cover resizing after streaming has started. -- **Copy shortcut and discoverability.** Switch copy from `Ctrl+Y` to `Ctrl+Shift+C`, and add an - on-screen copy affordance (e.g. a small button near the selection) that also displays the - shortcut. - **Selection semantics.** Define and implement selection behavior across multi-step output (and whether step boundaries should be copy boundaries), while continuing to exclude the left gutter from copied text. @@ -525,9 +536,9 @@ explicit discussion before we commit to further UI changes. - **Selection affordances.** - - Today, the primary hint that selection is active is the reversed text and the “Ctrl+Y copy - selection” footer text. Do we want an explicit “Selecting… (Esc to cancel)” status while a drag - is in progress, or would that be redundant/clutter for most users? 
+ - Today, the primary hint that selection is active is the reversed text plus the on-screen “copy” + affordance (`Ctrl+Shift+C`) and the footer hint. Do we want an explicit “Selecting… (Esc to + cancel)” status while a drag is in progress, or would that be redundant/clutter for most users? - **Suspend banners in scrollback.** diff --git a/codex-rs/tui2/src/app.rs b/codex-rs/tui2/src/app.rs index c135529e6d2..a5d001fca64 100644 --- a/codex-rs/tui2/src/app.rs +++ b/codex-rs/tui2/src/app.rs @@ -17,21 +17,27 @@ use crate::pager_overlay::Overlay; use crate::render::highlight::highlight_bash_to_lines; use crate::render::renderable::Renderable; use crate::resume_picker::ResumeSelection; +use crate::transcript_copy_ui::TranscriptCopyUi; +use crate::transcript_multi_click::TranscriptMultiClick; +use crate::transcript_selection::TRANSCRIPT_GUTTER_COLS; +use crate::transcript_selection::TranscriptSelection; +use crate::transcript_selection::TranscriptSelectionPoint; use crate::tui; use crate::tui::TuiEvent; +use crate::tui::scrolling::MouseScrollState; +use crate::tui::scrolling::ScrollConfig; +use crate::tui::scrolling::ScrollConfigOverrides; +use crate::tui::scrolling::ScrollDirection; +use crate::tui::scrolling::ScrollUpdate; use crate::tui::scrolling::TranscriptLineMeta; use crate::tui::scrolling::TranscriptScroll; use crate::update_action::UpdateAction; -use crate::wrapping::RtOptions; -use crate::wrapping::word_wrap_line; -use crate::wrapping::word_wrap_lines_borrowed; use codex_ansi_escape::ansi_escape_line; use codex_core::AuthManager; use codex_core::ConversationManager; use codex_core::config::Config; +use codex_core::config::edit::ConfigEdit; use codex_core::config::edit::ConfigEditsBuilder; -#[cfg(target_os = "windows")] -use codex_core::features::Feature; use codex_core::models_manager::manager::ModelsManager; use codex_core::models_manager::model_presets::HIDE_GPT_5_1_CODEX_MAX_MIGRATION_PROMPT_CONFIG; use 
codex_core::models_manager::model_presets::HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG; @@ -42,6 +48,7 @@ use codex_core::protocol::Op; use codex_core::protocol::SessionSource; use codex_core::protocol::SkillErrorInfo; use codex_core::protocol::TokenUsage; +use codex_core::terminal::terminal_info; use codex_protocol::ConversationId; use codex_protocol::openai_models::ModelPreset; use codex_protocol::openai_models::ModelUpgrade; @@ -70,11 +77,13 @@ use std::thread; use std::time::Duration; use tokio::select; use tokio::sync::mpsc::unbounded_channel; -use unicode_width::UnicodeWidthStr; #[cfg(not(debug_assertions))] use crate::history_cell::UpdateAvailableHistoryCell; +#[cfg(windows)] +use codex_core::features::Feature; + #[derive(Debug, Clone)] pub struct AppExitInfo { pub token_usage: TokenUsage, @@ -323,8 +332,10 @@ pub(crate) struct App { #[allow(dead_code)] transcript_scroll: TranscriptScroll, transcript_selection: TranscriptSelection, + transcript_multi_click: TranscriptMultiClick, transcript_view_top: usize, transcript_total_lines: usize, + transcript_copy_ui: TranscriptCopyUi, // Pager overlay state (Transcript or Static like Diff) pub(crate) overlay: Option, @@ -336,6 +347,9 @@ pub(crate) struct App { /// Controls the animation thread that sends CommitTick events. pub(crate) commit_anim_running: Arc, + scroll_config: ScrollConfig, + scroll_state: MouseScrollState, + // Esc-backtracking state grouped pub(crate) backtrack: crate::app_backtrack::BacktrackState, pub(crate) feedback: codex_feedback::CodexFeedback, @@ -349,28 +363,6 @@ pub(crate) struct App { // One-shot suppression of the next world-writable scan after user confirmation. skip_world_writable_scan_once: bool, } - -/// Content-relative selection within the inline transcript viewport. 
-/// -/// Selection endpoints are expressed in terms of flattened, wrapped transcript -/// line indices and columns, so the highlight tracks logical conversation -/// content even when the viewport scrolls or the terminal is resized. -#[derive(Debug, Clone, Copy, Default)] -struct TranscriptSelection { - anchor: Option, - head: Option, -} - -/// A single endpoint of a transcript selection. -/// -/// `line_index` is an index into the flattened wrapped transcript lines, and -/// `column` is a zero-based column offset within that visual line, counted from -/// the first content column to the right of the transcript gutter. -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -struct TranscriptSelectionPoint { - line_index: usize, - column: u16, -} impl App { async fn shutdown_current_conversation(&mut self) { if let Some(conversation_id) = self.chat_widget.conversation_id() { @@ -467,6 +459,7 @@ impl App { }; ChatWidget::new_from_existing( init, + conversation_manager.clone(), resumed.conversation, resumed.session_configured, ) @@ -478,6 +471,22 @@ impl App { let file_search = FileSearchManager::new(config.cwd.clone(), app_event_tx.clone()); #[cfg(not(debug_assertions))] let upgrade_version = crate::updates::get_upgrade_version(&config); + let scroll_config = ScrollConfig::from_terminal( + &terminal_info(), + ScrollConfigOverrides { + events_per_tick: config.tui_scroll_events_per_tick, + wheel_lines_per_tick: config.tui_scroll_wheel_lines, + trackpad_lines_per_tick: config.tui_scroll_trackpad_lines, + trackpad_accel_events: config.tui_scroll_trackpad_accel_events, + trackpad_accel_max: config.tui_scroll_trackpad_accel_max, + mode: Some(config.tui_scroll_mode), + wheel_tick_detect_max_ms: config.tui_scroll_wheel_tick_detect_max_ms, + wheel_like_max_duration_ms: config.tui_scroll_wheel_like_max_duration_ms, + invert_direction: config.tui_scroll_invert, + }, + ); + + let copy_selection_shortcut = crate::transcript_copy_ui::detect_copy_selection_shortcut(); let mut app = Self { 
server: conversation_manager.clone(), @@ -492,12 +501,16 @@ impl App { transcript_cells: Vec::new(), transcript_scroll: TranscriptScroll::default(), transcript_selection: TranscriptSelection::default(), + transcript_multi_click: TranscriptMultiClick::default(), transcript_view_top: 0, transcript_total_lines: 0, + transcript_copy_ui: TranscriptCopyUi::new_with_shortcut(copy_selection_shortcut), overlay: None, deferred_history_lines: Vec::new(), has_emitted_history_lines: false, commit_anim_running: Arc::new(AtomicBool::new(false)), + scroll_config, + scroll_state: MouseScrollState::default(), backtrack: BacktrackState::default(), feedback: feedback.clone(), pending_update_action: None, @@ -558,13 +571,15 @@ impl App { let session_lines = if width == 0 { Vec::new() } else { - let (lines, line_meta) = Self::build_transcript_lines(&app.transcript_cells, width); + let transcript = + crate::transcript_render::build_transcript_lines(&app.transcript_cells, width); + let (lines, line_meta) = (transcript.lines, transcript.meta); let is_user_cell: Vec = app .transcript_cells .iter() .map(|cell| cell.as_any().is::()) .collect(); - Self::render_lines_to_ansi(&lines, &line_meta, &is_user_cell, width) + crate::transcript_render::render_lines_to_ansi(&lines, &line_meta, &is_user_cell, width) }; tui.terminal.clear()?; @@ -581,6 +596,10 @@ impl App { tui: &mut tui::Tui, event: TuiEvent, ) -> Result { + if matches!(&event, TuiEvent::Draw) { + self.handle_scroll_tick(tui); + } + if self.overlay.is_some() { let _ = self.handle_backtrack_overlay_event(tui, event).await?; } else { @@ -657,6 +676,7 @@ impl App { transcript_scrolled, selection_active, scroll_position, + self.copy_selection_key(), ); } } @@ -694,7 +714,9 @@ impl App { height: max_transcript_height, }; - let (lines, line_meta) = Self::build_transcript_lines(cells, transcript_area.width); + let transcript = + crate::transcript_render::build_wrapped_transcript_lines(cells, transcript_area.width); + let (lines, line_meta) = 
(transcript.lines, transcript.meta); if lines.is_empty() { Clear.render_ref(transcript_area, frame.buffer); self.transcript_scroll = TranscriptScroll::default(); @@ -703,40 +725,12 @@ impl App { return area.y; } - let wrapped = word_wrap_lines_borrowed(&lines, transcript_area.width.max(1) as usize); - if wrapped.is_empty() { - self.transcript_scroll = TranscriptScroll::default(); - self.transcript_view_top = 0; - self.transcript_total_lines = 0; - return area.y; - } - let is_user_cell: Vec = cells .iter() .map(|c| c.as_any().is::()) .collect(); - let base_opts: RtOptions<'_> = RtOptions::new(transcript_area.width.max(1) as usize); - let mut wrapped_is_user_row: Vec = Vec::with_capacity(wrapped.len()); - let mut first = true; - for (idx, line) in lines.iter().enumerate() { - let opts = if first { - base_opts.clone() - } else { - base_opts - .clone() - .initial_indent(base_opts.subsequent_indent.clone()) - }; - let seg_count = word_wrap_line(line, opts).len(); - let is_user_row = line_meta - .get(idx) - .and_then(TranscriptLineMeta::cell_index) - .map(|cell_index| is_user_cell.get(cell_index).copied().unwrap_or(false)) - .unwrap_or(false); - wrapped_is_user_row.extend(std::iter::repeat_n(is_user_row, seg_count)); - first = false; - } - let total_lines = wrapped.len(); + let total_lines = lines.len(); self.transcript_total_lines = total_lines; let max_visible = std::cmp::min(max_transcript_height as usize, total_lines); let max_start = total_lines.saturating_sub(max_visible); @@ -788,11 +782,12 @@ impl App { height: 1, }; - if wrapped_is_user_row + let is_user_row = line_meta .get(line_index) - .copied() - .unwrap_or(false) - { + .and_then(TranscriptLineMeta::cell_index) + .map(|cell_index| is_user_cell.get(cell_index).copied().unwrap_or(false)) + .unwrap_or(false); + if is_user_row { let base_style = crate::style::user_message_style(); for x in row_area.x..row_area.right() { let cell = &mut frame.buffer[(x, y)]; @@ -801,18 +796,35 @@ impl App { } } - 
wrapped[line_index].render_ref(row_area, frame.buffer); + lines[line_index].render_ref(row_area, frame.buffer); } self.apply_transcript_selection(transcript_area, frame.buffer); + if let (Some(anchor), Some(head)) = ( + self.transcript_selection.anchor, + self.transcript_selection.head, + ) && anchor != head + { + self.transcript_copy_ui.render_copy_pill( + transcript_area, + frame.buffer, + (anchor.line_index, anchor.column), + (head.line_index, head.column), + self.transcript_view_top, + self.transcript_total_lines, + ); + } else { + self.transcript_copy_ui.clear_affordance(); + } chat_top } /// Handle mouse interaction in the main transcript view. /// - /// - Mouse wheel movement scrolls the conversation history by small, fixed increments, + /// - Mouse wheel movement scrolls the conversation history using stream-based + /// normalization (events-per-line factor, discrete vs. continuous streams), /// independent of the terminal's own scrollback. - /// - Mouse clicks and drags adjust a text selection defined in terms of + /// - Mouse drags adjust a text selection defined in terms of /// flattened transcript lines and columns, so the selection is anchored /// to the underlying content rather than absolute screen rows. /// - When the user drags to extend a selection while the view is following the bottom @@ -820,6 +832,9 @@ impl App { /// first converted into an anchored position so that ongoing updates no longer move /// the viewport under the selection. A simple click without a drag does not change /// scroll behavior. + /// - Mouse events outside the transcript area (e.g. over the composer/footer) must not + /// start or mutate transcript selection state. A left-click outside the transcript + /// clears any existing transcript selection so the user can dismiss the highlight. 
fn handle_mouse_event( &mut self, tui: &mut tui::Tui, @@ -855,15 +870,31 @@ impl App { width, height: transcript_height, }; - let base_x = transcript_area.x.saturating_add(2); + let base_x = transcript_area.x.saturating_add(TRANSCRIPT_GUTTER_COLS); let max_x = transcript_area.right().saturating_sub(1); - let mut clamped_x = mouse_event.column; - let mut clamped_y = mouse_event.row; - - if clamped_y < transcript_area.y || clamped_y >= transcript_area.bottom() { - clamped_y = transcript_area.y; + // Treat the transcript as the only interactive region for transcript selection. + // + // This prevents clicks in the composer/footer from starting or extending a transcript + // selection, while still allowing a left-click outside the transcript to clear an + // existing highlight. + if mouse_event.row < transcript_area.y || mouse_event.row >= transcript_area.bottom() { + if matches!( + mouse_event.kind, + MouseEventKind::Down(MouseButton::Left) | MouseEventKind::Up(MouseButton::Left) + ) && (self.transcript_selection.anchor.is_some() + || self.transcript_selection.head.is_some()) + { + self.transcript_selection = TranscriptSelection::default(); + // Mouse events do not inherently trigger a redraw; schedule one so the cleared + // highlight is reflected immediately. 
+ tui.frame_requester().schedule_frame(); + } + return; } + + let mut clamped_x = mouse_event.column; + let clamped_y = mouse_event.row; if clamped_x < base_x { clamped_x = base_x; } @@ -873,89 +904,222 @@ impl App { let streaming = self.chat_widget.is_task_running(); + if matches!(mouse_event.kind, MouseEventKind::Down(MouseButton::Left)) + && self + .transcript_copy_ui + .hit_test(mouse_event.column, mouse_event.row) + { + self.copy_transcript_selection(tui); + return; + } + match mouse_event.kind { MouseEventKind::ScrollUp => { - self.scroll_transcript( + let scroll_update = self.mouse_scroll_update(ScrollDirection::Up); + self.apply_scroll_update( tui, - -3, + scroll_update, transcript_area.height as usize, transcript_area.width, + true, ); } MouseEventKind::ScrollDown => { - self.scroll_transcript( + let scroll_update = self.mouse_scroll_update(ScrollDirection::Down); + self.apply_scroll_update( tui, - 3, + scroll_update, transcript_area.height as usize, transcript_area.width, + true, ); } + MouseEventKind::ScrollLeft | MouseEventKind::ScrollRight => {} MouseEventKind::Down(MouseButton::Left) => { - if let Some(point) = self.transcript_point_from_coordinates( + self.transcript_copy_ui.set_dragging(true); + let point = self.transcript_point_from_coordinates( transcript_area, base_x, clamped_x, clamped_y, + ); + if self.transcript_multi_click.on_mouse_down( + &mut self.transcript_selection, + &self.transcript_cells, + transcript_area.width, + point, ) { - self.transcript_selection.anchor = Some(point); - self.transcript_selection.head = Some(point); + tui.frame_requester().schedule_frame(); } } MouseEventKind::Drag(MouseButton::Left) => { - if let Some(anchor) = self.transcript_selection.anchor - && let Some(point) = self.transcript_point_from_coordinates( - transcript_area, - base_x, - clamped_x, - clamped_y, - ) - { - if streaming - && matches!(self.transcript_scroll, TranscriptScroll::ToBottom) - && point != anchor - { - 
self.lock_transcript_scroll_to_current_view( - transcript_area.height as usize, - transcript_area.width, - ); - } - self.transcript_selection.head = Some(point); + let point = self.transcript_point_from_coordinates( + transcript_area, + base_x, + clamped_x, + clamped_y, + ); + let outcome = crate::transcript_selection::on_mouse_drag( + &mut self.transcript_selection, + &self.transcript_scroll, + point, + streaming, + ); + self.transcript_multi_click + .on_mouse_drag(&self.transcript_selection, point); + if outcome.lock_scroll { + self.lock_transcript_scroll_to_current_view( + transcript_area.height as usize, + transcript_area.width, + ); + } + if outcome.changed { + tui.frame_requester().schedule_frame(); } } MouseEventKind::Up(MouseButton::Left) => { - if self.transcript_selection.anchor == self.transcript_selection.head { - self.transcript_selection = TranscriptSelection::default(); + self.transcript_copy_ui.set_dragging(false); + let selection_changed = + crate::transcript_selection::on_mouse_up(&mut self.transcript_selection); + let has_active_selection = self.transcript_selection.anchor.is_some() + && self.transcript_selection.head.is_some(); + if selection_changed || has_active_selection { + tui.frame_requester().schedule_frame(); } } _ => {} } } - /// Scroll the transcript by a fixed number of visual lines. + /// Convert a single mouse scroll event (direction-only) into a normalized scroll update. + /// + /// This delegates to [`MouseScrollState::on_scroll_event`] using the current [`ScrollConfig`]. + /// The returned [`ScrollUpdate`] is intentionally split into: + /// + /// - `lines`: a *delta* in visual lines to apply immediately to the transcript viewport. + /// - Sign convention matches [`ScrollDirection`] (`Up` is negative; `Down` is positive). + /// - May be 0 in trackpad-like mode while sub-line fractions are still accumulating. + /// - `next_tick_in`: an optional delay after which we should trigger a follow-up tick. 
+ /// This is required because stream closure is defined by a *time gap* rather than an + /// explicit "gesture end" event. See [`App::apply_scroll_update`] and + /// [`App::handle_scroll_tick`]. + /// + /// In TUI2, that follow-up tick is driven via `TuiEvent::Draw`: we schedule a frame, and on + /// the next draw we call [`MouseScrollState::on_tick`] to close idle streams and flush any + /// newly-reached whole lines. This prevents perceived "stop lag" where accumulated scroll only + /// applies once the next user input arrives. + fn mouse_scroll_update(&mut self, direction: ScrollDirection) -> ScrollUpdate { + self.scroll_state + .on_scroll_event(direction, self.scroll_config) + } + + /// Apply a [`ScrollUpdate`] to the transcript viewport and schedule any needed follow-up tick. + /// + /// `update.lines` is applied immediately via [`App::scroll_transcript`]. + /// + /// If `update.next_tick_in` is `Some`, we schedule a future frame so `TuiEvent::Draw` can call + /// [`App::handle_scroll_tick`] and close the stream after it goes idle and/or cadence-flush + /// pending whole lines. + /// + /// `schedule_frame` is forwarded to [`App::scroll_transcript`] and controls whether scrolling + /// should request an additional draw. Pass `false` when applying scroll during a + /// `TuiEvent::Draw` tick to avoid redundant frames. + fn apply_scroll_update( + &mut self, + tui: &mut tui::Tui, + update: ScrollUpdate, + visible_lines: usize, + width: u16, + schedule_frame: bool, + ) { + if update.lines != 0 { + self.scroll_transcript(tui, update.lines, visible_lines, width, schedule_frame); + } + if let Some(delay) = update.next_tick_in { + tui.frame_requester().schedule_frame_in(delay); + } + } + + /// Drive stream closure and cadence-based flushing for mouse scrolling. + /// + /// This is called on every `TuiEvent::Draw` before rendering. If a scroll stream is active, it + /// may: + /// + /// - Close the stream once it has been idle for longer than the stream-gap threshold. 
+ /// - Flush whole-line deltas on the redraw cadence for trackpad-like streams, even if no new + /// events arrive. + /// + /// The resulting update is applied with `schedule_frame = false` because we are already in a + /// draw tick. + fn handle_scroll_tick(&mut self, tui: &mut tui::Tui) { + let Some((visible_lines, width)) = self.transcript_scroll_dimensions(tui) else { + return; + }; + let update = self.scroll_state.on_tick(); + self.apply_scroll_update(tui, update, visible_lines, width, false); + } + + /// Compute the transcript viewport dimensions used for scrolling. + /// + /// Mouse scrolling is applied in terms of "visible transcript lines": the terminal height + /// minus the chat composer height. We compute this from the last known terminal size to avoid + /// querying the terminal during non-draw events. + /// + /// Returns `(visible_lines, width)` or `None` when the terminal is not yet sized or the chat + /// area consumes the full height. + fn transcript_scroll_dimensions(&self, tui: &tui::Tui) -> Option<(usize, u16)> { + let size = tui.terminal.last_known_screen_size; + let width = size.width; + let height = size.height; + if width == 0 || height == 0 { + return None; + } + + let chat_height = self.chat_widget.desired_height(width); + if chat_height >= height { + return None; + } + + let transcript_height = height.saturating_sub(chat_height); + if transcript_height == 0 { + return None; + } + + Some((transcript_height as usize, width)) + } + + /// Scroll the transcript by a number of visual lines. /// /// This is the shared implementation behind mouse wheel movement and PgUp/PgDn keys in /// the main view. Scroll state is expressed in terms of transcript cells and their /// internal line indices, so scrolling refers to logical conversation content and /// remains stable even as wrapping or streaming causes visual reflows. 
+ /// + /// `schedule_frame` controls whether to request an extra draw; pass `false` when applying + /// scroll during a `TuiEvent::Draw` tick to avoid redundant frames. fn scroll_transcript( &mut self, tui: &mut tui::Tui, delta_lines: i32, visible_lines: usize, width: u16, + schedule_frame: bool, ) { if visible_lines == 0 { return; } - let (_, line_meta) = Self::build_transcript_lines(&self.transcript_cells, width); + let transcript = + crate::transcript_render::build_wrapped_transcript_lines(&self.transcript_cells, width); + let line_meta = transcript.meta; self.transcript_scroll = self.transcript_scroll .scrolled_by(delta_lines, &line_meta, visible_lines); - // Delay redraws slightly so scroll bursts coalesce into a single frame. - tui.frame_requester() - .schedule_frame_in(Duration::from_millis(16)); + if schedule_frame { + // Request a redraw; the frame scheduler coalesces bursts and clamps to 60fps. + tui.frame_requester().schedule_frame(); + } } /// Convert a `ToBottom` (auto-follow) scroll state into a fixed anchor at the current view. @@ -970,7 +1134,9 @@ impl App { return; } - let (lines, line_meta) = Self::build_transcript_lines(&self.transcript_cells, width); + let transcript = + crate::transcript_render::build_wrapped_transcript_lines(&self.transcript_cells, width); + let (lines, line_meta) = (transcript.lines, transcript.meta); if lines.is_empty() || line_meta.is_empty() { return; } @@ -995,111 +1161,6 @@ impl App { } } - /// Build the flattened transcript lines for rendering, scrolling, and exit transcripts. - /// - /// Returns both the visible `Line` buffer and a parallel metadata vector - /// that maps each line back to its originating `(cell_index, line_in_cell)` - /// pair (see `TranscriptLineMeta::CellLine`), or `TranscriptLineMeta::Spacer` for - /// synthetic spacer rows inserted between cells. 
This allows the scroll state - /// to anchor to a specific history cell even as new content arrives or the - /// viewport size changes, and gives exit transcript renderers enough structure - /// to style user rows differently from agent rows. - fn build_transcript_lines( - cells: &[Arc], - width: u16, - ) -> (Vec>, Vec) { - let mut lines: Vec> = Vec::new(); - let mut line_meta: Vec = Vec::new(); - let mut has_emitted_lines = false; - - for (cell_index, cell) in cells.iter().enumerate() { - let cell_lines = cell.display_lines(width); - if cell_lines.is_empty() { - continue; - } - - if !cell.is_stream_continuation() { - if has_emitted_lines { - lines.push(Line::from("")); - line_meta.push(TranscriptLineMeta::Spacer); - } else { - has_emitted_lines = true; - } - } - - for (line_in_cell, line) in cell_lines.into_iter().enumerate() { - line_meta.push(TranscriptLineMeta::CellLine { - cell_index, - line_in_cell, - }); - lines.push(line); - } - } - - (lines, line_meta) - } - - /// Render flattened transcript lines into ANSI strings suitable for - /// printing after the TUI exits. - /// - /// This helper mirrors the original TUI viewport behavior: - /// - Merges line-level style into each span so the ANSI output matches - /// the on-screen styling (e.g., blockquotes, lists). - /// - For user-authored rows, pads the background style out to the full - /// terminal width so prompts appear as solid blocks in scrollback. - /// - Streams spans through the shared vt100 writer so downstream tests - /// and tools see consistent escape sequences. 
- fn render_lines_to_ansi( - lines: &[Line<'static>], - line_meta: &[TranscriptLineMeta], - is_user_cell: &[bool], - width: u16, - ) -> Vec { - lines - .iter() - .enumerate() - .map(|(idx, line)| { - let is_user_row = line_meta - .get(idx) - .and_then(TranscriptLineMeta::cell_index) - .map(|cell_index| is_user_cell.get(cell_index).copied().unwrap_or(false)) - .unwrap_or(false); - - let mut merged_spans: Vec> = line - .spans - .iter() - .map(|span| ratatui::text::Span { - style: span.style.patch(line.style), - content: span.content.clone(), - }) - .collect(); - - if is_user_row && width > 0 { - let text: String = merged_spans - .iter() - .map(|span| span.content.as_ref()) - .collect(); - let text_width = UnicodeWidthStr::width(text.as_str()); - let total_width = usize::from(width); - if text_width < total_width { - let pad_len = total_width.saturating_sub(text_width); - if pad_len > 0 { - let pad_style = crate::style::user_message_style(); - merged_spans.push(ratatui::text::Span { - style: pad_style, - content: " ".repeat(pad_len).into(), - }); - } - } - } - - let mut buf: Vec = Vec::new(); - let _ = crate::insert_history::write_spans(&mut buf, merged_spans.iter()); - String::from_utf8(buf).unwrap_or_default() - }) - .collect() - } - /// Apply the current transcript selection to the given buffer. 
/// /// The selection is defined in terms of flattened wrapped transcript line @@ -1120,16 +1181,10 @@ impl App { return; } - let base_x = area.x.saturating_add(2); + let base_x = area.x.saturating_add(TRANSCRIPT_GUTTER_COLS); let max_x = area.right().saturating_sub(1); - let mut start = anchor; - let mut end = head; - if (end.line_index < start.line_index) - || (end.line_index == start.line_index && end.column < start.column) - { - std::mem::swap(&mut start, &mut end); - } + let (start, end) = crate::transcript_selection::ordered_endpoints(anchor, head); let visible_start = self.transcript_view_top; let visible_end = self @@ -1204,15 +1259,13 @@ impl App { /// indices and columns, and this method reconstructs the same wrapped /// transcript used for on-screen rendering so the copied text closely /// matches the highlighted region. + /// + /// Important: copy operates on the selection's full content-relative range, + /// not just the current viewport. A selection can extend outside the visible + /// region (for example, by scrolling after selecting, or by selecting while + /// autoscrolling), and we still want the clipboard payload to reflect the + /// entire selected transcript. 
fn copy_transcript_selection(&mut self, tui: &tui::Tui) { - let (anchor, head) = match ( - self.transcript_selection.anchor, - self.transcript_selection.head, - ) { - (Some(a), Some(h)) if a != h => (a, h), - _ => return, - }; - let size = tui.terminal.last_known_screen_size; let width = size.width; let height = size.height; @@ -1230,137 +1283,22 @@ impl App { return; } - let transcript_area = Rect { - x: 0, - y: 0, + let Some(text) = crate::transcript_copy::selection_to_copy_text_for_cells( + &self.transcript_cells, + self.transcript_selection, width, - height: transcript_height, - }; - - let cells = self.transcript_cells.clone(); - let (lines, _) = Self::build_transcript_lines(&cells, transcript_area.width); - if lines.is_empty() { + ) else { return; - } - - let wrapped = crate::wrapping::word_wrap_lines_borrowed( - &lines, - transcript_area.width.max(1) as usize, - ); - let total_lines = wrapped.len(); - if total_lines == 0 { - return; - } - - let max_visible = transcript_area.height as usize; - let visible_start = self - .transcript_view_top - .min(total_lines.saturating_sub(max_visible)); - let visible_end = std::cmp::min(visible_start + max_visible, total_lines); - - let mut buf = Buffer::empty(transcript_area); - Clear.render_ref(transcript_area, &mut buf); - - for (row_index, line_index) in (visible_start..visible_end).enumerate() { - let row_area = Rect { - x: transcript_area.x, - y: transcript_area.y + row_index as u16, - width: transcript_area.width, - height: 1, - }; - wrapped[line_index].render_ref(row_area, &mut buf); - } - - let base_x = transcript_area.x.saturating_add(2); - let max_x = transcript_area.right().saturating_sub(1); - - let mut start = anchor; - let mut end = head; - if (end.line_index < start.line_index) - || (end.line_index == start.line_index && end.column < start.column) - { - std::mem::swap(&mut start, &mut end); - } - - let mut lines_out: Vec = Vec::new(); - - for (row_index, line_index) in (visible_start..visible_end).enumerate() 
{ - if line_index < start.line_index || line_index > end.line_index { - continue; - } - - let y = transcript_area.y + row_index as u16; - - let line_start_col = if line_index == start.line_index { - start.column - } else { - 0 - }; - let line_end_col = if line_index == end.line_index { - end.column - } else { - max_x.saturating_sub(base_x) - }; - - let row_sel_start = base_x.saturating_add(line_start_col); - let row_sel_end = base_x.saturating_add(line_end_col).min(max_x); - - if row_sel_start > row_sel_end { - continue; - } - - let mut first_text_x = None; - let mut last_text_x = None; - for x in base_x..=max_x { - let cell = &buf[(x, y)]; - if cell.symbol() != " " { - if first_text_x.is_none() { - first_text_x = Some(x); - } - last_text_x = Some(x); - } - } - - let (text_start, text_end) = match (first_text_x, last_text_x) { - // Treat indentation spaces as part of the copyable region by - // starting from the first content column to the right of the - // transcript gutter, but still clamp to the last non-space - // glyph so trailing padding is not included. - (Some(_), Some(e)) => (base_x, e), - _ => { - lines_out.push(String::new()); - continue; - } - }; - - let from_x = row_sel_start.max(text_start); - let to_x = row_sel_end.min(text_end); - if from_x > to_x { - continue; - } - - let mut line_text = String::new(); - for x in from_x..=to_x { - let cell = &buf[(x, y)]; - let symbol = cell.symbol(); - if !symbol.is_empty() { - line_text.push_str(symbol); - } - } - - lines_out.push(line_text); - } - - if lines_out.is_empty() { - return; - } - - let text = lines_out.join("\n"); + }; if let Err(err) = clipboard_copy::copy_text(text) { tracing::error!(error = %err, "failed to copy selection to clipboard"); } } + fn copy_selection_key(&self) -> crate::key_hint::KeyBinding { + self.transcript_copy_ui.key_binding() + } + /// Map a mouse position in the transcript area to a content-relative /// selection point, if there is transcript content to select. 
fn transcript_point_from_coordinates( @@ -1470,6 +1408,7 @@ impl App { }; self.chat_widget = ChatWidget::new_from_existing( init, + self.server.clone(), resumed.conversation, resumed.session_configured, ); @@ -1568,6 +1507,7 @@ impl App { return Ok(false); } AppEvent::CodexOp(op) => self.chat_widget.submit_op(op), + AppEvent::QueueUserText(text) => self.chat_widget.queue_user_text(text), AppEvent::DiffResult(text) => { // Clear the in-progress state in the bottom pane self.chat_widget.on_diff_complete(); @@ -1584,6 +1524,15 @@ impl App { )); tui.frame_requester().schedule_frame(); } + AppEvent::LspStatusLoaded(status) => { + self.chat_widget + .add_plain_history_lines(crate::lsp_status::render(&status)); + tui.frame_requester().schedule_frame(); + } + AppEvent::LspStatusLoadFailed(message) => { + self.chat_widget.add_error_message(message); + tui.frame_requester().schedule_frame(); + } AppEvent::StartFileSearch(query) => { if !query.is_empty() { self.file_search.on_user_query(query); @@ -1615,6 +1564,14 @@ impl App { self.config.plan_model_reasoning_effort = effort; self.chat_widget.set_plan_reasoning_effort(effort); } + AppEvent::UpdateSubagentModel(model) => { + self.config.subagent_model = Some(model.clone()); + self.chat_widget.set_subagent_model(&model); + } + AppEvent::UpdateSubagentReasoningEffort(effort) => { + self.config.subagent_model_reasoning_effort = effort; + self.chat_widget.set_subagent_reasoning_effort(effort); + } AppEvent::OpenReasoningPopup { model, target } => { self.chat_widget.open_reasoning_popup(target, model); } @@ -1682,8 +1639,14 @@ impl App { sandbox_policy: Some(preset.sandbox.clone()), model: None, plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: None, plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, }, )); @@ -1789,6 +1752,45 @@ impl App { } } } + AppEvent::PersistSubagentModelSelection { model, effort } => { + let 
profile = self.active_profile.as_deref(); + match ConfigEditsBuilder::new(&self.config.codex_home) + .with_profile(profile) + .set_subagent_model(Some(model.as_str()), effort) + .apply() + .await + { + Ok(()) => { + let mut message = format!("Subagent model changed to {model}"); + if let Some(label) = Self::reasoning_label_for(&model, effort) { + message.push(' '); + message.push_str(label); + } + message.push_str(" (used for spawned subagents)"); + if let Some(profile) = profile { + message.push_str(" for "); + message.push_str(profile); + message.push_str(" profile"); + } + self.chat_widget.add_info_message(message, None); + } + Err(err) => { + tracing::error!( + error = %err, + "failed to persist subagent model selection" + ); + if let Some(profile) = profile { + self.chat_widget.add_error_message(format!( + "Failed to save subagent model for profile `{profile}`: {err}" + )); + } else { + self.chat_widget.add_error_message(format!( + "Failed to save default subagent model: {err}" + )); + } + } + } + } AppEvent::UpdateAskForApprovalPolicy(policy) => { self.chat_widget.set_approval_policy(policy); } @@ -1848,6 +1850,41 @@ impl App { } } } + AppEvent::UpdateFeatureFlags { updates } => { + if updates.is_empty() { + return Ok(true); + } + let mut builder = ConfigEditsBuilder::new(&self.config.codex_home) + .with_profile(self.active_profile.as_deref()); + for (feature, enabled) in &updates { + let feature_key = feature.key(); + if *enabled { + // Update the in-memory configs. + self.config.features.enable(*feature); + self.chat_widget.set_feature_enabled(*feature, true); + builder = builder.set_feature_enabled(feature_key, true); + } else { + // Update the in-memory configs. 
+ self.config.features.disable(*feature); + self.chat_widget.set_feature_enabled(*feature, false); + if feature.default_enabled() { + builder = builder.set_feature_enabled(feature_key, false); + } else { + // If the feature already defaults to `false`, drop the key + // so the user doesn't miss it once it becomes globally released. + builder = builder.with_edits(vec![ConfigEdit::ClearPath { + segments: vec!["features".to_string(), feature_key.to_string()], + }]); + } + } + } + if let Err(err) = builder.apply().await { + tracing::error!(error = %err, "failed to persist feature flags"); + self.chat_widget.add_error_message(format!( + "Failed to update experimental features: {err}" + )); + } + } AppEvent::SkipNextWorldWritableScan => { self.skip_world_writable_scan_once = true; } @@ -2034,11 +2071,11 @@ impl App { } } KeyEvent { - code: KeyCode::Char('y'), - modifiers: crossterm::event::KeyModifiers::CONTROL, + code: KeyCode::Char(ch), + modifiers, kind: KeyEventKind::Press | KeyEventKind::Repeat, .. 
- } => { + } if self.transcript_copy_ui.is_copy_key(ch, modifiers) => { self.copy_transcript_selection(tui); } KeyEvent { @@ -2060,6 +2097,7 @@ impl App { delta, usize::from(transcript_height), width, + true, ); } } @@ -2084,6 +2122,7 @@ impl App { delta, usize::from(transcript_height), width, + true, ); } } @@ -2180,6 +2219,7 @@ mod tests { use crate::history_cell::HistoryCell; use crate::history_cell::UserHistoryCell; use crate::history_cell::new_session_info; + use crate::transcript_copy_ui::CopySelectionShortcut; use codex_core::AuthManager; use codex_core::CodexAuth; use codex_core::ConversationManager; @@ -2219,13 +2259,19 @@ mod tests { transcript_cells: Vec::new(), transcript_scroll: TranscriptScroll::default(), transcript_selection: TranscriptSelection::default(), + transcript_multi_click: TranscriptMultiClick::default(), transcript_view_top: 0, transcript_total_lines: 0, + transcript_copy_ui: TranscriptCopyUi::new_with_shortcut( + CopySelectionShortcut::CtrlShiftC, + ), overlay: None, deferred_history_lines: Vec::new(), has_emitted_history_lines: false, enhanced_keys_supported: false, commit_anim_running: Arc::new(AtomicBool::new(false)), + scroll_config: ScrollConfig::default(), + scroll_state: MouseScrollState::default(), backtrack: BacktrackState::default(), feedback: codex_feedback::CodexFeedback::new(), pending_update_action: None, @@ -2263,13 +2309,19 @@ mod tests { transcript_cells: Vec::new(), transcript_scroll: TranscriptScroll::default(), transcript_selection: TranscriptSelection::default(), + transcript_multi_click: TranscriptMultiClick::default(), transcript_view_top: 0, transcript_total_lines: 0, + transcript_copy_ui: TranscriptCopyUi::new_with_shortcut( + CopySelectionShortcut::CtrlShiftC, + ), overlay: None, deferred_history_lines: Vec::new(), has_emitted_history_lines: false, enhanced_keys_supported: false, commit_anim_running: Arc::new(AtomicBool::new(false)), + scroll_config: ScrollConfig::default(), + scroll_state: 
MouseScrollState::default(), backtrack: BacktrackState::default(), feedback: codex_feedback::CodexFeedback::new(), pending_update_action: None, @@ -2320,6 +2372,38 @@ mod tests { )); } + #[tokio::test] + async fn transcript_selection_copy_includes_offscreen_lines() { + let mut app = make_test_app().await; + app.transcript_cells = vec![Arc::new(AgentMessageCell::new( + vec![ + Line::from("one"), + Line::from("two"), + Line::from("three"), + Line::from("four"), + ], + true, + ))]; + + app.transcript_view_top = 2; + app.transcript_selection.anchor = Some(TranscriptSelectionPoint { + line_index: 0, + column: 0, + }); + app.transcript_selection.head = Some(TranscriptSelectionPoint { + line_index: 3, + column: u16::MAX, + }); + + let text = crate::transcript_copy::selection_to_copy_text_for_cells( + &app.transcript_cells, + app.transcript_selection, + 40, + ) + .expect("expected text"); + assert_eq!(text, "one\ntwo\nthree\nfour"); + } + #[tokio::test] async fn model_migration_prompt_respects_hide_flag_and_self_target() { let mut seen = BTreeMap::new(); @@ -2486,6 +2570,184 @@ mod tests { } } + #[tokio::test] + async fn transcript_selection_renders_copy_affordance() { + use ratatui::buffer::Buffer; + use ratatui::layout::Rect; + + let mut app = make_test_app().await; + app.transcript_total_lines = 3; + app.transcript_view_top = 0; + + let area = Rect { + x: 0, + y: 0, + width: 60, + height: 3, + }; + + app.transcript_selection = TranscriptSelection { + anchor: Some(TranscriptSelectionPoint { + line_index: 1, + column: 2, + }), + head: Some(TranscriptSelectionPoint { + line_index: 1, + column: 6, + }), + }; + + let mut buf = Buffer::empty(area); + for y in 0..area.height { + for x in 2..area.width.saturating_sub(1) { + buf[(x, y)].set_symbol("X"); + } + } + + app.apply_transcript_selection(area, &mut buf); + let anchor = app.transcript_selection.anchor.expect("anchor"); + let head = app.transcript_selection.head.expect("head"); + app.transcript_copy_ui.render_copy_pill( + 
area, + &mut buf, + (anchor.line_index, anchor.column), + (head.line_index, head.column), + app.transcript_view_top, + app.transcript_total_lines, + ); + + let mut s = String::new(); + for y in area.y..area.bottom() { + for x in area.x..area.right() { + s.push(buf[(x, y)].symbol().chars().next().unwrap_or(' ')); + } + s.push('\n'); + } + + assert!(s.contains("copy")); + assert!(s.contains("ctrl + shift + c")); + assert!(app.transcript_copy_ui.hit_test(10, 2)); + } + + #[tokio::test] + async fn transcript_selection_renders_ctrl_y_copy_affordance_in_vscode_mode() { + use ratatui::buffer::Buffer; + use ratatui::layout::Rect; + + let mut app = make_test_app().await; + app.transcript_copy_ui = TranscriptCopyUi::new_with_shortcut(CopySelectionShortcut::CtrlY); + app.transcript_total_lines = 3; + app.transcript_view_top = 0; + + let area = Rect { + x: 0, + y: 0, + width: 60, + height: 3, + }; + + app.transcript_selection = TranscriptSelection { + anchor: Some(TranscriptSelectionPoint { + line_index: 1, + column: 2, + }), + head: Some(TranscriptSelectionPoint { + line_index: 1, + column: 6, + }), + }; + + let mut buf = Buffer::empty(area); + for y in 0..area.height { + for x in 2..area.width.saturating_sub(1) { + buf[(x, y)].set_symbol("X"); + } + } + + app.apply_transcript_selection(area, &mut buf); + let anchor = app.transcript_selection.anchor.expect("anchor"); + let head = app.transcript_selection.head.expect("head"); + app.transcript_copy_ui.render_copy_pill( + area, + &mut buf, + (anchor.line_index, anchor.column), + (head.line_index, head.column), + app.transcript_view_top, + app.transcript_total_lines, + ); + + let mut s = String::new(); + for y in area.y..area.bottom() { + for x in area.x..area.right() { + s.push(buf[(x, y)].symbol().chars().next().unwrap_or(' ')); + } + s.push('\n'); + } + + assert!(s.contains("copy")); + assert!(s.contains("ctrl + y")); + assert!(!s.contains("ctrl + shift + c")); + assert!(app.transcript_copy_ui.hit_test(10, 2)); + } + + 
#[tokio::test] + async fn transcript_selection_hides_copy_affordance_while_dragging() { + use ratatui::buffer::Buffer; + use ratatui::layout::Rect; + + let mut app = make_test_app().await; + app.transcript_total_lines = 3; + app.transcript_view_top = 0; + app.transcript_copy_ui.set_dragging(true); + + let area = Rect { + x: 0, + y: 0, + width: 60, + height: 3, + }; + + app.transcript_selection = TranscriptSelection { + anchor: Some(TranscriptSelectionPoint { + line_index: 1, + column: 2, + }), + head: Some(TranscriptSelectionPoint { + line_index: 1, + column: 6, + }), + }; + + let mut buf = Buffer::empty(area); + for y in 0..area.height { + for x in 2..area.width.saturating_sub(1) { + buf[(x, y)].set_symbol("X"); + } + } + + let anchor = app.transcript_selection.anchor.expect("anchor"); + let head = app.transcript_selection.head.expect("head"); + app.transcript_copy_ui.render_copy_pill( + area, + &mut buf, + (anchor.line_index, anchor.column), + (head.line_index, head.column), + app.transcript_view_top, + app.transcript_total_lines, + ); + + let mut s = String::new(); + for y in area.y..area.bottom() { + for x in area.x..area.right() { + s.push(buf[(x, y)].symbol().chars().next().unwrap_or(' ')); + } + s.push('\n'); + } + + assert!(!s.contains("copy")); + assert!(!app.transcript_copy_ui.hit_test(10, 2)); + } + #[tokio::test] async fn new_session_requests_shutdown_for_previous_conversation() { let (mut app, mut app_event_rx, mut op_rx) = make_test_app_with_channels().await; @@ -2538,7 +2800,12 @@ mod tests { let is_user_cell = vec![true]; let width: u16 = 10; - let rendered = App::render_lines_to_ansi(&lines, &line_meta, &is_user_cell, width); + let rendered = crate::transcript_render::render_lines_to_ansi( + &lines, + &line_meta, + &is_user_cell, + width, + ); assert_eq!(rendered.len(), 1); assert!(rendered[0].contains("hi")); } diff --git a/codex-rs/tui2/src/app_backtrack.rs b/codex-rs/tui2/src/app_backtrack.rs index 671702d3082..e8c0231b461 100644 --- 
a/codex-rs/tui2/src/app_backtrack.rs +++ b/codex-rs/tui2/src/app_backtrack.rs @@ -352,8 +352,12 @@ impl App { feedback: self.feedback.clone(), is_first_run: false, }; - self.chat_widget = - crate::chatwidget::ChatWidget::new_from_existing(init, conv, session_configured); + self.chat_widget = crate::chatwidget::ChatWidget::new_from_existing( + init, + self.server.clone(), + conv, + session_configured, + ); self.current_model = model_family.get_model_slug().to_string(); // Trim transcript up to the selected user message and re-render it. self.trim_transcript_for_backtrack(nth_user_message); diff --git a/codex-rs/tui2/src/app_event.rs b/codex-rs/tui2/src/app_event.rs index 374b1e2c37d..c06da480a12 100644 --- a/codex-rs/tui2/src/app_event.rs +++ b/codex-rs/tui2/src/app_event.rs @@ -5,11 +5,13 @@ use codex_core::protocol::ConversationPathResponseEvent; use codex_core::protocol::Event; use codex_core::protocol::RateLimitSnapshot; use codex_file_search::FileMatch; +use codex_lsp::LspStatus; use codex_protocol::openai_models::ModelPreset; use crate::bottom_pane::ApprovalRequest; use crate::history_cell::HistoryCell; +use codex_core::features::Feature; use codex_core::protocol::AskForApproval; use codex_core::protocol::SandboxPolicy; use codex_protocol::openai_models::ReasoningEffort; @@ -18,6 +20,7 @@ use codex_protocol::openai_models::ReasoningEffort; pub(crate) enum ModelPickerTarget { Chat, Plan, + Subagent, } #[allow(clippy::large_enum_variant)] @@ -38,6 +41,10 @@ pub(crate) enum AppEvent { /// bubbling channels through layers of widgets. CodexOp(codex_core::protocol::Op), + /// Queue a synthetic user message (e.g. resume helpers) through the normal + /// ChatWidget path so it is persisted to cross-session message history. + QueueUserText(String), + /// Kick off an asynchronous file search for the given query (text after /// the `@`). Previous searches may be cancelled by the app layer so there /// is at most one in-flight search. 
@@ -57,6 +64,12 @@ pub(crate) enum AppEvent { /// Result of computing a `/diff` command. DiffResult(String), + /// Result of fetching current LSP server status for a workspace. + LspStatusLoaded(LspStatus), + + /// Fetching LSP status failed. + LspStatusLoadFailed(String), + InsertHistoryCell(Box), StartCommitAnimation, @@ -75,6 +88,17 @@ pub(crate) enum AppEvent { /// Update the current plan reasoning effort in the running app and widget. UpdatePlanReasoningEffort(Option), + /// Update the current subagent model slug in the running app and widget. + UpdateSubagentModel(String), + + /// Update the current subagent reasoning effort in the running app and widget. + UpdateSubagentReasoningEffort(Option), + + /// Persist updated feature flags (e.g. via `/experimental`). + UpdateFeatureFlags { + updates: Vec<(Feature, bool)>, + }, + /// Persist the selected model and reasoning effort to the appropriate config. PersistModelSelection { model: String, @@ -87,6 +111,12 @@ pub(crate) enum AppEvent { effort: Option, }, + /// Persist the selected subagent model and reasoning effort to the appropriate config. + PersistSubagentModelSelection { + model: String, + effort: Option, + }, + /// Open the reasoning selection popup after picking a model. 
OpenReasoningPopup { model: ModelPreset, diff --git a/codex-rs/tui2/src/bottom_pane/ask_user_question_overlay.rs b/codex-rs/tui2/src/bottom_pane/ask_user_question_overlay.rs index 59c69890ee8..d411982a47e 100644 --- a/codex-rs/tui2/src/bottom_pane/ask_user_question_overlay.rs +++ b/codex-rs/tui2/src/bottom_pane/ask_user_question_overlay.rs @@ -392,7 +392,9 @@ impl AskUserQuestionOverlay { self.mode = Mode::Review; self.error = None; self.state.reset(); - self.state.selected_idx = Some(0); + self.state.selected_idx = Some(self.questions.len()); + self.state + .ensure_visible(self.rows_len(), self.max_visible_rows()); self.return_to_review = true; } @@ -1000,3 +1002,123 @@ impl crate::render::renderable::Renderable for AskUserQuestionOverlay { self.footer_hint().dim().render(hint_area, buf); } } + +#[cfg(test)] +mod tests { + use super::*; + + use pretty_assertions::assert_eq; + use tokio::sync::mpsc::unbounded_channel; + + fn option(label: &str) -> codex_core::protocol::AskUserQuestionOption { + codex_core::protocol::AskUserQuestionOption { + label: label.to_string(), + description: "".to_string(), + } + } + + fn question(header: &str, multi_select: bool, options: &[&str]) -> AskUserQuestion { + AskUserQuestion { + question: format!("Question {header}?"), + header: header.to_string(), + options: options.iter().copied().map(option).collect(), + multi_select, + } + } + + fn make_overlay( + questions: Vec, + ) -> ( + AskUserQuestionOverlay, + tokio::sync::mpsc::UnboundedReceiver, + ) { + let (tx, rx) = unbounded_channel::(); + let app_event_tx = AppEventSender::new(tx); + let ev = AskUserQuestionRequestEvent { + call_id: "call-1".to_string(), + questions, + }; + ( + AskUserQuestionOverlay::new("ask-1".to_string(), ev, app_event_tx), + rx, + ) + } + + #[test] + fn review_defaults_to_submit() { + let (mut overlay, mut rx) = make_overlay(vec![question("Q1", false, &["A", "B"])]); + + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('1'), KeyModifiers::NONE)); + 
assert_eq!(overlay.mode, Mode::Review); + assert_eq!(overlay.state.selected_idx, Some(overlay.questions.len())); + assert!(rx.try_recv().is_err()); + } + + #[test] + fn enter_in_review_submits() { + let (mut overlay, mut rx) = make_overlay(vec![question("Q1", false, &["A", "B"])]); + + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('1'), KeyModifiers::NONE)); + assert_eq!(overlay.mode, Mode::Review); + assert_eq!(overlay.state.selected_idx, Some(overlay.questions.len())); + + overlay.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + let AppEvent::CodexOp(op) = rx.try_recv().expect("submit op") else { + panic!("expected CodexOp"); + }; + match op { + Op::ResolveAskUserQuestion { id, response } => { + assert_eq!(id, "ask-1"); + assert_eq!( + response, + AskUserQuestionResponse::Answered { + answers: HashMap::from([("Q1".to_string(), "A".to_string())]) + } + ); + } + other => panic!("unexpected op: {other:?}"), + } + } + + #[test] + fn editing_from_review_still_possible() { + let (mut overlay, mut rx) = make_overlay(vec![ + question("Q1", false, &["A", "B"]), + question("Q2", false, &["C", "D"]), + ]); + + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('1'), KeyModifiers::NONE)); + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('2'), KeyModifiers::NONE)); + assert_eq!(overlay.mode, Mode::Review); + assert_eq!(overlay.state.selected_idx, Some(overlay.questions.len())); + assert!(rx.try_recv().is_err()); + + // Edit Q1 from review. + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('1'), KeyModifiers::NONE)); + assert_eq!(overlay.current_idx, 0); + + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('2'), KeyModifiers::NONE)); + assert_eq!(overlay.mode, Mode::Review); + assert_eq!(overlay.state.selected_idx, Some(overlay.questions.len())); + + // Submit. 
+ overlay.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + let AppEvent::CodexOp(op) = rx.try_recv().expect("submit op") else { + panic!("expected CodexOp"); + }; + match op { + Op::ResolveAskUserQuestion { response, .. } => { + assert_eq!( + response, + AskUserQuestionResponse::Answered { + answers: HashMap::from([ + ("Q1".to_string(), "B".to_string()), + ("Q2".to_string(), "D".to_string()) + ]) + } + ); + } + other => panic!("unexpected op: {other:?}"), + } + } +} diff --git a/codex-rs/tui2/src/bottom_pane/chat_composer.rs b/codex-rs/tui2/src/bottom_pane/chat_composer.rs index 7ca10c6d245..3d5de81a9fb 100644 --- a/codex-rs/tui2/src/bottom_pane/chat_composer.rs +++ b/codex-rs/tui2/src/bottom_pane/chat_composer.rs @@ -1,3 +1,5 @@ +use crate::key_hint; +use crate::key_hint::KeyBinding; use crate::key_hint::has_ctrl_or_alt; use crossterm::event::KeyCode; use crossterm::event::KeyEvent; @@ -121,6 +123,7 @@ pub(crate) struct ChatComposer { transcript_scrolled: bool, transcript_selection_active: bool, transcript_scroll_position: Option<(usize, usize)>, + transcript_copy_selection_key: KeyBinding, skills: Option>, dismissed_skill_popup_token: Option, } @@ -172,6 +175,7 @@ impl ChatComposer { transcript_scrolled: false, transcript_selection_active: false, transcript_scroll_position: None, + transcript_copy_selection_key: key_hint::ctrl_shift(KeyCode::Char('c')), skills: None, dismissed_skill_popup_token: None, }; @@ -1540,6 +1544,7 @@ impl ChatComposer { transcript_scrolled: self.transcript_scrolled, transcript_selection_active: self.transcript_selection_active, transcript_scroll_position: self.transcript_scroll_position, + transcript_copy_selection_key: self.transcript_copy_selection_key, } } @@ -1571,10 +1576,12 @@ impl ChatComposer { scrolled: bool, selection_active: bool, scroll_position: Option<(usize, usize)>, + copy_selection_key: KeyBinding, ) { self.transcript_scrolled = scrolled; self.transcript_selection_active = selection_active; 
self.transcript_scroll_position = scroll_position; + self.transcript_copy_selection_key = copy_selection_key; } fn sync_popups(&mut self) { diff --git a/codex-rs/tui2/src/bottom_pane/experimental_features_view.rs b/codex-rs/tui2/src/bottom_pane/experimental_features_view.rs new file mode 100644 index 00000000000..7a082f550f0 --- /dev/null +++ b/codex-rs/tui2/src/bottom_pane/experimental_features_view.rs @@ -0,0 +1,294 @@ +use crossterm::event::KeyCode; +use crossterm::event::KeyEvent; +use crossterm::event::KeyModifiers; +use ratatui::buffer::Buffer; +use ratatui::layout::Constraint; +use ratatui::layout::Layout; +use ratatui::layout::Rect; +use ratatui::style::Stylize; +use ratatui::text::Line; +use ratatui::widgets::Block; +use ratatui::widgets::Widget; + +use crate::app_event::AppEvent; +use crate::app_event_sender::AppEventSender; +use crate::key_hint; +use crate::render::Insets; +use crate::render::RectExt as _; +use crate::render::renderable::ColumnRenderable; +use crate::render::renderable::Renderable; +use crate::style::user_message_style; + +use codex_core::features::Feature; + +use super::CancellationEvent; +use super::bottom_pane_view::BottomPaneView; +use super::popup_consts::MAX_POPUP_ROWS; +use super::scroll_state::ScrollState; +use super::selection_popup_common::GenericDisplayRow; +use super::selection_popup_common::measure_rows_height; +use super::selection_popup_common::render_rows; + +pub(crate) struct BetaFeatureItem { + pub feature: Feature, + pub name: String, + pub description: String, + pub enabled: bool, +} + +pub(crate) struct ExperimentalFeaturesView { + features: Vec, + state: ScrollState, + complete: bool, + app_event_tx: AppEventSender, + header: Box, + footer_hint: Line<'static>, +} + +impl ExperimentalFeaturesView { + pub(crate) fn new(features: Vec, app_event_tx: AppEventSender) -> Self { + let mut header = ColumnRenderable::new(); + header.push(Line::from("Experimental features".bold())); + header.push(Line::from( + "Toggle beta 
features. Changes are saved to config.toml.".dim(), + )); + + let mut view = Self { + features, + state: ScrollState::new(), + complete: false, + app_event_tx, + header: Box::new(header), + footer_hint: experimental_popup_hint_line(), + }; + view.initialize_selection(); + view + } + + fn initialize_selection(&mut self) { + if self.visible_len() == 0 { + self.state.selected_idx = None; + } else if self.state.selected_idx.is_none() { + self.state.selected_idx = Some(0); + } + } + + fn visible_len(&self) -> usize { + self.features.len() + } + + fn build_rows(&self) -> Vec { + let mut rows = Vec::with_capacity(self.features.len()); + let selected_idx = self.state.selected_idx; + for (idx, item) in self.features.iter().enumerate() { + let prefix = if selected_idx == Some(idx) { + '›' + } else { + ' ' + }; + let marker = if item.enabled { 'x' } else { ' ' }; + let name = format!("{prefix} [{marker}] {}", item.name); + rows.push(GenericDisplayRow { + name, + description: Some(item.description.clone()), + display_shortcut: None, + match_indices: None, + wrap_indent: None, + }); + } + + rows + } + + fn move_up(&mut self) { + let len = self.visible_len(); + if len == 0 { + return; + } + self.state.move_up_wrap(len); + self.state.ensure_visible(len, MAX_POPUP_ROWS.min(len)); + } + + fn move_down(&mut self) { + let len = self.visible_len(); + if len == 0 { + return; + } + self.state.move_down_wrap(len); + self.state.ensure_visible(len, MAX_POPUP_ROWS.min(len)); + } + + fn toggle_selected(&mut self) { + let Some(selected_idx) = self.state.selected_idx else { + return; + }; + + if let Some(item) = self.features.get_mut(selected_idx) { + item.enabled = !item.enabled; + } + } + + fn rows_width(total_width: u16) -> u16 { + total_width.saturating_sub(2) + } +} + +impl BottomPaneView for ExperimentalFeaturesView { + fn handle_key_event(&mut self, key_event: KeyEvent) { + match key_event { + KeyEvent { + code: KeyCode::Up, .. 
+ } + | KeyEvent { + code: KeyCode::Char('p'), + modifiers: KeyModifiers::CONTROL, + .. + } + | KeyEvent { + code: KeyCode::Char('\u{0010}'), + modifiers: KeyModifiers::NONE, + .. + } => self.move_up(), + KeyEvent { + code: KeyCode::Char('k'), + modifiers: KeyModifiers::NONE, + .. + } => self.move_up(), + KeyEvent { + code: KeyCode::Down, + .. + } + | KeyEvent { + code: KeyCode::Char('n'), + modifiers: KeyModifiers::CONTROL, + .. + } + | KeyEvent { + code: KeyCode::Char('\u{000e}'), + modifiers: KeyModifiers::NONE, + .. + } => self.move_down(), + KeyEvent { + code: KeyCode::Char('j'), + modifiers: KeyModifiers::NONE, + .. + } => self.move_down(), + KeyEvent { + code: KeyCode::Enter, + modifiers: KeyModifiers::NONE, + .. + } => self.toggle_selected(), + KeyEvent { + code: KeyCode::Esc, .. + } => { + self.on_ctrl_c(); + } + _ => {} + } + } + + fn is_complete(&self) -> bool { + self.complete + } + + fn on_ctrl_c(&mut self) -> CancellationEvent { + // Save the updates + if !self.features.is_empty() { + let updates = self + .features + .iter() + .map(|item| (item.feature, item.enabled)) + .collect(); + self.app_event_tx + .send(AppEvent::UpdateFeatureFlags { updates }); + } + + self.complete = true; + CancellationEvent::Handled + } +} + +impl Renderable for ExperimentalFeaturesView { + fn render(&self, area: Rect, buf: &mut Buffer) { + if area.height == 0 || area.width == 0 { + return; + } + + let [content_area, footer_area] = + Layout::vertical([Constraint::Fill(1), Constraint::Length(1)]).areas(area); + + Block::default() + .style(user_message_style()) + .render(content_area, buf); + + let header_height = self + .header + .desired_height(content_area.width.saturating_sub(4)); + let rows = self.build_rows(); + let rows_width = Self::rows_width(content_area.width); + let rows_height = measure_rows_height( + &rows, + &self.state, + MAX_POPUP_ROWS, + rows_width.saturating_add(1), + ); + let [header_area, _, list_area] = Layout::vertical([ + Constraint::Max(header_height), 
+ Constraint::Max(1), + Constraint::Length(rows_height), + ]) + .areas(content_area.inset(Insets::vh(1, 2))); + + self.header.render(header_area, buf); + + if list_area.height > 0 { + let render_area = Rect { + x: list_area.x.saturating_sub(2), + y: list_area.y, + width: rows_width.max(1), + height: list_area.height, + }; + render_rows( + render_area, + buf, + &rows, + &self.state, + MAX_POPUP_ROWS, + " No experimental features available for now", + ); + } + + let hint_area = Rect { + x: footer_area.x + 2, + y: footer_area.y, + width: footer_area.width.saturating_sub(2), + height: footer_area.height, + }; + self.footer_hint.clone().dim().render(hint_area, buf); + } + + fn desired_height(&self, width: u16) -> u16 { + let rows = self.build_rows(); + let rows_width = Self::rows_width(width); + let rows_height = measure_rows_height( + &rows, + &self.state, + MAX_POPUP_ROWS, + rows_width.saturating_add(1), + ); + + let mut height = self.header.desired_height(width.saturating_sub(4)); + height = height.saturating_add(rows_height + 3); + height.saturating_add(1) + } +} + +fn experimental_popup_hint_line() -> Line<'static> { + Line::from(vec![ + "Press ".into(), + key_hint::plain(KeyCode::Enter).into(), + " to toggle or ".into(), + key_hint::plain(KeyCode::Esc).into(), + " to save for next conversation".into(), + ]) +} diff --git a/codex-rs/tui2/src/bottom_pane/footer.rs b/codex-rs/tui2/src/bottom_pane/footer.rs index 7f2b4e62856..57bffd5639b 100644 --- a/codex-rs/tui2/src/bottom_pane/footer.rs +++ b/codex-rs/tui2/src/bottom_pane/footer.rs @@ -25,6 +25,7 @@ pub(crate) struct FooterProps { pub(crate) transcript_scrolled: bool, pub(crate) transcript_selection_active: bool, pub(crate) transcript_scroll_position: Option<(usize, usize)>, + pub(crate) transcript_copy_selection_key: KeyBinding, } #[derive(Clone, Copy, Debug, Eq, PartialEq)] @@ -115,7 +116,7 @@ fn footer_lines(props: FooterProps) -> Vec> { } if props.transcript_selection_active { line.push_span(" · ".dim()); - 
line.push_span(key_hint::ctrl(KeyCode::Char('y'))); + line.push_span(props.transcript_copy_selection_key); line.push_span(" copy selection".dim()); } vec![line] @@ -467,6 +468,7 @@ mod tests { transcript_scrolled: false, transcript_selection_active: false, transcript_scroll_position: None, + transcript_copy_selection_key: key_hint::ctrl_shift(KeyCode::Char('c')), }, ); @@ -482,6 +484,7 @@ mod tests { transcript_scrolled: true, transcript_selection_active: true, transcript_scroll_position: Some((3, 42)), + transcript_copy_selection_key: key_hint::ctrl_shift(KeyCode::Char('c')), }, ); @@ -497,6 +500,7 @@ mod tests { transcript_scrolled: false, transcript_selection_active: false, transcript_scroll_position: None, + transcript_copy_selection_key: key_hint::ctrl_shift(KeyCode::Char('c')), }, ); @@ -512,6 +516,7 @@ mod tests { transcript_scrolled: false, transcript_selection_active: false, transcript_scroll_position: None, + transcript_copy_selection_key: key_hint::ctrl_shift(KeyCode::Char('c')), }, ); @@ -527,6 +532,7 @@ mod tests { transcript_scrolled: false, transcript_selection_active: false, transcript_scroll_position: None, + transcript_copy_selection_key: key_hint::ctrl_shift(KeyCode::Char('c')), }, ); @@ -542,6 +548,7 @@ mod tests { transcript_scrolled: false, transcript_selection_active: false, transcript_scroll_position: None, + transcript_copy_selection_key: key_hint::ctrl_shift(KeyCode::Char('c')), }, ); @@ -557,6 +564,7 @@ mod tests { transcript_scrolled: false, transcript_selection_active: false, transcript_scroll_position: None, + transcript_copy_selection_key: key_hint::ctrl_shift(KeyCode::Char('c')), }, ); @@ -572,6 +580,7 @@ mod tests { transcript_scrolled: false, transcript_selection_active: false, transcript_scroll_position: None, + transcript_copy_selection_key: key_hint::ctrl_shift(KeyCode::Char('c')), }, ); @@ -587,6 +596,7 @@ mod tests { transcript_scrolled: false, transcript_selection_active: false, transcript_scroll_position: None, + 
transcript_copy_selection_key: key_hint::ctrl_shift(KeyCode::Char('c')), }, ); } diff --git a/codex-rs/tui2/src/bottom_pane/mod.rs b/codex-rs/tui2/src/bottom_pane/mod.rs index 87141380c5d..0f058e9318a 100644 --- a/codex-rs/tui2/src/bottom_pane/mod.rs +++ b/codex-rs/tui2/src/bottom_pane/mod.rs @@ -26,12 +26,14 @@ mod chat_composer; mod chat_composer_history; mod command_popup; pub mod custom_prompt_view; +mod experimental_features_view; mod file_search_popup; mod footer; mod list_selection_view; mod plan_approval_overlay; mod plan_request_overlay; mod prompt_args; +mod resume_prompt_overlay; mod skill_popup; pub(crate) use list_selection_view::SelectionViewParams; mod feedback_view; @@ -57,10 +59,13 @@ use codex_protocol::custom_prompts::CustomPrompt; use crate::status_indicator_widget::StatusIndicatorWidget; pub(crate) use ask_user_question_overlay::AskUserQuestionOverlay; +pub(crate) use experimental_features_view::BetaFeatureItem; +pub(crate) use experimental_features_view::ExperimentalFeaturesView; pub(crate) use list_selection_view::SelectionAction; pub(crate) use list_selection_view::SelectionItem; pub(crate) use plan_approval_overlay::PlanApprovalOverlay; pub(crate) use plan_request_overlay::PlanRequestOverlay; +pub(crate) use resume_prompt_overlay::ResumePromptOverlay; /// Pane displayed in the lower half of the chat UI. pub(crate) struct BottomPane { @@ -272,12 +277,13 @@ impl BottomPane { self.composer.current_text() } - /// Update the animated header shown to the left of the brackets in the - /// status indicator (defaults to "Working"). No-ops if the status - /// indicator is not active. - pub(crate) fn update_status_header(&mut self, header: String) { + /// Update the status indicator header (defaults to "Working") and details below it. + /// + /// Passing `None` clears any existing details. No-ops if the status indicator is not active. 
+ pub(crate) fn update_status(&mut self, header: String, details: Option) { if let Some(status) = self.status.as_mut() { status.update_header(header); + status.update_details(details); self.request_redraw(); } } @@ -409,9 +415,14 @@ impl BottomPane { scrolled: bool, selection_active: bool, scroll_position: Option<(usize, usize)>, + copy_selection_key: crate::key_hint::KeyBinding, ) { - self.composer - .set_transcript_ui_state(scrolled, selection_active, scroll_position); + self.composer.set_transcript_ui_state( + scrolled, + selection_active, + scroll_position, + copy_selection_key, + ); self.request_redraw(); } diff --git a/codex-rs/tui2/src/bottom_pane/resume_prompt_overlay.rs b/codex-rs/tui2/src/bottom_pane/resume_prompt_overlay.rs new file mode 100644 index 00000000000..1af8d336b78 --- /dev/null +++ b/codex-rs/tui2/src/bottom_pane/resume_prompt_overlay.rs @@ -0,0 +1,188 @@ +use crossterm::event::KeyCode; +use crossterm::event::KeyEvent; +use crossterm::event::KeyEventKind; +use crossterm::event::KeyModifiers; +use ratatui::buffer::Buffer; +use ratatui::layout::Constraint; +use ratatui::layout::Layout; +use ratatui::layout::Rect; +use ratatui::style::Stylize as _; +use ratatui::text::Line; +use ratatui::widgets::Block; +use ratatui::widgets::Clear; +use ratatui::widgets::Widget as _; + +use crate::app_event::AppEvent; +use crate::app_event_sender::AppEventSender; +use crate::render::Insets; +use crate::render::RectExt as _; +use crate::selection_list::selection_option_row; + +use super::CancellationEvent; +use super::bottom_pane_view::BottomPaneView; + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum ResumeSelection { + ResumePaused, + Continue, +} + +impl ResumeSelection { + fn next(self) -> Self { + match self { + ResumeSelection::ResumePaused => ResumeSelection::Continue, + ResumeSelection::Continue => ResumeSelection::ResumePaused, + } + } + + fn prev(self) -> Self { + match self { + ResumeSelection::ResumePaused => ResumeSelection::Continue, + 
ResumeSelection::Continue => ResumeSelection::ResumePaused, + } + } +} + +pub(crate) struct ResumePromptOverlay { + app_event_tx: AppEventSender, + highlighted: ResumeSelection, + selection: Option, + had_partial_output: bool, + had_in_progress_tools: bool, +} + +impl ResumePromptOverlay { + pub(crate) fn new( + app_event_tx: AppEventSender, + had_partial_output: bool, + had_in_progress_tools: bool, + ) -> Self { + Self { + app_event_tx, + highlighted: ResumeSelection::ResumePaused, + selection: None, + had_partial_output, + had_in_progress_tools, + } + } + + fn select(&mut self, selection: ResumeSelection) { + self.highlighted = selection; + self.selection = Some(selection); + if selection == ResumeSelection::Continue { + self.app_event_tx + .send(AppEvent::QueueUserText(continue_prompt())); + } + } +} + +impl BottomPaneView for ResumePromptOverlay { + fn handle_key_event(&mut self, key_event: KeyEvent) { + if key_event.kind == KeyEventKind::Release { + return; + } + if key_event.modifiers.contains(KeyModifiers::CONTROL) + && matches!(key_event.code, KeyCode::Char('c') | KeyCode::Char('d')) + { + self.select(ResumeSelection::ResumePaused); + return; + } + + match key_event.code { + KeyCode::Up | KeyCode::Char('k') => self.highlighted = self.highlighted.prev(), + KeyCode::Down | KeyCode::Char('j') => self.highlighted = self.highlighted.next(), + KeyCode::Char('1') => self.select(ResumeSelection::ResumePaused), + KeyCode::Char('2') => self.select(ResumeSelection::Continue), + KeyCode::Enter => self.select(self.highlighted), + KeyCode::Esc => self.select(ResumeSelection::ResumePaused), + _ => {} + } + } + + fn is_complete(&self) -> bool { + self.selection.is_some() + } + + fn on_ctrl_c(&mut self) -> CancellationEvent { + self.select(ResumeSelection::ResumePaused); + CancellationEvent::Handled + } +} + +impl crate::render::renderable::Renderable for ResumePromptOverlay { + fn render(&self, area: Rect, buf: &mut Buffer) { + Clear.render(area, buf); + let block = 
Block::bordered().title("Resume".bold()); + let inner = block.inner(area); + block.render(area, buf); + + let inset = inner.inset(Insets::vh(1, 2)); + let [header_area, options_area] = + Layout::vertical([Constraint::Length(4), Constraint::Fill(1)]).areas(inset); + + let mut header = Vec::new(); + header.push(Line::from(vec![ + "Previous turn was interrupted.".bold(), + " ".into(), + "Nothing will run until you choose.".dim(), + ])); + if self.had_partial_output || self.had_in_progress_tools { + let mut details = Vec::new(); + if self.had_partial_output { + details.push("partial assistant output".to_string()); + } + if self.had_in_progress_tools { + details.push("in-progress tool calls".to_string()); + } + header.push(Line::from(vec![ + "Detected: ".dim(), + details.join(", ").into(), + ])); + } + header.push(Line::from(vec![ + "Tip: ".dim(), + "Continue sends a new message; it does not auto-replay tool calls.".dim(), + ])); + + for (i, line) in header.into_iter().enumerate() { + let y = header_area.y.saturating_add(i as u16); + let bottom = header_area.y.saturating_add(header_area.height); + if y >= bottom { + break; + } + let line_area = Rect { + x: header_area.x, + y, + width: header_area.width, + height: 1, + }; + line.render(line_area, buf); + } + + let [opt0_area, opt1_area] = + Layout::vertical([Constraint::Length(2), Constraint::Length(2)]).areas(options_area); + + let option_0 = selection_option_row( + 0, + "Resume paused (recommended)".to_string(), + self.highlighted == ResumeSelection::ResumePaused, + ); + option_0.render(opt0_area, buf); + + let option_1 = selection_option_row( + 1, + "Continue from where you left off".to_string(), + self.highlighted == ResumeSelection::Continue, + ); + option_1.render(opt1_area, buf); + } + + fn desired_height(&self, _width: u16) -> u16 { + 9 + } +} + +fn continue_prompt() -> String { + "Continue from where you left off. Do not re-run tool calls that already completed; use the outputs above. 
If you need to rerun a tool, ask first." + .to_string() +} diff --git a/codex-rs/tui2/src/chatwidget.rs b/codex-rs/tui2/src/chatwidget.rs index eb8a6a119ec..b483cbb48ba 100644 --- a/codex-rs/tui2/src/chatwidget.rs +++ b/codex-rs/tui2/src/chatwidget.rs @@ -2,7 +2,6 @@ use std::collections::HashMap; use std::collections::HashSet; use std::collections::VecDeque; use std::path::PathBuf; -use std::str::FromStr; use std::sync::Arc; use std::sync::Mutex; use std::time::Duration; @@ -12,6 +11,8 @@ use codex_backend_client::Client as BackendClient; use codex_core::config::Config; use codex_core::config::ConstraintResult; use codex_core::config::types::Notifications; +use codex_core::features::FEATURES; +use codex_core::features::Feature; use codex_core::git_info::current_branch_name; use codex_core::git_info::local_git_branches; use codex_core::models_manager::manager::ModelsManager; @@ -50,6 +51,7 @@ use codex_core::protocol::PatchApplyBeginEvent; use codex_core::protocol::PlanApprovalRequestEvent; use codex_core::protocol::PlanRequest; use codex_core::protocol::RateLimitSnapshot; +use codex_core::protocol::RawResponseItemEvent; use codex_core::protocol::ReviewRequest; use codex_core::protocol::ReviewTarget; use codex_core::protocol::SkillsListEntry; @@ -75,6 +77,7 @@ use codex_core::skills::model::SkillMetadata; use codex_protocol::ConversationId; use codex_protocol::account::PlanType; use codex_protocol::approvals::ElicitationRequestEvent; +use codex_protocol::models::ResponseItem; use codex_protocol::parse_command::ParsedCommand; use codex_protocol::user_input::UserInput; use crossterm::event::KeyCode; @@ -97,12 +100,15 @@ use crate::app_event::AppEvent; use crate::app_event_sender::AppEventSender; use crate::bottom_pane::ApprovalRequest; use crate::bottom_pane::AskUserQuestionOverlay; +use crate::bottom_pane::BetaFeatureItem; use crate::bottom_pane::BottomPane; use crate::bottom_pane::BottomPaneParams; use crate::bottom_pane::CancellationEvent; +use 
crate::bottom_pane::ExperimentalFeaturesView; use crate::bottom_pane::InputResult; use crate::bottom_pane::PlanApprovalOverlay; use crate::bottom_pane::PlanRequestOverlay; +use crate::bottom_pane::ResumePromptOverlay; use crate::bottom_pane::SelectionAction; use crate::bottom_pane::SelectionItem; use crate::bottom_pane::SelectionViewParams; @@ -132,6 +138,7 @@ use crate::status::RateLimitSnapshotDisplay; use crate::text_formatting::truncate_text; use crate::tui::FrameRequester; mod interrupts; +use self::interrupts::CompletedLspToolCall; use self::interrupts::InterruptManager; mod agent; use self::agent::spawn_agent; @@ -299,6 +306,7 @@ pub(crate) struct ChatWidget { active_cell: Option>, active_subagent_group: Option>>, config: Config, + lsp_manager: codex_lsp::LspManager, model_family: ModelFamily, auth_manager: Arc, models_manager: Arc, @@ -315,6 +323,8 @@ pub(crate) struct ChatWidget { stream_controller: Option, running_commands: HashMap, suppressed_exec_calls: HashSet, + pending_lsp_tool_calls: HashMap, + pending_apply_patch_calls: HashSet, last_unified_wait: Option, task_complete_pending: bool, mcp_startup_status: Option>, @@ -328,7 +338,6 @@ pub(crate) struct ChatWidget { current_status_header: String, // Previous status header to restore after a transient stream retry. retry_status_header: Option, - plan_variants_progress: Option, conversation_id: Option, frame_requester: FrameRequester, // Whether to include the initial welcome banner on session configured @@ -336,6 +345,10 @@ pub(crate) struct ChatWidget { // When resuming an existing session (selected via resume picker), avoid an // immediate redraw on SessionConfigured to prevent a gratuitous UI flicker. suppress_session_configured_redraw: bool, + // Flags captured during replay to decide whether to prompt on resume. 
+ resume_last_turn_aborted: bool, + resume_had_partial_output: bool, + resume_had_in_progress_tools: bool, // User messages queued while a turn is in progress queued_user_messages: VecDeque, // Pending notification to show when unfocused on next Draw @@ -354,132 +367,17 @@ pub(crate) struct ChatWidget { current_rollout_path: Option, } +struct PendingLspToolCall { + tool_name: String, + arguments: String, + start_time: std::time::Instant, +} + struct UserMessage { text: String, image_paths: Vec, } -#[derive(Debug, Clone, Copy, PartialEq, Eq)] -enum ProgressStatus { - Pending, - InProgress, - Completed, -} - -#[derive(Debug, Clone)] -struct PlanVariantsProgress { - total: usize, - steps: Vec, - durations: Vec>, - last_activity: Vec>, - tokens: Vec>, -} - -impl PlanVariantsProgress { - fn new(total: usize) -> Self { - Self { - total, - steps: vec![ProgressStatus::Pending; total], - durations: vec![None; total], - last_activity: vec![None; total], - tokens: vec![None; total], - } - } - - fn variant_label(&self, idx: usize) -> String { - if self.total == 3 { - match idx { - 0 => "Minimal".to_string(), - 1 => "Correctness".to_string(), - 2 => "DX".to_string(), - _ => format!("Variant {}/{}", idx + 1, self.total), - } - } else { - format!("Variant {}/{}", idx + 1, self.total) - } - } - - fn set_in_progress(&mut self, idx: usize) { - if idx < self.steps.len() { - self.steps[idx] = ProgressStatus::InProgress; - } - } - - fn set_completed(&mut self, idx: usize) { - if idx < self.steps.len() { - self.steps[idx] = ProgressStatus::Completed; - } - } - - fn set_duration(&mut self, idx: usize, duration: Option) { - if idx < self.durations.len() { - self.durations[idx] = duration; - } - } - - fn set_activity(&mut self, idx: usize, activity: Option) { - if idx < self.last_activity.len() { - self.last_activity[idx] = activity; - } - } - - fn set_tokens(&mut self, idx: usize, tokens: Option) { - if idx < self.tokens.len() { - self.tokens[idx] = tokens; - } - } - - fn 
render_detail_lines(&self) -> Vec> { - use ratatui::style::Stylize; - let mut lines = Vec::with_capacity(self.total); - for (idx, status) in self.steps.iter().copied().enumerate() { - let label = self.variant_label(idx); - let status_span = match status { - ProgressStatus::Pending => "○".dim(), - ProgressStatus::InProgress => "●".cyan(), - ProgressStatus::Completed => "✓".green(), - }; - - let mut spans = vec![" ".into(), status_span, " ".into(), label.into()]; - let duration = self.durations.get(idx).and_then(|d| d.as_deref()); - let tokens = self.tokens.get(idx).and_then(|t| t.as_deref()); - if duration.is_some() || tokens.is_some() { - let mut meta = String::new(); - meta.push('('); - if let Some(duration) = duration { - meta.push_str(duration); - } - if let Some(tokens) = tokens { - if duration.is_some() { - meta.push_str(", "); - } - meta.push_str(tokens); - meta.push_str(" tok"); - } - meta.push(')'); - spans.push(" ".into()); - spans.push(meta.dim()); - } - if status == ProgressStatus::Completed { - spans.push(" ".into()); - spans.push("—".dim()); - spans.push(" ".into()); - spans.push("done".dim()); - } else if let Some(activity) = self.last_activity.get(idx).and_then(|a| a.as_deref()) { - let activity = activity.strip_prefix("shell ").unwrap_or(activity).trim(); - if !activity.is_empty() { - spans.push(" ".into()); - spans.push("—".dim()); - spans.push(" ".into()); - spans.push(activity.to_string().dim()); - } - } - lines.push(spans.into()); - } - lines - } -} - impl From for UserMessage { fn from(text: String) -> Self { Self { @@ -534,13 +432,18 @@ impl ChatWidget { } } - fn set_status_header(&mut self, header: String) { - if self.plan_variants_progress.is_some() && header != "Planning plan variants" { - self.plan_variants_progress = None; - self.clear_status_detail_lines(); - } + /// Update the status indicator header and details. + /// + /// Passing `None` clears any existing details. 
+ fn set_status(&mut self, header: String, details: Option) { self.current_status_header = header.clone(); - self.bottom_pane.update_status_header(header); + self.bottom_pane.update_status(header, details); + } + + /// Convenience wrapper around [`Self::set_status`]; + /// updates the status indicator header and clears any existing details. + fn set_status_header(&mut self, header: String) { + self.set_status(header, None); } fn set_status_detail_lines(&mut self, lines: Vec>) { @@ -552,9 +455,7 @@ impl ChatWidget { } fn restore_retry_status_header_if_present(&mut self) { - if let Some(header) = self.retry_status_header.take() - && self.current_status_header != header - { + if let Some(header) = self.retry_status_header.take() { self.set_status_header(header); } } @@ -566,7 +467,11 @@ impl ChatWidget { self.set_skills(None); self.conversation_id = Some(event.session_id); self.current_rollout_path = Some(event.rollout_path.clone()); + self.resume_last_turn_aborted = false; + self.resume_had_partial_output = false; + self.resume_had_in_progress_tools = false; let initial_messages = event.initial_messages.clone(); + let is_resume_replay = initial_messages.is_some(); let model_for_header = event.model.clone(); self.session_header.set_model(&model_for_header); self.add_to_history(history_cell::new_session_info( @@ -578,6 +483,36 @@ impl ChatWidget { if let Some(messages) = initial_messages { self.replay_initial_messages(messages); } + if is_resume_replay { + if self.stream_controller.is_some() { + self.resume_had_partial_output = true; + self.flush_answer_stream_with_separator(); + } + + self.flush_completed_active_cell_if_needed(); + self.clear_completed_subagent_group_if_needed(); + + if self.active_cell_is_in_progress() + || !self.running_commands.is_empty() + || self.active_subagent_group_is_in_progress() + { + self.resume_had_in_progress_tools = true; + self.finalize_turn(); + } + + if self.initial_user_message.is_none() + && (self.resume_last_turn_aborted + || 
self.resume_had_partial_output + || self.resume_had_in_progress_tools) + { + self.bottom_pane + .show_view(Box::new(ResumePromptOverlay::new( + self.app_event_tx.clone(), + self.resume_had_partial_output, + self.resume_had_in_progress_tools, + ))); + } + } // Ask codex-core to enumerate custom prompts for this session. self.submit_op(Op::ListCustomPrompts); self.submit_op(Op::ListSkills { @@ -634,6 +569,10 @@ impl ChatWidget { self.request_redraw(); } + pub(crate) fn queue_user_text(&mut self, text: String) { + self.queue_user_message(text.into()); + } + fn on_agent_message(&mut self, message: String) { // If we have a stream_controller, then the final agent message is redundant and will be a // duplicate of what has already been streamed. @@ -665,14 +604,11 @@ impl ChatWidget { } fn on_agent_reasoning_final(&mut self) { - let reasoning_summary_format = self.get_model_family().reasoning_summary_format; // At the end of a reasoning block, record transcript-only content. self.full_reasoning_buffer.push_str(&self.reasoning_buffer); if !self.full_reasoning_buffer.is_empty() { - let cell = history_cell::new_reasoning_summary_block( - self.full_reasoning_buffer.clone(), - reasoning_summary_format, - ); + let cell = + history_cell::new_reasoning_summary_block(self.full_reasoning_buffer.clone()); self.add_boxed_history(cell); } self.reasoning_buffer.clear(); @@ -693,7 +629,6 @@ impl ChatWidget { self.bottom_pane.clear_ctrl_c_quit_hint(); self.bottom_pane.set_task_running(true); self.retry_status_header = None; - self.plan_variants_progress = None; self.bottom_pane.set_interrupt_hint_visible(true); self.set_status_header(String::from("Working")); self.full_reasoning_buffer.clear(); @@ -704,6 +639,7 @@ impl ChatWidget { fn on_task_complete(&mut self, last_agent_message: Option) { // If a stream is currently active, finalize it. 
self.flush_answer_stream_with_separator(); + self.flush_completed_active_cell_if_needed(); // Mark task stopped and request redraw now that all content is in history. self.bottom_pane.set_task_running(false); self.running_commands.clear(); @@ -721,6 +657,15 @@ impl ChatWidget { self.maybe_show_pending_rate_limit_prompt(); } + fn on_task_complete_replay(&mut self) { + self.flush_answer_stream_with_separator(); + self.flush_completed_active_cell_if_needed(); + self.bottom_pane.set_task_running(false); + self.running_commands.clear(); + self.suppressed_exec_calls.clear(); + self.last_unified_wait = None; + } + pub(crate) fn set_token_info(&mut self, info: Option) { match info { Some(info) => self.apply_token_info(info), @@ -874,11 +819,11 @@ impl ChatWidget { self.bottom_pane.set_task_running(true); if let Some(current) = &self.mcp_startup_status { let total = current.len(); - let mut starting: Vec<_> = current + let mut starting: Vec = current .iter() .filter_map(|(name, state)| { if matches!(state, McpStartupStatus::Starting) { - Some(name) + Some(name.clone()) } else { None } @@ -905,6 +850,20 @@ impl ChatWidget { format!("Booting MCP server: {first}") }; self.set_status_header(header); + + let max_details = 4; + let mut detail_lines: Vec> = starting + .iter() + .take(max_details) + .map(|name| vec![" └ ".dim(), name.clone().into(), " starting".dim()].into()) + .collect(); + if starting.len() > max_details { + let extra = starting.len().saturating_sub(max_details); + detail_lines.push(vec![" └ ".dim(), format!("… +{extra} more").dim()].into()); + } + self.set_status_detail_lines(detail_lines); + } else { + self.clear_status_detail_lines(); } } self.request_redraw(); @@ -926,6 +885,7 @@ impl ChatWidget { self.on_warning(format!("MCP startup incomplete ({})", parts.join("; "))); } + self.clear_status_detail_lines(); self.mcp_startup_status = None; self.bottom_pane.set_task_running(false); self.maybe_send_next_queued_input(); @@ -970,6 +930,19 @@ impl ChatWidget { 
self.request_redraw(); } + fn on_interrupted_turn_replay(&mut self, _reason: TurnAbortReason) { + self.resume_last_turn_aborted = true; + self.resume_had_partial_output = + self.resume_had_partial_output || self.stream_controller.is_some(); + self.resume_had_in_progress_tools = self.resume_had_in_progress_tools + || self.active_cell.is_some() + || !self.running_commands.is_empty() + || self.active_subagent_group.is_some(); + + self.flush_answer_stream_with_separator(); + self.finalize_turn(); + } + fn on_plan_update(&mut self, update: UpdatePlanArgs) { let update_key = serde_json::to_string(&update).ok(); if let Some(key) = update_key.as_deref() @@ -1127,6 +1100,101 @@ impl ChatWidget { self.add_to_history(history_cell::new_web_search_call(ev.query)); } + fn on_raw_response_item(&mut self, ev: RawResponseItemEvent) { + match ev.item { + ResponseItem::CustomToolCall { name, call_id, .. } => { + if name == "apply_patch" { + self.pending_apply_patch_calls.insert(call_id); + } + } + ResponseItem::CustomToolCallOutput { call_id, output } => { + if self.pending_apply_patch_calls.remove(&call_id) + && let Some(idx) = output.find("## LSP diagnostics") + && let Some(cell) = history_cell::new_lsp_diagnostics_event( + output[idx..].to_string(), + self.config.cwd.clone(), + ) + { + self.flush_answer_stream_with_separator(); + self.flush_active_cell(); + self.add_boxed_history(Box::new(cell)); + self.request_redraw(); + } + } + ResponseItem::FunctionCall { + name, + arguments, + call_id, + .. 
+ } => { + if name == "apply_patch" { + self.pending_apply_patch_calls.insert(call_id); + return; + } + if !name.starts_with("lsp_") { + return; + } + self.pending_lsp_tool_calls.insert( + call_id, + PendingLspToolCall { + tool_name: name, + arguments, + start_time: std::time::Instant::now(), + }, + ); + } + ResponseItem::FunctionCallOutput { call_id, output } => { + if self.pending_apply_patch_calls.remove(&call_id) { + if let Some(idx) = output.content.find("## LSP diagnostics") + && let Some(cell) = history_cell::new_lsp_diagnostics_event( + output.content[idx..].to_string(), + self.config.cwd.clone(), + ) + { + self.flush_answer_stream_with_separator(); + self.flush_active_cell(); + self.add_boxed_history(Box::new(cell)); + self.request_redraw(); + } + return; + } + let Some(call) = self.pending_lsp_tool_calls.remove(&call_id) else { + return; + }; + if !call.tool_name.starts_with("lsp_") { + return; + } + + let completed = CompletedLspToolCall { + tool_name: call.tool_name, + arguments: call.arguments, + output: output.content, + success: output.success.unwrap_or(true), + duration: Some(call.start_time.elapsed()), + }; + let completed2 = completed.clone(); + self.defer_or_handle( + |q| q.push_lsp_tool_call(completed), + |s| s.handle_lsp_tool_call_now(completed2), + ); + } + _ => {} + } + } + + pub(crate) fn handle_lsp_tool_call_now(&mut self, ev: CompletedLspToolCall) { + self.flush_answer_stream_with_separator(); + self.flush_active_cell(); + self.add_boxed_history(Box::new(history_cell::FunctionToolCallCell::new( + ev.tool_name, + ev.arguments, + ev.output, + ev.success, + ev.duration, + ))); + self.request_redraw(); + } + fn on_get_history_entry_response( &mut self, event: codex_core::protocol::GetHistoryEntryResponseEvent, @@ -1158,121 +1226,10 @@ impl ChatWidget { debug!("BackgroundEvent: {message}"); self.bottom_pane.ensure_status_indicator(); self.bottom_pane.set_interrupt_hint_visible(true); - - if let Some(progress) = 
self.maybe_update_plan_variants_progress(message.as_str()) { - self.plan_variants_progress = Some(progress); - self.set_status_header("Planning plan variants".to_string()); - self.set_status_detail_lines( - self.plan_variants_progress - .as_ref() - .map(PlanVariantsProgress::render_detail_lines) - .unwrap_or_default(), - ); - return; - } - - self.plan_variants_progress = None; self.clear_status_detail_lines(); self.set_status_header(message); } - fn maybe_update_plan_variants_progress( - &mut self, - message: &str, - ) -> Option { - let message = message.trim(); - if message.starts_with("Plan variants:") { - // Expected shapes: - // - "Plan variants: generating 1/3…" - // - "Plan variants: finished 1/3 (12.3s)" - let tokens: Vec<&str> = message.split_whitespace().collect(); - if tokens.len() < 4 { - return None; - } - - let action = tokens.get(2).copied()?; - let fraction = tokens.get(3).copied()?; - let fraction = fraction.trim_end_matches('…'); - let (idx_str, total_str) = fraction.split_once('/')?; - let idx = usize::from_str(idx_str).ok()?.saturating_sub(1); - let total = usize::from_str(total_str).ok()?; - if total == 0 { - return None; - } - - let duration = message - .find('(') - .and_then(|start| message.rfind(')').map(|end| (start, end))) - .and_then(|(start, end)| { - if end > start + 1 { - Some(message[start + 1..end].to_string()) - } else { - None - } - }); - - let mut progress = self - .plan_variants_progress - .clone() - .filter(|p| p.total == total) - .unwrap_or_else(|| PlanVariantsProgress::new(total)); - - match action { - "generating" => { - progress.set_in_progress(idx); - progress.set_duration(idx, None); - } - "finished" => { - progress.set_completed(idx); - progress.set_duration(idx, duration); - progress.set_activity(idx, None); - } - _ => return None, - } - - return Some(progress); - } - - if let Some(rest) = message.strip_prefix("Plan variant ") { - // Expected shape: - // - "Plan variant 2/3: rg -n ..." 
- // - "Plan variant 2/3: shell rg -n ..." (legacy) - let (fraction, activity) = rest.split_once(':')?; - let fraction = fraction.trim(); - let (idx_str, total_str) = fraction.split_once('/')?; - let idx = usize::from_str(idx_str).ok()?.saturating_sub(1); - let total = usize::from_str(total_str).ok()?; - if total == 0 { - return None; - } - - let mut progress = self - .plan_variants_progress - .clone() - .filter(|p| p.total == total) - .unwrap_or_else(|| PlanVariantsProgress::new(total)); - - if idx < progress.steps.len() && progress.steps[idx] == ProgressStatus::Pending { - progress.set_in_progress(idx); - } - - let activity = activity.trim(); - if let Some(tokens) = activity.strip_prefix("tokens ") { - progress.set_tokens(idx, Some(tokens.trim().to_string())); - } else { - let activity = activity.strip_prefix("shell ").unwrap_or(activity).trim(); - if activity.is_empty() { - progress.set_activity(idx, None); - } else { - progress.set_activity(idx, Some(activity.to_string())); - } - } - return Some(progress); - } - - None - } - fn on_undo_started(&mut self, event: UndoStartedEvent) { self.bottom_pane.ensure_status_indicator(); self.bottom_pane.set_interrupt_hint_visible(false); @@ -1299,11 +1256,11 @@ impl ChatWidget { } } - fn on_stream_error(&mut self, message: String) { + fn on_stream_error(&mut self, message: String, additional_details: Option) { if self.retry_status_header.is_none() { self.retry_status_header = Some(self.current_status_header.clone()); } - self.set_status_header(message); + self.set_status(message, additional_details); } /// Periodic tick to commit at most one queued line to history with a small delay, @@ -1674,6 +1631,7 @@ impl ChatWidget { invocation, duration, tokens, + outcome, result, } = ev; @@ -1689,7 +1647,7 @@ impl ChatWidget { let mut group = group .lock() .unwrap_or_else(std::sync::PoisonError::into_inner); - group.complete_call(&call_id, duration, tokens, result); + group.complete_call(&call_id, duration, tokens, outcome, result); 
group.is_complete() }; if is_complete { @@ -1702,7 +1660,7 @@ impl ChatWidget { let mut cell = history_cell::new_active_subagent_tool_call_group(call_id.clone(), invocation); - cell.complete_call(&call_id, duration, tokens, result); + cell.complete_call(&call_id, duration, tokens, outcome, result); self.add_boxed_history(Box::new(cell)); self.request_redraw(); } @@ -1763,6 +1721,7 @@ impl ChatWidget { let model_slug = model_family.get_model_slug().to_string(); let mut config = config; config.model = Some(model_slug.clone()); + let lsp_manager = conversation_manager.lsp_manager(); let mut rng = rand::rng(); let placeholder = EXAMPLE_PROMPTS[rng.random_range(0..EXAMPLE_PROMPTS.len())].to_string(); let codex_op_tx = spawn_agent(config.clone(), app_event_tx.clone(), conversation_manager); @@ -1784,6 +1743,7 @@ impl ChatWidget { active_cell: None, active_subagent_group: None, config, + lsp_manager, model_family, auth_manager, models_manager, @@ -1802,6 +1762,8 @@ impl ChatWidget { stream_controller: None, running_commands: HashMap::new(), suppressed_exec_calls: HashSet::new(), + pending_lsp_tool_calls: HashMap::new(), + pending_apply_patch_calls: HashSet::new(), last_unified_wait: None, task_complete_pending: false, mcp_startup_status: None, @@ -1810,11 +1772,13 @@ impl ChatWidget { full_reasoning_buffer: String::new(), current_status_header: String::from("Ready"), retry_status_header: None, - plan_variants_progress: None, conversation_id: None, queued_user_messages: VecDeque::new(), show_welcome_banner: is_first_run, suppress_session_configured_redraw: false, + resume_last_turn_aborted: false, + resume_had_partial_output: false, + resume_had_in_progress_tools: false, pending_notification: None, is_review_mode: false, pre_review_token_info: None, @@ -1828,6 +1792,7 @@ impl ChatWidget { /// Create a ChatWidget attached to an existing conversation (e.g., a fork). 
pub(crate) fn new_from_existing( common: ChatWidgetInit, + conversation_manager: Arc, conversation: std::sync::Arc, session_configured: codex_core::protocol::SessionConfiguredEvent, ) -> Self { @@ -1847,6 +1812,7 @@ impl ChatWidget { let model_slug = model_family.get_model_slug().to_string(); let mut rng = rand::rng(); let placeholder = EXAMPLE_PROMPTS[rng.random_range(0..EXAMPLE_PROMPTS.len())].to_string(); + let lsp_manager = conversation_manager.lsp_manager(); let codex_op_tx = spawn_agent_from_existing(conversation, session_configured, app_event_tx.clone()); @@ -1868,6 +1834,7 @@ impl ChatWidget { active_cell: None, active_subagent_group: None, config, + lsp_manager, model_family, auth_manager, models_manager, @@ -1886,6 +1853,8 @@ impl ChatWidget { stream_controller: None, running_commands: HashMap::new(), suppressed_exec_calls: HashSet::new(), + pending_lsp_tool_calls: HashMap::new(), + pending_apply_patch_calls: HashSet::new(), last_unified_wait: None, task_complete_pending: false, mcp_startup_status: None, @@ -1894,11 +1863,13 @@ impl ChatWidget { full_reasoning_buffer: String::new(), current_status_header: String::from("Ready"), retry_status_header: None, - plan_variants_progress: None, conversation_id: None, queued_user_messages: VecDeque::new(), show_welcome_banner: false, suppress_session_configured_redraw: true, + resume_last_turn_aborted: false, + resume_had_partial_output: false, + resume_had_in_progress_tools: false, pending_notification: None, is_review_mode: false, pre_review_token_info: None, @@ -2054,9 +2025,15 @@ impl ChatWidget { SlashCommand::PlanModel => { self.open_plan_model_popup(); } + SlashCommand::SubagentModel => { + self.open_subagent_model_popup(); + } SlashCommand::Approvals => { self.open_approvals_popup(); } + SlashCommand::Experimental => { + self.open_experimental_popup(); + } SlashCommand::Quit | SlashCommand::Exit => { self.request_exit(); } @@ -2069,9 +2046,9 @@ impl ChatWidget { } self.request_exit(); } - SlashCommand::Undo 
=> { - self.app_event_tx.send(AppEvent::CodexOp(Op::Undo)); - } + // SlashCommand::Undo => { + // self.app_event_tx.send(AppEvent::CodexOp(Op::Undo)); + // } SlashCommand::Diff => { self.add_diff_in_progress(); let tx = self.app_event_tx.clone(); @@ -2098,6 +2075,9 @@ impl ChatWidget { SlashCommand::Status => { self.add_status_output(); } + SlashCommand::Lsp => { + self.open_lsp_status(); + } SlashCommand::Mcp => { self.add_mcp_output(); } @@ -2181,6 +2161,58 @@ impl ChatWidget { } } + fn active_cell_is_in_progress(&self) -> bool { + let Some(cell) = self.active_cell.as_ref() else { + return false; + }; + + if let Some(exec) = cell.as_any().downcast_ref::() { + return exec.is_active(); + } + if let Some(tool) = cell.as_any().downcast_ref::() { + return tool.is_active(); + } + if let Some(tool) = cell.as_any().downcast_ref::() { + return !tool.is_complete(); + } + + false + } + + fn flush_completed_active_cell_if_needed(&mut self) -> bool { + if self.active_cell.is_none() { + return false; + } + if self.active_cell_is_in_progress() { + return false; + } + self.flush_active_cell(); + true + } + + fn active_subagent_group_is_in_progress(&self) -> bool { + let Some(group) = self.active_subagent_group.as_ref() else { + return false; + }; + let group = group + .lock() + .unwrap_or_else(std::sync::PoisonError::into_inner); + !group.is_complete() + } + + fn clear_completed_subagent_group_if_needed(&mut self) -> bool { + if !self.active_subagent_group.as_ref().is_some_and(|group| { + let group = group + .lock() + .unwrap_or_else(std::sync::PoisonError::into_inner); + group.is_complete() + }) { + return false; + } + self.active_subagent_group = None; + true + } + fn add_to_history(&mut self, cell: impl HistoryCell + 'static) { self.add_boxed_history(Box::new(cell)); } @@ -2328,10 +2360,14 @@ impl ChatWidget { } EventMsg::AgentReasoningSectionBreak(_) => self.on_reasoning_section_break(), EventMsg::TaskStarted(_) if !from_replay => self.on_task_started(), - 
EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }) if !from_replay => { - self.on_task_complete(last_agent_message) + EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }) => { + if from_replay { + self.on_task_complete_replay(); + } else { + self.on_task_complete(last_agent_message); + } } - EventMsg::TaskStarted(_) | EventMsg::TaskComplete(_) => {} + EventMsg::TaskStarted(_) => {} EventMsg::TokenCount(ev) => { self.set_token_info(ev.info); self.on_rate_limit_snapshot(ev.rate_limits); @@ -2340,17 +2376,23 @@ impl ChatWidget { EventMsg::Error(ErrorEvent { message, .. }) => self.on_error(message), EventMsg::McpStartupUpdate(ev) => self.on_mcp_startup_update(ev), EventMsg::McpStartupComplete(ev) => self.on_mcp_startup_complete(ev), - EventMsg::TurnAborted(ev) => match ev.reason { - TurnAbortReason::Interrupted => { - self.on_interrupted_turn(ev.reason); - } - TurnAbortReason::Replaced => { - self.on_error("Turn aborted: replaced by a new task".to_owned()) - } - TurnAbortReason::ReviewEnded => { - self.on_interrupted_turn(ev.reason); + EventMsg::TurnAborted(ev) => { + if from_replay { + self.on_interrupted_turn_replay(ev.reason); + } else { + match ev.reason { + TurnAbortReason::Interrupted => { + self.on_interrupted_turn(ev.reason); + } + TurnAbortReason::Replaced => { + self.on_error("Turn aborted: replaced by a new task".to_owned()) + } + TurnAbortReason::ReviewEnded => { + self.on_interrupted_turn(ev.reason); + } + } } - }, + } EventMsg::PlanUpdate(update) => self.on_plan_update(update), EventMsg::ExecApprovalRequest(ev) => { // For replayed events, synthesize an empty id (these should not occur). @@ -2401,9 +2443,11 @@ impl ChatWidget { } EventMsg::UndoStarted(ev) if !from_replay => self.on_undo_started(ev), EventMsg::UndoCompleted(ev) => self.on_undo_completed(ev), - EventMsg::StreamError(StreamErrorEvent { message, .. 
}) if !from_replay => { - self.on_stream_error(message) - } + EventMsg::StreamError(StreamErrorEvent { + message, + additional_details, + .. + }) if !from_replay => self.on_stream_error(message, additional_details), EventMsg::BackgroundEvent(_) | EventMsg::UndoStarted(_) | EventMsg::StreamError(_) => {} EventMsg::UserMessage(ev) => { if from_replay { @@ -2423,8 +2467,8 @@ impl ChatWidget { } } EventMsg::ContextCompacted(_) => self.on_agent_message("Context compacted".to_owned()), - EventMsg::RawResponseItem(_) - | EventMsg::ItemStarted(_) + EventMsg::RawResponseItem(ev) => self.on_raw_response_item(ev), + EventMsg::ItemStarted(_) | EventMsg::ItemCompleted(_) | EventMsg::AgentMessageContentDelta(_) | EventMsg::ReasoningContentDelta(_) @@ -2708,8 +2752,14 @@ impl ChatWidget { sandbox_policy: None, model: Some(switch_model.clone()), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: Some(Some(default_effort)), plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, })); tx.send(AppEvent::UpdateModel(switch_model.clone())); @@ -2778,6 +2828,10 @@ impl ChatWidget { self.open_model_popup_for_target(crate::app_event::ModelPickerTarget::Plan); } + pub(crate) fn open_subagent_model_popup(&mut self) { + self.open_model_popup_for_target(crate::app_event::ModelPickerTarget::Subagent); + } + fn open_model_popup_for_target(&mut self, target: crate::app_event::ModelPickerTarget) { let chat_model = self.model_family.get_model_slug(); let current_model = match target { @@ -2787,6 +2841,11 @@ impl ChatWidget { .plan_model .clone() .unwrap_or_else(|| chat_model.to_string()), + crate::app_event::ModelPickerTarget::Subagent => self + .config + .subagent_model + .clone() + .unwrap_or_else(|| chat_model.to_string()), }; let presets: Vec = // todo(aibrahim): make this async function @@ -2863,6 +2922,11 @@ impl ChatWidget { "Choose a specific model and reasoning level for /plan 
(current: {current_label})" ) } + crate::app_event::ModelPickerTarget::Subagent => { + format!( + "Choose a specific model and reasoning level for spawned subagents (current: {current_label})" + ) + } }); items.push(SelectionItem { @@ -2879,6 +2943,9 @@ impl ChatWidget { title: Some(match target { crate::app_event::ModelPickerTarget::Chat => "Select Model".to_string(), crate::app_event::ModelPickerTarget::Plan => "Select Plan Model".to_string(), + crate::app_event::ModelPickerTarget::Subagent => { + "Select Subagent Model".to_string() + } }), subtitle: Some(match target { crate::app_event::ModelPickerTarget::Chat => { @@ -2887,6 +2954,9 @@ impl ChatWidget { crate::app_event::ModelPickerTarget::Plan => { "Pick a quick auto mode or browse all models for /plan.".to_string() } + crate::app_event::ModelPickerTarget::Subagent => { + "Pick a quick auto mode or browse all models for spawned subagents.".to_string() + } }), footer_hint: Some(standard_popup_hint_line()), items, @@ -2928,6 +2998,11 @@ impl ChatWidget { .plan_model .clone() .unwrap_or_else(|| chat_model.to_string()), + crate::app_event::ModelPickerTarget::Subagent => self + .config + .subagent_model + .clone() + .unwrap_or_else(|| chat_model.to_string()), }; let mut items: Vec = Vec::new(); for preset in presets.into_iter() { @@ -2960,6 +3035,9 @@ impl ChatWidget { crate::app_event::ModelPickerTarget::Plan => { "Select Plan Model and Effort".to_string() } + crate::app_event::ModelPickerTarget::Subagent => { + "Select Subagent Model and Effort".to_string() + } }), subtitle: Some( "Access legacy models by running codex -m or in your config.toml" @@ -2988,8 +3066,14 @@ impl ChatWidget { sandbox_policy: None, model: Some(model_for_action.clone()), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: Some(effort_for_action), plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, })); 
tx.send(AppEvent::UpdateModel(model_for_action.clone())); @@ -3011,8 +3095,14 @@ impl ChatWidget { sandbox_policy: None, model: None, plan_model: Some(model_for_action.clone()), + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: None, plan_effort: Some(effort_for_action), + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, })); tx.send(AppEvent::UpdatePlanModel(model_for_action.clone())); @@ -3027,6 +3117,35 @@ impl ChatWidget { effort_label ); } + crate::app_event::ModelPickerTarget::Subagent => { + tx.send(AppEvent::CodexOp(Op::OverrideTurnContext { + cwd: None, + approval_policy: None, + sandbox_policy: None, + model: None, + plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: Some(model_for_action.clone()), + effort: None, + plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: Some(effort_for_action), + summary: None, + })); + tx.send(AppEvent::UpdateSubagentModel(model_for_action.clone())); + tx.send(AppEvent::UpdateSubagentReasoningEffort(effort_for_action)); + tx.send(AppEvent::PersistSubagentModelSelection { + model: model_for_action.clone(), + effort: effort_for_action, + }); + tracing::info!( + "Selected subagent model: {}, Selected effort: {}", + model_for_action, + effort_label + ); + } } })] } @@ -3105,6 +3224,9 @@ impl ChatWidget { crate::app_event::ModelPickerTarget::Plan => { self.config.plan_model.as_deref().unwrap_or(chat_model) } + crate::app_event::ModelPickerTarget::Subagent => { + self.config.subagent_model.as_deref().unwrap_or(chat_model) + } }; let is_current_model = effective_current_model == preset.model; let highlight_choice = if is_current_model { @@ -3117,6 +3239,13 @@ impl ChatWidget { self.config.model_reasoning_effort } } + crate::app_event::ModelPickerTarget::Subagent => { + if self.config.subagent_model.as_deref() == Some(preset.model.as_str()) { + 
self.config.subagent_model_reasoning_effort + } else { + self.config.model_reasoning_effort + } + } } } else { default_choice @@ -3216,8 +3345,14 @@ impl ChatWidget { sandbox_policy: None, model: Some(model.clone()), plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: Some(effort), plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, })); self.app_event_tx.send(AppEvent::UpdateModel(model.clone())); @@ -3241,8 +3376,14 @@ impl ChatWidget { sandbox_policy: None, model: None, plan_model: Some(model.clone()), + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: None, plan_effort: Some(effort), + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, })); self.app_event_tx @@ -3259,6 +3400,39 @@ impl ChatWidget { effort_label ); } + crate::app_event::ModelPickerTarget::Subagent => { + self.app_event_tx + .send(AppEvent::CodexOp(Op::OverrideTurnContext { + cwd: None, + approval_policy: None, + sandbox_policy: None, + model: None, + plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: Some(model.clone()), + effort: None, + plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: Some(effort), + summary: None, + })); + self.app_event_tx + .send(AppEvent::UpdateSubagentModel(model.clone())); + self.app_event_tx + .send(AppEvent::UpdateSubagentReasoningEffort(effort)); + self.app_event_tx + .send(AppEvent::PersistSubagentModelSelection { + model: model.clone(), + effort, + }); + tracing::info!( + "Selected subagent model: {}, Selected effort: {}", + model, + effort_label + ); + } } } @@ -3339,6 +3513,25 @@ impl ChatWidget { }); } + pub(crate) fn open_experimental_popup(&mut self) { + let features: Vec = FEATURES + .iter() + .filter_map(|spec| { + let name = spec.stage.beta_menu_name()?; + let description = 
spec.stage.beta_menu_description()?; + Some(BetaFeatureItem { + feature: spec.id, + name: name.to_string(), + description: description.to_string(), + enabled: self.config.features.enabled(spec.id), + }) + }) + .collect(); + + let view = ExperimentalFeaturesView::new(features, self.app_event_tx.clone()); + self.bottom_pane.show_view(Box::new(view)); + } + fn approval_preset_actions( approval: AskForApproval, sandbox: SandboxPolicy, @@ -3351,8 +3544,14 @@ impl ChatWidget { sandbox_policy: Some(sandbox_clone.clone()), model: None, plan_model: None, + explore_model: None, + mini_subagent_model: None, + subagent_model: None, effort: None, plan_effort: None, + explore_effort: None, + mini_subagent_effort: None, + subagent_effort: None, summary: None, })); tx.send(AppEvent::UpdateAskForApprovalPolicy(approval)); @@ -3685,6 +3884,14 @@ impl ChatWidget { Ok(()) } + pub(crate) fn set_feature_enabled(&mut self, feature: Feature, enabled: bool) { + if enabled { + self.config.features.enable(feature); + } else { + self.config.features.disable(feature); + } + } + pub(crate) fn set_full_access_warning_acknowledged(&mut self, acknowledged: bool) { self.config.notices.hide_full_access_warning = Some(acknowledged); } @@ -3718,6 +3925,11 @@ impl ChatWidget { self.config.plan_model_reasoning_effort = effort; } + /// Set the subagent reasoning effort in the widget's config copy. + pub(crate) fn set_subagent_reasoning_effort(&mut self, effort: Option) { + self.config.subagent_model_reasoning_effort = effort; + } + /// Set the model in the widget's config copy. pub(crate) fn set_model(&mut self, model: &str, model_family: ModelFamily) { self.session_header.set_model(model); @@ -3729,6 +3941,11 @@ impl ChatWidget { self.config.plan_model = Some(model.to_string()); } + /// Set the subagent model in the widget's config copy. 
+ pub(crate) fn set_subagent_model(&mut self, model: &str) { + self.config.subagent_model = Some(model.to_string()); + } + pub(crate) fn add_info_message(&mut self, message: String, hint: Option) { self.add_to_history(history_cell::new_info_event(message, hint)); self.request_redraw(); @@ -3818,9 +4035,14 @@ impl ChatWidget { scrolled: bool, selection_active: bool, scroll_position: Option<(usize, usize)>, + copy_selection_key: crate::key_hint::KeyBinding, ) { - self.bottom_pane - .set_transcript_ui_state(scrolled, selection_active, scroll_position); + self.bottom_pane.set_transcript_ui_state( + scrolled, + selection_active, + scroll_position, + copy_selection_key, + ); } /// Forward an `Op` directly to codex. @@ -4050,6 +4272,30 @@ impl ChatWidget { ); RenderableItem::Owned(Box::new(flex)) } + + fn open_lsp_status(&mut self) { + if !self.config.features.enabled(Feature::Lsp) { + self.add_error_message( + "LSP is disabled. Enable `[features].lsp = true` in config.toml.".to_string(), + ); + return; + }; + + let cwd = self.config.cwd.clone(); + let mut lsp_config = self.config.lsp.manager.clone(); + lsp_config.enabled = true; + let lsp = self.lsp_manager.clone(); + let tx = self.app_event_tx.clone(); + tokio::spawn(async move { + lsp.set_config(lsp_config).await; + match lsp.status(&cwd).await { + Ok(status) => tx.send(AppEvent::LspStatusLoaded(status)), + Err(err) => tx.send(AppEvent::LspStatusLoadFailed(format!( + "Failed to fetch LSP status: {err:#}", + ))), + } + }); + } } impl Drop for ChatWidget { diff --git a/codex-rs/tui2/src/chatwidget/interrupts.rs b/codex-rs/tui2/src/chatwidget/interrupts.rs index ab650ed7628..7f0fd0990a1 100644 --- a/codex-rs/tui2/src/chatwidget/interrupts.rs +++ b/codex-rs/tui2/src/chatwidget/interrupts.rs @@ -17,6 +17,15 @@ use codex_protocol::approvals::ElicitationRequestEvent; use super::ChatWidget; +#[derive(Debug, Clone)] +pub(crate) struct CompletedLspToolCall { + pub(crate) tool_name: String, + pub(crate) arguments: String, + 
pub(crate) output: String, + pub(crate) success: bool, + pub(crate) duration: Option, +} + #[derive(Debug)] pub(crate) enum QueuedInterrupt { ExecApproval(String, ExecApprovalRequestEvent), @@ -28,6 +37,7 @@ pub(crate) enum QueuedInterrupt { ExecEnd(ExecCommandEndEvent), McpBegin(McpToolCallBeginEvent), McpEnd(McpToolCallEndEvent), + LspToolCall(CompletedLspToolCall), SubAgentBegin(SubAgentToolCallBeginEvent), SubAgentActivity(SubAgentToolCallActivityEvent), SubAgentTokens(SubAgentToolCallTokensEvent), @@ -94,6 +104,10 @@ impl InterruptManager { self.queue.push_back(QueuedInterrupt::McpEnd(ev)); } + pub(crate) fn push_lsp_tool_call(&mut self, ev: CompletedLspToolCall) { + self.queue.push_back(QueuedInterrupt::LspToolCall(ev)); + } + pub(crate) fn push_subagent_begin(&mut self, ev: SubAgentToolCallBeginEvent) { self.queue.push_back(QueuedInterrupt::SubAgentBegin(ev)); } @@ -132,6 +146,7 @@ impl InterruptManager { QueuedInterrupt::ExecEnd(ev) => chat.handle_exec_end_now(ev), QueuedInterrupt::McpBegin(ev) => chat.handle_mcp_begin_now(ev), QueuedInterrupt::McpEnd(ev) => chat.handle_mcp_end_now(ev), + QueuedInterrupt::LspToolCall(ev) => chat.handle_lsp_tool_call_now(ev), QueuedInterrupt::SubAgentBegin(ev) => chat.handle_subagent_begin_now(ev), QueuedInterrupt::SubAgentActivity(ev) => chat.handle_subagent_activity_now(ev), QueuedInterrupt::SubAgentTokens(ev) => chat.handle_subagent_tokens_now(ev), diff --git a/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__mcp_startup_header_booting.snap b/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__mcp_startup_header_booting.snap index e16f50ba1b9..0c63a489cc7 100644 --- a/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__mcp_startup_header_booting.snap +++ b/codex-rs/tui2/src/chatwidget/snapshots/codex_tui2__chatwidget__tests__mcp_startup_header_booting.snap @@ -4,6 +4,7 @@ expression: terminal.backend() --- " " "• Booting MCP server: alpha (0s • esc to interrupt) " 
+" └ alpha starting " " " " " "› Ask Codex to do anything " diff --git a/codex-rs/tui2/src/chatwidget/tests.rs b/codex-rs/tui2/src/chatwidget/tests.rs index a7435bbf187..690eb00fcca 100644 --- a/codex-rs/tui2/src/chatwidget/tests.rs +++ b/codex-rs/tui2/src/chatwidget/tests.rs @@ -239,6 +239,136 @@ async fn resumed_session_does_not_auto_execute_plan() { ); } +#[tokio::test] +async fn resumed_interrupted_session_prompts_before_continuing() { + let (mut chat, mut rx, _ops) = make_chatwidget_manual(None).await; + + let conversation_id = ConversationId::new(); + let rollout_file = NamedTempFile::new().unwrap(); + let configured = codex_core::protocol::SessionConfiguredEvent { + session_id: conversation_id, + model: "test-model".to_string(), + model_provider_id: "test-provider".to_string(), + approval_policy: AskForApproval::Never, + sandbox_policy: SandboxPolicy::ReadOnly, + cwd: PathBuf::from("/home/user/project"), + reasoning_effort: Some(ReasoningEffortConfig::default()), + history_log_id: 0, + history_entry_count: 0, + initial_messages: Some(vec![ + EventMsg::UserMessage(UserMessageEvent { + message: "hello".to_string(), + images: None, + }), + EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { + delta: "partial".to_string(), + }), + EventMsg::TurnAborted(codex_core::protocol::TurnAbortedEvent { + reason: codex_core::protocol::TurnAbortReason::Interrupted, + }), + ]), + rollout_path: rollout_file.path().to_path_buf(), + }; + + chat.handle_codex_event(Event { + id: "initial".into(), + msg: EventMsg::SessionConfigured(configured), + }); + + assert!( + !chat.bottom_pane.is_normal_backtrack_mode(), + "expected resume prompt overlay to be active" + ); + + chat.handle_key_event(KeyEvent::new(KeyCode::Char('2'), KeyModifiers::NONE)); + + let mut continue_text = None; + while let Ok(ev) = rx.try_recv() { + if let AppEvent::QueueUserText(text) = ev { + continue_text = Some(text); + break; + } + } + + assert_eq!( + continue_text.expect("expected QueueUserText from resume 
prompt"), + "Continue from where you left off. Do not re-run tool calls that already completed; use the outputs above. If you need to rerun a tool, ask first." + ); +} + +#[tokio::test] +async fn resumed_completed_exploring_commands_do_not_prompt() { + let (mut chat, _rx, _ops) = make_chatwidget_manual(None).await; + + let conversation_id = ConversationId::new(); + let rollout_file = NamedTempFile::new().unwrap(); + let configured = codex_core::protocol::SessionConfiguredEvent { + session_id: conversation_id, + model: "test-model".to_string(), + model_provider_id: "test-provider".to_string(), + approval_policy: AskForApproval::Never, + sandbox_policy: SandboxPolicy::ReadOnly, + cwd: PathBuf::from("/home/user/project"), + reasoning_effort: Some(ReasoningEffortConfig::default()), + history_log_id: 0, + history_entry_count: 0, + initial_messages: Some(vec![ + EventMsg::UserMessage(UserMessageEvent { + message: "hello".to_string(), + images: None, + }), + EventMsg::ExecCommandBegin(ExecCommandBeginEvent { + call_id: "call-1".to_string(), + process_id: None, + turn_id: "turn-1".to_string(), + command: vec!["cat".to_string(), "README.md".to_string()], + cwd: PathBuf::from("/home/user/project"), + parsed_cmd: vec![ParsedCommand::Read { + cmd: "cat README.md".to_string(), + name: "cat".to_string(), + path: PathBuf::from("/home/user/project/README.md"), + }], + source: ExecCommandSource::Agent, + interaction_input: None, + }), + EventMsg::ExecCommandEnd(ExecCommandEndEvent { + call_id: "call-1".to_string(), + process_id: None, + turn_id: "turn-1".to_string(), + command: vec!["cat".to_string(), "README.md".to_string()], + cwd: PathBuf::from("/home/user/project"), + parsed_cmd: vec![ParsedCommand::Read { + cmd: "cat README.md".to_string(), + name: "cat".to_string(), + path: PathBuf::from("/home/user/project/README.md"), + }], + source: ExecCommandSource::Agent, + interaction_input: None, + stdout: "contents".to_string(), + stderr: String::new(), + aggregated_output: 
"contents".to_string(), + exit_code: 0, + duration: std::time::Duration::from_millis(5), + formatted_output: "contents".to_string(), + }), + EventMsg::TaskComplete(TaskCompleteEvent { + last_agent_message: Some("done".to_string()), + }), + ]), + rollout_path: rollout_file.path().to_path_buf(), + }; + + chat.handle_codex_event(Event { + id: "initial".into(), + msg: EventMsg::SessionConfigured(configured), + }); + + assert!( + chat.is_normal_backtrack_mode(), + "expected resume replay to avoid showing interruption prompt overlay" + ); +} + /// Entering review mode uses the hint provided by the review request. #[tokio::test] async fn entered_review_mode_uses_request_hint() { @@ -460,6 +590,7 @@ async fn make_chatwidget_manual( active_cell: None, active_subagent_group: None, config: cfg.clone(), + lsp_manager: codex_lsp::LspManager::new(codex_lsp::LspManagerConfig::default()), model_family: ModelsManager::construct_model_family_offline(&resolved_model, &cfg), auth_manager: auth_manager.clone(), models_manager: Arc::new(ModelsManager::new(auth_manager)), @@ -475,6 +606,8 @@ async fn make_chatwidget_manual( stream_controller: None, running_commands: HashMap::new(), suppressed_exec_calls: HashSet::new(), + pending_lsp_tool_calls: HashMap::new(), + pending_apply_patch_calls: HashSet::new(), last_unified_wait: None, task_complete_pending: false, mcp_startup_status: None, @@ -483,12 +616,14 @@ async fn make_chatwidget_manual( full_reasoning_buffer: String::new(), current_status_header: String::from("Ready"), retry_status_header: None, - plan_variants_progress: None, conversation_id: None, frame_requester: FrameRequester::test_dummy(), show_welcome_banner: true, queued_user_messages: VecDeque::new(), suppress_session_configured_redraw: false, + resume_last_turn_aborted: false, + resume_had_partial_output: false, + resume_had_in_progress_tools: false, pending_notification: None, is_review_mode: false, pre_review_token_info: None, @@ -559,6 +694,55 @@ fn 
make_token_info(total_tokens: i64, context_window: i64) -> TokenUsageInfo { } } +#[tokio::test] +async fn lsp_tool_calls_render_in_history() { + let (mut chat, _app_event_tx, mut app_ev_rx, _op_rx) = + make_chatwidget_manual_with_sender().await; + + chat.dispatch_event_msg( + None, + EventMsg::RawResponseItem(codex_core::protocol::RawResponseItemEvent { + item: codex_protocol::models::ResponseItem::FunctionCall { + id: None, + name: "lsp_definition".to_string(), + arguments: r#"{"file_path":"C:\\repo\\src\\main.rs","line":1,"character":1}"# + .to_string(), + call_id: "call-1".to_string(), + }, + }), + false, + ); + + chat.dispatch_event_msg( + None, + EventMsg::RawResponseItem(codex_core::protocol::RawResponseItemEvent { + item: codex_protocol::models::ResponseItem::FunctionCallOutput { + call_id: "call-1".to_string(), + output: codex_protocol::models::FunctionCallOutputPayload { + content: r#"{"locations":[]}"#.to_string(), + success: Some(true), + ..Default::default() + }, + }, + }), + false, + ); + + let rendered: String = drain_insert_history(&mut app_ev_rx) + .into_iter() + .map(|lines| lines_to_single_string(&lines)) + .collect(); + + assert!( + rendered.contains("lsp_definition"), + "expected lsp tool call to be visible in history, got:\n{rendered}" + ); + assert!( + rendered.contains("\"locations\""), + "expected lsp tool output to be visible in history, got:\n{rendered}" + ); +} + #[tokio::test] async fn rate_limit_warnings_emit_thresholds() { let mut state = RateLimitWarningState::default(); @@ -1382,18 +1566,6 @@ async fn slash_resume_opens_picker() { assert_matches!(rx.try_recv(), Ok(AppEvent::OpenResumePicker)); } -#[tokio::test] -async fn slash_undo_sends_op() { - let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None).await; - - chat.dispatch_command(SlashCommand::Undo); - - match rx.try_recv() { - Ok(AppEvent::CodexOp(Op::Undo)) => {} - other => panic!("expected AppEvent::CodexOp(Op::Undo), got {other:?}"), - } -} - #[tokio::test] async fn 
slash_rollout_displays_current_path() { let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None).await; @@ -1440,6 +1612,7 @@ async fn subagent_history_cell_keeps_updating_after_other_history_is_inserted() description: "Alpha task".to_string(), label: "alpha".to_string(), prompt: "Prompt".to_string(), + model: None, }, }); @@ -3050,11 +3223,13 @@ async fn stream_error_updates_status_indicator() { let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None).await; chat.bottom_pane.set_task_running(true); let msg = "Reconnecting... 2/5"; + let details = "Idle timeout waiting for SSE"; chat.handle_codex_event(Event { id: "sub-1".into(), msg: EventMsg::StreamError(StreamErrorEvent { message: msg.to_string(), codex_error_info: Some(CodexErrorInfo::Other), + additional_details: Some(details.to_string()), }), }); @@ -3068,6 +3243,7 @@ async fn stream_error_updates_status_indicator() { .status_widget() .expect("status indicator should be visible"); assert_eq!(status.header(), msg); + assert_eq!(status.details(), Some(details)); } #[tokio::test] @@ -3104,6 +3280,7 @@ async fn stream_recovery_restores_previous_status_header() { msg: EventMsg::StreamError(StreamErrorEvent { message: "Reconnecting... 
1/5".to_string(), codex_error_info: Some(CodexErrorInfo::Other), + additional_details: None, }), }); drain_insert_history(&mut rx); @@ -3119,6 +3296,7 @@ async fn stream_recovery_restores_previous_status_header() { .status_widget() .expect("status indicator should be visible"); assert_eq!(status.header(), "Working"); + assert_eq!(status.details(), None); assert!(chat.retry_status_header.is_none()); } diff --git a/codex-rs/tui2/src/diff_render.rs b/codex-rs/tui2/src/diff_render.rs index 24c5be597b7..1bda0e53dd8 100644 --- a/codex-rs/tui2/src/diff_render.rs +++ b/codex-rs/tui2/src/diff_render.rs @@ -300,6 +300,17 @@ fn render_change(change: &FileChange, out: &mut Vec>, width: usi } pub(crate) fn display_path_for(path: &Path, cwd: &Path) -> String { + // Prefer a stable, user-local relative path when the file is under the current working + // directory. This keeps output deterministic in jj-only repos (no `.git`) and matches user + // expectations for "files in this project". + if let Some(rel) = pathdiff::diff_paths(path, cwd) + && !rel + .components() + .any(|c| matches!(c, std::path::Component::ParentDir)) + { + return rel.display().to_string(); + } + let path_in_same_repo = match (get_git_repo_root(cwd), get_git_repo_root(path)) { (Some(cwd_repo), Some(path_repo)) => cwd_repo == path_repo, _ => false, @@ -557,23 +568,15 @@ mod tests { #[test] fn ui_snapshot_apply_delete_block() { - // Write a temporary file so the delete renderer can read original content - let tmp_path = PathBuf::from("tmp_delete_example.txt"); - std::fs::write(&tmp_path, "first\nsecond\nthird\n").expect("write tmp file"); - let mut changes: HashMap = HashMap::new(); changes.insert( - tmp_path.clone(), + PathBuf::from("tmp_delete_example.txt"), FileChange::Delete { content: "first\nsecond\nthird\n".to_string(), }, ); let lines = diff_summary_for_tests(&changes); - - // Cleanup best-effort; rendering has already read the file - let _ = std::fs::remove_file(&tmp_path); - 
snapshot_lines("apply_delete_block", lines, 80, 12); } diff --git a/codex-rs/tui2/src/history_cell.rs b/codex-rs/tui2/src/history_cell.rs index eb47d5ee695..3da3ba86ad7 100644 --- a/codex-rs/tui2/src/history_cell.rs +++ b/codex-rs/tui2/src/history_cell.rs @@ -10,7 +10,6 @@ use crate::exec_command::strip_bash_lc_and_escape; use crate::markdown::append_markdown; use crate::render::line_utils::line_to_static; use crate::render::line_utils::prefix_lines; -use crate::render::line_utils::push_owned_lines; use crate::render::renderable::Renderable; use crate::style::user_message_style; use crate::text_formatting::format_and_truncate_tool_result; @@ -21,7 +20,6 @@ use crate::update_action::UpdateAction; use crate::version::CODEX_CLI_VERSION; use crate::wrapping::RtOptions; use crate::wrapping::word_wrap_line; -use crate::wrapping::word_wrap_lines; use base64::Engine; use codex_common::format_env_display::format_env_display; use codex_core::config::Config; @@ -31,8 +29,8 @@ use codex_core::protocol::McpAuthStatus; use codex_core::protocol::McpInvocation; use codex_core::protocol::SessionConfiguredEvent; use codex_core::protocol::SubAgentInvocation; +use codex_core::protocol::SubAgentToolCallOutcome; use codex_protocol::openai_models::ReasoningEffort as ReasoningEffortConfig; -use codex_protocol::openai_models::ReasoningSummaryFormat; use codex_protocol::plan_tool::PlanItemArg; use codex_protocol::plan_tool::StepStatus; use codex_protocol::plan_tool::UpdatePlanArgs; @@ -61,6 +59,47 @@ use std::time::Instant; use tracing::error; use unicode_width::UnicodeWidthStr; +/// Visual transcript lines plus soft-wrap joiners. +/// +/// A history cell can produce multiple "visual lines" once prefixes/indents and wrapping are +/// applied. Clipboard reconstruction needs more information than just those lines: users expect +/// soft-wrapped prose to copy as a single logical line, while explicit newlines and spacer rows +/// should remain hard breaks. 
+/// +/// `joiner_before` records, for each output line, whether it is a continuation created by the +/// wrapping algorithm and what string should be inserted at the wrap boundary when joining lines. +/// This avoids heuristics like always inserting a space, and instead preserves the exact whitespace +/// that was skipped at the boundary. +/// +/// ## Note for `codex-tui` vs `codex-tui2` +/// +/// In `codex-tui`, `HistoryCell` only exposes `transcript_lines(...)` and the UI generally doesn't +/// need to reconstruct clipboard text across off-screen history or soft-wrap boundaries. +/// +/// In `codex-tui2`, transcript selection and copy are app-driven (not terminal-driven) and may span +/// content that isn't currently visible. That means we need additional metadata to distinguish hard +/// breaks from soft wraps and to preserve the exact whitespace at wrap boundaries. +/// +/// Invariants: +/// - `joiner_before.len() == lines.len()` +/// - `joiner_before[0]` is always `None` +/// - `None` represents a hard break +/// - `Some(joiner)` represents a soft wrap continuation +/// +/// Consumers: +/// - `transcript_render` threads joiners through transcript flattening/wrapping. +/// - `transcript_copy` uses them to join wrapped prose while preserving hard breaks. +#[derive(Debug, Clone)] +pub(crate) struct TranscriptLinesWithJoiners { + /// Visual transcript lines for a history cell, including any indent/prefix spans. + /// + /// This is the same shape used for on-screen transcript rendering: a single cell may expand + /// to multiple `Line`s after wrapping and prefixing. + pub(crate) lines: Vec>, + /// For each output line, whether and how to join it to the previous line when copying. + pub(crate) joiner_before: Vec>, +} + /// Represents an event to display in the conversation history. Returns its /// `Vec>` representation to make it easier to display in a /// scrollable list. 
@@ -79,6 +118,19 @@ pub(crate) trait HistoryCell: std::fmt::Debug + Send + Sync + Any { self.display_lines(width) } + /// Transcript lines plus soft-wrap joiners used for copy/paste fidelity. + /// + /// Most cells can use the default implementation (no joiners), but cells that apply wrapping + /// should override this and return joiners derived from the same wrapping operation so + /// clipboard reconstruction can distinguish hard breaks from soft wraps. + fn transcript_lines_with_joiners(&self, width: u16) -> TranscriptLinesWithJoiners { + let lines = self.transcript_lines(width); + TranscriptLinesWithJoiners { + joiner_before: vec![None; lines.len()], + lines, + } + } + fn desired_transcript_height(&self, width: u16) -> u16 { let lines = self.transcript_lines(width); // Workaround for ratatui bug: if there's only one line and it's whitespace-only, ratatui gives 2 lines. @@ -178,8 +230,10 @@ pub(crate) struct UserHistoryCell { impl HistoryCell for UserHistoryCell { fn display_lines(&self, width: u16) -> Vec> { - let mut lines: Vec> = Vec::new(); + self.transcript_lines_with_joiners(width).lines + } + fn transcript_lines_with_joiners(&self, width: u16) -> TranscriptLinesWithJoiners { let wrap_width = width .saturating_sub( LIVE_PREFIX_COLS + 1, /* keep a one-column right margin for wrapping */ @@ -188,17 +242,32 @@ impl HistoryCell for UserHistoryCell { let style = user_message_style(); - let wrapped = word_wrap_lines( + let (wrapped, joiner_before) = crate::wrapping::word_wrap_lines_with_joiners( self.message.lines().map(|l| Line::from(l).style(style)), // Wrap algorithm matches textarea.rs. 
RtOptions::new(usize::from(wrap_width)) .wrap_algorithm(textwrap::WrapAlgorithm::FirstFit), ); + let mut lines: Vec> = Vec::new(); + let mut joins: Vec> = Vec::new(); + lines.push(Line::from("").style(style)); - lines.extend(prefix_lines(wrapped, "› ".bold().dim(), " ".into())); + joins.push(None); + + let prefixed = prefix_lines(wrapped, "› ".bold().dim(), " ".into()); + for (line, joiner) in prefixed.into_iter().zip(joiner_before) { + lines.push(line); + joins.push(joiner); + } + lines.push(Line::from("").style(style)); - lines + joins.push(None); + + TranscriptLinesWithJoiners { + lines, + joiner_before: joins, + } } } @@ -219,6 +288,10 @@ impl ReasoningSummaryCell { } fn lines(&self, width: u16) -> Vec> { + self.lines_with_joiners(width).lines + } + + fn lines_with_joiners(&self, width: u16) -> TranscriptLinesWithJoiners { let mut lines: Vec> = Vec::new(); append_markdown( &self.content, @@ -238,12 +311,17 @@ impl ReasoningSummaryCell { }) .collect::>(); - word_wrap_lines( + let (lines, joiner_before) = crate::wrapping::word_wrap_lines_with_joiners( &summary_lines, RtOptions::new(width as usize) .initial_indent("• ".dim().into()) .subsequent_indent(" ".into()), - ) + ); + + TranscriptLinesWithJoiners { + lines, + joiner_before, + } } } @@ -268,6 +346,10 @@ impl HistoryCell for ReasoningSummaryCell { self.lines(width) } + fn transcript_lines_with_joiners(&self, width: u16) -> TranscriptLinesWithJoiners { + self.lines_with_joiners(width) + } + fn desired_transcript_height(&self, width: u16) -> u16 { self.lines(width).len() as u16 } @@ -290,16 +372,50 @@ impl AgentMessageCell { impl HistoryCell for AgentMessageCell { fn display_lines(&self, width: u16) -> Vec> { - word_wrap_lines( - &self.lines, - RtOptions::new(width as usize) - .initial_indent(if self.is_first_line { - "• ".dim().into() - } else { - " ".into() - }) - .subsequent_indent(" ".into()), - ) + self.transcript_lines_with_joiners(width).lines + } + + fn transcript_lines_with_joiners(&self, width: u16) 
-> TranscriptLinesWithJoiners { + use ratatui::style::Color; + + let mut out_lines: Vec> = Vec::new(); + let mut joiner_before: Vec> = Vec::new(); + + let mut is_first_output_line = true; + for line in &self.lines { + let is_code_block_line = line.style.fg == Some(Color::Cyan); + let initial_indent: Line<'static> = if is_first_output_line && self.is_first_line { + "• ".dim().into() + } else { + " ".into() + }; + let subsequent_indent: Line<'static> = " ".into(); + + if is_code_block_line { + let mut spans = initial_indent.spans; + spans.extend(line.spans.iter().cloned()); + out_lines.push(Line::from(spans).style(line.style)); + joiner_before.push(None); + is_first_output_line = false; + continue; + } + + let opts = RtOptions::new(width as usize) + .initial_indent(initial_indent) + .subsequent_indent(subsequent_indent.clone()); + let (wrapped, wrapped_joiners) = + crate::wrapping::word_wrap_line_with_joiners(line, opts); + for (l, j) in wrapped.into_iter().zip(wrapped_joiners) { + out_lines.push(line_to_static(&l)); + joiner_before.push(j); + is_first_output_line = false; + } + } + + TranscriptLinesWithJoiners { + lines: out_lines, + joiner_before, + } } fn is_stream_continuation(&self) -> bool { @@ -401,20 +517,29 @@ impl PrefixedWrappedHistoryCell { impl HistoryCell for PrefixedWrappedHistoryCell { fn display_lines(&self, width: u16) -> Vec> { + self.transcript_lines_with_joiners(width).lines + } + + fn desired_height(&self, width: u16) -> u16 { + self.display_lines(width).len() as u16 + } + + fn transcript_lines_with_joiners(&self, width: u16) -> TranscriptLinesWithJoiners { if width == 0 { - return Vec::new(); + return TranscriptLinesWithJoiners { + lines: Vec::new(), + joiner_before: Vec::new(), + }; } let opts = RtOptions::new(width.max(1) as usize) .initial_indent(self.initial_prefix.clone()) .subsequent_indent(self.subsequent_prefix.clone()); - let wrapped = word_wrap_lines(&self.text, opts); - let mut out = Vec::new(); - push_owned_lines(&wrapped, &mut 
out); - out - } - - fn desired_height(&self, width: u16) -> u16 { - self.display_lines(width).len() as u16 + let (lines, joiner_before) = + crate::wrapping::word_wrap_lines_with_joiners(&self.text, opts); + TranscriptLinesWithJoiners { + lines, + joiner_before, + } } } @@ -897,6 +1022,103 @@ pub(crate) struct McpToolCallCell { animations_enabled: bool, } +#[derive(Debug)] +pub(crate) struct FunctionToolCallCell { + tool_name: String, + arguments: String, + output: String, + success: bool, + duration: Option, +} + +impl FunctionToolCallCell { + pub(crate) fn new( + tool_name: String, + arguments: String, + output: String, + success: bool, + duration: Option, + ) -> Self { + Self { + tool_name, + arguments, + output, + success, + duration, + } + } + + fn invocation_text(&self) -> String { + if self.arguments.trim().is_empty() { + return self.tool_name.clone(); + } + format!("{} {}", self.tool_name, self.arguments) + } +} + +impl HistoryCell for FunctionToolCallCell { + fn display_lines(&self, width: u16) -> Vec> { + let mut lines: Vec> = Vec::new(); + + let bullet = if self.success { + "•".green().bold() + } else { + "•".red().bold() + }; + let header_text = "Called".bold(); + + let mut header_spans = vec![bullet, " ".into(), header_text, " ".into()]; + if let Some(duration) = self.duration { + header_spans.push(format!("({})", fmt_subagent_duration(duration)).dim()); + header_spans.push(" ".into()); + } + + let invocation_line: Line<'static> = Line::from(self.invocation_text()); + let mut compact_header = Line::from(header_spans.clone()); + let reserved = compact_header.width(); + let inline_invocation = + invocation_line.width() <= (width as usize).saturating_sub(reserved); + + if inline_invocation { + compact_header.extend(invocation_line.spans.clone()); + lines.push(compact_header); + } else { + header_spans.pop(); // drop trailing space for standalone header + lines.push(Line::from(header_spans)); + + let opts = RtOptions::new((width as usize).saturating_sub(4)) 
+ .initial_indent("".into()) + .subsequent_indent(" ".into()); + let wrapped = word_wrap_line(&invocation_line, opts); + let body_lines: Vec> = wrapped.iter().map(line_to_static).collect(); + lines.extend(prefix_lines(body_lines, " └ ".dim(), " ".into())); + } + + if !self.output.trim().is_empty() { + let detail_wrap_width = (width as usize).saturating_sub(4).max(1); + let text = format_and_truncate_tool_result( + &self.output, + TOOL_CALL_MAX_LINES, + detail_wrap_width, + ); + let mut detail_lines: Vec> = Vec::new(); + for segment in text.split('\n') { + let line = Line::from(segment.to_string().dim()); + let wrapped = word_wrap_line( + &line, + RtOptions::new(detail_wrap_width) + .initial_indent("".into()) + .subsequent_indent(" ".into()), + ); + detail_lines.extend(wrapped.iter().map(line_to_static)); + } + lines.extend(prefix_lines(detail_lines, " └ ".dim(), " ".into())); + } + + lines + } +} + impl McpToolCallCell { pub(crate) fn new( call_id: String, @@ -943,6 +1165,10 @@ impl McpToolCallCell { self.result = Some(Err("interrupted".to_string())); } + pub(crate) fn is_active(&self) -> bool { + self.result.is_none() + } + fn render_content_block(block: &mcp_types::ContentBlock, width: usize) -> String { match block { mcp_types::ContentBlock::TextContent(text) => { @@ -1064,6 +1290,7 @@ pub(crate) struct SubAgentToolCallCell { duration: Option, activity: Option, tokens: Option, + outcome: Option, result: Option>, } @@ -1076,6 +1303,7 @@ impl SubAgentToolCallCell { duration: None, activity: None, tokens: None, + outcome: None, result: None, } } @@ -1092,10 +1320,12 @@ impl SubAgentToolCallCell { &mut self, duration: Duration, tokens: Option, + outcome: Option, result: Result, ) { self.duration = Some(duration); self.tokens = tokens; + self.outcome = outcome; self.result = Some(result); } @@ -1124,6 +1354,7 @@ impl SubAgentToolCallCell { let elapsed = self.start_time.elapsed(); self.duration = Some(elapsed); self.tokens = None; + self.outcome = 
Some(SubAgentToolCallOutcome::Cancelled); self.result = Some(Err("interrupted".to_string())); } } @@ -1132,6 +1363,7 @@ impl HistoryCell for SubAgentToolCallCell { fn display_lines(&self, width: u16) -> Vec> { let status = self.success(); let indicator = match status { + _ if self.outcome == Some(SubAgentToolCallOutcome::Cancelled) => "⏹".dim(), Some(true) => "✓".green(), Some(false) => "✗".red(), None => "●".cyan(), @@ -1141,13 +1373,21 @@ impl HistoryCell for SubAgentToolCallCell { let elapsed = self.duration.unwrap_or_else(|| self.start_time.elapsed()); let elapsed = fmt_subagent_duration(elapsed); - let mut header_spans: Vec> = vec![ - indicator, - " ".into(), - "Subagent:".bold(), - " ".into(), - summary.into(), - ]; + let is_mini = self.invocation.model.as_deref() == Some("gpt-5.1-codex-mini"); + let is_explorer = self.invocation.label.starts_with("plan_explore_"); + + let mut header_spans: Vec> = vec![indicator, " ".into(), "Subagent".bold()]; + if is_explorer && !is_mini { + header_spans.push(" ".into()); + header_spans.push("[Explorer]".cyan().bold()); + } + if is_mini { + header_spans.push(" ".into()); + header_spans.push("[Mini]".magenta().bold()); + } + header_spans.push(":".bold()); + header_spans.push(" ".into()); + header_spans.push(summary.into()); let mut meta = format!(" ({elapsed}"); if let Some(tokens) = self.tokens.and_then(fmt_subagent_tokens) { meta.push_str(&format!(", {tokens} tok")); @@ -1161,6 +1401,7 @@ impl HistoryCell for SubAgentToolCallCell { let activity = self.activity.as_deref().unwrap_or("working…"); let activity = activity.strip_prefix("shell ").unwrap_or(activity).trim(); let detail_span = match &self.result { + _ if self.outcome == Some(SubAgentToolCallOutcome::Cancelled) => "cancelled".dim(), Some(Ok(_)) => "done".dim(), Some(Err(_)) => "failed".red(), None => activity.to_string().dim(), @@ -1244,6 +1485,7 @@ impl SubAgentToolCallGroupCell { call_id: &str, duration: Duration, tokens: Option, + outcome: Option, result: Result, 
) -> bool { match self @@ -1253,7 +1495,7 @@ impl SubAgentToolCallGroupCell { .find(|cell| cell.call_id() == call_id) { Some(cell) => { - cell.complete(duration, tokens, result); + cell.complete(duration, tokens, outcome, result); true } None => false, @@ -1574,6 +1816,202 @@ pub(crate) fn new_info_event(message: String, hint: Option) -> PlainHist PlainHistoryCell { lines } } +#[derive(Debug, PartialEq, Eq)] +struct LspDiagnosticsEntry { + path: PathBuf, + line: u32, + character: u32, + message: String, +} + +#[derive(Debug)] +struct LspDiagnosticsSummary { + errors: usize, + warnings: usize, + entries: Vec, +} + +pub(crate) fn new_lsp_diagnostics_event(text: String, cwd: PathBuf) -> Option { + let summary = parse_lsp_diagnostics_summary(&text)?; + if summary.errors == 0 && summary.warnings == 0 { + return None; + } + if summary.entries.is_empty() { + return None; + } + + let mut lines: Vec> = Vec::new(); + + let icon = if summary.errors > 0 { + "!".red().bold() + } else { + "!".magenta().bold() + }; + lines.push(vec![icon, " ".into(), "LSP diagnostics".bold()].into()); + + let errors = if summary.errors > 0 { + summary.errors.to_string().red().bold() + } else { + summary.errors.to_string().dim() + }; + let warnings = if summary.warnings > 0 { + summary.warnings.to_string().magenta().bold() + } else { + summary.warnings.to_string().dim() + }; + lines.push( + vec![ + " ".into(), + "Errors: ".dim(), + errors, + ", ".dim(), + "Warnings: ".dim(), + warnings, + ] + .into(), + ); + lines.push(Line::from("")); + + for entry in summary.entries { + let display_path = display_path_for(&entry.path, &cwd); + let loc = format!("{display_path}:{}:{}", entry.line, entry.character); + lines.push(vec![" - ".dim(), loc.dim(), " ".into(), entry.message.into()].into()); + } + + Some(PlainHistoryCell { lines }) +} + +fn parse_lsp_diagnostics_summary(text: &str) -> Option { + let mut lines = text.lines(); + let header = lines.next()?.trim(); + if header != "## LSP diagnostics" && header 
!= "LSP diagnostics" { + return None; + } + let counts = lines.next()?.trim(); + let errors = parse_lsp_count(counts, "Errors")?; + let warnings = parse_lsp_count(counts, "Warnings")?; + + let mut entries = Vec::new(); + for line in lines { + let line = line.trim(); + let Some(stripped) = line.strip_prefix("- ") else { + continue; + }; + if let Some(entry) = parse_lsp_diagnostic_entry(stripped) { + entries.push(entry); + } + } + + Some(LspDiagnosticsSummary { + errors, + warnings, + entries, + }) +} + +fn parse_lsp_count(line: &str, key: &str) -> Option { + let needle = format!("{key}:"); + let start = line.find(&needle)? + needle.len(); + let rest = line.get(start..)?.trim_start(); + let digits: String = rest.chars().take_while(char::is_ascii_digit).collect(); + digits.parse().ok() +} + +fn parse_lsp_diagnostic_entry(text: &str) -> Option { + let text = text.trim(); + // Format: ":: " + // + // `` can contain ':' on Windows (drive letters), and `` can also contain ':'. + // Parse from the right by looking for ": " (character) and a preceding ":" (line). 
+ let colon_positions: Vec = text.match_indices(':').map(|(idx, _)| idx).collect(); + if colon_positions.len() < 2 { + return None; + } + + for idx in (1..colon_positions.len()).rev() { + let colon_character = colon_positions[idx]; + let after_character = text.get(colon_character + 1..)?; + + let digits_len = after_character + .chars() + .take_while(char::is_ascii_digit) + .map(char::len_utf8) + .sum::(); + if digits_len == 0 { + continue; + } + + let after_digits = after_character.get(digits_len..)?; + if !after_digits.starts_with(' ') { + continue; + } + + let character = after_character.get(..digits_len)?.parse::().ok()?; + let message = after_digits.trim_start().to_string(); + + let colon_line = colon_positions[idx - 1]; + let line_str = text.get(colon_line + 1..colon_character)?; + if line_str.is_empty() || !line_str.chars().all(|c| c.is_ascii_digit()) { + continue; + } + let line = line_str.parse::().ok()?; + + let path = PathBuf::from(text.get(..colon_line)?); + return Some(LspDiagnosticsEntry { + path, + line, + character, + message, + }); + } + + None +} + +#[cfg(test)] +mod lsp_diagnostics_tests { + use super::*; + use pretty_assertions::assert_eq; + + #[test] + fn parse_lsp_diagnostic_entry_handles_colons_in_path_and_message() { + let entry = parse_lsp_diagnostic_entry( + r#"F:\GitHub\codex\codex-rs\mcp-server\tests\common\mcp_process.rs:43:14 Syntax Error: expected expression"#, + ) + .expect("should parse entry"); + assert_eq!( + entry, + LspDiagnosticsEntry { + path: PathBuf::from( + r#"F:\GitHub\codex\codex-rs\mcp-server\tests\common\mcp_process.rs"#, + ), + line: 43, + character: 14, + message: "Syntax Error: expected expression".to_string(), + } + ); + } + + #[test] + fn parse_lsp_diagnostic_entry_handles_messages_without_colons() { + let entry = parse_lsp_diagnostic_entry( + r#"F:\GitHub\codex\codex-rs\mcp-server\tests\common\mcp_process.rs:75:17 Variable `None` should have snake_case name, e.g. 
`none`"#, + ) + .expect("should parse entry"); + assert_eq!( + entry, + LspDiagnosticsEntry { + path: PathBuf::from( + r#"F:\GitHub\codex\codex-rs\mcp-server\tests\common\mcp_process.rs"#, + ), + line: 75, + character: 17, + message: "Variable `None` should have snake_case name, e.g. `none`".to_string(), + } + ); + } +} + pub(crate) fn new_error_event(message: String) -> PlainHistoryCell { // Use a hair space (U+200A) to create a subtle, near-invisible separation // before the text. VS16 is intentionally omitted to keep spacing tighter @@ -1698,39 +2136,34 @@ pub(crate) fn new_view_image_tool_call(path: PathBuf, cwd: &Path) -> PlainHistor PlainHistoryCell { lines } } -pub(crate) fn new_reasoning_summary_block( - full_reasoning_buffer: String, - reasoning_summary_format: ReasoningSummaryFormat, -) -> Box { - if reasoning_summary_format == ReasoningSummaryFormat::Experimental { - // Experimental format is following: - // ** header ** - // - // reasoning summary - // - // So we need to strip header from reasoning summary - let full_reasoning_buffer = full_reasoning_buffer.trim(); - if let Some(open) = full_reasoning_buffer.find("**") { - let after_open = &full_reasoning_buffer[(open + 2)..]; - if let Some(close) = after_open.find("**") { - let after_close_idx = open + 2 + close + 2; - // if we don't have anything beyond `after_close_idx` - // then we don't have a summary to inject into history - if after_close_idx < full_reasoning_buffer.len() { - let header_buffer = full_reasoning_buffer[..after_close_idx].to_string(); - let summary_buffer = full_reasoning_buffer[after_close_idx..].to_string(); - return Box::new(ReasoningSummaryCell::new( - header_buffer, - summary_buffer, - false, - )); - } +pub(crate) fn new_reasoning_summary_block(full_reasoning_buffer: String) -> Box { + // Experimental format is following: + // ** header ** + // + // reasoning summary + // + // So we need to strip header from reasoning summary + let full_reasoning_buffer = 
full_reasoning_buffer.trim(); + if let Some(open) = full_reasoning_buffer.find("**") { + let after_open = &full_reasoning_buffer[(open + 2)..]; + if let Some(close) = after_open.find("**") { + let after_close_idx = open + 2 + close + 2; + // if we don't have anything beyond `after_close_idx` + // then we don't have a summary to inject into history + if after_close_idx < full_reasoning_buffer.len() { + let header_buffer = full_reasoning_buffer[..after_close_idx].to_string(); + let summary_buffer = full_reasoning_buffer[after_close_idx..].to_string(); + return Box::new(ReasoningSummaryCell::new( + header_buffer, + summary_buffer, + false, + )); } } } Box::new(ReasoningSummaryCell::new( "".to_string(), - full_reasoning_buffer, + full_reasoning_buffer.to_string(), true, )) } @@ -1848,13 +2281,13 @@ mod tests { use codex_core::config::ConfigBuilder; use codex_core::config::types::McpServerConfig; use codex_core::config::types::McpServerTransportConfig; - use codex_core::models_manager::manager::ModelsManager; use codex_core::protocol::McpAuthStatus; use codex_protocol::parse_command::ParsedCommand; use dirs::home_dir; use pretty_assertions::assert_eq; use serde_json::json; use std::collections::HashMap; + use std::time::Duration; use codex_core::protocol::ExecCommandSource; use mcp_types::CallToolResult; @@ -1887,16 +2320,84 @@ mod tests { render_lines(&cell.transcript_lines(u16::MAX)) } + #[test] + fn function_tool_call_cell_renders_invocation_and_output() { + let cell = FunctionToolCallCell::new( + "lsp_definition".to_string(), + r#"{"file_path":"C:\\repo\\src\\main.rs","line":1,"character":1}"#.to_string(), + r#"{"locations":[]}"#.to_string(), + true, + Some(Duration::from_millis(120)), + ); + let rendered = render_transcript(&cell).join("\n"); + assert!(rendered.contains("Called")); + assert!(rendered.contains("lsp_definition")); + assert!(rendered.contains("\"locations\"")); + } + #[test] fn subagent_summary_prefers_description() { let invocation = 
SubAgentInvocation { description: "Summarize the auth flow".to_string(), label: "alpha".to_string(), prompt: "Prompt".to_string(), + model: None, }; assert_eq!(subagent_summary(&invocation), "Summarize the auth flow"); } + #[test] + fn subagent_cell_renders_mini_badge() { + let invocation = SubAgentInvocation { + description: "Trace control flow".to_string(), + label: "mini".to_string(), + prompt: "Prompt".to_string(), + model: Some("gpt-5.1-codex-mini".to_string()), + }; + let mut cell = SubAgentToolCallCell::new("call-1".to_string(), invocation); + cell.duration = Some(Duration::from_secs(0)); + let lines = render_lines(&cell.display_lines(200)); + assert!( + lines + .first() + .is_some_and(|line| line.contains("Subagent [Mini]:")) + ); + } + + #[test] + fn subagent_cell_renders_explorer_badge() { + let invocation = SubAgentInvocation { + description: "Repo map".to_string(), + label: "plan_explore_1".to_string(), + prompt: "Prompt".to_string(), + model: None, + }; + let mut cell = SubAgentToolCallCell::new("call-1".to_string(), invocation); + cell.duration = Some(Duration::from_secs(0)); + let lines = render_lines(&cell.display_lines(200)); + assert!( + lines + .first() + .is_some_and(|line| line.contains("Subagent [Explorer]:")) + ); + } + + #[test] + fn subagent_cell_renders_mini_badge_when_explorer_and_mini() { + let invocation = SubAgentInvocation { + description: "Repo map".to_string(), + label: "plan_explore_1".to_string(), + prompt: "Prompt".to_string(), + model: Some("gpt-5.1-codex-mini".to_string()), + }; + let mut cell = SubAgentToolCallCell::new("call-1".to_string(), invocation); + cell.duration = Some(Duration::from_secs(0)); + let lines = render_lines(&cell.display_lines(200)); + let first = lines.first(); + assert!(first.is_some_and(|line| line.contains("Subagent [Mini]:"))); + assert!(first.is_some_and(|line| !line.contains("[Explorer]"))); + } + #[tokio::test] async fn mcp_tools_output_masks_sensitive_values() { let mut config = 
test_config().await; @@ -2705,10 +3206,8 @@ mod tests { } #[test] fn reasoning_summary_block() { - let reasoning_format = ReasoningSummaryFormat::Experimental; let cell = new_reasoning_summary_block( "**High level reasoning**\n\nDetailed reasoning goes here.".to_string(), - reasoning_format, ); let rendered_display = render_lines(&cell.display_lines(80)); @@ -2720,11 +3219,7 @@ mod tests { #[test] fn reasoning_summary_block_returns_reasoning_cell_when_feature_disabled() { - let reasoning_format = ReasoningSummaryFormat::Experimental; - let cell = new_reasoning_summary_block( - "Detailed reasoning goes here.".to_string(), - reasoning_format, - ); + let cell = new_reasoning_summary_block("Detailed reasoning goes here.".to_string()); let rendered = render_transcript(cell.as_ref()); assert_eq!(rendered, vec!["• Detailed reasoning goes here."]); @@ -2735,17 +3230,9 @@ mod tests { let mut config = test_config().await; config.model = Some("gpt-3.5-turbo".to_string()); config.model_supports_reasoning_summaries = Some(true); - config.model_reasoning_summary_format = Some(ReasoningSummaryFormat::Experimental); - let model_family = - ModelsManager::construct_model_family_offline(&config.model.clone().unwrap(), &config); - assert_eq!( - model_family.reasoning_summary_format, - ReasoningSummaryFormat::Experimental - ); let cell = new_reasoning_summary_block( "**High level reasoning**\n\nDetailed reasoning goes here.".to_string(), - model_family.reasoning_summary_format, ); let rendered_display = render_lines(&cell.display_lines(80)); @@ -2754,11 +3241,8 @@ mod tests { #[test] fn reasoning_summary_block_falls_back_when_header_is_missing() { - let reasoning_format = ReasoningSummaryFormat::Experimental; - let cell = new_reasoning_summary_block( - "**High level reasoning without closing".to_string(), - reasoning_format, - ); + let cell = + new_reasoning_summary_block("**High level reasoning without closing".to_string()); let rendered = render_transcript(cell.as_ref()); 
assert_eq!(rendered, vec!["• **High level reasoning without closing"]); @@ -2766,18 +3250,14 @@ mod tests { #[test] fn reasoning_summary_block_falls_back_when_summary_is_missing() { - let reasoning_format = ReasoningSummaryFormat::Experimental; - let cell = new_reasoning_summary_block( - "**High level reasoning without closing**".to_string(), - reasoning_format.clone(), - ); + let cell = + new_reasoning_summary_block("**High level reasoning without closing**".to_string()); let rendered = render_transcript(cell.as_ref()); assert_eq!(rendered, vec!["• High level reasoning without closing"]); let cell = new_reasoning_summary_block( "**High level reasoning without closing**\n\n ".to_string(), - reasoning_format, ); let rendered = render_transcript(cell.as_ref()); @@ -2786,10 +3266,8 @@ mod tests { #[test] fn reasoning_summary_block_splits_header_and_summary_when_present() { - let reasoning_format = ReasoningSummaryFormat::Experimental; let cell = new_reasoning_summary_block( "**High level plan**\n\nWe should fix the bug next.".to_string(), - reasoning_format, ); let rendered_display = render_lines(&cell.display_lines(80)); diff --git a/codex-rs/tui2/src/insert_history.rs b/codex-rs/tui2/src/insert_history.rs index e0e2731678a..86a32c16ce8 100644 --- a/codex-rs/tui2/src/insert_history.rs +++ b/codex-rs/tui2/src/insert_history.rs @@ -1,3 +1,22 @@ +//! Render `ratatui` transcript lines into terminal scrollback. +//! +//! `insert_history_lines` is responsible for inserting rendered transcript lines +//! *above* the TUI viewport by emitting ANSI control sequences through the +//! terminal backend writer. +//! +//! ## Why we use crossterm style commands +//! +//! `write_spans` is also used by non-terminal callers (e.g. +//! `transcript_render::render_lines_to_ansi`) to produce deterministic ANSI +//! output for tests and "print after exit" flows. That means the implementation +//! must work with any `impl Write` (including an in-memory `Vec`) and must +//! 
preserve `ratatui::style::Color` semantics, including `Rgb(...)` and +//! `Indexed(...)`. +//! +//! Crossterm's style commands implement `Command` (including ANSI emission), so +//! `write_spans` can remain backend-independent while still producing ANSI +//! output that matches the terminal-rendered transcript. + use std::fmt; use std::io; use std::io::Write; @@ -10,14 +29,11 @@ use crossterm::style::Color as CColor; use crossterm::style::Colors; use crossterm::style::Print; use crossterm::style::SetAttribute; -use crossterm::style::SetBackgroundColor; use crossterm::style::SetColors; -use crossterm::style::SetForegroundColor; use crossterm::terminal::Clear; use crossterm::terminal::ClearType; use ratatui::layout::Size; use ratatui::prelude::Backend; -use ratatui::style::Color; use ratatui::style::Modifier; use ratatui::text::Line; use ratatui::text::Span; @@ -97,14 +113,8 @@ where queue!( writer, SetColors(Colors::new( - line.style - .fg - .map(std::convert::Into::into) - .unwrap_or(CColor::Reset), - line.style - .bg - .map(std::convert::Into::into) - .unwrap_or(CColor::Reset) + line.style.fg.map(Into::into).unwrap_or(CColor::Reset), + line.style.bg.map(Into::into).unwrap_or(CColor::Reset), )) )?; queue!(writer, Clear(ClearType::UntilNewLine))?; @@ -245,8 +255,8 @@ pub(crate) fn write_spans<'a, I>(mut writer: &mut impl Write, content: I) -> io: where I: IntoIterator>, { - let mut fg = Color::Reset; - let mut bg = Color::Reset; + let mut fg = CColor::Reset; + let mut bg = CColor::Reset; let mut last_modifier = Modifier::empty(); for span in content { let mut modifier = Modifier::empty(); @@ -260,13 +270,10 @@ where diff.queue(&mut writer)?; last_modifier = modifier; } - let next_fg = span.style.fg.unwrap_or(Color::Reset); - let next_bg = span.style.bg.unwrap_or(Color::Reset); + let next_fg = span.style.fg.map(Into::into).unwrap_or(CColor::Reset); + let next_bg = span.style.bg.map(Into::into).unwrap_or(CColor::Reset); if next_fg != fg || next_bg != bg { - queue!( - 
writer, - SetColors(Colors::new(next_fg.into(), next_bg.into())) - )?; + queue!(writer, SetColors(Colors::new(next_fg, next_bg)))?; fg = next_fg; bg = next_bg; } @@ -274,12 +281,7 @@ where queue!(writer, Print(span.content.clone()))?; } - queue!( - writer, - SetForegroundColor(CColor::Reset), - SetBackgroundColor(CColor::Reset), - SetAttribute(crossterm::style::Attribute::Reset), - ) + queue!(writer, SetAttribute(crossterm::style::Attribute::Reset)) } #[cfg(test)] @@ -287,8 +289,10 @@ mod tests { use super::*; use crate::markdown_render::render_markdown_text; use crate::test_backend::VT100Backend; + use pretty_assertions::assert_eq; use ratatui::layout::Rect; use ratatui::style::Color; + use ratatui::style::Style; #[test] fn writes_bold_then_regular_spans() { @@ -306,8 +310,6 @@ mod tests { Print("A"), SetAttribute(crossterm::style::Attribute::NormalIntensity), Print("B"), - SetForegroundColor(CColor::Reset), - SetBackgroundColor(CColor::Reset), SetAttribute(crossterm::style::Attribute::Reset), ) .unwrap(); @@ -318,6 +320,45 @@ mod tests { ); } + #[test] + fn write_spans_emits_truecolor_and_indexed_sgr() { + // This test asserts that `write_spans` emits the correct SGR sequences for colors that + // can't be represented with the theme-aware ANSI palette: + // + // - `ratatui::style::Color::Rgb` (truecolor; `38;2;r;g;b`) + // - `ratatui::style::Color::Indexed` (256-color index; `48;5;n`) + // + // Those constructors are intentionally disallowed in production code (see + // `codex-rs/clippy.toml`), but the test needs them so the output bytes are fully + // deterministic. 
+ #[expect(clippy::disallowed_methods)] + let fg = Color::Rgb(1, 2, 3); + #[expect(clippy::disallowed_methods)] + let bg = Color::Indexed(42); + + let spans = [Span::styled("X", Style::default().fg(fg).bg(bg))]; + + let mut actual: Vec = Vec::new(); + write_spans(&mut actual, spans.iter()).unwrap(); + + let mut expected: Vec = Vec::new(); + queue!( + expected, + SetColors(Colors::new( + CColor::Rgb { r: 1, g: 2, b: 3 }, + CColor::AnsiValue(42) + )), + Print("X"), + SetAttribute(crossterm::style::Attribute::Reset), + ) + .unwrap(); + + assert_eq!( + String::from_utf8(actual).unwrap(), + String::from_utf8(expected).unwrap(), + ); + } + #[test] fn vt100_blockquote_line_emits_green_fg() { // Set up a small off-screen terminal @@ -331,7 +372,7 @@ mod tests { // Build a blockquote-like line: apply line-level green style and prefix "> " let mut line: Line<'static> = Line::from(vec!["> ".into(), "Hello world".into()]); - line = line.style(Color::Green); + line = line.style(Style::default().fg(Color::Green)); insert_history_lines(&mut term, vec![line]) .expect("Failed to insert history lines in test"); @@ -369,7 +410,7 @@ mod tests { "> ".into(), "This is a long quoted line that should wrap".into(), ]); - line = line.style(Color::Green); + line = line.style(Style::default().fg(Color::Green)); insert_history_lines(&mut term, vec![line]) .expect("Failed to insert history lines in test"); diff --git a/codex-rs/tui2/src/key_hint.rs b/codex-rs/tui2/src/key_hint.rs index f277f073845..cd9c188f8e2 100644 --- a/codex-rs/tui2/src/key_hint.rs +++ b/codex-rs/tui2/src/key_hint.rs @@ -53,6 +53,10 @@ pub(crate) const fn ctrl_alt(key: KeyCode) -> KeyBinding { KeyBinding::new(key, KeyModifiers::CONTROL.union(KeyModifiers::ALT)) } +pub(crate) const fn ctrl_shift(key: KeyCode) -> KeyBinding { + KeyBinding::new(key, KeyModifiers::CONTROL.union(KeyModifiers::SHIFT)) +} + fn modifiers_to_string(modifiers: KeyModifiers) -> String { let mut result = String::new(); if 
modifiers.contains(KeyModifiers::CONTROL) { @@ -75,8 +79,17 @@ impl From for Span<'static> { impl From<&KeyBinding> for Span<'static> { fn from(binding: &KeyBinding) -> Self { let KeyBinding { key, modifiers } = binding; - let modifiers = modifiers_to_string(*modifiers); - let key = match key { + // `KeyCode::BackTab` is effectively "shift + tab", but crossterm formats it as "back tab", + // which is confusing in our UI hints. + let mut display_key = *key; + let mut display_modifiers = *modifiers; + if display_key == KeyCode::BackTab { + display_key = KeyCode::Tab; + display_modifiers |= KeyModifiers::SHIFT; + } + + let modifiers = modifiers_to_string(display_modifiers); + let key = match display_key { KeyCode::Enter => "enter".to_string(), KeyCode::Char(' ') => "space".to_string(), KeyCode::Up => "↑".to_string(), @@ -85,7 +98,7 @@ impl From<&KeyBinding> for Span<'static> { KeyCode::Right => "→".to_string(), KeyCode::PageUp => "pgup".to_string(), KeyCode::PageDown => "pgdn".to_string(), - _ => format!("{key}").to_ascii_lowercase(), + _ => format!("{display_key}").to_ascii_lowercase(), }; Span::styled(format!("{modifiers}{key}"), key_hint_style()) } diff --git a/codex-rs/tui2/src/lib.rs b/codex-rs/tui2/src/lib.rs index 23353e1ba8d..503275eec7e 100644 --- a/codex-rs/tui2/src/lib.rs +++ b/codex-rs/tui2/src/lib.rs @@ -55,6 +55,7 @@ mod history_cell; pub mod insert_history; mod key_hint; pub mod live_wrap; +mod lsp_status; mod markdown; mod markdown_render; mod markdown_stream; @@ -77,6 +78,11 @@ mod style; mod terminal_palette; mod text_formatting; mod tooltips; +mod transcript_copy; +mod transcript_copy_ui; +mod transcript_multi_click; +mod transcript_render; +mod transcript_selection; mod tui; mod ui_consts; pub mod update_action; @@ -278,7 +284,10 @@ pub async fn run_main( let file_layer = tracing_subscriber::fmt::layer() .with_writer(non_blocking) - .with_target(false) + // `with_target(true)` is the default, but we previously disabled it for file output. 
+ // Keep it enabled so we can selectively enable targets via `RUST_LOG=...` and then + // grep for a specific module/target while troubleshooting. + .with_target(true) .with_ansi(false) .with_span_events(tracing_subscriber::fmt::format::FmtSpan::CLOSE) .with_filter(env_filter()); diff --git a/codex-rs/tui2/src/lsp_status.rs b/codex-rs/tui2/src/lsp_status.rs new file mode 100644 index 00000000000..d39755f3075 --- /dev/null +++ b/codex-rs/tui2/src/lsp_status.rs @@ -0,0 +1,123 @@ +use codex_lsp::LspServerSource; +use codex_lsp::LspStatus; +use ratatui::style::Style; +use ratatui::style::Stylize as _; +use ratatui::text::Line; +use ratatui::text::Span; + +pub(crate) fn render(status: &LspStatus) -> Vec> { + let mut lines: Vec> = Vec::new(); + + lines.push(Line::from("LSP status".bold())); + + let enabled = status.enabled.to_string(); + lines.push(vec!["enabled: ".dim(), enabled.into()].into()); + + lines.push(vec!["root: ".dim(), status.root.display().to_string().into()].into()); + + let workspace = if status.workspace_initialized { + "initialized".to_string() + } else { + "not initialized".to_string() + }; + lines.push(vec!["workspace: ".dim(), workspace.into()].into()); + + if !status.ignored_globs.is_empty() { + lines.push(vec!["ignored: ".dim(), status.ignored_globs.join(", ").into()].into()); + } + + lines.push( + vec![ + "max_file_bytes: ".dim(), + status.max_file_bytes.to_string().into(), + ] + .into(), + ); + + lines.push(Line::from("")); + lines.push(Line::from("Servers".bold())); + + for lang in &status.languages { + let supports_pull_diagnostics = lang.supports_pull_diagnostics == Some(true); + let header = vec![ + "• ".into(), + Span::from(lang.language_id.clone()).bold(), + " ".into(), + "running=".dim(), + if lang.running { + "yes".green() + } else { + "no".dim() + }, + if supports_pull_diagnostics { + ", caps=pull-diags".dim() + } else { + Span::default() + }, + ]; + lines.push(header.into()); + + let effective = lang.effective.as_ref().map(|(cfg, 
source)| { + let source = match source { + LspServerSource::Config => "config", + LspServerSource::Autodetect => "autodetect", + }; + format!("{} ({source})", cfg.command) + }); + lines.extend(render_kv( + "effective", + effective.as_deref().unwrap_or("(none)"), + 2, + )); + if let Some((cfg, _)) = &lang.effective + && !cfg.args.is_empty() + { + lines.extend(render_kv("args", &cfg.args.join(" "), 2)); + } + + if let Some(cfg) = &lang.configured { + lines.extend(render_kv("configured", &cfg.command, 2)); + if !cfg.args.is_empty() { + lines.extend(render_kv("args", &cfg.args.join(" "), 2)); + } + } else { + lines.extend(render_kv("configured", "(none)", 2)); + } + + match (&lang.autodetected, &lang.effective) { + (Some(det), Some((eff, LspServerSource::Autodetect))) + if det.command == eff.command && det.args == eff.args => {} + (Some(det), _) => { + lines.extend(render_kv("detected", &det.command, 2)); + if !det.args.is_empty() { + lines.extend(render_kv("args", &det.args.join(" "), 2)); + } + } + (None, _) => { + lines.extend(render_kv("detected", "(not found on PATH)", 2)); + } + } + + lines.push(Line::from("")); + } + + while matches!(lines.last(), Some(line) if line.spans.is_empty()) { + lines.pop(); + } + + lines +} + +fn render_kv(key: &str, value: &str, indent: usize) -> Vec> { + let prefix = " ".repeat(indent); + let key = format!("{prefix}{key}: "); + + let key_span: Span<'static> = Span::styled(key, Style::new().dim()); + let value_span: Span<'static> = if value.starts_with('(') { + Span::styled(value.to_string(), Style::new().dim()) + } else { + Span::from(value.to_string()) + }; + + vec![vec![key_span, value_span].into()] +} diff --git a/codex-rs/tui2/src/markdown_render.rs b/codex-rs/tui2/src/markdown_render.rs index 4ba7b613167..22a234326aa 100644 --- a/codex-rs/tui2/src/markdown_render.rs +++ b/codex-rs/tui2/src/markdown_render.rs @@ -468,11 +468,19 @@ where .indent_stack .iter() .any(|ctx| ctx.prefix.iter().any(|s| s.content.contains('>'))); - let 
style = if blockquote_active { + let mut style = if blockquote_active { self.styles.blockquote } else { line.style }; + // Code blocks are "preformatted": we want them to keep code styling even when they appear + // within other structures like blockquotes (which otherwise apply a line-level style). + // + // This matters for copy fidelity: downstream copy logic uses code styling as a cue to + // preserve indentation and to fence code runs with Markdown markers. + if self.in_code_block { + style = style.patch(self.styles.code); + } let was_pending = self.pending_marker_line; self.current_initial_indent = self.prefix_spans(was_pending); diff --git a/codex-rs/tui2/src/markdown_render_tests.rs b/codex-rs/tui2/src/markdown_render_tests.rs index c5496996957..fc9c8bc8033 100644 --- a/codex-rs/tui2/src/markdown_render_tests.rs +++ b/codex-rs/tui2/src/markdown_render_tests.rs @@ -1,4 +1,5 @@ use pretty_assertions::assert_eq; +use ratatui::style::Color; use ratatui::style::Stylize; use ratatui::text::Line; use ratatui::text::Span; @@ -382,34 +383,20 @@ fn blockquote_heading_inherits_heading_style() { fn blockquote_with_code_block() { let md = "> ```\n> code\n> ```\n"; let text = render_markdown_text(md); - let lines: Vec = text - .lines - .iter() - .map(|l| { - l.spans - .iter() - .map(|s| s.content.clone()) - .collect::() - }) - .collect(); - assert_eq!(lines, vec!["> code".to_string()]); + assert_eq!(text.lines, [Line::from_iter(["> ", "", "code"]).cyan()]); } #[test] fn blockquote_with_multiline_code_block() { let md = "> ```\n> first\n> second\n> ```\n"; let text = render_markdown_text(md); - let lines: Vec = text - .lines - .iter() - .map(|l| { - l.spans - .iter() - .map(|s| s.content.clone()) - .collect::() - }) - .collect(); - assert_eq!(lines, vec!["> first", "> second"]); + assert_eq!( + text.lines, + [ + Line::from_iter(["> ", "", "first"]).cyan(), + Line::from_iter(["> ", "", "second"]).cyan(), + ] + ); } #[test] @@ -453,6 +440,12 @@ fn 
nested_blockquote_with_inline_and_fenced_code() { "> > echo \"hello from a quote\"".to_string(), ] ); + + // Fenced code inside nested blockquotes should keep code styling so copy logic can treat it as + // preformatted. + for idx in [4usize, 5usize] { + assert_eq!(text.lines[idx].style.fg, Some(Color::Cyan)); + } } #[test] @@ -654,7 +647,7 @@ fn link() { #[test] fn code_block_unhighlighted() { let text = render_markdown_text("```rust\nfn main() {}\n```\n"); - let expected = Text::from_iter([Line::from_iter(["", "fn main() {}"])]); + let expected = Text::from_iter([Line::from_iter(["", "fn main() {}"]).cyan()]); assert_eq!(text, expected); } @@ -663,8 +656,8 @@ fn code_block_multiple_lines_root() { let md = "```\nfirst\nsecond\n```\n"; let text = render_markdown_text(md); let expected = Text::from_iter([ - Line::from_iter(["", "first"]), - Line::from_iter(["", "second"]), + Line::from_iter(["", "first"]).cyan(), + Line::from_iter(["", "second"]).cyan(), ]); assert_eq!(text, expected); } @@ -674,9 +667,9 @@ fn code_block_indented() { let md = " function greet() {\n console.log(\"Hi\");\n }\n"; let text = render_markdown_text(md); let expected = Text::from_iter([ - Line::from_iter([" ", "function greet() {"]), - Line::from_iter([" ", " console.log(\"Hi\");"]), - Line::from_iter([" ", "}"]), + Line::from_iter([" ", "function greet() {"]).cyan(), + Line::from_iter([" ", " console.log(\"Hi\");"]).cyan(), + Line::from_iter([" ", "}"]).cyan(), ]); assert_eq!(text, expected); } diff --git a/codex-rs/tui2/src/pager_overlay.rs b/codex-rs/tui2/src/pager_overlay.rs index 06a7cad467e..3d24aef96a6 100644 --- a/codex-rs/tui2/src/pager_overlay.rs +++ b/codex-rs/tui2/src/pager_overlay.rs @@ -1,6 +1,5 @@ use std::io::Result; use std::sync::Arc; -use std::time::Duration; use crate::history_cell::HistoryCell; use crate::history_cell::UserHistoryCell; @@ -280,8 +279,8 @@ impl PagerView { return Ok(()); } } - tui.frame_requester() - .schedule_frame_in(Duration::from_millis(16)); + // 
Request a redraw; the frame scheduler coalesces bursts and clamps to 60fps. + tui.frame_requester().schedule_frame(); Ok(()) } @@ -298,8 +297,8 @@ impl PagerView { return Ok(()); } } - tui.frame_requester() - .schedule_frame_in(Duration::from_millis(16)); + // Request a redraw; the frame scheduler coalesces bursts and clamps to 60fps. + tui.frame_requester().schedule_frame(); Ok(()) } diff --git a/codex-rs/tui2/src/slash_command.rs b/codex-rs/tui2/src/slash_command.rs index df759a40ccd..ccf0a930b9d 100644 --- a/codex-rs/tui2/src/slash_command.rs +++ b/codex-rs/tui2/src/slash_command.rs @@ -14,7 +14,9 @@ pub enum SlashCommand { // more frequently used commands should be listed first. Model, PlanModel, + SubagentModel, Approvals, + Experimental, Skills, Review, Plan, @@ -22,10 +24,11 @@ pub enum SlashCommand { Resume, Init, Compact, - Undo, + // Undo, Diff, Mention, Status, + Lsp, Mcp, Logout, Quit, @@ -46,15 +49,20 @@ impl SlashCommand { SlashCommand::Review => "review my current changes and find issues", SlashCommand::Plan => "plan a task before making changes", SlashCommand::Resume => "resume a saved chat", - SlashCommand::Undo => "ask Codex to undo a turn", + // SlashCommand::Undo => "ask Codex to undo a turn", SlashCommand::Quit | SlashCommand::Exit => "exit Codex", SlashCommand::Diff => "show git diff (including untracked files)", SlashCommand::Mention => "mention a file", SlashCommand::Skills => "use skills to improve how Codex performs specific tasks", SlashCommand::Status => "show current session configuration and token usage", + SlashCommand::Lsp => "show current LSP status (detected/configured/running)", SlashCommand::Model => "choose what model and reasoning effort to use", SlashCommand::PlanModel => "choose what model and reasoning effort to use for /plan", + SlashCommand::SubagentModel => { + "choose what model and reasoning effort to use for spawned subagents" + } SlashCommand::Approvals => "choose what Codex can do without approval", + 
SlashCommand::Experimental => "toggle beta features", SlashCommand::Mcp => "list configured MCP tools", SlashCommand::Logout => "log out of Codex", SlashCommand::Rollout => "print the rollout file path", @@ -75,10 +83,12 @@ impl SlashCommand { | SlashCommand::Resume | SlashCommand::Init | SlashCommand::Compact - | SlashCommand::Undo + // | SlashCommand::Undo | SlashCommand::Model | SlashCommand::PlanModel + | SlashCommand::SubagentModel | SlashCommand::Approvals + | SlashCommand::Experimental | SlashCommand::Review | SlashCommand::Plan | SlashCommand::Logout => false, @@ -86,6 +96,7 @@ impl SlashCommand { | SlashCommand::Mention | SlashCommand::Skills | SlashCommand::Status + | SlashCommand::Lsp | SlashCommand::Mcp | SlashCommand::Feedback | SlashCommand::Quit diff --git a/codex-rs/tui2/src/snapshots/codex_tui2__status_indicator_widget__tests__renders_wrapped_details_panama_two_lines.snap b/codex-rs/tui2/src/snapshots/codex_tui2__status_indicator_widget__tests__renders_wrapped_details_panama_two_lines.snap new file mode 100644 index 00000000000..94c0fc3082b --- /dev/null +++ b/codex-rs/tui2/src/snapshots/codex_tui2__status_indicator_widget__tests__renders_wrapped_details_panama_two_lines.snap @@ -0,0 +1,7 @@ +--- +source: tui2/src/status_indicator_widget.rs +expression: terminal.backend() +--- +"• Working (0s) " +" └ A man a plan a canal " +" panama " diff --git a/codex-rs/tui2/src/status/rate_limits.rs b/codex-rs/tui2/src/status/rate_limits.rs index e8dc689a628..3fae3ac295c 100644 --- a/codex-rs/tui2/src/status/rate_limits.rs +++ b/codex-rs/tui2/src/status/rate_limits.rs @@ -1,4 +1,5 @@ use crate::chatwidget::get_limits_duration; +use crate::text_formatting::capitalize_first; use super::helpers::format_reset_timestamp; use chrono::DateTime; @@ -221,15 +222,3 @@ fn format_credit_balance(raw: &str) -> Option { None } - -fn capitalize_first(label: &str) -> String { - let mut chars = label.chars(); - match chars.next() { - Some(first) => { - let mut capitalized = 
first.to_uppercase().collect::(); - capitalized.push_str(chars.as_str()); - capitalized - } - None => String::new(), - } -} diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap index 80c57d09d0d..f38b882f044 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭─────────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_credits_and_limits.snap b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_credits_and_limits.snap index 413c362939c..91b71c66368 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_credits_and_limits.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_credits_and_limits.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_monthly_limit.snap b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_monthly_limit.snap index 
53a28c0377f..bc324f13675 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_monthly_limit.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_monthly_limit.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭────────────────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_reasoning_details.snap b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_reasoning_details.snap index 62c5eb52ff9..f5f79f16d34 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_reasoning_details.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_reasoning_details.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_empty_limits_message.snap b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_empty_limits_message.snap index ee5840f3102..9f97bd6443f 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_empty_limits_message.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_empty_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel 
(v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_missing_limits_message.snap b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_missing_limits_message.snap index ee5840f3102..9f97bd6443f 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_missing_limits_message.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_missing_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_stale_limits_message.snap b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_stale_limits_message.snap index 91cce793770..d2dc4e08b23 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_stale_limits_message.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_stale_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_truncates_in_narrow_terminal.snap b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_truncates_in_narrow_terminal.snap index 9ca461bdbec..16ee2321fcd 100644 --- 
a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_truncates_in_narrow_terminal.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_truncates_in_narrow_terminal.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭────────────────────────────────────────────────────────────────────╮ -│ >_ Codexel (v0.1.3) │ +│ >_ Codexel (v0.1.4) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status_indicator_widget.rs b/codex-rs/tui2/src/status_indicator_widget.rs index f391b267ac9..ebf943b8194 100644 --- a/codex-rs/tui2/src/status_indicator_widget.rs +++ b/codex-rs/tui2/src/status_indicator_widget.rs @@ -10,7 +10,11 @@ use ratatui::buffer::Buffer; use ratatui::layout::Rect; use ratatui::style::Stylize; use ratatui::text::Line; +use ratatui::text::Span; +use ratatui::text::Text; +use ratatui::widgets::Paragraph; use ratatui::widgets::WidgetRef; +use unicode_width::UnicodeWidthStr; use crate::app_event::AppEvent; use crate::app_event_sender::AppEventSender; @@ -18,11 +22,18 @@ use crate::exec_cell::spinner; use crate::key_hint; use crate::render::renderable::Renderable; use crate::shimmer::shimmer_spans; +use crate::text_formatting::capitalize_first; use crate::tui::FrameRequester; +use crate::wrapping::RtOptions; +use crate::wrapping::word_wrap_lines; + +const DETAILS_MAX_LINES: usize = 3; +const DETAILS_PREFIX: &str = " └ "; pub(crate) struct StatusIndicatorWidget { /// Animated header text (defaults to "Working"). header: String, + details: Option, detail_lines: Vec>, show_interrupt_hint: bool, @@ -59,6 +70,7 @@ impl StatusIndicatorWidget { ) -> Self { Self { header: String::from("Working"), + details: None, detail_lines: Vec::new(), show_interrupt_hint: true, elapsed_running: Duration::ZERO, @@ -80,6 +92,13 @@ impl StatusIndicatorWidget { self.header = header; } + /// Update the details text shown below the header. 
+ pub(crate) fn update_details(&mut self, details: Option) { + self.details = details + .filter(|details| !details.is_empty()) + .map(|details| capitalize_first(details.trim_start())); + } + pub(crate) fn set_detail_lines(&mut self, lines: Vec>) { self.detail_lines = lines; } @@ -93,6 +112,11 @@ impl StatusIndicatorWidget { &self.header } + #[cfg(test)] + pub(crate) fn details(&self) -> Option<&str> { + self.details.as_deref() + } + pub(crate) fn set_interrupt_hint_visible(&mut self, visible: bool) { self.show_interrupt_hint = visible; } @@ -142,11 +166,44 @@ impl StatusIndicatorWidget { pub fn elapsed_seconds(&self) -> u64 { self.elapsed_seconds_at(Instant::now()) } + + /// Wrap the details text into a fixed width and return the lines, truncating if necessary. + fn wrapped_details_lines(&self, width: u16) -> Vec> { + let Some(details) = self.details.as_deref() else { + return Vec::new(); + }; + if width == 0 { + return Vec::new(); + } + + let prefix_width = UnicodeWidthStr::width(DETAILS_PREFIX); + let opts = RtOptions::new(usize::from(width)) + .initial_indent(Line::from(DETAILS_PREFIX.dim())) + .subsequent_indent(Line::from(Span::from(" ".repeat(prefix_width)).dim())) + .break_words(true); + + let mut out = word_wrap_lines(details.lines().map(|line| vec![line.dim()]), opts); + + if out.len() > DETAILS_MAX_LINES { + out.truncate(DETAILS_MAX_LINES); + let content_width = usize::from(width).saturating_sub(prefix_width).max(1); + let max_base_len = content_width.saturating_sub(1); + if let Some(last) = out.last_mut() + && let Some(span) = last.spans.last_mut() + { + let trimmed: String = span.content.as_ref().chars().take(max_base_len).collect(); + *span = format!("{trimmed}…").dim(); + } + } + + out + } } impl Renderable for StatusIndicatorWidget { - fn desired_height(&self, _width: u16) -> u16 { - 1u16.saturating_add(self.detail_lines.len().try_into().unwrap_or(u16::MAX)) + fn desired_height(&self, width: u16) -> u16 { + 
1u16.saturating_add(u16::try_from(self.wrapped_details_lines(width).len()).unwrap_or(0)) + .saturating_add(u16::try_from(self.detail_lines.len()).unwrap_or(u16::MAX)) } fn render(&self, area: Rect, buf: &mut Buffer) { @@ -180,23 +237,21 @@ impl Renderable for StatusIndicatorWidget { spans.push(format!("({pretty_elapsed})").dim()); } - let mut row = area; - row.height = 1; - Line::from(spans).render_ref(row, buf); - - for (idx, line) in self.detail_lines.iter().enumerate() { - let y = area.y.saturating_add((idx as u16).saturating_add(1)); - if y >= area.y.saturating_add(area.height) { - break; - } - let detail_area = Rect { - x: area.x, - y, - width: area.width, - height: 1, - }; - line.render_ref(detail_area, buf); + let mut lines = Vec::new(); + lines.push(Line::from(spans)); + let mut remaining = usize::from(area.height.saturating_sub(1)); + if remaining > 0 { + // If there is enough space, add the details lines below the header. + let details = self.wrapped_details_lines(area.width); + let take = details.len().min(remaining); + lines.extend(details.into_iter().take(take)); + remaining = remaining.saturating_sub(take); } + if remaining > 0 { + lines.extend(self.detail_lines.iter().take(remaining).cloned()); + } + + Paragraph::new(Text::from(lines)).render_ref(area, buf); } } @@ -255,6 +310,27 @@ mod tests { insta::assert_snapshot!(terminal.backend()); } + #[test] + fn renders_wrapped_details_panama_two_lines() { + let (tx_raw, _rx) = unbounded_channel::(); + let tx = AppEventSender::new(tx_raw); + let mut w = StatusIndicatorWidget::new(tx, crate::tui::FrameRequester::test_dummy(), false); + w.update_details(Some("A man a plan a canal panama".to_string())); + w.set_interrupt_hint_visible(false); + + // Freeze time-dependent rendering (elapsed + spinner) to keep the snapshot stable. 
+ w.is_paused = true; + w.elapsed_running = Duration::ZERO; + + // Prefix is 4 columns, so a width of 30 yields a content width of 26: one column + // short of fitting the whole phrase (27 cols), forcing exactly one wrap without ellipsis. + let mut terminal = Terminal::new(TestBackend::new(30, 3)).expect("terminal"); + terminal + .draw(|f| w.render(f.area(), f.buffer_mut())) + .expect("draw"); + insta::assert_snapshot!(terminal.backend()); + } + #[test] fn timer_pauses_when_requested() { let (tx_raw, _rx) = unbounded_channel::(); @@ -276,4 +352,20 @@ mod tests { let after_resume = widget.elapsed_seconds_at(baseline + Duration::from_secs(13)); assert_eq!(after_resume, before_pause + 3); } + + #[test] + fn details_overflow_adds_ellipsis() { + let (tx_raw, _rx) = unbounded_channel::(); + let tx = AppEventSender::new(tx_raw); + let mut w = StatusIndicatorWidget::new(tx, crate::tui::FrameRequester::test_dummy(), true); + w.update_details(Some("abcd abcd abcd abcd".to_string())); + + let lines = w.wrapped_details_lines(6); + assert_eq!(lines.len(), DETAILS_MAX_LINES); + let last = lines.last().expect("expected last details line"); + assert!( + last.spans[1].content.as_ref().ends_with("…"), + "expected ellipsis in last line: {last:?}" + ); + } } diff --git a/codex-rs/tui2/src/test_backend.rs b/codex-rs/tui2/src/test_backend.rs index a5460af2e09..47f17e6b93a 100644 --- a/codex-rs/tui2/src/test_backend.rs +++ b/codex-rs/tui2/src/test_backend.rs @@ -25,6 +25,7 @@ pub struct VT100Backend { impl VT100Backend { /// Creates a new `TestBackend` with the specified width and height. 
pub fn new(width: u16, height: u16) -> Self { + crossterm::style::Colored::set_ansi_color_disabled(false); Self { crossterm_backend: CrosstermBackend::new(vt100::Parser::new(height, width, 0)), } diff --git a/codex-rs/tui2/src/text_formatting.rs b/codex-rs/tui2/src/text_formatting.rs index 91d1c84f22d..f747b2fef2a 100644 --- a/codex-rs/tui2/src/text_formatting.rs +++ b/codex-rs/tui2/src/text_formatting.rs @@ -2,6 +2,18 @@ use unicode_segmentation::UnicodeSegmentation; use unicode_width::UnicodeWidthChar; use unicode_width::UnicodeWidthStr; +pub(crate) fn capitalize_first(input: &str) -> String { + let mut chars = input.chars(); + match chars.next() { + Some(first) => { + let mut capitalized = first.to_uppercase().collect::(); + capitalized.push_str(chars.as_str()); + capitalized + } + None => String::new(), + } +} + /// Truncate a tool result to fit within the given height and width. If the text is valid JSON, we format it in a compact way before truncating. /// This is a best-effort approach that may not work perfectly for text where 1 grapheme is rendered as multiple terminal cells. pub(crate) fn format_and_truncate_tool_result( diff --git a/codex-rs/tui2/src/transcript_copy.rs b/codex-rs/tui2/src/transcript_copy.rs new file mode 100644 index 00000000000..141ce901f74 --- /dev/null +++ b/codex-rs/tui2/src/transcript_copy.rs @@ -0,0 +1,801 @@ +//! Converting a transcript selection to clipboard text. +//! +//! Copy is driven by a content-relative selection (`TranscriptSelectionPoint`), +//! but the transcript is rendered with styling and wrapping for the TUI. This +//! module reconstructs clipboard text from the rendered transcript lines while +//! preserving user expectations: +//! +//! - Soft-wrapped prose is treated as a single logical line when copying. +//! - Code blocks preserve meaningful indentation. +//! - Markdown “source markers” are emitted when copying (backticks for inline +//! code, triple-backtick fences for code blocks) even if the on-screen +//! 
rendering is styled differently. +//! +//! ## Inputs and invariants +//! +//! Clipboard reconstruction is performed over the same *visual lines* that are +//! rendered in the transcript viewport: +//! +//! - `lines`: wrapped transcript `Line`s, including the gutter spans. +//! - `joiner_before`: a parallel vector describing which wrapped lines are +//! *soft wrap* continuations (and what to insert at the wrap boundary). +//! - `(line_index, column)` selection points in *content space* (columns exclude +//! the gutter). +//! +//! Callers must keep `lines` and `joiner_before` aligned. In practice, `App` +//! obtains both from `transcript_render`, which itself builds from each cell's +//! `HistoryCell::transcript_lines_with_joiners` implementation. +//! +//! ## Style-derived Markdown cues +//! +//! For fidelity, we copy Markdown source markers even though the viewport may +//! render content using styles instead of literal characters. Today, the copy +//! logic derives "inline code" and "code block" boundaries from the styling we +//! apply during rendering (currently cyan spans/lines). +//! +//! If transcript styling changes (for example, if code blocks stop using cyan), +//! update `is_code_block_line` and [`span_is_inline_code`] so clipboard output +//! continues to match user expectations. +//! +//! The caller can choose whether copy covers only the visible viewport range +//! (by passing `visible_start..visible_end`) or spans the entire transcript +//! (by passing `0..lines.len()`). +//! +//! UI affordances (keybinding detection and the on-screen "copy" pill) live in +//! `transcript_copy_ui`. + +use ratatui::text::Line; +use ratatui::text::Span; + +use crate::history_cell::HistoryCell; +use crate::transcript_selection::TRANSCRIPT_GUTTER_COLS; +use crate::transcript_selection::TranscriptSelection; +use crate::transcript_selection::TranscriptSelectionPoint; +use std::sync::Arc; + +/// Render the current transcript selection into clipboard text. 
+/// +/// This is the `App`-level helper: it rebuilds wrapped transcript lines using +/// the same rules as the on-screen viewport and then applies +/// [`selection_to_copy_text`] across the full transcript range (including +/// off-screen lines). +pub(crate) fn selection_to_copy_text_for_cells( + cells: &[Arc], + selection: TranscriptSelection, + width: u16, +) -> Option { + let (anchor, head) = selection.anchor.zip(selection.head)?; + + let transcript = crate::transcript_render::build_wrapped_transcript_lines(cells, width); + let total_lines = transcript.lines.len(); + if total_lines == 0 { + return None; + } + + selection_to_copy_text( + &transcript.lines, + &transcript.joiner_before, + anchor, + head, + 0, + total_lines, + width, + ) +} + +/// Render the selected region into clipboard text. +/// +/// `lines` must be the wrapped transcript lines as rendered by the TUI, +/// including the leading gutter spans. `start`/`end` columns are expressed in +/// content-space (excluding the gutter), and will be ordered internally if the +/// endpoints are reversed. +/// +/// `joiner_before[i]` is the exact string to insert *before* `lines[i]` when +/// it is a continuation of a soft-wrapped prose line. This enables copy to +/// treat soft-wrapped prose as a single logical line. +/// +/// Notes: +/// +/// - For code/preformatted runs, copy is permitted to extend beyond the +/// viewport width when the user selects “to the right edge”, so we avoid +/// producing truncated logical lines in narrow terminals. +/// - Markdown markers are derived from render-time styles (see module docs). +/// - Column math is display-width-aware (wide glyphs count as multiple columns). +/// +/// Returns `None` if the inputs imply an empty selection or if `width` is too +/// small to contain the gutter plus at least one content column. 
+pub(crate) fn selection_to_copy_text( + lines: &[Line<'static>], + joiner_before: &[Option], + start: TranscriptSelectionPoint, + end: TranscriptSelectionPoint, + visible_start: usize, + visible_end: usize, + width: u16, +) -> Option { + use ratatui::style::Color; + + if width <= TRANSCRIPT_GUTTER_COLS { + return None; + } + + // Selection points are expressed in content-relative coordinates and may be provided in either + // direction (dragging "backwards"). Normalize to a forward `(start, end)` pair so the rest of + // the logic can assume `start <= end`. + let (start, end) = order_points(start, end); + if start == end { + return None; + } + + // Transcript `Line`s include a left gutter (bullet/prefix space). Selection columns exclude the + // gutter, so we translate selection columns to absolute columns by adding `base_x`. + let base_x = TRANSCRIPT_GUTTER_COLS; + let max_x = width.saturating_sub(1); + + let mut out = String::new(); + let mut prev_selected_line: Option = None; + + // We emit Markdown fences around runs of code/preformatted visual lines so: + // - the clipboard captures source-style markers (` ``` `) even if the viewport is stylized + // - indentation is preserved and paste is stable in editors + let mut in_code_run = false; + + // `wrote_any` lets us handle separators (newline or soft-wrap joiner) without special-casing + // "first output line" at every decision point. + let mut wrote_any = false; + + for line_index in visible_start..visible_end { + // Only consider lines that intersect the selection's line range. (Selection endpoints are + // clamped elsewhere; if the indices don't exist, `lines.get(...)` returns `None`.) + if line_index < start.line_index || line_index > end.line_index { + continue; + } + + let line = lines.get(line_index)?; + + // Code blocks (and other preformatted content) are detected via styling and copied as + // "verbatim lines" (no inline Markdown re-encoding). 
This also enables special handling for + // narrow terminals: selecting "to the right edge" should copy the full logical line, not a + // viewport-truncated slice. + let is_code_block_line = line.style.fg == Some(Color::Cyan); + + // Flatten the line to compute the rightmost non-space column. We use that to: + // - avoid copying trailing right-margin padding + // - clamp prose selection to the viewport width + let flat = line_to_flat(line); + let text_end = if is_code_block_line { + last_non_space_col(flat.as_str()) + } else { + last_non_space_col(flat.as_str()).map(|c| c.min(max_x)) + }; + + // Convert selection endpoints into a selection range for this specific visual line: + // - first line clamps the start column + // - last line clamps the end column + // - intermediate lines select the full line. + let line_start_col = if line_index == start.line_index { + start.column + } else { + 0 + }; + let line_end_col = if line_index == end.line_index { + end.column + } else { + max_x.saturating_sub(base_x) + }; + + let row_sel_start = base_x.saturating_add(line_start_col).min(max_x); + + // For code/preformatted lines, treat "selection ends at the viewport edge" as a special + // "copy to end of logical line" case. This prevents narrow terminals from producing + // truncated clipboard content when the user drags to the right edge. + let row_sel_end = if is_code_block_line && line_end_col >= max_x.saturating_sub(base_x) { + u16::MAX + } else { + base_x.saturating_add(line_end_col).min(max_x) + }; + if row_sel_start > row_sel_end { + continue; + } + + let selected_line = if let Some(text_end) = text_end { + let from_col = row_sel_start.max(base_x); + let to_col = row_sel_end.min(text_end); + if from_col > to_col { + Line::default().style(line.style) + } else { + slice_line_by_cols(line, from_col, to_col) + } + } else { + Line::default().style(line.style) + }; + + // Convert the selected `Line` into Markdown source: + // - For prose: wrap inline-code spans in backticks. 
+ // - For code blocks: return the raw flat text so we preserve indentation/spacing. + let line_text = line_to_markdown(&selected_line, is_code_block_line); + + // Track transitions into/out of code/preformatted runs and emit triple-backtick fences. + // We always separate a code run from prior prose with a newline. + if is_code_block_line && !in_code_run { + if wrote_any { + out.push('\n'); + } + out.push_str("```"); + out.push('\n'); + in_code_run = true; + prev_selected_line = None; + wrote_any = true; + } else if !is_code_block_line && in_code_run { + out.push('\n'); + out.push_str("```"); + out.push('\n'); + in_code_run = false; + prev_selected_line = None; + wrote_any = true; + } + + // When copying inside a code run, every selected visual line becomes a literal line inside + // the fence (no soft-wrap joining). We preserve explicit blank lines by writing empty + // strings as a line. + if in_code_run { + if wrote_any && (!out.ends_with('\n') || prev_selected_line.is_some()) { + out.push('\n'); + } + out.push_str(line_text.as_str()); + prev_selected_line = Some(line_index); + wrote_any = true; + continue; + } + + // Prose path: + // - If this line is a soft-wrap continuation of the previous selected line, insert the + // recorded joiner (often spaces) instead of a newline. + // - Otherwise, insert a newline to preserve hard breaks. + if wrote_any { + let joiner = joiner_before.get(line_index).cloned().unwrap_or(None); + if prev_selected_line == Some(line_index.saturating_sub(1)) + && let Some(joiner) = joiner + { + out.push_str(joiner.as_str()); + } else { + out.push('\n'); + } + } + + out.push_str(line_text.as_str()); + prev_selected_line = Some(line_index); + wrote_any = true; + } + + if in_code_run { + out.push('\n'); + out.push_str("```"); + } + + (!out.is_empty()).then_some(out) +} + +/// Order two selection endpoints into `(start, end)` in transcript order. 
+/// +/// Dragging can produce reversed endpoints; callers typically want a normalized range before +/// iterating visual lines. +fn order_points( + a: TranscriptSelectionPoint, + b: TranscriptSelectionPoint, +) -> (TranscriptSelectionPoint, TranscriptSelectionPoint) { + if (b.line_index < a.line_index) || (b.line_index == a.line_index && b.column < a.column) { + (b, a) + } else { + (a, b) + } +} + +/// Flatten a styled `Line` into its plain text content. +/// +/// This is used for cursor/column arithmetic and for emitting plain-text code lines. +fn line_to_flat(line: &Line<'_>) -> String { + line.spans + .iter() + .map(|s| s.content.as_ref()) + .collect::() +} + +/// Return the last non-space *display column* in `flat` (inclusive). +/// +/// This is display-width-aware, so wide glyphs (e.g. CJK) advance by more than one column. +/// +/// Rationale: transcript rendering often pads out to the viewport width; copy should avoid +/// including that right-margin whitespace. +fn last_non_space_col(flat: &str) -> Option { + use unicode_width::UnicodeWidthChar; + + let mut col: u16 = 0; + let mut last: Option = None; + for ch in flat.chars() { + let w = UnicodeWidthChar::width(ch).unwrap_or(0) as u16; + if ch != ' ' { + let end = col.saturating_add(w.saturating_sub(1)); + last = Some(end); + } + col = col.saturating_add(w); + } + last +} + +/// Map a display-column range to a UTF-8 byte range within `flat`. +/// +/// The returned range is suitable for slicing `flat` and for slicing the original `Span` strings +/// (once translated into span-local offsets). +/// +/// This walks Unicode scalar values and advances by display width so callers can slice based on the +/// same column semantics the selection model uses. +fn byte_range_for_cols(flat: &str, start_col: u16, end_col: u16) -> Option> { + use unicode_width::UnicodeWidthChar; + + // We translate selection columns (display columns, not bytes) into a UTF-8 byte range. 
This is + // intentionally Unicode-width aware: wide glyphs cover multiple columns but occupy one `char` + // and several bytes. + // + // Strategy: + // - Walk `flat` by `char_indices()` while tracking the current display column. + // - The start byte is the first char whose rendered columns intersect `start_col`. + // - The end byte is the end of the last char whose rendered columns intersect `end_col`. + let mut col: u16 = 0; + let mut start_byte: Option = None; + let mut end_byte: Option = None; + + for (idx, ch) in flat.char_indices() { + let w = UnicodeWidthChar::width(ch).unwrap_or(0) as u16; + let end = col.saturating_add(w.saturating_sub(1)); + + // Start is inclusive: select the first glyph whose right edge reaches the start column. + if start_byte.is_none() && end >= start_col { + start_byte = Some(idx); + } + + // End is inclusive in column space; keep extending end byte while we're still at/before + // `end_col`. This includes a wide glyph even if it starts before `end_col` but ends after. + if col <= end_col { + end_byte = Some(idx + ch.len_utf8()); + } + + col = col.saturating_add(w); + if col > end_col && start_byte.is_some() { + break; + } + } + + match (start_byte, end_byte) { + (Some(s), Some(e)) if e >= s => Some(s..e), + _ => None, + } +} + +/// Slice a styled `Line` by display columns, preserving per-span style. +/// +/// This is the core "selection → styled substring" helper used before Markdown re-encoding. It +/// avoids mixing styles across spans by slicing each contributing span independently, then +/// reassembling them into a new `Line` with the original line-level style. +fn slice_line_by_cols(line: &Line<'static>, start_col: u16, end_col: u16) -> Line<'static> { + // `Line` spans store independent string slices with their own styles. To slice by columns while + // preserving styling, we: + // 1) Flatten the line and compute the desired UTF-8 byte range in the flattened string. 
+ // 2) Compute each span's byte range within the flattened string. + // 3) Intersect the selection range with each span range and slice per-span, preserving styles. + let flat = line_to_flat(line); + let mut span_bounds: Vec<(std::ops::Range, ratatui::style::Style)> = Vec::new(); + let mut acc = 0usize; + for s in &line.spans { + let start = acc; + let text = s.content.as_ref(); + acc += text.len(); + span_bounds.push((start..acc, s.style)); + } + + let Some(range) = byte_range_for_cols(flat.as_str(), start_col, end_col) else { + return Line::default().style(line.style); + }; + + // Translate the flattened byte range back into (span-local) slices. + let start_byte = range.start; + let end_byte = range.end; + let mut spans: Vec> = Vec::new(); + for (i, (r, style)) in span_bounds.iter().enumerate() { + let s = r.start; + let e = r.end; + if e <= start_byte { + continue; + } + if s >= end_byte { + break; + } + let seg_start = start_byte.max(s); + let seg_end = end_byte.min(e); + if seg_end > seg_start { + let local_start = seg_start - s; + let local_end = seg_end - s; + let content = line.spans[i].content.as_ref(); + spans.push(ratatui::text::Span { + style: *style, + content: content[local_start..local_end].to_string().into(), + }); + } + if e >= end_byte { + break; + } + } + Line::from(spans).style(line.style) +} + +/// Whether a span should be treated as "inline code" when reconstructing Markdown. +/// +/// TUI2 renders inline code using a cyan foreground. Links also use cyan, but are underlined, so we +/// exclude underlined cyan spans to avoid wrapping links in backticks. +fn span_is_inline_code(span: &Span<'_>) -> bool { + use ratatui::style::Color; + + span.style.fg == Some(Color::Cyan) + && !span + .style + .add_modifier + .contains(ratatui::style::Modifier::UNDERLINED) +} + +/// Convert a selected, styled `Line` back into Markdown-ish source text. +/// +/// - For prose: wraps runs of inline-code spans in backticks to preserve the source marker. 
+/// - For code blocks: emits the raw flat text (no additional escaping), since the entire run will +/// be wrapped in triple-backtick fences by the caller. +fn line_to_markdown(line: &Line<'static>, is_code_block: bool) -> String { + if is_code_block { + return line_to_flat(line); + } + + let mut out = String::new(); + let mut in_code = false; + for span in &line.spans { + let is_code = span_is_inline_code(span); + if is_code && !in_code { + out.push('`'); + in_code = true; + } else if !is_code && in_code { + out.push('`'); + in_code = false; + } + out.push_str(span.content.as_ref()); + } + if in_code { + out.push('`'); + } + out +} + +#[cfg(test)] +mod tests { + use super::*; + use pretty_assertions::assert_eq; + use ratatui::style::Color; + use ratatui::style::Style; + use ratatui::style::Stylize; + + #[test] + fn selection_to_copy_text_returns_none_for_zero_content_width() { + let lines = vec![Line::from("• Hello")]; + let joiner_before = vec![None]; + let start = TranscriptSelectionPoint { + line_index: 0, + column: 0, + }; + let end = TranscriptSelectionPoint { + line_index: 0, + column: 1, + }; + + assert_eq!( + selection_to_copy_text( + &lines, + &joiner_before, + start, + end, + 0, + lines.len(), + TRANSCRIPT_GUTTER_COLS, + ), + None + ); + } + + #[test] + fn selection_to_copy_text_returns_none_for_empty_selection_point() { + let lines = vec![Line::from("• Hello")]; + let joiner_before = vec![None]; + let pt = TranscriptSelectionPoint { + line_index: 0, + column: 0, + }; + + assert_eq!( + selection_to_copy_text(&lines, &joiner_before, pt, pt, 0, lines.len(), 20), + None + ); + } + + #[test] + fn selection_to_copy_text_orders_reversed_endpoints() { + let lines = vec![Line::from("• Hello world")]; + let joiner_before = vec![None]; + + let start = TranscriptSelectionPoint { + line_index: 0, + column: 10, + }; + let end = TranscriptSelectionPoint { + line_index: 0, + column: 6, + }; + + let out = selection_to_copy_text(&lines, &joiner_before, start, end, 0, 1, 
80) + .expect("expected text"); + + assert_eq!(out, "world"); + } + + #[test] + fn copy_selection_soft_wrap_joins_without_newline() { + let lines = vec![Line::from("• Hello"), Line::from(" world")]; + let joiner_before = vec![None, Some(" ".to_string())]; + let start = TranscriptSelectionPoint { + line_index: 0, + column: 0, + }; + let end = TranscriptSelectionPoint { + line_index: 1, + column: 100, + }; + + let out = selection_to_copy_text(&lines, &joiner_before, start, end, 0, lines.len(), 20) + .expect("expected text"); + + assert_eq!(out, "Hello world"); + } + + #[test] + fn copy_selection_wraps_inline_code_in_backticks() { + let lines = vec![Line::from(vec![ + "• ".into(), + "Use ".into(), + ratatui::text::Span::from("foo()").style(Style::new().fg(Color::Cyan)), + " now".into(), + ])]; + let joiner_before = vec![None]; + let start = TranscriptSelectionPoint { + line_index: 0, + column: 0, + }; + let end = TranscriptSelectionPoint { + line_index: 0, + column: 100, + }; + + let out = selection_to_copy_text(&lines, &joiner_before, start, end, 0, 1, 80) + .expect("expected text"); + + assert_eq!(out, "Use `foo()` now"); + } + + #[test] + fn selection_to_copy_text_for_cells_reconstructs_full_code_line_beyond_viewport() { + #[derive(Debug)] + struct FakeCell { + lines: Vec>, + joiner_before: Vec>, + } + + impl HistoryCell for FakeCell { + fn display_lines(&self, _width: u16) -> Vec> { + self.lines.clone() + } + + fn transcript_lines_with_joiners( + &self, + _width: u16, + ) -> crate::history_cell::TranscriptLinesWithJoiners { + crate::history_cell::TranscriptLinesWithJoiners { + lines: self.lines.clone(), + joiner_before: self.joiner_before.clone(), + } + } + } + + let style = Style::new().fg(Color::Cyan); + let cell = FakeCell { + lines: vec![Line::from("• 0123456789ABCDEFGHIJ").style(style)], + joiner_before: vec![None], + }; + let cells: Vec> = vec![std::sync::Arc::new(cell)]; + + let width: u16 = 12; + let max_x = width.saturating_sub(1); + let viewport_edge_col 
= max_x.saturating_sub(TRANSCRIPT_GUTTER_COLS); + + let selection = TranscriptSelection { + anchor: Some(TranscriptSelectionPoint::new(0, 0)), + head: Some(TranscriptSelectionPoint::new(0, viewport_edge_col)), + }; + + let out = + selection_to_copy_text_for_cells(&cells, selection, width).expect("expected text"); + assert_eq!(out, "```\n 0123456789ABCDEFGHIJ\n```"); + } + + #[test] + fn order_points_orders_by_line_then_column() { + let a = TranscriptSelectionPoint::new(2, 5); + let b = TranscriptSelectionPoint::new(1, 10); + assert_eq!(order_points(a, b), (b, a)); + + let a = TranscriptSelectionPoint::new(1, 5); + let b = TranscriptSelectionPoint::new(1, 10); + assert_eq!(order_points(a, b), (a, b)); + } + + #[test] + fn line_to_flat_concatenates_spans() { + let line = Line::from(vec!["a".into(), "b".into(), "c".into()]); + assert_eq!(line_to_flat(&line), "abc"); + } + + #[test] + fn last_non_space_col_counts_display_width() { + // "コ" is width 2, so "コX" occupies columns 0..=2. + assert_eq!(last_non_space_col("コX"), Some(2)); + assert_eq!(last_non_space_col("a "), Some(0)); + assert_eq!(last_non_space_col(" "), None); + } + + #[test] + fn byte_range_for_cols_maps_columns_to_utf8_bytes() { + let flat = "abcd"; + let range = byte_range_for_cols(flat, 1, 2).expect("range"); + assert_eq!(&flat[range], "bc"); + + let flat = "コX"; + let range = byte_range_for_cols(flat, 0, 2).expect("range"); + assert_eq!(&flat[range], "コX"); + } + + #[test] + fn slice_line_by_cols_preserves_span_styles() { + let line = Line::from(vec![ + "• ".into(), + "Hello".red(), + " ".into(), + "world".green(), + ]); + + // Slice "llo wo" (crosses span boundaries). 
+ let sliced = slice_line_by_cols(&line, 4, 9); + assert_eq!(line_to_flat(&sliced), "llo wo"); + assert_eq!(sliced.spans.len(), 3); + assert_eq!(sliced.spans[0].content.as_ref(), "llo"); + assert_eq!(sliced.spans[0].style.fg, Some(Color::Red)); + assert_eq!(sliced.spans[1].content.as_ref(), " "); + assert_eq!(sliced.spans[2].content.as_ref(), "wo"); + assert_eq!(sliced.spans[2].style.fg, Some(Color::Green)); + } + + #[test] + fn span_is_inline_code_excludes_underlined_cyan() { + let inline_code = Span::from("x").style(Style::new().fg(Color::Cyan)); + assert!(span_is_inline_code(&inline_code)); + + let link_like = Span::from("x").style(Style::new().fg(Color::Cyan).underlined()); + assert!(!span_is_inline_code(&link_like)); + + let other = Span::from("x").style(Style::new().fg(Color::Green)); + assert!(!span_is_inline_code(&other)); + } + + #[test] + fn line_to_markdown_wraps_contiguous_inline_code_spans() { + let line = Line::from(vec![ + "Use ".into(), + Span::from("foo").style(Style::new().fg(Color::Cyan)), + Span::from("()").style(Style::new().fg(Color::Cyan)), + " now".into(), + ]); + assert_eq!(line_to_markdown(&line, false), "Use `foo()` now"); + } + + #[test] + fn copy_selection_preserves_wide_glyphs() { + let lines = vec![Line::from("• コX")]; + let joiner_before = vec![None]; + + let start = TranscriptSelectionPoint { + line_index: 0, + column: 0, + }; + let end = TranscriptSelectionPoint { + line_index: 0, + column: 2, + }; + + let out = selection_to_copy_text(&lines, &joiner_before, start, end, 0, 1, 80) + .expect("expected text"); + + assert_eq!(out, "コX"); + } + + #[test] + fn copy_selection_wraps_code_block_in_fences_and_preserves_indent() { + let style = Style::new().fg(Color::Cyan); + let lines = vec![ + Line::from("• fn main() {}").style(style), + Line::from(" println!(\"hi\");").style(style), + ]; + let joiner_before = vec![None, None]; + let start = TranscriptSelectionPoint { + line_index: 0, + column: 0, + }; + let end = TranscriptSelectionPoint { 
+ line_index: 1, + column: 100, + }; + + let out = selection_to_copy_text(&lines, &joiner_before, start, end, 0, lines.len(), 80) + .expect("expected text"); + + assert_eq!(out, "```\n fn main() {}\n println!(\"hi\");\n```"); + } + + #[test] + fn copy_selection_code_block_end_col_at_viewport_edge_copies_full_line() { + let style = Style::new().fg(Color::Cyan); + let lines = vec![Line::from("• 0123456789ABCDEFGHIJ").style(style)]; + let joiner_before = vec![None]; + + let width: u16 = 12; + let max_x = width.saturating_sub(1); + let viewport_edge_col = max_x.saturating_sub(TRANSCRIPT_GUTTER_COLS); + + let start = TranscriptSelectionPoint { + line_index: 0, + column: 0, + }; + let end = TranscriptSelectionPoint { + line_index: 0, + column: viewport_edge_col, + }; + + let out = selection_to_copy_text(&lines, &joiner_before, start, end, 0, 1, width) + .expect("expected text"); + + assert_eq!(out, "```\n 0123456789ABCDEFGHIJ\n```"); + } + + #[test] + fn copy_selection_code_block_end_col_before_viewport_edge_copies_partial_line() { + let style = Style::new().fg(Color::Cyan); + let lines = vec![Line::from("• 0123456789ABCDEFGHIJ").style(style)]; + let joiner_before = vec![None]; + + let width: u16 = 12; + + let start = TranscriptSelectionPoint { + line_index: 0, + column: 0, + }; + let end = TranscriptSelectionPoint { + line_index: 0, + column: 7, + }; + + let out = selection_to_copy_text(&lines, &joiner_before, start, end, 0, 1, width) + .expect("expected text"); + + assert_eq!(out, "```\n 0123\n```"); + } +} diff --git a/codex-rs/tui2/src/transcript_copy_ui.rs b/codex-rs/tui2/src/transcript_copy_ui.rs new file mode 100644 index 00000000000..6c852c23416 --- /dev/null +++ b/codex-rs/tui2/src/transcript_copy_ui.rs @@ -0,0 +1,332 @@ +//! Transcript-selection copy UX helpers. +//! +//! # Background +//! +//! TUI2 owns a logical transcript viewport (with history that can live outside the visible buffer), +//! plus its own selection model. 
Terminal-native selection/copy does not work reliably in this +//! setup because: +//! +//! - The selection can extend outside the current viewport, while terminal selection can't. +//! - We want to exclude non-content regions (like the left gutter) from copied text. +//! - The terminal may intercept some keybindings before the app ever sees them. +//! +//! This module centralizes: +//! +//! - The effective "copy selection" shortcut (so the footer and affordance stay in sync). +//! - Key matching for triggering copy (with terminal quirks handled in one place). +//! - A small on-screen clickable "⧉ copy …" pill rendered near the current selection. +//! +//! # VS Code shortcut rationale +//! +//! VS Code's integrated terminal commonly captures `Ctrl+Shift+C` for its own copy behavior and +//! does not forward the keypress to applications running inside the terminal. Since we can't +//! observe it via crossterm, we advertise and accept `Ctrl+Y` in that environment. +//! +//! Clipboard text reconstruction (preserving indentation, joining soft-wrapped +//! prose, and emitting Markdown source markers) lives in `transcript_copy`. + +use codex_core::terminal::TerminalName; +use codex_core::terminal::terminal_info; +use crossterm::event::KeyCode; +use crossterm::event::KeyModifiers; +use ratatui::buffer::Buffer; +use ratatui::layout::Rect; +use ratatui::style::Color; +use ratatui::style::Modifier; +use ratatui::style::Style; +use ratatui::text::Line; +use ratatui::text::Span; +use ratatui::widgets::Paragraph; +use ratatui::widgets::WidgetRef; +use unicode_width::UnicodeWidthStr; + +use crate::key_hint; +use crate::key_hint::KeyBinding; +use crate::transcript_selection::TRANSCRIPT_GUTTER_COLS; + +#[derive(Clone, Copy, Debug, Eq, PartialEq)] +/// The shortcut we advertise and accept for "copy selection". +pub(crate) enum CopySelectionShortcut { + CtrlShiftC, + CtrlY, +} + +/// Returns the best shortcut to advertise/accept for "copy selection". 
+/// +/// VS Code's integrated terminal typically captures `Ctrl+Shift+C` for its own copy behavior and +/// does not forward it to applications running inside the terminal. That means we can't reliably +/// observe it via crossterm, so we use `Ctrl+Y` there. +/// +/// We use both the terminal name (when available) and `VSCODE_IPC_HOOK_CLI` because the terminal +/// name can be `Unknown` early during startup in some environments. +pub(crate) fn detect_copy_selection_shortcut() -> CopySelectionShortcut { + let info = terminal_info(); + if info.name == TerminalName::VsCode || std::env::var_os("VSCODE_IPC_HOOK_CLI").is_some() { + return CopySelectionShortcut::CtrlY; + } + CopySelectionShortcut::CtrlShiftC +} + +pub(crate) fn key_binding_for(shortcut: CopySelectionShortcut) -> KeyBinding { + match shortcut { + CopySelectionShortcut::CtrlShiftC => key_hint::ctrl_shift(KeyCode::Char('c')), + CopySelectionShortcut::CtrlY => key_hint::ctrl(KeyCode::Char('y')), + } +} + +/// Whether the given `(ch, modifiers)` should trigger "copy selection". +/// +/// Terminal/event edge cases: +/// - Some terminals report `Ctrl+Shift+C` as `Char('C')` with `CONTROL` only, baking the shift into +/// the character. We accept both `c` and `C` in `CtrlShiftC` mode (including VS Code). +/// - Some environments intercept `Ctrl+Shift+C` before the app sees it. We keep `Ctrl+Y` as a +/// fallback in `CtrlShiftC` mode to preserve a working key path. +pub(crate) fn is_copy_selection_key( + shortcut: CopySelectionShortcut, + ch: char, + modifiers: KeyModifiers, +) -> bool { + if !modifiers.contains(KeyModifiers::CONTROL) { + return false; + } + + match shortcut { + CopySelectionShortcut::CtrlY => ch == 'y' && modifiers == KeyModifiers::CONTROL, + CopySelectionShortcut::CtrlShiftC => { + (matches!(ch, 'c' | 'C') && (modifiers.contains(KeyModifiers::SHIFT) || ch == 'C')) + // Fallback for environments that intercept Ctrl+Shift+C. 
+ || (ch == 'y' && modifiers == KeyModifiers::CONTROL) + } + } +} + +/// UI state for the on-screen copy affordance shown near an active selection. +/// +/// This tracks a `Rect` for hit-testing so we can treat the pill as a clickable button. +#[derive(Debug)] +pub(crate) struct TranscriptCopyUi { + shortcut: CopySelectionShortcut, + dragging: bool, + affordance_rect: Option, +} + +impl TranscriptCopyUi { + /// Creates a new instance using the provided shortcut. + pub(crate) fn new_with_shortcut(shortcut: CopySelectionShortcut) -> Self { + Self { + shortcut, + dragging: false, + affordance_rect: None, + } + } + + pub(crate) fn key_binding(&self) -> KeyBinding { + key_binding_for(self.shortcut) + } + + pub(crate) fn is_copy_key(&self, ch: char, modifiers: KeyModifiers) -> bool { + is_copy_selection_key(self.shortcut, ch, modifiers) + } + + pub(crate) fn set_dragging(&mut self, dragging: bool) { + self.dragging = dragging; + } + + pub(crate) fn clear_affordance(&mut self) { + self.affordance_rect = None; + } + + /// Returns `true` if the last rendered pill contains `(x, y)`. + /// + /// `render_copy_pill()` sets `affordance_rect` and `clear_affordance()` clears it, so callers + /// should treat this as "hit test against the current frame's affordance". + pub(crate) fn hit_test(&self, x: u16, y: u16) -> bool { + self.affordance_rect + .is_some_and(|r| x >= r.x && x < r.right() && y >= r.y && y < r.bottom()) + } + + /// Render the copy "pill" just below the visible end of the selection. + /// + /// Inputs are expressed in logical transcript coordinates: + /// - `anchor`/`head`: `(line_index, column)` in the wrapped transcript (not screen rows). + /// - `view_top`: first logical line index currently visible in `area`. + /// - `total_lines`: total number of logical transcript lines. + /// + /// Placement details / edge cases: + /// - We hide the pill while dragging to avoid accidental clicks during selection updates. 
+ /// - We only render if some part of the selection is visible, and there's room for a line + /// below it inside `area`. + /// - We scan the buffer to find the last non-space cell on each candidate row so the pill can + /// sit "near content", not far to the right past trailing whitespace. + /// + /// Important: this assumes the transcript content has already been rendered into `buf` for the + /// current frame, since the placement logic derives `text_end` by inspecting buffer contents. + pub(crate) fn render_copy_pill( + &mut self, + area: Rect, + buf: &mut Buffer, + anchor: (usize, u16), + head: (usize, u16), + view_top: usize, + total_lines: usize, + ) { + // Reset every frame. If we don't render (e.g. selection is off-screen) we shouldn't keep + // an old hit target around. + self.affordance_rect = None; + + if self.dragging || total_lines == 0 { + return; + } + + // Skip the transcript gutter (line numbers, diff markers, etc.). Selection/copy operates on + // transcript content only. + let base_x = area.x.saturating_add(TRANSCRIPT_GUTTER_COLS); + let max_x = area.right().saturating_sub(1); + if base_x > max_x { + return; + } + + // Normalize to a start/end pair so the rest of the code can assume forward order. + let mut start = anchor; + let mut end = head; + if (end.0 < start.0) || (end.0 == start.0 && end.1 < start.1) { + std::mem::swap(&mut start, &mut end); + } + + // We want to place the pill *near the visible end of the selection*, which means: + // - Find the last visible transcript line that intersects the selection. + // - Find the rightmost selected column on that line (clamped to actual rendered text). + // - Place the pill one row below that point. + let visible_start = view_top; + let visible_end = view_top + .saturating_add(area.height as usize) + .min(total_lines); + let mut last_visible_segment: Option<(u16, u16)> = None; + + for (row_index, line_index) in (visible_start..visible_end).enumerate() { + // Skip lines outside the selection range. 
+ if line_index < start.0 || line_index > end.0 { + continue; + } + + let y = area.y + row_index as u16; + + // Look for the rightmost non-space cell on this row so we can clamp the pill placement + // to real content. (The transcript renderer often pads the row with spaces.) + let mut last_text_x = None; + for x in base_x..=max_x { + let cell = &buf[(x, y)]; + if cell.symbol() != " " { + last_text_x = Some(x); + } + } + + let Some(text_end) = last_text_x else { + continue; + }; + + let line_end_col = if line_index == end.0 { + end.1 + } else { + // For multi-line selections, treat intermediate lines as selected "to the end" so + // the pill doesn't jump left unexpectedly when only the final line has an explicit + // end column. + max_x.saturating_sub(base_x) + }; + + let row_sel_end = base_x.saturating_add(line_end_col).min(max_x); + if row_sel_end < base_x { + continue; + } + + // Clamp the selection end to `text_end` so we don't place the pill far to the right on + // lines that are mostly blank (or padded). + let to_x = row_sel_end.min(text_end); + last_visible_segment = Some((y, to_x)); + } + + // If nothing in the selection is visible, don't show the affordance. + let Some((y, to_x)) = last_visible_segment else { + return; + }; + // Place the pill on the row below the last visible selection segment. + let Some(y) = y.checked_add(1).filter(|y| *y < area.bottom()) else { + return; + }; + + let key_label: Span<'static> = self.key_binding().into(); + let key_label = key_label.content.as_ref().to_string(); + + let pill_text = format!(" ⧉ copy {key_label} "); + let pill_width = UnicodeWidthStr::width(pill_text.as_str()); + if pill_width == 0 || area.width == 0 { + return; + } + + let pill_width = (pill_width as u16).min(area.width); + // Prefer a small gap between the selected content and the pill so we don't visually merge + // into the highlighted selection block. 
+ let desired_x = to_x.saturating_add(2); + let max_start_x = area.right().saturating_sub(pill_width); + let x = if max_start_x < area.x { + area.x + } else { + desired_x.clamp(area.x, max_start_x) + }; + + let pill_area = Rect::new(x, y, pill_width, 1); + let base_style = Style::new().bg(Color::DarkGray); + let icon_style = base_style.fg(Color::Cyan); + let bold_style = base_style.add_modifier(Modifier::BOLD); + + let mut spans: Vec> = vec![ + Span::styled(" ", base_style), + Span::styled("⧉", icon_style), + Span::styled(" ", base_style), + Span::styled("copy", bold_style), + Span::styled(" ", base_style), + Span::styled(key_label, base_style), + ]; + spans.push(Span::styled(" ", base_style)); + + Paragraph::new(vec![Line::from(spans)]).render_ref(pill_area, buf); + self.affordance_rect = Some(pill_area); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use ratatui::buffer::Buffer; + + fn buf_to_string(buf: &Buffer, area: Rect) -> String { + let mut s = String::new(); + for y in area.y..area.bottom() { + for x in area.x..area.right() { + s.push(buf[(x, y)].symbol().chars().next().unwrap_or(' ')); + } + s.push('\n'); + } + s + } + + #[test] + fn ctrl_y_pill_does_not_include_ctrl_shift_c() { + let area = Rect::new(0, 0, 60, 3); + let mut buf = Buffer::empty(area); + for y in 0..area.height { + for x in 2..area.width.saturating_sub(1) { + buf[(x, y)].set_symbol("X"); + } + } + + let mut ui = TranscriptCopyUi::new_with_shortcut(CopySelectionShortcut::CtrlY); + ui.render_copy_pill(area, &mut buf, (1, 2), (1, 6), 0, 3); + + let rendered = buf_to_string(&buf, area); + assert!(rendered.contains("copy")); + assert!(rendered.contains("ctrl + y")); + assert!(!rendered.contains("ctrl + shift + c")); + assert!(ui.affordance_rect.is_some()); + } +} diff --git a/codex-rs/tui2/src/transcript_multi_click.rs b/codex-rs/tui2/src/transcript_multi_click.rs new file mode 100644 index 00000000000..8ddf2920de2 --- /dev/null +++ b/codex-rs/tui2/src/transcript_multi_click.rs @@ -0,0 
+1,1221 @@ +//! Transcript-relative multi-click selection helpers. +//! +//! This module implements multi-click selection in terms of the **rendered +//! transcript model** (wrapped transcript lines + content columns), not +//! terminal buffer coordinates. +//! +//! Terminal `(row, col)` coordinates are ephemeral: scrolling, resizing, and +//! reflow (especially while streaming) change where a given piece of transcript +//! content appears on screen. Transcript-relative selection coordinates are +//! stable because they are anchored to the flattened, wrapped transcript line +//! model. +//! +//! Integration notes: +//! - Mouse event → `TranscriptSelectionPoint` mapping is handled by `app.rs`. +//! - This module: +//! - groups nearby clicks into a multi-click sequence +//! - expands the selection based on the current click count +//! - rebuilds the wrapped transcript lines from `HistoryCell::display_lines(width)` +//! so selection expansion matches on-screen wrapping. +//! - In TUI2 we start transcript selection on drag. A single click stores an +//! anchor but is not an "active" selection (no head). Multi-click selection +//! (double/triple/quad+) *does* create an active selection immediately. +//! +//! Complexity / cost model: +//! - single clicks are `O(1)` (just click tracking + caret placement) +//! - multi-click expansion rebuilds the current wrapped transcript view +//! (`O(total rendered transcript text)`) so selection matches what is on screen +//! *right now* (including streaming/reflow). +//! +//! Coordinates: +//! - `TranscriptSelectionPoint::line_index` is an index into the flattened, +//! wrapped transcript lines ("visual lines"). +//! - `TranscriptSelectionPoint::column` is a 0-based *content* column offset, +//! measured from immediately after the transcript gutter +//! (`TRANSCRIPT_GUTTER_COLS`). +//! - Selection endpoints are inclusive (they represent a closed interval of +//! selected cells). +//! +//! Selection expansion is UI-oriented: +//! 
- "word" selection uses display width (`unicode_width`) and a lightweight +//! character class heuristic. +//! - "paragraph" selection is based on contiguous non-empty wrapped lines. +//! - "cell" selection selects all wrapped lines that belong to a single history +//! cell (the unit returned by `HistoryCell::display_lines`). + +use crate::history_cell::HistoryCell; +use crate::transcript_selection::TRANSCRIPT_GUTTER_COLS; +use crate::transcript_selection::TranscriptSelection; +use crate::transcript_selection::TranscriptSelectionPoint; +use crate::wrapping::RtOptions; +use crate::wrapping::word_wrap_line; +use ratatui::text::Line; +use std::sync::Arc; +use std::time::Duration; +use std::time::Instant; +use unicode_width::UnicodeWidthChar; + +/// Stateful multi-click selection handler for the transcript viewport. +/// +/// This holds the click history required to infer multi-click sequences across +/// mouse events. The actual selection expansion is computed from the current +/// transcript content so it stays aligned with on-screen wrapping. +#[derive(Debug, Default)] +pub(crate) struct TranscriptMultiClick { + /// Tracks recent clicks so we can infer a multi-click sequence. + /// + /// This is intentionally kept separate from the selection itself: selection + /// endpoints are owned by `TranscriptSelection`, while multi-click behavior + /// is a transient input gesture state. + tracker: ClickTracker, +} + +impl TranscriptMultiClick { + /// Handle a left-button mouse down within the transcript viewport. + /// + /// This is intended to be called from `App`'s mouse handler. + /// + /// Behavior: + /// - Always updates the underlying selection anchor (delegates to + /// [`crate::transcript_selection::on_mouse_down`]) so dragging can extend + /// from this point. + /// - Tracks the click as part of a potential multi-click sequence. + /// - On multi-click (double/triple/quad+), replaces the selection with an + /// active expanded selection (word/line/paragraph). 
+ /// + /// `width` must match the transcript viewport width used for rendering so + /// wrapping (and therefore word/paragraph boundaries) align with what the + /// user sees. + /// + /// Returns whether the selection changed (useful to decide whether to + /// request a redraw). + pub(crate) fn on_mouse_down( + &mut self, + selection: &mut TranscriptSelection, + cells: &[Arc], + width: u16, + point: Option, + ) -> bool { + self.on_mouse_down_at(selection, cells, width, point, Instant::now()) + } + + /// Notify the handler that the user is drag-selecting. + /// + /// Drag-selection should not be interpreted as a continuation of a + /// multi-click sequence, so we reset click history once the cursor moves + /// away from the anchor point. + /// + /// `point` is expected to be clamped to transcript content coordinates. If + /// `point` is `None`, this is a no-op. + pub(crate) fn on_mouse_drag( + &mut self, + selection: &TranscriptSelection, + point: Option, + ) { + let (Some(anchor), Some(point)) = (selection.anchor, point) else { + return; + }; + + // Some terminals emit `Drag` events for very small cursor motion while + // the button is held down (e.g. trackpad “jitter” during a click). + // Resetting the click sequence on *any* drag makes double/quad clicks + // hard to trigger, so we only treat it as a drag gesture once the + // cursor has meaningfully moved away from the anchor. + let moved_to_other_wrapped_line = point.line_index != anchor.line_index; + let moved_far_enough_horizontally = + point.column.abs_diff(anchor.column) > ClickTracker::MAX_COLUMN_DISTANCE; + if moved_to_other_wrapped_line || moved_far_enough_horizontally { + self.tracker.reset(); + } + } + + /// Testable implementation of [`Self::on_mouse_down`]. + /// + /// Taking `now` as an input makes click grouping deterministic in tests. + /// + /// High-level flow (kept here so callers don’t have to mentally simulate the + /// selection state machine): + /// 1. 
Update the underlying selection state using + /// [`crate::transcript_selection::on_mouse_down`]. In TUI2 this records an + /// anchor and clears any head so a single click does not leave a visible + /// selection. + /// 2. If the click is outside the transcript content (`point == None`), + /// reset the click tracker and return. + /// 3. Register the click with the tracker to infer the click count. + /// 4. For multi-click (`>= 2`), compute an expanded selection from the + /// *current* wrapped transcript view and overwrite the selection with an + /// active selection (`anchor` + `head` set). + fn on_mouse_down_at( + &mut self, + selection: &mut TranscriptSelection, + cells: &[Arc], + width: u16, + point: Option, + now: Instant, + ) -> bool { + let before = *selection; + + let selection_changed = crate::transcript_selection::on_mouse_down(selection, point); + let Some(point) = point else { + self.tracker.reset(); + return selection_changed; + }; + + let click_count = self.tracker.register_click(point, now); + if click_count == 1 { + return *selection != before; + } + + *selection = selection_for_click(cells, width, point, click_count); + *selection != before + } +} + +/// Tracks recent clicks so we can infer multi-click counts. +#[derive(Debug, Default)] +struct ClickTracker { + /// The last click observed (used to group nearby clicks into a sequence). + last_click: Option, +} + +/// A single click event used for multi-click grouping. +#[derive(Debug, Clone, Copy)] +struct Click { + /// Location of the click in transcript coordinates. + point: TranscriptSelectionPoint, + /// Click count for the current sequence. + click_count: u8, + /// Time the click occurred (used to bound multi-click grouping). + at: Instant, +} + +impl ClickTracker { + /// Maximum time gap between clicks to be considered part of a sequence. 
+ const MAX_DELAY: Duration = Duration::from_millis(650); + /// Maximum horizontal motion (in transcript *content* columns) to be + /// considered "the same click target" for multi-click grouping. + const MAX_COLUMN_DISTANCE: u16 = 4; + + /// Reset click history so the next click begins a new sequence. + fn reset(&mut self) { + self.last_click = None; + } + + /// Record a click and return the inferred click count for this sequence. + /// + /// Clicks are grouped when: + /// - they occur close in time (`MAX_DELAY`), and + /// - they target the same transcript wrapped line, and + /// - they occur at nearly the same content column (`MAX_COLUMN_DISTANCE`), + /// with increasing tolerance for later clicks in the sequence + /// + /// The returned count saturates at `u8::MAX` (we only care about the + /// `>= 4` bucket). + fn register_click(&mut self, point: TranscriptSelectionPoint, now: Instant) -> u8 { + let mut click_count = 1u8; + if let Some(prev) = self.last_click + && now.duration_since(prev.at) <= Self::MAX_DELAY + && prev.point.line_index == point.line_index + && prev.point.column.abs_diff(point.column) <= max_column_distance(prev.click_count) + { + click_count = prev.click_count.saturating_add(1); + } + + self.last_click = Some(Click { + point, + click_count, + at: now, + }); + + click_count + } +} + +/// Column-distance tolerance for continuing an existing click sequence. +/// +/// We intentionally loosen grouping after the selection has expanded: once the +/// user is on the “whole line” or “paragraph” step, requiring a near-identical +/// column makes quad-clicks hard to trigger because the user can naturally +/// click elsewhere on the already-highlighted line. +fn max_column_distance(prev_click_count: u8) -> u16 { + match prev_click_count { + 0 | 1 => ClickTracker::MAX_COLUMN_DISTANCE, + 2 => ClickTracker::MAX_COLUMN_DISTANCE.saturating_mul(2), + _ => u16::MAX, + } +} + +/// Expand a click (plus inferred `click_count`) into a transcript selection. 
+///
+/// This is the core of multi-click behavior. For expanded selections it
+/// rebuilds the current wrapped transcript view from history cells so selection
+/// boundaries line up with the rendered transcript model (not raw source
+/// strings, and not terminal buffer coordinates).
+///
+/// `TranscriptSelectionPoint::column` is interpreted in content coordinates:
+/// column 0 is the first column immediately after the transcript gutter
+/// (`TRANSCRIPT_GUTTER_COLS`). The returned selection columns are clamped to
+/// the content width for the given `width`.
+///
+/// Gesture mapping:
+/// - double click selects a “word-ish” run on the clicked wrapped line
+/// - triple click selects the entire wrapped line
+/// - quad click selects the containing paragraph (contiguous non-empty wrapped
+///   lines, with empty/spacer lines treated as paragraph breaks)
+/// - quint+ click selects the entire history cell
+///
+/// Returned selections are always “active” (both `anchor` and `head` set). This
+/// intentionally differs from normal single-click behavior in TUI2 (which only
+/// stores an anchor until a drag makes the selection active).
+///
+/// Defensiveness:
+/// - if the transcript is empty, or wrapping yields no lines, this falls back
+///   to a caret-like selection at `point` so multi-click never produces “no
+///   selection”
+/// - if `point` refers past the end of the wrapped line list, it is clamped to
+///   the last wrapped line so behavior stays stable during scroll/resize/reflow
+fn selection_for_click(
+    cells: &[Arc],
+    width: u16,
+    point: TranscriptSelectionPoint,
+    click_count: u8,
+) -> TranscriptSelection {
+    if click_count == 1 {
+        return TranscriptSelection {
+            anchor: Some(point),
+            head: Some(point),
+        };
+    }
+
+    // `width` is the total viewport width, including the gutter. Selection
+    // columns are content-relative, so compute the maximum selectable *content*
+    // column.
+ let max_content_col = width + .saturating_sub(1) + .saturating_sub(TRANSCRIPT_GUTTER_COLS); + + // Rebuild the same logical line stream the transcript renders from. This + // keeps expansion boundaries aligned with current streaming output and the + // current wrap width. + let (lines, line_cell_index) = build_transcript_lines_with_cell_index(cells, width); + if lines.is_empty() { + return TranscriptSelection { + anchor: Some(point), + head: Some(point), + }; + } + + // Expand based on the wrapped *visual* lines so triple/quad/quint-click + // selection respects the current wrap width. + let (wrapped, wrapped_cell_index) = word_wrap_lines_with_cell_index( + &lines, + &line_cell_index, + RtOptions::new(width.max(1) as usize), + ); + if wrapped.is_empty() { + return TranscriptSelection { + anchor: Some(point), + head: Some(point), + }; + } + + // Clamp both the target line and column into the current wrapped view. This + // matters during live streaming, where the transcript can grow between the + // time the UI clamps the click and the time we compute expansion. 
+ let line_index = point.line_index.min(wrapped.len().saturating_sub(1)); + let point = TranscriptSelectionPoint::new(line_index, point.column.min(max_content_col)); + + if click_count == 2 { + let Some((start, end)) = + word_bounds_in_wrapped_line(&wrapped[line_index], TRANSCRIPT_GUTTER_COLS, point.column) + else { + return TranscriptSelection { + anchor: Some(point), + head: Some(point), + }; + }; + return TranscriptSelection { + anchor: Some(TranscriptSelectionPoint::new( + line_index, + start.min(max_content_col), + )), + head: Some(TranscriptSelectionPoint::new( + line_index, + end.min(max_content_col), + )), + }; + } + + if click_count == 3 { + return TranscriptSelection { + anchor: Some(TranscriptSelectionPoint::new(line_index, 0)), + head: Some(TranscriptSelectionPoint::new(line_index, max_content_col)), + }; + } + + if click_count == 4 { + let (start_line, end_line) = + paragraph_bounds_in_wrapped_lines(&wrapped, TRANSCRIPT_GUTTER_COLS, line_index) + .unwrap_or((line_index, line_index)); + return TranscriptSelection { + anchor: Some(TranscriptSelectionPoint::new(start_line, 0)), + head: Some(TranscriptSelectionPoint::new(end_line, max_content_col)), + }; + } + + let Some((start_line, end_line)) = + cell_bounds_in_wrapped_lines(&wrapped_cell_index, line_index) + else { + return TranscriptSelection { + anchor: Some(point), + head: Some(point), + }; + }; + TranscriptSelection { + anchor: Some(TranscriptSelectionPoint::new(start_line, 0)), + head: Some(TranscriptSelectionPoint::new(end_line, max_content_col)), + } +} + +/// Flatten transcript history cells into the same line stream used by the UI. +/// +/// This mirrors `App::build_transcript_lines` semantics: insert a blank spacer +/// line between non-continuation cells so word/paragraph boundaries match what +/// the user sees. 
+#[cfg(test)] +fn build_transcript_lines(cells: &[Arc], width: u16) -> Vec> { + let mut lines: Vec> = Vec::new(); + let mut has_emitted_lines = false; + + for cell in cells { + let cell_lines = cell.display_lines(width); + if cell_lines.is_empty() { + continue; + } + + if !cell.is_stream_continuation() { + if has_emitted_lines { + // `App` inserts a spacer between distinct (non-continuation) + // history cells; preserve that here so paragraph detection + // matches what users see. + lines.push(Line::from("")); + } else { + has_emitted_lines = true; + } + } + + lines.extend(cell_lines); + } + + lines +} + +/// Like [`build_transcript_lines`], but also returns a per-line mapping to the +/// originating history cell index. +/// +/// This mapping lets us implement "select the whole history cell" in terms of +/// wrapped visual line indices. +fn build_transcript_lines_with_cell_index( + cells: &[Arc], + width: u16, +) -> (Vec>, Vec>) { + let mut lines: Vec> = Vec::new(); + let mut line_cell_index: Vec> = Vec::new(); + let mut has_emitted_lines = false; + + for (cell_index, cell) in cells.iter().enumerate() { + let cell_lines = cell.display_lines(width); + if cell_lines.is_empty() { + continue; + } + + if !cell.is_stream_continuation() { + if has_emitted_lines { + lines.push(Line::from("")); + line_cell_index.push(None); + } else { + has_emitted_lines = true; + } + } + + line_cell_index.extend(std::iter::repeat_n(Some(cell_index), cell_lines.len())); + lines.extend(cell_lines); + } + + debug_assert_eq!(lines.len(), line_cell_index.len()); + (lines, line_cell_index) +} + +/// Wrap lines and carry forward a per-line mapping to history cell index. +/// +/// This mirrors [`word_wrap_lines_borrowed`] behavior so selection expansion +/// uses the same wrapped line model as rendering. 
+fn word_wrap_lines_with_cell_index<'a, O>( + lines: &'a [Line<'a>], + line_cell_index: &[Option], + width_or_options: O, +) -> (Vec>, Vec>) +where + O: Into>, +{ + debug_assert_eq!(lines.len(), line_cell_index.len()); + + let base_opts: RtOptions<'a> = width_or_options.into(); + let mut out: Vec> = Vec::new(); + let mut out_cell_index: Vec> = Vec::new(); + + let mut first = true; + for (line, cell_index) in lines.iter().zip(line_cell_index.iter().copied()) { + let opts = if first { + base_opts.clone() + } else { + base_opts + .clone() + .initial_indent(base_opts.subsequent_indent.clone()) + }; + + let wrapped = word_wrap_line(line, opts); + out_cell_index.extend(std::iter::repeat_n(cell_index, wrapped.len())); + out.extend(wrapped); + first = false; + } + + debug_assert_eq!(out.len(), out_cell_index.len()); + (out, out_cell_index) +} + +/// Expand to the contiguous range of wrapped lines that belong to a single +/// history cell. +/// +/// `line_index` is in wrapped line coordinates. If the line at `line_index` is +/// a spacer (no cell index), we select the nearest preceding cell, falling back +/// to the next cell below. 
+fn cell_bounds_in_wrapped_lines( + wrapped_cell_index: &[Option], + line_index: usize, +) -> Option<(usize, usize)> { + let total = wrapped_cell_index.len(); + if total == 0 { + return None; + } + + let mut target = line_index.min(total.saturating_sub(1)); + let mut cell_index = wrapped_cell_index[target]; + if cell_index.is_none() { + if let Some(found) = (0..target) + .rev() + .find(|idx| wrapped_cell_index[*idx].is_some()) + { + target = found; + cell_index = wrapped_cell_index[found]; + } else if let Some(found) = + (target + 1..total).find(|idx| wrapped_cell_index[*idx].is_some()) + { + target = found; + cell_index = wrapped_cell_index[found]; + } + } + let cell_index = cell_index?; + + let mut start = target; + while start > 0 && wrapped_cell_index[start - 1] == Some(cell_index) { + start = start.saturating_sub(1); + } + + let mut end = target; + while end + 1 < total && wrapped_cell_index[end + 1] == Some(cell_index) { + end = end.saturating_add(1); + } + + Some((start, end)) +} + +/// Coarse character classes used for "word-ish" selection. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum WordCharClass { + /// Any whitespace (select as a contiguous run). + Whitespace, + /// Alphanumeric plus token punctuation (paths/idents/URLs). + Token, + /// Everything else. + Other, +} + +/// Classify characters for UI-oriented "word-ish" selection. +/// +/// This intentionally does not attempt full Unicode word boundary semantics. +/// It is tuned for terminal transcript interactions, where "word" often means +/// identifiers, paths, URLs, and punctuation-adjacent tokens. +fn word_char_class(ch: char) -> WordCharClass { + if ch.is_whitespace() { + return WordCharClass::Whitespace; + } + + let is_token = ch.is_alphanumeric() + || matches!( + ch, + '_' | '-' + | '.' + | '/' + | '\\' + | ':' + | '@' + | '#' + | '$' + | '%' + | '+' + | '=' + | '?' 
+ | '&' + | '~' + | '*' + ); + if is_token { + WordCharClass::Token + } else { + WordCharClass::Other + } +} + +/// Concatenate a styled `Line` into its plain text representation. +/// +/// Multi-click selection operates on the rendered text content (what the user +/// sees), independent of styling. +fn flatten_line_text(line: &Line<'_>) -> String { + line.spans.iter().map(|s| s.content.as_ref()).collect() +} + +/// Find the UTF-8 byte index that corresponds to `prefix_cols` display columns. +/// +/// This is used to exclude the transcript gutter/prefix when interpreting +/// clicks and paragraph breaks. Column math uses display width, not byte +/// offsets, to match terminal layout. +fn byte_index_after_prefix_cols(text: &str, prefix_cols: u16) -> usize { + let mut col = 0u16; + for (idx, ch) in text.char_indices() { + if col >= prefix_cols { + return idx; + } + col = col.saturating_add(UnicodeWidthChar::width(ch).unwrap_or(0) as u16); + } + text.len() +} + +/// Compute the (inclusive) content column bounds of the "word" under a click. +/// +/// This is defined in terms of the *rendered* line: +/// - `line` is a visual wrapped transcript line (including the gutter/prefix). +/// - `prefix_cols` is the number of display columns to ignore on the left +/// (the transcript gutter). +/// - `click_col` is a 0-based content column, measured from the first column +/// after the gutter. +/// +/// The returned `(start, end)` is an inclusive selection range in content +/// columns (`0..=max_content_col`), suitable for populating +/// [`TranscriptSelectionPoint::column`]. +fn word_bounds_in_wrapped_line( + line: &Line<'_>, + prefix_cols: u16, + click_col: u16, +) -> Option<(u16, u16)> { + // We compute word bounds by flattening to plain text and mapping each + // displayed glyph to a column range (by display width). This mirrors what + // the user sees, even if the underlying spans have multiple styles. 
+ // + // Notes / limitations: + // - This operates at the `char` level, not grapheme clusters. For most + // transcript content (ASCII-ish tokens/paths/URLs) that’s sufficient. + // - Zero-width chars are skipped; they don’t occupy a terminal cell. + let full = flatten_line_text(line); + let prefix_byte = byte_index_after_prefix_cols(&full, prefix_cols); + let content = &full[prefix_byte..]; + + let mut cells: Vec<(char, u16, u16)> = Vec::new(); + let mut col = 0u16; + for ch in content.chars() { + let w = UnicodeWidthChar::width(ch).unwrap_or(0) as u16; + if w == 0 { + continue; + } + let start = col; + let end = col.saturating_add(w); + cells.push((ch, start, end)); + col = end; + } + + let total_width = col; + if cells.is_empty() || total_width == 0 { + return None; + } + + let click_col = click_col.min(total_width.saturating_sub(1)); + let mut idx = cells + .iter() + .position(|(_, start, end)| click_col >= *start && click_col < *end) + .unwrap_or(0); + if idx >= cells.len() { + idx = cells.len().saturating_sub(1); + } + + let class = word_char_class(cells[idx].0); + + let mut start_idx = idx; + while start_idx > 0 && word_char_class(cells[start_idx - 1].0) == class { + start_idx = start_idx.saturating_sub(1); + } + + let mut end_idx = idx; + while end_idx + 1 < cells.len() && word_char_class(cells[end_idx + 1].0) == class { + end_idx = end_idx.saturating_add(1); + } + + let start_col = cells[start_idx].1; + let end_col = cells[end_idx].2.saturating_sub(1); + Some((start_col, end_col)) +} + +/// Compute the (inclusive) wrapped line index bounds of the paragraph +/// surrounding `line_index`. +/// +/// Paragraphs are defined on *wrapped visual lines* (not underlying history +/// cells): a paragraph is any contiguous run of non-empty wrapped lines, and +/// empty lines (after trimming the transcript gutter/prefix) break paragraphs. 
+/// +/// When `line_index` points at a break line, this selects the nearest preceding +/// non-break line so a quad-click on the spacer line between history cells +/// selects the paragraph above (matching common terminal UX expectations). +fn paragraph_bounds_in_wrapped_lines( + lines: &[Line<'_>], + prefix_cols: u16, + line_index: usize, +) -> Option<(usize, usize)> { + if lines.is_empty() { + return None; + } + + // Paragraph breaks are determined after skipping the transcript gutter so a + // line that only contains the gutter prefix still counts as “empty”. + let is_break = |idx: usize| -> bool { + let full = flatten_line_text(&lines[idx]); + let prefix_byte = byte_index_after_prefix_cols(&full, prefix_cols); + full[prefix_byte..].trim().is_empty() + }; + + let mut target = line_index.min(lines.len().saturating_sub(1)); + if is_break(target) { + // Prefer the paragraph above for spacer lines inserted between history + // cells. If there is no paragraph above, fall back to the next + // paragraph below. 
+ target = (0..target) + .rev() + .find(|idx| !is_break(*idx)) + .or_else(|| (target + 1..lines.len()).find(|idx| !is_break(*idx)))?; + } + + let mut start = target; + while start > 0 && !is_break(start - 1) { + start = start.saturating_sub(1); + } + + let mut end = target; + while end + 1 < lines.len() && !is_break(end + 1) { + end = end.saturating_add(1); + } + + Some((start, end)) +} + +#[cfg(test)] +mod tests { + use super::*; + use pretty_assertions::assert_eq; + use ratatui::text::Line; + + #[derive(Debug)] + struct StaticCell { + lines: Vec>, + is_stream_continuation: bool, + } + + impl StaticCell { + fn new(lines: Vec>) -> Self { + Self { + lines, + is_stream_continuation: false, + } + } + + fn continuation(lines: Vec>) -> Self { + Self { + lines, + is_stream_continuation: true, + } + } + } + + impl HistoryCell for StaticCell { + fn display_lines(&self, _width: u16) -> Vec> { + self.lines.clone() + } + + fn is_stream_continuation(&self) -> bool { + self.is_stream_continuation + } + } + + #[test] + fn word_bounds_respects_prefix_and_word_classes() { + let line = Line::from("› hello world"); + let prefix_cols = 2; + + assert_eq!( + word_bounds_in_wrapped_line(&line, prefix_cols, 1), + Some((0, 4)) + ); + assert_eq!( + word_bounds_in_wrapped_line(&line, prefix_cols, 6), + Some((5, 7)) + ); + assert_eq!( + word_bounds_in_wrapped_line(&line, prefix_cols, 9), + Some((8, 12)) + ); + } + + #[test] + fn paragraph_bounds_selects_contiguous_non_empty_lines() { + let lines = vec![ + Line::from("› first"), + Line::from(" second"), + Line::from(""), + Line::from("› third"), + ]; + let prefix_cols = 2; + + assert_eq!( + paragraph_bounds_in_wrapped_lines(&lines, prefix_cols, 1), + Some((0, 1)) + ); + assert_eq!( + paragraph_bounds_in_wrapped_lines(&lines, prefix_cols, 2), + Some((0, 1)) + ); + assert_eq!( + paragraph_bounds_in_wrapped_lines(&lines, prefix_cols, 3), + Some((3, 3)) + ); + } + + #[test] + fn click_sequence_expands_selection_word_then_line_then_paragraph() { + 
let cells: Vec> = vec![Arc::new(StaticCell::new(vec![ + Line::from("› first"), + Line::from(" second"), + ]))]; + let width = 20; + + let mut multi = TranscriptMultiClick::default(); + let t0 = Instant::now(); + let point = TranscriptSelectionPoint::new(1, 1); + let mut selection = TranscriptSelection::default(); + + multi.on_mouse_down_at(&mut selection, &cells, width, Some(point), t0); + assert_eq!(selection.anchor, Some(point)); + assert_eq!(selection.head, None); + + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(point), + t0 + Duration::from_millis(10), + ); + assert_eq!( + selection + .anchor + .zip(selection.head) + .map(|(a, h)| (a.line_index, a.column, h.column)), + Some((1, 0, 5)) + ); + + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(point), + t0 + Duration::from_millis(20), + ); + let max_content_col = width + .saturating_sub(1) + .saturating_sub(TRANSCRIPT_GUTTER_COLS); + assert_eq!( + selection + .anchor + .zip(selection.head) + .map(|(a, h)| (a.line_index, a.column, h.column)), + Some((1, 0, max_content_col)) + ); + + // The final click can land elsewhere on the highlighted line; we still + // want to treat it as continuing the multi-click sequence. 
+ multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(TranscriptSelectionPoint::new(point.line_index, 10)), + t0 + Duration::from_millis(30), + ); + assert_eq!( + selection + .anchor + .zip(selection.head) + .map(|(a, h)| (a.line_index, h.line_index)), + Some((0, 1)) + ); + } + + #[test] + fn double_click_on_whitespace_selects_whitespace_run() { + let cells: Vec> = vec![Arc::new(StaticCell::new(vec![Line::from( + "› hello world", + )]))]; + let width = 40; + + let mut multi = TranscriptMultiClick::default(); + let t0 = Instant::now(); + let point = TranscriptSelectionPoint::new(0, 6); + let mut selection = TranscriptSelection::default(); + + multi.on_mouse_down_at(&mut selection, &cells, width, Some(point), t0); + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(point), + t0 + Duration::from_millis(5), + ); + + assert_eq!( + selection + .anchor + .zip(selection.head) + .map(|(a, h)| (a.column, h.column)), + Some((5, 7)) + ); + } + + #[test] + fn click_sequence_resets_when_click_moves_too_far_horizontally() { + let cells: Vec> = + vec![Arc::new(StaticCell::new(vec![Line::from("› hello world")]))]; + let width = 40; + + let mut multi = TranscriptMultiClick::default(); + let t0 = Instant::now(); + let mut selection = TranscriptSelection::default(); + + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(TranscriptSelectionPoint::new(0, 0)), + t0, + ); + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(TranscriptSelectionPoint::new(0, 10)), + t0 + Duration::from_millis(10), + ); + + assert_eq!(selection.anchor, Some(TranscriptSelectionPoint::new(0, 10))); + assert_eq!(selection.head, None); + } + + #[test] + fn click_sequence_resets_when_click_is_too_slow() { + let cells: Vec> = + vec![Arc::new(StaticCell::new(vec![Line::from("› hello world")]))]; + let width = 40; + + let mut multi = TranscriptMultiClick::default(); + let t0 = Instant::now(); + let point = TranscriptSelectionPoint::new(0, 1); + let 
mut selection = TranscriptSelection::default(); + + multi.on_mouse_down_at(&mut selection, &cells, width, Some(point), t0); + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(point), + t0 + ClickTracker::MAX_DELAY + Duration::from_millis(1), + ); + + assert_eq!(selection.anchor, Some(point)); + assert_eq!(selection.head, None); + } + + #[test] + fn click_sequence_resets_when_click_changes_line() { + let cells: Vec> = vec![Arc::new(StaticCell::new(vec![ + Line::from("› first"), + Line::from(" second"), + ]))]; + let width = 40; + + let mut multi = TranscriptMultiClick::default(); + let t0 = Instant::now(); + let mut selection = TranscriptSelection::default(); + + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(TranscriptSelectionPoint::new(0, 1)), + t0, + ); + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(TranscriptSelectionPoint::new(1, 1)), + t0 + Duration::from_millis(10), + ); + + assert_eq!(selection.anchor, Some(TranscriptSelectionPoint::new(1, 1))); + assert_eq!(selection.head, None); + } + + #[test] + fn drag_resets_multi_click_sequence() { + let cells: Vec> = + vec![Arc::new(StaticCell::new(vec![Line::from("› hello world")]))]; + let width = 40; + + let mut multi = TranscriptMultiClick::default(); + let t0 = Instant::now(); + let point = TranscriptSelectionPoint::new(0, 1); + let mut selection = TranscriptSelection::default(); + + multi.on_mouse_down_at(&mut selection, &cells, width, Some(point), t0); + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(point), + t0 + Duration::from_millis(10), + ); + assert_eq!( + selection + .anchor + .zip(selection.head) + .map(|(a, h)| (a.column, h.column)), + Some((0, 4)) + ); + + multi.on_mouse_drag(&selection, Some(TranscriptSelectionPoint::new(0, 10))); + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(point), + t0 + Duration::from_millis(20), + ); + assert_eq!(selection.anchor, Some(point)); + 
assert_eq!(selection.head, None); + } + + #[test] + fn small_drag_jitter_does_not_reset_multi_click_sequence() { + let cells: Vec> = + vec![Arc::new(StaticCell::new(vec![Line::from("› hello world")]))]; + let width = 40; + + let mut multi = TranscriptMultiClick::default(); + let t0 = Instant::now(); + let point = TranscriptSelectionPoint::new(0, 1); + let mut selection = TranscriptSelection::default(); + + multi.on_mouse_down_at(&mut selection, &cells, width, Some(point), t0); + multi.on_mouse_drag(&selection, Some(TranscriptSelectionPoint::new(0, 2))); + + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(point), + t0 + Duration::from_millis(10), + ); + assert_eq!( + selection + .anchor + .zip(selection.head) + .map(|(a, h)| (a.column, h.column)), + Some((0, 4)) + ); + } + + #[test] + fn paragraph_selects_nearest_non_empty_when_clicking_break_line() { + let cells: Vec> = vec![ + Arc::new(StaticCell::new(vec![Line::from("› first")])), + Arc::new(StaticCell::new(vec![Line::from("› second")])), + ]; + let width = 40; + + let mut multi = TranscriptMultiClick::default(); + let t0 = Instant::now(); + let mut selection = TranscriptSelection::default(); + + // Index 1 is the spacer line inserted between the two non-continuation cells. 
+ let point = TranscriptSelectionPoint::new(1, 0); + multi.on_mouse_down_at(&mut selection, &cells, width, Some(point), t0); + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(point), + t0 + Duration::from_millis(10), + ); + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(point), + t0 + Duration::from_millis(20), + ); + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(point), + t0 + Duration::from_millis(30), + ); + + assert_eq!( + selection + .anchor + .zip(selection.head) + .map(|(a, h)| (a.line_index, h.line_index)), + Some((0, 0)) + ); + } + + #[test] + fn build_transcript_lines_inserts_spacer_between_non_continuation_cells() { + let cells: Vec> = vec![ + Arc::new(StaticCell::new(vec![Line::from("› first")])), + Arc::new(StaticCell::continuation(vec![Line::from(" cont")])), + Arc::new(StaticCell::new(vec![Line::from("› second")])), + ]; + let width = 40; + + let lines = build_transcript_lines(&cells, width); + let text: Vec = lines.iter().map(flatten_line_text).collect(); + assert_eq!(text, vec!["› first", " cont", "", "› second"]); + } + + #[test] + fn quint_click_selects_entire_history_cell() { + let cells: Vec> = vec![ + Arc::new(StaticCell::new(vec![ + Line::from("› first"), + Line::from(""), + Line::from(" second"), + ])), + Arc::new(StaticCell::new(vec![Line::from("› other")])), + ]; + let width = 40; + + let mut multi = TranscriptMultiClick::default(); + let t0 = Instant::now(); + let point = TranscriptSelectionPoint::new(2, 1); + let mut selection = TranscriptSelection::default(); + + multi.on_mouse_down_at(&mut selection, &cells, width, Some(point), t0); + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(point), + t0 + Duration::from_millis(10), + ); + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(point), + t0 + Duration::from_millis(20), + ); + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(point), + t0 + Duration::from_millis(30), + 
); + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(TranscriptSelectionPoint::new(2, 10)), + t0 + Duration::from_millis(40), + ); + + let max_content_col = width + .saturating_sub(1) + .saturating_sub(TRANSCRIPT_GUTTER_COLS); + assert_eq!( + selection.anchor.zip(selection.head).map(|(a, h)| ( + a.line_index, + a.column, + h.line_index, + h.column + )), + Some((0, 0, 2, max_content_col)) + ); + } + + #[test] + fn quint_click_on_spacer_selects_cell_above() { + let cells: Vec> = vec![ + Arc::new(StaticCell::new(vec![Line::from("› first")])), + Arc::new(StaticCell::new(vec![Line::from("› second")])), + ]; + let width = 40; + + let mut multi = TranscriptMultiClick::default(); + let t0 = Instant::now(); + let mut selection = TranscriptSelection::default(); + + // Index 1 is the spacer line inserted between the two non-continuation cells. + let point = TranscriptSelectionPoint::new(1, 0); + for (idx, dt) in [0u64, 10, 20, 30, 40].into_iter().enumerate() { + multi.on_mouse_down_at( + &mut selection, + &cells, + width, + Some(TranscriptSelectionPoint::new( + point.line_index, + if idx < 3 { 0 } else { (idx as u16) * 5 }, + )), + t0 + Duration::from_millis(dt), + ); + } + + let max_content_col = width + .saturating_sub(1) + .saturating_sub(TRANSCRIPT_GUTTER_COLS); + assert_eq!( + selection.anchor.zip(selection.head).map(|(a, h)| ( + a.line_index, + a.column, + h.line_index, + h.column + )), + Some((0, 0, 0, max_content_col)) + ); + } +} diff --git a/codex-rs/tui2/src/transcript_render.rs b/codex-rs/tui2/src/transcript_render.rs new file mode 100644 index 00000000000..ceee03d8978 --- /dev/null +++ b/codex-rs/tui2/src/transcript_render.rs @@ -0,0 +1,399 @@ +//! Transcript rendering helpers (flattening, wrapping, and metadata). +//! +//! `App` treats the transcript (history cells) as the source of truth and +//! renders a *flattened* list of visual lines into the viewport. A single +//! 
history cell may render multiple visual lines, and the viewport may include +//! synthetic spacer rows between cells. +//! +//! This module centralizes the logic for: +//! - Flattening history cells into visual `ratatui::text::Line`s. +//! - Producing parallel metadata (`TranscriptLineMeta`) used for scroll +//! anchoring and "user row" styling. +//! - Computing *soft-wrap joiners* so copy can treat wrapped prose as one +//! logical line instead of inserting hard newlines. + +use crate::history_cell::HistoryCell; +use crate::tui::scrolling::TranscriptLineMeta; +use ratatui::text::Line; +use std::sync::Arc; + +/// Flattened transcript lines plus the metadata required to interpret them. +#[derive(Debug)] +pub(crate) struct TranscriptLines { + /// Flattened visual transcript lines, in the same order they are rendered. + pub(crate) lines: Vec>, + /// Parallel metadata for each line (same length as `lines`). + /// + /// This maps a visual line back to `(cell_index, line_in_cell)` so scroll + /// anchoring and "user row" styling remain stable across reflow. + pub(crate) meta: Vec, + /// Soft-wrap joiners (same length as `lines`). + /// + /// `joiner_before[i]` is `Some(joiner)` when line `i` is a soft-wrap + /// continuation of line `i - 1`, and `None` when the break is a hard break + /// (between input lines/cells, or spacer rows). + /// + /// Copy uses this to join wrapped prose without inserting hard newlines, + /// while still preserving hard line breaks and explicit blank lines. + pub(crate) joiner_before: Vec>, +} + +/// Build flattened transcript lines without applying additional viewport wrapping. +/// +/// This is useful for: +/// - Exit transcript rendering (ANSI) where we want the "cell as rendered" +/// output. +/// - Any consumer that wants a stable cell → line mapping without re-wrapping. 
+pub(crate) fn build_transcript_lines( + cells: &[Arc], + width: u16, +) -> TranscriptLines { + // This function is the "lossless" transcript flattener: + // - it asks each cell for its transcript lines (including any per-cell prefixes/indents) + // - it inserts spacer rows between non-continuation cells to match the viewport layout + // - it emits parallel metadata so scroll anchoring can map visual lines back to cells. + let mut lines: Vec> = Vec::new(); + let mut meta: Vec = Vec::new(); + let mut joiner_before: Vec> = Vec::new(); + let mut has_emitted_lines = false; + + for (cell_index, cell) in cells.iter().enumerate() { + // Cells provide joiners alongside lines so copy can distinguish hard breaks from soft wraps + // (and preserve the exact whitespace at wrap boundaries). + let rendered = cell.transcript_lines_with_joiners(width); + if rendered.lines.is_empty() { + continue; + } + + // Cells that are not stream continuations are separated by an explicit spacer row. + // This keeps the flattened transcript aligned with what the user sees in the viewport + // and preserves intentional blank lines in copy. + if !cell.is_stream_continuation() { + if has_emitted_lines { + lines.push(Line::from("")); + meta.push(TranscriptLineMeta::Spacer); + joiner_before.push(None); + } else { + has_emitted_lines = true; + } + } + + for (line_in_cell, line) in rendered.lines.into_iter().enumerate() { + // `line_in_cell` is the *visual* line index within the cell. Consumers use this for + // anchoring (e.g., "keep this row visible when the transcript reflows"). + meta.push(TranscriptLineMeta::CellLine { + cell_index, + line_in_cell, + }); + lines.push(line); + // Maintain the `joiner_before` invariant: exactly one entry per output line. 
+ joiner_before.push(
+ rendered
+ .joiner_before
+ .get(line_in_cell)
+ .cloned()
+ .unwrap_or(None),
+ );
+ }
+ }
+
+ TranscriptLines {
+ lines,
+ meta,
+ joiner_before,
+ }
+}
+
+/// Build flattened transcript lines as they appear in the transcript viewport.
+///
+/// This applies *viewport wrapping* to prose lines, while deliberately avoiding
+/// wrapping for preformatted content (currently detected via the code-block
+/// line style) so indentation remains meaningful for copy/paste.
+pub(crate) fn build_wrapped_transcript_lines(
+ cells: &[Arc<dyn HistoryCell>],
+ width: u16,
+) -> TranscriptLines {
+ use crate::render::line_utils::line_to_static;
+ use ratatui::style::Color;
+
+ if width == 0 {
+ return TranscriptLines {
+ lines: Vec::new(),
+ meta: Vec::new(),
+ joiner_before: Vec::new(),
+ };
+ }
+
+ let base_opts: crate::wrapping::RtOptions<'_> =
+ crate::wrapping::RtOptions::new(width.max(1) as usize);
+
+ let mut lines: Vec<Line<'static>> = Vec::new();
+ let mut meta: Vec<TranscriptLineMeta> = Vec::new();
+ let mut joiner_before: Vec<Option<String>> = Vec::new();
+ let mut has_emitted_lines = false;
+
+ for (cell_index, cell) in cells.iter().enumerate() {
+ // Start from each cell's transcript view (prefixes/indents already applied), then apply
+ // viewport wrapping to prose while keeping preformatted content intact.
+ let rendered = cell.transcript_lines_with_joiners(width);
+ if rendered.lines.is_empty() {
+ continue;
+ }
+
+ if !cell.is_stream_continuation() {
+ if has_emitted_lines {
+ lines.push(Line::from(""));
+ meta.push(TranscriptLineMeta::Spacer);
+ joiner_before.push(None);
+ } else {
+ has_emitted_lines = true;
+ }
+ }
+
+ // `visual_line_in_cell` counts the output visual lines produced from this cell *after* any
+ // viewport wrapping. This is distinct from `base_idx` (the index into the cell's input
+ // lines), since a single input line may wrap into multiple visual lines.
+ let mut visual_line_in_cell: usize = 0; + let mut first = true; + for (base_idx, base_line) in rendered.lines.iter().enumerate() { + // Preserve code blocks (and other preformatted text) by not applying + // viewport wrapping, so indentation remains meaningful for copy/paste. + if base_line.style.fg == Some(Color::Cyan) { + lines.push(base_line.clone()); + meta.push(TranscriptLineMeta::CellLine { + cell_index, + line_in_cell: visual_line_in_cell, + }); + visual_line_in_cell = visual_line_in_cell.saturating_add(1); + // Preformatted lines are treated as hard breaks; we keep the cell-provided joiner + // (which is typically `None`). + joiner_before.push( + rendered + .joiner_before + .get(base_idx) + .cloned() + .unwrap_or(None), + ); + first = false; + continue; + } + + let opts = if first { + base_opts.clone() + } else { + // For subsequent input lines within a cell, treat the "initial" indent as the + // cell's subsequent indent (matches textarea wrapping expectations). + base_opts + .clone() + .initial_indent(base_opts.subsequent_indent.clone()) + }; + // `word_wrap_line_with_joiners` returns both the wrapped visual lines and, for each + // continuation segment, the exact joiner substring that should be inserted instead of a + // newline when copying as a logical line. + let (wrapped, wrapped_joiners) = + crate::wrapping::word_wrap_line_with_joiners(base_line, opts); + + for (seg_idx, (wrapped_line, seg_joiner)) in + wrapped.into_iter().zip(wrapped_joiners).enumerate() + { + lines.push(line_to_static(&wrapped_line)); + meta.push(TranscriptLineMeta::CellLine { + cell_index, + line_in_cell: visual_line_in_cell, + }); + visual_line_in_cell = visual_line_in_cell.saturating_add(1); + + if seg_idx == 0 { + // The first wrapped segment corresponds to the original input line, so we use + // the cell-provided joiner (hard break vs soft break *between input lines*). 
+ joiner_before.push(
+ rendered
+ .joiner_before
+ .get(base_idx)
+ .cloned()
+ .unwrap_or(None),
+ );
+ } else {
+ // Subsequent wrapped segments are soft-wrap continuations produced by viewport
+ // wrapping, so we use the wrap-derived joiner.
+ joiner_before.push(seg_joiner);
+ }
+ }
+
+ first = false;
+ }
+ }
+
+ TranscriptLines {
+ lines,
+ meta,
+ joiner_before,
+ }
+}
+
+/// Render flattened transcript lines into ANSI strings suitable for printing after the TUI exits.
+///
+/// This helper mirrors the transcript viewport behavior:
+/// - Merges line-level style into each span so ANSI output matches on-screen styling.
+/// - For user-authored rows, pads the background style out to the full terminal width so prompts
+/// appear as solid blocks in scrollback.
+/// - Streams spans through the shared vt100 writer so downstream tests and tools see consistent
+/// escape sequences.
+pub(crate) fn render_lines_to_ansi(
+ lines: &[Line<'static>],
+ line_meta: &[TranscriptLineMeta],
+ is_user_cell: &[bool],
+ width: u16,
+) -> Vec<String> {
+ use unicode_width::UnicodeWidthStr;
+
+ lines
+ .iter()
+ .enumerate()
+ .map(|(idx, line)| {
+ // Determine whether this visual line belongs to a user-authored cell. We use this to
+ // pad the background to the full terminal width so prompts appear as solid blocks in
+ // scrollback.
+ let is_user_row = line_meta
+ .get(idx)
+ .and_then(TranscriptLineMeta::cell_index)
+ .map(|cell_index| is_user_cell.get(cell_index).copied().unwrap_or(false))
+ .unwrap_or(false);
+
+ // Line-level styles in ratatui apply to the entire line, but spans can also have their
+ // own styles. ANSI output is span-based, so we "bake" the line style into every span by
+ // patching span style with the line style.
+ let mut merged_spans: Vec<ratatui::text::Span<'static>> = line
+ .spans
+ .iter()
+ .map(|span| ratatui::text::Span {
+ style: span.style.patch(line.style),
+ content: span.content.clone(),
+ })
+ .collect();
+
+ if is_user_row && width > 0 {
+ // For user rows, pad out to the full width so the background color extends across
+ // the line in terminal scrollback (mirrors the on-screen viewport behavior).
+ let text: String = merged_spans
+ .iter()
+ .map(|span| span.content.as_ref())
+ .collect();
+ let text_width = UnicodeWidthStr::width(text.as_str());
+ let total_width = usize::from(width);
+ if text_width < total_width {
+ let pad_len = total_width.saturating_sub(text_width);
+ if pad_len > 0 {
+ let pad_style = crate::style::user_message_style();
+ merged_spans.push(ratatui::text::Span {
+ style: pad_style,
+ content: " ".repeat(pad_len).into(),
+ });
+ }
+ }
+ }
+
+ let mut buf: Vec<u8> = Vec::new();
+ let _ = crate::insert_history::write_spans(&mut buf, merged_spans.iter());
+ String::from_utf8(buf).unwrap_or_default()
+ })
+ .collect()
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use crate::history_cell::TranscriptLinesWithJoiners;
+ use pretty_assertions::assert_eq;
+ use std::sync::Arc;
+
+ #[derive(Debug)]
+ struct FakeCell {
+ lines: Vec<Line<'static>>,
+ joiner_before: Vec<Option<String>>,
+ is_stream_continuation: bool,
+ }
+
+ impl HistoryCell for FakeCell {
+ fn display_lines(&self, _width: u16) -> Vec<Line<'static>> {
+ self.lines.clone()
+ }
+
+ fn transcript_lines_with_joiners(&self, _width: u16) -> TranscriptLinesWithJoiners {
+ TranscriptLinesWithJoiners {
+ lines: self.lines.clone(),
+ joiner_before: self.joiner_before.clone(),
+ }
+ }
+
+ fn is_stream_continuation(&self) -> bool {
+ self.is_stream_continuation
+ }
+ }
+
+ fn concat_line(line: &Line<'_>) -> String {
+ line.spans
+ .iter()
+ .map(|s| s.content.as_ref())
+ .collect::<String>()
+ }
+
+ #[test]
+ fn build_wrapped_transcript_lines_threads_joiners_and_spacers() {
+ let cells: Vec<Arc<dyn HistoryCell>> = vec![
+ Arc::new(FakeCell {
+ lines: vec![Line::from("• hello world")],
+
joiner_before: vec![None],
+ is_stream_continuation: false,
+ }),
+ Arc::new(FakeCell {
+ lines: vec![Line::from("• foo bar")],
+ joiner_before: vec![None],
+ is_stream_continuation: false,
+ }),
+ ];
+
+ // Force wrapping so we get soft-wrap joiners for the second segment of each cell's line.
+ let transcript = build_wrapped_transcript_lines(&cells, 8);
+
+ assert_eq!(transcript.lines.len(), transcript.meta.len());
+ assert_eq!(transcript.lines.len(), transcript.joiner_before.len());
+
+ let rendered: Vec<String> = transcript.lines.iter().map(concat_line).collect();
+ assert_eq!(rendered, vec!["• hello", "world", "", "• foo", "bar"]);
+
+ assert_eq!(
+ transcript.meta,
+ vec![
+ TranscriptLineMeta::CellLine {
+ cell_index: 0,
+ line_in_cell: 0
+ },
+ TranscriptLineMeta::CellLine {
+ cell_index: 0,
+ line_in_cell: 1
+ },
+ TranscriptLineMeta::Spacer,
+ TranscriptLineMeta::CellLine {
+ cell_index: 1,
+ line_in_cell: 0
+ },
+ TranscriptLineMeta::CellLine {
+ cell_index: 1,
+ line_in_cell: 1
+ },
+ ]
+ );
+
+ assert_eq!(
+ transcript.joiner_before,
+ vec![
+ None,
+ Some(" ".to_string()),
+ None,
+ None,
+ Some(" ".to_string()),
+ ]
+ );
+ }
+}
diff --git a/codex-rs/tui2/src/transcript_selection.rs b/codex-rs/tui2/src/transcript_selection.rs
new file mode 100644
index 00000000000..f16bbaef775
--- /dev/null
+++ b/codex-rs/tui2/src/transcript_selection.rs
@@ -0,0 +1,368 @@
+//! Transcript selection primitives.
+//!
+//! The transcript (history) viewport is rendered as a flattened list of visual
+//! lines after wrapping. Selection in the transcript needs to be stable across
+//! scrolling and terminal resizes, so endpoints are expressed in
+//! *content-relative* coordinates:
+//!
+//! - `line_index`: index into the flattened, wrapped transcript lines (visual
+//! lines).
+//! - `column`: a zero-based offset within that visual line, measured from the
+//! first content column to the right of the gutter.
+//!
+//!
These coordinates are intentionally independent of the current viewport: the
+//! user can scroll after selecting, and the selection should continue to refer
+//! to the same conversation content.
+//!
+//! Clipboard reconstruction is implemented in `transcript_copy` (including
+//! off-screen lines), while keybinding detection and the on-screen copy
+//! affordance live in `transcript_copy_ui`.
+//!
+//! ## Mouse selection semantics
+//!
+//! The transcript supports click-and-drag selection. To avoid leaving a
+//! distracting 1-cell highlight on a simple click, the selection only becomes
+//! active once a drag updates the head point.
+
+use crate::tui::scrolling::TranscriptScroll;
+
+/// Number of columns reserved for the transcript gutter (bullet/prefix space).
+///
+/// Transcript rendering prefixes each line with a short gutter (e.g. `• ` or
+/// continuation padding). Selection coordinates intentionally exclude this
+/// gutter so selection/copy operates on content columns instead of terminal
+/// absolute columns.
+pub(crate) const TRANSCRIPT_GUTTER_COLS: u16 = 2;
+
+/// Content-relative selection within the inline transcript viewport.
+#[derive(Debug, Clone, Copy, Default, PartialEq, Eq)]
+pub(crate) struct TranscriptSelection {
+ /// The initial selection point (where the selection drag started).
+ ///
+ /// This remains fixed while dragging; the highlighted region is the span
+ /// between `anchor` and `head`.
+ pub(crate) anchor: Option<TranscriptSelectionPoint>,
+ /// The current selection point (where the selection drag currently ends).
+ ///
+ /// This is `None` until the user drags, which prevents a simple click from
+ /// creating a persistent selection highlight.
+ pub(crate) head: Option<TranscriptSelectionPoint>,
+}
+
+/// A single endpoint of a transcript selection.
+#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
+pub(crate) struct TranscriptSelectionPoint {
+ /// Index into the flattened, wrapped transcript lines.
+ pub(crate) line_index: usize,
+ /// Zero-based content column (excluding the gutter).
+ ///
+ /// This is not a terminal absolute column: callers add the gutter offset
+ /// when mapping it to a rendered buffer row.
+ pub(crate) column: u16,
+}
+
+impl TranscriptSelectionPoint {
+ /// Create a selection endpoint at a given wrapped line index and column.
+ pub(crate) const fn new(line_index: usize, column: u16) -> Self {
+ Self { line_index, column }
+ }
+}
+
+impl From<(usize, u16)> for TranscriptSelectionPoint {
+ fn from((line_index, column): (usize, u16)) -> Self {
+ Self::new(line_index, column)
+ }
+}
+
+/// Return `(start, end)` with `start <= end` in transcript order.
+pub(crate) fn ordered_endpoints(
+ anchor: TranscriptSelectionPoint,
+ head: TranscriptSelectionPoint,
+) -> (TranscriptSelectionPoint, TranscriptSelectionPoint) {
+ if anchor <= head {
+ (anchor, head)
+ } else {
+ (head, anchor)
+ }
+}
+
+/// Begin a potential transcript selection (left button down).
+///
+/// This records an anchor point and clears any existing head. The selection is
+/// not considered "active" until a drag sets a head, which avoids highlighting
+/// a 1-cell region on simple click.
+///
+/// Returns whether the selection changed (useful to decide whether to request a
+/// redraw).
+pub(crate) fn on_mouse_down(
+ selection: &mut TranscriptSelection,
+ point: Option<TranscriptSelectionPoint>,
+) -> bool {
+ let before = *selection;
+ let Some(point) = point else {
+ return false;
+ };
+ begin(selection, point);
+ *selection != before
+}
+
+/// The outcome of a mouse drag update.
+///
+/// This is returned by [`on_mouse_drag`]. It separates selection state updates
+/// from `App`-level actions, so callers can decide when to schedule redraws or
+/// lock the transcript scroll position.
+///
+/// `lock_scroll` indicates the caller should lock the transcript viewport (if
+/// currently following the bottom) so ongoing streaming output does not move
+/// the selection under the cursor.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub(crate) struct MouseDragOutcome {
+ /// Whether the selection changed (useful to decide whether to request a
+ /// redraw).
+ pub(crate) changed: bool,
+ /// Whether the caller should lock the transcript scroll position.
+ pub(crate) lock_scroll: bool,
+}
+
+/// Update the selection state for a left-button drag.
+///
+/// This sets the selection head (creating an active selection) and returns:
+///
+/// - `changed`: whether the selection state changed (useful to decide whether to
+/// request a redraw).
+/// - `lock_scroll`: whether the caller should lock transcript scrolling to
+/// freeze the viewport under the selection while streaming output arrives.
+///
+/// `point` is expected to already be clamped to the transcript's content area
+/// (e.g. not in the gutter). If `point` is `None`, this is a no-op.
+pub(crate) fn on_mouse_drag(
+ selection: &mut TranscriptSelection,
+ scroll: &TranscriptScroll,
+ point: Option<TranscriptSelectionPoint>,
+ streaming: bool,
+) -> MouseDragOutcome {
+ let before = *selection;
+ let Some(point) = point else {
+ return MouseDragOutcome {
+ changed: false,
+ lock_scroll: false,
+ };
+ };
+ let lock_scroll = drag(selection, scroll, point, streaming);
+ MouseDragOutcome {
+ changed: *selection != before,
+ lock_scroll,
+ }
+}
+
+/// Finalize the selection state when the left button is released.
+///
+/// If the selection never became active (no head) or the head ended up equal to
+/// the anchor, the selection is cleared so a click does not leave a persistent
+/// highlight.
+///
+/// Returns whether the selection changed (useful to decide whether to request a
+/// redraw).
+pub(crate) fn on_mouse_up(selection: &mut TranscriptSelection) -> bool {
+ let before = *selection;
+ end(selection);
+ *selection != before
+}
+
+/// Begin a potential selection by recording an anchor and clearing any head.
+///
+/// This ensures a plain click does not create an active selection/highlight.
+/// The selection becomes active on the first drag that sets `head`. +fn begin(selection: &mut TranscriptSelection, point: TranscriptSelectionPoint) { + *selection = TranscriptSelection { + anchor: Some(point), + head: None, + }; +} + +/// Update selection state during a drag by setting `head` when anchored. +/// +/// Returns whether the caller should lock the transcript scroll position while +/// streaming and following the bottom, so new output doesn't move the selection +/// under the cursor. +fn drag( + selection: &mut TranscriptSelection, + scroll: &TranscriptScroll, + point: TranscriptSelectionPoint, + streaming: bool, +) -> bool { + let Some(anchor) = selection.anchor else { + return false; + }; + + let should_lock_scroll = + streaming && matches!(*scroll, TranscriptScroll::ToBottom) && point != anchor; + + selection.head = Some(point); + + should_lock_scroll +} + +/// Finalize selection on mouse up. +/// +/// Clears the selection if it never became active (no head) or if the head +/// ended up equal to the anchor, so a click doesn't leave a 1-cell highlight. 
+fn end(selection: &mut TranscriptSelection) { + if selection.head.is_none() || selection.anchor == selection.head { + *selection = TranscriptSelection::default(); + } +} + +#[cfg(test)] +mod tests { + use super::*; + use pretty_assertions::assert_eq; + + #[test] + fn selection_only_highlights_on_drag() { + let anchor = TranscriptSelectionPoint::new(0, 1); + let head = TranscriptSelectionPoint::new(0, 3); + + let mut selection = TranscriptSelection::default(); + assert!(on_mouse_down(&mut selection, Some(anchor))); + assert_eq!( + selection, + TranscriptSelection { + anchor: Some(anchor), + head: None, + } + ); + + assert!(on_mouse_up(&mut selection)); + assert_eq!(selection, TranscriptSelection::default()); + + assert!(on_mouse_down(&mut selection, Some(anchor))); + let outcome = on_mouse_drag( + &mut selection, + &TranscriptScroll::ToBottom, + Some(head), + false, + ); + assert!(outcome.changed); + assert!(!outcome.lock_scroll); + assert_eq!( + selection, + TranscriptSelection { + anchor: Some(anchor), + head: Some(head), + } + ); + } + + #[test] + fn selection_clears_when_drag_ends_at_anchor() { + let point = TranscriptSelectionPoint::new(0, 1); + + let mut selection = TranscriptSelection::default(); + assert!(on_mouse_down(&mut selection, Some(point))); + let outcome = on_mouse_drag( + &mut selection, + &TranscriptScroll::ToBottom, + Some(point), + false, + ); + assert!(outcome.changed); + assert!(!outcome.lock_scroll); + assert!(on_mouse_up(&mut selection)); + + assert_eq!(selection, TranscriptSelection::default()); + } + + #[test] + fn drag_requests_scroll_lock_when_streaming_at_bottom_and_point_moves() { + let anchor = TranscriptSelectionPoint::new(0, 1); + let head = TranscriptSelectionPoint::new(0, 2); + + let mut selection = TranscriptSelection::default(); + assert!(on_mouse_down(&mut selection, Some(anchor))); + let outcome = on_mouse_drag( + &mut selection, + &TranscriptScroll::ToBottom, + Some(head), + true, + ); + assert!(outcome.changed); + 
assert!(outcome.lock_scroll); + } + + #[test] + fn selection_helpers_noop_without_points_or_anchor() { + let mut selection = TranscriptSelection::default(); + assert!(!on_mouse_down(&mut selection, None)); + assert_eq!(selection, TranscriptSelection::default()); + + let outcome = on_mouse_drag(&mut selection, &TranscriptScroll::ToBottom, None, false); + assert_eq!( + outcome, + MouseDragOutcome { + changed: false, + lock_scroll: false, + } + ); + assert_eq!(selection, TranscriptSelection::default()); + + let outcome = on_mouse_drag( + &mut selection, + &TranscriptScroll::ToBottom, + Some(TranscriptSelectionPoint::new(0, 1)), + false, + ); + assert_eq!( + outcome, + MouseDragOutcome { + changed: false, + lock_scroll: false, + } + ); + assert_eq!(selection, TranscriptSelection::default()); + + assert!(!on_mouse_up(&mut selection)); + assert_eq!(selection, TranscriptSelection::default()); + } + + #[test] + fn mouse_down_resets_head() { + let anchor = TranscriptSelectionPoint::new(0, 1); + let head = TranscriptSelectionPoint::new(0, 2); + let next_anchor = TranscriptSelectionPoint::new(1, 0); + + let mut selection = TranscriptSelection { + anchor: Some(anchor), + head: Some(head), + }; + + assert!(on_mouse_down(&mut selection, Some(next_anchor))); + assert_eq!( + selection, + TranscriptSelection { + anchor: Some(next_anchor), + head: None, + } + ); + } + + #[test] + fn dragging_does_not_request_scroll_lock_when_not_at_bottom() { + let anchor = TranscriptSelectionPoint::new(0, 1); + let head = TranscriptSelectionPoint::new(0, 2); + + let mut selection = TranscriptSelection::default(); + assert!(on_mouse_down(&mut selection, Some(anchor))); + let outcome = on_mouse_drag( + &mut selection, + &TranscriptScroll::Scrolled { + cell_index: 0, + line_in_cell: 0, + }, + Some(head), + true, + ); + assert!(outcome.changed); + assert!(!outcome.lock_scroll); + } +} diff --git a/codex-rs/tui2/src/tui.rs b/codex-rs/tui2/src/tui.rs index 712c5cf553d..3cb1fb01ca3 100644 --- 
a/codex-rs/tui2/src/tui.rs +++ b/codex-rs/tui2/src/tui.rs @@ -46,6 +46,8 @@ use crate::tui::job_control::SUSPEND_KEY; #[cfg(unix)] use crate::tui::job_control::SuspendContext; +mod alt_screen_nesting; +mod frame_rate_limiter; mod frame_requester; #[cfg(unix)] mod job_control; @@ -131,6 +133,7 @@ pub struct Tui { draw_tx: broadcast::Sender<()>, pub(crate) terminal: Terminal, pending_history_lines: Vec>, + alt_screen_nesting: alt_screen_nesting::AltScreenNesting, alt_saved_viewport: Option, #[cfg(unix)] suspend_context: SuspendContext, @@ -159,6 +162,7 @@ impl Tui { draw_tx, terminal, pending_history_lines: vec![], + alt_screen_nesting: alt_screen_nesting::AltScreenNesting::default(), alt_saved_viewport: None, #[cfg(unix)] suspend_context: SuspendContext::new(), @@ -305,6 +309,10 @@ impl Tui { /// Enter alternate screen and expand the viewport to full terminal size, saving the current /// inline viewport for restoration when leaving. pub fn enter_alt_screen(&mut self) -> Result<()> { + if !self.alt_screen_nesting.enter() { + self.alt_screen_active.store(true, Ordering::Relaxed); + return Ok(()); + } let _ = execute!(self.terminal.backend_mut(), EnterAlternateScreen); if let Ok(size) = self.terminal.size() { self.alt_saved_viewport = Some(self.terminal.viewport_area); @@ -322,6 +330,11 @@ impl Tui { /// Leave alternate screen and restore the previously saved inline viewport, if any. pub fn leave_alt_screen(&mut self) -> Result<()> { + if !self.alt_screen_nesting.leave() { + self.alt_screen_active + .store(self.alt_screen_nesting.is_active(), Ordering::Relaxed); + return Ok(()); + } let _ = execute!(self.terminal.backend_mut(), LeaveAlternateScreen); if let Some(saved) = self.alt_saved_viewport.take() { self.terminal.set_viewport_area(saved); @@ -368,8 +381,17 @@ impl Tui { let area = Rect::new(0, 0, size.width, height.min(size.height)); if area != terminal.viewport_area { // TODO(nornagon): probably this could be collapsed with the clear + set_viewport_area above. 
- terminal.clear()?; - terminal.set_viewport_area(area); + if terminal.viewport_area.is_empty() { + // On the first draw the viewport is empty, so `Terminal::clear()` is a no-op. + // If we don't clear after sizing the viewport, diff-based rendering may skip + // writing spaces (because "space" == "space" in the buffers) and stale terminal + // contents can leak through as random characters between words. + terminal.set_viewport_area(area); + terminal.clear()?; + } else { + terminal.clear()?; + terminal.set_viewport_area(area); + } } // Update the y position for suspending so Ctrl-Z can place the cursor correctly. diff --git a/codex-rs/tui2/src/tui/alt_screen_nesting.rs b/codex-rs/tui2/src/tui/alt_screen_nesting.rs new file mode 100644 index 00000000000..d384bd27048 --- /dev/null +++ b/codex-rs/tui2/src/tui/alt_screen_nesting.rs @@ -0,0 +1,81 @@ +//! Alternate-screen nesting guard. +//! +//! The main `codex-tui2` UI typically runs inside the terminal’s alternate screen buffer so the +//! full viewport can be used without polluting normal scrollback. Some sub-flows (e.g. pager-style +//! overlays) also call `enter_alt_screen()`/`leave_alt_screen()` for historical reasons. +//! +//! Those calls are conceptually “idempotent” (the UI is already on the alt screen), but the +//! underlying terminal commands are *not*: issuing a real `LeaveAlternateScreen` while the rest of +//! the app still thinks it is drawing on the alternate buffer desynchronizes rendering and can +//! leave stale characters behind when returning to the normal view. +//! +//! `AltScreenNesting` tracks a small nesting depth so only the outermost enter/leave actually +//! toggles the terminal mode. + +#[derive(Debug, Default, Clone, Copy, PartialEq, Eq)] +pub(crate) struct AltScreenNesting { + depth: u16, +} + +impl AltScreenNesting { + pub(crate) fn is_active(self) -> bool { + self.depth > 0 + } + + /// Record an enter-alt-screen request. 
+ /// + /// Returns `true` when the caller should actually enter the alternate screen. + pub(crate) fn enter(&mut self) -> bool { + if self.depth == 0 { + self.depth = 1; + true + } else { + self.depth = self.depth.saturating_add(1); + false + } + } + + /// Record a leave-alt-screen request. + /// + /// Returns `true` when the caller should actually leave the alternate screen. + pub(crate) fn leave(&mut self) -> bool { + match self.depth { + 0 => false, + 1 => { + self.depth = 0; + true + } + _ => { + self.depth = self.depth.saturating_sub(1); + false + } + } + } +} + +#[cfg(test)] +mod tests { + use super::AltScreenNesting; + use pretty_assertions::assert_eq; + + #[test] + fn alt_screen_nesting_tracks_outermost_transitions() { + let mut nesting = AltScreenNesting::default(); + assert_eq!(false, nesting.is_active()); + + assert_eq!(true, nesting.enter()); + assert_eq!(true, nesting.is_active()); + + assert_eq!(false, nesting.enter()); + assert_eq!(true, nesting.is_active()); + + assert_eq!(false, nesting.leave()); + assert_eq!(true, nesting.is_active()); + + assert_eq!(true, nesting.leave()); + assert_eq!(false, nesting.is_active()); + + assert_eq!(false, nesting.leave()); + assert_eq!(false, nesting.is_active()); + } +} diff --git a/codex-rs/tui2/src/tui/frame_rate_limiter.rs b/codex-rs/tui2/src/tui/frame_rate_limiter.rs new file mode 100644 index 00000000000..56dd752e643 --- /dev/null +++ b/codex-rs/tui2/src/tui/frame_rate_limiter.rs @@ -0,0 +1,62 @@ +//! Limits how frequently frame draw notifications may be emitted. +//! +//! Widgets sometimes call `FrameRequester::schedule_frame()` more frequently than a user can +//! perceive. This limiter clamps draw notifications to a maximum of 60 FPS to avoid wasted work. +//! +//! This is intentionally a small, pure helper so it can be unit-tested in isolation and used by +//! the async frame scheduler without adding complexity to the app/event loop. 
+
+use std::time::Duration;
+use std::time::Instant;
+
+/// A 60 FPS minimum frame interval (≈16.67ms).
+pub(super) const MIN_FRAME_INTERVAL: Duration = Duration::from_nanos(16_666_667);
+
+/// Remembers the most recent emitted draw, allowing deadlines to be clamped forward.
+#[derive(Debug, Default)]
+pub(super) struct FrameRateLimiter {
+ last_emitted_at: Option<Instant>,
+}
+
+impl FrameRateLimiter {
+ /// Returns `requested`, clamped forward if it would exceed the maximum frame rate.
+ pub(super) fn clamp_deadline(&self, requested: Instant) -> Instant {
+ let Some(last_emitted_at) = self.last_emitted_at else {
+ return requested;
+ };
+ let min_allowed = last_emitted_at
+ .checked_add(MIN_FRAME_INTERVAL)
+ .unwrap_or(last_emitted_at);
+ requested.max(min_allowed)
+ }
+
+ /// Records that a draw notification was emitted at `emitted_at`.
+ pub(super) fn mark_emitted(&mut self, emitted_at: Instant) {
+ self.last_emitted_at = Some(emitted_at);
+ }
+}
+
+#[cfg(test)]
+mod tests {
+ use super::*;
+ use pretty_assertions::assert_eq;
+
+ #[test]
+ fn default_does_not_clamp() {
+ let t0 = Instant::now();
+ let limiter = FrameRateLimiter::default();
+ assert_eq!(limiter.clamp_deadline(t0), t0);
+ }
+
+ #[test]
+ fn clamps_to_min_interval_since_last_emit() {
+ let t0 = Instant::now();
+ let mut limiter = FrameRateLimiter::default();
+
+ assert_eq!(limiter.clamp_deadline(t0), t0);
+ limiter.mark_emitted(t0);
+
+ let too_soon = t0 + Duration::from_millis(1);
+ assert_eq!(limiter.clamp_deadline(too_soon), t0 + MIN_FRAME_INTERVAL);
+ }
+}
diff --git a/codex-rs/tui2/src/tui/frame_requester.rs b/codex-rs/tui2/src/tui/frame_requester.rs
index 4f7886aa22c..88db9d71f3c 100644
--- a/codex-rs/tui2/src/tui/frame_requester.rs
+++ b/codex-rs/tui2/src/tui/frame_requester.rs
@@ -18,6 +18,8 @@ use std::time::Instant;
 use tokio::sync::broadcast;
 use tokio::sync::mpsc;
 
+use super::frame_rate_limiter::FrameRateLimiter;
+
 /// A requester for scheduling future frame draws on the TUI event loop.
 ///
 /// This is the handler side of an actor/handler pair with `FrameScheduler`, which coalesces
@@ -68,15 +70,23 @@ impl FrameRequester {
 /// A scheduler for coalescing frame draw requests and notifying the TUI event loop.
 ///
 /// This type is internal to `FrameRequester` and is spawned as a task to handle scheduling logic.
+///
+/// To avoid wasted redraw work, draw notifications are clamped to a maximum of 60 FPS (see
+/// [`FrameRateLimiter`]).
 struct FrameScheduler {
     receiver: mpsc::UnboundedReceiver<Instant>,
     draw_tx: broadcast::Sender<()>,
+    rate_limiter: FrameRateLimiter,
 }
 
 impl FrameScheduler {
     /// Create a new FrameScheduler with the provided receiver and draw notification sender.
     fn new(receiver: mpsc::UnboundedReceiver<Instant>, draw_tx: broadcast::Sender<()>) -> Self {
-        Self { receiver, draw_tx }
+        Self {
+            receiver,
+            draw_tx,
+            rate_limiter: FrameRateLimiter::default(),
+        }
     }
 
     /// Run the scheduling loop, coalescing frame requests and notifying the TUI event loop.
@@ -97,6 +107,7 @@ impl FrameScheduler {
                     // All senders dropped; exit the scheduler.
                     break
                 };
+                let draw_at = self.rate_limiter.clamp_deadline(draw_at);
                 next_deadline = Some(next_deadline.map_or(draw_at, |cur| cur.min(draw_at)));
 
                 // Do not send a draw immediately here.
By continuing the loop, @@ -107,6 +118,7 @@ impl FrameScheduler { _ = &mut deadline => { if next_deadline.is_some() { next_deadline = None; + self.rate_limiter.mark_emitted(target); let _ = self.draw_tx.send(()); } } @@ -116,6 +128,7 @@ impl FrameScheduler { } #[cfg(test)] mod tests { + use super::super::frame_rate_limiter::MIN_FRAME_INTERVAL; use super::*; use tokio::time; use tokio_util::time::FutureExt; @@ -218,6 +231,98 @@ mod tests { assert!(second.is_err(), "unexpected extra draw received"); } + #[tokio::test(flavor = "current_thread", start_paused = true)] + async fn test_limits_draw_notifications_to_60fps() { + let (draw_tx, mut draw_rx) = broadcast::channel(16); + let requester = FrameRequester::new(draw_tx); + + requester.schedule_frame(); + time::advance(Duration::from_millis(1)).await; + let first = draw_rx + .recv() + .timeout(Duration::from_millis(50)) + .await + .expect("timed out waiting for first draw"); + assert!(first.is_ok(), "broadcast closed unexpectedly"); + + requester.schedule_frame(); + time::advance(Duration::from_millis(1)).await; + let early = draw_rx.recv().timeout(Duration::from_millis(1)).await; + assert!( + early.is_err(), + "draw fired too early; expected max 60fps (min interval {MIN_FRAME_INTERVAL:?})" + ); + + time::advance(MIN_FRAME_INTERVAL).await; + let second = draw_rx + .recv() + .timeout(Duration::from_millis(50)) + .await + .expect("timed out waiting for second draw"); + assert!(second.is_ok(), "broadcast closed unexpectedly"); + } + + #[tokio::test(flavor = "current_thread", start_paused = true)] + async fn test_rate_limit_clamps_early_delayed_requests() { + let (draw_tx, mut draw_rx) = broadcast::channel(16); + let requester = FrameRequester::new(draw_tx); + + requester.schedule_frame(); + time::advance(Duration::from_millis(1)).await; + let first = draw_rx + .recv() + .timeout(Duration::from_millis(50)) + .await + .expect("timed out waiting for first draw"); + assert!(first.is_ok(), "broadcast closed unexpectedly"); + + 
requester.schedule_frame_in(Duration::from_millis(1)); + + time::advance(Duration::from_millis(10)).await; + let too_early = draw_rx.recv().timeout(Duration::from_millis(1)).await; + assert!( + too_early.is_err(), + "draw fired too early; expected max 60fps (min interval {MIN_FRAME_INTERVAL:?})" + ); + + time::advance(MIN_FRAME_INTERVAL).await; + let second = draw_rx + .recv() + .timeout(Duration::from_millis(50)) + .await + .expect("timed out waiting for clamped draw"); + assert!(second.is_ok(), "broadcast closed unexpectedly"); + } + + #[tokio::test(flavor = "current_thread", start_paused = true)] + async fn test_rate_limit_does_not_delay_future_draws() { + let (draw_tx, mut draw_rx) = broadcast::channel(16); + let requester = FrameRequester::new(draw_tx); + + requester.schedule_frame(); + time::advance(Duration::from_millis(1)).await; + let first = draw_rx + .recv() + .timeout(Duration::from_millis(50)) + .await + .expect("timed out waiting for first draw"); + assert!(first.is_ok(), "broadcast closed unexpectedly"); + + requester.schedule_frame_in(Duration::from_millis(50)); + + time::advance(Duration::from_millis(49)).await; + let early = draw_rx.recv().timeout(Duration::from_millis(1)).await; + assert!(early.is_err(), "draw fired too early"); + + time::advance(Duration::from_millis(1)).await; + let second = draw_rx + .recv() + .timeout(Duration::from_millis(50)) + .await + .expect("timed out waiting for delayed draw"); + assert!(second.is_ok(), "broadcast closed unexpectedly"); + } + #[tokio::test(flavor = "current_thread", start_paused = true)] async fn test_multiple_delayed_requests_coalesce_to_earliest() { let (draw_tx, mut draw_rx) = broadcast::channel(16); diff --git a/codex-rs/tui2/src/tui/scrolling.rs b/codex-rs/tui2/src/tui/scrolling.rs index 95346793dc7..c3ca6e94de9 100644 --- a/codex-rs/tui2/src/tui/scrolling.rs +++ b/codex-rs/tui2/src/tui/scrolling.rs @@ -19,6 +19,13 @@ //! 
Spacer rows between non-continuation cells are represented as `TranscriptLineMeta::Spacer`. //! They are not valid anchors; `anchor_for` will pick the nearest non-spacer line when needed. +pub(crate) mod mouse; +pub(crate) use mouse::MouseScrollState; +pub(crate) use mouse::ScrollConfig; +pub(crate) use mouse::ScrollConfigOverrides; +pub(crate) use mouse::ScrollDirection; +pub(crate) use mouse::ScrollUpdate; + /// Per-flattened-line metadata for the transcript view. /// /// Each rendered line in the flattened transcript has a corresponding `TranscriptLineMeta` entry diff --git a/codex-rs/tui2/src/tui/scrolling/mouse.rs b/codex-rs/tui2/src/tui/scrolling/mouse.rs new file mode 100644 index 00000000000..975baf41ad1 --- /dev/null +++ b/codex-rs/tui2/src/tui/scrolling/mouse.rs @@ -0,0 +1,1105 @@ +//! Scroll normalization for mouse wheel/trackpad input. +//! +//! Terminal scroll events vary widely in event counts per wheel tick, and inter-event timing +//! overlaps heavily between wheel and trackpad input. We normalize scroll input by treating +//! events as short streams separated by gaps, converting events into line deltas with a +//! per-terminal events-per-tick factor, and coalescing redraw to a fixed cadence. +//! +//! A mouse wheel "tick" (one notch) is expected to scroll by a fixed number of lines (default: 3) +//! regardless of the terminal's raw event density. Trackpad scrolling should remain higher +//! fidelity (small movements can result in sub-line accumulation that only scrolls once whole +//! lines are reached). +//! +//! Because terminal mouse scroll events do not encode magnitude (only direction), wheel-vs-trackpad +//! detection is heuristic. We bias toward treating input as trackpad-like (to avoid overshoot) and +//! "promote" to wheel-like when the first tick-worth of events arrives quickly. A user can always +//! force wheel/trackpad behavior via config if the heuristic is wrong for their setup. +//! +//! 
See `codex-rs/tui2/docs/scroll_input_model.md` for the data-derived constants and analysis. + +use codex_core::config::types::ScrollInputMode; +use codex_core::terminal::TerminalInfo; +use codex_core::terminal::TerminalName; +use std::time::Duration; +use std::time::Instant; + +const STREAM_GAP_MS: u64 = 80; +const STREAM_GAP: Duration = Duration::from_millis(STREAM_GAP_MS); +const REDRAW_CADENCE_MS: u64 = 16; +const REDRAW_CADENCE: Duration = Duration::from_millis(REDRAW_CADENCE_MS); +const DEFAULT_EVENTS_PER_TICK: u16 = 3; +const DEFAULT_WHEEL_LINES_PER_TICK: u16 = 3; +const DEFAULT_TRACKPAD_LINES_PER_TICK: u16 = 1; +const DEFAULT_SCROLL_MODE: ScrollInputMode = ScrollInputMode::Auto; +const DEFAULT_WHEEL_TICK_DETECT_MAX_MS: u64 = 12; +const DEFAULT_WHEEL_LIKE_MAX_DURATION_MS: u64 = 200; +const DEFAULT_TRACKPAD_ACCEL_EVENTS: u16 = 30; +const DEFAULT_TRACKPAD_ACCEL_MAX: u16 = 3; +const MAX_EVENTS_PER_STREAM: usize = 256; +const MAX_ACCUMULATED_LINES: i32 = 256; +const MIN_LINES_PER_WHEEL_STREAM: i32 = 1; + +fn default_wheel_tick_detect_max_ms_for_terminal(name: TerminalName) -> u64 { + // This threshold is only used for the "promote to wheel-like" fast path in auto mode. + // We keep it per-terminal because some terminals emit wheel ticks spread over tens of + // milliseconds; a tight global threshold causes those wheel ticks to be misclassified as + // trackpad-like and feel too slow. + match name { + TerminalName::WarpTerminal => 20, + _ => DEFAULT_WHEEL_TICK_DETECT_MAX_MS, + } +} + +#[derive(Clone, Copy, Debug, PartialEq, Eq)] +enum ScrollStreamKind { + Unknown, + Wheel, + Trackpad, +} + +/// High-level scroll direction used to sign line deltas. 
+#[derive(Clone, Copy, Debug, PartialEq, Eq)] +pub(crate) enum ScrollDirection { + Up, + Down, +} + +impl ScrollDirection { + fn sign(self) -> i32 { + match self { + ScrollDirection::Up => -1, + ScrollDirection::Down => 1, + } + } + + fn inverted(self) -> Self { + match self { + ScrollDirection::Up => ScrollDirection::Down, + ScrollDirection::Down => ScrollDirection::Up, + } + } +} + +/// Scroll normalization settings derived from terminal metadata and user overrides. +/// +/// These are the knobs used by [`MouseScrollState`] to translate raw `ScrollUp`/`ScrollDown` +/// events into deltas in *visual lines* for the transcript viewport. +/// +/// - `events_per_line` normalizes per-terminal "event density" (how many raw events correspond to +/// one unit of scroll movement). +/// - `wheel_lines_per_tick` scales short, discrete streams so a single mouse wheel notch retains +/// the classic multi-line feel. +/// +/// See `codex-rs/tui2/docs/scroll_input_model.md` for the probe data and rationale. +/// User-facing overrides are exposed via `config.toml` as: +/// - `tui.scroll_events_per_tick` +/// - `tui.scroll_wheel_lines` +/// - `tui.scroll_invert` +#[derive(Clone, Copy, Debug)] +pub(crate) struct ScrollConfig { + /// Per-terminal normalization factor ("events per wheel tick"). + /// + /// Terminals can emit anywhere from ~1 to ~9+ raw events for the same physical wheel notch. + /// We use this factor to convert raw event counts into a "ticks" estimate. + /// + /// Each raw scroll event contributes `1 / events_per_tick` ticks. That tick value is then + /// scaled to lines depending on the active scroll mode (wheel vs trackpad). + /// + /// User-facing name: `tui.scroll_events_per_tick`. + events_per_tick: u16, + + /// Lines applied per mouse wheel tick. + /// + /// When the input is interpreted as wheel-like, one physical wheel notch maps to this many + /// transcript lines. Default is 3 to match typical "classic terminal" scrolling. 
+ wheel_lines_per_tick: u16, + + /// Lines applied per tick-equivalent for trackpad scrolling. + /// + /// Trackpads do not have discrete "ticks", but terminals still emit discrete up/down events. + /// We interpret trackpad-like streams as `trackpad_lines_per_tick / events_per_tick` lines per + /// event and accumulate fractions until they cross a whole line. + trackpad_lines_per_tick: u16, + + /// Trackpad acceleration: the approximate number of events required to gain +1x speed. + /// + /// This is a pragmatic UX knob: in some terminals the vertical event density for trackpad + /// input can be relatively low, which makes large/faster swipes feel sluggish even when small + /// swipes feel correct. + trackpad_accel_events: u16, + + /// Trackpad acceleration: maximum multiplier applied to trackpad-like streams. + /// + /// Set to 1 to effectively disable acceleration. + trackpad_accel_max: u16, + + /// Force wheel/trackpad behavior, or infer it per stream. + mode: ScrollInputMode, + + /// Auto-mode threshold: how quickly the first wheel tick must complete to be considered wheel. + /// + /// This uses the time between the first event of a stream and the moment we have seen + /// `events_per_tick` events. If the first tick completes faster than this, we promote the + /// stream to wheel-like. If not, we keep treating it as trackpad-like. + wheel_tick_detect_max: Duration, + + /// Auto-mode fallback: maximum duration that is still considered "wheel-like". + /// + /// If a stream ends before this duration and we couldn't confidently classify it, we treat it + /// as wheel-like so wheel notches in 1-event-per-tick terminals (WezTerm/iTerm/VS Code) still + /// get classic multi-line behavior. + wheel_like_max_duration: Duration, + + /// Invert the sign of vertical scroll direction. + /// + /// We do not attempt to infer terminal-level inversion settings; this is an explicit + /// application-level toggle. 
+    invert_direction: bool,
+}
+
+/// Optional user overrides for scroll configuration.
+///
+/// Most callers should construct this from the merged [`codex_core::config::Config`] fields so
+/// TUI2 inherits terminal defaults and only overrides what the user configured.
+#[derive(Clone, Copy, Debug, Default)]
+pub(crate) struct ScrollConfigOverrides {
+    pub(crate) events_per_tick: Option<u16>,
+    pub(crate) wheel_lines_per_tick: Option<u16>,
+    pub(crate) trackpad_lines_per_tick: Option<u16>,
+    pub(crate) trackpad_accel_events: Option<u16>,
+    pub(crate) trackpad_accel_max: Option<u16>,
+    pub(crate) mode: Option<ScrollInputMode>,
+    pub(crate) wheel_tick_detect_max_ms: Option<u64>,
+    pub(crate) wheel_like_max_duration_ms: Option<u64>,
+    pub(crate) invert_direction: bool,
+}
+
+impl ScrollConfig {
+    /// Derive scroll normalization defaults from detected terminal metadata.
+    ///
+    /// This uses [`TerminalInfo`] (in particular [`TerminalName`]) to pick an empirically derived
+    /// `events_per_line` default. Users can override both `events_per_line` and the per-wheel-tick
+    /// multiplier via `config.toml` (see [`ScrollConfig`] docs).
+ pub(crate) fn from_terminal(terminal: &TerminalInfo, overrides: ScrollConfigOverrides) -> Self { + let mut events_per_tick = match terminal.name { + TerminalName::AppleTerminal => 3, + TerminalName::WarpTerminal => 9, + TerminalName::WezTerm => 1, + TerminalName::Alacritty => 3, + TerminalName::Ghostty => 3, + TerminalName::Iterm2 => 1, + TerminalName::VsCode => 1, + TerminalName::Kitty => 3, + _ => DEFAULT_EVENTS_PER_TICK, + }; + + if let Some(override_value) = overrides.events_per_tick { + events_per_tick = override_value.max(1); + } + + let mut wheel_lines_per_tick = DEFAULT_WHEEL_LINES_PER_TICK; + if let Some(override_value) = overrides.wheel_lines_per_tick { + wheel_lines_per_tick = override_value.max(1); + } + + let mut trackpad_lines_per_tick = DEFAULT_TRACKPAD_LINES_PER_TICK; + if let Some(override_value) = overrides.trackpad_lines_per_tick { + trackpad_lines_per_tick = override_value.max(1); + } + + let mut trackpad_accel_events = DEFAULT_TRACKPAD_ACCEL_EVENTS; + if let Some(override_value) = overrides.trackpad_accel_events { + trackpad_accel_events = override_value.max(1); + } + + let mut trackpad_accel_max = DEFAULT_TRACKPAD_ACCEL_MAX; + if let Some(override_value) = overrides.trackpad_accel_max { + trackpad_accel_max = override_value.max(1); + } + + let wheel_tick_detect_max_ms = overrides + .wheel_tick_detect_max_ms + .unwrap_or_else(|| default_wheel_tick_detect_max_ms_for_terminal(terminal.name)); + let wheel_tick_detect_max = Duration::from_millis(wheel_tick_detect_max_ms); + let wheel_like_max_duration = Duration::from_millis( + overrides + .wheel_like_max_duration_ms + .unwrap_or(DEFAULT_WHEEL_LIKE_MAX_DURATION_MS), + ); + + Self { + events_per_tick, + wheel_lines_per_tick, + trackpad_lines_per_tick, + trackpad_accel_events, + trackpad_accel_max, + mode: overrides.mode.unwrap_or(DEFAULT_SCROLL_MODE), + wheel_tick_detect_max, + wheel_like_max_duration, + invert_direction: overrides.invert_direction, + } + } + + fn events_per_tick_f32(self) -> f32 
{ + self.events_per_tick.max(1) as f32 + } + + fn wheel_lines_per_tick_f32(self) -> f32 { + self.wheel_lines_per_tick.max(1) as f32 + } + + fn trackpad_lines_per_tick_f32(self) -> f32 { + self.trackpad_lines_per_tick.max(1) as f32 + } + + fn trackpad_events_per_tick_f32(self) -> f32 { + // `events_per_tick` is derived from wheel behavior and can be much larger than the actual + // trackpad event density for the same physical movement. If we use it directly for + // trackpads, terminals like Ghostty/Warp can feel artificially slow. + // + // We cap at the global "typical" wheel tick size (3) which produces more consistent + // trackpad feel across terminals while keeping wheel normalization intact. + self.events_per_tick.clamp(1, DEFAULT_EVENTS_PER_TICK) as f32 + } + + fn trackpad_accel_events_f32(self) -> f32 { + self.trackpad_accel_events.max(1) as f32 + } + + fn trackpad_accel_max_f32(self) -> f32 { + self.trackpad_accel_max.max(1) as f32 + } + + fn apply_direction(self, direction: ScrollDirection) -> ScrollDirection { + if self.invert_direction { + direction.inverted() + } else { + direction + } + } +} + +impl Default for ScrollConfig { + fn default() -> Self { + Self { + events_per_tick: DEFAULT_EVENTS_PER_TICK, + wheel_lines_per_tick: DEFAULT_WHEEL_LINES_PER_TICK, + trackpad_lines_per_tick: DEFAULT_TRACKPAD_LINES_PER_TICK, + trackpad_accel_events: DEFAULT_TRACKPAD_ACCEL_EVENTS, + trackpad_accel_max: DEFAULT_TRACKPAD_ACCEL_MAX, + mode: DEFAULT_SCROLL_MODE, + wheel_tick_detect_max: Duration::from_millis(DEFAULT_WHEEL_TICK_DETECT_MAX_MS), + wheel_like_max_duration: Duration::from_millis(DEFAULT_WHEEL_LIKE_MAX_DURATION_MS), + invert_direction: false, + } + } +} + +/// Output from scroll handling: lines to apply plus when to check for stream end. +/// +/// The caller should apply `lines` immediately. 
If `next_tick_in` is `Some`, schedule a follow-up
+/// tick (typically by requesting a frame) so [`MouseScrollState::on_tick`] can close the stream
+/// after a period of silence.
+#[derive(Clone, Copy, Debug, Default, PartialEq, Eq)]
+pub(crate) struct ScrollUpdate {
+    pub(crate) lines: i32,
+    pub(crate) next_tick_in: Option<Duration>,
+}
+
+/// Tracks mouse scroll input streams and coalesces redraws.
+///
+/// This is the state machine that turns discrete terminal scroll events (`ScrollUp`/`ScrollDown`)
+/// into viewport line deltas. It implements the stream-based model described in
+/// `codex-rs/tui2/docs/scroll_input_model.md`:
+///
+/// - **Streams**: a sequence of events is treated as one user gesture until a gap larger than
+///   [`STREAM_GAP`] or a direction flip closes the stream.
+/// - **Normalization**: streams are converted to line deltas using [`ScrollConfig`] (per-terminal
+///   `events_per_tick`, per-mode lines-per-tick, and optional invert).
+/// - **Coalescing**: trackpad-like streams are flushed at most every [`REDRAW_CADENCE`] to avoid
+///   floods in very dense terminals; wheel-like streams flush immediately to feel responsive.
+/// - **Follow-up ticks**: because stream closure is defined by a *time gap*, callers must schedule
+///   periodic ticks while a stream is active. The returned [`ScrollUpdate::next_tick_in`] provides
+///   the next suggested wake-up.
+///
+/// Typical usage:
+/// - Call [`MouseScrollState::on_scroll_event`] for each vertical scroll event.
+/// - Apply the returned [`ScrollUpdate::lines`] to the transcript scroll state.
+/// - If [`ScrollUpdate::next_tick_in`] is present, schedule a delayed tick and call
+///   [`MouseScrollState::on_tick`] to close the stream after it goes idle.
+#[derive(Clone, Debug)]
+pub(crate) struct MouseScrollState {
+    stream: Option<ScrollStream>,
+    last_redraw_at: Instant,
+    carry_lines: f32,
+    carry_direction: Option<ScrollDirection>,
+}
+
+impl MouseScrollState {
+    /// Create a new scroll state with a deterministic time origin.
+ /// + /// This is primarily used by unit tests so they can control the coalescing and stream-gap + /// behavior by choosing `now` values. Production code generally uses [`Default`] and the + /// `Instant::now()`-based entrypoints. + fn new_at(now: Instant) -> Self { + Self { + stream: None, + last_redraw_at: now, + carry_lines: 0.0, + carry_direction: None, + } + } + + /// Handle a scroll event using the current time. + /// + /// This is the normal production entrypoint used by the TUI event loop. It forwards to + /// [`MouseScrollState::on_scroll_event_at`] using `Instant::now()`. + /// + /// If the returned [`ScrollUpdate::next_tick_in`] is `Some`, callers should schedule a future + /// tick (typically by requesting a frame) and call [`MouseScrollState::on_tick`] (or + /// [`MouseScrollState::on_tick_at`] in tests) so we can close the stream after it goes idle. + /// Without those ticks, streams would only close when a *new* scroll event arrives, which can + /// leave fractional trackpad scroll unflushed and make stop behavior feel laggy. + pub(crate) fn on_scroll_event( + &mut self, + direction: ScrollDirection, + config: ScrollConfig, + ) -> ScrollUpdate { + self.on_scroll_event_at(Instant::now(), direction, config) + } + + /// Handle a scroll event at a specific time. + /// + /// This is the deterministic entrypoint for the scroll stream state machine. It exists so we + /// can write unit tests that exercise stream splitting, coalesced redraw, and end-of-stream + /// flushing without depending on wall-clock time. + /// + /// Behavior is identical to [`MouseScrollState::on_scroll_event`], except the caller provides + /// the timestamp (`now`). In the real app, the timestamp comes from `Instant::now()`. + /// + /// Key details (see `codex-rs/tui2/docs/scroll_input_model.md` for the full model): + /// + /// - **Stream boundaries**: a gap larger than [`STREAM_GAP`] or a direction flip closes the + /// previous stream and starts a new one. 
+ /// - **Wheel vs trackpad**: the stream kind may be promoted to wheel-like in auto mode when a + /// tick-worth of events arrives quickly; otherwise it remains trackpad-like. + /// - **Redraw coalescing**: wheel-like streams flush immediately; trackpad-like streams flush + /// at most every [`REDRAW_CADENCE`]. + /// - **Follow-up ticks**: the returned [`ScrollUpdate::next_tick_in`] tells the caller when it + /// should call [`MouseScrollState::on_tick_at`] to close idle streams and flush any remaining + /// whole lines. In TUI2 this is wired through the app’s frame scheduler. + pub(crate) fn on_scroll_event_at( + &mut self, + now: Instant, + direction: ScrollDirection, + config: ScrollConfig, + ) -> ScrollUpdate { + let direction = config.apply_direction(direction); + let mut lines = 0; + + if let Some(mut stream) = self.stream.take() { + let gap = now.duration_since(stream.last); + if gap > STREAM_GAP || stream.direction != direction { + lines += self.finalize_stream_at(now, &mut stream); + } else { + self.stream = Some(stream); + } + } + + if self.stream.is_none() { + if self.carry_direction != Some(direction) { + self.carry_lines = 0.0; + self.carry_direction = Some(direction); + } + self.stream = Some(ScrollStream::new(now, direction, config)); + } + let carry_lines = self.carry_lines; + let Some(stream) = self.stream.as_mut() else { + unreachable!("stream inserted above"); + }; + stream.push_event(now, direction); + stream.maybe_promote_kind(now); + + // Wheel-like scrolling should feel immediate; trackpad-like streams are coalesced to a + // fixed redraw cadence to avoid floods in very dense terminals. 
+ if stream.is_wheel_like() + || now.duration_since(self.last_redraw_at) >= REDRAW_CADENCE + || stream.just_promoted + { + lines += Self::flush_lines_at(&mut self.last_redraw_at, carry_lines, now, stream); + stream.just_promoted = false; + } + + ScrollUpdate { + lines, + next_tick_in: self.next_tick_in(now), + } + } + + /// Check whether an active stream has ended based on the current time. + pub(crate) fn on_tick(&mut self) -> ScrollUpdate { + self.on_tick_at(Instant::now()) + } + + /// Check whether an active stream has ended at a specific time (for tests). + /// + /// This should be called even when no new scroll events are arriving, while a stream is still + /// considered active. It has two roles: + /// + /// - **Stream closure**: if the stream has been idle for longer than [`STREAM_GAP`], we close + /// it and flush any remaining whole-line scroll. + /// - **Coalesced flush**: for trackpad-like streams, we also flush on [`REDRAW_CADENCE`] even + /// without new events. This avoids a perceived "late jump" when the stream finally closes + /// (users interpret that as overshoot). + pub(crate) fn on_tick_at(&mut self, now: Instant) -> ScrollUpdate { + let mut lines = 0; + if let Some(mut stream) = self.stream.take() { + let gap = now.duration_since(stream.last); + if gap > STREAM_GAP { + lines = self.finalize_stream_at(now, &mut stream); + } else { + // No new events, but we may still have accumulated enough fractional scroll to + // apply additional whole lines. Flushing on a fixed cadence prevents a "late jump" + // when the stream finally closes (which users perceive as overshoot). + if now.duration_since(self.last_redraw_at) >= REDRAW_CADENCE { + lines = Self::flush_lines_at( + &mut self.last_redraw_at, + self.carry_lines, + now, + &mut stream, + ); + } + self.stream = Some(stream); + } + } + + ScrollUpdate { + lines, + next_tick_in: self.next_tick_in(now), + } + } + + /// Finalize a stream and update the trackpad carry state. 
+ /// + /// Callers invoke this when a stream is known to have ended (gap/direction flip). It forces + /// a final wheel/trackpad classification for auto mode, flushes any whole-line deltas, and + /// persists any remaining fractional scroll for trackpad-like streams so the next stream + /// continues smoothly. + fn finalize_stream_at(&mut self, now: Instant, stream: &mut ScrollStream) -> i32 { + stream.finalize_kind(); + let lines = Self::flush_lines_at(&mut self.last_redraw_at, self.carry_lines, now, stream); + + // Preserve sub-line fractional scroll for trackpad-like streams across stream boundaries. + if stream.kind != ScrollStreamKind::Wheel && stream.config.mode != ScrollInputMode::Wheel { + self.carry_lines = + stream.desired_lines_f32(self.carry_lines) - stream.applied_lines as f32; + } else { + self.carry_lines = 0.0; + } + + lines + } + + /// Compute and apply any newly-reached whole-line deltas for the active stream. + /// + /// This converts the stream’s accumulated events to a *desired total line position*, + /// truncates to whole lines, and returns the delta relative to what has already been applied + /// for this stream. + /// + /// For wheel-like streams we also apply a minimum of ±1 line for any non-zero input so wheel + /// notches never become "dead" due to rounding or mis-detection. + fn flush_lines_at( + last_redraw_at: &mut Instant, + carry_lines: f32, + now: Instant, + stream: &mut ScrollStream, + ) -> i32 { + let desired_total = stream.desired_lines_f32(carry_lines); + let mut desired_lines = desired_total.trunc() as i32; + + // For wheel-mode (or wheel-like streams), ensure at least one line for any non-zero input. + // This avoids "dead" wheel ticks when `events_per_tick` is mis-detected or overridden. 
+        if stream.is_wheel_like() && desired_lines == 0 && stream.accumulated_events != 0 {
+            desired_lines = stream.accumulated_events.signum() * MIN_LINES_PER_WHEEL_STREAM;
+        }
+
+        let mut delta = desired_lines - stream.applied_lines;
+        if delta == 0 {
+            return 0;
+        }
+
+        delta = delta.clamp(-MAX_ACCUMULATED_LINES, MAX_ACCUMULATED_LINES);
+        stream.applied_lines = stream.applied_lines.saturating_add(delta);
+        *last_redraw_at = now;
+        delta
+    }
+
+    /// Determine when the caller should next call [`MouseScrollState::on_tick_at`].
+    ///
+    /// While a stream is active, we need follow-up ticks for two reasons:
+    ///
+    /// - **Stream closure**: once idle for [`STREAM_GAP`], we finalize the stream.
+    /// - **Trackpad coalescing**: if whole lines are pending but we haven't hit
+    ///   [`REDRAW_CADENCE`] yet, we schedule an earlier tick so the viewport updates promptly.
+    ///
+    /// Returning `None` means no stream is active (or it is already past the gap threshold).
+    fn next_tick_in(&self, now: Instant) -> Option<Duration> {
+        let stream = self.stream.as_ref()?;
+        let gap = now.duration_since(stream.last);
+        if gap > STREAM_GAP {
+            return None;
+        }
+
+        let mut next = STREAM_GAP.saturating_sub(gap);
+
+        // If we've accumulated at least one whole line but haven't flushed yet (because the last
+        // event arrived before the redraw cadence elapsed), schedule an earlier tick so we can
+        // flush promptly.
+        let desired_lines = stream.desired_lines_f32(self.carry_lines).trunc() as i32;
+        if desired_lines != stream.applied_lines {
+            let since_redraw = now.duration_since(self.last_redraw_at);
+            let until_redraw = if since_redraw >= REDRAW_CADENCE {
+                Duration::from_millis(0)
+            } else {
+                REDRAW_CADENCE.saturating_sub(since_redraw)
+            };
+            next = next.min(until_redraw);
+        }
+
+        Some(next)
+    }
+}
+
+impl Default for MouseScrollState {
+    fn default() -> Self {
+        Self::new_at(Instant::now())
+    }
+}
+
+#[derive(Clone, Debug)]
+/// Per-stream state accumulated while the user performs one scroll gesture.
+///
+/// A "stream" corresponds to one contiguous gesture as defined by [`STREAM_GAP`] (silence) and
+/// direction changes. The stream accumulates raw event counts and converts them into a desired
+/// total line position via [`ScrollConfig`]. The outer [`MouseScrollState`] then applies only the
+/// delta between `desired_total` and `applied_lines` so callers can treat scroll updates as
+/// incremental line deltas.
+///
+/// This type is intentionally not exposed outside this module. The public API is the pair of
+/// entrypoints:
+///
+/// - [`MouseScrollState::on_scroll_event_at`] for new events.
+/// - [`MouseScrollState::on_tick_at`] for idle-gap closure and coalesced flush.
+///
+/// See `codex-rs/tui2/docs/scroll_input_model.md` for the full rationale and probe-derived
+/// constants.
+struct ScrollStream {
+    start: Instant,
+    last: Instant,
+    direction: ScrollDirection,
+    event_count: usize,
+    accumulated_events: i32,
+    applied_lines: i32,
+    config: ScrollConfig,
+    kind: ScrollStreamKind,
+    first_tick_completed_at: Option<Instant>,
+    just_promoted: bool,
+}
+
+impl ScrollStream {
+    /// Start a new stream at `now`.
+    ///
+    /// The initial `kind` is [`ScrollStreamKind::Unknown`]. In auto mode, streams begin behaving
+    /// like trackpads (to avoid overshoot) until [`ScrollStream::maybe_promote_kind`] promotes the
+    /// stream to wheel-like.
+    fn new(now: Instant, direction: ScrollDirection, config: ScrollConfig) -> Self {
+        Self {
+            start: now,
+            last: now,
+            direction,
+            event_count: 0,
+            accumulated_events: 0,
+            applied_lines: 0,
+            config,
+            kind: ScrollStreamKind::Unknown,
+            first_tick_completed_at: None,
+            just_promoted: false,
+        }
+    }
+
+    /// Record one raw event in the stream.
+    ///
+    /// This updates the stream's last-seen timestamp, direction, and counters. Counters are
+    /// clamped to avoid floods and numeric blowups when terminals emit extremely dense streams.
+ fn push_event(&mut self, now: Instant, direction: ScrollDirection) { + self.last = now; + self.direction = direction; + self.event_count = self + .event_count + .saturating_add(1) + .min(MAX_EVENTS_PER_STREAM); + self.accumulated_events = (self.accumulated_events + direction.sign()).clamp( + -(MAX_EVENTS_PER_STREAM as i32), + MAX_EVENTS_PER_STREAM as i32, + ); + } + + /// Promote an auto-mode stream to wheel-like if the first tick completes quickly. + /// + /// Terminals often batch a wheel notch into a short burst of `events_per_tick` raw events. + /// When we observe at least that many events and they arrived within + /// [`ScrollConfig::wheel_tick_detect_max`], we treat the stream as wheel-like so a notch + /// scrolls a fixed multi-line amount (classic feel). + /// + /// We only attempt this when `events_per_tick >= 2`. In 1-event-per-tick terminals there is + /// no "tick completion time" signal; auto-mode handles those via + /// [`ScrollStream::finalize_kind`]'s end-of-stream fallback. + fn maybe_promote_kind(&mut self, now: Instant) { + if self.config.mode != ScrollInputMode::Auto { + return; + } + if self.kind != ScrollStreamKind::Unknown { + return; + } + + let events_per_tick = self.config.events_per_tick.max(1) as usize; + if events_per_tick >= 2 && self.event_count >= events_per_tick { + self.first_tick_completed_at.get_or_insert(now); + let elapsed = now.duration_since(self.start); + if elapsed <= self.config.wheel_tick_detect_max { + self.kind = ScrollStreamKind::Wheel; + self.just_promoted = true; + } + } + } + + /// Finalize wheel/trackpad classification for the stream. + /// + /// In forced modes (`wheel`/`trackpad`), this simply sets the stream kind. + /// + /// In auto mode, streams that were not promoted to wheel-like remain trackpad-like, except + /// for a small end-of-stream fallback for 1-event-per-tick terminals. 
That fallback treats a + /// very small, short-lived stream as wheel-like so wheels in WezTerm/iTerm/VS Code still get + /// the expected multi-line notch behavior. + fn finalize_kind(&mut self) { + match self.config.mode { + ScrollInputMode::Wheel => self.kind = ScrollStreamKind::Wheel, + ScrollInputMode::Trackpad => self.kind = ScrollStreamKind::Trackpad, + ScrollInputMode::Auto => { + if self.kind != ScrollStreamKind::Unknown { + return; + } + // If we didn't see a fast-completing first tick, we keep treating the stream as + // trackpad-like. The only exception is terminals that emit 1 event per wheel tick: + // we can't observe a "tick completion time" there, so we use a conservative + // end-of-stream fallback for *very small* bursts. + let duration = self.last.duration_since(self.start); + if self.config.events_per_tick <= 1 + && self.event_count <= 2 + && duration <= self.config.wheel_like_max_duration + { + self.kind = ScrollStreamKind::Wheel; + } else { + self.kind = ScrollStreamKind::Trackpad; + } + } + } + } + + /// Whether this stream should currently behave like a wheel. + /// + /// In auto mode, streams are wheel-like only after we promote them (or after the 1-event + /// fallback triggers on finalization). While `kind` is still unknown, we treat the stream as + /// trackpad-like to avoid overshooting. + fn is_wheel_like(&self) -> bool { + match self.config.mode { + ScrollInputMode::Wheel => true, + ScrollInputMode::Trackpad => false, + ScrollInputMode::Auto => matches!(self.kind, ScrollStreamKind::Wheel), + } + } + + /// The per-mode lines-per-tick scaling factor. + /// + /// In auto mode, unknown streams use the trackpad factor until promoted. 
+ fn effective_lines_per_tick_f32(&self) -> f32 { + match self.config.mode { + ScrollInputMode::Wheel => self.config.wheel_lines_per_tick_f32(), + ScrollInputMode::Trackpad => self.config.trackpad_lines_per_tick_f32(), + ScrollInputMode::Auto => match self.kind { + ScrollStreamKind::Wheel => self.config.wheel_lines_per_tick_f32(), + ScrollStreamKind::Trackpad | ScrollStreamKind::Unknown => { + self.config.trackpad_lines_per_tick_f32() + } + }, + } + } + + /// Compute the desired total line position for this stream (including trackpad carry). + /// + /// This converts raw event counts into line units using the appropriate divisor and scaling: + /// + /// - Wheel-like: `lines = events * (wheel_lines_per_tick / events_per_tick)` + /// - Trackpad-like: `lines = events * (trackpad_lines_per_tick / min(events_per_tick, 3))` + /// + /// For trackpad-like streams we also add `carry_lines` (fractional remainder from previous + /// streams) and then apply bounded acceleration. The returned value is clamped as a guardrail. + fn desired_lines_f32(&self, carry_lines: f32) -> f32 { + let events_per_tick = if self.is_wheel_like() { + self.config.events_per_tick_f32() + } else { + self.config.trackpad_events_per_tick_f32() + }; + let lines_per_tick = self.effective_lines_per_tick_f32(); + + // Note: clamping here is a guardrail; the primary protection is limiting event_count. + let mut total = (self.accumulated_events as f32 * (lines_per_tick / events_per_tick)) + .clamp( + -(MAX_ACCUMULATED_LINES as f32), + MAX_ACCUMULATED_LINES as f32, + ); + if !self.is_wheel_like() { + total = (total + carry_lines).clamp( + -(MAX_ACCUMULATED_LINES as f32), + MAX_ACCUMULATED_LINES as f32, + ); + + // Trackpad acceleration: keep small swipes precise, but speed up large/fast swipes so + // they can cover more content. This is intentionally simple and bounded. 
+ let event_count = self.accumulated_events.abs() as f32; + let accel = (1.0 + (event_count / self.config.trackpad_accel_events_f32())) + .clamp(1.0, self.config.trackpad_accel_max_f32()); + total = (total * accel).clamp( + -(MAX_ACCUMULATED_LINES as f32), + MAX_ACCUMULATED_LINES as f32, + ); + } + total + } +} + +#[cfg(test)] +mod tests { + use super::*; + use pretty_assertions::assert_eq; + + fn terminal_info_named(name: TerminalName) -> TerminalInfo { + TerminalInfo { + name, + term_program: None, + version: None, + term: None, + multiplexer: None, + } + } + + #[test] + fn terminal_overrides_match_current_defaults() { + let wezterm = ScrollConfig::from_terminal( + &terminal_info_named(TerminalName::WezTerm), + ScrollConfigOverrides::default(), + ); + let warp = ScrollConfig::from_terminal( + &terminal_info_named(TerminalName::WarpTerminal), + ScrollConfigOverrides::default(), + ); + let ghostty = ScrollConfig::from_terminal( + &terminal_info_named(TerminalName::Ghostty), + ScrollConfigOverrides::default(), + ); + let unknown = ScrollConfig::from_terminal( + &terminal_info_named(TerminalName::Unknown), + ScrollConfigOverrides::default(), + ); + + assert_eq!(wezterm.events_per_tick, 1); + assert_eq!(wezterm.wheel_lines_per_tick, DEFAULT_WHEEL_LINES_PER_TICK); + assert_eq!(warp.events_per_tick, 9); + assert_eq!(ghostty.events_per_tick, 3); + assert_eq!(unknown.events_per_tick, DEFAULT_EVENTS_PER_TICK); + } + + #[test] + fn wheel_tick_scrolls_three_lines_even_when_terminal_emits_three_events() { + let config = ScrollConfig::from_terminal( + &terminal_info_named(TerminalName::AppleTerminal), + ScrollConfigOverrides { + events_per_tick: Some(3), + mode: Some(ScrollInputMode::Auto), + ..ScrollConfigOverrides::default() + }, + ); + let base = Instant::now(); + let mut state = MouseScrollState::new_at(base); + + // Simulate a single wheel notch in terminals that emit 3 raw events per tick. 
+ let _ = state.on_scroll_event_at( + base + Duration::from_millis(1), + ScrollDirection::Down, + config, + ); + let _ = state.on_scroll_event_at( + base + Duration::from_millis(2), + ScrollDirection::Down, + config, + ); + let update = state.on_scroll_event_at( + base + Duration::from_millis(3), + ScrollDirection::Down, + config, + ); + + assert_eq!( + update, + ScrollUpdate { + lines: 3, + next_tick_in: Some(Duration::from_millis(STREAM_GAP_MS)), + } + ); + } + + #[test] + fn wheel_tick_scrolls_three_lines_when_terminal_emits_nine_events() { + let config = ScrollConfig::from_terminal( + &terminal_info_named(TerminalName::WarpTerminal), + ScrollConfigOverrides { + events_per_tick: Some(9), + mode: Some(ScrollInputMode::Auto), + ..ScrollConfigOverrides::default() + }, + ); + let base = Instant::now(); + let mut state = MouseScrollState::new_at(base); + + let mut update = ScrollUpdate::default(); + for idx in 0..9u64 { + update = state.on_scroll_event_at( + base + Duration::from_millis(idx + 1), + ScrollDirection::Down, + config, + ); + } + assert_eq!(update.lines, 3); + } + + #[test] + fn wheel_lines_override_scales_wheel_ticks() { + let config = ScrollConfig::from_terminal( + &terminal_info_named(TerminalName::AppleTerminal), + ScrollConfigOverrides { + events_per_tick: Some(3), + wheel_lines_per_tick: Some(2), + mode: Some(ScrollInputMode::Wheel), + ..ScrollConfigOverrides::default() + }, + ); + let base = Instant::now(); + let mut state = MouseScrollState::new_at(base); + + let first = state.on_scroll_event_at( + base + Duration::from_millis(1), + ScrollDirection::Down, + config, + ); + let second = state.on_scroll_event_at( + base + Duration::from_millis(2), + ScrollDirection::Down, + config, + ); + let third = state.on_scroll_event_at( + base + Duration::from_millis(3), + ScrollDirection::Down, + config, + ); + + assert_eq!(first.lines + second.lines + third.lines, 2); + } + + #[test] + fn ghostty_trackpad_is_not_penalized_by_wheel_event_density() { + let 
config = ScrollConfig::from_terminal( + &terminal_info_named(TerminalName::Ghostty), + ScrollConfigOverrides { + events_per_tick: Some(9), + mode: Some(ScrollInputMode::Trackpad), + ..ScrollConfigOverrides::default() + }, + ); + let base = Instant::now(); + let mut state = MouseScrollState::new_at(base); + + let _ = state.on_scroll_event_at( + base + Duration::from_millis(1), + ScrollDirection::Down, + config, + ); + let _ = state.on_scroll_event_at( + base + Duration::from_millis(2), + ScrollDirection::Down, + config, + ); + let update = state.on_scroll_event_at( + base + Duration::from_millis(REDRAW_CADENCE_MS + 1), + ScrollDirection::Down, + config, + ); + + // Trackpad mode uses a capped events-per-tick for normalization, so 3 events should + // produce at least one line even when the wheel tick size is 9. + assert_eq!(update.lines, 1); + } + + #[test] + fn trackpad_acceleration_speeds_up_large_swipes_without_affecting_small_swipes_too_much() { + let config = ScrollConfig::from_terminal( + &terminal_info_named(TerminalName::Ghostty), + ScrollConfigOverrides { + events_per_tick: Some(9), + trackpad_accel_events: Some(30), + trackpad_accel_max: Some(3), + mode: Some(ScrollInputMode::Trackpad), + ..ScrollConfigOverrides::default() + }, + ); + let base = Instant::now(); + let mut state = MouseScrollState::new_at(base); + + let mut total_lines = 0; + for idx in 0..60u64 { + let update = state.on_scroll_event_at( + base + Duration::from_millis((idx + 1) * (REDRAW_CADENCE_MS + 1)), + ScrollDirection::Down, + config, + ); + total_lines += update.lines; + } + total_lines += state + .on_tick_at(base + Duration::from_millis(60 * (REDRAW_CADENCE_MS + 1)) + STREAM_GAP) + .lines; + + // Without acceleration, 60 events at 1/3 line each would be ~20 lines. With acceleration, + // we should be meaningfully faster. 
+ assert!(total_lines >= 30, "total_lines={total_lines}"); + } + + #[test] + fn direction_flip_closes_previous_stream() { + let config = ScrollConfig::from_terminal( + &terminal_info_named(TerminalName::AppleTerminal), + ScrollConfigOverrides { + events_per_tick: Some(3), + mode: Some(ScrollInputMode::Auto), + ..ScrollConfigOverrides::default() + }, + ); + let base = Instant::now(); + let mut state = MouseScrollState::new_at(base); + + let _ = + state.on_scroll_event_at(base + Duration::from_millis(1), ScrollDirection::Up, config); + let _ = + state.on_scroll_event_at(base + Duration::from_millis(2), ScrollDirection::Up, config); + let up = + state.on_scroll_event_at(base + Duration::from_millis(3), ScrollDirection::Up, config); + let down = state.on_scroll_event_at( + base + Duration::from_millis(4), + ScrollDirection::Down, + config, + ); + + assert_eq!( + up, + ScrollUpdate { + lines: -3, + next_tick_in: Some(Duration::from_millis(STREAM_GAP_MS)), + } + ); + assert_eq!( + down, + ScrollUpdate { + lines: 0, + next_tick_in: Some(Duration::from_millis(STREAM_GAP_MS)), + } + ); + } + + #[test] + fn continuous_stream_coalesces_redraws() { + let config = ScrollConfig::from_terminal( + &terminal_info_named(TerminalName::AppleTerminal), + ScrollConfigOverrides { + events_per_tick: Some(1), + mode: Some(ScrollInputMode::Trackpad), + ..ScrollConfigOverrides::default() + }, + ); + let base = Instant::now(); + let mut state = MouseScrollState::new_at(base); + + let first = state.on_scroll_event_at( + base + Duration::from_millis(1), + ScrollDirection::Down, + config, + ); + let second = state.on_scroll_event_at( + base + Duration::from_millis(10), + ScrollDirection::Down, + config, + ); + let third = state.on_scroll_event_at( + base + Duration::from_millis(20), + ScrollDirection::Down, + config, + ); + + assert_eq!( + first, + ScrollUpdate { + lines: 0, + next_tick_in: Some(Duration::from_millis(REDRAW_CADENCE_MS - 1)), + } + ); + assert_eq!( + second, + ScrollUpdate { + 
lines: 0, + next_tick_in: Some(Duration::from_millis(REDRAW_CADENCE_MS - 10)), + } + ); + assert_eq!( + third, + ScrollUpdate { + lines: 3, + next_tick_in: Some(Duration::from_millis(STREAM_GAP_MS)), + } + ); + } + + #[test] + fn invert_direction_flips_sign() { + let config = ScrollConfig::from_terminal( + &terminal_info_named(TerminalName::AppleTerminal), + ScrollConfigOverrides { + events_per_tick: Some(1), + invert_direction: true, + ..ScrollConfigOverrides::default() + }, + ); + let base = Instant::now(); + let mut state = MouseScrollState::new_at(base); + + let update = state.on_scroll_event_at( + base + Duration::from_millis(REDRAW_CADENCE_MS + 1), + ScrollDirection::Up, + config, + ); + + assert_eq!( + update, + ScrollUpdate { + lines: 1, + next_tick_in: Some(Duration::from_millis(STREAM_GAP_MS)), + } + ); + } +} diff --git a/codex-rs/tui2/src/wrapping.rs b/codex-rs/tui2/src/wrapping.rs index c29106651d4..00c2812bb77 100644 --- a/codex-rs/tui2/src/wrapping.rs +++ b/codex-rs/tui2/src/wrapping.rs @@ -155,6 +155,13 @@ pub(crate) fn word_wrap_line<'a, O>(line: &'a Line<'a>, width_or_options: O) -> where O: Into>, { + let (lines, _joiners) = word_wrap_line_with_joiners(line, width_or_options); + lines +} + +fn flatten_line_and_bounds<'a>( + line: &'a Line<'a>, +) -> (String, Vec<(Range, ratatui::style::Style)>) { // Flatten the line and record span byte ranges. 
let mut flat = String::new();
     let mut span_bounds = Vec::new();
@@ -166,6 +173,43 @@ where
         acc += text.len();
         span_bounds.push((start..acc, s.style));
     }
+    (flat, span_bounds)
+}
+
+fn build_wrapped_line_from_range<'a>(
+    indent: Line<'a>,
+    original: &'a Line<'a>,
+    span_bounds: &[(Range<usize>, ratatui::style::Style)],
+    range: &Range<usize>,
+) -> Line<'a> {
+    let mut out = indent.style(original.style);
+    let sliced = slice_line_spans(original, span_bounds, range);
+    let mut spans = out.spans;
+    spans.append(
+        &mut sliced
+            .spans
+            .into_iter()
+            .map(|s| s.patch_style(original.style))
+            .collect(),
+    );
+    out.spans = spans;
+    out
+}
+
+/// Wrap a single line and also return, for each output line, the string that should be inserted
+/// when joining it to the previous output line as a *soft wrap*.
+///
+/// - The first output line always has `None`.
+/// - Continuation lines have `Some(joiner)` where `joiner` is the exact substring (often spaces,
+///   possibly empty) that was skipped at the wrap boundary.
+pub(crate) fn word_wrap_line_with_joiners<'a, O>(
+    line: &'a Line<'a>,
+    width_or_options: O,
+) -> (Vec<Line<'a>>, Vec<Option<String>>)
+where
+    O: Into<RtOptions<'a>>,
+{
+    let (flat, span_bounds) = flatten_line_and_bounds(line);
 
     let rt_opts: RtOptions<'a> = width_or_options.into();
     let opts = Options::new(rt_opts.width)
@@ -176,7 +220,9 @@ where
         .word_splitter(rt_opts.word_splitter);
 
     let mut out: Vec<Line<'a>> = Vec::new();
+    let mut joiners: Vec<Option<String>> = Vec::new();
+
+    // The first output line uses the initial indent and a reduced available width.
     // Compute first line range with reduced width due to initial indent.
     
let initial_width_available = opts .width @@ -184,54 +230,100 @@ where .max(1); let initial_wrapped = wrap_ranges_trim(&flat, opts.clone().width(initial_width_available)); let Some(first_line_range) = initial_wrapped.first() else { - return vec![rt_opts.initial_indent.clone()]; + out.push(rt_opts.initial_indent.clone()); + joiners.push(None); + return (out, joiners); }; - // Build first wrapped line with initial indent. - let mut first_line = rt_opts.initial_indent.clone().style(line.style); - { - let sliced = slice_line_spans(line, &span_bounds, first_line_range); - let mut spans = first_line.spans; - spans.append( - &mut sliced - .spans - .into_iter() - .map(|s| s.patch_style(line.style)) - .collect(), - ); - first_line.spans = spans; - out.push(first_line); - } - - // Wrap the remainder using subsequent indent width and map back to original indices. - let base = first_line_range.end; + let first_line = build_wrapped_line_from_range( + rt_opts.initial_indent.clone(), + line, + &span_bounds, + first_line_range, + ); + out.push(first_line); + joiners.push(None); + + // Wrap the remainder using subsequent indent width. We also compute the joiner strings that + // were skipped at each wrap boundary so callers can treat these as soft wraps during copy. 
+ let mut base = first_line_range.end; let skip_leading_spaces = flat[base..].chars().take_while(|c| *c == ' ').count(); - let base = base + skip_leading_spaces; + let joiner_first = flat[base..base.saturating_add(skip_leading_spaces)].to_string(); + base = base.saturating_add(skip_leading_spaces); + let subsequent_width_available = opts .width .saturating_sub(rt_opts.subsequent_indent.width()) .max(1); - let remaining_wrapped = wrap_ranges_trim(&flat[base..], opts.width(subsequent_width_available)); - for r in &remaining_wrapped { + let remaining = &flat[base..]; + let remaining_wrapped = wrap_ranges_trim(remaining, opts.width(subsequent_width_available)); + + let mut prev_end = 0usize; + for (i, r) in remaining_wrapped.iter().enumerate() { if r.is_empty() { continue; } - let mut subsequent_line = rt_opts.subsequent_indent.clone().style(line.style); + + // Each continuation line has `Some(joiner)`. The joiner may be empty (e.g. splitting a + // long word), but the distinction from `None` is important: `None` represents a hard break. + let joiner = if i == 0 { + joiner_first.clone() + } else { + remaining[prev_end..r.start].to_string() + }; + prev_end = r.end; + let offset_range = (r.start + base)..(r.end + base); - let sliced = slice_line_spans(line, &span_bounds, &offset_range); - let mut spans = subsequent_line.spans; - spans.append( - &mut sliced - .spans - .into_iter() - .map(|s| s.patch_style(line.style)) - .collect(), + let subsequent_line = build_wrapped_line_from_range( + rt_opts.subsequent_indent.clone(), + line, + &span_bounds, + &offset_range, ); - subsequent_line.spans = spans; out.push(subsequent_line); + joiners.push(Some(joiner)); } - out + (out, joiners) +} + +/// Like `word_wrap_lines`, but also returns a parallel vector of soft-wrap joiners. +/// +/// The joiner is `None` when the line break is a hard break (between input lines), and `Some` +/// when the line break is a soft wrap continuation produced by the wrapping algorithm. 
+#[allow(private_bounds)] // IntoLineInput isn't public, but it doesn't really need to be.
+pub(crate) fn word_wrap_lines_with_joiners<'a, I, O, L>(
+    lines: I,
+    width_or_options: O,
+) -> (Vec<Line<'static>>, Vec<Option<String>>)
+where
+    I: IntoIterator<Item = L>,
+    L: IntoLineInput<'a>,
+    O: Into<RtOptions<'a>>,
+{
+    let base_opts: RtOptions<'a> = width_or_options.into();
+    let mut out: Vec<Line<'static>> = Vec::new();
+    let mut joiners: Vec<Option<String>> = Vec::new();
+
+    for (idx, line) in lines.into_iter().enumerate() {
+        let line_input = line.into_line_input();
+        let opts = if idx == 0 {
+            base_opts.clone()
+        } else {
+            let mut o = base_opts.clone();
+            let sub = o.subsequent_indent.clone();
+            o = o.initial_indent(sub);
+            o
+        };
+
+        let (wrapped, wrapped_joiners) = word_wrap_line_with_joiners(line_input.as_ref(), opts);
+        for (l, j) in wrapped.into_iter().zip(wrapped_joiners) {
+            out.push(crate::render::line_utils::line_to_static(&l));
+            joiners.push(j);
+        }
+    }
+
+    (out, joiners)
 }
 
 /// Utilities to allow wrapping either borrowed or owned lines.
@@ -552,6 +644,66 @@ mod tests {
         assert_eq!(concat_line(&out[1]), "cd");
     }
 
+    #[test]
+    fn wrap_line_with_joiners_matches_word_wrap_line_output() {
+        let opts = RtOptions::new(8)
+            .initial_indent(Line::from("- "))
+            .subsequent_indent(Line::from("  "));
+        let line = Line::from(vec!["hello ".red(), "world".into()]);
+
+        let out = word_wrap_line(&line, opts.clone());
+        let (with_joiners, joiners) = word_wrap_line_with_joiners(&line, opts);
+
+        assert_eq!(
+            with_joiners.iter().map(concat_line).collect_vec(),
+            out.iter().map(concat_line).collect_vec()
+        );
+        assert_eq!(joiners.len(), with_joiners.len());
+        assert_eq!(
+            joiners.first().cloned().unwrap_or(Some("x".to_string())),
+            None
+        );
+    }
+
+    #[test]
+    fn wrap_line_with_joiners_includes_skipped_spaces() {
+        let line = Line::from("hello world");
+        let (wrapped, joiners) = word_wrap_line_with_joiners(&line, 8);
+
+        assert_eq!(
+            wrapped.iter().map(concat_line).collect_vec(),
+            vec!["hello", "world"]
+        );
+        assert_eq!(joiners, vec![None, Some(" 
".to_string())]); + } + + #[test] + fn wrap_line_with_joiners_uses_empty_joiner_for_mid_word_split() { + let line = Line::from("abcd"); + let (wrapped, joiners) = word_wrap_line_with_joiners(&line, 2); + + assert_eq!( + wrapped.iter().map(concat_line).collect_vec(), + vec!["ab", "cd"] + ); + assert_eq!(joiners, vec![None, Some("".to_string())]); + } + + #[test] + fn wrap_lines_with_joiners_marks_hard_breaks_between_input_lines() { + let (wrapped, joiners) = + word_wrap_lines_with_joiners([Line::from("hello world"), Line::from("foo bar")], 5); + + assert_eq!( + wrapped.iter().map(concat_line).collect_vec(), + vec!["hello", "world", "foo", "bar"] + ); + assert_eq!( + joiners, + vec![None, Some(" ".to_string()), None, Some(" ".to_string())] + ); + } + #[test] fn wrap_lines_applies_initial_indent_only_once() { let opts = RtOptions::new(8) diff --git a/codex-rs/tui2/tooltips.txt b/codex-rs/tui2/tooltips.txt index 94ad487bbb0..f2fd1f55c72 100644 --- a/codex-rs/tui2/tooltips.txt +++ b/codex-rs/tui2/tooltips.txt @@ -1,6 +1,5 @@ Use /compact when the conversation gets long to summarize history and free up context. Start a fresh idea with /new; the previous session stays in history. -If a turn went sideways, /undo asks Codexel to revert the last changes. Use /feedback to send logs to the maintainers when something looks off. Switch models or reasoning effort quickly with /model. You can run any shell command from Codexel using `!` (e.g. `!ls`) diff --git a/codex-rs/utils/absolute-path/src/lib.rs b/codex-rs/utils/absolute-path/src/lib.rs index 77d16e2d5ed..2eb7d487580 100644 --- a/codex-rs/utils/absolute-path/src/lib.rs +++ b/codex-rs/utils/absolute-path/src/lib.rs @@ -14,7 +14,7 @@ use ts_rs::TS; /// guaranteed to be canonicalized or exist on the filesystem). /// /// IMPORTANT: When deserializing an `AbsolutePathBuf`, a base path must be set -/// using `AbsolutePathBufGuard::new(base_path)`. If no base path is set, the +/// using [AbsolutePathBufGuard::new]. 
If no base path is set, the
 /// deserialization will fail unless the path being deserialized is already
 /// absolute.
 #[derive(Debug, Clone, PartialEq, Eq, Serialize, JsonSchema, TS)]
@@ -43,6 +43,13 @@ impl AbsolutePathBuf {
         Self::resolve_path_against_base(path, &self.0)
     }
 
+    pub fn parent(&self) -> Option<Self> {
+        self.0.parent().map(|p| {
+            #[expect(clippy::expect_used)]
+            Self::from_absolute_path(p).expect("parent of AbsolutePathBuf must be absolute")
+        })
+    }
+
     pub fn as_path(&self) -> &Path {
         &self.0
     }
@@ -112,6 +119,10 @@ thread_local! {
     static ABSOLUTE_PATH_BASE: RefCell<Option<PathBuf>> = const { RefCell::new(None) };
 }
 
+/// Ensure this guard is held while deserializing `AbsolutePathBuf` values to
+/// provide a base path for resolving relative paths. Because this relies on
+/// thread-local storage, the deserialization must be single-threaded and
+/// occur on the same thread that created the guard.
 pub struct AbsolutePathBufGuard;
 
 impl AbsolutePathBufGuard {
diff --git a/codex-rs/utils/cargo-bin/Cargo.toml b/codex-rs/utils/cargo-bin/Cargo.toml
new file mode 100644
index 00000000000..d8e6877e6e8
--- /dev/null
+++ b/codex-rs/utils/cargo-bin/Cargo.toml
@@ -0,0 +1,12 @@
+[package]
+name = "codex-utils-cargo-bin"
+version.workspace = true
+edition.workspace = true
+license.workspace = true
+
+[lints]
+workspace = true
+
+[dependencies]
+assert_cmd = { workspace = true }
+thiserror = { workspace = true }
diff --git a/codex-rs/utils/cargo-bin/src/lib.rs b/codex-rs/utils/cargo-bin/src/lib.rs
new file mode 100644
index 00000000000..16f54939150
--- /dev/null
+++ b/codex-rs/utils/cargo-bin/src/lib.rs
@@ -0,0 +1,127 @@
+use std::ffi::OsString;
+use std::path::PathBuf;
+
+#[derive(Debug, thiserror::Error)]
+pub enum CargoBinError {
+    #[error("failed to read current exe")]
+    CurrentExe {
+        #[source]
+        source: std::io::Error,
+    },
+    #[error("failed to read current directory")]
+    CurrentDir {
+        #[source]
+        source: std::io::Error,
+    },
+    #[error("CARGO_BIN_EXE env var {key} resolved to 
{path:?}, but it does not exist")]
+    ResolvedPathDoesNotExist { key: String, path: PathBuf },
+    #[error("could not locate binary {name:?}; tried env vars {env_keys:?}; {fallback}")]
+    NotFound {
+        name: String,
+        env_keys: Vec<String>,
+        fallback: String,
+    },
+}
+
+/// Returns an absolute path to a binary target built for the current test run.
+///
+/// In `cargo test`, `CARGO_BIN_EXE_*` env vars are absolute, but Buck2 may set
+/// them to project-relative paths (e.g. `buck-out/...`). Those paths break if a
+/// test later changes its working directory. This helper makes the path
+/// absolute up-front so callers can safely `chdir` afterwards.
+pub fn cargo_bin(name: &str) -> Result<PathBuf, CargoBinError> {
+    let env_keys = cargo_bin_env_keys(name);
+    for key in &env_keys {
+        if let Some(value) = std::env::var_os(key) {
+            return resolve_bin_from_env(key, value);
+        }
+    }
+
+    match assert_cmd::Command::cargo_bin(name) {
+        Ok(cmd) => {
+            let abs = absolutize_from_buck_or_cwd(PathBuf::from(cmd.get_program()))?;
+            if abs.exists() {
+                Ok(abs)
+            } else {
+                Err(CargoBinError::ResolvedPathDoesNotExist {
+                    key: "assert_cmd::Command::cargo_bin".to_owned(),
+                    path: abs,
+                })
+            }
+        }
+        Err(err) => Err(CargoBinError::NotFound {
+            name: name.to_owned(),
+            env_keys,
+            fallback: format!("assert_cmd fallback failed: {err}"),
+        }),
+    }
+}
+
+fn cargo_bin_env_keys(name: &str) -> Vec<String> {
+    let mut keys = Vec::with_capacity(2);
+    keys.push(format!("CARGO_BIN_EXE_{name}"));
+
+    // Cargo replaces dashes in target names when exporting env vars.
+    let underscore_name = name.replace('-', "_");
+    if underscore_name != name {
+        keys.push(format!("CARGO_BIN_EXE_{underscore_name}"));
+    }
+
+    keys
+}
+
+fn resolve_bin_from_env(key: &str, value: OsString) -> Result<PathBuf, CargoBinError> {
+    let abs = absolutize_from_buck_or_cwd(PathBuf::from(value))?;
+
+    if abs.exists() {
+        Ok(abs)
+    } else {
+        Err(CargoBinError::ResolvedPathDoesNotExist {
+            key: key.to_owned(),
+            path: abs,
+        })
+    }
+}
+
+fn absolutize_from_buck_or_cwd(path: PathBuf) -> Result<PathBuf, CargoBinError> {
+    if path.is_absolute() {
+        return Ok(path);
+    }
+
+    if let Some(root) =
+        buck_project_root().map_err(|source| CargoBinError::CurrentExe { source })?
+    {
+        return Ok(root.join(path));
+    }
+
+    Ok(std::env::current_dir()
+        .map_err(|source| CargoBinError::CurrentDir { source })?
+        .join(path))
+}
+
+/// Best-effort attempt to find the Buck project root for the currently running
+/// process.
+///
+/// Prefer this over `env!("CARGO_MANIFEST_DIR")` when running under Buck2: our
+/// Buck generator sets `CARGO_MANIFEST_DIR="."` for compilation, which makes
+/// `env!("CARGO_MANIFEST_DIR")` unusable for locating workspace files.
+pub fn buck_project_root() -> Result<Option<PathBuf>, std::io::Error> {
+    if let Some(root) = std::env::var_os("BUCK_PROJECT_ROOT") {
+        let root = PathBuf::from(root);
+        if root.is_absolute() {
+            return Ok(Some(root));
+        }
+    }
+
+    // Fall back to deriving the project root from the location of the test
+    // runner executable:
+    //   <project-root>/buck-out/v2/gen/.../__tests__/test-binary
+    let exe = std::env::current_exe()?;
+    for ancestor in exe.ancestors() {
+        if ancestor.file_name().is_some_and(|name| name == "buck-out") {
+            return Ok(ancestor.parent().map(PathBuf::from));
+        }
+    }
+
+    Ok(None)
+}
diff --git a/codex-rs/windows-sandbox-rs/Cargo.toml b/codex-rs/windows-sandbox-rs/Cargo.toml
index eec3925ffa1..aa872035bc0 100644
--- a/codex-rs/windows-sandbox-rs/Cargo.toml
+++ b/codex-rs/windows-sandbox-rs/Cargo.toml
@@ -47,7 +47,7 @@ version = "0.8"
 [dependencies.dirs-next]
 version = "2.0"
 
-[dependencies.windows-sys]
+[target.'cfg(windows)'.dependencies.windows-sys]
 features = [
     "Win32_Foundation",
     "Win32_System_Diagnostics_Debug",
diff --git a/codex-rs/windows-sandbox-rs/src/acl.rs b/codex-rs/windows-sandbox-rs/src/acl.rs
index eee826d660d..7dd37a9452f 100644
--- a/codex-rs/windows-sandbox-rs/src/acl.rs
+++ b/codex-rs/windows-sandbox-rs/src/acl.rs
@@ -9,7 +9,6 @@ use windows_sys::Win32::Foundation::ERROR_SUCCESS;
 use windows_sys::Win32::Foundation::HLOCAL;
 use windows_sys::Win32::Foundation::INVALID_HANDLE_VALUE;
 use windows_sys::Win32::Security::AclSizeInformation;
-use windows_sys::Win32::Security::Authorization::GetEffectiveRightsFromAclW;
 use windows_sys::Win32::Security::Authorization::GetNamedSecurityInfoW;
 use windows_sys::Win32::Security::Authorization::GetSecurityInfo;
 use windows_sys::Win32::Security::Authorization::SetEntriesInAclW;
@@ -258,100 +257,18 @@ pub unsafe fn dacl_has_write_deny_for_sid(p_dacl: *mut ACL, psid: *mut c_void) -
     false
 }
 
-// Compute effective rights for a trustee SID against a DACL and decide if write is effectively allowed.
-// This accounts for deny ACEs and ordering; falls back to a conservative per-ACE scan if the API fails. -#[allow(dead_code)] -pub unsafe fn dacl_effective_allows_write(p_dacl: *mut ACL, psid: *mut c_void) -> bool { - if p_dacl.is_null() { - return false; - } - let trustee = TRUSTEE_W { - pMultipleTrustee: std::ptr::null_mut(), - MultipleTrusteeOperation: 0, - TrusteeForm: TRUSTEE_IS_SID, - TrusteeType: TRUSTEE_IS_UNKNOWN, - ptstrName: psid as *mut u16, - }; - let mut access: u32 = 0; - let ok = GetEffectiveRightsFromAclW(p_dacl, &trustee, &mut access); - if ok == ERROR_SUCCESS { - // Map generic bits to avoid “missing” write when generic permissions are present. - let mut mapped_access = access; - if (access & GENERIC_WRITE_MASK) != 0 { - mapped_access |= FILE_GENERIC_WRITE | FILE_WRITE_DATA | FILE_APPEND_DATA; - } - if (access & READ_CONTROL) != 0 { - mapped_access |= FILE_GENERIC_READ; - } - let write_bits = FILE_GENERIC_WRITE - | FILE_WRITE_DATA - | FILE_APPEND_DATA - | FILE_WRITE_EA - | FILE_WRITE_ATTRIBUTES; - return (mapped_access & write_bits) != 0; - } - // Fallback: simple allow ACE scan (already ignores inherit-only) - dacl_has_write_allow_for_sid(p_dacl, psid) -} - -#[allow(dead_code)] -pub unsafe fn dacl_effective_allows_mask( - p_dacl: *mut ACL, - psid: *mut c_void, - desired_mask: u32, -) -> bool { - if p_dacl.is_null() { - return false; - } - use windows_sys::Win32::Security::Authorization::GetEffectiveRightsFromAclW; - use windows_sys::Win32::Security::Authorization::TRUSTEE_IS_SID; - use windows_sys::Win32::Security::Authorization::TRUSTEE_IS_UNKNOWN; - use windows_sys::Win32::Security::Authorization::TRUSTEE_W; - - let trustee = TRUSTEE_W { - pMultipleTrustee: std::ptr::null_mut(), - MultipleTrusteeOperation: 0, - TrusteeForm: TRUSTEE_IS_SID, - TrusteeType: TRUSTEE_IS_UNKNOWN, - ptstrName: psid as *mut u16, - }; - let mut access: u32 = 0; - let ok = GetEffectiveRightsFromAclW(p_dacl, &trustee, &mut access); - if ok == ERROR_SUCCESS { - // Map 
generic bits to avoid “missing” when generic permissions are present. - let mut mapped_access = access; - if (access & GENERIC_WRITE_MASK) != 0 { - mapped_access |= FILE_GENERIC_WRITE | FILE_WRITE_DATA | FILE_APPEND_DATA; - } - if (access & READ_CONTROL) != 0 { - mapped_access |= FILE_GENERIC_READ; - } - return (mapped_access & desired_mask) == desired_mask; - } - // Fallbacks on error: if write bits are requested, reuse the write helper; otherwise fail closed. - if (desired_mask & FILE_GENERIC_WRITE) != 0 { - return dacl_effective_allows_write(p_dacl, psid); - } - false -} - -#[allow(dead_code)] const WRITE_ALLOW_MASK: u32 = FILE_GENERIC_READ | FILE_GENERIC_WRITE | FILE_GENERIC_EXECUTE | DELETE | FILE_DELETE_CHILD; -/// Ensure all provided SIDs have an allow ACE with the requested mask on the path. -/// Returns true if any ACE was added. -/// -/// # Safety -/// Caller must pass valid SID pointers and an existing path; free the returned security descriptor with `LocalFree`. -#[allow(dead_code)] -pub unsafe fn ensure_allow_mask_aces( + +unsafe fn ensure_allow_mask_aces_with_inheritance_impl( path: &Path, sids: &[*mut c_void], allow_mask: u32, + inheritance: u32, ) -> Result { let (p_dacl, p_sd) = fetch_dacl_handle(path)?; let mut entries: Vec = Vec::new(); @@ -362,7 +279,7 @@ pub unsafe fn ensure_allow_mask_aces( entries.push(EXPLICIT_ACCESS_W { grfAccessPermissions: allow_mask, grfAccessMode: 2, // SET_ACCESS - grfInheritance: CONTAINER_INHERIT_ACE | OBJECT_INHERIT_ACE, + grfInheritance: inheritance, Trustee: TRUSTEE_W { pMultipleTrustee: std::ptr::null_mut(), MultipleTrusteeOperation: 0, @@ -393,6 +310,9 @@ pub unsafe fn ensure_allow_mask_aces( ); if code3 == ERROR_SUCCESS { added = true; + if !p_new_dacl.is_null() { + LocalFree(p_new_dacl as HLOCAL); + } } else { if !p_new_dacl.is_null() { LocalFree(p_new_dacl as HLOCAL); @@ -402,9 +322,6 @@ pub unsafe fn ensure_allow_mask_aces( } return Err(anyhow!("SetNamedSecurityInfoW failed: {}", code3)); } - if 
!p_new_dacl.is_null() { - LocalFree(p_new_dacl as HLOCAL); - } } else { if !p_sd.is_null() { LocalFree(p_sd as HLOCAL); @@ -418,12 +335,43 @@ pub unsafe fn ensure_allow_mask_aces( Ok(added) } +/// Ensure all provided SIDs have an allow ACE with the requested mask on the path. +/// Returns true if any ACE was added. +/// +/// # Safety +/// Caller must pass valid SID pointers and an existing path; free the returned security descriptor with `LocalFree`. +pub unsafe fn ensure_allow_mask_aces_with_inheritance( + path: &Path, + sids: &[*mut c_void], + allow_mask: u32, + inheritance: u32, +) -> Result { + ensure_allow_mask_aces_with_inheritance_impl(path, sids, allow_mask, inheritance) +} + +/// Ensure all provided SIDs have an allow ACE with the requested mask on the path. +/// Returns true if any ACE was added. +/// +/// # Safety +/// Caller must pass valid SID pointers and an existing path; free the returned security descriptor with `LocalFree`. +pub unsafe fn ensure_allow_mask_aces( + path: &Path, + sids: &[*mut c_void], + allow_mask: u32, +) -> Result { + ensure_allow_mask_aces_with_inheritance( + path, + sids, + allow_mask, + CONTAINER_INHERIT_ACE | OBJECT_INHERIT_ACE, + ) +} + /// Ensure all provided SIDs have a write-capable allow ACE on the path. /// Returns true if any ACE was added. /// /// # Safety /// Caller must pass valid SID pointers and an existing path; free the returned security descriptor with `LocalFree`. 
-#[allow(dead_code)] pub unsafe fn ensure_allow_write_aces(path: &Path, sids: &[*mut c_void]) -> Result { ensure_allow_mask_aces(path, sids, WRITE_ALLOW_MASK) } diff --git a/codex-rs/windows-sandbox-rs/src/command_runner_win.rs b/codex-rs/windows-sandbox-rs/src/command_runner_win.rs index 7171383353b..8f0558e0982 100644 --- a/codex-rs/windows-sandbox-rs/src/command_runner_win.rs +++ b/codex-rs/windows-sandbox-rs/src/command_runner_win.rs @@ -33,6 +33,12 @@ use windows_sys::Win32::System::Threading::TerminateProcess; use windows_sys::Win32::System::Threading::WaitForSingleObject; use windows_sys::Win32::System::Threading::INFINITE; +#[path = "cwd_junction.rs"] +mod cwd_junction; + +#[allow(dead_code)] +mod read_acl_mutex; + #[derive(Debug, Deserialize)] struct RunnerRequest { policy_json_or_preset: String, @@ -146,16 +152,52 @@ pub fn main() -> Result<()> { let h_stdin = open_pipe(&req.stdin_pipe, FILE_GENERIC_READ)?; let h_stdout = open_pipe(&req.stdout_pipe, FILE_GENERIC_WRITE)?; let h_stderr = open_pipe(&req.stderr_pipe, FILE_GENERIC_WRITE)?; + let stdio = Some((h_stdin, h_stdout, h_stderr)); + + // While the read-ACL helper is running, PowerShell can fail to start in the requested CWD due + // to unreadable ancestors. Use a junction CWD for that window; once the helper finishes, go + // back to using the real requested CWD (no probing, no extra state). + let use_junction = match read_acl_mutex::read_acl_mutex_exists() { + Ok(exists) => exists, + Err(err) => { + // Fail-safe: if we can't determine the state, assume the helper might be running and + // use the junction path to avoid CWD failures on unreadable ancestors. 
+ log_note( + &format!("junction: read_acl_mutex_exists failed: {err}; assuming read ACL helper is running"), + log_dir, + ); + true + } + }; + if use_junction { + log_note( + "junction: read ACL helper running; using junction CWD", + log_dir, + ); + } + let effective_cwd = if use_junction { + cwd_junction::create_cwd_junction(&req.cwd, log_dir).unwrap_or_else(|| req.cwd.clone()) + } else { + req.cwd.clone() + }; + log_note( + &format!( + "runner: effective cwd={} (requested {})", + effective_cwd.display(), + req.cwd.display() + ), + log_dir, + ); // Build command and env, spawn with CreateProcessAsUserW. let spawn_result = unsafe { create_process_as_user( h_token, &req.command, - &req.cwd, + &effective_cwd, &req.env_map, Some(&req.codex_home), - Some((h_stdin, h_stdout, h_stderr)), + stdio, ) }; let (proc_info, _si) = match spawn_result { @@ -219,9 +261,5 @@ pub fn main() -> Result<()> { if exit_code != 0 { eprintln!("runner child exited with code {}", exit_code); } - log_note( - &format!("runner exit pid={} code={}", proc_info.hProcess, exit_code), - log_dir, - ); std::process::exit(exit_code); } diff --git a/codex-rs/windows-sandbox-rs/src/cwd_junction.rs b/codex-rs/windows-sandbox-rs/src/cwd_junction.rs new file mode 100644 index 00000000000..4765686b34d --- /dev/null +++ b/codex-rs/windows-sandbox-rs/src/cwd_junction.rs @@ -0,0 +1,142 @@ +#![cfg(target_os = "windows")] + +use codex_windows_sandbox::log_note; +use std::collections::hash_map::DefaultHasher; +use std::hash::Hash; +use std::hash::Hasher; +use std::os::windows::fs::MetadataExt as _; +use std::os::windows::process::CommandExt as _; +use std::path::Path; +use std::path::PathBuf; +use windows_sys::Win32::Storage::FileSystem::FILE_ATTRIBUTE_REPARSE_POINT; + +fn junction_name_for_path(path: &Path) -> String { + let mut hasher = DefaultHasher::new(); + path.to_string_lossy().hash(&mut hasher); + format!("{:x}", hasher.finish()) +} + +fn junction_root_for_userprofile(userprofile: &str) -> PathBuf { + 
PathBuf::from(userprofile) + .join(".codex") + .join(".sandbox") + .join("cwd") +} + +pub fn create_cwd_junction(requested_cwd: &Path, log_dir: Option<&Path>) -> Option { + let userprofile = std::env::var("USERPROFILE").ok()?; + let junction_root = junction_root_for_userprofile(&userprofile); + if let Err(err) = std::fs::create_dir_all(&junction_root) { + log_note( + &format!( + "junction: failed to create {}: {err}", + junction_root.display() + ), + log_dir, + ); + return None; + } + + let junction_path = junction_root.join(junction_name_for_path(requested_cwd)); + if junction_path.exists() { + // Reuse an existing junction if it looks like a reparse point; this keeps the hot path + // cheap and avoids repeatedly shelling out to mklink. + match std::fs::symlink_metadata(&junction_path) { + Ok(md) if (md.file_attributes() & FILE_ATTRIBUTE_REPARSE_POINT) != 0 => { + log_note( + &format!("junction: reusing existing {}", junction_path.display()), + log_dir, + ); + return Some(junction_path); + } + Ok(_) => { + // Unexpected: something else exists at this path (regular directory/file). We'll + // try to remove it if possible and recreate the junction. + log_note( + &format!( + "junction: existing path is not a reparse point, recreating {}", + junction_path.display() + ), + log_dir, + ); + } + Err(err) => { + log_note( + &format!( + "junction: failed to stat existing {}: {err}", + junction_path.display() + ), + log_dir, + ); + return None; + } + } + + if let Err(err) = std::fs::remove_dir(&junction_path) { + log_note( + &format!( + "junction: failed to remove existing {}: {err}", + junction_path.display() + ), + log_dir, + ); + return None; + } + } + + let link = junction_path.to_string_lossy().to_string(); + let target = requested_cwd.to_string_lossy().to_string(); + + // Use `cmd /c` so we can call `mklink` (a cmd builtin). We must quote paths so CWDs + // containing spaces work reliably. 
+ // + // IMPORTANT: `std::process::Command::args()` will apply Windows quoting/escaping rules when + // constructing the command line. Passing a single argument that itself contains quotes can + // confuse `cmd.exe` and cause mklink to fail with "syntax is incorrect". We avoid that by + // using `raw_arg` to pass the tokens exactly as `cmd.exe` expects to parse them. + // + // Paths cannot contain quotes on Windows, so no extra escaping is needed here. + let link_quoted = format!("\"{link}\""); + let target_quoted = format!("\"{target}\""); + log_note( + &format!("junction: creating via cmd /c mklink /J {link_quoted} {target_quoted}"), + log_dir, + ); + let output = match std::process::Command::new("cmd") + .raw_arg("/c") + .raw_arg("mklink") + .raw_arg("/J") + .raw_arg(&link_quoted) + .raw_arg(&target_quoted) + .output() + { + Ok(output) => output, + Err(err) => { + log_note(&format!("junction: mklink failed to run: {err}"), log_dir); + return None; + } + }; + if output.status.success() && junction_path.exists() { + log_note( + &format!( + "junction: created {} -> {}", + junction_path.display(), + requested_cwd.display() + ), + log_dir, + ); + return Some(junction_path); + } + let stdout = String::from_utf8_lossy(&output.stdout); + let stderr = String::from_utf8_lossy(&output.stderr); + log_note( + &format!( + "junction: mklink failed status={:?} stdout={} stderr={}", + output.status, + stdout.trim(), + stderr.trim() + ), + log_dir, + ); + None +} diff --git a/codex-rs/windows-sandbox-rs/src/elevated_impl.rs b/codex-rs/windows-sandbox-rs/src/elevated_impl.rs index fb75e6f20ef..7dfcd6e0ae9 100644 --- a/codex-rs/windows-sandbox-rs/src/elevated_impl.rs +++ b/codex-rs/windows-sandbox-rs/src/elevated_impl.rs @@ -7,7 +7,6 @@ mod windows_impl { use crate::env::inherit_path_env; use crate::env::normalize_null_device_env; use crate::identity::require_logon_sandbox_creds; - use crate::logging::debug_log; use crate::logging::log_failure; use crate::logging::log_note; use 
crate::logging::log_start; @@ -15,7 +14,6 @@ mod windows_impl { use crate::policy::parse_policy; use crate::policy::SandboxPolicy; use crate::token::convert_string_sid_to_sid; - use crate::winutil::format_last_error; use crate::winutil::quote_windows_arg; use crate::winutil::to_wide; use anyhow::Result; @@ -94,7 +92,7 @@ mod windows_impl { fn inject_git_safe_directory( env_map: &mut HashMap, cwd: &Path, - logs_base_dir: Option<&Path>, + _logs_base_dir: Option<&Path>, ) { if let Some(git_root) = find_git_root(cwd) { let mut cfg_count: usize = env_map @@ -109,10 +107,6 @@ mod windows_impl { env_map.insert(format!("GIT_CONFIG_VALUE_{cfg_count}"), git_path); cfg_count += 1; env_map.insert("GIT_CONFIG_COUNT".to_string(), cfg_count.to_string()); - log_note( - &format!("injected git safe.directory for {}", git_root.display()), - logs_base_dir, - ); } } @@ -237,7 +231,6 @@ mod windows_impl { log_start(&command, logs_base_dir); let sandbox_creds = require_logon_sandbox_creds(&policy, sandbox_policy_cwd, cwd, &env_map, codex_home)?; - log_note("cli creds ready", logs_base_dir); // Build capability SID for ACL grants. if matches!( &policy, @@ -283,13 +276,6 @@ mod windows_impl { &stderr_name, PIPE_ACCESS_DUPLEX | PIPE_TYPE_BYTE | PIPE_READMODE_BYTE | PIPE_WAIT, )?; - log_note( - &format!( - "cli pipes created stdin={} stdout={} stderr={}", - stdin_name, stdout_name, stderr_name - ), - logs_base_dir, - ); // Launch runner as sandbox user via CreateProcessWithLogonW. 
let runner_exe = find_runner_exe(); @@ -326,10 +312,6 @@ mod windows_impl { ); return Err(e.into()); } - log_note( - &format!("cli request file written path={}", req_file.display()), - logs_base_dir, - ); let runner_full_cmd = format!( "{} {}", quote_windows_arg(&runner_cmdline), @@ -338,14 +320,6 @@ mod windows_impl { let mut cmdline_vec: Vec = to_wide(&runner_full_cmd); let exe_w: Vec = to_wide(&runner_cmdline); let cwd_w: Vec = to_wide(cwd); - log_note( - &format!("cli prep done request_file={}", req_file.display()), - logs_base_dir, - ); - log_note( - &format!("cli about to launch runner cmd={}", runner_full_cmd), - logs_base_dir, - ); // Minimal CPWL launch: inherit env, no desktop override, no handle inheritance. let env_block: Option> = None; @@ -380,23 +354,8 @@ mod windows_impl { }; if spawn_res == 0 { let err = unsafe { GetLastError() } as i32; - let dbg = format!( - "CreateProcessWithLogonW failed: {} ({}) | cwd={} | cmd={} | req_file={} | env=inherit | si_flags={}", - err, - format_last_error(err), - cwd.display(), - runner_full_cmd, - req_file.display(), - si.dwFlags, - ); - debug_log(&dbg, logs_base_dir); - log_note(&dbg, logs_base_dir); return Err(anyhow::anyhow!("CreateProcessWithLogonW failed: {}", err)); } - log_note( - &format!("cli runner launched pid={}", pi.hProcess), - logs_base_dir, - ); // Pipes are no longer passed as std handles; no stdin payload is sent. connect_pipe(h_stdin_pipe)?; diff --git a/codex-rs/windows-sandbox-rs/src/env.rs b/codex-rs/windows-sandbox-rs/src/env.rs index 65950a0d595..8fa2f012744 100644 --- a/codex-rs/windows-sandbox-rs/src/env.rs +++ b/codex-rs/windows-sandbox-rs/src/env.rs @@ -29,7 +29,6 @@ pub fn ensure_non_interactive_pager(env_map: &mut HashMap) { } // Keep PATH and PATHEXT stable for callers that rely on inheriting the parent process env. 
-#[allow(dead_code)] pub fn inherit_path_env(env_map: &mut HashMap) { if !env_map.contains_key("PATH") { if let Ok(path) = env::var("PATH") { diff --git a/codex-rs/windows-sandbox-rs/src/lib.rs b/codex-rs/windows-sandbox-rs/src/lib.rs index 3a1c5c82a2e..4ffab10dc59 100644 --- a/codex-rs/windows-sandbox-rs/src/lib.rs +++ b/codex-rs/windows-sandbox-rs/src/lib.rs @@ -20,6 +20,8 @@ pub use acl::allow_null_device; #[cfg(target_os = "windows")] pub use acl::ensure_allow_mask_aces; #[cfg(target_os = "windows")] +pub use acl::ensure_allow_mask_aces_with_inheritance; +#[cfg(target_os = "windows")] pub use acl::ensure_allow_write_aces; #[cfg(target_os = "windows")] pub use acl::fetch_dacl_handle; diff --git a/codex-rs/windows-sandbox-rs/src/process.rs b/codex-rs/windows-sandbox-rs/src/process.rs index 786fe416a44..2dbd152ce75 100644 --- a/codex-rs/windows-sandbox-rs/src/process.rs +++ b/codex-rs/windows-sandbox-rs/src/process.rs @@ -16,22 +16,12 @@ use windows_sys::Win32::System::Console::GetStdHandle; use windows_sys::Win32::System::Console::STD_ERROR_HANDLE; use windows_sys::Win32::System::Console::STD_INPUT_HANDLE; use windows_sys::Win32::System::Console::STD_OUTPUT_HANDLE; -use windows_sys::Win32::System::JobObjects::AssignProcessToJobObject; -use windows_sys::Win32::System::JobObjects::CreateJobObjectW; -use windows_sys::Win32::System::JobObjects::JobObjectExtendedLimitInformation; -use windows_sys::Win32::System::JobObjects::SetInformationJobObject; -use windows_sys::Win32::System::JobObjects::JOBOBJECT_EXTENDED_LIMIT_INFORMATION; -use windows_sys::Win32::System::JobObjects::JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE; use windows_sys::Win32::System::Threading::CreateProcessAsUserW; -use windows_sys::Win32::System::Threading::GetExitCodeProcess; -use windows_sys::Win32::System::Threading::WaitForSingleObject; use windows_sys::Win32::System::Threading::CREATE_UNICODE_ENVIRONMENT; -use windows_sys::Win32::System::Threading::INFINITE; use 
windows_sys::Win32::System::Threading::PROCESS_INFORMATION; use windows_sys::Win32::System::Threading::STARTF_USESTDHANDLES; use windows_sys::Win32::System::Threading::STARTUPINFOW; -#[allow(dead_code)] pub fn make_env_block(env: &HashMap) -> Vec { let mut items: Vec<(String, String)> = env.iter().map(|(k, v)| (k.clone(), v.clone())).collect(); @@ -51,7 +41,6 @@ pub fn make_env_block(env: &HashMap) -> Vec { w } -#[allow(dead_code)] unsafe fn ensure_inheritable_stdio(si: &mut STARTUPINFOW) -> Result<()> { for kind in [STD_INPUT_HANDLE, STD_OUTPUT_HANDLE, STD_ERROR_HANDLE] { let h = GetStdHandle(kind); @@ -72,7 +61,6 @@ unsafe fn ensure_inheritable_stdio(si: &mut STARTUPINFOW) -> Result<()> { /// # Safety /// Caller must provide a valid primary token handle (`h_token`) with appropriate access, /// and the `argv`, `cwd`, and `env_map` must remain valid for the duration of the call. -#[allow(dead_code)] pub unsafe fn create_process_as_user( h_token: HANDLE, argv: &[String], @@ -148,56 +136,3 @@ pub unsafe fn create_process_as_user( } Ok((pi, si)) } - -/// # Safety -/// Caller must provide valid process information handles. -#[allow(dead_code)] -pub unsafe fn wait_process_and_exitcode(pi: &PROCESS_INFORMATION) -> Result { - let res = WaitForSingleObject(pi.hProcess, INFINITE); - if res != 0 { - return Err(anyhow!("WaitForSingleObject failed: {}", GetLastError())); - } - let mut code: u32 = 0; - if GetExitCodeProcess(pi.hProcess, &mut code) == 0 { - return Err(anyhow!("GetExitCodeProcess failed: {}", GetLastError())); - } - Ok(code as i32) -} - -/// # Safety -/// Caller must close the returned job handle. 
-#[allow(dead_code)] -pub unsafe fn create_job_kill_on_close() -> Result { - let h = CreateJobObjectW(std::ptr::null_mut(), std::ptr::null()); - if h == 0 { - return Err(anyhow!("CreateJobObjectW failed: {}", GetLastError())); - } - let mut limits: JOBOBJECT_EXTENDED_LIMIT_INFORMATION = std::mem::zeroed(); - limits.BasicLimitInformation.LimitFlags = JOB_OBJECT_LIMIT_KILL_ON_JOB_CLOSE; - let ok = SetInformationJobObject( - h, - JobObjectExtendedLimitInformation, - &mut limits as *mut _ as *mut c_void, - std::mem::size_of::() as u32, - ); - if ok == 0 { - return Err(anyhow!( - "SetInformationJobObject failed: {}", - GetLastError() - )); - } - Ok(h) -} - -/// # Safety -/// Caller must pass valid handles for a job object and a process. -#[allow(dead_code)] -pub unsafe fn assign_to_job(h_job: HANDLE, h_process: HANDLE) -> Result<()> { - if AssignProcessToJobObject(h_job, h_process) == 0 { - return Err(anyhow!( - "AssignProcessToJobObject failed: {}", - GetLastError() - )); - } - Ok(()) -} diff --git a/codex-rs/windows-sandbox-rs/src/setup_main_win.rs b/codex-rs/windows-sandbox-rs/src/setup_main_win.rs index 0ed0adea031..d84aa2fba07 100644 --- a/codex-rs/windows-sandbox-rs/src/setup_main_win.rs +++ b/codex-rs/windows-sandbox-rs/src/setup_main_win.rs @@ -6,7 +6,7 @@ use base64::engine::general_purpose::STANDARD as BASE64; use base64::Engine; use codex_windows_sandbox::convert_string_sid_to_sid; use codex_windows_sandbox::dpapi_protect; -use codex_windows_sandbox::ensure_allow_mask_aces; +use codex_windows_sandbox::ensure_allow_mask_aces_with_inheritance; use codex_windows_sandbox::ensure_allow_write_aces; use codex_windows_sandbox::load_or_create_cap_sids; use codex_windows_sandbox::log_note; @@ -32,7 +32,6 @@ use std::path::PathBuf; use std::process::Command; use std::process::Stdio; use std::sync::mpsc; -use std::time::Instant; use windows::core::Interface; use windows::core::BSTR; use windows::Win32::Foundation::VARIANT_TRUE; @@ -104,17 +103,13 @@ struct Payload { 
#[derive(Debug, Clone, Copy, Deserialize, Serialize, PartialEq, Eq)] #[serde(rename_all = "kebab-case")] +#[derive(Default)] enum SetupMode { + #[default] Full, ReadAclsOnly, } -impl Default for SetupMode { - fn default() -> Self { - Self::Full - } -} - #[derive(Serialize)] struct SandboxUserRecord { username: String, @@ -267,14 +262,14 @@ fn resolve_sid(name: &str) -> Result> { } } -fn spawn_read_acl_helper(payload: &Payload, log: &mut File) -> Result<()> { +fn spawn_read_acl_helper(payload: &Payload, _log: &mut File) -> Result<()> { let mut read_payload = payload.clone(); read_payload.mode = SetupMode::ReadAclsOnly; read_payload.refresh_only = true; let payload_json = serde_json::to_vec(&read_payload)?; let payload_b64 = BASE64.encode(payload_json); let exe = std::env::current_exe().context("locate setup helper")?; - let child = Command::new(&exe) + Command::new(&exe) .arg(payload_b64) .stdin(Stdio::null()) .stdout(Stdio::null()) @@ -282,8 +277,6 @@ fn spawn_read_acl_helper(payload: &Payload, log: &mut File) -> Result<()> { .creation_flags(0x08000000) // CREATE_NO_WINDOW .spawn() .context("spawn read ACL helper")?; - let pid = child.id(); - log_line(log, &format!("spawned read ACL helper pid={pid}"))?; Ok(()) } @@ -295,20 +288,18 @@ struct ReadAclSubjects<'a> { fn apply_read_acls( read_roots: &[PathBuf], - subjects: ReadAclSubjects<'_>, + subjects: &ReadAclSubjects<'_>, log: &mut File, refresh_errors: &mut Vec, + access_mask: u32, + access_label: &str, + inheritance: u32, ) -> Result<()> { - log_line( - log, - &format!("read ACL: processing {} read roots", read_roots.len()), - )?; - let read_mask = FILE_GENERIC_READ | FILE_GENERIC_EXECUTE; for root in read_roots { if !root.exists() { log_line( log, - &format!("read root {} missing; skipping", root.display()), + &format!("{access_label} root {} missing; skipping", root.display()), )?; continue; } @@ -316,25 +307,20 @@ fn apply_read_acls( root, subjects.rx_psids, None, - read_mask, + access_mask, + access_label, 
refresh_errors, log, )?; if builtin_has { - log_line( - log, - &format!( - "Users/AU/Everyone already has RX on {}; skipping", - root.display() - ), - )?; continue; } let offline_has = read_mask_allows_or_log( root, &[subjects.offline_psid], Some("offline"), - read_mask, + access_mask, + access_label, refresh_errors, log, )?; @@ -342,23 +328,20 @@ fn apply_read_acls( root, &[subjects.online_psid], Some("online"), - read_mask, + access_mask, + access_label, refresh_errors, log, )?; if offline_has && online_has { - log_line( - log, - &format!( - "sandbox users already have RX on {}; skipping", - root.display() - ), - )?; continue; } log_line( log, - &format!("granting read ACE to {} for sandbox users", root.display()), + &format!( + "granting {access_label} ACE to {} for sandbox users", + root.display() + ), )?; let mut successes = usize::from(offline_has) + usize::from(online_has); let mut missing_psids: Vec<*mut c_void> = Vec::new(); @@ -372,33 +355,30 @@ fn apply_read_acls( missing_labels.push("online"); } if !missing_psids.is_empty() { - let started = Instant::now(); - match unsafe { ensure_allow_mask_aces(root, &missing_psids, read_mask) } { + let result = unsafe { + ensure_allow_mask_aces_with_inheritance( + root, + &missing_psids, + access_mask, + inheritance, + ) + }; + match result { Ok(_) => { - let elapsed_ms = started.elapsed().as_millis(); successes = 2; - log_line( - log, - &format!( - "grant read ACE succeeded on {} for {} in {elapsed_ms}ms", - root.display(), - missing_labels.join(", ") - ), - )?; } Err(err) => { - let elapsed_ms = started.elapsed().as_millis(); let label_list = missing_labels.join(", "); for label in &missing_labels { refresh_errors.push(format!( - "grant read ACE failed on {} for {label}: {err}", + "grant {access_label} ACE failed on {} for {label}: {err}", root.display() )); } log_line( log, &format!( - "grant read ACE failed on {} for {} in {elapsed_ms}ms: {err}", + "grant {access_label} ACE failed on {} for {}: {err}", 
root.display(), label_list ), @@ -407,12 +387,11 @@ fn apply_read_acls( } } if successes == 2 { - log_line(log, &format!("granted read ACE to {}", root.display()))?; } else { log_line( log, &format!( - "read ACE incomplete on {} (success {successes}/2)", + "{access_label} ACE incomplete on {} (success {successes}/2)", root.display() ), )?; @@ -426,6 +405,7 @@ fn read_mask_allows_or_log( psids: &[*mut c_void], label: Option<&str>, read_mask: u32, + access_label: &str, refresh_errors: &mut Vec, log: &mut File, ) -> Result { @@ -436,7 +416,7 @@ fn read_mask_allows_or_log( .map(|value| format!(" for {value}")) .unwrap_or_default(); refresh_errors.push(format!( - "read mask check failed on {}{}: {}", + "{access_label} mask check failed on {}{}: {}", root.display(), label_suffix, e @@ -444,7 +424,7 @@ fn read_mask_allows_or_log( log_line( log, &format!( - "read mask check failed on {}{}: {}; continuing", + "{access_label} mask check failed on {}{}: {}; continuing", root.display(), label_suffix, e @@ -532,7 +512,7 @@ fn lock_sandbox_dir( dir: &Path, real_user: &str, sandbox_user_sids: &[Vec], - log: &mut File, + _log: &mut File, ) -> Result<()> { std::fs::create_dir_all(dir)?; let system_sid = resolve_sid("SYSTEM")?; @@ -630,10 +610,6 @@ fn lock_sandbox_dir( } } } - log_line( - log, - &format!("sandbox dir ACL applied at {}", dir.display()), - )?; Ok(()) } @@ -723,8 +699,6 @@ fn real_main() -> Result<()> { .append(true) .open(&log_path) .context("open log")?; - log_line(&mut log, "setup binary started")?; - log_note("setup binary started", Some(sbx_dir.as_path())); let result = run_setup(&payload, &mut log, &sbx_dir); if let Err(err) = &result { let _ = log_line(&mut log, &format!("setup error: {err:?}")); @@ -766,7 +740,15 @@ fn run_read_acl_only(payload: &Payload, log: &mut File) -> Result<()> { online_psid, rx_psids: &rx_psids, }; - apply_read_acls(&payload.read_roots, subjects, log, &mut refresh_errors)?; + apply_read_acls( + &payload.read_roots, + &subjects, + log, + 
&mut refresh_errors, + FILE_GENERIC_READ | FILE_GENERIC_EXECUTE, + "read", + OBJECT_INHERIT_ACE | CONTAINER_INHERIT_ACE, + )?; unsafe { if !offline_psid.is_null() { LocalFree(offline_psid as HLOCAL); @@ -810,7 +792,6 @@ fn run_setup_full(payload: &Payload, log: &mut File, sbx_dir: &Path) -> Result<( Some(random_password()) }; if refresh_only { - log_line(log, "refresh-only mode: skipping user creation/firewall")?; } else { log_line( log, @@ -831,33 +812,17 @@ fn run_setup_full(payload: &Payload, log: &mut File, sbx_dir: &Path) -> Result<( let offline_psid = sid_bytes_to_psid(&offline_sid)?; let online_psid = sid_bytes_to_psid(&online_sid)?; let offline_sid_str = string_from_sid_bytes(&offline_sid).map_err(anyhow::Error::msg)?; - log_line( - log, - &format!( - "resolved SIDs offline={} online={}", - offline_sid_str, - string_from_sid_bytes(&online_sid).map_err(anyhow::Error::msg)? - ), - )?; + let caps = load_or_create_cap_sids(&payload.codex_home)?; let cap_psid = unsafe { convert_string_sid_to_sid(&caps.workspace) .ok_or_else(|| anyhow::anyhow!("convert capability SID failed"))? 
}; let mut refresh_errors: Vec = Vec::new(); - log_line(log, &format!("resolved capability SID {}", caps.workspace))?; if !refresh_only { run_netsh_firewall(&offline_sid_str, log)?; } - log_line( - log, - &format!( - "refresh: delegating {} read roots; processing {} write roots", - payload.read_roots.len(), - payload.write_roots.len() - ), - )?; if payload.read_roots.is_empty() { log_line(log, "no read roots to grant; skipping read ACL helper")?; } else { @@ -923,14 +888,6 @@ fn run_setup_full(payload: &Payload, log: &mut File, sbx_dir: &Path) -> Result<( false } }; - log_line( - log, - &format!( - "write check {label} on {} => {}", - root.display(), - if has { "present" } else { "missing" } - ), - )?; if !has { need_grant = true; } @@ -944,14 +901,6 @@ fn run_setup_full(payload: &Payload, log: &mut File, sbx_dir: &Path) -> Result<( ), )?; grant_tasks.push(root.clone()); - } else { - log_line( - log, - &format!( - "write ACE already present for all sandbox SIDs on {}", - root.display() - ), - )?; } } @@ -985,20 +934,7 @@ fn run_setup_full(payload: &Payload, log: &mut File, sbx_dir: &Path) -> Result<( drop(tx); for (root, res) in rx { match res { - Ok(added) => { - if log_line( - log, - &format!( - "write ACE {} on {}", - if added { "added" } else { "already present" }, - root.display() - ), - ) - .is_err() - { - // ignore log errors inside scoped thread - } - } + Ok(_) => {} Err(e) => { refresh_errors.push(format!("write ACE failed on {}: {}", root.display(), e)); if log_line( @@ -1031,7 +967,6 @@ fn run_setup_full(payload: &Payload, log: &mut File, sbx_dir: &Path) -> Result<( &[offline_sid.clone(), online_sid.clone()], log, )?; - log_line(log, "sandbox dir ACL applied")?; write_secrets( &payload.codex_home, &payload.offline_username, @@ -1041,10 +976,6 @@ fn run_setup_full(payload: &Payload, log: &mut File, sbx_dir: &Path) -> Result<( &payload.read_roots, &payload.write_roots, )?; - log_line( - log, - "sandbox users and marker written (sandbox_users.json, 
setup_marker.json)", - )?; } unsafe { if !offline_psid.is_null() { @@ -1064,7 +995,6 @@ fn run_setup_full(payload: &Payload, log: &mut File, sbx_dir: &Path) -> Result<( )?; anyhow::bail!("setup refresh had errors"); } - log_line(log, "setup binary completed")?; log_note("setup binary completed", Some(sbx_dir)); Ok(()) } diff --git a/codex-rs/windows-sandbox-rs/src/setup_orchestrator.rs b/codex-rs/windows-sandbox-rs/src/setup_orchestrator.rs index c26d544812b..97ff1f0a8e2 100644 --- a/codex-rs/windows-sandbox-rs/src/setup_orchestrator.rs +++ b/codex-rs/windows-sandbox-rs/src/setup_orchestrator.rs @@ -80,10 +80,6 @@ pub fn run_setup_refresh( let json = serde_json::to_vec(&payload)?; let b64 = BASE64_STANDARD.encode(json); let exe = find_setup_exe(); - log_note( - &format!("setup refresh: invoking {}", exe.display()), - Some(&sandbox_dir(codex_home)), - ); // Refresh should never request elevation; ensure verb isn't set and we don't trigger UAC. let mut cmd = Command::new(&exe); cmd.arg(&b64).stdout(Stdio::null()).stderr(Stdio::null()); diff --git a/codex-rs/windows-sandbox-rs/src/token.rs b/codex-rs/windows-sandbox-rs/src/token.rs index 3b25a5a0578..fc0e840eb51 100644 --- a/codex-rs/windows-sandbox-rs/src/token.rs +++ b/codex-rs/windows-sandbox-rs/src/token.rs @@ -144,7 +144,6 @@ pub unsafe fn convert_string_sid_to_sid(s: &str) -> Option<*mut c_void> { /// # Safety /// Caller must close the returned token handle. -#[allow(dead_code)] pub unsafe fn get_current_token_for_restriction() -> Result { let desired = TOKEN_DUPLICATE | TOKEN_QUERY @@ -285,7 +284,6 @@ unsafe fn enable_single_privilege(h_token: HANDLE, name: &str) -> Result<()> { /// # Safety /// Caller must close the returned token handle. -#[allow(dead_code)] pub unsafe fn create_workspace_write_token_with_cap( psid_capability: *mut c_void, ) -> Result<(HANDLE, *mut c_void)> { @@ -297,7 +295,6 @@ pub unsafe fn create_workspace_write_token_with_cap( /// # Safety /// Caller must close the returned token handle. 
-#[allow(dead_code)] pub unsafe fn create_readonly_token_with_cap( psid_capability: *mut c_void, ) -> Result<(HANDLE, *mut c_void)> { diff --git a/codex-rs/windows-sandbox-rs/src/winutil.rs b/codex-rs/windows-sandbox-rs/src/winutil.rs index 02f7a8cce33..5fbda726e28 100644 --- a/codex-rs/windows-sandbox-rs/src/winutil.rs +++ b/codex-rs/windows-sandbox-rs/src/winutil.rs @@ -85,7 +85,6 @@ pub fn format_last_error(err: i32) -> String { } } -#[allow(dead_code)] pub fn string_from_sid_bytes(sid: &[u8]) -> Result { unsafe { let mut str_ptr: *mut u16 = std::ptr::null_mut(); diff --git a/docs/config.md b/docs/config.md index de8cb448ba1..1a22aad0777 100644 --- a/docs/config.md +++ b/docs/config.md @@ -7,6 +7,7 @@ Codexel configuration gives you fine-grained control over the model, execution e - [Feature flags](#feature-flags) - [Model selection](#model-selection) - [Execution environment](#execution-environment) +- [Project root detection](#project-root-detection) - [MCP integration](#mcp-integration) - [Observability and telemetry](#observability-and-telemetry) - [Profiles and overrides](#profiles-and-overrides) @@ -41,23 +42,66 @@ web_search_request = true # allow the model to request web searches Supported features: -| Key | Default | Stage | Description | -| ------------------------------------- | :-----: | ------------ | ----------------------------------------------------- | -| `unified_exec` | false | Experimental | Use the unified PTY-backed exec tool | -| `rmcp_client` | false | Experimental | Enable oauth support for streamable HTTP MCP servers | -| `apply_patch_freeform` | false | Beta | Include the freeform `apply_patch` tool | -| `view_image_tool` | true | Stable | Include the `view_image` tool | -| `web_search_request` | false | Stable | Allow the model to issue web searches | -| `ghost_commit` | false | Experimental | Create a ghost commit each turn | -| `enable_experimental_windows_sandbox` | false | Experimental | Use the Windows restricted-token sandbox | 
-| `tui2` | false | Experimental | Use the experimental TUI v2 (viewport) implementation | -| `skills` | false | Experimental | Enable discovery and injection of skills | +| Key | Default | Stage | Description | +| ------------------------------------- | :-----: | ------------ | ------------------------------------------------------------------------- | +| `unified_exec` | false | Experimental | Use the unified PTY-backed exec tool | +| `apply_patch_freeform` | false | Beta | Include the freeform `apply_patch` tool | +| `view_image_tool` | true | Stable | Include the `view_image` tool | +| `web_search_request` | false | Stable | Allow the model to issue web searches (including during `/plan`) | +| `ghost_commit` | false | Experimental | Create a ghost commit each turn | +| `enable_experimental_windows_sandbox` | false | Experimental | Use the Windows restricted-token sandbox | +| `tui2` | false | Experimental | Use the experimental TUI v2 (viewport) implementation | +| `skills` | false | Experimental | Enable discovery and injection of skills | +| `mini_subagents` | true | Beta | Allow the agent to spawn read-only mini subagents (`spawn_mini_subagent`) | +| `lsp` | false | Experimental | Enable Language Server Protocol diagnostics and navigation | Notes: - Omit a key to accept its default. - Legacy booleans such as `experimental_use_exec_command_tool`, `experimental_use_unified_exec_tool`, `include_apply_patch_tool`, and similar `experimental_use_*` keys are deprecated; setting the corresponding `[features].` avoids repeated warnings. 
+## LSP + +Enable LSP support with: + +```toml +[features] +lsp = true +``` + +Optional configuration: + +```toml +[lsp] +prompt_diagnostics = true +max_prompt_diagnostics = 10 +max_tool_diagnostics = 200 +tool_diagnostics_wait_ms = 2000 +max_file_bytes = 524288 +ignored_globs = ["**/target/**", "**/node_modules/**", "**/.git/**"] + +# Note: Codex also respects `.gitignore` (and `.git/info/exclude`) in the workspace root when deciding +# what files to scan/open, in addition to `ignored_globs`. + +[lsp.servers.rust] +command = "rust-analyzer" +args = [] + +[lsp.servers.csharp] +command = "csharp-ls" +args = ["--stdio"] + +[lsp.servers.php] +command = "intelephense" +args = ["--stdio"] + +[lsp.servers.perl] +command = "perlnavigator" +args = [] +``` + +If you omit `[lsp.servers.*]`, Codex will try to autodetect common language servers from `PATH` (e.g. `rust-analyzer`, `gopls`, `csharp-ls`, `OmniSharp`, `pyright-langserver`, `typescript-language-server`, `intelephense`, `perlnavigator`) and start them as needed. + ## Model selection ### model @@ -77,6 +121,32 @@ Optional model to use for planning flows such as `/plan` (and plan-variant subag plan_model = "gpt-5.1-codex" ``` +### explore_model (deprecated) + +Deprecated. Use `mini_subagent_model` instead. When set, this value is treated as an alias for `mini_subagent_model` for compatibility. + +```toml +# Deprecated: use `mini_subagent_model` instead. +explore_model = "gpt-5.1-mini" +``` + +### mini_subagent_model + +Optional model to use for mini subagents. This affects `/plan` exploration (grounding) and the `spawn_mini_subagent` tool. When unset, mini subagents default to `gpt-5.1-codex-mini`. + +```toml +mini_subagent_model = "gpt-5.1-codex-mini" +``` + +### subagent_model + +Optional model to use for spawned subagents (the `spawn_subagent` tool flow). When unset, spawned subagents inherit the active model for that turn. + +```toml +# Use a cheaper model for general subagents without changing the main chat model. 
+subagent_model = "gpt-5.1-mini" +``` + ### model_providers This option lets you add to the default set of model providers bundled with Codex. The map key becomes the value you use with `model_provider` to select the provider. @@ -215,6 +285,18 @@ Note: to minimize reasoning, choose `"minimal"`. Optional reasoning effort to use for planning flows such as `/plan`. When unset, planning uses `model_reasoning_effort`. +### explore_model_reasoning_effort (deprecated) + +Deprecated. Use `mini_subagent_model_reasoning_effort` instead. When set, this value is treated as an alias for `mini_subagent_model_reasoning_effort` for compatibility. + +### mini_subagent_model_reasoning_effort + +Optional reasoning effort to use for mini subagents. This affects `/plan` exploration (grounding) and the `spawn_mini_subagent` tool. When unset, mini subagents default to `"medium"`. + +### subagent_model_reasoning_effort + +Optional reasoning effort to use for spawned subagents (the `spawn_subagent` tool flow). When unset, spawned subagents inherit the reasoning effort for that turn. + ### model_reasoning_summary If the model name starts with `"o"` (as in `"o3"` or `"o4-mini"`) or `"codex"`, reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries), this can be set to: @@ -432,6 +514,17 @@ set = { PATH = "/usr/bin", MY_FLAG = "1" } Currently, `CODEX_SANDBOX_NETWORK_DISABLED=1` is also added to the environment, assuming network is disabled. This is not configurable. +## Project root detection + +Codex discovers `.codex/` project layers by walking up from the working directory until it hits a project marker. By default it looks for `.git`. 
You can override the marker list in user/system/MDM config: + +```toml +# $CODEX_HOME/config.toml +project_root_markers = [".git", ".hg", ".sl"] +``` + +Set `project_root_markers = []` to skip searching parent directories and treat the current working directory as the project root. + ## MCP integration ### mcp_servers @@ -480,14 +573,7 @@ http_headers = { "HEADER_NAME" = "HEADER_VALUE" } env_http_headers = { "HEADER_NAME" = "ENV_VAR" } ``` -Streamable HTTP connections always use the experimental Rust MCP client under the hood, so expect occasional rough edges. OAuth login flows are gated on the `rmcp_client = true` flag: - -```toml -[features] -rmcp_client = true -``` - -After enabling it, run `codexel mcp login ` when the server supports OAuth. +Streamable HTTP connections always use the Rust MCP client under the hood. Run `codexel mcp login ` to authenticate for servers supporting OAuth. #### Other configuration options @@ -900,6 +986,54 @@ notifications = [ "agent-turn-complete", "approval-requested" ] # Disable terminal animations (welcome screen, status shimmer, spinner). # Defaults to true. animations = false + +# TUI2 mouse scrolling (wheel + trackpad) +# +# Terminals emit different numbers of raw scroll events per physical wheel notch (commonly 1, 3, +# or 9+). TUI2 normalizes raw event density into consistent wheel behavior (default: ~3 lines per +# wheel notch) while keeping trackpad input higher fidelity via fractional accumulation. +# +# See `codex-rs/tui2/docs/scroll_input_model.md` for the model and probe data. + +# Override *wheel* event density (raw events per physical wheel notch). TUI2 only. +# +# Wheel-like per-event contribution is: +# - `scroll_wheel_lines / scroll_events_per_tick` +# +# Trackpad-like streams use `min(scroll_events_per_tick, 3)` as the divisor so dense wheel ticks +# (e.g. 9 events per notch) do not make trackpads feel artificially slow. 
+scroll_events_per_tick = 3 + +# Override wheel scroll lines per physical wheel notch (classic feel). TUI2 only. +scroll_wheel_lines = 3 + +# Override baseline trackpad sensitivity (lines per tick-equivalent). TUI2 only. +# +# Trackpad-like per-event contribution is: +# - `scroll_trackpad_lines / min(scroll_events_per_tick, 3)` +scroll_trackpad_lines = 1 + +# Trackpad acceleration (optional). TUI2 only. +# These keep small swipes precise while letting large/faster swipes cover more content. +# +# Concretely, TUI2 computes: +# - `multiplier = clamp(1 + abs(events) / scroll_trackpad_accel_events, 1..scroll_trackpad_accel_max)` +# +# The multiplier is applied to the trackpad-like stream’s computed line delta (including any +# carried fractional remainder). +scroll_trackpad_accel_events = 30 +scroll_trackpad_accel_max = 3 + +# Force scroll interpretation. TUI2 only. +# Valid values: "auto" (default), "wheel", "trackpad" +scroll_mode = "auto" + +# Auto-mode heuristic tuning. TUI2 only. +scroll_wheel_tick_detect_max_ms = 12 +scroll_wheel_like_max_duration_ms = 200 + +# Invert scroll direction for mouse wheel/trackpad. TUI2 only. +scroll_invert = false ``` > [!NOTE] @@ -907,6 +1041,23 @@ animations = false > [!NOTE] > `tui.notifications` is built‑in and limited to the TUI session. For programmatic or cross‑environment notifications—or to integrate with OS‑specific notifiers—use the top‑level `notify` option to run an external program that receives event JSON. The two settings are independent and can be used together. +Scroll settings (`tui.scroll_events_per_tick`, `tui.scroll_wheel_lines`, `tui.scroll_trackpad_lines`, `tui.scroll_trackpad_accel_*`, `tui.scroll_mode`, `tui.scroll_wheel_*`, `tui.scroll_invert`) currently apply to the TUI2 viewport scroll implementation. 
+ +> [!NOTE] > `tui.scroll_events_per_tick` has terminal-specific defaults derived from mouse scroll probe logs +> collected on macOS for a small set of terminals: +> +> - Terminal.app: 3 +> - Warp: 9 +> - WezTerm: 1 +> - Alacritty: 3 +> - Ghostty: 3 (stopgap; one probe measured ~9) +> - iTerm2: 1 +> - VS Code terminal: 1 +> - Kitty: 3 +> +> We should augment these defaults with data from more terminals and other platforms over time. +> Unknown terminals fall back to 3 and can be overridden via `tui.scroll_events_per_tick`. + ## Authentication and authorization ### Forcing a login method @@ -958,6 +1109,7 @@ Valid values: | `notify` | array | External program for notifications. | | `tui.animations` | boolean | Enable terminal animations (welcome screen, shimmer, spinner). Defaults to true; set to `false` to disable visual motion. | | `instructions` | string | Currently ignored; use `experimental_instructions_file` or `AGENTS.md`. | +| `developer_instructions` | string | The additional developer instructions. | | `features.` | boolean | See [feature flags](#feature-flags) for details | | `ghost_snapshot.disable_warnings` | boolean | Disable every warnings around ghost snapshot (large files, directory, ...) | | `ghost_snapshot.ignore_large_untracked_files` | number | Exclude untracked files larger than this many bytes from ghost snapshots (default: 10 MiB). Set to `0` to disable. | @@ -990,16 +1142,30 @@ Valid values: | `file_opener` | `vscode` \| `vscode-insiders` \| `windsurf` \| `cursor` \| `none` | URI scheme for clickable citations (default: `vscode`). | | `tui` | table | TUI‑specific options. | | `tui.notifications` | boolean \| array | Enable desktop notifications in the tui (default: true). | +| `tui.scroll_events_per_tick` | number | Raw events per wheel notch (normalization input; default: terminal-specific; fallback: 3). | +| `tui.scroll_wheel_lines` | number | Lines per physical wheel notch in wheel-like mode (default: 3). 
| +| `tui.scroll_trackpad_lines` | number | Baseline trackpad sensitivity in trackpad-like mode (default: 1). | +| `tui.scroll_trackpad_accel_events` | number | Trackpad acceleration: events per +1x speed in TUI2 (default: 30). | +| `tui.scroll_trackpad_accel_max` | number | Trackpad acceleration: max multiplier in TUI2 (default: 3). | +| `tui.scroll_mode` | `auto` \| `wheel` \| `trackpad` | How to interpret scroll input in TUI2 (default: `auto`). | +| `tui.scroll_wheel_tick_detect_max_ms` | number | Auto-mode threshold (ms) for promoting a stream to wheel-like behavior (default: 12). | +| `tui.scroll_wheel_like_max_duration_ms` | number | Auto-mode fallback duration (ms) used for 1-event-per-tick terminals (default: 200). | +| `tui.scroll_invert` | boolean | Invert mouse scroll direction in TUI2 (default: false). | | `hide_agent_reasoning` | boolean | Hide model reasoning events. | | `check_for_update_on_startup` | boolean | Check for Codex updates on startup (default: true). Set to `false` only if updates are centrally managed. | | `show_raw_agent_reasoning` | boolean | Show raw reasoning (when available). | | `model_reasoning_effort` | `minimal` \| `low` \| `medium` \| `high`\|`xhigh` | Responses API reasoning effort. | | `plan_model` | string | Optional model for planning flows (defaults to `model`). | | `plan_model_reasoning_effort` | `minimal` \| `low` \| `medium` \| `high`\|`xhigh` | Optional reasoning effort for planning flows (defaults to `model_reasoning_effort`). | +| `explore_model` | string | Deprecated alias for `mini_subagent_model`. | +| `explore_model_reasoning_effort` | `minimal` \| `low` \| `medium` \| `high`\|`xhigh` | Deprecated alias for `mini_subagent_model_reasoning_effort`. | +| `mini_subagent_model` | string | Optional model for mini subagents (used for `/plan` exploration and `spawn_mini_subagent`; default: `gpt-5.1-codex-mini`). 
| +| `mini_subagent_model_reasoning_effort` | `minimal` \| `low` \| `medium` \| `high`\|`xhigh` | Optional reasoning effort for mini subagents (default: `medium`). | +| `subagent_model` | string | Optional model for spawned subagents (defaults to the active model for that turn). | +| `subagent_model_reasoning_effort` | `minimal` \| `low` \| `medium` \| `high`\|`xhigh` | Optional reasoning effort for spawned subagents (defaults to the active effort for that turn). | | `model_reasoning_summary` | `auto` \| `concise` \| `detailed` \| `none` | Reasoning summaries. | | `model_verbosity` | `low` \| `medium` \| `high` | GPT‑5 text verbosity (Responses API). | | `model_supports_reasoning_summaries` | boolean | Force‑enable reasoning summaries. | -| `model_reasoning_summary_format` | `none` \| `experimental` | Force reasoning summary format. | | `chatgpt_base_url` | string | Base URL for ChatGPT auth flow. | | `experimental_instructions_file` | string (path) | Replace built‑in instructions (experimental). | | `experimental_use_exec_command_tool` | boolean | Use experimental exec command tool. | diff --git a/docs/example-config.md b/docs/example-config.md index 41337aeb57d..efaf2ee2d25 100644 --- a/docs/example-config.md +++ b/docs/example-config.md @@ -24,6 +24,15 @@ model = "gpt-5.1-codex-max" # Optional model used for `/plan` (defaults to `model` when unset). # plan_model = "gpt-5.1-codex" +# Optional model used for mini subagents (used for `/plan` exploration and `spawn_mini_subagent`). +# mini_subagent_model = "gpt-5.1-codex-mini" +# +# Deprecated alias (still supported for compatibility): +# explore_model = "gpt-5.1-mini" + +# Optional model used for spawned subagents (defaults to the active model for that turn). +# subagent_model = "gpt-5.1-mini" + # Model used by the /review feature (code reviews). Default: "gpt-5.1-codex-max". 
 review_model = "gpt-5.1-codex-max"
 
@@ -46,6 +55,15 @@ model_reasoning_effort = "medium"
 # Optional reasoning effort for `/plan` (defaults to `model_reasoning_effort` when unset).
 # plan_model_reasoning_effort = "medium"
 
+# Optional reasoning effort for mini subagents (used for `/plan` exploration and `spawn_mini_subagent`).
+# mini_subagent_model_reasoning_effort = "medium"
+#
+# Deprecated alias (still supported for compatibility):
+# explore_model_reasoning_effort = "medium"
+
+# Optional reasoning effort for spawned subagents (defaults to the active effort for that turn).
+# subagent_model_reasoning_effort = "medium"
+
 # Reasoning summary: auto | concise | detailed | none (default: auto)
 model_reasoning_summary = "auto"
 
@@ -55,17 +73,14 @@ model_verbosity = "medium"
 # Force-enable reasoning summaries for current model (default: false)
 model_supports_reasoning_summaries = false
 
-# Force reasoning summary format: none | experimental (default: none)
-model_reasoning_summary_format = "none"
-
 ################################################################################
 # Instruction Overrides
 ################################################################################
 
-# Additional user instructions appended after AGENTS.md. Default: unset.
+# Additional user instructions injected before AGENTS.md. Default: unset.
 # developer_instructions = ""
 
-# Optional legacy base instructions override (prefer AGENTS.md). Default: unset.
+# (Ignored) Optional legacy base instructions override (prefer AGENTS.md). Default: unset.
 # instructions = ""
 
 # Inline override for the history compaction prompt. Default: unset.
@@ -220,13 +235,12 @@ view_image = true
 
 [features]
 # Leave this table empty to accept defaults. Set explicit booleans to opt in/out.
unified_exec = false -rmcp_client = false apply_patch_freeform = false view_image_tool = true web_search_request = false -ghost_commit = false enable_experimental_windows_sandbox = false skills = false +mini_subagents = true ################################################################################ # Experimental toggles (legacy; prefer [features]) diff --git a/docs/exec.md b/docs/exec.md index 35a457c78e9..891604f505b 100644 --- a/docs/exec.md +++ b/docs/exec.md @@ -14,6 +14,10 @@ Use `codexel exec --full-auto` to allow file edits. Use `codexel exec --sandbox `codexel exec` is non-interactive. If the agent attempts to ask an interactive multiple-choice question (AskUserQuestion), the request is automatically cancelled. Plan approval prompts are automatically rejected. +### Subagents are supported + +`codexel exec` can still use `spawn_subagent` for parallel, read-only repo research. + ### Default output mode By default, Codexel streams its activity to stderr and only writes the final message from the agent to stdout. This makes it easier to pipe `codexel exec` into another tool without extra filtering. diff --git a/docs/skills.md b/docs/skills.md index 0be1904e3a3..244d6ca70c9 100644 --- a/docs/skills.md +++ b/docs/skills.md @@ -1,4 +1,4 @@ -# Skills (experimental) +# Skills > **Warning:** This is an experimental and non-stable feature. If you depend on it, please expect breaking changes over the coming weeks and understand that there is currently no guarantee that this works well. Use at your own risk! @@ -26,8 +26,8 @@ Skills are behind the experimental `skills` feature flag and are disabled by def - YAML frontmatter + body. - Required: - - `name` (non-empty, ≤100 chars, sanitized to one line) - - `description` (non-empty, ≤500 chars, sanitized to one line) + - `name` (non-empty, <=100 chars, sanitized to one line) + - `description` (non-empty, <=500 chars, sanitized to one line) - Extra keys are ignored. 
The body can contain any Markdown; it is not injected into context. ## Loading and rendering @@ -78,3 +78,5 @@ description: Extract text and tables from PDFs; use when PDFs, forms, or documen - For form filling, see FORMS.md. SKILL_EXAMPLE ``` + +For upstream documentation about skills, refer to [this documentation](https://developers.openai.com/codex/skills). diff --git a/docs/slash_commands.md b/docs/slash_commands.md index d8ccc480326..7a003682fb4 100644 --- a/docs/slash_commands.md +++ b/docs/slash_commands.md @@ -10,28 +10,30 @@ Slash commands are special commands you can type that start with `/`. Control Codex’s behavior during an interactive session with slash commands. -| Command | Purpose | -| --------------- | -------------------------------------------------------------------------- | -| `/model` | choose what model and reasoning effort to use | -| `/plan-model` | choose what model and reasoning effort to use for `/plan` | -| `/approvals` | choose what Codex can do without approval | -| `/review` | review my current changes and find issues | -| `/plan` | create and approve a plan before making changes | -| `/new` | start a new chat during a conversation | -| `/resume` | resume an old chat | -| `/init` | create an AGENTS.md file with instructions for Codex | -| `/compact` | summarize conversation to prevent hitting the context limit | -| `/undo` | ask Codex to undo a turn | -| `/diff` | show git diff (including untracked files) | -| `/mention` | mention a file | -| `/status` | show current session configuration and token usage | -| `/mcp` | list configured MCP tools | -| `/experimental` | open the experimental menu to enable features from our beta program | -| `/skills` | browse and insert skills (experimental; see [docs/skills.md](./skills.md)) | -| `/logout` | log out of Codex | -| `/quit` | exit Codex | -| `/exit` | exit Codex | -| `/feedback` | send logs to maintainers | +| Command | Purpose | +| ----------------- | 
-------------------------------------------------------------------------- | +| `/model` | choose what model and reasoning effort to use | +| `/plan-model` | choose what model and reasoning effort to use for `/plan` | +| `/subagent-model` | choose what model and reasoning effort to use for spawned subagents | +| `/approvals` | choose what Codex can do without approval | +| `/review` | review my current changes and find issues | +| `/plan` | create and approve a plan before making changes | +| `/new` | start a new chat during a conversation | +| `/resume` | resume an old chat | +| `/init` | create an AGENTS.md file with instructions for Codex | +| `/compact` | summarize conversation to prevent hitting the context limit | +| `/diff` | show git diff (including untracked files) | +| `/mention` | mention a file | +| `/status` | show current session configuration and token usage | +| `/lsp` | show current LSP status (detected/configured/running) | +| `/diagnostics` | show LSP diagnostics for the current workspace | +| `/mcp` | list configured MCP tools | +| `/experimental` | open the experimental menu to enable features from our beta program | +| `/skills` | browse and insert skills (experimental; see [docs/skills.md](./skills.md)) | +| `/logout` | log out of Codex | +| `/quit` | exit Codex | +| `/exit` | exit Codex | +| `/feedback` | send logs to maintainers | --- diff --git a/scripts/gen-changelog.ps1 b/scripts/gen-changelog.ps1 index 6e080417918..51dfa16cfe7 100644 --- a/scripts/gen-changelog.ps1 +++ b/scripts/gen-changelog.ps1 @@ -41,6 +41,24 @@ function Get-GroupForSubject([string]$Subject) { return "Other" } +function Escape-MarkdownInlineText([string]$Line) { + # Make identifier-like tokens stable under Markdown + Prettier by rendering them as inline code. + # This avoids Prettier rewriting things like "mini_subagent_*" into emphasis. + # Skip segments that are already inside inline code spans. 
+ $parts = $Line.Split('`') + for ($i = 0; $i -lt $parts.Length; $i += 2) { + $parts[$i] = [regex]::Replace($parts[$i], '\b[A-Za-z0-9]+(?:_[A-Za-z0-9]+)+_\*', { + param($m) '`' + $m.Value + '`' + }) + + $parts[$i] = [regex]::Replace($parts[$i], '\b[A-Za-z0-9]+(?:_[A-Za-z0-9]+)+\b', { + param($m) '`' + $m.Value + '`' + }) + } + + return ($parts -join '`') +} + function Render-Details([string]$Range) { $revArgs = @("rev-list", "--reverse", $Range) if ($hasUpstream) { @@ -81,6 +99,9 @@ function Render-Details([string]$Range) { $group = Get-GroupForSubject $subject $lines = $body -split "`n", -1 + for ($i = 0; $i -lt $lines.Count; $i++) { + $lines[$i] = Escape-MarkdownInlineText $lines[$i] + } $lines[0] = "- " + $lines[0] $entry = ($lines -join $newline).TrimEnd() $groups[$group] += $entry @@ -92,6 +113,7 @@ function Render-Details([string]$Range) { continue } $out += "#### $($kvp.Key)" + $out += "" $out += $kvp.Value $out += "" } @@ -112,7 +134,7 @@ $updated = [regex]::Replace($text, $pattern, { if ([string]::IsNullOrWhiteSpace($details)) { $details = "_No fork-only changes yet._" } - return "$newline$details$newline" + return "$($newline)$($newline)$details$($newline)" }, [System.Text.RegularExpressions.RegexOptions]::Singleline) if ($updated -eq $text) { diff --git a/scripts/gen-changelog.sh b/scripts/gen-changelog.sh index 7d6e556b4c0..d6d7d549686 100644 --- a/scripts/gen-changelog.sh +++ b/scripts/gen-changelog.sh @@ -65,6 +65,15 @@ def group_for_subject(subject): return "Chores" return "Other" +def escape_markdown_inline_text(line: str) -> str: + # Make identifier-like tokens stable under Markdown + Prettier by rendering them as inline code. + # This avoids Prettier rewriting things like "mini_subagent_*" into emphasis. 
+ parts = line.split("`") + for i in range(0, len(parts), 2): + parts[i] = re.sub(r"\b[A-Za-z0-9]+(?:_[A-Za-z0-9]+)+_\*", r"`\g<0>`", parts[i]) + parts[i] = re.sub(r"\b[A-Za-z0-9]+(?:_[A-Za-z0-9]+)+\b", r"`\g<0>`", parts[i]) + return "`".join(parts) + def git_lines(args): result = subprocess.run(args, capture_output=True, text=True) if result.returncode != 0: @@ -108,7 +117,7 @@ def render_details(range_): body = commit_body(sha) if not body.strip(): continue - lines = body.split("\n") + lines = [escape_markdown_inline_text(l) for l in body.split("\n")] subject = lines[0].strip() if not subject: continue @@ -122,6 +131,7 @@ def render_details(range_): if not commits: continue out.append(f"#### {group}") + out.append("") out.extend(commits) out.append("") @@ -133,7 +143,7 @@ def render(match): if not details: details = "_No fork-only changes yet._" details = details.replace("\n", newline) - return f"{newline}{details}{newline}" + return f"{newline}{newline}{details}{newline}" updated = pattern.sub(render, text) if updated == text: