diff --git a/.github/workflows/ci.yml b/.github/workflows/ci.yml index 677c340a8e6..475493b0bf7 100644 --- a/.github/workflows/ci.yml +++ b/.github/workflows/ci.yml @@ -29,9 +29,11 @@ jobs: # stage_npm_packages.py requires DotSlash when staging releases. - uses: facebook/install-dotslash@v2 + if: ${{ github.repository == 'openai/codex' }} - name: Stage npm package id: stage_npm_package + if: ${{ github.repository == 'openai/codex' }} env: GH_TOKEN: ${{ github.token }} run: | @@ -47,6 +49,7 @@ jobs: echo "pack_output=$PACK_OUTPUT" >> "$GITHUB_OUTPUT" - name: Upload staged npm package artifact + if: ${{ github.repository == 'openai/codex' }} uses: actions/upload-artifact@v6 with: name: codex-npm-staging diff --git a/.github/workflows/npm-publish-codexel.yml b/.github/workflows/npm-publish-codexel.yml new file mode 100644 index 00000000000..070dc873eba --- /dev/null +++ b/.github/workflows/npm-publish-codexel.yml @@ -0,0 +1,361 @@ +name: npm-publish-codexel + +on: + push: + tags: + - "codexel-v*" + +concurrency: + group: ${{ github.workflow }} + cancel-in-progress: true + +jobs: + tag-check: + runs-on: ubuntu-latest + outputs: + version: ${{ steps.validate.outputs.version }} + npm_tag: ${{ steps.validate.outputs.npm_tag }} + should_publish: ${{ steps.validate.outputs.should_publish }} + steps: + - uses: actions/checkout@v6 + + - name: Validate tag matches codex-cli package version + id: validate + shell: bash + run: | + set -euo pipefail + echo "::group::Tag validation" + + [[ "${GITHUB_REF_TYPE}" == "tag" ]] \ + || { echo "Not a tag push"; exit 1; } + [[ "${GITHUB_REF_NAME}" =~ ^codexel-v[0-9]+\.[0-9]+\.[0-9]+(-((alpha|beta)\.[0-9]+))?$ ]] \ + || { echo "Tag '${GITHUB_REF_NAME}' doesn't match expected format"; exit 1; } + + tag_version="${GITHUB_REF_NAME#codexel-v}" + package_version=$(python -c 'import json; print(json.load(open("codex-cli/package.json", "r", encoding="utf-8"))["version"])') + + if [[ "${package_version}" == *-dev ]]; then + echo 
"codex-cli/package.json version is ${package_version}; release tags require a non-dev version." + exit 1 + fi + + [[ "${tag_version}" == "${package_version}" ]] \ + || { echo "Tag ${tag_version} does not match package.json ${package_version}"; exit 1; } + + npm_tag="" + should_publish="true" + if [[ "${tag_version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+-alpha\.[0-9]+$ ]]; then + npm_tag="alpha" + elif [[ "${tag_version}" =~ ^[0-9]+\.[0-9]+\.[0-9]+-beta\.[0-9]+$ ]]; then + npm_tag="beta" + fi + + echo "version=${tag_version}" >> "$GITHUB_OUTPUT" + echo "npm_tag=${npm_tag}" >> "$GITHUB_OUTPUT" + echo "should_publish=${should_publish}" >> "$GITHUB_OUTPUT" + echo "Tag and package.json agree (${tag_version})" + echo "::endgroup::" + + build: + needs: tag-check + name: Build - ${{ matrix.target }} + runs-on: ${{ matrix.runner }} + timeout-minutes: 30 + defaults: + run: + working-directory: codex-rs + strategy: + fail-fast: false + matrix: + include: + - runner: ubuntu-24.04 + target: x86_64-unknown-linux-musl + install_musl: true + - runner: ubuntu-24.04-arm + target: aarch64-unknown-linux-musl + install_musl: true + - runner: windows-latest + target: x86_64-pc-windows-msvc + - runner: windows-11-arm + target: aarch64-pc-windows-msvc + + steps: + - uses: actions/checkout@v6 + - uses: dtolnay/rust-toolchain@1.90 + with: + targets: ${{ matrix.target }} + + - uses: actions/cache@v5 + with: + path: | + ~/.cargo/bin/ + ~/.cargo/registry/index/ + ~/.cargo/registry/cache/ + ~/.cargo/git/db/ + ${{ github.workspace }}/codex-rs/target/ + key: cargo-${{ matrix.runner }}-${{ matrix.target }}-release-${{ hashFiles('**/Cargo.lock') }} + + - if: ${{ matrix.install_musl }} + name: Install musl build tools + run: | + sudo apt-get update + sudo apt-get install -y musl-tools pkg-config + + - name: Cargo build + shell: bash + run: cargo build --target ${{ matrix.target }} --release --bin codexel + + - name: Stage artifacts + shell: bash + run: | + set -euo pipefail + 
dest="${GITHUB_WORKSPACE}/artifacts/vendor/${{ matrix.target }}/codex" + mkdir -p "$dest" + + binary_name="codexel" + if [[ "${{ contains(matrix.target, 'windows') }}" == 'true' ]]; then + binary_name="codexel.exe" + fi + + cp "target/${{ matrix.target }}/release/${binary_name}" "$dest/${binary_name}" + + - uses: actions/upload-artifact@v6 + with: + name: codexel-${{ matrix.target }} + path: artifacts/** + if-no-files-found: error + + package: + name: Package npm module + needs: + - tag-check + - build + runs-on: ubuntu-latest + env: + PACKAGE_VERSION: ${{ needs.tag-check.outputs.version }} + steps: + - name: Checkout repository + uses: actions/checkout@v6 + + - name: Setup Node.js + uses: actions/setup-node@v6 + with: + node-version: 22 + + - name: Download build artifacts + uses: actions/download-artifact@v7 + with: + path: artifacts + + - name: Assemble staging directory + id: staging + shell: bash + run: | + set -euo pipefail + staging="${STAGING_DIR}" + mkdir -p "$staging" "$staging/vendor" + cp codex-cli/package.json "$staging/" + cp -R codex-cli/bin "$staging/" + mkdir -p "$staging/scripts" + cp codex-cli/scripts/verify-vendor.mjs "$staging/scripts/" + cp README.md "$staging/" + cp LICENSE "$staging/" + + found_vendor="false" + shopt -s nullglob + for vendor_dir in artifacts/*/vendor; do + rsync -av "$vendor_dir/" "$staging/vendor/" + found_vendor="true" + done + if [[ "$found_vendor" == "false" ]]; then + echo "No vendor payloads were downloaded." 
+            exit 1
+          fi
+
+          node --input-type=module - <<'NODE'
+          import fs from "node:fs";
+          import path from "node:path";
+
+          const stagingDir = process.env.STAGING_DIR;
+          const version = process.env.PACKAGE_VERSION;
+          const pkgPath = path.join(stagingDir, "package.json");
+          const pkg = JSON.parse(fs.readFileSync(pkgPath, "utf8"));
+          pkg.version = version;
+          fs.writeFileSync(pkgPath, JSON.stringify(pkg, null, 2) + "\n");
+          NODE
+
+          echo "dir=$staging" >> "$GITHUB_OUTPUT"
+        env:
+          STAGING_DIR: ${{ runner.temp }}/codexel-npm
+
+      - name: Ensure binaries are executable
+        shell: bash
+        run: |
+          set -euo pipefail
+          staging="${{ steps.staging.outputs.dir }}"
+          chmod +x "$staging"/vendor/*/codex/codexel
+
+      - name: Validate vendor payloads
+        shell: bash
+        run: |
+          set -euo pipefail
+          staging="${{ steps.staging.outputs.dir }}"
+          targets=(
+            "aarch64-unknown-linux-musl"
+            "x86_64-unknown-linux-musl"
+            "aarch64-pc-windows-msvc"
+            "x86_64-pc-windows-msvc"
+          )
+
+          for target in "${targets[@]}"; do
+            if [[ "$target" == *windows* ]]; then
+              test -f "$staging/vendor/$target/codex/codexel.exe"
+            else
+              test -f "$staging/vendor/$target/codex/codexel"
+            fi
+          done
+
+      - name: Create npm tarball
+        shell: bash
+        run: |
+          set -euo pipefail
+          mkdir -p dist/npm
+          staging="${{ steps.staging.outputs.dir }}"
+          pack_info=$(cd "$staging" && npm pack --ignore-scripts --json --pack-destination "${GITHUB_WORKSPACE}/dist/npm")
+          filename=$(PACK_INFO="$pack_info" node -e 'const data = JSON.parse(process.env.PACK_INFO); console.log(data[0].filename);')
+          mv "dist/npm/${filename}" "dist/npm/codexel-npm-${PACKAGE_VERSION}.tgz"
+
+      - uses: actions/upload-artifact@v6
+        with:
+          name: codexel-npm
+          path: dist/npm/codexel-npm-${{ env.PACKAGE_VERSION }}.tgz
+          if-no-files-found: error
+
+  smoke-test:
+    name: Smoke test tarball
+    needs: package
+    runs-on: ubuntu-latest
+    steps:
+      - name: Setup Node.js
+        uses: actions/setup-node@v6
+        with:
+          node-version: 22
+
+      - name: Download npm tarball
+        uses: actions/download-artifact@v7
+        with:
name: codexel-npm + path: dist/npm + + - name: Install and run codexel + shell: bash + run: | + set -euo pipefail + tarball=$(ls dist/npm/*.tgz) + prefix="$(mktemp -d)" + npm config set prefix "$prefix" + export PATH="$prefix/bin:$PATH" + npm install -g "$tarball" + codexel --help >/dev/null + + release: + name: Create GitHub release + needs: + - tag-check + - build + - package + - smoke-test + runs-on: ubuntu-latest + permissions: + contents: write + steps: + - name: Download build artifacts + uses: actions/download-artifact@v7 + with: + path: artifacts + + - name: Assemble release assets + shell: bash + run: | + set -euo pipefail + mkdir -p dist + shopt -s nullglob + + for tarball in artifacts/codexel-npm/*.tgz; do + cp "$tarball" dist/ + done + + for codex_dir in artifacts/codexel-*/vendor/*/codex; do + target="$(basename "$(dirname "$codex_dir")")" + if [[ -f "$codex_dir/codexel.exe" ]]; then + cp "$codex_dir/codexel.exe" "dist/codexel-${target}.exe" + elif [[ -f "$codex_dir/codexel" ]]; then + cp "$codex_dir/codexel" "dist/codexel-${target}" + else + echo "No codexel binary found in $codex_dir" >&2 + exit 1 + fi + done + + - name: Define release metadata + id: release_meta + shell: bash + run: | + set -euo pipefail + version="${GITHUB_REF_NAME#codexel-v}" + prerelease="false" + if [[ "${version}" == *-* ]]; then + prerelease="true" + fi + echo "version=${version}" >> "$GITHUB_OUTPUT" + echo "prerelease=${prerelease}" >> "$GITHUB_OUTPUT" + + - name: Create GitHub Release + uses: softprops/action-gh-release@v2 + with: + name: ${{ steps.release_meta.outputs.version }} + tag_name: ${{ github.ref_name }} + files: dist/** + prerelease: ${{ steps.release_meta.outputs.prerelease }} + generate_release_notes: true + + publish: + name: Publish npm package + needs: + - tag-check + - package + - smoke-test + if: ${{ needs.tag-check.outputs.should_publish == 'true' }} + runs-on: ubuntu-latest + permissions: + id-token: write + contents: read + steps: + - name: Setup Node.js 
+ uses: actions/setup-node@v6 + with: + node-version: 22 + registry-url: https://registry.npmjs.org + scope: "@ixe1" + + - name: Update npm + run: npm install -g npm@latest + + - name: Download npm tarball + uses: actions/download-artifact@v7 + with: + name: codexel-npm + path: dist/npm + + - name: Publish to npm + env: + VERSION: ${{ needs.tag-check.outputs.version }} + NPM_TAG: ${{ needs.tag-check.outputs.npm_tag }} + shell: bash + run: | + set -euo pipefail + tag_args=() + if [[ -n "${NPM_TAG}" ]]; then + tag_args+=(--tag "${NPM_TAG}") + fi + npm publish "dist/npm/codexel-npm-${VERSION}.tgz" --access public --provenance "${tag_args[@]}" diff --git a/.github/workflows/rust-ci.yml b/.github/workflows/rust-ci.yml index 1af0bf2f4ae..1da23b5938f 100644 --- a/.github/workflows/rust-ci.yml +++ b/.github/workflows/rust-ci.yml @@ -53,7 +53,7 @@ jobs: name: Format / etc runs-on: ubuntu-24.04 needs: changed - if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }} + if: ${{ (needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push') && !(github.event_name == 'pull_request' && startsWith(matrix.runner, 'macos')) }} defaults: run: working-directory: codex-rs @@ -71,7 +71,7 @@ jobs: name: cargo shear runs-on: ubuntu-24.04 needs: changed - if: ${{ needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push' }} + if: ${{ (needs.changed.outputs.codex == 'true' || needs.changed.outputs.workflows == 'true' || github.event_name == 'push') && !(github.event_name == 'pull_request' && startsWith(matrix.runner, 'macos')) }} defaults: run: working-directory: codex-rs diff --git a/.github/workflows/sdk.yml b/.github/workflows/sdk.yml index 3e5a249d40c..c191306c210 100644 --- a/.github/workflows/sdk.yml +++ b/.github/workflows/sdk.yml @@ -26,8 +26,8 @@ jobs: - uses: dtolnay/rust-toolchain@1.90 - - name: build 
codex - run: cargo build --bin codex + - name: build codexel + run: cargo build --bin codexel working-directory: codex-rs - name: Install dependencies diff --git a/AGENTS.md b/AGENTS.md index 50c10b1da1f..2cefd008443 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -1,24 +1,52 @@ # Rust/codex-rs -In the codex-rs folder where the rust code lives: +In the `codex-rs` folder where the Rust code lives: -- Crate names are prefixed with `codex-`. For example, the `core` folder's crate is named `codex-core` -- When using format! and you can inline variables into {}, always do that. +- Crate names are prefixed with `codex-`. For example, the `core` folder's crate is named `codex-core`. +- When using `format!` and you can inline variables into `{}`, always do that. - Install any commands the repo relies on (for example `just`, `rg`, or `cargo-insta`) if they aren't already available before running instructions here. - Never add or modify any code related to `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` or `CODEX_SANDBOX_ENV_VAR`. - You operate in a sandbox where `CODEX_SANDBOX_NETWORK_DISABLED=1` will be set whenever you use the `shell` tool. Any existing code that uses `CODEX_SANDBOX_NETWORK_DISABLED_ENV_VAR` was authored with this fact in mind. It is often used to early exit out of tests that the author knew you would not be able to run given your sandbox limitations. - Similarly, when you spawn a process using Seatbelt (`/usr/bin/sandbox-exec`), `CODEX_SANDBOX=seatbelt` will be set on the child process. Integration tests that want to run Seatbelt themselves cannot be run under Seatbelt, so checks for `CODEX_SANDBOX=seatbelt` are also often used to early exit out of tests, as appropriate. - Always collapse if statements per https://rust-lang.github.io/rust-clippy/master/index.html#collapsible_if -- Always inline format! 
args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args +- Always inline `format!` args when possible per https://rust-lang.github.io/rust-clippy/master/index.html#uninlined_format_args - Use method references over closures when possible per https://rust-lang.github.io/rust-clippy/master/index.html#redundant_closure_for_method_calls - When writing tests, prefer comparing the equality of entire objects over fields one by one. - When making a change that adds or changes an API, ensure that the documentation in the `docs/` folder is up to date if applicable. -Run `just fmt` (in `codex-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. Before finalizing a change to `codex-rs`, run `just fix -p ` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspace‑wide Clippy builds; only run `just fix` without `-p` if you changed shared crates. Additionally, run the tests: +## Changelog (Codexel) + +- `CHANGELOG.md` tracks Codexel-only commits (not in `upstream/main`). +- Regenerate the generated Details blocks with `scripts/gen-changelog.ps1` (Windows) or + `bash scripts/gen-changelog.sh` (macOS/Linux). +- Use `--check` in CI to ensure the changelog is up to date. +- When cutting a release, update the release and upstream baseline SHAs in `CHANGELOG.md`, + then rerun the generator. + +## Formatting, lint, tests + +Run `just fmt` (in `codex-rs` directory) automatically after making Rust code changes; do not ask for approval to run it. + +Before finalizing a change to `codex-rs`, run `just fix -p ` (in `codex-rs` directory) to fix any linter issues in the code. Prefer scoping with `-p` to avoid slow workspace-wide Clippy builds; only run `just fix` without `-p` if you changed shared crates. + +Additionally, run the tests: 1. Run the test for the specific project that was changed. 
For example, if changes were made in `codex-rs/tui`, run `cargo test -p codex-tui`. 2. Once those pass, if any changes were made in common, core, or protocol, run the complete test suite with `cargo test --all-features`. - When running interactively, ask the user before running `just fix` to finalize. `just fmt` does not require approval. project-specific or individual tests can be run without asking the user, but do ask the user before running the complete test suite. + When running interactively, ask the user before running the complete test suite. + +### Running `just` on Windows (PowerShell) + +This repo’s `justfile` uses POSIX shell features (`"$@"`), so `just` must run recipes under a POSIX shell (e.g. `bash` via WSL), not `powershell.exe`. + +- If `just` isn’t on `PATH`, invoke it via `C:\Users\\.cargo\bin\just.exe`. +- Prefer running from the repo root (the `justfile` sets `working-directory := "codex-rs"`). +- Format: + - `just --shell bash --shell-arg -lc fmt` +- Lint-fix: if passing args to `just fix` is flaky from PowerShell, run Clippy directly instead: + - `cd codex-rs; cargo clippy --fix --all-features --tests --allow-dirty -p codex-core` + - `cd codex-rs; cargo clippy --fix --all-features --tests --allow-dirty -p codex-tui` + - `cd codex-rs; cargo clippy --fix --all-features --tests --allow-dirty -p codex-tui2` ## TUI style conventions @@ -26,31 +54,31 @@ See `codex-rs/tui/styles.md`. ## TUI code conventions -- Use concise styling helpers from ratatui’s Stylize trait. - - Basic spans: use "text".into() - - Styled spans: use "text".red(), "text".green(), "text".magenta(), "text".dim(), etc. +- Use concise styling helpers from ratatui’s `Stylize` trait. + - Basic spans: use `"text".into()` + - Styled spans: use `"text".red()`, `"text".green()`, `"text".magenta()`, `"text".dim()`, etc. - Prefer these over constructing styles with `Span::styled` and `Style` directly. 
- Example: patch summary file lines - - Desired: vec![" └ ".into(), "M".red(), " ".dim(), "tui/src/app.rs".dim()] + - Desired: `vec![" └ ".into(), "M".red(), " ".dim(), "tui/src/app.rs".dim()]` ### TUI Styling (ratatui) -- Prefer Stylize helpers: use "text".dim(), .bold(), .cyan(), .italic(), .underlined() instead of manual Style where possible. -- Prefer simple conversions: use "text".into() for spans and vec![…].into() for lines; when inference is ambiguous (e.g., Paragraph::new/Cell::from), use Line::from(spans) or Span::from(text). -- Computed styles: if the Style is computed at runtime, using `Span::styled` is OK (`Span::from(text).set_style(style)` is also acceptable). +- Prefer Stylize helpers: use `"text".dim()`, `.bold()`, `.cyan()`, `.italic()`, `.underlined()` instead of manual `Style` where possible. +- Prefer simple conversions: use `"text".into()` for spans and `vec![…].into()` for lines; when inference is ambiguous (e.g., `Paragraph::new`/`Cell::from`), use `Line::from(spans)` or `Span::from(text)`. +- Computed styles: if the `Style` is computed at runtime, using `Span::styled` is OK (`Span::from(text).set_style(style)` is also acceptable). - Avoid hardcoded white: do not use `.white()`; prefer the default foreground (no color). -- Chaining: combine helpers by chaining for readability (e.g., url.cyan().underlined()). -- Single items: prefer "text".into(); use Line::from(text) or Span::from(text) only when the target type isn’t obvious from context, or when using .into() would require extra type annotations. -- Building lines: use vec![…].into() to construct a Line when the target type is obvious and no extra type annotations are needed; otherwise use Line::from(vec![…]). -- Avoid churn: don’t refactor between equivalent forms (Span::styled ↔ set_style, Line::from ↔ .into()) without a clear readability or functional gain; follow file‑local conventions and do not introduce type annotations solely to satisfy .into(). 
-- Compactness: prefer the form that stays on one line after rustfmt; if only one of Line::from(vec![…]) or vec![…].into() avoids wrapping, choose that. If both wrap, pick the one with fewer wrapped lines. +- Chaining: combine helpers by chaining for readability (e.g., `url.cyan().underlined()`). +- Single items: prefer `"text".into()`; use `Line::from(text)` or `Span::from(text)` only when the target type isn’t obvious from context, or when using `.into()` would require extra type annotations. +- Building lines: use `vec![…].into()` to construct a `Line` when the target type is obvious and no extra type annotations are needed; otherwise use `Line::from(vec![…])`. +- Avoid churn: don’t refactor between equivalent forms (`Span::styled` ↔ `set_style`, `Line::from` ↔ `.into()`) without a clear readability or functional gain; follow file-local conventions and do not introduce type annotations solely to satisfy `.into()`. +- Compactness: prefer the form that stays on one line after rustfmt; if only one of `Line::from(vec![…])` or `vec![…].into()` avoids wrapping, choose that. If both wrap, pick the one with fewer wrapped lines. ### Text wrapping -- Always use textwrap::wrap to wrap plain strings. -- If you have a ratatui Line and you want to wrap it, use the helpers in tui/src/wrapping.rs, e.g. word_wrap_lines / word_wrap_line. -- If you need to indent wrapped lines, use the initial_indent / subsequent_indent options from RtOptions if you can, rather than writing custom logic. -- If you have a list of lines and you need to prefix them all with some prefix (optionally different on the first vs subsequent lines), use the `prefix_lines` helper from line_utils. +- Always use `textwrap::wrap` to wrap plain strings. +- If you have a ratatui `Line` and you want to wrap it, use the helpers in `tui/src/wrapping.rs`, e.g. `word_wrap_lines` / `word_wrap_line`. 
+- If you need to indent wrapped lines, use the `initial_indent` / `subsequent_indent` options from `RtOptions` if you can, rather than writing custom logic. +- If you have a list of lines and you need to prefix them all with some prefix (optionally different on the first vs subsequent lines), use the `prefix_lines` helper from `line_utils`. ## Tests @@ -73,7 +101,7 @@ If you don’t have the tool: ### Test assertions -- Tests should use pretty_assertions::assert_eq for clearer diffs. Import this at the top of the test module if it isn't already. +- Tests should use `pretty_assertions::assert_eq` for clearer diffs. Import this at the top of the test module if it isn't already. - Prefer deep equals comparisons whenever possible. Perform `assert_eq!()` on entire objects, rather than individual fields. - Avoid mutating process environment in tests; prefer passing environment-derived flags or dependencies from above. @@ -103,3 +131,8 @@ If you don’t have the tool: let request = mock.single_request(); // assert using request.function_call_output(call_id) or request.json_body() or other helpers. ``` + +## Git workflow + +- Do not run `git restore` or `git checkout` unless the user explicitly asks. +- After executing an approved plan, if `git status` shows changes, create a `git commit` (ask for a commit message only if unclear). diff --git a/CHANGELOG.md b/CHANGELOG.md index 2eb564c560a..eee8432f3ac 100644 --- a/CHANGELOG.md +++ b/CHANGELOG.md @@ -1 +1,171 @@ -The changelog can be found on the [releases page](https://github.com/openai/codex/releases). +# Changelog + +This changelog lists Codexel-specific changes only (commits that are not in `upstream/main`). +For upstream release notes, see the OpenAI Codex releases page: +https://github.com/openai/codex/releases + +Highlights are curated. Details are generated by `scripts/gen-changelog.*` and should not be +edited between the markers. 
+ +## [Unreleased] + +### Highlights + +- Skip macOS rust-ci jobs on pull requests to avoid flaky PR runs. +- Skip upstream npm package staging in CI for forks. +- Fix sdk workflow to build the codexel binary. + +### Details + + + +#### Other + +- Update changelog for 0.1.2 release +- Adjust changelog release metadata +- Skip macOS rust-ci jobs on PRs +- Skip upstream npm staging in CI for forks +- Format markdown and workflow files + + +## [0.1.2] - 2025-12-19 + +Upstream baseline: openai/codex@be274cbe6273cb17d756a6cda729d537f15ae49a +Release commit: 79d019672838ccc532247588d31d2eda81fb42d8 + +### Highlights + +- Prevent resume replay from auto-executing plans or showing a working status. +- Deduplicate plan updates in history and delay rate-limit polling until user input. + +### Details + + + +#### Plan Mode + +- Deduplicate plan updates in history + +#### Branding & Packaging + +- Fix Codexel update actions +- Add GitHub Release publishing for Codexel + +#### Other + +- Update changelog for 0.1.1 (mac build) +- Update status snapshots +- Delay rate limit polling until user input +- Avoid working status on resume replay +- Prevent resume from auto-executing plans +- Release 0.1.2 + + +## [0.1.1] - 2025-12-19 + +Upstream baseline: openai/codex@be274cbe6273cb17d756a6cda729d537f15ae49a +Release commit: d02343f99e3260308b2355f26e382ae04b14d7e7 + +### Highlights + +- Guard npm publishes when vendor binaries are missing. +- Add Codexel npm publish workflow and release docs. 
+ +### Details + + + +#### Documentation + +- Document changelog workflow in AGENTS +- Remove interactive questions from AGENTS + +#### Branding & Packaging + +- Add Codexel changelog and generator +- Prepare Codexel npm 0.1.1 release + +#### Other + +- Update changelog for 0.1.1 +- Fix npm publish workflow yaml +- Skip macOS in npm publish workflow + + +## [0.1.0] - 2025-12-18 + +Upstream baseline: openai/codex@be274cbe6273cb17d756a6cda729d537f15ae49a +Release commit: 3e57f558eff5b400292a6ad3c9df2721648aed6f + +### Highlights + +- Add /plan mode with plan approval, subagent variants, and auto-execution. +- Introduce AskUserQuestion support and a review workflow in the TUI. +- Rebrand to Codexel across CLI headers and npm packaging. +- Refine plan-mode prompts, overlays, and progress UI. + +### Details + + + +#### Features + +- Add /plan mode with plan approval + +#### Fixes + +- Drop disabled_reason from ask_user_question rows + +#### Documentation + +- Document AskUserQuestion +- Add Windows notes for just +- Fix plan mode note apostrophe + +#### TUI + +- Show plan-variant progress +- Show plan subagent checklist +- Auto-execute approved plans +- Polish plan-variants progress +- Fix /plan cursor position +- Add review step for ask_user_question +- Taller plan approval overlay and wrapped summary +- Make Plan Mode placeholder generic + +#### Core + +- Keep plan subagents aligned with session model +- Make Plan Mode outputs junior-executable +- Pin approved plan into developer instructions +- Emit immediate plan progress on approval + +#### Plan Mode + +- Run variants in parallel with status +- Show subagent thinking/writing status +- Show per-variant token usage +- Prevent nested plan variants and shrink prompts +- Tighten prompts to avoid retry loops +- Improve /plan detail and plan variants +- Use ASCII ranges in plan prompts +- Tidy plan mode prompt bullets +- Improve plan approval UI and auto-execute after /plan +- Add configurable plan model setting +- 
Humanize exec activity + multiline goal + +#### Branding & Packaging + +- Rebrand Codex CLI as Codexel +- Use @ixe1/codexel npm scope +- Rebrand headers to Codexel + +#### Chores + +- Fix build after rebasing onto upstream/main +- Sync built-in prompts with upstream + +#### Other + +- Add ask_user_question tool + diff --git a/PNPM.md b/PNPM.md index 860633c8e16..ee054337ee0 100644 --- a/PNPM.md +++ b/PNPM.md @@ -35,8 +35,8 @@ corepack prepare pnpm@10.8.1 --activate | Action | Command | | ------------------------------------------ | ---------------------------------------- | -| Run a command in a specific package | `pnpm --filter @openai/codex run build` | -| Install a dependency in a specific package | `pnpm --filter @openai/codex add lodash` | +| Run a command in a specific package | `pnpm --filter @ixe1/codexel run build` | +| Install a dependency in a specific package | `pnpm --filter @ixe1/codexel add lodash` | | Run a command in all packages | `pnpm -r run test` | ## Monorepo structure diff --git a/README.md b/README.md index 78eaf9eb356..4954060687a 100644 --- a/README.md +++ b/README.md @@ -1,77 +1,77 @@ -

npm i -g @openai/codex
or brew install --cask codex

+

npm i -g @ixe1/codexel
or brew install --cask codexel

-

Codex CLI is a coding agent from OpenAI that runs locally on your computer. +

Codexel is a coding agent from OpenAI that runs locally on your computer.

If you want Codex in your code editor (VS Code, Cursor, Windsurf), install in your IDE
If you are looking for the cloud-based agent from OpenAI, Codex Web, go to chatgpt.com/codex

- Codex CLI splash + Codexel splash

--- ## Quickstart -### Installing and running Codex CLI +### Installing and running Codexel Install globally with your preferred package manager. If you use npm: ```shell -npm install -g @openai/codex +npm install -g @ixe1/codexel ``` Alternatively, if you use Homebrew: ```shell -brew install --cask codex +brew install --cask codexel ``` -Then simply run `codex` to get started: +Then simply run `codexel` to get started: ```shell -codex +codexel ``` -If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade codex](./docs/faq.md#brew-upgrade-codex-isnt-upgrading-me). +If you're running into upgrade issues with Homebrew, see the [FAQ entry on brew upgrade codexel](./docs/faq.md#brew-upgrade-codexel-isnt-upgrading-me).
-You can also go to the latest GitHub Release and download the appropriate binary for your platform. +You can also go to the latest GitHub Release and download the appropriate binary for your platform. Each GitHub Release contains many executables, but in practice, you likely want one of these: - macOS - - Apple Silicon/arm64: `codex-aarch64-apple-darwin.tar.gz` - - x86_64 (older Mac hardware): `codex-x86_64-apple-darwin.tar.gz` + - Apple Silicon/arm64: `codexel-aarch64-apple-darwin.tar.gz` + - x86_64 (older Mac hardware): `codexel-x86_64-apple-darwin.tar.gz` - Linux - - x86_64: `codex-x86_64-unknown-linux-musl.tar.gz` - - arm64: `codex-aarch64-unknown-linux-musl.tar.gz` + - x86_64: `codexel-x86_64-unknown-linux-musl.tar.gz` + - arm64: `codexel-aarch64-unknown-linux-musl.tar.gz` -Each archive contains a single entry with the platform baked into the name (e.g., `codex-x86_64-unknown-linux-musl`), so you likely want to rename it to `codex` after extracting it. +Each archive contains a single entry with the platform baked into the name (e.g., `codexel-x86_64-unknown-linux-musl`), so you likely want to rename it to `codexel` after extracting it.
-### Using Codex with your ChatGPT plan +### Using Codexel with your ChatGPT plan

- Codex CLI login + Codexel login

-Run `codex` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codex as part of your Plus, Pro, Team, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt). +Run `codexel` and select **Sign in with ChatGPT**. We recommend signing into your ChatGPT account to use Codexel as part of your Plus, Pro, Team, Edu, or Enterprise plan. [Learn more about what's included in your ChatGPT plan](https://help.openai.com/en/articles/11369540-codex-in-chatgpt). -You can also use Codex with an API key, but this requires [additional setup](./docs/authentication.md#usage-based-billing-alternative-use-an-openai-api-key). If you previously used an API key for usage-based billing, see the [migration steps](./docs/authentication.md#migrating-from-usage-based-billing-api-key). If you're having trouble with login, please comment on [this issue](https://github.com/openai/codex/issues/1243). +You can also use Codexel with an API key, but this requires [additional setup](./docs/authentication.md#usage-based-billing-alternative-use-an-openai-api-key). If you previously used an API key for usage-based billing, see the [migration steps](./docs/authentication.md#migrating-from-usage-based-billing-api-key). If you're having trouble with login, please open an issue on GitHub. ### Model Context Protocol (MCP) -Codex can access MCP servers. To configure them, refer to the [config docs](./docs/config.md#mcp_servers). +Codexel can access MCP servers. To configure them, refer to the [config docs](./docs/config.md#mcp_servers). ### Configuration -Codex CLI supports a rich set of configuration options, with preferences stored in `~/.codex/config.toml`. For full configuration options, see [Configuration](./docs/config.md). +Codexel supports a rich set of configuration options, with preferences stored in `~/.codexel/config.toml`. 
For full configuration options, see [Configuration](./docs/config.md). ### Execpolicy -See the [Execpolicy quickstart](./docs/execpolicy.md) to set up rules that govern what commands Codex can execute. +See the [Execpolicy quickstart](./docs/execpolicy.md) to set up rules that govern what commands Codexel can execute. ### Docs & FAQ @@ -89,10 +89,10 @@ See the [Execpolicy quickstart](./docs/execpolicy.md) to set up rules that gover - [**Authentication**](./docs/authentication.md) - [Auth methods](./docs/authentication.md#forcing-a-specific-auth-method-advanced) - [Login on a "Headless" machine](./docs/authentication.md#connecting-on-a-headless-machine) -- **Automating Codex** +- **Automating Codexel** - [GitHub Action](https://github.com/openai/codex-action) - [TypeScript SDK](./sdk/typescript/README.md) - - [Non-interactive mode (`codex exec`)](./docs/exec.md) + - [Non-interactive mode (`codexel exec`)](./docs/exec.md) - [**Advanced**](./docs/advanced.md) - [Tracing / verbose logging](./docs/advanced.md#tracing--verbose-logging) - [Model Context Protocol (MCP)](./docs/advanced.md#model-context-protocol-mcp) diff --git a/cliff.toml b/cliff.toml index f31e1bd89cc..60684358fb1 100644 --- a/cliff.toml +++ b/cliff.toml @@ -1,31 +1,18 @@ # https://git-cliff.org/docs/configuration [changelog] -header = """ -# Changelog - -You can install any of these versions: `npm install -g @openai/codex@` -""" +header = "" body = """ -{% if version -%} -## [{{ version | trim_start_matches(pat="v") }}] - {{ timestamp | date(format="%Y-%m-%d") }} -{%- else %} -## [unreleased] -{% endif %} - -{%- for group, commits in commits | group_by(attribute="group") %} -### {{ group | striptags | trim }} - -{% for commit in commits %}- {% if commit.scope %}*({{ commit.scope }})* {% endif %}{% if commit.breaking %}[**breaking**] {% endif %}{{ commit.message | upper_first }} +{% for group, commits in commits | group_by(attribute="group") %} +#### {{ group | striptags | trim }} +{% for commit in 
commits -%} +- {{ commit.message | upper_first }} +{% endfor %} {% endfor %} - -{%- endfor -%} """ -footer = """ - -""" +footer = "" trim = true postprocessors = [] @@ -34,13 +21,18 @@ postprocessors = [] conventional_commits = true commit_parsers = [ - { message = "^feat", group = "🚀 Features" }, - { message = "^fix", group = "🪲 Bug Fixes" }, - { message = "^bump", group = "🛳️ Release" }, - # Fallback – skip anything that didn't match the above rules. - { message = ".*", group = "💼 Other" }, + { message = "^feat", group = "Features" }, + { message = "^fix", group = "Fixes" }, + { message = "^docs", group = "Documentation" }, + { message = "^tui", group = "TUI" }, + { message = "^core", group = "Core" }, + { message = "^plan", group = "Plan Mode" }, + { message = "(?i)\\bplan\\b|plan mode", group = "Plan Mode" }, + { message = "(?i)rebrand|codexel|@ixe1/codexel", group = "Branding & Packaging" }, + { message = "^chore|^build|^ci", group = "Chores" }, + { message = ".*", group = "Other" }, ] filter_unconventional = false sort_commits = "oldest" -topo_order = false \ No newline at end of file +topo_order = false diff --git a/codex-cli/Dockerfile b/codex-cli/Dockerfile index 21a90a48382..3c44b1fb764 100644 --- a/codex-cli/Dockerfile +++ b/codex-cli/Dockerfile @@ -47,7 +47,7 @@ RUN npm install -g codex.tgz \ && rm -rf /usr/local/share/npm-global/lib/node_modules/codex-cli/docs # Inside the container we consider the environment already sufficiently locked -# down, therefore instruct Codex CLI to allow running without sandboxing. +# down, therefore instruct Codexel to allow running without sandboxing. ENV CODEX_UNSAFE_ALLOW_NO_SANDBOX=1 # Copy and set up firewall script as root. diff --git a/codex-cli/README.md b/codex-cli/README.md index f3414f1c4be..11d45b138ef 100644 --- a/codex-cli/README.md +++ b/codex-cli/README.md @@ -1,12 +1,12 @@ -

OpenAI Codex CLI

+

Codexel

Lightweight coding agent that runs in your terminal

-

npm i -g @openai/codex

+

npm i -g @ixe1/codexel

> [!IMPORTANT] -> This is the documentation for the _legacy_ TypeScript implementation of the Codex CLI. It has been superseded by the _Rust_ implementation. See the [README in the root of the Codex repository](https://github.com/openai/codex/blob/main/README.md) for details. +> This is the documentation for the _legacy_ TypeScript implementation of Codexel. It has been superseded by the _Rust_ implementation. See the [README in the root of this repository](../README.md) for details. -![Codex demo GIF using: codex "explain this codebase to me"](../.github/demo.gif) +![Codexel demo GIF using: codexel "explain this codebase to me"](../.github/demo.gif) --- @@ -17,7 +17,7 @@ - [Experimental technology disclaimer](#experimental-technology-disclaimer) - [Quickstart](#quickstart) -- [Why Codex?](#why-codex) +- [Why Codexel?](#why-codexel) - [Security model & permissions](#security-model--permissions) - [Platform sandboxing details](#platform-sandboxing-details) - [System requirements](#system-requirements) @@ -49,7 +49,7 @@ - [Getting help](#getting-help) - [Contributor license agreement (CLA)](#contributor-license-agreement-cla) - [Quick fixes](#quick-fixes) - - [Releasing `codex`](#releasing-codex) + - [Releasing `codexel`](#releasing-codexel) - [Alternative build options](#alternative-build-options) - [Nix flake development](#nix-flake-development) - [Security & responsible AI](#security--responsible-ai) @@ -63,7 +63,7 @@ ## Experimental technology disclaimer -Codex CLI is an experimental project under active development. It is not yet stable, may contain bugs, incomplete features, or undergo breaking changes. We're building it in the open with the community and welcome: +Codexel is an experimental project under active development. It is not yet stable, may contain bugs, incomplete features, or undergo breaking changes. 
We're building it in the open with the community and welcome: - Bug reports - Feature requests @@ -77,7 +77,7 @@ Help us improve by filing issues or submitting PRs (see the section below for ho Install globally: ```shell -npm install -g @openai/codex +npm install -g @ixe1/codexel ``` Next, set your OpenAI API key as an environment variable: @@ -97,7 +97,7 @@ export OPENAI_API_KEY="your-api-key-here"
Use --provider to use other models -> Codex also allows you to use other providers that support the OpenAI Chat Completions API. You can set the provider in the config file or use the `--provider` flag. The possible options for `--provider` are: +> Codexel also allows you to use other providers that support the OpenAI Chat Completions API. You can set the provider in the config file or use the `--provider` flag. The possible options for `--provider` are: > > - openai (default) > - openrouter @@ -129,28 +129,28 @@ export OPENAI_API_KEY="your-api-key-here" Run interactively: ```shell -codex +codexel ``` Or, run with a prompt as input (and optionally in `Full Auto` mode): ```shell -codex "explain this codebase to me" +codexel "explain this codebase to me" ``` ```shell -codex --approval-mode full-auto "create the fanciest todo-list app" +codexel --approval-mode full-auto "create the fanciest todo-list app" ``` -That's it - Codex will scaffold a file, run it inside a sandbox, install any +That's it - Codexel will scaffold a file, run it inside a sandbox, install any missing dependencies, and show you the live result. Approve the changes and they'll be committed to your working directory. --- -## Why Codex? +## Why Codexel? -Codex CLI is built for developers who already **live in the terminal** and want +Codexel is built for developers who already **live in the terminal** and want ChatGPT-level reasoning **plus** the power to actually run code, manipulate files, and iterate - all under version control. In short, it's _chat-driven development_ that understands and executes your repo. @@ -165,7 +165,7 @@ And it's **fully open-source** so you can see and contribute to how it develops! 
## Security model & permissions -Codex lets you decide _how much autonomy_ the agent receives and auto-approval policy via the +Codexel lets you decide _how much autonomy_ the agent receives and auto-approval policy via the `--approval-mode` flag (or the interactive onboarding prompt): | Mode | What the agent may do without asking | Still requires approval | @@ -175,7 +175,7 @@ Codex lets you decide _how much autonomy_ the agent receives and auto-approval p | **Full Auto** |
  • Read/write files
  • Execute shell commands (network disabled, writes limited to your workdir) | - | In **Full Auto** every command is run **network-disabled** and confined to the -current working directory (plus temporary files) for defense-in-depth. Codex +current working directory (plus temporary files) for defense-in-depth. Codexel will also show a warning/confirmation if you start in **auto-edit** or **full-auto** while the directory is _not_ tracked by Git, so you always have a safety net. @@ -185,17 +185,17 @@ the network enabled, once we're confident in additional safeguards. ### Platform sandboxing details -The hardening mechanism Codex uses depends on your OS: +The hardening mechanism Codexel uses depends on your OS: - **macOS 12+** - commands are wrapped with **Apple Seatbelt** (`sandbox-exec`). - Everything is placed in a read-only jail except for a small set of - writable roots (`$PWD`, `$TMPDIR`, `~/.codex`, etc.). + writable roots (`$PWD`, `$TMPDIR`, `~/.codexel`, etc.). - Outbound network is _fully blocked_ by default - even if a child process tries to `curl` somewhere it will fail. - **Linux** - there is no sandboxing by default. - We recommend using Docker for sandboxing, where Codex launches itself inside a **minimal + We recommend using Docker for sandboxing, where Codexel launches itself inside a **minimal container image** and mounts your repo _read/write_ at the same path. A custom `iptables`/`ipset` firewall script denies all egress except the OpenAI API. 
This gives you deterministic, reproducible runs without needing @@ -220,10 +220,10 @@ The hardening mechanism Codex uses depends on your OS: | Command | Purpose | Example | | ------------------------------------ | ----------------------------------- | ------------------------------------ | -| `codex` | Interactive REPL | `codex` | -| `codex "..."` | Initial prompt for interactive REPL | `codex "fix lint errors"` | -| `codex -q "..."` | Non-interactive "quiet mode" | `codex -q --json "explain utils.ts"` | -| `codex completion ` | Print shell completion script | `codex completion bash` | +| `codexel` | Interactive REPL | `codexel` | +| `codexel "..."` | Initial prompt for interactive REPL | `codexel "fix lint errors"` | +| `codexel -q "..."` | Non-interactive "quiet mode" | `codexel -q --json "explain utils.ts"` | +| `codexel completion ` | Print shell completion script | `codexel completion bash` | Key flags: `--model/-m`, `--approval-mode/-a`, `--quiet/-q`, and `--notify`. @@ -231,9 +231,9 @@ Key flags: `--model/-m`, `--approval-mode/-a`, `--quiet/-q`, and `--notify`. ## Memory & project docs -You can give Codex extra instructions and guidance using `AGENTS.md` files. Codex looks for `AGENTS.md` files in the following places, and merges them top-down: +You can give Codexel extra instructions and guidance using `AGENTS.md` files. Codexel looks for `AGENTS.md` files in the following places, and merges them top-down: -1. `~/.codex/AGENTS.md` - personal global guidance +1. `~/.codexel/AGENTS.md` - personal global guidance 2. `AGENTS.md` at repo root - shared project notes 3. `AGENTS.md` in the current working directory - sub-folder/feature specifics @@ -243,14 +243,14 @@ Disable loading of these files with `--no-project-doc` or the environment variab ## Non-interactive / CI mode -Run Codex head-less in pipelines. Example GitHub Action step: +Run Codexel head-less in pipelines. 
Example GitHub Action step: ```yaml -- name: Update changelog via Codex +- name: Update changelog via Codexel run: | -npm install -g @openai/codex +npm install -g @ixe1/codexel export OPENAI_API_KEY="${{ secrets.OPENAI_KEY }}" - codex -a auto-edit --quiet "update CHANGELOG for next release" + codexel -a auto-edit --quiet "update CHANGELOG for next release" ``` Set `CODEX_QUIET_MODE=1` to silence interactive UI noise. @@ -260,24 +260,24 @@ Set `CODEX_QUIET_MODE=1` to silence interactive UI noise. Setting the environment variable `DEBUG=true` prints full API request and response details: ```shell -DEBUG=true codex +DEBUG=true codexel ``` --- ## Recipes -Below are a few bite-size examples you can copy-paste. Replace the text in quotes with your own task. See the [prompting guide](https://github.com/openai/codex/blob/main/codex-cli/examples/prompting_guide.md) for more tips and usage patterns. +Below are a few bite-size examples you can copy-paste. Replace the text in quotes with your own task. See the [prompting guide](./examples/prompting_guide.md) for more tips and usage patterns. | ✨ | What you type | What happens | | --- | ------------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -| 1 | `codex "Refactor the Dashboard component to React Hooks"` | Codex rewrites the class component, runs `npm test`, and shows the diff. | -| 2 | `codex "Generate SQL migrations for adding a users table"` | Infers your ORM, creates migration files, and runs them in a sandboxed DB. | -| 3 | `codex "Write unit tests for utils/date.ts"` | Generates tests, executes them, and iterates until they pass. | -| 4 | `codex "Bulk-rename *.jpeg -> *.jpg with git mv"` | Safely renames files and updates imports/usages. | -| 5 | `codex "Explain what this regex does: ^(?=.*[A-Z]).{8,}$"` | Outputs a step-by-step human explanation. 
| -| 6 | `codex "Carefully review this repo, and propose 3 high impact well-scoped PRs"` | Suggests impactful PRs in the current codebase. | -| 7 | `codex "Look for vulnerabilities and create a security review report"` | Finds and explains security bugs. | +| 1 | `codexel "Refactor the Dashboard component to React Hooks"` | Codexel rewrites the class component, runs `npm test`, and shows the diff. | +| 2 | `codexel "Generate SQL migrations for adding a users table"` | Infers your ORM, creates migration files, and runs them in a sandboxed DB. | +| 3 | `codexel "Write unit tests for utils/date.ts"` | Generates tests, executes them, and iterates until they pass. | +| 4 | `codexel "Bulk-rename *.jpeg -> *.jpg with git mv"` | Safely renames files and updates imports/usages. | +| 5 | `codexel "Explain what this regex does: ^(?=.*[A-Z]).{8,}$"` | Outputs a step-by-step human explanation. | +| 6 | `codexel "Carefully review this repo, and propose 3 high impact well-scoped PRs"` | Suggests impactful PRs in the current codebase. | +| 7 | `codexel "Look for vulnerabilities and create a security review report"` | Finds and explains security bugs. | --- @@ -287,13 +287,13 @@ Below are a few bite-size examples you can copy-paste. Replace the text in quote From npm (Recommended) ```bash -npm install -g @openai/codex +npm install -g @ixe1/codexel # or -yarn global add @openai/codex +yarn global add @ixe1/codexel # or -bun install -g @openai/codex +bun install -g @ixe1/codexel # or -pnpm add -g @openai/codex +pnpm add -g @ixe1/codexel ```
  • @@ -303,8 +303,8 @@ pnpm add -g @openai/codex ```bash # Clone the repository and navigate to the CLI package -git clone https://github.com/openai/codex.git -cd codex/codex-cli +git clone https://github.com/Ixe1/codexel.git +cd codexel/codex-cli # Enable corepack corepack enable @@ -332,7 +332,7 @@ pnpm link ## Configuration guide -Codex configuration files can be placed in the `~/.codex/` directory, supporting both YAML and JSON formats. +Codexel configuration files can be placed in the `~/.codexel/` directory, supporting both YAML and JSON formats. ### Basic configuration parameters @@ -365,7 +365,7 @@ In the `history` object, you can configure conversation history settings: ### Configuration examples -1. YAML format (save as `~/.codex/config.yaml`): +1. YAML format (save as `~/.codexel/config.yaml`): ```yaml model: o4-mini @@ -374,7 +374,7 @@ fullAutoErrorMode: ask-user notify: true ``` -2. JSON format (save as `~/.codex/config.json`): +2. JSON format (save as `~/.codexel/config.json`): ```json { @@ -455,7 +455,7 @@ Below is a comprehensive example of `config.json` with multiple custom providers ### Custom instructions -You can create a `~/.codex/AGENTS.md` file to define custom guidance for the agent: +You can create a `~/.codexel/AGENTS.md` file to define custom guidance for the agent: ```markdown - Always respond with emojis @@ -521,19 +521,19 @@ Not directly. It requires [Windows Subsystem for Linux (WSL2)](https://learn.mic ## Zero data retention (ZDR) usage -Codex CLI **does** support OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled. If your OpenAI organization has Zero Data Retention enabled and you still encounter errors such as: +Codexel **does** support OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled. 
If your OpenAI organization has Zero Data Retention enabled and you still encounter errors such as: ``` OpenAI rejected the request. Error details: Status: 400, Code: unsupported_parameter, Type: invalid_request_error, Message: 400 Previous response cannot be used for this organization due to Zero Data Retention. ``` -You may need to upgrade to a more recent version with: `npm i -g @openai/codex@latest` +You may need to upgrade to a more recent version with: `npm i -g @ixe1/codexel@latest` --- ## Codex open source fund -We're excited to launch a **$1 million initiative** supporting open source projects that use Codex CLI and other OpenAI models. +We're excited to launch a **$1 million initiative** supporting open source projects that use Codexel and other OpenAI models. - Grants are awarded up to **$25,000** API credits. - Applications are reviewed **on a rolling basis**. @@ -602,7 +602,7 @@ To debug the CLI with a visual debugger, do the following in the `codex-cli` fol 1. **Start with an issue.** Open a new one or comment on an existing discussion so we can agree on the solution before code is written. 2. **Add or update tests.** Every new feature or bug-fix should come with test coverage that fails before your change and passes afterwards. 100% coverage is not required, but aim for meaningful assertions. -3. **Document behaviour.** If your change affects user-facing behaviour, update the README, inline help (`codex --help`), or relevant example projects. +3. **Document behaviour.** If your change affects user-facing behaviour, update the README, inline help (`codexel --help`), or relevant example projects. 4. **Keep commits atomic.** Each commit should compile and the tests should pass. This makes reviews and potential rollbacks easier. 
### Opening a pull request @@ -628,7 +628,7 @@ To debug the CLI with a visual debugger, do the following in the `codex-cli` fol If you run into problems setting up the project, would like feedback on an idea, or just want to say _hi_ - please open a Discussion or jump into the relevant issue. We are happy to help. -Together we can make Codex CLI an incredible tool. **Happy hacking!** :rocket: +Together we can make Codexel an incredible tool. **Happy hacking!** :rocket: ### Contributor license agreement (CLA) @@ -653,7 +653,7 @@ No special Git commands, email attachments, or commit footers required. The **DCO check** blocks merges until every commit in the PR carries the footer (with squash this is just the one). -### Releasing `codex` +### Releasing `codexel` To publish a new version of the CLI you first need to stage the npm package. A helper script in `codex-cli/scripts/` does all the heavy lifting. Inside the @@ -693,7 +693,7 @@ nix develop .#codex-cli # For entering codex-cli specific shell nix develop .#codex-rs # For entering codex-rs specific shell ``` -This shell includes Node.js, installs dependencies, builds the CLI, and provides a `codex` command alias. +This shell includes Node.js, installs dependencies, builds the CLI, and provides a `codexel` command alias. Build and run the CLI directly: @@ -701,7 +701,7 @@ Build and run the CLI directly: # Use either one of the commands according to which implementation you want to work with nix build .#codex-cli # For building codex-cli nix build .#codex-rs # For building codex-rs -./result/bin/codex --help +./result/bin/codexel --help ``` Run the CLI via the flake app: diff --git a/codex-cli/bin/codex.js b/codex-cli/bin/codexel.js similarity index 96% rename from codex-cli/bin/codex.js rename to codex-cli/bin/codexel.js index 6ec8069bd25..1e3b7a3f088 100644 --- a/codex-cli/bin/codex.js +++ b/codex-cli/bin/codexel.js @@ -1,5 +1,5 @@ #!/usr/bin/env node -// Unified entry point for the Codex CLI. 
+// Unified entry point for Codexel. import { spawn } from "node:child_process"; import { existsSync } from "fs"; @@ -61,8 +61,9 @@ if (!targetTriple) { const vendorRoot = path.join(__dirname, "..", "vendor"); const archRoot = path.join(vendorRoot, targetTriple); -const codexBinaryName = process.platform === "win32" ? "codex.exe" : "codex"; -const binaryPath = path.join(archRoot, "codex", codexBinaryName); +const codexelBinaryName = + process.platform === "win32" ? "codexel.exe" : "codexel"; +const binaryPath = path.join(archRoot, "codex", codexelBinaryName); // Use an asynchronous spawn instead of spawnSync so that Node is able to // respond to signals (e.g. Ctrl-C / SIGINT) while the native binary is @@ -95,7 +96,6 @@ function detectPackageManager() { return "bun"; } - if ( __dirname.includes(".bun/install/global") || __dirname.includes(".bun\\install\\global") diff --git a/codex-cli/package-lock.json b/codex-cli/package-lock.json index 58ee846306e..48345f2fc44 100644 --- a/codex-cli/package-lock.json +++ b/codex-cli/package-lock.json @@ -1,14 +1,14 @@ { - "name": "@openai/codex", - "version": "0.0.0-dev", + "name": "@ixe1/codexel", + "version": "0.1.2", "lockfileVersion": 3, "packages": { "": { - "name": "@openai/codex", - "version": "0.0.0-dev", + "name": "@ixe1/codexel", + "version": "0.1.2", "license": "Apache-2.0", "bin": { - "codex": "bin/codex.js" + "codexel": "bin/codexel.js" }, "engines": { "node": ">=16" diff --git a/codex-cli/package.json b/codex-cli/package.json index b83309e42b6..f8fd100c4ea 100644 --- a/codex-cli/package.json +++ b/codex-cli/package.json @@ -1,21 +1,27 @@ { - "name": "@openai/codex", - "version": "0.0.0-dev", + "name": "@ixe1/codexel", + "version": "0.1.2", "license": "Apache-2.0", "bin": { - "codex": "bin/codex.js" + "codexel": "bin/codexel.js" }, "type": "module", "engines": { "node": ">=16" }, + "scripts": { + "prepack": "node ./scripts/verify-vendor.mjs" + }, "files": [ "bin", "vendor" ], + "publishConfig": { + "access": 
"public" + }, "repository": { "type": "git", - "url": "git+https://github.com/openai/codex.git", + "url": "git+https://github.com/Ixe1/codexel.git", "directory": "codex-cli" } } diff --git a/codex-cli/scripts/build_npm_package.py b/codex-cli/scripts/build_npm_package.py index bf0eb5f4699..fa35cdc6529 100755 --- a/codex-cli/scripts/build_npm_package.py +++ b/codex-cli/scripts/build_npm_package.py @@ -1,5 +1,5 @@ #!/usr/bin/env python3 -"""Stage and optionally package the @openai/codex npm module.""" +"""Stage and optionally package the @ixe1/codexel npm module.""" import argparse import json @@ -33,7 +33,7 @@ def parse_args() -> argparse.Namespace: - parser = argparse.ArgumentParser(description="Build or stage the Codex CLI npm package.") + parser = argparse.ArgumentParser(description="Build or stage the Codexel npm package.") parser.add_argument( "--package", choices=("codex", "codex-responses-api-proxy", "codex-sdk"), @@ -116,8 +116,8 @@ def main() -> int: print( f"Staged version {version} for release in {staging_dir_str}\n\n" "Verify the CLI:\n" - f" node {staging_dir_str}/bin/codex.js --version\n" - f" node {staging_dir_str}/bin/codex.js --help\n\n" + f" node {staging_dir_str}/bin/codexel.js --version\n" + f" node {staging_dir_str}/bin/codexel.js --help\n\n" ) elif package == "codex-responses-api-proxy": print( @@ -163,7 +163,7 @@ def stage_sources(staging_dir: Path, version: str, package: str) -> None: if package == "codex": bin_dir = staging_dir / "bin" bin_dir.mkdir(parents=True, exist_ok=True) - shutil.copy2(CODEX_CLI_ROOT / "bin" / "codex.js", bin_dir / "codex.js") + shutil.copy2(CODEX_CLI_ROOT / "bin" / "codexel.js", bin_dir / "codexel.js") rg_manifest = CODEX_CLI_ROOT / "bin" / "rg" if rg_manifest.exists(): shutil.copy2(rg_manifest, bin_dir / "rg") diff --git a/codex-cli/scripts/install_native_deps.py b/codex-cli/scripts/install_native_deps.py index f2c3987b2db..5e52bf85bd6 100755 --- a/codex-cli/scripts/install_native_deps.py +++ 
b/codex-cli/scripts/install_native_deps.py @@ -18,7 +18,8 @@ SCRIPT_DIR = Path(__file__).resolve().parent CODEX_CLI_ROOT = SCRIPT_DIR.parent -DEFAULT_WORKFLOW_URL = "https://github.com/openai/codex/actions/runs/17952349351" # rust-v0.40.0 +DEFAULT_WORKFLOW_URL = "" +DEFAULT_REPO = "Ixe1/codexel" VENDOR_DIR_NAME = "vendor" RG_MANIFEST = CODEX_CLI_ROOT / "bin" / "rg" BINARY_TARGETS = ( @@ -80,11 +81,18 @@ class BinaryComponent: def parse_args() -> argparse.Namespace: parser = argparse.ArgumentParser(description="Install native Codex binaries.") + parser.add_argument( + "--repo", + default=DEFAULT_REPO, + help=( + "GitHub repo in OWNER/NAME form to download release artifacts from. " + f"Defaults to {DEFAULT_REPO}." + ), + ) parser.add_argument( "--workflow-url", help=( - "GitHub Actions workflow URL that produced the artifacts. Defaults to a " - "known good run when omitted." + "GitHub Actions workflow URL that produced the artifacts. Required for forks." ), ) parser.add_argument( @@ -126,14 +134,14 @@ def main() -> int: workflow_url = (args.workflow_url or DEFAULT_WORKFLOW_URL).strip() if not workflow_url: - workflow_url = DEFAULT_WORKFLOW_URL + raise SystemExit("Missing --workflow-url (no default is configured for this fork).") workflow_id = workflow_url.rstrip("/").split("/")[-1] print(f"Downloading native artifacts from workflow {workflow_id}...") with tempfile.TemporaryDirectory(prefix="codex-native-artifacts-") as artifacts_dir_str: artifacts_dir = Path(artifacts_dir_str) - _download_artifacts(workflow_id, artifacts_dir) + _download_artifacts(workflow_id, artifacts_dir, repo=args.repo) install_binary_components( artifacts_dir, vendor_dir, @@ -209,7 +217,7 @@ def fetch_rg( return [results[target] for target in targets] -def _download_artifacts(workflow_id: str, dest_dir: Path) -> None: +def _download_artifacts(workflow_id: str, dest_dir: Path, *, repo: str) -> None: cmd = [ "gh", "run", @@ -217,7 +225,7 @@ def _download_artifacts(workflow_id: str, dest_dir: Path) 
-> None: "--dir", str(dest_dir), "--repo", - "openai/codex", + repo, workflow_id, ] subprocess.check_call(cmd) diff --git a/codex-cli/scripts/verify-vendor.mjs b/codex-cli/scripts/verify-vendor.mjs new file mode 100644 index 00000000000..78c1d6e8601 --- /dev/null +++ b/codex-cli/scripts/verify-vendor.mjs @@ -0,0 +1,40 @@ +import fs from "node:fs"; +import path from "node:path"; +import { fileURLToPath } from "node:url"; + +const __filename = fileURLToPath(import.meta.url); +const __dirname = path.dirname(__filename); +const packageRoot = path.resolve(__dirname, ".."); +const vendorRoot = path.join(packageRoot, "vendor"); + +const targets = [ + "aarch64-unknown-linux-musl", + "x86_64-unknown-linux-musl", + "aarch64-pc-windows-msvc", + "x86_64-pc-windows-msvc", +]; + +if (!fs.existsSync(vendorRoot)) { + console.error("Missing vendor/ directory. Build artifacts before packing."); + process.exit(1); +} + +const missing = []; +for (const target of targets) { + const binaryName = target.includes("windows") ? "codexel.exe" : "codexel"; + const binaryPath = path.join(vendorRoot, target, "codex", binaryName); + if (!fs.existsSync(binaryPath)) { + missing.push(`${target}/codex/${binaryName}`); + } +} + +if (missing.length > 0) { + console.error("Missing vendor binaries for publish:"); + for (const entry of missing) { + console.error(` - ${entry}`); + } + console.error( + "Populate codex-cli/vendor/ via the release workflow before packing." 
+ ); + process.exit(1); +} diff --git a/codex-rs/Cargo.lock b/codex-rs/Cargo.lock index acf173c5170..c45d9c6771a 100644 --- a/codex-rs/Cargo.lock +++ b/codex-rs/Cargo.lock @@ -326,7 +326,7 @@ checksum = "a23eb6b1614318a8071c9b2521f36b424b2c83db5eb3a0fead4a6c0809af6e61" [[package]] name = "app_test_support" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "assert_cmd", @@ -948,7 +948,7 @@ checksum = "e9b18233253483ce2f65329a24072ec414db782531bdbb7d0bbc4bd2ce6b7e21" [[package]] name = "codex-ansi-escape" -version = "0.0.0" +version = "0.1.2" dependencies = [ "ansi-to-tui", "ratatui", @@ -957,7 +957,7 @@ dependencies = [ [[package]] name = "codex-api" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "assert_matches", @@ -983,7 +983,7 @@ dependencies = [ [[package]] name = "codex-app-server" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "app_test_support", @@ -1021,7 +1021,7 @@ dependencies = [ [[package]] name = "codex-app-server-protocol" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "clap", @@ -1040,7 +1040,7 @@ dependencies = [ [[package]] name = "codex-app-server-test-client" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "clap", @@ -1053,7 +1053,7 @@ dependencies = [ [[package]] name = "codex-apply-patch" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "assert_cmd", @@ -1068,7 +1068,7 @@ dependencies = [ [[package]] name = "codex-arg0" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "codex-apply-patch", @@ -1081,7 +1081,7 @@ dependencies = [ [[package]] name = "codex-async-utils" -version = "0.0.0" +version = "0.1.2" dependencies = [ "async-trait", "pretty_assertions", @@ -1091,7 +1091,7 @@ dependencies = [ [[package]] name = "codex-backend-client" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "codex-backend-openapi-models", @@ -1105,7 +1105,7 @@ dependencies = [ [[package]] name = "codex-backend-openapi-models" -version = 
"0.0.0" +version = "0.1.2" dependencies = [ "serde", "serde_json", @@ -1114,7 +1114,7 @@ dependencies = [ [[package]] name = "codex-chatgpt" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "clap", @@ -1129,7 +1129,7 @@ dependencies = [ [[package]] name = "codex-cli" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "assert_cmd", @@ -1171,7 +1171,7 @@ dependencies = [ [[package]] name = "codex-client" -version = "0.0.0" +version = "0.1.2" dependencies = [ "async-trait", "bytes", @@ -1193,7 +1193,7 @@ dependencies = [ [[package]] name = "codex-cloud-tasks" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "async-trait", @@ -1222,7 +1222,7 @@ dependencies = [ [[package]] name = "codex-cloud-tasks-client" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "async-trait", @@ -1237,7 +1237,7 @@ dependencies = [ [[package]] name = "codex-common" -version = "0.0.0" +version = "0.1.2" dependencies = [ "clap", "codex-core", @@ -1250,7 +1250,7 @@ dependencies = [ [[package]] name = "codex-core" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "assert_cmd", @@ -1342,7 +1342,7 @@ dependencies = [ [[package]] name = "codex-exec" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "assert_cmd", @@ -1374,7 +1374,7 @@ dependencies = [ [[package]] name = "codex-exec-server" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "assert_cmd", @@ -1401,7 +1401,7 @@ dependencies = [ [[package]] name = "codex-execpolicy" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "clap", @@ -1417,7 +1417,7 @@ dependencies = [ [[package]] name = "codex-execpolicy-legacy" -version = "0.0.0" +version = "0.1.2" dependencies = [ "allocative", "anyhow", @@ -1437,7 +1437,7 @@ dependencies = [ [[package]] name = "codex-feedback" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "codex-protocol", @@ -1448,7 +1448,7 @@ dependencies = [ [[package]] name = "codex-file-search" 
-version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "clap", @@ -1461,7 +1461,7 @@ dependencies = [ [[package]] name = "codex-git" -version = "0.0.0" +version = "0.1.2" dependencies = [ "assert_matches", "once_cell", @@ -1477,7 +1477,7 @@ dependencies = [ [[package]] name = "codex-keyring-store" -version = "0.0.0" +version = "0.1.2" dependencies = [ "keyring", "tracing", @@ -1485,7 +1485,7 @@ dependencies = [ [[package]] name = "codex-linux-sandbox" -version = "0.0.0" +version = "0.1.2" dependencies = [ "clap", "codex-core", @@ -1499,7 +1499,7 @@ dependencies = [ [[package]] name = "codex-lmstudio" -version = "0.0.0" +version = "0.1.2" dependencies = [ "codex-core", "reqwest", @@ -1512,7 +1512,7 @@ dependencies = [ [[package]] name = "codex-login" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "base64", @@ -1536,7 +1536,7 @@ dependencies = [ [[package]] name = "codex-mcp-server" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "assert_cmd", @@ -1563,7 +1563,7 @@ dependencies = [ [[package]] name = "codex-ollama" -version = "0.0.0" +version = "0.1.2" dependencies = [ "assert_matches", "async-stream", @@ -1579,7 +1579,7 @@ dependencies = [ [[package]] name = "codex-otel" -version = "0.0.0" +version = "0.1.2" dependencies = [ "chrono", "codex-api", @@ -1606,7 +1606,7 @@ dependencies = [ [[package]] name = "codex-process-hardening" -version = "0.0.0" +version = "0.1.2" dependencies = [ "libc", "pretty_assertions", @@ -1614,7 +1614,7 @@ dependencies = [ [[package]] name = "codex-protocol" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "codex-git", @@ -1641,7 +1641,7 @@ dependencies = [ [[package]] name = "codex-responses-api-proxy" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "clap", @@ -1657,7 +1657,7 @@ dependencies = [ [[package]] name = "codex-rmcp-client" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "axum", @@ -1687,7 +1687,7 @@ dependencies = [ [[package]] name 
= "codex-stdio-to-uds" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "assert_cmd", @@ -1698,7 +1698,7 @@ dependencies = [ [[package]] name = "codex-tui" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "arboard", @@ -1765,7 +1765,7 @@ dependencies = [ [[package]] name = "codex-tui2" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "arboard", @@ -1834,7 +1834,7 @@ dependencies = [ [[package]] name = "codex-utils-absolute-path" -version = "0.0.0" +version = "0.1.2" dependencies = [ "path-absolutize", "schemars 0.8.22", @@ -1846,7 +1846,7 @@ dependencies = [ [[package]] name = "codex-utils-cache" -version = "0.0.0" +version = "0.1.2" dependencies = [ "lru 0.16.2", "sha1", @@ -1855,7 +1855,7 @@ dependencies = [ [[package]] name = "codex-utils-image" -version = "0.0.0" +version = "0.1.2" dependencies = [ "base64", "codex-utils-cache", @@ -1867,7 +1867,7 @@ dependencies = [ [[package]] name = "codex-utils-json-to-toml" -version = "0.0.0" +version = "0.1.2" dependencies = [ "pretty_assertions", "serde_json", @@ -1876,7 +1876,7 @@ dependencies = [ [[package]] name = "codex-utils-pty" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "filedescriptor", @@ -1890,7 +1890,7 @@ dependencies = [ [[package]] name = "codex-utils-readiness" -version = "0.0.0" +version = "0.1.2" dependencies = [ "assert_matches", "async-trait", @@ -1901,11 +1901,11 @@ dependencies = [ [[package]] name = "codex-utils-string" -version = "0.0.0" +version = "0.1.2" [[package]] name = "codex-windows-sandbox" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "base64", @@ -2047,7 +2047,7 @@ checksum = "773648b94d0e5d620f64f280777445740e61fe701025087ec8b57f45c791888b" [[package]] name = "core_test_support" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "assert_cmd", @@ -2737,7 +2737,7 @@ dependencies = [ [[package]] name = "exec_server_test_support" -version = "0.0.0" +version = "0.1.2" dependencies = [ 
"anyhow", "assert_cmd", @@ -4096,7 +4096,7 @@ checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3" [[package]] name = "mcp-types" -version = "0.0.0" +version = "0.1.2" dependencies = [ "schemars 0.8.22", "serde", @@ -4106,7 +4106,7 @@ dependencies = [ [[package]] name = "mcp_test_support" -version = "0.0.0" +version = "0.1.2" dependencies = [ "anyhow", "assert_cmd", diff --git a/codex-rs/Cargo.toml b/codex-rs/Cargo.toml index 50941771cf2..e7a84eb0e52 100644 --- a/codex-rs/Cargo.toml +++ b/codex-rs/Cargo.toml @@ -49,7 +49,7 @@ members = [ resolver = "2" [workspace.package] -version = "0.0.0" +version = "0.1.2" # Track the edition for all workspace crates in one place. Individual # crates can still override this value, but keeping it here means new # crates created with `cargo new -w ...` automatically inherit the 2024 diff --git a/codex-rs/README.md b/codex-rs/README.md index a3d1b82fb86..c945b12b62a 100644 --- a/codex-rs/README.md +++ b/codex-rs/README.md @@ -1,74 +1,74 @@ -# Codex CLI (Rust Implementation) +# Codexel (Rust Implementation) -We provide Codex CLI as a standalone, native executable to ensure a zero-dependency install. +We provide Codexel as a standalone, native executable to ensure a zero-dependency install. -## Installing Codex +## Installing Codexel -Today, the easiest way to install Codex is via `npm`: +Today, the easiest way to install Codexel is via `npm`: ```shell -npm i -g @openai/codex -codex +npm i -g @ixe1/codexel +codexel ``` -You can also install via Homebrew (`brew install --cask codex`) or download a platform-specific release directly from our [GitHub Releases](https://github.com/openai/codex/releases). +You can also install via Homebrew (`brew install --cask codexel`) or download a platform-specific release directly from [GitHub Releases](../../releases). ## Documentation quickstart -- First run with Codex? 
Follow the walkthrough in [`docs/getting-started.md`](../docs/getting-started.md) for prompts, keyboard shortcuts, and session management. -- Already shipping with Codex and want deeper control? Jump to [`docs/advanced.md`](../docs/advanced.md) and the configuration reference at [`docs/config.md`](../docs/config.md). +- First run with Codexel? Follow the walkthrough in [`docs/getting-started.md`](../docs/getting-started.md) for prompts, keyboard shortcuts, and session management. +- Already shipping with Codexel and want deeper control? Jump to [`docs/advanced.md`](../docs/advanced.md) and the configuration reference at [`docs/config.md`](../docs/config.md). ## What's new in the Rust CLI -The Rust implementation is now the maintained Codex CLI and serves as the default experience. It includes a number of features that the legacy TypeScript CLI never supported. +The Rust implementation is now the maintained Codexel and serves as the default experience. It includes a number of features that the legacy TypeScript CLI never supported. ### Config -Codex supports a rich set of configuration options. Note that the Rust CLI uses `config.toml` instead of `config.json`. See [`docs/config.md`](../docs/config.md) for details. +Codexel supports a rich set of configuration options. Note that the Rust CLI uses `config.toml` instead of `config.json`. See [`docs/config.md`](../docs/config.md) for details. ### Model Context Protocol Support #### MCP client -Codex CLI functions as an MCP client that allows the Codex CLI and IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#mcp_servers) for details. +Codexel functions as an MCP client that allows the Codexel CLI and IDE extension to connect to MCP servers on startup. See the [`configuration documentation`](../docs/config.md#mcp_servers) for details. #### MCP server (experimental) -Codex can be launched as an MCP _server_ by running `codex mcp-server`. 
This allows _other_ MCP clients to use Codex as a tool for another agent. +Codexel can be launched as an MCP _server_ by running `codexel mcp-server`. This allows _other_ MCP clients to use Codexel as a tool for another agent. Use the [`@modelcontextprotocol/inspector`](https://github.com/modelcontextprotocol/inspector) to try it out: ```shell -npx @modelcontextprotocol/inspector codex mcp-server +npx @modelcontextprotocol/inspector codexel mcp-server ``` -Use `codex mcp` to add/list/get/remove MCP server launchers defined in `config.toml`, and `codex mcp-server` to run the MCP server directly. +Use `codexel mcp` to add/list/get/remove MCP server launchers defined in `config.toml`, and `codexel mcp-server` to run the MCP server directly. ### Notifications -You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](../docs/config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS. When Codex detects that it is running under WSL 2 inside Windows Terminal (`WT_SESSION` is set), the TUI automatically falls back to native Windows toast notifications so approval prompts and completed turns surface even though Windows Terminal does not implement OSC 9. +You can enable notifications by configuring a script that is run whenever the agent finishes a turn. The [notify documentation](../docs/config.md#notify) includes a detailed example that explains how to get desktop notifications via [terminal-notifier](https://github.com/julienXX/terminal-notifier) on macOS. When Codexel detects that it is running under WSL 2 inside Windows Terminal (`WT_SESSION` is set), the TUI automatically falls back to native Windows toast notifications so approval prompts and completed turns surface even though Windows Terminal does not implement OSC 9. 
-### `codex exec` to run Codex programmatically/non-interactively +### `codexel exec` to run Codexel programmatically/non-interactively -To run Codex non-interactively, run `codex exec PROMPT` (you can also pass the prompt via `stdin`) and Codex will work on your task until it decides that it is done and exits. Output is printed to the terminal directly. You can set the `RUST_LOG` environment variable to see more about what's going on. +To run Codexel non-interactively, run `codexel exec PROMPT` (you can also pass the prompt via `stdin`) and Codexel will work on your task until it decides that it is done and exits. Output is printed to the terminal directly. You can set the `RUST_LOG` environment variable to see more about what's going on. ### Experimenting with the Codex Sandbox -To test to see what happens when a command is run under the sandbox provided by Codex, we provide the following subcommands in Codex CLI: +To test to see what happens when a command is run under the sandbox provided by Codexel, we provide the following subcommands in Codexel: ``` # macOS -codex sandbox macos [--full-auto] [--log-denials] [COMMAND]... +codexel sandbox macos [--full-auto] [--log-denials] [COMMAND]... # Linux -codex sandbox linux [--full-auto] [COMMAND]... +codexel sandbox linux [--full-auto] [COMMAND]... # Windows -codex sandbox windows [--full-auto] [COMMAND]... +codexel sandbox windows [--full-auto] [COMMAND]... # Legacy aliases -codex debug seatbelt [--full-auto] [--log-denials] [COMMAND]... -codex debug landlock [--full-auto] [COMMAND]... +codexel debug seatbelt [--full-auto] [--log-denials] [COMMAND]... +codexel debug landlock [--full-auto] [COMMAND]... ``` ### Selecting a sandbox policy via `--sandbox` @@ -76,23 +76,23 @@ codex debug landlock [--full-auto] [COMMAND]... 
The Rust CLI exposes a dedicated `--sandbox` (`-s`) flag that lets you pick the sandbox policy **without** having to reach for the generic `-c/--config` option: ```shell -# Run Codex with the default, read-only sandbox -codex --sandbox read-only +# Run Codexel with the default, read-only sandbox +codexel --sandbox read-only # Allow the agent to write within the current workspace while still blocking network access -codex --sandbox workspace-write +codexel --sandbox workspace-write # Danger! Disable sandboxing entirely (only do this if you are already running in a container or other isolated env) -codex --sandbox danger-full-access +codexel --sandbox danger-full-access ``` -The same setting can be persisted in `~/.codex/config.toml` via the top-level `sandbox_mode = "MODE"` key, e.g. `sandbox_mode = "workspace-write"`. +The same setting can be persisted in `~/.codexel/config.toml` via the top-level `sandbox_mode = "MODE"` key, e.g. `sandbox_mode = "workspace-write"`. ## Code Organization This folder is the root of a Cargo workspace. It contains quite a bit of experimental code, but here are the key crates: -- [`core/`](./core) contains the business logic for Codex. Ultimately, we hope this to be a library crate that is generally useful for building other Rust/native applications that use Codex. +- [`core/`](./core) contains the business logic for Codexel. Ultimately, we hope this to be a library crate that is generally useful for building other Rust/native applications that use Codexel. - [`exec/`](./exec) "headless" CLI for use in automation. - [`tui/`](./tui) CLI that launches a fullscreen TUI built with [Ratatui](https://ratatui.rs/). - [`cli/`](./cli) CLI multitool that provides the aforementioned CLIs via subcommands. 
diff --git a/codex-rs/app-server-test-client/src/main.rs b/codex-rs/app-server-test-client/src/main.rs index b66c59d55a7..0330cb94c2e 100644 --- a/codex-rs/app-server-test-client/src/main.rs +++ b/codex-rs/app-server-test-client/src/main.rs @@ -59,10 +59,10 @@ use uuid::Uuid; /// Minimal launcher that initializes the Codex app-server and logs the handshake. #[derive(Parser)] -#[command(author = "Codex", version, about = "Bootstrap Codex app-server", long_about = None)] +#[command(author = "Codexel", version, about = "Bootstrap Codexel app-server", long_about = None)] struct Cli { - /// Path to the `codex` CLI binary. - #[arg(long, env = "CODEX_BIN", default_value = "codex")] + /// Path to the `codexel` CLI binary. + #[arg(long, env = "CODEX_BIN", default_value = "codexel")] codex_bin: String, #[command(subcommand)] @@ -71,15 +71,15 @@ struct Cli { #[derive(Subcommand)] enum CliCommand { - /// Send a user message through the Codex app-server. + /// Send a user message through the Codexel app-server. SendMessage { - /// User message to send to Codex. + /// User message to send to Codexel. #[arg()] user_message: String, }, /// Send a user message through the app-server V2 thread/turn APIs. SendMessageV2 { - /// User message to send to Codex. + /// User message to send to Codexel. #[arg()] user_message: String, }, diff --git a/codex-rs/app-server/README.md b/codex-rs/app-server/README.md index 2f141c4e179..99fea9f78da 100644 --- a/codex-rs/app-server/README.md +++ b/codex-rs/app-server/README.md @@ -1,6 +1,6 @@ # codex-app-server -`codex app-server` is the interface Codex uses to power rich interfaces such as the [Codex VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt). +`codexel app-server` is the interface Codexel uses to power rich interfaces such as the [Codex VS Code extension](https://marketplace.visualstudio.com/items?itemName=openai.chatgpt). 
## Table of Contents @@ -15,15 +15,15 @@ ## Protocol -Similar to [MCP](https://modelcontextprotocol.io/), `codex app-server` supports bidirectional communication, streaming JSONL over stdio. The protocol is JSON-RPC 2.0, though the `"jsonrpc":"2.0"` header is omitted. +Similar to [MCP](https://modelcontextprotocol.io/), `codexel app-server` supports bidirectional communication, streaming JSONL over stdio. The protocol is JSON-RPC 2.0, though the `"jsonrpc":"2.0"` header is omitted. ## Message Schema -Currently, you can dump a TypeScript version of the schema using `codex app-server generate-ts`, or a JSON Schema bundle via `codex app-server generate-json-schema`. Each output is specific to the version of Codex you used to run the command, so the generated artifacts are guaranteed to match that version. +Currently, you can dump a TypeScript version of the schema using `codexel app-server generate-ts`, or a JSON Schema bundle via `codexel app-server generate-json-schema`. Each output is specific to the version of Codexel you used to run the command, so the generated artifacts are guaranteed to match that version. ``` -codex app-server generate-ts --out DIR -codex app-server generate-json-schema --out DIR +codexel app-server generate-ts --out DIR +codexel app-server generate-json-schema --out DIR ``` ## Core Primitives @@ -48,7 +48,7 @@ Use the thread APIs to create, list, or archive conversations. Drive a conversat Clients must send a single `initialize` request before invoking any other method, then acknowledge with an `initialized` notification. The server returns the user agent string it will present to upstream services; subsequent requests issued before initialization receive a `"Not initialized"` error, and repeated `initialize` calls receive an `"Already initialized"` error. -Applications building on top of `codex app-server` should identify themselves via the `clientInfo` parameter. 
+Applications building on top of `codexel app-server` should identify themselves via the `clientInfo` parameter. Example (from OpenAI's official VSCode extension): diff --git a/codex-rs/app-server/src/codex_message_processor.rs b/codex-rs/app-server/src/codex_message_processor.rs index 2d581e2383a..552b26bc8e2 100644 --- a/codex-rs/app-server/src/codex_message_processor.rs +++ b/codex-rs/app-server/src/codex_message_processor.rs @@ -2729,7 +2729,9 @@ impl CodexMessageProcessor { approval_policy: params.approval_policy.map(AskForApproval::to_core), sandbox_policy: params.sandbox_policy.map(|p| p.to_core()), model: params.model, + plan_model: None, effort: params.effort.map(Some), + plan_effort: None, summary: params.summary, }) .await; diff --git a/codex-rs/arg0/src/lib.rs b/codex-rs/arg0/src/lib.rs index 6b60536413c..484d1a76074 100644 --- a/codex-rs/arg0/src/lib.rs +++ b/codex-rs/arg0/src/lib.rs @@ -62,7 +62,7 @@ pub fn arg0_dispatch() -> Option { } } -/// While we want to deploy the Codex CLI as a single executable for simplicity, +/// While we want to deploy Codexel as a single executable for simplicity, /// we also want to expose some of its functionality as distinct CLIs, so we use /// the "arg0 trick" to determine which CLI to dispatch. This effectively allows /// us to simulate deploying multiple executables as a single binary on Mac and @@ -72,7 +72,7 @@ pub fn arg0_dispatch() -> Option { /// `codex-linux-sandbox` we *directly* execute /// [`codex_linux_sandbox::run_main`] (which never returns). Otherwise we: /// -/// 1. Load `.env` values from `~/.codex/.env` before creating any threads. +/// 1. Load `.env` values from `~/.codexel/.env` before creating any threads. /// 2. Construct a Tokio multi-thread runtime. /// 3. Derive the path to the current executable (so children can re-invoke the /// sandbox) when running on Linux. @@ -109,7 +109,7 @@ where const ILLEGAL_ENV_VAR_PREFIX: &str = "CODEX_"; -/// Load env vars from ~/.codex/.env. 
+/// Load env vars from ~/.codexel/.env. /// /// Security: Do not allow `.env` files to create or modify any variables /// with names starting with `CODEX_`. @@ -144,7 +144,7 @@ where /// This temporary directory is prepended to the PATH environment variable so /// that `apply_patch` can be on the PATH without requiring the user to /// install a separate `apply_patch` executable, simplifying the deployment of -/// Codex CLI. +/// Codexel. /// /// IMPORTANT: This function modifies the PATH environment variable, so it MUST /// be called before multiple threads are spawned. diff --git a/codex-rs/chatgpt/src/chatgpt_client.rs b/codex-rs/chatgpt/src/chatgpt_client.rs index 7528631981f..6f1b9ee48f2 100644 --- a/codex-rs/chatgpt/src/chatgpt_client.rs +++ b/codex-rs/chatgpt/src/chatgpt_client.rs @@ -24,7 +24,7 @@ pub(crate) async fn chatgpt_get_request( get_chatgpt_token_data().ok_or_else(|| anyhow::anyhow!("ChatGPT token not available"))?; let account_id = token.account_id.ok_or_else(|| { - anyhow::anyhow!("ChatGPT account ID not available, please re-run `codex login`") + anyhow::anyhow!("ChatGPT account ID not available, please re-run `codexel login`") }); let response = client diff --git a/codex-rs/cli/Cargo.toml b/codex-rs/cli/Cargo.toml index 84e6e9acaf4..b3881a348ea 100644 --- a/codex-rs/cli/Cargo.toml +++ b/codex-rs/cli/Cargo.toml @@ -5,7 +5,7 @@ edition.workspace = true license.workspace = true [[bin]] -name = "codex" +name = "codexel" path = "src/main.rs" [lib] diff --git a/codex-rs/cli/src/login.rs b/codex-rs/cli/src/login.rs index 8fbf7b04b6d..eba185e0dda 100644 --- a/codex-rs/cli/src/login.rs +++ b/codex-rs/cli/src/login.rs @@ -95,7 +95,7 @@ pub fn read_api_key_from_stdin() -> String { if stdin.is_terminal() { eprintln!( - "--with-api-key expects the API key on stdin. Try piping it, e.g. `printenv OPENAI_API_KEY | codex login --with-api-key`." + "--with-api-key expects the API key on stdin. Try piping it, e.g. 
`printenv OPENAI_API_KEY | codexel login --with-api-key`." ); std::process::exit(1); } diff --git a/codex-rs/cli/src/main.rs b/codex-rs/cli/src/main.rs index 80db64767d5..4329a73f2f9 100644 --- a/codex-rs/cli/src/main.rs +++ b/codex-rs/cli/src/main.rs @@ -45,7 +45,7 @@ use codex_core::features::FeatureOverrides; use codex_core::features::Features; use codex_core::features::is_known_feature_key; -/// Codex CLI +/// Codexel /// /// If no subcommand is specified, options will be forwarded to the interactive CLI. #[derive(Debug, Parser)] @@ -55,10 +55,10 @@ use codex_core::features::is_known_feature_key; // If a sub‑command is given, ignore requirements of the default args. subcommand_negates_reqs = true, // The executable is sometimes invoked via a platform‑specific name like - // `codex-x86_64-unknown-linux-musl`, but the help output should always use - // the generic `codex` command name that users run. - bin_name = "codex", - override_usage = "codex [OPTIONS] [PROMPT]\n codex [OPTIONS] [ARGS]" + // `codexel-x86_64-unknown-linux-musl`, but the help output should always use + // the generic `codexel` command name that users run. + bin_name = "codexel", + override_usage = "codexel [OPTIONS] [PROMPT]\n codexel [OPTIONS] [ARGS]" )] struct MultitoolCli { #[clap(flatten)] @@ -76,7 +76,7 @@ struct MultitoolCli { #[derive(Debug, clap::Subcommand)] enum Subcommand { - /// Run Codex non-interactively. + /// Run Codexel non-interactively. #[clap(visible_alias = "e")] Exec(ExecCli), @@ -89,10 +89,10 @@ enum Subcommand { /// Remove stored authentication credentials. Logout(LogoutCommand), - /// [experimental] Run Codex as an MCP server and manage MCP servers. + /// [experimental] Run Codexel as an MCP server and manage MCP servers. Mcp(McpCli), - /// [experimental] Run the Codex MCP server (stdio transport). + /// [experimental] Run the Codexel MCP server (stdio transport). McpServer, /// [experimental] Run the app server or related tooling. 
@@ -101,7 +101,7 @@ enum Subcommand { /// Generate shell completion scripts. Completion(CompletionCommand), - /// Run commands within a Codex-provided sandbox. + /// Run commands within a Codexel-provided sandbox. #[clap(visible_alias = "debug")] Sandbox(SandboxArgs), @@ -109,14 +109,14 @@ enum Subcommand { #[clap(hide = true)] Execpolicy(ExecpolicyCommand), - /// Apply the latest diff produced by Codex agent as a `git apply` to your local working tree. + /// Apply the latest diff produced by the Codexel agent as a `git apply` to your local working tree. #[clap(visible_alias = "a")] Apply(ApplyCommand), /// Resume a previous interactive session (picker by default; use --last to continue the most recent). Resume(ResumeCommand), - /// [EXPERIMENTAL] Browse tasks from Codex Cloud and apply changes locally. + /// [EXPERIMENTAL] Browse tasks from Codexel Cloud and apply changes locally. #[clap(name = "cloud", alias = "cloud-tasks")] Cloud(CloudTasksCli), @@ -198,7 +198,7 @@ struct LoginCommand { #[arg( long = "with-api-key", - help = "Read the API key from stdin (e.g. `printenv OPENAI_API_KEY | codex login --with-api-key`)" + help = "Read the API key from stdin (e.g. `printenv OPENAI_API_KEY | codexel login --with-api-key`)" )] with_api_key: bool, @@ -296,7 +296,7 @@ fn format_exit_messages(exit_info: AppExitInfo, color_enabled: bool) -> Vec anyhow::Result<()> { fn run_update_action(action: UpdateAction) -> anyhow::Result<()> { println!(); let cmd_str = action.command_str(); - println!("Updating Codex via `{cmd_str}`..."); + println!("Updating Codexel via `{cmd_str}`..."); let status = { #[cfg(windows)] @@ -352,7 +352,7 @@ fn run_update_action(action: UpdateAction) -> anyhow::Result<()> { anyhow::bail!("`{cmd_str}` failed with status {status}"); } println!(); - println!("🎉 Update ran successfully! Please restart Codex."); + println!("🎉 Update ran successfully! 
Please restart Codexel."); Ok(()) } @@ -461,7 +461,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option) -> anyhow::Result<() codex_exec::run_main(exec_cli, codex_linux_sandbox_exe).await?; } Some(Subcommand::Review(review_args)) => { - let mut exec_cli = ExecCli::try_parse_from(["codex", "exec"])?; + let mut exec_cli = ExecCli::try_parse_from(["codexel", "exec"])?; exec_cli.command = Some(ExecCommand::Review(review_args)); prepend_config_flags( &mut exec_cli.config_overrides, @@ -527,7 +527,7 @@ async fn cli_main(codex_linux_sandbox_exe: Option) -> anyhow::Result<() .await; } else if login_cli.api_key.is_some() { eprintln!( - "The --api-key flag is no longer supported. Pipe the key instead, e.g. `printenv OPENAI_API_KEY | codex login --with-api-key`." + "The --api-key flag is no longer supported. Pipe the key instead, e.g. `printenv OPENAI_API_KEY | codexel login --with-api-key`." ); std::process::exit(1); } else if login_cli.with_api_key { @@ -660,7 +660,7 @@ fn prepend_config_flags( .splice(0..0, cli_config_overrides.raw_overrides); } -/// Run the interactive Codex TUI, dispatching to either the legacy implementation or the +/// Run the interactive Codexel TUI, dispatching to either the legacy implementation or the /// experimental TUI v2 shim based on feature flags resolved from config. async fn run_interactive_tui( interactive: TuiCli, @@ -694,7 +694,7 @@ async fn is_tui2_enabled(cli: &TuiCli) -> std::io::Result { Ok(features.enabled(Feature::Tui2)) } -/// Build the final `TuiCli` for a `codex resume` invocation. +/// Build the final `TuiCli` for a `codexel resume` invocation. fn finalize_resume_interactive( mut interactive: TuiCli, root_config_overrides: CliConfigOverrides, @@ -704,7 +704,7 @@ fn finalize_resume_interactive( resume_cli: TuiCli, ) -> TuiCli { // Start with the parsed interactive CLI so resume shares the same - // configuration surface area as `codex` without additional flags. 
+ // configuration surface area as `codexel` without additional flags. let resume_session_id = session_id; interactive.resume_picker = resume_session_id.is_none() && !last; interactive.resume_last = last; @@ -720,7 +720,7 @@ fn finalize_resume_interactive( interactive } -/// Merge flags provided to `codex resume` so they take precedence over any +/// Merge flags provided to `codexel resume` so they take precedence over any /// root-level flags. Only overrides fields explicitly set on the resume-scoped /// CLI. Also appends `-c key=value` overrides with highest precedence. fn merge_resume_cli_flags(interactive: &mut TuiCli, resume_cli: TuiCli) { @@ -769,7 +769,7 @@ fn merge_resume_cli_flags(interactive: &mut TuiCli, resume_cli: TuiCli) { fn print_completion(cmd: CompletionCommand) { let mut app = MultitoolCli::command(); - let name = "codex"; + let name = "codexel"; generate(cmd.shell, &mut app, name, &mut std::io::stdout()); } @@ -844,7 +844,7 @@ mod tests { lines, vec![ "Token usage: total=2 input=0 output=2".to_string(), - "To continue this session, run codex resume 123e4567-e89b-12d3-a456-426614174000" + "To continue this session, run codexel resume 123e4567-e89b-12d3-a456-426614174000" .to_string(), ] ); @@ -860,7 +860,7 @@ mod tests { #[test] fn resume_model_flag_applies_when_no_root_flags() { - let interactive = finalize_from_args(["codex", "resume", "-m", "gpt-5.1-test"].as_ref()); + let interactive = finalize_from_args(["codexel", "resume", "-m", "gpt-5.1-test"].as_ref()); assert_eq!(interactive.model.as_deref(), Some("gpt-5.1-test")); assert!(interactive.resume_picker); @@ -870,7 +870,7 @@ mod tests { #[test] fn resume_picker_logic_none_and_not_last() { - let interactive = finalize_from_args(["codex", "resume"].as_ref()); + let interactive = finalize_from_args(["codexel", "resume"].as_ref()); assert!(interactive.resume_picker); assert!(!interactive.resume_last); assert_eq!(interactive.resume_session_id, None); @@ -879,7 +879,7 @@ mod tests { #[test] fn 
resume_picker_logic_last() { - let interactive = finalize_from_args(["codex", "resume", "--last"].as_ref()); + let interactive = finalize_from_args(["codexel", "resume", "--last"].as_ref()); assert!(!interactive.resume_picker); assert!(interactive.resume_last); assert_eq!(interactive.resume_session_id, None); @@ -888,7 +888,7 @@ mod tests { #[test] fn resume_picker_logic_with_session_id() { - let interactive = finalize_from_args(["codex", "resume", "1234"].as_ref()); + let interactive = finalize_from_args(["codexel", "resume", "1234"].as_ref()); assert!(!interactive.resume_picker); assert!(!interactive.resume_last); assert_eq!(interactive.resume_session_id.as_deref(), Some("1234")); @@ -897,7 +897,7 @@ mod tests { #[test] fn resume_all_flag_sets_show_all() { - let interactive = finalize_from_args(["codex", "resume", "--all"].as_ref()); + let interactive = finalize_from_args(["codexel", "resume", "--all"].as_ref()); assert!(interactive.resume_picker); assert!(interactive.resume_show_all); } @@ -906,7 +906,7 @@ mod tests { fn resume_merges_option_flags_and_full_auto() { let interactive = finalize_from_args( [ - "codex", + "codexel", "resume", "sid", "--oss", @@ -963,7 +963,7 @@ mod tests { fn resume_merges_dangerously_bypass_flag() { let interactive = finalize_from_args( [ - "codex", + "codexel", "resume", "--dangerously-bypass-approvals-and-sandbox", ] diff --git a/codex-rs/cli/src/mcp_cmd.rs b/codex-rs/cli/src/mcp_cmd.rs index 9dcc4e21402..1b6f5ffcbb9 100644 --- a/codex-rs/cli/src/mcp_cmd.rs +++ b/codex-rs/cli/src/mcp_cmd.rs @@ -20,13 +20,13 @@ use codex_rmcp_client::delete_oauth_tokens; use codex_rmcp_client::perform_oauth_login; use codex_rmcp_client::supports_oauth_login; -/// [experimental] Launch Codex as an MCP server or manage configured MCP servers. +/// [experimental] Launch Codexel as an MCP server or manage configured MCP servers. 
/// /// Subcommands: /// - `serve` — run the MCP server on stdio /// - `list` — list configured servers (with `--json`) /// - `get` — show a single server (with `--json`) -/// - `add` — add a server launcher entry to `~/.codex/config.toml` +/// - `add` — add a server launcher entry to `~/.codexel/config.toml` /// - `remove` — delete a server entry #[derive(Debug, clap::Parser)] pub struct McpCli { @@ -78,7 +78,7 @@ pub struct GetArgs { } #[derive(Debug, clap::Parser)] -#[command(override_usage = "codex mcp add [OPTIONS] (--url | -- ...)")] +#[command(override_usage = "codexel mcp add [OPTIONS] (--url | -- ...)")] pub struct AddArgs { /// Name for the MCP server configuration. pub name: String, @@ -210,7 +210,7 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re validate_server_name(&name)?; - let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?; + let codex_home = find_codex_home().context("failed to resolve Codexel home directory")?; let mut servers = load_global_mcp_servers(&codex_home) .await .with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?; @@ -285,7 +285,7 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re if !config.features.enabled(Feature::RmcpClient) { println!( "MCP server supports login. Add `features.rmcp_client = true` \ - to your config.toml and run `codex mcp login {name}` to login." + to your config.toml and run `codexel mcp login {name}` to login." ); } else { println!("Detected OAuth support. Starting OAuth flow…"); @@ -303,7 +303,7 @@ async fn run_add(config_overrides: &CliConfigOverrides, add_args: AddArgs) -> Re } Ok(false) => {} Err(_) => println!( - "MCP server may or may not require login. Run `codex mcp login {name}` to login." + "MCP server may or may not require login. Run `codexel mcp login {name}` to login." 
), } } @@ -320,7 +320,7 @@ async fn run_remove(config_overrides: &CliConfigOverrides, remove_args: RemoveAr validate_server_name(&name)?; - let codex_home = find_codex_home().context("failed to resolve CODEX_HOME")?; + let codex_home = find_codex_home().context("failed to resolve Codexel home directory")?; let mut servers = load_global_mcp_servers(&codex_home) .await .with_context(|| format!("failed to load MCP servers from {}", codex_home.display()))?; @@ -354,7 +354,7 @@ async fn run_login(config_overrides: &CliConfigOverrides, login_args: LoginArgs) if !config.features.enabled(Feature::RmcpClient) { bail!( - "OAuth login is only supported when [features].rmcp_client is true in config.toml. See https://github.com/openai/codex/blob/main/docs/config.md#feature-flags for details." + "OAuth login is only supported when [features].rmcp_client is true in config.toml. See docs/config.md#feature-flags for details." ); } @@ -491,7 +491,7 @@ async fn run_list(config_overrides: &CliConfigOverrides, list_args: ListArgs) -> } if entries.is_empty() { - println!("No MCP servers configured yet. Try `codex mcp add my-tool -- my-command`."); + println!("No MCP servers configured yet. Try `codexel mcp add my-tool -- my-command`."); return Ok(()); } @@ -822,7 +822,7 @@ async fn run_get(config_overrides: &CliConfigOverrides, get_args: GetArgs) -> Re if let Some(timeout) = server.tool_timeout_sec { println!(" tool_timeout_sec: {}", timeout.as_secs_f64()); } - println!(" remove: codex mcp remove {}", get_args.name); + println!(" remove: codexel mcp remove {}", get_args.name); Ok(()) } diff --git a/codex-rs/cli/tests/execpolicy.rs b/codex-rs/cli/tests/execpolicy.rs index 241a873d594..4b64de53161 100644 --- a/codex-rs/cli/tests/execpolicy.rs +++ b/codex-rs/cli/tests/execpolicy.rs @@ -24,7 +24,7 @@ prefix_rule( "#, )?; - let output = Command::cargo_bin("codex")? + let output = Command::cargo_bin("codexel")? 
.env("CODEX_HOME", codex_home.path()) .args([ "execpolicy", diff --git a/codex-rs/cli/tests/mcp_add_remove.rs b/codex-rs/cli/tests/mcp_add_remove.rs index 2911637331c..feabd9e1453 100644 --- a/codex-rs/cli/tests/mcp_add_remove.rs +++ b/codex-rs/cli/tests/mcp_add_remove.rs @@ -8,7 +8,7 @@ use pretty_assertions::assert_eq; use tempfile::TempDir; fn codex_command(codex_home: &Path) -> Result { - let mut cmd = assert_cmd::Command::cargo_bin("codex")?; + let mut cmd = assert_cmd::Command::cargo_bin("codexel")?; cmd.env("CODEX_HOME", codex_home); Ok(cmd) } diff --git a/codex-rs/cli/tests/mcp_list.rs b/codex-rs/cli/tests/mcp_list.rs index 1492365afb2..5e7f3f2d974 100644 --- a/codex-rs/cli/tests/mcp_list.rs +++ b/codex-rs/cli/tests/mcp_list.rs @@ -12,7 +12,7 @@ use serde_json::json; use tempfile::TempDir; fn codex_command(codex_home: &Path) -> Result { - let mut cmd = assert_cmd::Command::cargo_bin("codex")?; + let mut cmd = assert_cmd::Command::cargo_bin("codexel")?; cmd.env("CODEX_HOME", codex_home); Ok(cmd) } @@ -125,7 +125,7 @@ async fn list_and_get_render_expected_output() -> Result<()> { assert!(stdout.contains("APP_TOKEN=*****")); assert!(stdout.contains("WORKSPACE_ID=*****")); assert!(stdout.contains("enabled: true")); - assert!(stdout.contains("remove: codex mcp remove docs")); + assert!(stdout.contains("remove: codexel mcp remove docs")); let mut get_json_cmd = codex_command(codex_home.path())?; get_json_cmd diff --git a/codex-rs/cloud-tasks/src/cli.rs b/codex-rs/cloud-tasks/src/cli.rs index 6b36509639a..25deb44880f 100644 --- a/codex-rs/cloud-tasks/src/cli.rs +++ b/codex-rs/cloud-tasks/src/cli.rs @@ -14,23 +14,23 @@ pub struct Cli { #[derive(Debug, clap::Subcommand)] pub enum Command { - /// Submit a new Codex Cloud task without launching the TUI. + /// Submit a new Codexel Cloud task without launching the TUI. Exec(ExecCommand), - /// Show the status of a Codex Cloud task. + /// Show the status of a Codexel Cloud task. 
Status(StatusCommand), - /// Apply the diff for a Codex Cloud task locally. + /// Apply the diff for a Codexel Cloud task locally. Apply(ApplyCommand), - /// Show the unified diff for a Codex Cloud task. + /// Show the unified diff for a Codexel Cloud task. Diff(DiffCommand), } #[derive(Debug, Args)] pub struct ExecCommand { - /// Task prompt to run in Codex Cloud. + /// Task prompt to run in Codexel Cloud. #[arg(value_name = "QUERY")] pub query: Option, - /// Target environment identifier (see `codex cloud` to browse). + /// Target environment identifier (see `codexel cloud` to browse). #[arg(long = "env", value_name = "ENV_ID")] pub environment: String, @@ -42,7 +42,7 @@ pub struct ExecCommand { )] pub attempts: usize, - /// Git branch to run in Codex Cloud (defaults to current branch). + /// Git branch to run in Codexel Cloud (defaults to current branch). #[arg(long = "branch", value_name = "BRANCH")] pub branch: Option, } @@ -60,14 +60,14 @@ fn parse_attempts(input: &str) -> Result { #[derive(Debug, Args)] pub struct StatusCommand { - /// Codex Cloud task identifier to inspect. + /// Codexel Cloud task identifier to inspect. #[arg(value_name = "TASK_ID")] pub task_id: String, } #[derive(Debug, Args)] pub struct ApplyCommand { - /// Codex Cloud task identifier to apply. + /// Codexel Cloud task identifier to apply. #[arg(value_name = "TASK_ID")] pub task_id: String, @@ -78,7 +78,7 @@ pub struct ApplyCommand { #[derive(Debug, Args)] pub struct DiffCommand { - /// Codex Cloud task identifier to display. + /// Codexel Cloud task identifier to display. #[arg(value_name = "TASK_ID")] pub task_id: String, diff --git a/codex-rs/cloud-tasks/src/lib.rs b/codex-rs/cloud-tasks/src/lib.rs index 105f6cfb2e7..def9a9c2149 100644 --- a/codex-rs/cloud-tasks/src/lib.rs +++ b/codex-rs/cloud-tasks/src/lib.rs @@ -69,7 +69,7 @@ async fn init_backend(user_agent_suffix: &str) -> anyhow::Result Some(auth) => auth, None => { eprintln!( - "Not signed in. 
Please run 'codex login' to sign in with ChatGPT, then re-run 'codex cloud'." + "Not signed in. Please run 'codexel login' to sign in with ChatGPT, then re-run 'codexel cloud'." ); std::process::exit(1); } @@ -83,7 +83,7 @@ async fn init_backend(user_agent_suffix: &str) -> anyhow::Result Ok(t) if !t.is_empty() => t, _ => { eprintln!( - "Not signed in. Please run 'codex login' to sign in with ChatGPT, then re-run 'codex cloud'." + "Not signed in. Please run 'codexel login' to sign in with ChatGPT, then re-run 'codexel cloud'." ); std::process::exit(1); } @@ -206,7 +206,7 @@ async fn resolve_environment_id(ctx: &BackendContext, requested: &str) -> anyhow .collect::>(); match label_matches.as_slice() { [] => Err(anyhow!( - "environment '{trimmed}' not found; run `codex cloud` to list available environments" + "environment '{trimmed}' not found; run `codexel cloud` to list available environments" )), [single] => Ok(single.id.clone()), [first, rest @ ..] => { @@ -215,7 +215,7 @@ async fn resolve_environment_id(ctx: &BackendContext, requested: &str) -> anyhow Ok(first_id.clone()) } else { Err(anyhow!( - "environment label '{trimmed}' is ambiguous; run `codex cloud` to pick the desired environment id" + "environment label '{trimmed}' is ambiguous; run `codexel cloud` to pick the desired environment id" )) } } @@ -640,7 +640,7 @@ fn spawn_apply( // (no standalone patch summarizer needed – UI displays raw diffs) -/// Entry point for the `codex cloud` subcommand. +/// Entry point for the `codexel cloud` subcommand. pub async fn run_main(cli: Cli, _codex_linux_sandbox_exe: Option) -> anyhow::Result<()> { if let Some(command) = cli.command { return match command { diff --git a/codex-rs/common/src/config_override.rs b/codex-rs/common/src/config_override.rs index 59dde92a22b..cde116bb78e 100644 --- a/codex-rs/common/src/config_override.rs +++ b/codex-rs/common/src/config_override.rs @@ -1,4 +1,4 @@ -//! Support for `-c key=value` overrides shared across Codex CLI tools. +//! 
Support for `-c key=value` overrides shared across Codexel tools. //! //! This module provides a [`CliConfigOverrides`] struct that can be embedded //! into a `clap`-derived CLI struct using `#[clap(flatten)]`. Each occurrence @@ -18,7 +18,7 @@ use toml::Value; #[derive(Parser, Debug, Default, Clone)] pub struct CliConfigOverrides { /// Override a configuration value that would otherwise be loaded from - /// `~/.codex/config.toml`. Use a dotted path (`foo.bar.baz`) to override + /// `~/.codexel/config.toml` (or legacy `~/.codex/config.toml`). Use a dotted path (`foo.bar.baz`) to override /// nested values. The `value` portion is parsed as TOML. If it fails to /// parse as TOML, the raw string is used as a literal. /// diff --git a/codex-rs/common/src/config_summary.rs b/codex-rs/common/src/config_summary.rs index 2254eeae854..8d9668e5ec2 100644 --- a/codex-rs/common/src/config_summary.rs +++ b/codex-rs/common/src/config_summary.rs @@ -12,6 +12,9 @@ pub fn create_config_summary_entries(config: &Config, model: &str) -> Vec<(&'sta ("approval", config.approval_policy.value().to_string()), ("sandbox", summarize_sandbox_policy(&config.sandbox_policy)), ]; + if let Some(plan_model) = config.plan_model.as_deref() { + entries.push(("plan model", plan_model.to_string())); + } if config.model_provider.wire_api == WireApi::Responses { let reasoning_effort = config .model_reasoning_effort @@ -20,6 +23,15 @@ pub fn create_config_summary_entries(config: &Config, model: &str) -> Vec<(&'sta "reasoning effort", reasoning_effort.unwrap_or_else(|| "none".to_string()), )); + if config.plan_model.is_some() || config.plan_model_reasoning_effort.is_some() { + let plan_effort = config + .plan_model_reasoning_effort + .map(|effort| effort.to_string()); + entries.push(( + "plan reasoning effort", + plan_effort.unwrap_or_else(|| "none".to_string()), + )); + } entries.push(( "reasoning summaries", config.model_reasoning_summary.to_string(), diff --git a/codex-rs/core/README.md 
b/codex-rs/core/README.md index 5d4911b022f..5fd56c6a1a8 100644 --- a/codex-rs/core/README.md +++ b/codex-rs/core/README.md @@ -1,6 +1,6 @@ # codex-core -This crate implements the business logic for Codex. It is designed to be used by the various Codex UIs written in Rust. +This crate implements the business logic for Codexel. It is designed to be used by the various Codexel UIs written in Rust. ## Dependencies @@ -12,7 +12,7 @@ Expects `/usr/bin/sandbox-exec` to be present. ### Linux -Expects the binary containing `codex-core` to run the equivalent of `codex sandbox linux` (legacy alias: `codex debug landlock`) when `arg0` is `codex-linux-sandbox`. See the `codex-arg0` crate for details. +Expects the binary containing `codex-core` to run the equivalent of `codexel sandbox linux` (legacy alias: `codexel debug landlock`) when `arg0` is `codex-linux-sandbox`. See the `codex-arg0` crate for details. ### All Platforms diff --git a/codex-rs/core/src/codex.rs b/codex-rs/core/src/codex.rs index c15fa03cfd7..ed883abcc35 100644 --- a/codex-rs/core/src/codex.rs +++ b/codex-rs/core/src/codex.rs @@ -20,6 +20,7 @@ use crate::openai_models::model_family::ModelFamily; use crate::openai_models::models_manager::ModelsManager; use crate::parse_command::parse_command; use crate::parse_turn_item; +use crate::plan_output; use crate::stream_events_utils::HandleOutputCtx; use crate::stream_events_utils::handle_non_tool_response_item; use crate::stream_events_utils::handle_output_item_done; @@ -36,6 +37,7 @@ use codex_protocol::protocol::FileChange; use codex_protocol::protocol::HasLegacyEvent; use codex_protocol::protocol::ItemCompletedEvent; use codex_protocol::protocol::ItemStartedEvent; +use codex_protocol::protocol::PlanOutputEvent; use codex_protocol::protocol::RawResponseItemEvent; use codex_protocol::protocol::ReviewRequest; use codex_protocol::protocol::RolloutItem; @@ -97,6 +99,9 @@ use crate::protocol::AgentMessageContentDeltaEvent; use 
crate::protocol::AgentReasoningSectionBreakEvent; use crate::protocol::ApplyPatchApprovalRequestEvent; use crate::protocol::AskForApproval; +use crate::protocol::AskUserQuestion; +use crate::protocol::AskUserQuestionRequestEvent; +use crate::protocol::AskUserQuestionResponse; use crate::protocol::BackgroundEventEvent; use crate::protocol::DeprecationNoticeEvent; use crate::protocol::ErrorEvent; @@ -104,6 +109,9 @@ use crate::protocol::Event; use crate::protocol::EventMsg; use crate::protocol::ExecApprovalRequestEvent; use crate::protocol::Op; +use crate::protocol::PlanApprovalRequestEvent; +use crate::protocol::PlanApprovalResponse; +use crate::protocol::PlanProposal; use crate::protocol::RateLimitSnapshot; use crate::protocol::ReasoningContentDeltaEvent; use crate::protocol::ReasoningRawContentDeltaEvent; @@ -130,6 +138,7 @@ use crate::skills::SkillMetadata; use crate::skills::SkillsManager; use crate::skills::build_skill_injections; use crate::state::ActiveTurn; +use crate::state::PendingPlanApproval; use crate::state::SessionServices; use crate::state::SessionState; use crate::tasks::GhostSnapshotTask; @@ -259,6 +268,8 @@ impl Codex { model: model.clone(), model_reasoning_effort: config.model_reasoning_effort, model_reasoning_summary: config.model_reasoning_summary, + plan_model: config.plan_model.clone(), + plan_model_reasoning_effort: config.plan_model_reasoning_effort, developer_instructions: config.developer_instructions.clone(), user_instructions, base_instructions: config.base_instructions.clone(), @@ -356,6 +367,8 @@ pub(crate) struct Session { pub(crate) struct TurnContext { pub(crate) sub_id: String, pub(crate) client: ModelClient, + pub(crate) plan_model: Option, + pub(crate) plan_reasoning_effort: Option, /// The session's current working directory. All relative paths provided by /// the model as well as sandbox policies are resolved against this path /// instead of `std::env::current_dir()`. 
@@ -401,6 +414,10 @@ pub(crate) struct SessionConfiguration { model_reasoning_effort: Option, model_reasoning_summary: ReasoningSummaryConfig, + /// Optional model slug override used for planning flows (e.g. `/plan` mode and plan-variant subagents). + plan_model: Option, + plan_model_reasoning_effort: Option, + /// Developer instructions that supplement the base instructions. developer_instructions: Option, @@ -442,9 +459,15 @@ impl SessionConfiguration { if let Some(model) = updates.model.clone() { next_configuration.model = model; } + if let Some(plan_model) = updates.plan_model.clone() { + next_configuration.plan_model = Some(plan_model); + } if let Some(effort) = updates.reasoning_effort { next_configuration.model_reasoning_effort = effort; } + if let Some(effort) = updates.plan_reasoning_effort { + next_configuration.plan_model_reasoning_effort = effort; + } if let Some(summary) = updates.reasoning_summary { next_configuration.model_reasoning_summary = summary; } @@ -467,7 +490,9 @@ pub(crate) struct SessionSettingsUpdate { pub(crate) approval_policy: Option, pub(crate) sandbox_policy: Option, pub(crate) model: Option, + pub(crate) plan_model: Option, pub(crate) reasoning_effort: Option>, + pub(crate) plan_reasoning_effort: Option>, pub(crate) reasoning_summary: Option, pub(crate) final_output_json_schema: Option>, } @@ -521,8 +546,20 @@ impl Session { TurnContext { sub_id, client, + plan_model: session_configuration.plan_model.clone(), + plan_reasoning_effort: session_configuration.plan_model_reasoning_effort, cwd: session_configuration.cwd.clone(), - developer_instructions: session_configuration.developer_instructions.clone(), + developer_instructions: match session_configuration.session_source { + SessionSource::Cli | SessionSource::VSCode => { + crate::tools::spec::prepend_ask_user_question_developer_instructions( + session_configuration.developer_instructions.clone(), + ) + } + SessionSource::Exec + | SessionSource::Mcp + | SessionSource::SubAgent(_) + | 
SessionSource::Unknown => session_configuration.developer_instructions.clone(), + }, base_instructions: session_configuration.base_instructions.clone(), compact_prompt: session_configuration.compact_prompt.clone(), user_instructions: session_configuration.user_instructions.clone(), @@ -614,7 +651,7 @@ impl Session { None } else { Some(format!( - "Enable it with `--enable {canonical}` or `[features].{canonical}` in config.toml. See https://github.com/openai/codex/blob/main/docs/config.md#feature-flags for details." + "Enable it with `--enable {canonical}` or `[features].{canonical}` in config.toml. See https://github.com/Ixe1/codexel/blob/main/docs/config.md#feature-flags for details." )) }; post_session_configured_events.push(Event { @@ -845,19 +882,42 @@ impl Session { } } + pub(crate) async fn set_pending_approved_plan(&self, plan_output: Option) { + let mut state = self.state.lock().await; + state.set_pending_approved_plan(plan_output); + } + + #[cfg(test)] + pub(crate) async fn new_turn( + &self, + updates: SessionSettingsUpdate, + ) -> ConstraintResult> { + let sub_id = self.next_internal_sub_id(); + self.new_turn_with_sub_id(sub_id, updates).await + } + pub(crate) async fn new_turn_with_sub_id( &self, sub_id: String, updates: SessionSettingsUpdate, ) -> ConstraintResult> { - let (session_configuration, sandbox_policy_changed) = { + let (session_configuration, sandbox_policy_changed, pending_approved_plan) = { let mut state = self.state.lock().await; match state.session_configuration.clone().apply(&updates) { Ok(next) => { let sandbox_policy_changed = state.session_configuration.sandbox_policy != next.sandbox_policy; state.session_configuration = next.clone(); - (next, sandbox_policy_changed) + let pending_approved_plan = match next.session_source { + SessionSource::Cli | SessionSource::VSCode => { + state.take_pending_approved_plan() + } + SessionSource::Exec + | SessionSource::Mcp + | SessionSource::SubAgent(_) + | SessionSource::Unknown => None, + }; + 
(next, sandbox_policy_changed, pending_approved_plan) } Err(err) => { drop(state); @@ -883,6 +943,7 @@ impl Session { session_configuration, updates.final_output_json_schema, sandbox_policy_changed, + pending_approved_plan, ) .await) } @@ -893,6 +954,7 @@ impl Session { session_configuration: SessionConfiguration, final_output_json_schema: Option>, sandbox_policy_changed: bool, + pending_approved_plan: Option, ) -> Arc { let per_turn_config = Self::build_per_turn_config(&session_configuration); @@ -929,6 +991,21 @@ impl Session { self.conversation_id, sub_id, ); + if let Some(out) = pending_approved_plan { + let prelude = plan_output::render_approved_plan_developer_prelude(&out); + turn_context.developer_instructions = + Some(match turn_context.developer_instructions.take() { + Some(existing) => { + let existing = existing.trim(); + if existing.is_empty() { + prelude + } else { + format!("{prelude}\n\n{existing}") + } + } + None => prelude, + }); + } if let Some(final_schema) = final_output_json_schema { turn_context.final_output_json_schema = final_schema; } @@ -945,7 +1022,7 @@ impl Session { let state = self.state.lock().await; state.session_configuration.clone() }; - self.new_turn_from_configuration(sub_id, session_configuration, None, false) + self.new_turn_from_configuration(sub_id, session_configuration, None, false, None) .await } @@ -1164,6 +1241,138 @@ impl Session { } } + pub async fn request_ask_user_question( + &self, + turn_context: &TurnContext, + call_id: String, + questions: Vec, + ) -> AskUserQuestionResponse { + let sub_id = turn_context.sub_id.clone(); + let (tx, rx) = oneshot::channel(); + + let prev_entry = { + let mut active = self.active_turn.lock().await; + match active.as_mut() { + Some(at) => { + let mut ts = at.turn_state.lock().await; + ts.insert_pending_user_question(sub_id.clone(), tx) + } + None => None, + } + }; + if prev_entry.is_some() { + warn!("Overwriting existing pending AskUserQuestion for sub_id: {sub_id}"); + } + + let 
event = + EventMsg::AskUserQuestionRequest(AskUserQuestionRequestEvent { call_id, questions }); + self.send_event(turn_context, event).await; + rx.await.unwrap_or(AskUserQuestionResponse::Cancelled) + } + + pub async fn notify_ask_user_question(&self, sub_id: &str, response: AskUserQuestionResponse) { + let entry = { + let mut active = self.active_turn.lock().await; + match active.as_mut() { + Some(at) => { + let mut ts = at.turn_state.lock().await; + ts.remove_pending_user_question(sub_id) + } + None => None, + } + }; + match entry { + Some(tx) => { + tx.send(response).ok(); + } + None => { + warn!("No pending AskUserQuestion found for sub_id: {sub_id}"); + } + } + } + + pub async fn request_plan_approval( + &self, + turn_context: &TurnContext, + call_id: String, + proposal: PlanProposal, + ) -> PlanApprovalResponse { + let sub_id = turn_context.sub_id.clone(); + let (tx, rx) = oneshot::channel(); + + let prev_entry = { + let mut active = self.active_turn.lock().await; + match active.as_mut() { + Some(at) => { + let mut ts = at.turn_state.lock().await; + ts.insert_pending_plan_approval( + sub_id.clone(), + PendingPlanApproval { + proposal: proposal.clone(), + tx, + }, + ) + } + None => None, + } + }; + if prev_entry.is_some() { + warn!("Overwriting existing pending PlanApproval for sub_id: {sub_id}"); + } + + let event = EventMsg::PlanApprovalRequest(PlanApprovalRequestEvent { call_id, proposal }); + self.send_event(turn_context, event).await; + rx.await.unwrap_or(PlanApprovalResponse::Rejected) + } + + pub async fn notify_plan_approval(&self, sub_id: &str, response: PlanApprovalResponse) { + let (entry, turn_context) = { + let mut active = self.active_turn.lock().await; + match active.as_mut() { + Some(at) => { + let turn_context = at + .tasks + .get(sub_id) + .map(|task| Arc::clone(&task.turn_context)); + let mut ts = at.turn_state.lock().await; + (ts.remove_pending_plan_approval(sub_id), turn_context) + } + None => (None, None), + } + }; + match entry { + 
Some(pending) => { + const APPROVED_MESSAGE: &str = "Plan approved; continuing..."; + + if response == PlanApprovalResponse::Approved { + if let Some(turn_context) = &turn_context { + self.send_event( + turn_context.as_ref(), + EventMsg::BackgroundEvent(BackgroundEventEvent { + message: APPROVED_MESSAGE.to_string(), + }), + ) + .await; + + let mut update = pending.proposal.plan.clone(); + update.explanation = Some(APPROVED_MESSAGE.to_string()); + self.send_event(turn_context.as_ref(), EventMsg::PlanUpdate(update)) + .await; + } else { + warn!( + "No active task context found for approved PlanApproval: sub_id={sub_id}" + ); + } + } + + pending.tx.send(response).ok(); + } + None => { + warn!("No pending PlanApproval found for sub_id: {sub_id}"); + } + } + } + pub async fn resolve_elicitation( &self, server_name: String, @@ -1606,7 +1815,9 @@ async fn submission_loop(sess: Arc, config: Arc, rx_sub: Receiv approval_policy, sandbox_policy, model, + plan_model, effort, + plan_effort, summary, } => { handlers::override_turn_context( @@ -1617,7 +1828,9 @@ async fn submission_loop(sess: Arc, config: Arc, rx_sub: Receiv approval_policy, sandbox_policy, model, + plan_model, reasoning_effort: effort, + plan_reasoning_effort: plan_effort, reasoning_summary: summary, ..Default::default() }, @@ -1672,6 +1885,12 @@ async fn submission_loop(sess: Arc, config: Arc, rx_sub: Receiv } => { handlers::resolve_elicitation(&sess, server_name, request_id, decision).await; } + Op::ResolveAskUserQuestion { id, response } => { + handlers::resolve_ask_user_question(&sess, id, response).await; + } + Op::ResolvePlanApproval { id, response } => { + handlers::resolve_plan_approval(&sess, id, response).await; + } Op::Shutdown => { if handlers::shutdown(&sess, sub.id.clone()).await { break; @@ -1680,6 +1899,9 @@ async fn submission_loop(sess: Arc, config: Arc, rx_sub: Receiv Op::Review { review_request } => { handlers::review(&sess, &config, sub.id.clone(), review_request).await; } + Op::Plan { 
plan_request } => { + handlers::plan(&sess, &config, sub.id.clone(), plan_request).await; + } _ => {} // Ignore unknown ops; enum is non_exhaustive to allow extensions. } } @@ -1699,10 +1921,12 @@ mod handlers { use crate::mcp::collect_mcp_snapshot_from_manager; use crate::review_prompts::resolve_review_request; use crate::tasks::CompactTask; + use crate::tasks::PlanTask; use crate::tasks::RegularTask; use crate::tasks::UndoTask; use crate::tasks::UserShellCommandTask; use codex_protocol::custom_prompts::CustomPrompt; + use codex_protocol::protocol::AskUserQuestionResponse; use codex_protocol::protocol::CodexErrorInfo; use codex_protocol::protocol::ErrorEvent; use codex_protocol::protocol::Event; @@ -1710,6 +1934,8 @@ mod handlers { use codex_protocol::protocol::ListCustomPromptsResponseEvent; use codex_protocol::protocol::ListSkillsResponseEvent; use codex_protocol::protocol::Op; + use codex_protocol::protocol::PlanApprovalResponse; + use codex_protocol::protocol::PlanRequest; use codex_protocol::protocol::ReviewDecision; use codex_protocol::protocol::ReviewRequest; use codex_protocol::protocol::SkillsListEntry; @@ -1769,7 +1995,9 @@ mod handlers { approval_policy: Some(approval_policy), sandbox_policy: Some(sandbox_policy), model: Some(model), + plan_model: None, reasoning_effort: Some(effort), + plan_reasoning_effort: None, reasoning_summary: Some(summary), final_output_json_schema: Some(final_output_json_schema), }, @@ -1844,6 +2072,22 @@ mod handlers { } } + pub async fn resolve_ask_user_question( + sess: &Arc, + id: String, + response: AskUserQuestionResponse, + ) { + sess.notify_ask_user_question(&id, response).await; + } + + pub async fn resolve_plan_approval( + sess: &Arc, + id: String, + response: PlanApprovalResponse, + ) { + sess.notify_plan_approval(&id, response).await; + } + /// Propagate a user's exec approval decision to the session. /// Also optionally applies an execpolicy amendment. 
pub async fn exec_approval(sess: &Arc, id: String, decision: ReviewDecision) { @@ -2089,6 +2333,29 @@ mod handlers { } } } + + pub async fn plan( + sess: &Arc, + _config: &Arc, + sub_id: String, + plan_request: PlanRequest, + ) { + let tc = match sess + .new_turn_with_sub_id(sub_id.clone(), SessionSettingsUpdate::default()) + .await + { + Ok(tc) => tc, + Err(_) => return, + }; + sess.spawn_task( + tc.clone(), + Vec::::new(), + PlanTask::new(plan_request.clone()), + ) + .await; + sess.send_event(&tc, EventMsg::EnteredPlanMode(plan_request)) + .await; + } } /// Spawn a review thread using the given prompt. @@ -2148,6 +2415,8 @@ async fn spawn_review_thread( let review_turn_context = TurnContext { sub_id: sub_id.to_string(), client, + plan_model: parent_turn_context.plan_model.clone(), + plan_reasoning_effort: parent_turn_context.plan_reasoning_effort, tools_config, ghost_snapshot: parent_turn_context.ghost_snapshot.clone(), developer_instructions: None, @@ -2756,6 +3025,10 @@ mod tests { use crate::shell::default_user_shell; use crate::tools::format_exec_output_str; use codex_protocol::models::FunctionCallOutputPayload; + use codex_protocol::plan_tool::PlanItemArg; + use codex_protocol::plan_tool::StepStatus; + use codex_protocol::plan_tool::UpdatePlanArgs; + use codex_protocol::protocol::SubAgentSource; use crate::protocol::CompactedItem; use crate::protocol::CreditsSnapshot; @@ -2847,6 +3120,8 @@ mod tests { model, model_reasoning_effort: config.model_reasoning_effort, model_reasoning_summary: config.model_reasoning_summary, + plan_model: None, + plan_model_reasoning_effort: None, developer_instructions: config.developer_instructions.clone(), user_instructions: config.user_instructions.clone(), base_instructions: config.base_instructions.clone(), @@ -2919,6 +3194,8 @@ mod tests { model, model_reasoning_effort: config.model_reasoning_effort, model_reasoning_summary: config.model_reasoning_summary, + plan_model: None, + plan_model_reasoning_effort: None, 
developer_instructions: config.developer_instructions.clone(), user_instructions: config.user_instructions.clone(), base_instructions: config.base_instructions.clone(), @@ -3123,6 +3400,8 @@ mod tests { model, model_reasoning_effort: config.model_reasoning_effort, model_reasoning_summary: config.model_reasoning_summary, + plan_model: None, + plan_model_reasoning_effort: None, developer_instructions: config.developer_instructions.clone(), user_instructions: config.user_instructions.clone(), base_instructions: config.base_instructions.clone(), @@ -3214,6 +3493,8 @@ mod tests { model, model_reasoning_effort: config.model_reasoning_effort, model_reasoning_summary: config.model_reasoning_summary, + plan_model: None, + plan_model_reasoning_effort: None, developer_instructions: config.developer_instructions.clone(), user_instructions: config.user_instructions.clone(), base_instructions: config.base_instructions.clone(), @@ -3279,6 +3560,83 @@ mod tests { (session, turn_context, rx_event) } + fn sample_plan_output_event() -> PlanOutputEvent { + PlanOutputEvent { + title: "Test plan".to_string(), + summary: "Test summary".to_string(), + plan: UpdatePlanArgs { + explanation: Some("Test explanation".to_string()), + plan: vec![PlanItemArg { + step: "Do the thing".to_string(), + status: StepStatus::Pending, + }], + }, + } + } + + #[tokio::test] + async fn approved_plan_is_pinned_into_next_cli_turn_developer_instructions() { + let (session, _turn_context, _rx) = make_session_and_context_with_rx(); + { + let mut state = session.state.lock().await; + state.session_configuration.session_source = SessionSource::Cli; + } + + let plan_output = sample_plan_output_event(); + session + .set_pending_approved_plan(Some(plan_output.clone())) + .await; + + let turn = session + .new_turn(SessionSettingsUpdate::default()) + .await + .expect("create turn"); + let developer_instructions = turn.developer_instructions.as_deref().unwrap_or_default(); + assert!(developer_instructions.starts_with("## 
Approved Plan (Pinned)")); + assert!(developer_instructions.contains(plan_output.title.as_str())); + + { + let state = session.state.lock().await; + assert!(state.pending_approved_plan.is_none()); + } + + let next_turn = session + .new_turn(SessionSettingsUpdate::default()) + .await + .expect("create second turn"); + let developer_instructions = next_turn + .developer_instructions + .as_deref() + .unwrap_or_default(); + assert!(!developer_instructions.contains("## Approved Plan (Pinned)")); + } + + #[tokio::test] + async fn approved_plan_is_not_consumed_for_subagent_turns() { + let (session, _turn_context, _rx) = make_session_and_context_with_rx(); + { + let mut state = session.state.lock().await; + state.session_configuration.session_source = + SessionSource::SubAgent(SubAgentSource::Other("test".to_string())); + } + + session + .set_pending_approved_plan(Some(sample_plan_output_event())) + .await; + + let turn = session + .new_turn(SessionSettingsUpdate::default()) + .await + .expect("create turn"); + let developer_instructions = turn.developer_instructions.as_deref().unwrap_or_default(); + assert!(!developer_instructions.contains("## Approved Plan (Pinned)")); + + { + let state = session.state.lock().await; + assert!(state.pending_approved_plan.is_some()); + } + } + #[tokio::test] async fn record_model_warning_appends_user_message() { let (mut session, turn_context) = make_session_and_context(); diff --git a/codex-rs/core/src/codex_delegate.rs b/codex-rs/core/src/codex_delegate.rs index c7aebbaf921..5323dc9078f 100644 --- a/codex-rs/core/src/codex_delegate.rs +++ b/codex-rs/core/src/codex_delegate.rs @@ -5,10 +5,14 @@ use async_channel::Receiver; use async_channel::Sender; use codex_async_utils::OrCancelExt; use codex_protocol::protocol::ApplyPatchApprovalRequestEvent; +use codex_protocol::protocol::AskUserQuestionRequestEvent; +use codex_protocol::protocol::AskUserQuestionResponse; use codex_protocol::protocol::Event; use codex_protocol::protocol::EventMsg; use 
codex_protocol::protocol::ExecApprovalRequestEvent; use codex_protocol::protocol::Op; +use codex_protocol::protocol::PlanApprovalRequestEvent; +use codex_protocol::protocol::PlanApprovalResponse; use codex_protocol::protocol::SessionSource; use codex_protocol::protocol::SubAgentSource; use codex_protocol::protocol::Submission; @@ -33,6 +37,7 @@ use codex_protocol::protocol::InitialHistory; /// The returned `events_rx` yields non-approval events emitted by the sub-agent. /// Approval requests are handled via `parent_session` and are not surfaced. /// The returned `ops_tx` allows the caller to submit additional `Op`s to the sub-agent. +#[allow(clippy::too_many_arguments)] pub(crate) async fn run_codex_conversation_interactive( config: Config, auth_manager: Arc, @@ -41,6 +46,7 @@ pub(crate) async fn run_codex_conversation_interactive( parent_ctx: Arc, cancel_token: CancellationToken, initial_history: Option, + sub_agent_source: SubAgentSource, ) -> Result { let (tx_sub, rx_sub) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY); let (tx_ops, rx_ops) = async_channel::bounded(SUBMISSION_CHANNEL_CAPACITY); @@ -51,7 +57,7 @@ pub(crate) async fn run_codex_conversation_interactive( models_manager, Arc::clone(&parent_session.services.skills_manager), initial_history.unwrap_or(InitialHistory::New), - SessionSource::SubAgent(SubAgentSource::Review), + SessionSource::SubAgent(sub_agent_source), ) .await?; let codex = Arc::new(codex); @@ -102,6 +108,7 @@ pub(crate) async fn run_codex_conversation_one_shot( parent_ctx: Arc, cancel_token: CancellationToken, initial_history: Option, + sub_agent_source: SubAgentSource, ) -> Result { // Use a child token so we can stop the delegate after completion without // requiring the caller to cancel the parent token. 
@@ -114,6 +121,7 @@ pub(crate) async fn run_codex_conversation_one_shot( parent_ctx, child_cancel.clone(), initial_history, + sub_agent_source, ) .await?; @@ -217,6 +225,34 @@ async fn forward_events( ) .await; } + Event { + id, + msg: EventMsg::AskUserQuestionRequest(event), + } => { + handle_ask_user_question( + &codex, + id, + &parent_session, + &parent_ctx, + event, + &cancel_token, + ) + .await; + } + Event { + id, + msg: EventMsg::PlanApprovalRequest(event), + } => { + handle_plan_approval( + &codex, + id, + &parent_session, + &parent_ctx, + event, + &cancel_token, + ) + .await; + } other => { match tx_sub.send(other).or_cancel(&cancel_token).await { Ok(Ok(())) => {} @@ -322,6 +358,43 @@ async fn handle_patch_approval( let _ = codex.submit(Op::PatchApproval { id, decision }).await; } +async fn handle_ask_user_question( + codex: &Codex, + id: String, + parent_session: &Session, + parent_ctx: &TurnContext, + event: AskUserQuestionRequestEvent, + cancel_token: &CancellationToken, +) { + let fut = parent_session.request_ask_user_question( + parent_ctx, + parent_ctx.sub_id.clone(), + event.questions, + ); + let response = + await_ask_user_question_with_cancel(fut, parent_session, &parent_ctx.sub_id, cancel_token) + .await; + let _ = codex + .submit(Op::ResolveAskUserQuestion { id, response }) + .await; +} + +async fn handle_plan_approval( + codex: &Codex, + id: String, + parent_session: &Session, + parent_ctx: &TurnContext, + event: PlanApprovalRequestEvent, + cancel_token: &CancellationToken, +) { + let fut = + parent_session.request_plan_approval(parent_ctx, parent_ctx.sub_id.clone(), event.proposal); + let response = + await_plan_approval_with_cancel(fut, parent_session, &parent_ctx.sub_id, cancel_token) + .await; + let _ = codex.submit(Op::ResolvePlanApproval { id, response }).await; +} + /// Await an approval decision, aborting on cancellation. 
async fn await_approval_with_cancel( fut: F, @@ -346,6 +419,52 @@ where } } +async fn await_ask_user_question_with_cancel( + fut: F, + parent_session: &Session, + sub_id: &str, + cancel_token: &CancellationToken, +) -> AskUserQuestionResponse +where + F: core::future::Future, +{ + tokio::select! { + biased; + _ = cancel_token.cancelled() => { + parent_session + .notify_ask_user_question(sub_id, AskUserQuestionResponse::Cancelled) + .await; + AskUserQuestionResponse::Cancelled + } + response = fut => { + response + } + } +} + +async fn await_plan_approval_with_cancel( + fut: F, + parent_session: &Session, + sub_id: &str, + cancel_token: &CancellationToken, +) -> PlanApprovalResponse +where + F: core::future::Future, +{ + tokio::select! { + biased; + _ = cancel_token.cancelled() => { + parent_session + .notify_plan_approval(sub_id, PlanApprovalResponse::Rejected) + .await; + PlanApprovalResponse::Rejected + } + response = fut => { + response + } + } +} + #[cfg(test)] mod tests { use super::*; diff --git a/codex-rs/core/src/command_safety/is_safe_command.rs b/codex-rs/core/src/command_safety/is_safe_command.rs index 01a52026e2e..6ad8a99f5d2 100644 --- a/codex-rs/core/src/command_safety/is_safe_command.rs +++ b/codex-rs/core/src/command_safety/is_safe_command.rs @@ -321,8 +321,14 @@ mod tests { return; } + let Some(pwsh_path) = crate::powershell::try_find_pwsh_executable_blocking() else { + // Skip if PowerShell 7 isn't installed on this machine. 
+ return; + }; + let pwsh = pwsh_path.to_string_lossy().to_string(); + assert!(is_known_safe_command(&vec_str(&[ - r"C:\Program Files\PowerShell\7\pwsh.exe", + pwsh.as_str(), "-Command", "Get-Location", ]))); diff --git a/codex-rs/core/src/config/edit.rs b/codex-rs/core/src/config/edit.rs index 58ffbbae3f7..67fa651c256 100644 --- a/codex-rs/core/src/config/edit.rs +++ b/codex-rs/core/src/config/edit.rs @@ -22,6 +22,11 @@ pub enum ConfigEdit { model: Option, effort: Option, }, + /// Update the active (or default) plan model selection and optional reasoning effort. + SetPlanModel { + model: Option, + effort: Option, + }, /// Toggle the acknowledgement flag under `[notice]`. SetNoticeHideFullAccessWarning(bool), /// Toggle the Windows world-writable directories warning acknowledgement flag. @@ -265,6 +270,18 @@ impl ConfigDocument { ); mutated }), + ConfigEdit::SetPlanModel { model, effort } => Ok({ + let mut mutated = false; + mutated |= self.write_profile_value( + &["plan_model"], + model.as_ref().map(|model_value| value(model_value.clone())), + ); + mutated |= self.write_profile_value( + &["plan_model_reasoning_effort"], + effort.map(|effort| value(effort.to_string())), + ); + mutated + }), ConfigEdit::SetNoticeHideFullAccessWarning(acknowledged) => Ok(self.write_value( Scope::Global, &[Notice::TABLE_KEY, "hide_full_access_warning"], @@ -596,6 +613,14 @@ impl ConfigEditsBuilder { self } + pub fn set_plan_model(mut self, model: Option<&str>, effort: Option) -> Self { + self.edits.push(ConfigEdit::SetPlanModel { + model: model.map(ToOwned::to_owned), + effort, + }); + self + } + pub fn set_hide_full_access_warning(mut self, acknowledged: bool) -> Self { self.edits .push(ConfigEdit::SetNoticeHideFullAccessWarning(acknowledged)); diff --git a/codex-rs/core/src/config/mod.rs b/codex-rs/core/src/config/mod.rs index 438e441b5c3..69357da1b07 100644 --- a/codex-rs/core/src/config/mod.rs +++ b/codex-rs/core/src/config/mod.rs @@ -95,6 +95,11 @@ pub struct Config { /// 
Optional override of model selection. pub model: Option, + /// Optional override of model selection used for planning flows (e.g. `/plan` mode). + /// + /// When unset, planning flows use the active `model`. + pub plan_model: Option, + /// Model used specifically for review sessions. Defaults to "gpt-5.1-codex-max". pub review_model: String, @@ -195,7 +200,7 @@ pub struct Config { /// Preferred store for MCP OAuth credentials. /// keyring: Use an OS-specific keyring service. /// Credentials stored in the keyring will only be readable by Codex unless the user explicitly grants access via OS-level keyring access. - /// https://github.com/openai/codex/blob/main/codex-rs/rmcp-client/src/oauth.rs#L2 + /// https://github.com/Ixe1/codexel/blob/main/codex-rs/rmcp-client/src/oauth.rs#L2 /// file: CODEX_HOME/.credentials.json /// This file will be readable to Codex and other applications running as the same user. /// auto (default): keyring if available, otherwise file. @@ -237,6 +242,11 @@ pub struct Config { /// Responses API. pub model_reasoning_effort: Option, + /// Value to use for `reasoning.effort` in planning flows (e.g. `/plan` mode). + /// + /// When unset, planning flows use `model_reasoning_effort`. + pub plan_model_reasoning_effort: Option, + /// If not "none", the value to use for `reasoning.summary` when making a /// request using the Responses API. pub model_reasoning_summary: ReasoningSummary, @@ -604,6 +614,8 @@ pub fn set_default_oss_provider(codex_home: &Path, provider: &str) -> std::io::R pub struct ConfigToml { /// Optional override of model selection. pub model: Option, + /// Optional override of model selection used for planning flows (e.g. `/plan` mode). + pub plan_model: Option, /// Review model override used by the `/review` feature. pub review_model: Option, @@ -663,7 +675,7 @@ pub struct ConfigToml { /// Preferred backend for storing MCP OAuth credentials. /// keyring: Use an OS-specific keyring service. 
- /// https://github.com/openai/codex/blob/main/codex-rs/rmcp-client/src/oauth.rs#L2 + /// https://github.com/Ixe1/codexel/blob/main/codex-rs/rmcp-client/src/oauth.rs#L2 /// file: Use a file in the Codex home directory. /// auto (default): Use the OS-specific keyring service if available, otherwise use a file. #[serde(default)] @@ -709,6 +721,7 @@ pub struct ConfigToml { pub show_raw_agent_reasoning: Option, pub model_reasoning_effort: Option, + pub plan_model_reasoning_effort: Option, pub model_reasoning_summary: Option, /// Optional verbosity control for GPT-5 models (Responses API `text.verbosity`). pub model_verbosity: Option, @@ -1191,6 +1204,7 @@ impl Config { let forced_login_method = cfg.forced_login_method; let model = model.or(config_profile.model).or(cfg.model); + let plan_model = config_profile.plan_model.or(cfg.plan_model); let compact_prompt = compact_prompt.or(cfg.compact_prompt).and_then(|value| { let trimmed = value.trim(); @@ -1244,6 +1258,7 @@ impl Config { let config = Self { model, + plan_model, review_model, model_context_window: cfg.model_context_window, model_auto_compact_token_limit: cfg.model_auto_compact_token_limit, @@ -1296,6 +1311,9 @@ impl Config { model_reasoning_effort: config_profile .model_reasoning_effort .or(cfg.model_reasoning_effort), + plan_model_reasoning_effort: config_profile + .plan_model_reasoning_effort + .or(cfg.plan_model_reasoning_effort), model_reasoning_summary: config_profile .model_reasoning_summary .or(cfg.model_reasoning_summary) @@ -1406,31 +1424,50 @@ fn default_review_model() -> String { OPENAI_DEFAULT_REVIEW_MODEL.to_string() } -/// Returns the path to the Codex configuration directory, which can be -/// specified by the `CODEX_HOME` environment variable. If not set, defaults to -/// `~/.codex`. +/// Returns the path to the Codexel configuration directory. 
/// -/// - If `CODEX_HOME` is set, the value will be canonicalized and this +/// The directory can be specified by the `CODEXEL_HOME` environment variable. +/// For compatibility with existing installs, `CODEX_HOME` is also honored. When +/// neither is set, defaults to `~/.codexel`, falling back to `~/.codex` if that +/// directory exists and `~/.codexel` does not. +/// +/// - If `CODEXEL_HOME` (or `CODEX_HOME`) is set, the value will be canonicalized and this /// function will Err if the path does not exist. -/// - If `CODEX_HOME` is not set, this function does not verify that the -/// directory exists. +/// - If neither environment variable is set, this function does not verify +/// that the directory exists. pub fn find_codex_home() -> std::io::Result { - // Honor the `CODEX_HOME` environment variable when it is set to allow users - // (and tests) to override the default location. + // Honor `CODEXEL_HOME` (preferred) and `CODEX_HOME` (legacy) when set to + // allow users (and tests) to override the default location. + if let Ok(val) = std::env::var("CODEXEL_HOME") + && !val.is_empty() + { + return PathBuf::from(val).canonicalize(); + } + if let Ok(val) = std::env::var("CODEX_HOME") && !val.is_empty() { return PathBuf::from(val).canonicalize(); } - let mut p = home_dir().ok_or_else(|| { + let home = home_dir().ok_or_else(|| { std::io::Error::new( std::io::ErrorKind::NotFound, "Could not find home directory", ) })?; - p.push(".codex"); - Ok(p) + + let codexel_home = home.join(".codexel"); + if codexel_home.exists() { + return Ok(codexel_home); + } + + let codex_home = home.join(".codex"); + if codex_home.exists() { + return Ok(codex_home); + } + + Ok(codexel_home) } /// Returns the path to the folder where Codex logs are stored. 
Does not verify @@ -3043,6 +3080,7 @@ model_verbosity = "high" assert_eq!( Config { model: Some("o3".to_string()), + plan_model: None, review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(), model_context_window: None, model_auto_compact_token_limit: None, @@ -3070,6 +3108,7 @@ model_verbosity = "high" hide_agent_reasoning: false, show_raw_agent_reasoning: false, model_reasoning_effort: Some(ReasoningEffort::High), + plan_model_reasoning_effort: None, model_reasoning_summary: ReasoningSummary::Detailed, model_supports_reasoning_summaries: None, model_reasoning_summary_format: None, @@ -3118,6 +3157,7 @@ model_verbosity = "high" )?; let expected_gpt3_profile_config = Config { model: Some("gpt-3.5-turbo".to_string()), + plan_model: None, review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(), model_context_window: None, model_auto_compact_token_limit: None, @@ -3145,6 +3185,7 @@ model_verbosity = "high" hide_agent_reasoning: false, show_raw_agent_reasoning: false, model_reasoning_effort: None, + plan_model_reasoning_effort: None, model_reasoning_summary: ReasoningSummary::default(), model_supports_reasoning_summaries: None, model_reasoning_summary_format: None, @@ -3208,6 +3249,7 @@ model_verbosity = "high" )?; let expected_zdr_profile_config = Config { model: Some("o3".to_string()), + plan_model: None, review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(), model_context_window: None, model_auto_compact_token_limit: None, @@ -3235,6 +3277,7 @@ model_verbosity = "high" hide_agent_reasoning: false, show_raw_agent_reasoning: false, model_reasoning_effort: None, + plan_model_reasoning_effort: None, model_reasoning_summary: ReasoningSummary::default(), model_supports_reasoning_summaries: None, model_reasoning_summary_format: None, @@ -3284,6 +3327,7 @@ model_verbosity = "high" )?; let expected_gpt5_profile_config = Config { model: Some("gpt-5.1".to_string()), + plan_model: None, review_model: OPENAI_DEFAULT_REVIEW_MODEL.to_string(), model_context_window: None, 
model_auto_compact_token_limit: None, @@ -3311,6 +3355,7 @@ model_verbosity = "high" hide_agent_reasoning: false, show_raw_agent_reasoning: false, model_reasoning_effort: Some(ReasoningEffort::High), + plan_model_reasoning_effort: None, model_reasoning_summary: ReasoningSummary::Detailed, model_supports_reasoning_summaries: None, model_reasoning_summary_format: None, diff --git a/codex-rs/core/src/config/profile.rs b/codex-rs/core/src/config/profile.rs index b74b70887d8..401625f7bf6 100644 --- a/codex-rs/core/src/config/profile.rs +++ b/codex-rs/core/src/config/profile.rs @@ -12,12 +12,14 @@ use codex_protocol::openai_models::ReasoningEffort; #[derive(Debug, Clone, Default, PartialEq, Deserialize)] pub struct ConfigProfile { pub model: Option, + pub plan_model: Option, /// The key in the `model_providers` map identifying the /// [`ModelProviderInfo`] to use. pub model_provider: Option, pub approval_policy: Option, pub sandbox_mode: Option, pub model_reasoning_effort: Option, + pub plan_model_reasoning_effort: Option, pub model_reasoning_summary: Option, pub model_verbosity: Option, pub chatgpt_base_url: Option, diff --git a/codex-rs/core/src/lib.rs b/codex-rs/core/src/lib.rs index f78c19328f0..88198aaf9cf 100644 --- a/codex-rs/core/src/lib.rs +++ b/codex-rs/core/src/lib.rs @@ -42,6 +42,7 @@ mod message_history; mod model_provider_info; pub mod parse_command; pub mod path_utils; +mod plan_output; pub mod powershell; pub mod sandboxing; mod stream_events_utils; diff --git a/codex-rs/core/src/mcp_connection_manager.rs b/codex-rs/core/src/mcp_connection_manager.rs index 3213b22b71a..4b21468672a 100644 --- a/codex-rs/core/src/mcp_connection_manager.rs +++ b/codex-rs/core/src/mcp_connection_manager.rs @@ -904,7 +904,7 @@ fn mcp_init_error_display( ) } else if is_mcp_client_auth_required_error(err) { format!( - "The {server_name} MCP server is not logged in. Run `codex mcp login {server_name}`." + "The {server_name} MCP server is not logged in. 
Run `codexel mcp login {server_name}`." ) } else if is_mcp_client_startup_timeout_error(err) { let startup_timeout_secs = match entry { @@ -1137,7 +1137,7 @@ mod tests { let display = mcp_init_error_display(server_name, None, &err); let expected = format!( - "The {server_name} MCP server is not logged in. Run `codex mcp login {server_name}`." + "The {server_name} MCP server is not logged in. Run `codexel mcp login {server_name}`." ); assert_eq!(expected, display); diff --git a/codex-rs/core/src/model_provider_info.rs b/codex-rs/core/src/model_provider_info.rs index 96173922372..1260bd48f2f 100644 --- a/codex-rs/core/src/model_provider_info.rs +++ b/codex-rs/core/src/model_provider_info.rs @@ -266,7 +266,7 @@ pub fn built_in_model_providers() -> HashMap { use ModelProviderInfo as P; // We do not want to be in the business of adjucating which third-party - // providers are bundled with Codex CLI, so we only include the OpenAI and + // providers are bundled with Codexel, so we only include the OpenAI and // open source ("oss") providers by default. Users are encouraged to add to // `model_providers` in config.toml to add their own providers. 
[ diff --git a/codex-rs/core/src/openai_models/model_presets.rs b/codex-rs/core/src/openai_models/model_presets.rs index a01875310f3..da0048ce40d 100644 --- a/codex-rs/core/src/openai_models/model_presets.rs +++ b/codex-rs/core/src/openai_models/model_presets.rs @@ -65,12 +65,7 @@ static PRESETS: Lazy> = Lazy::new(|| { }, ], is_default: false, - upgrade: Some(ModelUpgrade { - id: "gpt-5.2-codex".to_string(), - reasoning_effort_mapping: None, - migration_config_key: "gpt-5.2-codex".to_string(), - model_link: Some("https://openai.com/index/introducing-gpt-5-2-codex".to_string()), - }), + upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: true, supported_in_api: true, }, @@ -92,12 +87,7 @@ static PRESETS: Lazy> = Lazy::new(|| { }, ], is_default: false, - upgrade: Some(ModelUpgrade { - id: "gpt-5.2-codex".to_string(), - reasoning_effort_mapping: None, - migration_config_key: "gpt-5.2-codex".to_string(), - model_link: Some("https://openai.com/index/introducing-gpt-5-2-codex".to_string()), - }), + upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: true, supported_in_api: true, }, @@ -126,12 +116,7 @@ static PRESETS: Lazy> = Lazy::new(|| { }, ], is_default: false, - upgrade: Some(ModelUpgrade { - id: "gpt-5.2-codex".to_string(), - reasoning_effort_mapping: None, - migration_config_key: "gpt-5.2-codex".to_string(), - model_link: Some("https://openai.com/index/introducing-gpt-5-2-codex".to_string()), - }), + upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: true, supported_in_api: true, }, @@ -157,12 +142,7 @@ static PRESETS: Lazy> = Lazy::new(|| { }, ], is_default: false, - upgrade: Some(ModelUpgrade { - id: "gpt-5.2-codex".to_string(), - reasoning_effort_mapping: None, - migration_config_key: "gpt-5.2-codex".to_string(), - model_link: Some("https://openai.com/index/introducing-gpt-5-2-codex".to_string()), - }), + upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: false, supported_in_api: true, }, @@ -183,12 +163,7 @@ static PRESETS: Lazy> = Lazy::new(|| { 
}, ], is_default: false, - upgrade: Some(ModelUpgrade { - id: "gpt-5.1-codex-mini".to_string(), - reasoning_effort_mapping: None, - migration_config_key: HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG.to_string(), - model_link: Some("https://www.codex.com/models/caribou".to_string()), - }), + upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: false, supported_in_api: true, }, @@ -214,12 +189,7 @@ static PRESETS: Lazy> = Lazy::new(|| { }, ], is_default: false, - upgrade: Some(ModelUpgrade { - id: "caribou".to_string(), - reasoning_effort_mapping: None, - migration_config_key: "gpt-5.2-codex".to_string(), - model_link: Some("https://openai.com/index/introducing-gpt-5-2-codex".to_string()), - }), + upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: false, supported_in_api: true, }, @@ -248,12 +218,7 @@ static PRESETS: Lazy> = Lazy::new(|| { }, ], is_default: false, - upgrade: Some(ModelUpgrade { - id: "gpt-5.2-codex".to_string(), - reasoning_effort_mapping: None, - migration_config_key: "gpt-5.2-codex".to_string(), - model_link: Some("https://openai.com/index/introducing-gpt-5-2-codex".to_string()), - }), + upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: false, supported_in_api: true, }, @@ -278,18 +243,26 @@ static PRESETS: Lazy> = Lazy::new(|| { }, ], is_default: false, - upgrade: Some(ModelUpgrade { - id: "caribou".to_string(), - reasoning_effort_mapping: None, - migration_config_key: "caribou".to_string(), - model_link: Some("https://www.codex.com/models/caribou".to_string()), - }), + upgrade: Some(gpt_52_codex_upgrade()), show_in_picker: false, supported_in_api: true, }, ] }); +fn gpt_52_codex_upgrade() -> ModelUpgrade { + ModelUpgrade { + id: "gpt-5.2-codex".to_string(), + reasoning_effort_mapping: None, + migration_config_key: "gpt-5.2-codex".to_string(), + model_link: Some("https://openai.com/index/introducing-gpt-5-2-codex".to_string()), + upgrade_copy: Some( + "Codex is now powered by gpt-5.2-codex, our latest frontier agentic coding model. 
It is smarter and faster than its predecessors and capable of long-running project-scale work." + .to_string(), + ), + } +} + pub(super) fn builtin_model_presets(_auth_mode: Option) -> Vec { PRESETS .iter() diff --git a/codex-rs/core/src/plan_output.rs b/codex-rs/core/src/plan_output.rs new file mode 100644 index 00000000000..ff4a0040f26 --- /dev/null +++ b/codex-rs/core/src/plan_output.rs @@ -0,0 +1,47 @@ +use codex_protocol::plan_tool::StepStatus; +use codex_protocol::protocol::PlanOutputEvent; + +pub(crate) fn render_approved_plan_body(out: &PlanOutputEvent) -> String { + let mut body = String::new(); + let title = out.title.trim(); + body.push_str(&format!("Title: {title}\n")); + let summary = out.summary.trim(); + if !summary.is_empty() { + body.push_str(&format!("Summary: {summary}\n")); + } + let explanation = out.plan.explanation.as_deref().unwrap_or_default().trim(); + if !explanation.is_empty() { + body.push_str("Explanation:\n"); + body.push_str(explanation); + body.push('\n'); + } + body.push_str("Steps:\n"); + if out.plan.plan.is_empty() { + body.push_str("- (no steps provided)\n"); + } else { + for item in &out.plan.plan { + let status = step_status_label(&item.status); + let step = item.step.trim(); + body.push_str(&format!("- [{status}] {step}\n")); + } + } + body +} + +pub(crate) fn render_approved_plan_transcript(out: &PlanOutputEvent) -> String { + let body = render_approved_plan_body(out); + format!("Approved plan:\n{body}") +} + +pub(crate) fn render_approved_plan_developer_prelude(out: &PlanOutputEvent) -> String { + let body = render_approved_plan_body(out); + format!("## Approved Plan (Pinned)\nExecute the approved plan below.\n\n{body}") +} + +fn step_status_label(status: &StepStatus) -> &'static str { + match status { + StepStatus::Pending => "pending", + StepStatus::InProgress => "in_progress", + StepStatus::Completed => "completed", + } +} diff --git a/codex-rs/core/src/rollout/policy.rs b/codex-rs/core/src/rollout/policy.rs index 
07c8af1144b..ca623708bd0 100644 --- a/codex-rs/core/src/rollout/policy.rs +++ b/codex-rs/core/src/rollout/policy.rs @@ -45,6 +45,8 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool { | EventMsg::ContextCompacted(_) | EventMsg::EnteredReviewMode(_) | EventMsg::ExitedReviewMode(_) + | EventMsg::EnteredPlanMode(_) + | EventMsg::ExitedPlanMode(_) | EventMsg::UndoCompleted(_) | EventMsg::TurnAborted(_) => true, EventMsg::Error(_) @@ -67,6 +69,8 @@ pub(crate) fn should_persist_event_msg(ev: &EventMsg) -> bool { | EventMsg::ExecCommandEnd(_) | EventMsg::ExecApprovalRequest(_) | EventMsg::ElicitationRequest(_) + | EventMsg::AskUserQuestionRequest(_) + | EventMsg::PlanApprovalRequest(_) | EventMsg::ApplyPatchApprovalRequest(_) | EventMsg::BackgroundEvent(_) | EventMsg::StreamError(_) diff --git a/codex-rs/core/src/state/mod.rs b/codex-rs/core/src/state/mod.rs index 642433a7866..ca5569c58ae 100644 --- a/codex-rs/core/src/state/mod.rs +++ b/codex-rs/core/src/state/mod.rs @@ -5,5 +5,6 @@ mod turn; pub(crate) use service::SessionServices; pub(crate) use session::SessionState; pub(crate) use turn::ActiveTurn; +pub(crate) use turn::PendingPlanApproval; pub(crate) use turn::RunningTask; pub(crate) use turn::TaskKind; diff --git a/codex-rs/core/src/state/session.rs b/codex-rs/core/src/state/session.rs index c61d1883735..87af4237ed3 100644 --- a/codex-rs/core/src/state/session.rs +++ b/codex-rs/core/src/state/session.rs @@ -1,6 +1,7 @@ //! Session-wide mutable state. 
use codex_protocol::models::ResponseItem; +use codex_protocol::protocol::PlanOutputEvent; use crate::codex::SessionConfiguration; use crate::context_manager::ContextManager; @@ -14,6 +15,7 @@ pub(crate) struct SessionState { pub(crate) session_configuration: SessionConfiguration, pub(crate) history: ContextManager, pub(crate) latest_rate_limits: Option, + pub(crate) pending_approved_plan: Option, } impl SessionState { @@ -24,9 +26,18 @@ impl SessionState { session_configuration, history, latest_rate_limits: None, + pending_approved_plan: None, } } + pub(crate) fn set_pending_approved_plan(&mut self, plan: Option) { + self.pending_approved_plan = plan; + } + + pub(crate) fn take_pending_approved_plan(&mut self) -> Option { + self.pending_approved_plan.take() + } + // History helpers pub(crate) fn record_items(&mut self, items: I, policy: TruncationPolicy) where diff --git a/codex-rs/core/src/state/turn.rs b/codex-rs/core/src/state/turn.rs index e2fff0554e7..d43f4c7a6bc 100644 --- a/codex-rs/core/src/state/turn.rs +++ b/codex-rs/core/src/state/turn.rs @@ -8,13 +8,21 @@ use tokio::sync::Notify; use tokio_util::sync::CancellationToken; use tokio_util::task::AbortOnDropHandle; +use codex_protocol::ask_user_question::AskUserQuestionResponse; use codex_protocol::models::ResponseInputItem; +use codex_protocol::plan_approval::PlanApprovalResponse; use tokio::sync::oneshot; use crate::codex::TurnContext; +use crate::protocol::PlanProposal; use crate::protocol::ReviewDecision; use crate::tasks::SessionTask; +pub(crate) struct PendingPlanApproval { + pub(crate) proposal: PlanProposal, + pub(crate) tx: oneshot::Sender, +} + /// Metadata about the currently running turn. 
pub(crate) struct ActiveTurn { pub(crate) tasks: IndexMap, @@ -34,6 +42,7 @@ impl Default for ActiveTurn { pub(crate) enum TaskKind { Regular, Review, + Plan, Compact, } @@ -67,6 +76,8 @@ impl ActiveTurn { #[derive(Default)] pub(crate) struct TurnState { pending_approvals: HashMap>, + pending_user_questions: HashMap>, + pending_plan_approvals: HashMap, pending_input: Vec, } @@ -86,8 +97,40 @@ impl TurnState { self.pending_approvals.remove(key) } + pub(crate) fn insert_pending_user_question( + &mut self, + key: String, + tx: oneshot::Sender, + ) -> Option> { + self.pending_user_questions.insert(key, tx) + } + + pub(crate) fn remove_pending_user_question( + &mut self, + key: &str, + ) -> Option> { + self.pending_user_questions.remove(key) + } + + pub(crate) fn insert_pending_plan_approval( + &mut self, + key: String, + pending: PendingPlanApproval, + ) -> Option { + self.pending_plan_approvals.insert(key, pending) + } + + pub(crate) fn remove_pending_plan_approval( + &mut self, + key: &str, + ) -> Option { + self.pending_plan_approvals.remove(key) + } + pub(crate) fn clear_pending(&mut self) { self.pending_approvals.clear(); + self.pending_user_questions.clear(); + self.pending_plan_approvals.clear(); self.pending_input.clear(); } diff --git a/codex-rs/core/src/tasks/mod.rs b/codex-rs/core/src/tasks/mod.rs index b5aaf299991..e253acf78fa 100644 --- a/codex-rs/core/src/tasks/mod.rs +++ b/codex-rs/core/src/tasks/mod.rs @@ -1,5 +1,6 @@ mod compact; mod ghost_snapshot; +mod plan; mod regular; mod review; mod undo; @@ -31,6 +32,7 @@ use codex_protocol::user_input::UserInput; pub(crate) use compact::CompactTask; pub(crate) use ghost_snapshot::GhostSnapshotTask; +pub(crate) use plan::PlanTask; pub(crate) use regular::RegularTask; pub(crate) use review::ReviewTask; pub(crate) use undo::UndoTask; diff --git a/codex-rs/core/src/tasks/plan.rs b/codex-rs/core/src/tasks/plan.rs new file mode 100644 index 00000000000..46b7414729d --- /dev/null +++ b/codex-rs/core/src/tasks/plan.rs 
@@ -0,0 +1,396 @@ +use async_trait::async_trait; +use codex_protocol::items::TurnItem; +use codex_protocol::models::ContentItem; +use codex_protocol::models::ResponseItem; +use codex_protocol::protocol::AgentMessageContentDeltaEvent; +use codex_protocol::protocol::AgentMessageDeltaEvent; +use codex_protocol::protocol::Event; +use codex_protocol::protocol::EventMsg; +use codex_protocol::protocol::ExitedPlanModeEvent; +use codex_protocol::protocol::ItemCompletedEvent; +use codex_protocol::protocol::PlanOutputEvent; +use codex_protocol::protocol::PlanRequest; +use codex_protocol::protocol::SubAgentSource; +use tokio_util::sync::CancellationToken; + +use crate::codex::Session; +use crate::codex::TurnContext; +use crate::codex_delegate::run_codex_conversation_one_shot; +use crate::plan_output; +use crate::state::TaskKind; +use codex_protocol::user_input::UserInput; +use std::sync::Arc; + +use super::SessionTask; +use super::SessionTaskContext; + +#[derive(Clone)] +pub(crate) struct PlanTask { + request: PlanRequest, +} + +impl PlanTask { + pub(crate) fn new(request: PlanRequest) -> Self { + Self { request } + } +} + +const PLAN_MODE_DEVELOPER_INSTRUCTIONS: &str = r#"## Plan Mode +You are planning only. Do not call `apply_patch` or execute mutating commands. + +Output quality bar: +- The plan must be actionable by another engineer without extra back-and-forth. +- Prefer 8-16 steps. Each step should describe a concrete deliverable and, when helpful, name key files/components to touch. +- Put detailed substeps, rationale, trade-offs, risks, and validation commands in `plan.explanation` (multi-paragraph is fine). +- `plan.explanation` MUST be a practical runbook. Use clear section headings. 
Include ALL of: + - Assumptions + - Scope (in-scope + non-goals) + - Touchpoints (files/modules/components to change, with what/why) + - Approach (sequence notes; include a short "discovery checklist" of 2-6 read-only commands/files if the task is ambiguous) + - Risks (failure modes + mitigations + rollback) + - Acceptance criteria (observable outcomes; 3-8 bullets) + - Validation (exact commands, and where to run them) + +Mini-example (illustrative; do not copy verbatim): +- Step: "Add `--dry-run` flag to CLI" +- Touchpoints: `src/cli.rs` (arg parsing), `src/main.rs` (plumb flag) +- Acceptance criteria: "`mytool --dry-run` prints planned actions and exits 0 without writing" +- Validation: "`cd mytool; cargo test -p mytool-cli`" + +Process: +- Once you understand the goal, call `propose_plan_variants` to generate 3 alternative plans (at most once per draft). +- Synthesize the final plan (do not just pick a variant verbatim). +- Present the final plan via `approve_plan`. +- After an `approve_plan` result: + - Approved: output the final plan JSON as your only assistant message. + - Revised: incorporate feedback and call `approve_plan` again. + - Rejected: stop; do not proceed. +"#; + +const PLAN_MODE_DEVELOPER_PREFIX: &str = r#"## Plan Mode (Slash Command) +Goal: produce a clear, actionable implementation plan for the user's request without making code changes. + +Rules: +- You may explore the repo with read-only commands, but keep it minimal (2-6 targeted commands) and avoid dumping large files. +- Do not attempt to edit files or run mutating commands (no installs, no git writes, no redirects/heredocs that write files). +- You may ask clarifying questions via AskUserQuestion when requirements are ambiguous or missing. +- Use `propose_plan_variants` to generate 3 alternative plans as input (at most once per plan draft). If it fails, proceed without it. +- When you have a final plan, call `approve_plan` with: + - Title: short and specific. 
+ - Summary: 2-4 sentences with key approach + scope boundaries. + - Steps: concise, ordered, and checkable. + - Explanation: use the required section headings (Assumptions; Scope; Touchpoints; Approach; Risks; Acceptance criteria; Validation) and make it a junior-executable runbook. +- If the user requests revisions, incorporate feedback and propose a revised plan (you may call `propose_plan_variants` again only if the plan materially changes or the user asks for alternatives). +- If the user rejects, stop. + +When the plan is approved, your final assistant message MUST be ONLY valid JSON matching: +{ "title": string, "summary": string, "plan": { "explanation": string|null, "plan": [ { "step": string, "status": "pending"|"in_progress"|"completed" } ] } } +"#; + +fn build_plan_mode_developer_instructions(existing: &str, ask: &str) -> String { + let mut developer_instructions = String::new(); + developer_instructions.push_str(PLAN_MODE_DEVELOPER_PREFIX); + developer_instructions.push_str("\n\n"); + developer_instructions.push_str(PLAN_MODE_DEVELOPER_INSTRUCTIONS); + + let ask = ask.trim(); + if !ask.is_empty() { + developer_instructions.push('\n'); + developer_instructions.push_str(ask); + } + + let existing = existing.trim(); + if !existing.is_empty() { + developer_instructions.push_str("\n\n"); + developer_instructions.push_str(existing); + } + + developer_instructions +} + +#[async_trait] +impl SessionTask for PlanTask { + fn kind(&self) -> TaskKind { + TaskKind::Plan + } + + async fn run( + self: Arc, + session: Arc, + ctx: Arc, + _input: Vec, + cancellation_token: CancellationToken, + ) -> Option { + let output = match start_plan_conversation( + session.clone(), + ctx.clone(), + self.request.clone(), + cancellation_token.clone(), + ) + .await + { + Some(receiver) => process_plan_events(session.clone(), ctx.clone(), receiver).await, + None => None, + }; + + if !cancellation_token.is_cancelled() { + exit_plan_mode(session.clone_session(), output.clone(), 
ctx.clone()).await; + } + None + } + + async fn abort(&self, session: Arc, ctx: Arc) { + exit_plan_mode(session.clone_session(), None, ctx).await; + } +} + +async fn start_plan_conversation( + session: Arc, + ctx: Arc, + request: PlanRequest, + cancellation_token: CancellationToken, +) -> Option> { + let config = ctx.client.config(); + let mut sub_agent_config = config.as_ref().clone(); + + // Ensure plan mode uses the same model + reasoning settings as the parent turn (e.g. after a + // `/model` change), unless a plan-model override is configured. The base config can lag behind + // session model overrides. + sub_agent_config.model = Some( + ctx.plan_model + .clone() + .unwrap_or_else(|| ctx.client.get_model()), + ); + sub_agent_config.model_reasoning_effort = ctx + .plan_reasoning_effort + .or(ctx.client.get_reasoning_effort()); + sub_agent_config.model_reasoning_summary = ctx.client.get_reasoning_summary(); + + let ask = crate::tools::spec::prepend_ask_user_question_developer_instructions(None) + .unwrap_or_default(); + + // Plan mode must not override the base/system prompt because some environments restrict it to + // whitelisted prompts. Instead, prepend plan mode guidance to developer instructions. 
+ let existing = sub_agent_config + .developer_instructions + .clone() + .unwrap_or_default(); + sub_agent_config.developer_instructions = Some(build_plan_mode_developer_instructions( + existing.as_str(), + ask.as_str(), + )); + + sub_agent_config + .features + .disable(crate::features::Feature::ApplyPatchFreeform) + .disable(crate::features::Feature::WebSearchRequest) + .disable(crate::features::Feature::ViewImageTool); + + sub_agent_config.approval_policy = + crate::config::Constrained::allow_any(codex_protocol::protocol::AskForApproval::Never); + sub_agent_config.sandbox_policy = codex_protocol::protocol::SandboxPolicy::ReadOnly; + + let input: Vec = vec![UserInput::Text { + text: format!("User goal: {}", request.goal.trim()), + }]; + + run_codex_conversation_one_shot( + sub_agent_config, + session.auth_manager(), + session.models_manager(), + input, + session.clone_session(), + ctx, + cancellation_token, + None, + SubAgentSource::Other("plan_mode".to_string()), + ) + .await + .ok() + .map(|io| io.rx_event) +} + +async fn process_plan_events( + session: Arc, + ctx: Arc, + receiver: async_channel::Receiver, +) -> Option { + while let Ok(event) = receiver.recv().await { + match event.clone().msg { + // Suppress assistant text; plan mode surfaces via tool UIs and final output. + EventMsg::AgentMessage(_) + | EventMsg::ItemCompleted(ItemCompletedEvent { + item: TurnItem::AgentMessage(_), + .. + }) + | EventMsg::AgentMessageDelta(AgentMessageDeltaEvent { .. }) + | EventMsg::AgentMessageContentDelta(AgentMessageContentDeltaEvent { .. 
}) => {} + EventMsg::TaskComplete(task_complete) => { + let out = task_complete + .last_agent_message + .as_deref() + .and_then(parse_plan_output_event); + return out; + } + EventMsg::TurnAborted(_) => return None, + other => { + session + .clone_session() + .send_event(ctx.as_ref(), other) + .await; + } + } + } + None +} + +fn parse_plan_output_event(text: &str) -> Option { + let trimmed = text.trim(); + if trimmed.is_empty() || trimmed == "null" { + return None; + } + if let Ok(ev) = serde_json::from_str::(trimmed) { + return Some(ev); + } + if let (Some(start), Some(end)) = (trimmed.find('{'), trimmed.rfind('}')) + && start < end + && let Some(slice) = trimmed.get(start..=end) + && let Ok(ev) = serde_json::from_str::(slice) + { + return Some(ev); + } + None +} + +pub(crate) async fn exit_plan_mode( + session: Arc, + plan_output: Option, + ctx: Arc, +) { + const PLAN_USER_MESSAGE_ID: &str = "plan:rollout:user"; + const PLAN_ASSISTANT_MESSAGE_ID: &str = "plan:rollout:assistant"; + + session.set_pending_approved_plan(plan_output.clone()).await; + + let (user_message, assistant_message) = match plan_output.as_ref() { + Some(out) => ( + "Plan approved.".to_string(), + plan_output::render_approved_plan_transcript(out), + ), + None => ( + "Plan ended without an approved plan.".to_string(), + "Plan was rejected or interrupted.".to_string(), + ), + }; + + session + .record_conversation_items( + &ctx, + &[ResponseItem::Message { + id: Some(PLAN_USER_MESSAGE_ID.to_string()), + role: "user".to_string(), + content: vec![ContentItem::InputText { text: user_message }], + }], + ) + .await; + session + .send_event( + ctx.as_ref(), + EventMsg::ExitedPlanMode(ExitedPlanModeEvent { plan_output }), + ) + .await; + session + .record_response_item_and_emit_turn_item( + ctx.as_ref(), + ResponseItem::Message { + id: Some(PLAN_ASSISTANT_MESSAGE_ID.to_string()), + role: "assistant".to_string(), + content: vec![ContentItem::OutputText { + text: assistant_message, + }], + }, + ) + .await; 
+} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn plan_mode_does_not_override_base_instructions() { + // This test guards against regressions where plan mode sets custom base/system prompts, + // which can break in environments that restrict system prompts. + let codex_home = tempfile::TempDir::new().expect("tmp dir"); + let overrides = { + #[cfg(target_os = "linux")] + { + use assert_cmd::cargo::cargo_bin; + let mut overrides = crate::config::ConfigOverrides::default(); + overrides.codex_linux_sandbox_exe = Some(cargo_bin("codex-linux-sandbox")); + overrides + } + #[cfg(not(target_os = "linux"))] + { + crate::config::ConfigOverrides::default() + } + }; + let mut cfg = crate::config::Config::load_from_base_config_with_overrides( + crate::config::ConfigToml::default(), + overrides, + codex_home.path().to_path_buf(), + ) + .expect("load test config"); + + cfg.base_instructions = None; + cfg.developer_instructions = Some("existing developer instructions".to_string()); + + let ask = crate::tools::spec::prepend_ask_user_question_developer_instructions(None) + .unwrap_or_default(); + let existing_base = cfg.base_instructions.clone(); + + let existing = cfg.developer_instructions.clone().unwrap_or_default(); + cfg.developer_instructions = Some(build_plan_mode_developer_instructions( + existing.as_str(), + ask.as_str(), + )); + + assert_eq!(cfg.base_instructions, existing_base); + assert!( + cfg.developer_instructions + .as_deref() + .unwrap_or_default() + .starts_with("## Plan Mode") + ); + assert!( + cfg.developer_instructions + .as_deref() + .unwrap_or_default() + .contains("existing developer instructions") + ); + } + + #[test] + fn plan_mode_requires_explanation_sections() { + let required = [ + "Assumptions", + "Scope (in-scope + non-goals)", + "Touchpoints (files/modules/components to change, with what/why)", + "Approach (sequence notes; include a short \"discovery checklist\" of 2-6 read-only commands/files if the task is ambiguous)", + "Risks 
(failure modes + mitigations + rollback)", + "Acceptance criteria (observable outcomes; 3-8 bullets)", + "Validation (exact commands, and where to run them)", + ]; + + for needle in required { + assert!( + PLAN_MODE_DEVELOPER_INSTRUCTIONS.contains(needle), + "missing required section anchor: {needle}" + ); + } + + assert!(PLAN_MODE_DEVELOPER_PREFIX.contains( + "Assumptions; Scope; Touchpoints; Approach; Risks; Acceptance criteria; Validation" + )); + } +} diff --git a/codex-rs/core/src/tasks/review.rs b/codex-rs/core/src/tasks/review.rs index 00dbc51f405..e21f082abf1 100644 --- a/codex-rs/core/src/tasks/review.rs +++ b/codex-rs/core/src/tasks/review.rs @@ -11,6 +11,7 @@ use codex_protocol::protocol::EventMsg; use codex_protocol::protocol::ExitedReviewModeEvent; use codex_protocol::protocol::ItemCompletedEvent; use codex_protocol::protocol::ReviewOutputEvent; +use codex_protocol::protocol::SubAgentSource; use tokio_util::sync::CancellationToken; use crate::codex::Session; @@ -101,6 +102,7 @@ async fn start_review_conversation( ctx.clone(), cancellation_token, None, + SubAgentSource::Review, ) .await) .ok() diff --git a/codex-rs/core/src/tools/handlers/ask_user_question.rs b/codex-rs/core/src/tools/handlers/ask_user_question.rs new file mode 100644 index 00000000000..2b241c8cc7e --- /dev/null +++ b/codex-rs/core/src/tools/handlers/ask_user_question.rs @@ -0,0 +1,181 @@ +use async_trait::async_trait; +use codex_protocol::ask_user_question::AskUserQuestion; +use codex_protocol::ask_user_question::AskUserQuestionArgs; +use codex_protocol::ask_user_question::AskUserQuestionResponse; +use codex_protocol::protocol::SessionSource; +use codex_protocol::protocol::SubAgentSource; +use serde_json::json; + +use crate::function_tool::FunctionCallError; +use crate::tools::context::ToolInvocation; +use crate::tools::context::ToolOutput; +use crate::tools::context::ToolPayload; +use crate::tools::registry::ToolHandler; +use crate::tools::registry::ToolKind; + +pub(crate) const 
ASK_USER_QUESTION_TOOL_NAME: &str = "ask_user_question"; + +pub struct AskUserQuestionHandler; + +fn normalize_choice_label(label: &str) -> String { + let trimmed = label.trim_start(); + + let mut chars = trimmed.char_indices().peekable(); + let mut after_digits = 0usize; + let mut saw_digit = false; + while let Some((idx, ch)) = chars.peek().copied() + && ch.is_ascii_digit() + { + saw_digit = true; + chars.next(); + after_digits = idx + ch.len_utf8(); + } + + if !saw_digit { + return trimmed.to_string(); + } + + // Only strip numeric prefixes when they look like enumeration: "1) Foo", "2. Bar", "3: Baz". + let Some((idx, ch)) = chars.peek().copied() else { + return trimmed.to_string(); + }; + if !matches!(ch, ')' | '.' | ':') { + return trimmed.to_string(); + } + + chars.next(); + let mut end = idx + ch.len_utf8(); + while let Some((idx, ch)) = chars.peek().copied() + && ch.is_whitespace() + { + chars.next(); + end = idx + ch.len_utf8(); + } + + if end <= after_digits { + return trimmed.to_string(); + } + + let rest = trimmed[end..].trim_start(); + if rest.is_empty() { + trimmed.to_string() + } else { + rest.to_string() + } +} + +fn normalize_questions(mut questions: Vec) -> Vec { + for q in &mut questions { + for opt in &mut q.options { + opt.label = normalize_choice_label(opt.label.as_str()); + } + } + questions +} + +#[async_trait] +impl ToolHandler for AskUserQuestionHandler { + fn kind(&self) -> ToolKind { + ToolKind::Function + } + + async fn is_mutating(&self, _invocation: &ToolInvocation) -> bool { + true + } + + async fn handle(&self, invocation: ToolInvocation) -> Result { + let ToolInvocation { + session, + turn, + call_id, + tool_name, + payload, + .. 
+ } = invocation; + + let ToolPayload::Function { arguments } = payload else { + return Err(FunctionCallError::RespondToModel(format!( + "unsupported payload for {tool_name}" + ))); + }; + + let source = turn.client.get_session_source(); + if let SessionSource::SubAgent(SubAgentSource::Other(label)) = &source + && label.starts_with("plan_variant") + { + return Err(FunctionCallError::RespondToModel( + "AskUserQuestion is not supported in non-interactive planning subagents" + .to_string(), + )); + } + + let args: AskUserQuestionArgs = serde_json::from_str(&arguments).map_err(|e| { + FunctionCallError::RespondToModel(format!("failed to parse function arguments: {e:?}")) + })?; + + validate_questions(&args.questions).map_err(FunctionCallError::RespondToModel)?; + + let questions = normalize_questions(args.questions); + validate_questions(&questions).map_err(FunctionCallError::RespondToModel)?; + + let response = session + .request_ask_user_question(turn.as_ref(), call_id, questions) + .await; + + match response { + AskUserQuestionResponse::Answered { answers } => { + let output = json!({ "answers": answers }).to_string(); + Ok(ToolOutput::Function { + content: output, + content_items: None, + success: Some(true), + }) + } + AskUserQuestionResponse::Cancelled => Err(FunctionCallError::RespondToModel( + "AskUserQuestion was cancelled by the user".to_string(), + )), + } + } +} + +fn validate_questions(questions: &[AskUserQuestion]) -> Result<(), String> { + if questions.is_empty() { + return Err("AskUserQuestion requires at least 1 question".to_string()); + } + if questions.len() > 4 { + return Err("AskUserQuestion supports at most 4 questions".to_string()); + } + + for (idx, q) in questions.iter().enumerate() { + if q.header.is_empty() { + return Err(format!("question {idx} header must be non-empty")); + } + if q.header.chars().count() > 12 { + return Err(format!( + "question {idx} header must be at most 12 characters" + )); + } + + if q.question.is_empty() { + return 
Err(format!("question {idx} must be non-empty")); + } + + if q.options.len() < 2 || q.options.len() > 4 { + return Err(format!( + "question {idx} options must have 2-4 items (Other is provided automatically)" + )); + } + for opt in &q.options { + if opt.label.eq_ignore_ascii_case("other") { + return Err(format!( + "question {idx} must not include an 'Other' option (it is provided automatically)" + )); + } + if opt.label.is_empty() { + return Err(format!("question {idx} option label must be non-empty")); + } + } + } + + Ok(()) +} diff --git a/codex-rs/core/src/tools/handlers/mod.rs b/codex-rs/core/src/tools/handlers/mod.rs index dcf848e3760..b1179818fd7 100644 --- a/codex-rs/core/src/tools/handlers/mod.rs +++ b/codex-rs/core/src/tools/handlers/mod.rs @@ -1,9 +1,12 @@ pub mod apply_patch; +mod ask_user_question; mod grep_files; mod list_dir; mod mcp; mod mcp_resource; mod plan; +mod plan_approval; +mod plan_variants; mod read_file; mod shell; mod test_sync; @@ -13,11 +16,17 @@ mod view_image; pub use plan::PLAN_TOOL; pub use apply_patch::ApplyPatchHandler; +pub(crate) use ask_user_question::ASK_USER_QUESTION_TOOL_NAME; +pub use ask_user_question::AskUserQuestionHandler; pub use grep_files::GrepFilesHandler; pub use list_dir::ListDirHandler; pub use mcp::McpHandler; pub use mcp_resource::McpResourceHandler; pub use plan::PlanHandler; +pub(crate) use plan_approval::APPROVE_PLAN_TOOL_NAME; +pub use plan_approval::PlanApprovalHandler; +pub(crate) use plan_variants::PROPOSE_PLAN_VARIANTS_TOOL_NAME; +pub use plan_variants::PlanVariantsHandler; pub use read_file::ReadFileHandler; pub use shell::ShellCommandHandler; pub use shell::ShellHandler; diff --git a/codex-rs/core/src/tools/handlers/plan_approval.rs b/codex-rs/core/src/tools/handlers/plan_approval.rs new file mode 100644 index 00000000000..a47d971e355 --- /dev/null +++ b/codex-rs/core/src/tools/handlers/plan_approval.rs @@ -0,0 +1,85 @@ +use async_trait::async_trait; +use codex_protocol::plan_approval::PlanProposal; 
+use codex_protocol::protocol::SessionSource; +use codex_protocol::protocol::SubAgentSource; +use serde::Deserialize; +use serde_json::json; + +use crate::function_tool::FunctionCallError; +use crate::tools::context::ToolInvocation; +use crate::tools::context::ToolOutput; +use crate::tools::context::ToolPayload; +use crate::tools::registry::ToolHandler; +use crate::tools::registry::ToolKind; + +pub(crate) const APPROVE_PLAN_TOOL_NAME: &str = "approve_plan"; + +pub struct PlanApprovalHandler; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +struct ApprovePlanArgs { + proposal: PlanProposal, +} + +#[async_trait] +impl ToolHandler for PlanApprovalHandler { + fn kind(&self) -> ToolKind { + ToolKind::Function + } + + async fn is_mutating(&self, _invocation: &ToolInvocation) -> bool { + true + } + + async fn handle(&self, invocation: ToolInvocation) -> Result { + let ToolInvocation { + session, + turn, + call_id, + tool_name, + payload, + .. + } = invocation; + + let ToolPayload::Function { arguments } = payload else { + return Err(FunctionCallError::RespondToModel(format!( + "unsupported payload for {tool_name}" + ))); + }; + + let source = turn.client.get_session_source(); + if let SessionSource::SubAgent(SubAgentSource::Other(label)) = &source + && label.starts_with("plan_variant") + { + return Err(FunctionCallError::RespondToModel( + "approve_plan is not supported in non-interactive planning subagents".to_string(), + )); + } + + let args: ApprovePlanArgs = serde_json::from_str(&arguments).map_err(|e| { + FunctionCallError::RespondToModel(format!("failed to parse function arguments: {e:?}")) + })?; + + if args.proposal.title.trim().is_empty() { + return Err(FunctionCallError::RespondToModel( + "proposal.title must be non-empty".to_string(), + )); + } + if args.proposal.plan.plan.is_empty() { + return Err(FunctionCallError::RespondToModel( + "proposal.plan.plan must contain at least 1 step".to_string(), + )); + } + + let response = session + 
.request_plan_approval(turn.as_ref(), call_id, args.proposal) + .await; + + Ok(ToolOutput::Function { + content: json!({ "response": response }).to_string(), + content_items: None, + success: Some(true), + }) + } +} diff --git a/codex-rs/core/src/tools/handlers/plan_variants.rs b/codex-rs/core/src/tools/handlers/plan_variants.rs new file mode 100644 index 00000000000..74423d1c955 --- /dev/null +++ b/codex-rs/core/src/tools/handlers/plan_variants.rs @@ -0,0 +1,600 @@ +use async_trait::async_trait; +use codex_protocol::plan_mode::PlanOutputEvent; +use codex_protocol::plan_tool::UpdatePlanArgs; +use codex_protocol::protocol::Event; +use codex_protocol::protocol::EventMsg; +use codex_protocol::protocol::SessionSource; +use codex_protocol::protocol::SubAgentSource; +use codex_protocol::user_input::UserInput; +use serde::Deserialize; +use serde_json::json; +use std::sync::Arc; +use std::time::Duration; +use std::time::Instant; +use tokio::task::JoinSet; +use tokio_util::sync::CancellationToken; + +use crate::codex_delegate::run_codex_conversation_one_shot; +use crate::config::Config; +use crate::features::Feature; +use crate::function_tool::FunctionCallError; +use crate::tools::context::ToolInvocation; +use crate::tools::context::ToolOutput; +use crate::tools::context::ToolPayload; +use crate::tools::registry::ToolHandler; +use crate::tools::registry::ToolKind; + +pub(crate) const PROPOSE_PLAN_VARIANTS_TOOL_NAME: &str = "propose_plan_variants"; + +pub struct PlanVariantsHandler; + +#[derive(Debug, Clone, Deserialize)] +#[serde(deny_unknown_fields)] +struct ProposePlanVariantsArgs { + goal: String, +} + +const PLAN_VARIANT_PROMPT: &str = r#"You are a planning subagent producing a single plan variant for the user's goal. + +Hard rules: +- Do not ask the user questions. +- Do not propose or perform edits. Do not call apply_patch. +- Do not call propose_plan_variants. 
+- You may explore the repo with read-only commands, but keep it minimal (2-6 targeted commands) and avoid dumping large files. +- Output ONLY valid JSON matching this shape: + { "title": string, "summary": string, "plan": { "explanation": string|null, "plan": [ { "step": string, "status": "pending"|"in_progress"|"completed" } ] } } + +Quality bar: +- Prefer 8-16 steps that are checkable and ordered. +- `plan.explanation` MUST be a practical runbook with clear section headings. Include ALL of: + - Assumptions + - Scope (in-scope + non-goals) + - Touchpoints (files/modules/components to change, with what/why) + - Approach (sequence notes; include a short "discovery checklist" of 2-6 read-only commands/files if the task is ambiguous) + - Risks (failure modes + mitigations + rollback) + - Acceptance criteria (observable outcomes; 3-8 bullets) + - Validation (exact commands, and where to run them) +- Make this variant meaningfully different from other plausible variants (trade-offs, sequencing, scope, risk posture). +"#; + +fn variant_name(idx: usize, total: usize) -> Option<&'static str> { + if total == 3 { + match idx { + 1 => Some("Minimal"), + 2 => Some("Correctness"), + 3 => Some("DX"), + _ => None, + } + } else { + None + } +} + +fn variant_title(idx: usize, total: usize) -> String { + variant_name(idx, total) + .map(str::to_string) + .unwrap_or_else(|| { + if total > 0 { + format!("Variant {idx}/{total}") + } else { + format!("Variant {idx}") + } + }) +} + +fn plan_variant_focus(idx: usize) -> &'static str { + match idx { + 1 => { + "Variant 1 (Minimal): minimal-risk, minimal-diff path (pragmatic, incremental; avoid refactors). Title MUST be \"Minimal\"." + } + 2 => { + "Variant 2 (Correctness): correctness-first path (tests, invariants, edge cases, careful validation/rollback). Title MUST be \"Correctness\"." + } + 3 => { + "Variant 3 (DX): architecture/DX-first path (refactors that pay down tech debt, clearer abstractions, better ergonomics). 
Title MUST be \"DX\"." + } + _ => "Use a distinct angle and trade-offs.", + } +} + +fn build_plan_variant_developer_instructions(idx: usize, total: usize, existing: &str) -> String { + let existing = existing.trim(); + if existing.is_empty() { + return format!( + "{PLAN_VARIANT_PROMPT}\n\n{focus}\n(Return plan variant {idx}/{total}.)", + focus = plan_variant_focus(idx) + ); + } + format!( + "{PLAN_VARIANT_PROMPT}\n\n{focus}\n(Return plan variant {idx}/{total}.)\n\n{existing}", + focus = plan_variant_focus(idx) + ) +} + +#[async_trait] +impl ToolHandler for PlanVariantsHandler { + fn kind(&self) -> ToolKind { + ToolKind::Function + } + + async fn handle(&self, invocation: ToolInvocation) -> Result { + let ToolInvocation { + session, + turn, + payload, + tool_name, + .. + } = invocation; + + let source = turn.client.get_session_source(); + if let SessionSource::SubAgent(SubAgentSource::Other(label)) = &source + && label.starts_with("plan_variant") + { + return Err(FunctionCallError::RespondToModel( + "propose_plan_variants is not supported in plan-variant subagents".to_string(), + )); + } + + let ToolPayload::Function { arguments } = payload else { + return Err(FunctionCallError::RespondToModel(format!( + "unsupported payload for {tool_name}" + ))); + }; + + let args: ProposePlanVariantsArgs = serde_json::from_str(&arguments).map_err(|e| { + FunctionCallError::RespondToModel(format!("failed to parse function arguments: {e:?}")) + })?; + + let goal = args.goal.trim(); + if goal.is_empty() { + return Err(FunctionCallError::RespondToModel( + "goal must be non-empty".to_string(), + )); + } + + const TOTAL: usize = 3; + + let mut join_set = JoinSet::new(); + for idx in 1..=TOTAL { + let label = format!("plan_variant_{idx}"); + let base_config = turn.client.config().as_ref().clone(); + let goal = goal.to_string(); + let session = Arc::clone(&session); + let turn = Arc::clone(&turn); + join_set.spawn(async move { + let started_at = Instant::now(); + + session + 
.notify_background_event( + turn.as_ref(), + format!("Plan variants: generating {idx}/{TOTAL}…"), + ) + .await; + + session + .notify_background_event( + turn.as_ref(), + format!("Plan variant {idx}/{TOTAL}: starting"), + ) + .await; + + let out = run_one_variant( + base_config, + goal, + idx, + TOTAL, + label, + Arc::clone(&session), + Arc::clone(&turn), + ) + .await; + + let elapsed = started_at.elapsed(); + session + .notify_background_event( + turn.as_ref(), + format!( + "Plan variants: finished {idx}/{TOTAL} ({})", + fmt_variant_duration(elapsed) + ), + ) + .await; + + (idx, out) + }); + } + + let mut variants_by_idx = vec![None; TOTAL]; + while let Some(result) = join_set.join_next().await { + match result { + Ok((idx, out)) => { + if idx > 0 && idx <= TOTAL { + variants_by_idx[idx - 1] = Some(out); + } + } + Err(err) => { + return Err(FunctionCallError::RespondToModel(format!( + "failed to join planning subagent task: {err:?}" + ))); + } + } + } + + let variants = variants_by_idx + .into_iter() + .enumerate() + .map(|(idx, out)| { + out.unwrap_or_else(|| PlanOutputEvent { + title: variant_title(idx + 1, TOTAL), + summary: "Variant task did not return output.".to_string(), + plan: UpdatePlanArgs { + explanation: None, + plan: Vec::new(), + }, + }) + }) + .collect::>(); + + Ok(ToolOutput::Function { + content: json!({ "variants": variants }).to_string(), + content_items: None, + success: Some(true), + }) + } +} + +fn fmt_variant_duration(elapsed: Duration) -> String { + let secs = elapsed.as_secs_f64(); + if secs < 60.0 { + return format!("{secs:.1}s"); + } + + let whole_secs = elapsed.as_secs(); + let minutes = whole_secs / 60; + let seconds = whole_secs % 60; + format!("{minutes}m {seconds:02}s") +} + +fn fmt_exec_activity_command(command: &[String]) -> String { + if command.is_empty() { + return "shell".to_string(); + } + + let cmd = if let Some((_shell, script)) = crate::parse_command::extract_shell_command(command) { + let script = script.trim(); + if 
script.is_empty() { + "shell".to_string() + } else { + script + .lines() + .map(str::trim) + .filter(|line| !line.is_empty()) + .collect::>() + .join(" ") + } + } else { + crate::parse_command::shlex_join(command) + }; + + if cmd.is_empty() { + "shell".to_string() + } else { + cmd + } +} + +fn activity_for_event(msg: &EventMsg) -> Option { + match msg { + EventMsg::TaskStarted(_) => Some("starting".to_string()), + EventMsg::UserMessage(_) => Some("sending prompt".to_string()), + EventMsg::AgentReasoning(_) + | EventMsg::AgentReasoningDelta(_) + | EventMsg::AgentReasoningRawContent(_) + | EventMsg::AgentReasoningRawContentDelta(_) + | EventMsg::AgentReasoningSectionBreak(_) => Some("thinking".to_string()), + EventMsg::AgentMessage(_) | EventMsg::AgentMessageDelta(_) => Some("writing".to_string()), + EventMsg::ExecCommandBegin(ev) => Some(fmt_exec_activity_command(&ev.command)), + EventMsg::McpToolCallBegin(ev) => Some(format!( + "mcp {}/{}", + ev.invocation.server.trim(), + ev.invocation.tool.trim() + )), + EventMsg::WebSearchBegin(_) => Some("web_search".to_string()), + _ => None, + } +} + +fn fmt_variant_tokens(tokens: i64) -> Option { + if tokens <= 0 { + return None; + } + + let tokens_f = tokens as f64; + if tokens < 1_000 { + return Some(format!("{tokens}")); + } + if tokens < 100_000 { + return Some(format!("{:.1}k", tokens_f / 1_000.0)); + } + if tokens < 1_000_000 { + return Some(format!("{}k", tokens / 1_000)); + } + if tokens < 100_000_000 { + return Some(format!("{:.1}M", tokens_f / 1_000_000.0)); + } + + Some(format!("{}M", tokens / 1_000_000)) +} + +async fn run_one_variant( + base_config: Config, + goal: String, + idx: usize, + total: usize, + label: String, + parent_session: Arc, + parent_ctx: Arc, +) -> PlanOutputEvent { + let mut cfg = base_config.clone(); + + // Do not override the base/system prompt; some environments restrict it to whitelisted prompts. + // Put plan-variant guidance in developer instructions instead. 
+ // + // Also avoid inheriting large caller developer instructions (e.g. plan mode's own instructions) + // into each variant, which can significantly increase token usage. Plan variants use a focused + // prompt and return JSON only. + cfg.developer_instructions = Some(build_plan_variant_developer_instructions(idx, total, "")); + + // Keep plan variants on the same model + reasoning settings as the parent turn, unless a + // plan-model override is configured. + cfg.model = Some( + parent_ctx + .plan_model + .clone() + .unwrap_or_else(|| parent_ctx.client.get_model()), + ); + cfg.model_reasoning_effort = parent_ctx + .plan_reasoning_effort + .or(parent_ctx.client.get_reasoning_effort()); + cfg.model_reasoning_summary = parent_ctx.client.get_reasoning_summary(); + + let mut features = cfg.features.clone(); + features + .disable(Feature::ApplyPatchFreeform) + .disable(Feature::WebSearchRequest) + .disable(Feature::ViewImageTool); + cfg.features = features; + cfg.approval_policy = + crate::config::Constrained::allow_any(codex_protocol::protocol::AskForApproval::Never); + cfg.sandbox_policy = codex_protocol::protocol::SandboxPolicy::ReadOnly; + + let input = vec![UserInput::Text { + text: format!("Goal: {goal}\n\nReturn plan variant #{idx}."), + }]; + + let cancel = CancellationToken::new(); + let session_for_events = Arc::clone(&parent_session); + let io = match run_codex_conversation_one_shot( + cfg, + Arc::clone(&parent_session.services.auth_manager), + Arc::clone(&parent_session.services.models_manager), + input, + parent_session, + Arc::clone(&parent_ctx), + cancel, + None, + SubAgentSource::Other(label), + ) + .await + { + Ok(io) => io, + Err(err) => { + return PlanOutputEvent { + title: variant_title(idx, total), + summary: format!("Failed to start subagent: {err}"), + plan: UpdatePlanArgs { + explanation: None, + plan: Vec::new(), + }, + }; + } + }; + + let mut last_agent_message: Option = None; + let mut last_activity: Option = None; + let mut 
last_reported_tokens: Option = None; + let mut last_token_update_at: Option = None; + while let Ok(Event { msg, .. }) = io.rx_event.recv().await { + if let EventMsg::TokenCount(ev) = &msg + && let Some(info) = &ev.info + { + let tokens = info.total_token_usage.blended_total(); + let now = Instant::now(); + let should_report = match (last_reported_tokens, last_token_update_at) { + (Some(prev), Some(prev_at)) => { + tokens > prev + && (tokens - prev >= 250 || now.duration_since(prev_at).as_secs() >= 2) + } + (Some(prev), None) => tokens > prev, + (None, _) => tokens > 0, + }; + + if should_report && let Some(formatted) = fmt_variant_tokens(tokens) { + session_for_events + .notify_background_event( + parent_ctx.as_ref(), + format!("Plan variant {idx}/{total}: tokens {formatted}"), + ) + .await; + last_reported_tokens = Some(tokens); + last_token_update_at = Some(now); + } + } + + if let Some(activity) = activity_for_event(&msg) + && last_activity.as_deref() != Some(activity.as_str()) + { + session_for_events + .notify_background_event( + parent_ctx.as_ref(), + format!("Plan variant {idx}/{total}: {activity}"), + ) + .await; + last_activity = Some(activity); + } + + match msg { + EventMsg::TaskComplete(ev) => { + last_agent_message = ev.last_agent_message; + break; + } + EventMsg::TurnAborted(_) => break, + _ => {} + } + } + + let text = last_agent_message.unwrap_or_default(); + parse_plan_output_event(idx, total, text.as_str()) +} + +fn parse_plan_output_event(idx: usize, total: usize, text: &str) -> PlanOutputEvent { + if let Ok(mut ev) = serde_json::from_str::(text) { + ev.title = variant_title(idx, total); + return ev; + } + if let (Some(start), Some(end)) = (text.find('{'), text.rfind('}')) + && start < end + && let Some(slice) = text.get(start..=end) + && let Ok(mut ev) = serde_json::from_str::(slice) + { + ev.title = variant_title(idx, total); + return ev; + } + PlanOutputEvent { + title: variant_title(idx, total), + summary: "Subagent did not return valid 
JSON.".to_string(), + plan: UpdatePlanArgs { + explanation: Some(text.to_string()), + plan: Vec::new(), + }, + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn exec_activity_command_strips_powershell_wrapper() { + let shell = if cfg!(windows) { + "C:\\windows\\System32\\WindowsPowerShell\\v1.0\\powershell.exe" + } else { + "/usr/local/bin/powershell.exe" + }; + let cmd = vec![ + shell.to_string(), + "-NoProfile".to_string(), + "-Command".to_string(), + "rg --version".to_string(), + ]; + assert_eq!(fmt_exec_activity_command(&cmd), "rg --version"); + } + + #[test] + fn exec_activity_command_strips_bash_lc_wrapper() { + let cmd = vec![ + "bash".to_string(), + "-lc".to_string(), + "rg --version".to_string(), + ]; + assert_eq!(fmt_exec_activity_command(&cmd), "rg --version"); + } + + #[test] + fn plan_variant_titles_are_stable() { + assert_eq!(variant_title(1, 3), "Minimal"); + assert_eq!(variant_title(2, 3), "Correctness"); + assert_eq!(variant_title(3, 3), "DX"); + assert_eq!(variant_title(4, 3), "Variant 4/3"); + assert_eq!(variant_title(1, 2), "Variant 1/2"); + } + + #[test] + fn plan_variant_output_titles_are_normalized() { + let ev = parse_plan_output_event( + 2, + 3, + r#"{ "title": "Something else", "summary": "ok", "plan": { "explanation": null, "plan": [] } }"#, + ); + assert_eq!(ev.title, "Correctness"); + } + + #[test] + fn plan_variants_do_not_override_base_instructions() { + let codex_home = tempfile::TempDir::new().expect("tmp dir"); + let overrides = { + #[cfg(target_os = "linux")] + { + use assert_cmd::cargo::cargo_bin; + let mut overrides = crate::config::ConfigOverrides::default(); + overrides.codex_linux_sandbox_exe = Some(cargo_bin("codex-linux-sandbox")); + overrides + } + #[cfg(not(target_os = "linux"))] + { + crate::config::ConfigOverrides::default() + } + }; + let mut cfg = crate::config::Config::load_from_base_config_with_overrides( + crate::config::ConfigToml::default(), + overrides, + codex_home.path().to_path_buf(), + ) + 
.expect("load test config"); + + cfg.base_instructions = None; + cfg.developer_instructions = Some("existing developer instructions".to_string()); + + let existing_base = cfg.base_instructions.clone(); + let existing = cfg.developer_instructions.clone().unwrap_or_default(); + cfg.developer_instructions = Some(build_plan_variant_developer_instructions( + 1, + 3, + existing.as_str(), + )); + + assert_eq!(cfg.base_instructions, existing_base); + assert!( + cfg.developer_instructions + .as_deref() + .unwrap_or_default() + .starts_with("You are a planning subagent") + ); + assert!( + cfg.developer_instructions + .as_deref() + .unwrap_or_default() + .contains("existing developer instructions") + ); + } + + #[test] + fn plan_variants_require_explanation_sections() { + let required = [ + "Assumptions", + "Scope (in-scope + non-goals)", + "Touchpoints (files/modules/components to change, with what/why)", + "Approach (sequence notes; include a short \"discovery checklist\" of 2-6 read-only commands/files if the task is ambiguous)", + "Risks (failure modes + mitigations + rollback)", + "Acceptance criteria (observable outcomes; 3-8 bullets)", + "Validation (exact commands, and where to run them)", + ]; + + for needle in required { + assert!( + PLAN_VARIANT_PROMPT.contains(needle), + "missing required section anchor: {needle}" + ); + } + } +} diff --git a/codex-rs/core/src/tools/handlers/read_file.rs b/codex-rs/core/src/tools/handlers/read_file.rs index 58b6ea6888b..98174db5337 100644 --- a/codex-rs/core/src/tools/handlers/read_file.rs +++ b/codex-rs/core/src/tools/handlers/read_file.rs @@ -41,7 +41,9 @@ struct ReadFileArgs { #[derive(Deserialize)] #[serde(rename_all = "snake_case")] +#[derive(Default)] enum ReadMode { + #[default] Slice, Indentation, } @@ -464,12 +466,6 @@ mod defaults { } } - impl Default for ReadMode { - fn default() -> Self { - Self::Slice - } - } - pub fn offset() -> usize { 1 } diff --git a/codex-rs/core/src/tools/spec.rs 
b/codex-rs/core/src/tools/spec.rs index 35ebaf736fb..3839a8196e9 100644 --- a/codex-rs/core/src/tools/spec.rs +++ b/codex-rs/core/src/tools/spec.rs @@ -3,7 +3,10 @@ use crate::client_common::tools::ToolSpec; use crate::features::Feature; use crate::features::Features; use crate::openai_models::model_family::ModelFamily; +use crate::tools::handlers::APPROVE_PLAN_TOOL_NAME; +use crate::tools::handlers::ASK_USER_QUESTION_TOOL_NAME; use crate::tools::handlers::PLAN_TOOL; +use crate::tools::handlers::PROPOSE_PLAN_VARIANTS_TOOL_NAME; use crate::tools::handlers::apply_patch::create_apply_patch_freeform_tool; use crate::tools::handlers::apply_patch::create_apply_patch_json_tool; use crate::tools::registry::ToolRegistryBuilder; @@ -16,6 +19,34 @@ use serde_json::json; use std::collections::BTreeMap; use std::collections::HashMap; +pub(crate) const ASK_USER_QUESTION_DEVELOPER_INSTRUCTIONS: &str = r#"## AskUserQuestion +Use `ask_user_question` when you need the user to make a decision or clarify requirements during execution. + +- Do not ask these questions in plain text. Immediately call `ask_user_question` and wait for the tool result. +- If you have multiple questions, include them in a single `ask_user_question` call (up to 4). +- Use `multiSelect: true` when multiple answers are allowed. +- Do not include an "Other" option; the UI provides it automatically. +- Do not include numbering in option labels (e.g. "1:", "2.", "A)"); the UI provides numbering. +- If you recommend an option, put it first and add "(Recommended)" to its label. 
+"#; + +pub(crate) fn prepend_ask_user_question_developer_instructions( + developer_instructions: Option, +) -> Option { + if let Some(existing) = developer_instructions.as_deref() + && (existing.contains(ASK_USER_QUESTION_TOOL_NAME) || existing.contains("AskUserQuestion")) + { + return developer_instructions; + } + + match developer_instructions { + Some(existing) => Some(format!( + "{ASK_USER_QUESTION_DEVELOPER_INSTRUCTIONS}\n{existing}" + )), + None => Some(ASK_USER_QUESTION_DEVELOPER_INSTRUCTIONS.to_string()), + } +} + #[derive(Debug, Clone)] pub(crate) struct ToolsConfig { pub shell_type: ConfigShellToolType, @@ -256,6 +287,198 @@ fn create_write_stdin_tool() -> ToolSpec { }) } +fn create_ask_user_question_tool() -> ToolSpec { + let mut option_props = BTreeMap::new(); + option_props.insert( + "label".to_string(), + JsonSchema::String { + description: Some( + "Short display label (1-5 words). Do not prefix with numbering; the UI provides numbering." + .to_string(), + ), + }, + ); + option_props.insert( + "description".to_string(), + JsonSchema::String { + description: Some("What this option means / trade-offs.".to_string()), + }, + ); + + let mut question_props = BTreeMap::new(); + question_props.insert( + "question".to_string(), + JsonSchema::String { + description: Some( + "The complete question to ask the user (end with a '?').".to_string(), + ), + }, + ); + question_props.insert( + "header".to_string(), + JsonSchema::String { + description: Some("Short tag/label (max 12 chars).".to_string()), + }, + ); + question_props.insert( + "options".to_string(), + JsonSchema::Array { + items: Box::new(JsonSchema::Object { + properties: option_props, + required: Some(vec!["label".to_string(), "description".to_string()]), + additional_properties: Some(false.into()), + }), + description: Some( + "2-4 options. Do not include an 'Other' option; it is provided automatically." 
+ .to_string(), + ), + }, + ); + question_props.insert( + "multiSelect".to_string(), + JsonSchema::Boolean { + description: Some( + "Set true to allow selecting multiple options (not mutually exclusive)." + .to_string(), + ), + }, + ); + + let mut root_props = BTreeMap::new(); + root_props.insert( + "questions".to_string(), + JsonSchema::Array { + items: Box::new(JsonSchema::Object { + properties: question_props, + required: Some(vec![ + "question".to_string(), + "header".to_string(), + "options".to_string(), + "multiSelect".to_string(), + ]), + additional_properties: Some(false.into()), + }), + description: Some("1-4 questions to ask the user.".to_string()), + }, + ); + + ToolSpec::Function(ResponsesApiTool { + name: ASK_USER_QUESTION_TOOL_NAME.to_string(), + description: "Ask the user 1-4 multiple-choice questions during execution to clarify requirements. Do not ask these questions in plain text; call this tool to pause and wait. The UI always provides an 'Other' choice for custom text input." 
+ .to_string(), + strict: false, + parameters: JsonSchema::Object { + properties: root_props, + required: Some(vec!["questions".to_string()]), + additional_properties: Some(false.into()), + }, + }) +} + +fn create_approve_plan_tool() -> ToolSpec { + let mut plan_item_props = BTreeMap::new(); + plan_item_props.insert("step".to_string(), JsonSchema::String { description: None }); + plan_item_props.insert( + "status".to_string(), + JsonSchema::String { + description: Some("One of: pending, in_progress, completed".to_string()), + }, + ); + + let update_plan_props = { + let mut props = BTreeMap::new(); + props.insert( + "explanation".to_string(), + JsonSchema::String { + description: Some("Optional explanation for the plan.".to_string()), + }, + ); + props.insert( + "plan".to_string(), + JsonSchema::Array { + items: Box::new(JsonSchema::Object { + properties: plan_item_props, + required: Some(vec!["step".to_string(), "status".to_string()]), + additional_properties: Some(false.into()), + }), + description: Some("The list of steps.".to_string()), + }, + ); + props + }; + + let mut proposal_props = BTreeMap::new(); + proposal_props.insert( + "title".to_string(), + JsonSchema::String { + description: Some("Short title for the plan.".to_string()), + }, + ); + proposal_props.insert( + "summary".to_string(), + JsonSchema::String { + description: Some("Short summary of what the plan will do.".to_string()), + }, + ); + proposal_props.insert( + "plan".to_string(), + JsonSchema::Object { + properties: update_plan_props, + required: Some(vec!["plan".to_string()]), + additional_properties: Some(false.into()), + }, + ); + + let mut root_props = BTreeMap::new(); + root_props.insert( + "proposal".to_string(), + JsonSchema::Object { + properties: proposal_props, + required: Some(vec![ + "title".to_string(), + "summary".to_string(), + "plan".to_string(), + ]), + additional_properties: Some(false.into()), + }, + ); + + ToolSpec::Function(ResponsesApiTool { + name: 
APPROVE_PLAN_TOOL_NAME.to_string(), + description: "Present a plan to the user for approval. The UI will offer options to approve, request revisions (with feedback), or reject." + .to_string(), + strict: false, + parameters: JsonSchema::Object { + properties: root_props, + required: Some(vec!["proposal".to_string()]), + additional_properties: Some(false.into()), + }, + }) +} + +fn create_propose_plan_variants_tool() -> ToolSpec { + let mut root_props = BTreeMap::new(); + root_props.insert( + "goal".to_string(), + JsonSchema::String { + description: Some("The user's goal to plan for.".to_string()), + }, + ); + + ToolSpec::Function(ResponsesApiTool { + name: PROPOSE_PLAN_VARIANTS_TOOL_NAME.to_string(), + description: + "Generate 3 plan variants for a given goal using non-interactive planning subagents." + .to_string(), + strict: false, + parameters: JsonSchema::Object { + properties: root_props, + required: Some(vec!["goal".to_string()]), + additional_properties: Some(false.into()), + }, + }) +} + fn create_shell_tool() -> ToolSpec { let mut properties = BTreeMap::new(); properties.insert( @@ -981,11 +1204,14 @@ pub(crate) fn build_specs( mcp_tools: Option>, ) -> ToolRegistryBuilder { use crate::tools::handlers::ApplyPatchHandler; + use crate::tools::handlers::AskUserQuestionHandler; use crate::tools::handlers::GrepFilesHandler; use crate::tools::handlers::ListDirHandler; use crate::tools::handlers::McpHandler; use crate::tools::handlers::McpResourceHandler; + use crate::tools::handlers::PlanApprovalHandler; use crate::tools::handlers::PlanHandler; + use crate::tools::handlers::PlanVariantsHandler; use crate::tools::handlers::ReadFileHandler; use crate::tools::handlers::ShellCommandHandler; use crate::tools::handlers::ShellHandler; @@ -999,8 +1225,11 @@ pub(crate) fn build_specs( let shell_handler = Arc::new(ShellHandler); let unified_exec_handler = Arc::new(UnifiedExecHandler); let plan_handler = Arc::new(PlanHandler); + let plan_approval_handler = 
Arc::new(PlanApprovalHandler); + let plan_variants_handler = Arc::new(PlanVariantsHandler); let apply_patch_handler = Arc::new(ApplyPatchHandler); let view_image_handler = Arc::new(ViewImageHandler); + let ask_user_question_handler = Arc::new(AskUserQuestionHandler); let mcp_handler = Arc::new(McpHandler); let mcp_resource_handler = Arc::new(McpResourceHandler); let shell_command_handler = Arc::new(ShellCommandHandler); @@ -1044,6 +1273,15 @@ pub(crate) fn build_specs( builder.push_spec(PLAN_TOOL.clone()); builder.register_handler("update_plan", plan_handler); + builder.push_spec(create_ask_user_question_tool()); + builder.register_handler(ASK_USER_QUESTION_TOOL_NAME, ask_user_question_handler); + + builder.push_spec(create_approve_plan_tool()); + builder.register_handler(APPROVE_PLAN_TOOL_NAME, plan_approval_handler); + + builder.push_spec(create_propose_plan_variants_tool()); + builder.register_handler(PROPOSE_PLAN_VARIANTS_TOOL_NAME, plan_variants_handler); + if let Some(apply_patch_tool_type) = &config.apply_patch_tool_type { match apply_patch_tool_type { ApplyPatchToolType::Freeform => { @@ -1258,6 +1496,9 @@ mod tests { create_list_mcp_resource_templates_tool(), create_read_mcp_resource_tool(), PLAN_TOOL.clone(), + create_ask_user_question_tool(), + create_approve_plan_tool(), + create_propose_plan_variants_tool(), create_apply_patch_freeform_tool(), ToolSpec::WebSearch {}, create_view_image_tool(), @@ -1303,6 +1544,9 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "ask_user_question", + "approve_plan", + "propose_plan_variants", "apply_patch", "view_image", ], @@ -1320,6 +1564,9 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "ask_user_question", + "approve_plan", + "propose_plan_variants", "apply_patch", "view_image", ], @@ -1340,6 +1587,9 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "ask_user_question", + "approve_plan", + "propose_plan_variants", 
"apply_patch", "web_search", "view_image", @@ -1361,6 +1611,9 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "ask_user_question", + "approve_plan", + "propose_plan_variants", "apply_patch", "web_search", "view_image", @@ -1379,6 +1632,9 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "ask_user_question", + "approve_plan", + "propose_plan_variants", "view_image", ], ); @@ -1395,6 +1651,9 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "ask_user_question", + "approve_plan", + "propose_plan_variants", "apply_patch", "view_image", ], @@ -1412,6 +1671,9 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "ask_user_question", + "approve_plan", + "propose_plan_variants", "view_image", ], ); @@ -1428,6 +1690,9 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "ask_user_question", + "approve_plan", + "propose_plan_variants", "apply_patch", "view_image", ], @@ -1446,6 +1711,9 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "ask_user_question", + "approve_plan", + "propose_plan_variants", "apply_patch", "view_image", ], @@ -1466,6 +1734,9 @@ mod tests { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "ask_user_question", + "approve_plan", + "propose_plan_variants", "web_search", "view_image", ], diff --git a/codex-rs/core/tests/suite/client.rs b/codex-rs/core/tests/suite/client.rs index 35a67a69299..a39272a6ed2 100644 --- a/codex-rs/core/tests/suite/client.rs +++ b/codex-rs/core/tests/suite/client.rs @@ -67,18 +67,6 @@ fn assert_message_role(request_body: &serde_json::Value, role: &str) { assert_eq!(request_body["role"].as_str().unwrap(), role); } -#[expect(clippy::expect_used)] -fn assert_message_equals(request_body: &serde_json::Value, text: &str) { - let content = request_body["content"][0]["text"] - .as_str() - .expect("invalid message 
content"); - - assert_eq!( - content, text, - "expected message content '{content}' to equal '{text}'" - ); -} - #[expect(clippy::expect_used)] fn assert_message_starts_with(request_body: &serde_json::Value, text: &str) { let content = request_body["content"][0]["text"] @@ -1066,7 +1054,8 @@ async fn includes_developer_instructions_message_in_request() { .contains("be nice") ); assert_message_role(&request_body["input"][0], "developer"); - assert_message_equals(&request_body["input"][0], "be useful"); + assert_message_starts_with(&request_body["input"][0], "be useful"); + assert_message_ends_with(&request_body["input"][0], "be useful"); assert_message_role(&request_body["input"][1], "user"); assert_message_starts_with(&request_body["input"][1], "# AGENTS.md instructions for "); assert_message_ends_with(&request_body["input"][1], ""); diff --git a/codex-rs/core/tests/suite/deprecation_notice.rs b/codex-rs/core/tests/suite/deprecation_notice.rs index bab715ebd80..c70e8e0a30c 100644 --- a/codex-rs/core/tests/suite/deprecation_notice.rs +++ b/codex-rs/core/tests/suite/deprecation_notice.rs @@ -42,7 +42,7 @@ async fn emits_deprecation_notice_for_legacy_feature_flag() -> anyhow::Result<() assert_eq!( details.as_deref(), Some( - "Enable it with `--enable unified_exec` or `[features].unified_exec` in config.toml. See https://github.com/openai/codex/blob/main/docs/config.md#feature-flags for details." + "Enable it with `--enable unified_exec` or `[features].unified_exec` in config.toml. See https://github.com/Ixe1/codexel/blob/main/docs/config.md#feature-flags for details." 
), ); diff --git a/codex-rs/core/tests/suite/list_models.rs b/codex-rs/core/tests/suite/list_models.rs index 7db4cf76a5c..8cbcc063ad6 100644 --- a/codex-rs/core/tests/suite/list_models.rs +++ b/codex-rs/core/tests/suite/list_models.rs @@ -184,6 +184,10 @@ fn gpt52_codex_upgrade() -> codex_protocol::openai_models::ModelUpgrade { reasoning_effort_mapping: None, migration_config_key: "gpt-5.2-codex".to_string(), model_link: Some("https://openai.com/index/introducing-gpt-5-2-codex".to_string()), + upgrade_copy: Some( + "Codex is now powered by gpt-5.2-codex, our latest frontier agentic coding model. It is smarter and faster than its predecessors and capable of long-running project-scale work." + .to_string(), + ), } } diff --git a/codex-rs/core/tests/suite/mod.rs b/codex-rs/core/tests/suite/mod.rs index e047899d722..5164709762c 100644 --- a/codex-rs/core/tests/suite/mod.rs +++ b/codex-rs/core/tests/suite/mod.rs @@ -38,6 +38,7 @@ mod live_cli; mod model_overrides; mod model_tools; mod otel; +mod plan_approval; mod prompt_caching; mod quota_exceeded; mod read_file; diff --git a/codex-rs/core/tests/suite/model_overrides.rs b/codex-rs/core/tests/suite/model_overrides.rs index 53a45e67868..c0680c3c14f 100644 --- a/codex-rs/core/tests/suite/model_overrides.rs +++ b/codex-rs/core/tests/suite/model_overrides.rs @@ -38,7 +38,9 @@ async fn override_turn_context_does_not_persist_when_config_exists() { approval_policy: None, sandbox_policy: None, model: Some("o3".to_string()), + plan_model: None, effort: Some(Some(ReasoningEffort::High)), + plan_effort: None, summary: None, }) .await @@ -80,7 +82,9 @@ async fn override_turn_context_does_not_create_config_file() { approval_policy: None, sandbox_policy: None, model: Some("o3".to_string()), + plan_model: None, effort: Some(Some(ReasoningEffort::Medium)), + plan_effort: None, summary: None, }) .await diff --git a/codex-rs/core/tests/suite/model_tools.rs b/codex-rs/core/tests/suite/model_tools.rs index cb2c5725f2b..493f6ce481d 100644 
--- a/codex-rs/core/tests/suite/model_tools.rs +++ b/codex-rs/core/tests/suite/model_tools.rs @@ -58,6 +58,9 @@ async fn model_selects_expected_tools() { "list_mcp_resource_templates".to_string(), "read_mcp_resource".to_string(), "update_plan".to_string(), + "ask_user_question".to_string(), + "approve_plan".to_string(), + "propose_plan_variants".to_string(), "view_image".to_string() ], "codex-mini-latest should expose the local shell tool", @@ -72,6 +75,9 @@ async fn model_selects_expected_tools() { "list_mcp_resource_templates".to_string(), "read_mcp_resource".to_string(), "update_plan".to_string(), + "ask_user_question".to_string(), + "approve_plan".to_string(), + "propose_plan_variants".to_string(), "apply_patch".to_string(), "view_image".to_string() ], @@ -87,6 +93,9 @@ async fn model_selects_expected_tools() { "list_mcp_resource_templates".to_string(), "read_mcp_resource".to_string(), "update_plan".to_string(), + "ask_user_question".to_string(), + "approve_plan".to_string(), + "propose_plan_variants".to_string(), "apply_patch".to_string(), "view_image".to_string() ], @@ -102,6 +111,9 @@ async fn model_selects_expected_tools() { "list_mcp_resource_templates".to_string(), "read_mcp_resource".to_string(), "update_plan".to_string(), + "ask_user_question".to_string(), + "approve_plan".to_string(), + "propose_plan_variants".to_string(), "view_image".to_string() ], "gpt-5 should expose the apply_patch tool", @@ -116,6 +128,9 @@ async fn model_selects_expected_tools() { "list_mcp_resource_templates".to_string(), "read_mcp_resource".to_string(), "update_plan".to_string(), + "ask_user_question".to_string(), + "approve_plan".to_string(), + "propose_plan_variants".to_string(), "apply_patch".to_string(), "view_image".to_string() ], @@ -131,6 +146,9 @@ async fn model_selects_expected_tools() { "list_mcp_resource_templates".to_string(), "read_mcp_resource".to_string(), "update_plan".to_string(), + "ask_user_question".to_string(), + "approve_plan".to_string(), + 
"propose_plan_variants".to_string(), "apply_patch".to_string(), "view_image".to_string() ], diff --git a/codex-rs/core/tests/suite/plan_approval.rs b/codex-rs/core/tests/suite/plan_approval.rs new file mode 100644 index 00000000000..ca6ffd99392 --- /dev/null +++ b/codex-rs/core/tests/suite/plan_approval.rs @@ -0,0 +1,158 @@ +use codex_core::protocol::AskForApproval; +use codex_core::protocol::EventMsg; +use codex_core::protocol::Op; +use codex_core::protocol::PlanApprovalResponse; +use codex_core::protocol::SandboxPolicy; +use codex_protocol::config_types::ReasoningSummary; +use codex_protocol::user_input::UserInput; +use core_test_support::responses; +use core_test_support::responses::ev_assistant_message; +use core_test_support::responses::ev_completed; +use core_test_support::responses::ev_function_call; +use core_test_support::responses::ev_response_created; +use core_test_support::responses::sse; +use core_test_support::responses::start_mock_server; +use core_test_support::skip_if_no_network; +use core_test_support::test_codex::test_codex; +use core_test_support::wait_for_event; +use serde_json::json; + +#[tokio::test(flavor = "multi_thread", worker_threads = 2)] +async fn plan_approval_approved_emits_immediate_background_and_plan_update_events() +-> anyhow::Result<()> { + skip_if_no_network!(Ok(())); + use pretty_assertions::assert_eq; + + const APPROVED_MESSAGE: &str = "Plan approved; continuing..."; + + let server = start_mock_server().await; + + let call_id = "approve-plan-call"; + let proposal = json!({ + "title": "Test Plan", + "summary": "Test plan summary", + "plan": { + "explanation": "Original plan explanation", + "plan": [ + {"step": "Step 1", "status": "pending"}, + {"step": "Step 2", "status": "in_progress"}, + ] + } + }); + let args = json!({ "proposal": proposal }).to_string(); + + let first_response = sse(vec![ + ev_response_created("resp-1"), + ev_function_call(call_id, "approve_plan", &args), + ev_completed("resp-1"), + ]); + 
responses::mount_sse_once(&server, first_response).await; + + let second_response = sse(vec![ + ev_assistant_message("msg-1", "continuing"), + ev_completed("resp-2"), + ]); + let second_mock = responses::mount_sse_once(&server, second_response).await; + + let test = test_codex().build(&server).await?; + let session_model = test.session_configured.model.clone(); + + let sub_id = test + .codex + .submit(Op::UserTurn { + items: vec![UserInput::Text { + text: "please request plan approval".into(), + }], + final_output_json_schema: None, + cwd: test.cwd.path().to_path_buf(), + approval_policy: AskForApproval::Never, + sandbox_policy: SandboxPolicy::DangerFullAccess, + model: session_model, + effort: None, + summary: ReasoningSummary::Auto, + }) + .await?; + + let plan_request = wait_for_event(&test.codex, |event| { + matches!( + event, + EventMsg::PlanApprovalRequest(_) | EventMsg::TaskComplete(_) + ) + }) + .await; + match plan_request { + EventMsg::PlanApprovalRequest(ev) => { + assert_eq!(ev.call_id, call_id); + assert_eq!(ev.proposal.title, "Test Plan"); + } + EventMsg::TaskComplete(_) => { + panic!("expected PlanApprovalRequest before completion"); + } + other => { + panic!("unexpected event: {other:?}"); + } + } + + let _ = test + .codex + .submit(Op::ResolvePlanApproval { + id: sub_id, + response: PlanApprovalResponse::Approved, + }) + .await?; + + let mut saw_background = false; + let mut saw_plan_update = None; + for _ in 0..2 { + let ev = wait_for_event(&test.codex, |event| { + matches!( + event, + EventMsg::BackgroundEvent(_) | EventMsg::PlanUpdate(_) | EventMsg::TaskComplete(_) + ) + }) + .await; + match ev { + EventMsg::BackgroundEvent(bg) => { + assert_eq!(bg.message, APPROVED_MESSAGE); + saw_background = true; + } + EventMsg::PlanUpdate(update) => { + saw_plan_update = Some(update); + } + EventMsg::TaskComplete(_) => { + panic!("expected background/plan update before completion"); + } + other => panic!("unexpected event: {other:?}"), + } + } + + 
assert!(saw_background, "expected a BackgroundEvent after approval"); + + let update = saw_plan_update.expect("expected a PlanUpdate after approval"); + assert_eq!(update.explanation, Some(APPROVED_MESSAGE.to_string())); + let update_json = serde_json::to_value(&update)?; + assert_eq!( + update_json, + json!({ + "explanation": APPROVED_MESSAGE, + "plan": [ + {"step": "Step 1", "status": "pending"}, + {"step": "Step 2", "status": "in_progress"} + ] + }) + ); + + wait_for_event(&test.codex, |event| { + matches!(event, EventMsg::TaskComplete(_)) + }) + .await; + + let req = second_mock.single_request(); + let output_text = req + .function_call_output_text(call_id) + .expect("approve_plan should include function_call_output"); + let output_json: serde_json::Value = serde_json::from_str(&output_text)?; + assert_eq!(output_json["response"]["type"], "approved"); + + Ok(()) +} diff --git a/codex-rs/core/tests/suite/prompt_caching.rs b/codex-rs/core/tests/suite/prompt_caching.rs index b0b58b8d8cc..0a07aee370d 100644 --- a/codex-rs/core/tests/suite/prompt_caching.rs +++ b/codex-rs/core/tests/suite/prompt_caching.rs @@ -121,6 +121,9 @@ async fn prompt_tools_are_consistent_across_requests() -> anyhow::Result<()> { "list_mcp_resource_templates", "read_mcp_resource", "update_plan", + "ask_user_question", + "approve_plan", + "propose_plan_variants", "apply_patch", "view_image", ]; @@ -323,7 +326,9 @@ async fn overrides_turn_context_but_keeps_cached_prefix_and_key_constant() -> an exclude_slash_tmp: true, }), model: Some("o3".to_string()), + plan_model: None, effort: Some(Some(ReasoningEffort::High)), + plan_effort: None, summary: Some(ReasoningSummary::Detailed), }) .await?; @@ -402,7 +407,9 @@ async fn override_before_first_turn_emits_environment_context() -> anyhow::Resul approval_policy: Some(AskForApproval::Never), sandbox_policy: None, model: None, + plan_model: None, effort: None, + plan_effort: None, summary: None, }) .await?; diff --git 
a/codex-rs/core/tests/suite/remote_models.rs b/codex-rs/core/tests/suite/remote_models.rs index f95eef7ad62..a410d0b08ec 100644 --- a/codex-rs/core/tests/suite/remote_models.rs +++ b/codex-rs/core/tests/suite/remote_models.rs @@ -137,7 +137,9 @@ async fn remote_models_remote_model_uses_unified_exec() -> Result<()> { approval_policy: None, sandbox_policy: None, model: Some(REMOTE_MODEL_SLUG.to_string()), + plan_model: None, effort: None, + plan_effort: None, summary: None, }) .await?; @@ -270,7 +272,9 @@ async fn remote_models_apply_remote_base_instructions() -> Result<()> { approval_policy: None, sandbox_policy: None, model: Some(model.to_string()), + plan_model: None, effort: None, + plan_effort: None, summary: None, }) .await?; diff --git a/codex-rs/default.nix b/codex-rs/default.nix index 26971f18467..a19bb7e440d 100644 --- a/codex-rs/default.nix +++ b/codex-rs/default.nix @@ -25,8 +25,8 @@ rustPlatform.buildRustPackage (_: { }; meta = with lib; { - description = "OpenAI Codex command‑line interface rust implementation"; + description = "Codexel command-line interface rust implementation"; license = licenses.asl20; - homepage = "https://github.com/openai/codex"; + homepage = "https://github.com/Ixe1/codexel"; }; }) diff --git a/codex-rs/docs/codex_mcp_interface.md b/codex-rs/docs/codex_mcp_interface.md index a7236e363e8..daf95b2eadb 100644 --- a/codex-rs/docs/codex_mcp_interface.md +++ b/codex-rs/docs/codex_mcp_interface.md @@ -3,7 +3,7 @@ This document describes Codex’s experimental MCP server interface: a JSON‑RPC API that runs over the Model Context Protocol (MCP) transport to control a local Codex engine. 
- Status: experimental and subject to change without notice -- Server binary: `codex mcp-server` (or `codex-mcp-server`) +- Server binary: `codexel mcp-server` (or `codex-mcp-server`) - Transport: standard MCP over stdio (JSON‑RPC 2.0, line‑delimited) ## Overview @@ -38,16 +38,16 @@ See code for full type definitions and exact shapes: `protocol/src/mcp_protocol. Run Codex as an MCP server and connect an MCP client: ```bash -codex mcp-server | your_mcp_client +codexel mcp-server | your_mcp_client ``` For a simple inspection UI, you can also try: ```bash -npx @modelcontextprotocol/inspector codex mcp-server +npx @modelcontextprotocol/inspector codexel mcp-server ``` -Use the separate `codex mcp` subcommand to manage configured MCP server launchers in `config.toml`. +Use the separate `codexel mcp` subcommand to manage configured MCP server launchers in `config.toml`. ## Conversations diff --git a/codex-rs/docs/protocol_v1.md b/codex-rs/docs/protocol_v1.md index 805abb0ea8c..b4dd6c7c9c5 100644 --- a/codex-rs/docs/protocol_v1.md +++ b/codex-rs/docs/protocol_v1.md @@ -68,10 +68,17 @@ For complete documentation of the `Op` and `EventMsg` variants, refer to [protoc - `Op::UserInput` – Any input from the user to kick off a `Task` - `Op::Interrupt` – Interrupts a running task - `Op::ExecApproval` – Approve or deny code execution + - `Op::ResolveAskUserQuestion` – Reply to an interactive question prompt + - `Op::Plan` – Start a planning session (/plan) + - `Op::ResolvePlanApproval` – Reply to an interactive plan approval prompt - `Op::ListSkills` – Request skills for one or more cwd values (optionally `force_reload`) - `EventMsg` - `EventMsg::AgentMessage` – Messages from the `Model` - `EventMsg::ExecApprovalRequest` – Request approval from user to execute a command + - `EventMsg::AskUserQuestionRequest` – Ask the user a multiple-choice question and await an answer + - `EventMsg::PlanApprovalRequest` – Ask the user to approve / revise / reject a proposed plan + - 
`EventMsg::EnteredPlanMode` – Notify the UI that plan mode started + - `EventMsg::ExitedPlanMode` – Notify the UI that plan mode ended (optional final plan included) - `EventMsg::TaskComplete` – A task completed successfully - `EventMsg::Error` – A task stopped with an error - `EventMsg::Warning` – A non-fatal warning that the client should surface to the user @@ -173,3 +180,94 @@ sequenceDiagram task2->>user: Event::TurnCompleted task2->>-user: Event::TaskCompleted ``` + +### AskUserQuestion (interactive prompt) + +Pausing a task to ask the user a question, then resuming after the answer is provided. + +```mermaid +sequenceDiagram + box UI + participant user as User + end + box Daemon + participant session as Session + participant task as Task + end + box Rest API + participant agent as Model + end + user->>session: Op::UserInput + session-->>+task: start task + task->>agent: prompt + agent->>task: response (tool call: ask_user_question) + task->>user: Event::AskUserQuestionRequest + user->>task: Op::ResolveAskUserQuestion + task->>agent: tool output (answers) + agent->>task: response (continue) + task->>-user: Event::AgentMessage +``` + +### PlanApproval (interactive prompt) + +Pausing a task to ask the user to approve a proposed plan, then resuming after the decision is provided. 
+ +```mermaid +sequenceDiagram + box UI + participant user as User + end + box Daemon + participant session as Session + participant task as Task + end + box Rest API + participant agent as Model + end + user->>session: Op::UserInput + session-->>+task: start task + task->>agent: prompt + agent->>task: response (tool call: approve_plan) + task->>user: Event::PlanApprovalRequest + user->>task: Op::ResolvePlanApproval + task->>agent: tool output (approved/revised/rejected) + agent->>task: response (continue) + task->>-user: Event::AgentMessage +``` + +Notes: +- When the user approves (`Op::ResolvePlanApproval` with `Approved`), the daemon may emit an immediate `Event::BackgroundEvent` and `Event::PlanUpdate` so UIs can show visible progress before the model produces more output. + +### Plan Mode (/plan) + +Starting a planning session that runs in a dedicated planning context, optionally uses planning subagents for variants, then exits back to the main session. + +Notes: +- When a plan is approved, the daemon emits `Event::ExitedPlanMode` with a `PlanOutputEvent` that includes the approved `title`, `summary`, and `plan` (including `explanation` + step list). UIs may render this directly. +- The daemon also records an assistant message summarizing the approved plan (title/summary/explanation/steps) so it appears in normal chat history. +- To make execution robust to conversation-history compaction, the daemon pins the approved plan into the next normal turn's developer instructions (consumed once) for interactive session sources (CLI/VSCode). +- Some clients may automatically start a follow-up normal turn after plan approval to execute the approved plan. 
+ +```mermaid +sequenceDiagram + box UI + participant user as User + end + box Daemon + participant session as Session + participant task as PlanTask + end + box Rest API + participant agent as PlanAgent + end + user->>session: Op::Plan { goal } + session-->>+task: start plan task + task->>user: Event::EnteredPlanMode + task->>agent: prompt (plan mode) + agent->>task: tool call(s) (propose_plan_variants) + agent->>task: tool call (approve_plan) + task->>user: Event::PlanApprovalRequest + user->>task: Op::ResolvePlanApproval + task->>user: Event::ExitedPlanMode + task->>-user: Event::TaskComplete +``` diff --git a/codex-rs/exec-server/tests/suite/bash b/codex-rs/exec-server/tests/suite/bash index 5f5d1e55939..33523e2e928 100755 --- a/codex-rs/exec-server/tests/suite/bash +++ b/codex-rs/exec-server/tests/suite/bash @@ -1,7 +1,7 @@ #!/usr/bin/env dotslash // This is an instance of the fork of Bash that we bundle with -// https://www.npmjs.com/package/@openai/codex-shell-tool-mcp. +// https://www.npmjs.com/package/@ixe1/codexel-shell-tool-mcp. // Fetching the prebuilt version via DotSlash makes it easier to write // integration tests for the MCP server. 
// diff --git a/codex-rs/exec/src/event_processor_with_human_output.rs b/codex-rs/exec/src/event_processor_with_human_output.rs index a43718d5694..062e4320a3d 100644 --- a/codex-rs/exec/src/event_processor_with_human_output.rs +++ b/codex-rs/exec/src/event_processor_with_human_output.rs @@ -134,11 +134,7 @@ impl EventProcessor for EventProcessorWithHumanOutput { session_configured_event: &SessionConfiguredEvent, ) { const VERSION: &str = env!("CARGO_PKG_VERSION"); - ts_msg!( - self, - "OpenAI Codex v{} (research preview)\n--------", - VERSION - ); + ts_msg!(self, "Codexel (v{})\n--------", VERSION); let mut entries = create_config_summary_entries(config, session_configured_event.model.as_str()); @@ -241,6 +237,35 @@ impl EventProcessor for EventProcessorWithHumanOutput { "auto-cancelling (not supported in exec mode)".style(self.dimmed) ); } + EventMsg::AskUserQuestionRequest(_) => { + ts_msg!( + self, + "{}", + "AskUserQuestion request (auto-cancelling in exec mode)".style(self.dimmed) + ); + } + EventMsg::PlanApprovalRequest(_) => { + ts_msg!( + self, + "{}", + "PlanApproval request (auto-rejecting in exec mode)".style(self.dimmed) + ); + } + EventMsg::EnteredPlanMode(req) => { + let goal = req.goal.trim(); + if goal.is_empty() { + ts_msg!(self, "{}", "plan mode: started".style(self.cyan)); + } else { + ts_msg!(self, "{} {}", "plan mode:".style(self.cyan), goal); + } + } + EventMsg::ExitedPlanMode(ev) => { + if ev.plan_output.is_some() { + ts_msg!(self, "{}", "plan mode: finished".style(self.cyan)); + } else { + ts_msg!(self, "{}", "plan mode: ended".style(self.cyan)); + } + } EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }) => { let last_message = last_agent_message.as_deref(); if let Some(output_file) = self.last_message_path.as_deref() { diff --git a/codex-rs/exec/src/lib.rs b/codex-rs/exec/src/lib.rs index 8559e30d574..e2d10689ceb 100644 --- a/codex-rs/exec/src/lib.rs +++ b/codex-rs/exec/src/lib.rs @@ -443,6 +443,22 @@ pub async fn 
run_main(cli: Cli, codex_linux_sandbox_exe: Option) -> any }) .await?; } + if matches!(event.msg, EventMsg::AskUserQuestionRequest(_)) { + conversation + .submit(Op::ResolveAskUserQuestion { + id: event.id.clone(), + response: codex_protocol::protocol::AskUserQuestionResponse::Cancelled, + }) + .await?; + } + if matches!(event.msg, EventMsg::PlanApprovalRequest(_)) { + conversation + .submit(Op::ResolvePlanApproval { + id: event.id.clone(), + response: codex_protocol::protocol::PlanApprovalResponse::Rejected, + }) + .await?; + } if matches!(event.msg, EventMsg::Error(_)) { error_seen = true; } diff --git a/codex-rs/execpolicy/README.md b/codex-rs/execpolicy/README.md index 288a46dcbc6..b96b3418c5d 100644 --- a/codex-rs/execpolicy/README.md +++ b/codex-rs/execpolicy/README.md @@ -20,9 +20,9 @@ prefix_rule( ``` ## CLI -- From the Codex CLI, run `codex execpolicy check` subcommand with one or more policy files (for example `src/default.rules`) to check a command: +- From Codexel, run the `codexel execpolicy check` subcommand with one or more policy files (for example `src/default.rules`) to check a command: ```bash -codex execpolicy check --rules path/to/policy.rules git status +codexel execpolicy check --rules path/to/policy.rules git status ``` - Pass multiple `--rules` flags to merge rules, evaluated in the order provided, and use `--pretty` for formatted JSON. 
- You can also run the standalone dev binary directly during development: diff --git a/codex-rs/linux-sandbox/README.md b/codex-rs/linux-sandbox/README.md index 676f2349541..76a793bda1a 100644 --- a/codex-rs/linux-sandbox/README.md +++ b/codex-rs/linux-sandbox/README.md @@ -2,7 +2,7 @@ This crate is responsible for producing: -- a `codex-linux-sandbox` standalone executable for Linux that is bundled with the Node.js version of the Codex CLI +- a `codex-linux-sandbox` standalone executable for Linux that is bundled with the Node.js version of Codexel - a lib crate that exposes the business logic of the executable as `run_main()` so that - the `codex-exec` CLI can check if its arg0 is `codex-linux-sandbox` and, if so, execute as if it were `codex-linux-sandbox` - - this should also be true of the `codex` multitool CLI + - this should also be true of the `codexel` multitool CLI diff --git a/codex-rs/mcp-server/src/ask_user_question.rs b/codex-rs/mcp-server/src/ask_user_question.rs new file mode 100644 index 00000000000..523e861e83d --- /dev/null +++ b/codex-rs/mcp-server/src/ask_user_question.rs @@ -0,0 +1,145 @@ +use std::collections::HashMap; +use std::sync::Arc; + +use codex_core::CodexConversation; +use codex_core::protocol::AskUserQuestion; +use codex_core::protocol::AskUserQuestionResponse; +use codex_core::protocol::Op; +use mcp_types::ElicitRequest; +use mcp_types::ElicitRequestParamsRequestedSchema; +use mcp_types::ModelContextProtocolRequest; +use mcp_types::RequestId; +use serde::Deserialize; +use serde::Serialize; +use serde_json::json; +use tracing::error; + +use crate::outgoing_message::OutgoingMessageSender; + +#[derive(Debug, Serialize)] +pub struct AskUserQuestionElicitRequestParams { + pub message: String, + #[serde(rename = "requestedSchema")] + pub requested_schema: ElicitRequestParamsRequestedSchema, + pub codex_elicitation: String, + pub codex_mcp_tool_call_id: String, + pub codex_event_id: String, + pub codex_call_id: String, + pub 
codex_questions: Vec, +} + +#[derive(Debug, Deserialize)] +#[serde(rename_all = "camelCase")] +pub struct AskUserQuestionElicitResponse { + pub answers: HashMap, +} + +#[allow(clippy::too_many_arguments)] +pub(crate) async fn handle_ask_user_question_request( + call_id: String, + questions: Vec, + outgoing: Arc, + codex: Arc, + _request_id: RequestId, + tool_call_id: String, + event_id: String, +) { + let message = if questions.len() == 1 { + questions[0].question.clone() + } else { + let question_lines = questions + .iter() + .map(|q| { + format!( + "- {header}: {question}", + header = q.header, + question = q.question + ) + }) + .collect::>() + .join("\n"); + format!("Codex needs your input:\n{question_lines}") + }; + + let params = AskUserQuestionElicitRequestParams { + message, + requested_schema: ElicitRequestParamsRequestedSchema { + r#type: "object".to_string(), + properties: json!({ + "answers": { + "type": "object", + "additionalProperties": { "type": "string" } + } + }), + required: Some(vec!["answers".to_string()]), + }, + codex_elicitation: "ask-user-question".to_string(), + codex_mcp_tool_call_id: tool_call_id.clone(), + codex_event_id: event_id.clone(), + codex_call_id: call_id, + codex_questions: questions, + }; + + let params_json = match serde_json::to_value(¶ms) { + Ok(value) => value, + Err(err) => { + error!("Failed to serialize AskUserQuestionElicitRequestParams: {err}"); + let _ = codex + .submit(Op::ResolveAskUserQuestion { + id: event_id, + response: AskUserQuestionResponse::Cancelled, + }) + .await; + return; + } + }; + + let on_response = outgoing + .send_request(ElicitRequest::METHOD, Some(params_json)) + .await; + + // Listen for the response on a separate task so we don't block the main agent loop. 
+ tokio::spawn(async move { + on_ask_user_question_response(event_id, on_response, codex).await; + }); +} + +async fn on_ask_user_question_response( + event_id: String, + receiver: tokio::sync::oneshot::Receiver, + codex: Arc, +) { + let value = match receiver.await { + Ok(value) => value, + Err(err) => { + error!("ask_user_question request failed: {err:?}"); + let _ = codex + .submit(Op::ResolveAskUserQuestion { + id: event_id, + response: AskUserQuestionResponse::Cancelled, + }) + .await; + return; + } + }; + + let response = serde_json::from_value::(value.clone()) + .or_else(|_| { + serde_json::from_value::(value) + .map(|r| AskUserQuestionResponse::Answered { answers: r.answers }) + }) + .unwrap_or_else(|err| { + error!("failed to deserialize AskUserQuestion response: {err}"); + AskUserQuestionResponse::Cancelled + }); + + if let Err(err) = codex + .submit(Op::ResolveAskUserQuestion { + id: event_id, + response, + }) + .await + { + error!("failed to submit ResolveAskUserQuestion: {err}"); + } +} diff --git a/codex-rs/mcp-server/src/codex_tool_runner.rs b/codex-rs/mcp-server/src/codex_tool_runner.rs index 39ae7486e55..12e2415b4c4 100644 --- a/codex-rs/mcp-server/src/codex_tool_runner.rs +++ b/codex-rs/mcp-server/src/codex_tool_runner.rs @@ -5,6 +5,7 @@ use std::collections::HashMap; use std::sync::Arc; +use crate::ask_user_question::handle_ask_user_question_request; use crate::exec_approval::handle_exec_approval_request; use crate::outgoing_message::OutgoingMessageSender; use crate::outgoing_message::OutgoingNotificationMeta; @@ -15,10 +16,12 @@ use codex_core::NewConversation; use codex_core::config::Config as CodexConfig; use codex_core::protocol::AgentMessageEvent; use codex_core::protocol::ApplyPatchApprovalRequestEvent; +use codex_core::protocol::AskUserQuestionRequestEvent; use codex_core::protocol::Event; use codex_core::protocol::EventMsg; use codex_core::protocol::ExecApprovalRequestEvent; use codex_core::protocol::Op; +use 
codex_core::protocol::PlanApprovalResponse; use codex_core::protocol::Submission; use codex_core::protocol::TaskCompleteEvent; use codex_protocol::ConversationId; @@ -211,6 +214,32 @@ async fn run_codex_tool_session_inner( // TODO: forward elicitation requests to the client? continue; } + EventMsg::AskUserQuestionRequest(AskUserQuestionRequestEvent { + call_id, + questions, + }) => { + handle_ask_user_question_request( + call_id, + questions, + outgoing.clone(), + codex.clone(), + request_id.clone(), + request_id_str.clone(), + event.id.clone(), + ) + .await; + continue; + } + EventMsg::PlanApprovalRequest(_) => { + // Plan approval is not supported via MCP today; reject to avoid hanging. + let _ = codex + .submit(Op::ResolvePlanApproval { + id: event.id.clone(), + response: PlanApprovalResponse::Rejected, + }) + .await; + continue; + } EventMsg::ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent { call_id, turn_id: _, @@ -299,6 +328,7 @@ async fn run_codex_tool_session_inner( | EventMsg::ViewImageToolCall(_) | EventMsg::RawResponseItem(_) | EventMsg::EnteredReviewMode(_) + | EventMsg::EnteredPlanMode(_) | EventMsg::ItemStarted(_) | EventMsg::ItemCompleted(_) | EventMsg::AgentMessageContentDelta(_) @@ -308,6 +338,7 @@ async fn run_codex_tool_session_inner( | EventMsg::UndoStarted(_) | EventMsg::UndoCompleted(_) | EventMsg::ExitedReviewMode(_) + | EventMsg::ExitedPlanMode(_) | EventMsg::ContextCompacted(_) | EventMsg::DeprecationNotice(_) => { // For now, we do not do anything extra for these diff --git a/codex-rs/mcp-server/src/lib.rs b/codex-rs/mcp-server/src/lib.rs index dabd7cca0f3..dbf2e4ae36d 100644 --- a/codex-rs/mcp-server/src/lib.rs +++ b/codex-rs/mcp-server/src/lib.rs @@ -19,6 +19,7 @@ use tracing::error; use tracing::info; use tracing_subscriber::EnvFilter; +mod ask_user_question; mod codex_tool_config; mod codex_tool_runner; mod error_code; @@ -31,6 +32,8 @@ use crate::message_processor::MessageProcessor; use 
crate::outgoing_message::OutgoingMessage; use crate::outgoing_message::OutgoingMessageSender; +pub use crate::ask_user_question::AskUserQuestionElicitRequestParams; +pub use crate::ask_user_question::AskUserQuestionElicitResponse; pub use crate::codex_tool_config::CodexToolCallParam; pub use crate::codex_tool_config::CodexToolCallReplyParam; pub use crate::exec_approval::ExecApprovalElicitRequestParams; diff --git a/codex-rs/protocol/README.md b/codex-rs/protocol/README.md index 7120d9f3b3f..3bd348d287c 100644 --- a/codex-rs/protocol/README.md +++ b/codex-rs/protocol/README.md @@ -1,6 +1,6 @@ # codex-protocol -This crate defines the "types" for the protocol used by Codex CLI, which includes both "internal types" for communication between `codex-core` and `codex-tui`, as well as "external types" used with `codex app-server`. +This crate defines the "types" for the protocol used by Codexel, which includes both "internal types" for communication between `codex-core` and `codex-tui`, as well as "external types" used with `codexel app-server`. This crate should have minimal dependencies. 
diff --git a/codex-rs/protocol/src/ask_user_question.rs b/codex-rs/protocol/src/ask_user_question.rs new file mode 100644 index 00000000000..f1a2d69251e --- /dev/null +++ b/codex-rs/protocol/src/ask_user_question.rs @@ -0,0 +1,41 @@ +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use std::collections::HashMap; +use ts_rs::TS; + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +pub struct AskUserQuestionOption { + pub label: String, + pub description: String, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +pub struct AskUserQuestion { + pub question: String, + pub header: String, + pub options: Vec, + pub multi_select: bool, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +pub struct AskUserQuestionArgs { + pub questions: Vec, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)] +#[serde(rename_all = "camelCase")] +pub struct AskUserQuestionRequestEvent { + pub call_id: String, + pub questions: Vec, +} + +#[derive(Debug, Clone, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)] +#[serde(tag = "type", rename_all = "snake_case")] +pub enum AskUserQuestionResponse { + Answered { answers: HashMap }, + Cancelled, +} diff --git a/codex-rs/protocol/src/lib.rs b/codex-rs/protocol/src/lib.rs index 0d6a0594fc7..07599913a1e 100644 --- a/codex-rs/protocol/src/lib.rs +++ b/codex-rs/protocol/src/lib.rs @@ -2,6 +2,7 @@ pub mod account; mod conversation_id; pub use conversation_id::ConversationId; pub mod approvals; +pub mod ask_user_question; pub mod config_types; pub mod custom_prompts; pub mod items; @@ -10,6 +11,8 @@ pub mod models; pub mod num_format; pub mod openai_models; pub mod parse_command; +pub mod plan_approval; +pub mod plan_mode; pub mod plan_tool; pub mod protocol; pub mod user_input; diff 
--git a/codex-rs/protocol/src/openai_models.rs b/codex-rs/protocol/src/openai_models.rs index 3fc29f9199c..28b25bb604e 100644 --- a/codex-rs/protocol/src/openai_models.rs +++ b/codex-rs/protocol/src/openai_models.rs @@ -53,6 +53,7 @@ pub struct ModelUpgrade { pub reasoning_effort_mapping: Option>, pub migration_config_key: String, pub model_link: Option, + pub upgrade_copy: Option, } /// Metadata describing a Codex-supported model. @@ -219,6 +220,7 @@ impl From for ModelPreset { migration_config_key: info.slug.clone(), // todo(aibrahim): add the model link here. model_link: None, + upgrade_copy: None, }), show_in_picker: info.visibility == ModelVisibility::List, supported_in_api: info.supported_in_api, diff --git a/codex-rs/protocol/src/plan_approval.rs b/codex-rs/protocol/src/plan_approval.rs new file mode 100644 index 00000000000..0afb90eadc9 --- /dev/null +++ b/codex-rs/protocol/src/plan_approval.rs @@ -0,0 +1,30 @@ +use schemars::JsonSchema; +use serde::Deserialize; +use serde::Serialize; +use ts_rs::TS; + +use crate::plan_tool::UpdatePlanArgs; + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, TS)] +#[serde(deny_unknown_fields)] +pub struct PlanProposal { + pub title: String, + pub summary: String, + pub plan: UpdatePlanArgs, +} + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, TS)] +#[serde(deny_unknown_fields)] +pub struct PlanApprovalRequestEvent { + pub call_id: String, + pub proposal: PlanProposal, +} + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, TS, PartialEq, Eq)] +#[serde(tag = "type", rename_all = "snake_case")] +#[ts(tag = "type", rename_all = "snake_case")] +pub enum PlanApprovalResponse { + Approved, + Revised { feedback: String }, + Rejected, +} diff --git a/codex-rs/protocol/src/plan_mode.rs b/codex-rs/protocol/src/plan_mode.rs new file mode 100644 index 00000000000..add8fabdb0f --- /dev/null +++ b/codex-rs/protocol/src/plan_mode.rs @@ -0,0 +1,26 @@ +use schemars::JsonSchema; +use serde::Deserialize; +use 
serde::Serialize; +use ts_rs::TS; + +use crate::plan_tool::UpdatePlanArgs; + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, TS, PartialEq, Eq)] +#[serde(deny_unknown_fields)] +pub struct PlanRequest { + pub goal: String, +} + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, TS)] +#[serde(deny_unknown_fields)] +pub struct PlanOutputEvent { + pub title: String, + pub summary: String, + pub plan: UpdatePlanArgs, +} + +#[derive(Debug, Clone, Serialize, Deserialize, JsonSchema, TS)] +#[serde(deny_unknown_fields)] +pub struct ExitedPlanModeEvent { + pub plan_output: Option, +} diff --git a/codex-rs/protocol/src/protocol.rs b/codex-rs/protocol/src/protocol.rs index b3165acbe3b..1c62b485c7d 100644 --- a/codex-rs/protocol/src/protocol.rs +++ b/codex-rs/protocol/src/protocol.rs @@ -42,6 +42,17 @@ pub use crate::approvals::ApplyPatchApprovalRequestEvent; pub use crate::approvals::ElicitationAction; pub use crate::approvals::ExecApprovalRequestEvent; pub use crate::approvals::ExecPolicyAmendment; +pub use crate::ask_user_question::AskUserQuestion; +pub use crate::ask_user_question::AskUserQuestionArgs; +pub use crate::ask_user_question::AskUserQuestionOption; +pub use crate::ask_user_question::AskUserQuestionRequestEvent; +pub use crate::ask_user_question::AskUserQuestionResponse; +pub use crate::plan_approval::PlanApprovalRequestEvent; +pub use crate::plan_approval::PlanApprovalResponse; +pub use crate::plan_approval::PlanProposal; +pub use crate::plan_mode::ExitedPlanModeEvent; +pub use crate::plan_mode::PlanOutputEvent; +pub use crate::plan_mode::PlanRequest; /// Open/close tags for special user-input blocks. Used across crates to avoid /// duplicated hardcoded strings. @@ -129,6 +140,12 @@ pub enum Op { #[serde(skip_serializing_if = "Option::is_none")] model: Option, + /// Updated model slug used for planning flows (e.g. `/plan` mode and plan-variant subagents). + /// + /// When omitted, planning flows use the active `model`. 
+ #[serde(skip_serializing_if = "Option::is_none")] + plan_model: Option, + /// Updated reasoning effort (honored only for reasoning-capable models). /// /// Use `Some(Some(_))` to set a specific effort, `Some(None)` to clear @@ -136,6 +153,13 @@ pub enum Op { #[serde(skip_serializing_if = "Option::is_none")] effort: Option>, + /// Updated reasoning effort for planning flows (honored only for reasoning-capable models). + /// + /// Use `Some(Some(_))` to set a specific effort, `Some(None)` to clear + /// the effort, or `None` to leave the existing value unchanged. + #[serde(skip_serializing_if = "Option::is_none")] + plan_effort: Option>, + /// Updated reasoning summary preference (honored only for reasoning-capable models). #[serde(skip_serializing_if = "Option::is_none")] summary: Option, @@ -167,6 +191,25 @@ pub enum Op { decision: ElicitationAction, }, + /// Resolve an AskUserQuestion request emitted during a tool call. + ResolveAskUserQuestion { + /// The id of the submission we are responding to. + id: String, + /// The user's response (answered or cancelled). + response: AskUserQuestionResponse, + }, + + /// Start a planning session (/plan). + Plan { plan_request: PlanRequest }, + + /// Resolve a PlanApproval request emitted during a tool call. + ResolvePlanApproval { + /// The id of the submission we are responding to. + id: String, + /// The user's response (approved/revised/rejected). + response: PlanApprovalResponse, + }, + /// Append an entry to the persistent cross-session message history. /// /// Note the entry is not guaranteed to be logged if the user has @@ -576,6 +619,11 @@ pub enum EventMsg { ElicitationRequest(ElicitationRequestEvent), + AskUserQuestionRequest(AskUserQuestionRequestEvent), + + /// Ask the user to approve, revise, or reject a proposed plan. 
+ PlanApprovalRequest(PlanApprovalRequestEvent), + ApplyPatchApprovalRequest(ApplyPatchApprovalRequestEvent), /// Notification advising the user that something they are using has been @@ -629,6 +677,12 @@ pub enum EventMsg { /// Exited review mode with an optional final result to apply. ExitedReviewMode(ExitedReviewModeEvent), + /// Entered plan mode. + EnteredPlanMode(PlanRequest), + + /// Exited plan mode with an optional accepted plan. + ExitedPlanMode(ExitedPlanModeEvent), + RawResponseItem(RawResponseItemEvent), ItemStarted(ItemStartedEvent), @@ -1405,19 +1459,15 @@ pub struct ReviewLineRange { #[derive(Debug, Clone, Copy, Display, Deserialize, Serialize, PartialEq, Eq, JsonSchema, TS)] #[serde(rename_all = "snake_case")] +#[derive(Default)] pub enum ExecCommandSource { + #[default] Agent, UserShell, UnifiedExecStartup, UnifiedExecInteraction, } -impl Default for ExecCommandSource { - fn default() -> Self { - Self::Agent - } -} - #[derive(Debug, Clone, Deserialize, Serialize, JsonSchema, TS)] pub struct ExecCommandBeginEvent { /// Identifier so this can be paired with the ExecCommandEnd event. 
diff --git a/codex-rs/responses-api-proxy/README.md b/codex-rs/responses-api-proxy/README.md index 8a99c41a264..1086ef1b352 100644 --- a/codex-rs/responses-api-proxy/README.md +++ b/codex-rs/responses-api-proxy/README.md @@ -12,12 +12,12 @@ A privileged user (i.e., `root` or a user with `sudo`) who has access to `OPENAI printenv OPENAI_API_KEY | env -u OPENAI_API_KEY codex-responses-api-proxy --http-shutdown --server-info /tmp/server-info.json ``` -A non-privileged user would then run Codex as follows, specifying the `model_provider` dynamically: +A non-privileged user would then run Codexel as follows, specifying the `model_provider` dynamically: ```shell PROXY_PORT=$(jq .port /tmp/server-info.json) PROXY_BASE_URL="http://127.0.0.1:${PROXY_PORT}" -codex exec -c "model_providers.openai-proxy={ name = 'OpenAI Proxy', base_url = '${PROXY_BASE_URL}/v1', wire_api='responses' }" \ +codexel exec -c "model_providers.openai-proxy={ name = 'OpenAI Proxy', base_url = '${PROXY_BASE_URL}/v1', wire_api='responses' }" \ -c model_provider="openai-proxy" \ 'Your prompt here' ``` @@ -47,7 +47,7 @@ codex-responses-api-proxy [--port ] [--server-info ] [--http-shutdow - `--server-info `: If set, the proxy writes a single line of JSON with `{ "port": , "pid": }` once listening. - `--http-shutdown`: If set, enables `GET /shutdown` to exit the process with code `0`. - `--upstream-url `: Absolute URL to forward requests to. Defaults to `https://api.openai.com/v1/responses`. -- Authentication is fixed to `Authorization: Bearer ` to match the Codex CLI expectations. +- Authentication is fixed to `Authorization: Bearer ` to match Codexel expectations. 
For Azure, for example (ensure your deployment accepts `Authorization: Bearer `): @@ -67,7 +67,7 @@ printenv AZURE_OPENAI_API_KEY | env -u AZURE_OPENAI_API_KEY codex-responses-api- Care is taken to restrict access/copying to the value of `OPENAI_API_KEY` retained in memory: -- We leverage [`codex_process_hardening`](https://github.com/openai/codex/blob/main/codex-rs/process-hardening/README.md) so `codex-responses-api-proxy` is run with standard process-hardening techniques. +- We leverage [`codex_process_hardening`](../process-hardening/README.md) so `codex-responses-api-proxy` is run with standard process-hardening techniques. - At startup, we allocate a `1024` byte buffer on the stack and copy `"Bearer "` into the start of the buffer. - We then read from `stdin`, copying the contents into the buffer after `"Bearer "`. - After verifying the key matches `/^[a-zA-Z0-9_-]+$/` (and does not exceed the buffer), we create a `String` from that buffer (so the data is now on the heap). diff --git a/codex-rs/responses-api-proxy/npm/README.md b/codex-rs/responses-api-proxy/npm/README.md index 3458e527804..9e7b00d95af 100644 --- a/codex-rs/responses-api-proxy/npm/README.md +++ b/codex-rs/responses-api-proxy/npm/README.md @@ -1,8 +1,8 @@ -# @openai/codex-responses-api-proxy +# @ixe1/codexel-responses-api-proxy -

    npm i -g @openai/codex-responses-api-proxy to install codex-responses-api-proxy

    +

    npm i -g @ixe1/codexel-responses-api-proxy to install codex-responses-api-proxy

    -This package distributes the prebuilt [Codex Responses API proxy binary](https://github.com/openai/codex/tree/main/codex-rs/responses-api-proxy) for macOS, Linux, and Windows. +This package distributes the prebuilt Codexel Responses API proxy binary for macOS, Linux, and Windows. To see available options, run: @@ -10,4 +10,4 @@ To see available options, run: node ./bin/codex-responses-api-proxy.js --help ``` -Refer to [`codex-rs/responses-api-proxy/README.md`](https://github.com/openai/codex/blob/main/codex-rs/responses-api-proxy/README.md) for detailed documentation. +Refer to `codex-rs/responses-api-proxy/README.md` for detailed documentation. diff --git a/codex-rs/responses-api-proxy/npm/package.json b/codex-rs/responses-api-proxy/npm/package.json index f3956a77d6f..360c0865dbc 100644 --- a/codex-rs/responses-api-proxy/npm/package.json +++ b/codex-rs/responses-api-proxy/npm/package.json @@ -1,5 +1,5 @@ { - "name": "@openai/codex-responses-api-proxy", + "name": "@ixe1/codexel-responses-api-proxy", "version": "0.0.0-dev", "license": "Apache-2.0", "bin": { @@ -15,7 +15,7 @@ ], "repository": { "type": "git", - "url": "git+https://github.com/openai/codex.git", + "url": "git+https://github.com/Ixe1/codexel.git", "directory": "codex-rs/responses-api-proxy/npm" } } diff --git a/codex-rs/rmcp-client/src/find_codex_home.rs b/codex-rs/rmcp-client/src/find_codex_home.rs index d683ba9d164..b6ee474cbb4 100644 --- a/codex-rs/rmcp-client/src/find_codex_home.rs +++ b/codex-rs/rmcp-client/src/find_codex_home.rs @@ -5,29 +5,48 @@ use std::path::PathBuf; /// TODO: move this to a shared crate lower in the dependency tree. /// /// -/// Returns the path to the Codex configuration directory, which can be -/// specified by the `CODEX_HOME` environment variable. If not set, defaults to -/// `~/.codex`. +/// Returns the path to the Codexel configuration directory. 
/// -/// - If `CODEX_HOME` is set, the value will be canonicalized and this +/// The directory can be specified by the `CODEXEL_HOME` environment variable. +/// For compatibility with existing installs, `CODEX_HOME` is also honored. When +/// neither is set, defaults to `~/.codexel`, falling back to `~/.codex` if that +/// directory exists and `~/.codexel` does not. +/// +/// - If `CODEXEL_HOME` (or `CODEX_HOME`) is set, the value will be canonicalized and this /// function will Err if the path does not exist. -/// - If `CODEX_HOME` is not set, this function does not verify that the -/// directory exists. +/// - If neither environment variable is set, this function does not verify +/// that the directory exists. pub(crate) fn find_codex_home() -> std::io::Result { - // Honor the `CODEX_HOME` environment variable when it is set to allow users - // (and tests) to override the default location. + // Honor `CODEXEL_HOME` (preferred) and `CODEX_HOME` (legacy) when set to + // allow users (and tests) to override the default location. 
+ if let Ok(val) = std::env::var("CODEXEL_HOME") + && !val.is_empty() + { + return PathBuf::from(val).canonicalize(); + } + if let Ok(val) = std::env::var("CODEX_HOME") && !val.is_empty() { return PathBuf::from(val).canonicalize(); } - let mut p = home_dir().ok_or_else(|| { + let home = home_dir().ok_or_else(|| { std::io::Error::new( std::io::ErrorKind::NotFound, "Could not find home directory", ) })?; - p.push(".codex"); - Ok(p) + + let codexel_home = home.join(".codexel"); + if codexel_home.exists() { + return Ok(codexel_home); + } + + let codex_home = home.join(".codex"); + if codex_home.exists() { + return Ok(codex_home); + } + + Ok(codexel_home) } diff --git a/codex-rs/tui/src/app.rs b/codex-rs/tui/src/app.rs index 66e12f086f0..7f16b091d98 100644 --- a/codex-rs/tui/src/app.rs +++ b/codex-rs/tui/src/app.rs @@ -79,7 +79,7 @@ fn session_summary( let usage_line = FinalOutput::from(token_usage).to_string(); let resume_command = - conversation_id.map(|conversation_id| format!("codex resume {conversation_id}")); + conversation_id.map(|conversation_id| format!("codexel resume {conversation_id}")); Some(SessionSummary { usage_line, resume_command, @@ -195,6 +195,7 @@ async fn handle_model_migration_prompt_if_needed( reasoning_effort_mapping, migration_config_key, model_link, + upgrade_copy, }) = upgrade { if migration_prompt_hidden(config, migration_config_key.as_str()) { @@ -227,6 +228,7 @@ async fn handle_model_migration_prompt_if_needed( model, &target_model, model_link.clone(), + upgrade_copy.clone(), heading_label, target_description, can_opt_out, @@ -763,11 +765,19 @@ impl App { self.chat_widget.set_model(&model, model_family); self.current_model = model; } - AppEvent::OpenReasoningPopup { model } => { - self.chat_widget.open_reasoning_popup(model); + AppEvent::UpdatePlanModel(model) => { + self.config.plan_model = Some(model.clone()); + self.chat_widget.set_plan_model(&model); } - AppEvent::OpenAllModelsPopup { models } => { - 
self.chat_widget.open_all_models_popup(models); + AppEvent::UpdatePlanReasoningEffort(effort) => { + self.config.plan_model_reasoning_effort = effort; + self.chat_widget.set_plan_reasoning_effort(effort); + } + AppEvent::OpenReasoningPopup { model, target } => { + self.chat_widget.open_reasoning_popup(target, model); + } + AppEvent::OpenAllModelsPopup { models, target } => { + self.chat_widget.open_all_models_popup(target, models); } AppEvent::OpenFullAccessConfirmation { preset } => { self.chat_widget.open_full_access_confirmation(preset); @@ -829,7 +839,9 @@ impl App { approval_policy: Some(preset.approval), sandbox_policy: Some(preset.sandbox.clone()), model: None, + plan_model: None, effort: None, + plan_effort: None, summary: None, }, )); @@ -896,6 +908,45 @@ impl App { } } } + AppEvent::PersistPlanModelSelection { model, effort } => { + let profile = self.active_profile.as_deref(); + match ConfigEditsBuilder::new(&self.config.codex_home) + .with_profile(profile) + .set_plan_model(Some(model.as_str()), effort) + .apply() + .await + { + Ok(()) => { + let mut message = format!("Plan model changed to {model}"); + if let Some(label) = Self::reasoning_label_for(&model, effort) { + message.push(' '); + message.push_str(label); + } + message.push_str(" (used for /plan)"); + if let Some(profile) = profile { + message.push_str(" for "); + message.push_str(profile); + message.push_str(" profile"); + } + self.chat_widget.add_info_message(message, None); + } + Err(err) => { + tracing::error!( + error = %err, + "failed to persist plan model selection" + ); + if let Some(profile) = profile { + self.chat_widget.add_error_message(format!( + "Failed to save plan model for profile `{profile}`: {err}" + )); + } else { + self.chat_widget.add_error_message(format!( + "Failed to save default plan model: {err}" + )); + } + } + } + } AppEvent::UpdateAskForApprovalPolicy(policy) => { self.chat_widget.set_approval_policy(policy); } @@ -1398,6 +1449,7 @@ mod tests { 
reasoning_effort_mapping: None, migration_config_key: HIDE_GPT5_1_MIGRATION_PROMPT_CONFIG.to_string(), model_link: None, + upgrade_copy: None, }); available.retain(|preset| preset.model != "gpt-5-codex"); available.push(current.clone()); @@ -1557,7 +1609,7 @@ mod tests { ); assert_eq!( summary.resume_command, - Some("codex resume 123e4567-e89b-12d3-a456-426614174000".to_string()) + Some("codexel resume 123e4567-e89b-12d3-a456-426614174000".to_string()) ); } } diff --git a/codex-rs/tui/src/app_event.rs b/codex-rs/tui/src/app_event.rs index 0be556de8bd..5e6bd7cf829 100644 --- a/codex-rs/tui/src/app_event.rs +++ b/codex-rs/tui/src/app_event.rs @@ -15,6 +15,12 @@ use codex_core::protocol::AskForApproval; use codex_core::protocol::SandboxPolicy; use codex_protocol::openai_models::ReasoningEffort; +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum ModelPickerTarget { + Chat, + Plan, +} + #[allow(clippy::large_enum_variant)] #[derive(Debug)] pub(crate) enum AppEvent { @@ -64,20 +70,34 @@ pub(crate) enum AppEvent { /// Update the current model slug in the running app and widget. UpdateModel(String), + /// Update the current plan model slug in the running app and widget. + UpdatePlanModel(String), + + /// Update the current plan reasoning effort in the running app and widget. + UpdatePlanReasoningEffort(Option), + /// Persist the selected model and reasoning effort to the appropriate config. PersistModelSelection { model: String, effort: Option, }, + /// Persist the selected plan model and reasoning effort to the appropriate config. + PersistPlanModelSelection { + model: String, + effort: Option, + }, + /// Open the reasoning selection popup after picking a model. OpenReasoningPopup { model: ModelPreset, + target: ModelPickerTarget, }, /// Open the full model picker (non-auto models). OpenAllModelsPopup { models: Vec, + target: ModelPickerTarget, }, /// Open the confirmation prompt before enabling full access mode. 
diff --git a/codex-rs/tui/src/bottom_pane/ask_user_question_overlay.rs b/codex-rs/tui/src/bottom_pane/ask_user_question_overlay.rs new file mode 100644 index 00000000000..dcefb69ad6d --- /dev/null +++ b/codex-rs/tui/src/bottom_pane/ask_user_question_overlay.rs @@ -0,0 +1,1134 @@ +use std::cell::RefCell; +use std::collections::HashMap; + +use crossterm::event::KeyCode; +use crossterm::event::KeyEvent; +use crossterm::event::KeyModifiers; +use ratatui::buffer::Buffer; +use ratatui::layout::Constraint; +use ratatui::layout::Layout; +use ratatui::layout::Rect; +use ratatui::style::Stylize; +use ratatui::text::Line; +use ratatui::text::Span; +use ratatui::widgets::Block; +use ratatui::widgets::Clear; +use ratatui::widgets::Paragraph; +use ratatui::widgets::StatefulWidgetRef; +use ratatui::widgets::Widget; +use textwrap::wrap; + +use codex_core::protocol::AskUserQuestion; +use codex_core::protocol::AskUserQuestionRequestEvent; +use codex_core::protocol::AskUserQuestionResponse; +use codex_core::protocol::Op; + +use crate::app_event::AppEvent; +use crate::app_event_sender::AppEventSender; +use crate::key_hint; +use crate::render::Insets; +use crate::render::RectExt as _; +use crate::style::user_message_style; + +use super::CancellationEvent; +use super::bottom_pane_view::BottomPaneView; +use super::popup_consts::MAX_POPUP_ROWS; +use super::scroll_state::ScrollState; +use super::selection_popup_common::GenericDisplayRow; +use super::selection_popup_common::measure_rows_height; +use super::selection_popup_common::render_rows; +use super::textarea::TextArea; +use super::textarea::TextAreaState; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum Mode { + Select, + OtherInput, + Review, +} + +fn normalize_choice_label(label: &str) -> String { + let trimmed = label.trim_start(); + + let mut chars = trimmed.char_indices().peekable(); + let mut saw_digit = false; + let mut after_digits = 0usize; + while let Some((idx, ch)) = chars.peek().copied() + && ch.is_ascii_digit() + { + 
saw_digit = true; + chars.next(); + after_digits = idx + ch.len_utf8(); + } + + if !saw_digit { + return trimmed.to_string(); + } + + // Only strip numeric prefixes when they look like enumeration: "1) Foo", "2. Bar", "3: Baz". + let Some((idx, ch)) = chars.peek().copied() else { + return trimmed.to_string(); + }; + if !matches!(ch, ')' | '.' | ':') { + return trimmed.to_string(); + } + + chars.next(); + let mut end = idx + ch.len_utf8(); + + while let Some((idx, ch)) = chars.peek().copied() + && ch.is_whitespace() + { + chars.next(); + end = idx + ch.len_utf8(); + } + + if end <= after_digits { + return trimmed.to_string(); + } + + let rest = trimmed[end..].trim_start(); + if rest.is_empty() { + trimmed.to_string() + } else { + rest.to_string() + } +} + +#[derive(Debug, Clone)] +struct AnswerDraft { + selected: Vec, + other_text: String, +} + +impl AnswerDraft { + fn new(q: &AskUserQuestion) -> Self { + Self { + selected: vec![false; q.options.len() + 1], // + Other + other_text: String::new(), + } + } + + fn any_selected(&self) -> bool { + self.selected.iter().any(|s| *s) + } + + fn other_selected(&self) -> bool { + self.selected.last().copied().unwrap_or(false) + } + + fn trimmed_other_text(&self) -> String { + self.other_text.trim().to_string() + } + + fn to_answer_string(&self, q: &AskUserQuestion) -> Option { + if !self.any_selected() { + return None; + } + + if self.other_selected() && self.trimmed_other_text().is_empty() { + return None; + } + + if q.multi_select { + let mut parts = Vec::new(); + for (idx, selected) in self.selected.iter().enumerate() { + if !*selected { + continue; + } + if idx == q.options.len() { + parts.push(self.trimmed_other_text()); + } else if let Some(opt) = q.options.get(idx) { + parts.push(normalize_choice_label(opt.label.as_str())); + } + } + Some(parts.join(", ")) + } else { + let (idx, _) = self.selected.iter().enumerate().find(|(_, s)| **s)?; + + if idx == q.options.len() { + let other = self.trimmed_other_text(); + if 
other.is_empty() { None } else { Some(other) } + } else { + q.options + .get(idx) + .map(|o| normalize_choice_label(o.label.as_str())) + } + } + } +} + +pub(crate) struct AskUserQuestionOverlay { + id: String, + questions: Vec, + current_idx: usize, + drafts: Vec, + + mode: Mode, + state: ScrollState, + multi_select: bool, + selected: Vec, + textarea: TextArea, + textarea_state: RefCell, + error: Option, + + return_to_review: bool, + + app_event_tx: AppEventSender, + complete: bool, +} + +impl AskUserQuestionOverlay { + pub(crate) fn new( + id: String, + ev: AskUserQuestionRequestEvent, + app_event_tx: AppEventSender, + ) -> Self { + let drafts = ev.questions.iter().map(AnswerDraft::new).collect(); + let mut overlay = Self { + id, + questions: ev.questions, + current_idx: 0, + drafts, + mode: Mode::Select, + state: ScrollState::new(), + multi_select: false, + selected: Vec::new(), + textarea: TextArea::new(), + textarea_state: RefCell::new(TextAreaState::default()), + error: None, + return_to_review: false, + app_event_tx, + complete: false, + }; + overlay.reset_for_current_question(); + overlay + } + + fn current_question(&self) -> Option<&AskUserQuestion> { + self.questions.get(self.current_idx) + } + + fn reset_for_current_question(&mut self) { + self.mode = Mode::Select; + self.error = None; + self.state.reset(); + self.textarea_state.replace(TextAreaState::default()); + + let Some(q) = self.current_question().cloned() else { + self.multi_select = false; + self.selected.clear(); + self.state.selected_idx = None; + self.textarea.set_text(""); + return; + }; + + self.multi_select = q.multi_select; + + let expected_len = q.options.len() + 1; + if let Some(draft) = self.drafts.get_mut(self.current_idx) + && draft.selected.len() != expected_len + { + *draft = AnswerDraft::new(&q); + } + + let draft = self + .drafts + .get(self.current_idx) + .cloned() + .unwrap_or_else(|| AnswerDraft::new(&q)); + + self.selected = draft.selected; + 
self.textarea.set_text(draft.other_text.as_str()); + self.state.selected_idx = Some(0); + } + + fn save_current_draft(&mut self) { + let Some(q) = self.current_question() else { + return; + }; + + let expected_len = q.options.len() + 1; + if self.selected.len() != expected_len { + self.selected = vec![false; expected_len]; + } + + if let Some(draft) = self.drafts.get_mut(self.current_idx) { + draft.selected.clone_from(&self.selected); + draft.other_text = self.textarea.text().to_string(); + } + } + + fn options_len(&self) -> usize { + self.current_question() + .map(|q| q.options.len() + 1) + .unwrap_or(0) + } + + fn is_other_idx(&self, idx: usize) -> bool { + self.current_question() + .map(|q| idx == q.options.len()) + .unwrap_or(false) + } + + fn move_up(&mut self) { + let len = self.rows_len(); + self.state.move_up_wrap(len); + self.state.ensure_visible(len, self.max_visible_rows()); + } + + fn move_down(&mut self) { + let len = self.rows_len(); + self.state.move_down_wrap(len); + self.state.ensure_visible(len, self.max_visible_rows()); + } + + fn max_visible_rows(&self) -> usize { + MAX_POPUP_ROWS.min(self.rows_len().max(1)) + } + + fn rows_len(&self) -> usize { + match self.mode { + Mode::Review => self.questions.len().saturating_add(2), // Submit, Cancel + Mode::Select | Mode::OtherInput => self.options_len(), + } + } + + fn toggle_current(&mut self) { + let Some(idx) = self.state.selected_idx else { + return; + }; + if let Some(flag) = self.selected.get_mut(idx) { + *flag = !*flag; + } + self.error = None; + } + + fn select_single(&mut self) { + let Some(idx) = self.state.selected_idx else { + return; + }; + self.selected.iter_mut().for_each(|s| *s = false); + if let Some(flag) = self.selected.get_mut(idx) { + *flag = true; + } + self.error = None; + } + + fn any_selected(&self) -> bool { + self.selected.iter().any(|s| *s) + } + + fn other_selected(&self) -> bool { + let Some(q) = self.current_question() else { + return false; + }; + 
self.selected.get(q.options.len()).copied().unwrap_or(false) + } + + fn other_text(&self) -> String { + self.textarea.text().trim().to_string() + } + + fn confirm_selection(&mut self) { + let Some(_) = self.current_question() else { + self.finish_answered(HashMap::new()); + return; + }; + + if self.multi_select { + if !self.any_selected() { + self.error = Some("Select at least one option.".to_string()); + return; + } + if self.other_selected() && self.other_text().is_empty() { + self.mode = Mode::OtherInput; + self.error = None; + return; + } + self.save_current_draft(); + self.advance_or_review(); + } else { + let Some((idx, _)) = self.selected.iter().enumerate().find(|(_, s)| **s) else { + self.error = Some("Select an option.".to_string()); + return; + }; + if self.is_other_idx(idx) { + if self.other_text().is_empty() { + self.mode = Mode::OtherInput; + self.error = None; + return; + } + self.save_current_draft(); + self.advance_or_review(); + return; + } + self.save_current_draft(); + self.advance_or_review(); + } + } + + fn accept_other_input(&mut self) { + if self.other_text().is_empty() { + self.error = Some("Other response cannot be empty.".to_string()); + return; + } + self.mode = Mode::Select; + self.confirm_selection(); + } + + fn advance_or_review(&mut self) { + if self.return_to_review || self.current_idx + 1 >= self.questions.len() { + self.enter_review(); + return; + } + + self.current_idx += 1; + self.reset_for_current_question(); + } + + fn enter_review(&mut self) { + self.save_current_draft(); + self.mode = Mode::Review; + self.error = None; + self.state.reset(); + self.state.selected_idx = Some(0); + self.return_to_review = true; + } + + fn submit_from_review(&mut self) { + let mut answers: HashMap = HashMap::new(); + for (idx, q) in self.questions.iter().enumerate() { + let Some(draft) = self.drafts.get(idx) else { + self.go_to_question( + idx, + Some("Please answer this question to submit.".to_string()), + ); + return; + }; + let Some(answer) = 
draft.to_answer_string(q) else { + self.go_to_question( + idx, + Some("Please answer this question to submit.".to_string()), + ); + return; + }; + answers.insert(q.header.clone(), answer); + } + + self.finish_answered(answers); + } + + fn go_to_question(&mut self, idx: usize, error: Option) { + if matches!(self.mode, Mode::Select | Mode::OtherInput) { + self.save_current_draft(); + } + self.current_idx = idx.min(self.questions.len().saturating_sub(1)); + self.reset_for_current_question(); + self.error = error; + } + + fn go_to_previous_question(&mut self) { + if self.current_idx == 0 { + return; + } + self.save_current_draft(); + self.current_idx -= 1; + self.reset_for_current_question(); + } + + fn finish_answered(&mut self, answers: HashMap) { + let response = AskUserQuestionResponse::Answered { answers }; + self.app_event_tx + .send(AppEvent::CodexOp(Op::ResolveAskUserQuestion { + id: self.id.clone(), + response, + })); + self.complete = true; + } + + fn finish_cancelled(&mut self) { + self.app_event_tx + .send(AppEvent::CodexOp(Op::ResolveAskUserQuestion { + id: self.id.clone(), + response: AskUserQuestionResponse::Cancelled, + })); + self.complete = true; + } + + fn build_rows(&self) -> Vec { + if self.mode == Mode::Review { + return self.build_review_rows(); + } + + let Some(q) = self.current_question() else { + return Vec::new(); + }; + + let mut rows = Vec::with_capacity(q.options.len() + 1); + for (idx, opt) in q.options.iter().enumerate() { + rows.push(GenericDisplayRow { + name: self.row_name(idx, opt.label.as_str()), + display_shortcut: None, + match_indices: None, + description: Some(opt.description.clone()), + disabled_reason: None, + wrap_indent: None, + }); + } + rows.push(GenericDisplayRow { + name: self.row_name(q.options.len(), "Other"), + display_shortcut: None, + match_indices: None, + description: Some("Provide custom text input.".to_string()), + disabled_reason: None, + wrap_indent: None, + }); + rows + } + + fn build_review_rows(&self) -> 
Vec { + let mut rows = Vec::with_capacity(self.questions.len() + 2); + for (idx, q) in self.questions.iter().enumerate() { + let answer = self + .drafts + .get(idx) + .and_then(|d| d.to_answer_string(q)) + .unwrap_or_else(|| "Unanswered".to_string()); + + rows.push(GenericDisplayRow { + name: format!("{}. {}", idx + 1, q.header), + display_shortcut: None, + match_indices: None, + description: Some(answer), + disabled_reason: None, + wrap_indent: Some(4), + }); + } + + rows.push(GenericDisplayRow { + name: "Submit".to_string(), + display_shortcut: None, + match_indices: None, + description: Some("Send answers.".to_string()), + disabled_reason: None, + wrap_indent: Some(4), + }); + + rows.push(GenericDisplayRow { + name: "Cancel".to_string(), + display_shortcut: None, + match_indices: None, + description: Some("Cancel without sending.".to_string()), + disabled_reason: None, + wrap_indent: Some(4), + }); + + rows + } + + fn row_name(&self, idx: usize, label: &str) -> String { + let n = idx + 1; + let label = normalize_choice_label(label); + if self.multi_select { + let checked = self.selected.get(idx).copied().unwrap_or(false); + let box_mark = if checked { "[x]" } else { "[ ]" }; + format!("{n}. {box_mark} {label}") + } else { + format!("{n}. 
{label}") + } + } + + fn footer_hint(&self) -> Line<'static> { + match self.mode { + Mode::Select => { + if self.multi_select { + Line::from(vec![ + "Space".into(), + " toggle, ".into(), + key_hint::plain(KeyCode::Enter).into(), + " next, ".into(), + key_hint::plain(KeyCode::BackTab).into(), + " back, ".into(), + key_hint::plain(KeyCode::Esc).into(), + " cancel".into(), + ]) + } else { + Line::from(vec![ + key_hint::plain(KeyCode::Enter).into(), + " choose, ".into(), + key_hint::plain(KeyCode::BackTab).into(), + " back, ".into(), + key_hint::plain(KeyCode::Esc).into(), + " cancel".into(), + ]) + } + } + Mode::OtherInput => Line::from(vec![ + key_hint::plain(KeyCode::Enter).into(), + " submit, ".into(), + key_hint::ctrl(KeyCode::Char('b')).into(), + " back, ".into(), + key_hint::plain(KeyCode::Esc).into(), + " cancel".into(), + ]), + Mode::Review => Line::from(vec![ + key_hint::plain(KeyCode::Enter).into(), + " edit/submit, ".into(), + key_hint::plain(KeyCode::Esc).into(), + " cancel".into(), + ]), + } + } + + fn header_lines(&self, width: u16) -> Vec> { + if self.mode == Mode::Review { + let progress = format!("Review ({})", self.questions.len()); + let mut lines = vec![Line::from(vec!["[".into(), progress.bold(), "]".into()])]; + lines.push(Line::from("Select a question to edit, then submit.")); + return lines; + } + + let Some(q) = self.current_question() else { + return vec![Line::from("No questions.".dim())]; + }; + + let usable_width = width.saturating_sub(4).max(1) as usize; + let progress = format!( + "{} ({}/{})", + q.header, + self.current_idx + 1, + self.questions.len() + ); + + let mut lines = vec![Line::from(vec!["[".into(), progress.bold(), "]".into()])]; + + for w in wrap(q.question.as_str(), usable_width) { + lines.push(Line::from(w.into_owned())); + } + + if let Some(err) = &self.error { + lines.push(Line::from(vec!["".into()])); + lines.push(Line::from(err.clone().red())); + } + + lines + } + + fn cursor_pos_for_other_input(&self, area: Rect) -> 
Option<(u16, u16)> { + if self.mode != Mode::OtherInput { + return None; + } + if area.height < 2 || area.width <= 2 { + return None; + } + let textarea_rect = self.textarea_rect(area); + let state = *self.textarea_state.borrow(); + self.textarea.cursor_pos_with_state(textarea_rect, state) + } + + fn textarea_rect(&self, area: Rect) -> Rect { + let inset = area.inset(Insets::vh(1, 2)); + Rect { + x: inset.x, + y: inset.y, + width: inset.width, + height: inset.height.clamp(1, 5), + } + } +} + +impl BottomPaneView for AskUserQuestionOverlay { + fn handle_key_event(&mut self, key_event: KeyEvent) { + match self.mode { + Mode::Select => match key_event { + KeyEvent { + code: KeyCode::Up, .. + } + | KeyEvent { + code: KeyCode::Char('p'), + modifiers: KeyModifiers::CONTROL, + .. + } + | KeyEvent { + code: KeyCode::Char('\u{0010}'), + modifiers: KeyModifiers::NONE, + .. + } /* ^P */ => self.move_up(), + KeyEvent { + code: KeyCode::Char('k'), + modifiers: KeyModifiers::NONE, + .. + } => self.move_up(), + KeyEvent { + code: KeyCode::Down, + .. + } + | KeyEvent { + code: KeyCode::Char('n'), + modifiers: KeyModifiers::CONTROL, + .. + } + | KeyEvent { + code: KeyCode::Char('\u{000e}'), + modifiers: KeyModifiers::NONE, + .. + } /* ^N */ => self.move_down(), + KeyEvent { + code: KeyCode::Char('j'), + modifiers: KeyModifiers::NONE, + .. + } => self.move_down(), + KeyEvent { + code: KeyCode::BackTab, + .. + } + | KeyEvent { + code: KeyCode::Left, + .. + } + | KeyEvent { + code: KeyCode::Char('h'), + modifiers: KeyModifiers::NONE, + .. + } + | KeyEvent { + code: KeyCode::Char('b'), + modifiers: KeyModifiers::CONTROL, + .. + } => self.go_to_previous_question(), + KeyEvent { + code: KeyCode::Esc, .. + } => { + self.on_ctrl_c(); + } + KeyEvent { + code: KeyCode::Char(' '), + modifiers: KeyModifiers::NONE, + .. + } if self.multi_select => { + self.toggle_current(); + } + KeyEvent { + code: KeyCode::Char(c), + modifiers, + .. 
+ } if !modifiers.contains(KeyModifiers::CONTROL) + && !modifiers.contains(KeyModifiers::ALT) => + { + if let Some(idx) = c + .to_digit(10) + .map(|d| d as usize) + .and_then(|d| d.checked_sub(1)) + && idx < self.options_len() + { + self.state.selected_idx = Some(idx); + self.state.ensure_visible(self.options_len(), self.max_visible_rows()); + if self.multi_select { + self.toggle_current(); + } else { + self.select_single(); + self.confirm_selection(); + } + } + } + KeyEvent { + code: KeyCode::Enter, + modifiers: KeyModifiers::NONE, + .. + } => { + if self.multi_select { + self.confirm_selection(); + } else { + self.select_single(); + self.confirm_selection(); + } + } + _ => {} + }, + Mode::OtherInput => match key_event { + KeyEvent { + code: KeyCode::Esc, .. + } => { + self.on_ctrl_c(); + } + KeyEvent { + code: KeyCode::Char('b'), + modifiers: KeyModifiers::CONTROL, + .. + } => { + self.error = None; + self.mode = Mode::Select; + } + KeyEvent { + code: KeyCode::Enter, + modifiers: KeyModifiers::NONE, + .. + } => { + self.accept_other_input(); + } + KeyEvent { + code: KeyCode::Enter, + .. + } => { + self.textarea.input(key_event); + } + other => { + self.textarea.input(other); + } + }, + Mode::Review => match key_event { + KeyEvent { + code: KeyCode::Up, .. + } + | KeyEvent { + code: KeyCode::Char('p'), + modifiers: KeyModifiers::CONTROL, + .. + } => self.move_up(), + KeyEvent { + code: KeyCode::Char('k'), + modifiers: KeyModifiers::NONE, + .. + } => self.move_up(), + KeyEvent { + code: KeyCode::Down, + .. + } + | KeyEvent { + code: KeyCode::Char('n'), + modifiers: KeyModifiers::CONTROL, + .. + } => self.move_down(), + KeyEvent { + code: KeyCode::Char('j'), + modifiers: KeyModifiers::NONE, + .. + } => self.move_down(), + KeyEvent { + code: KeyCode::Esc, .. + } => { + self.on_ctrl_c(); + } + KeyEvent { + code: KeyCode::Char(c), + modifiers, + .. 
+ } if !modifiers.contains(KeyModifiers::CONTROL) + && !modifiers.contains(KeyModifiers::ALT) => + { + if let Some(idx) = c + .to_digit(10) + .map(|d| d as usize) + .and_then(|d| d.checked_sub(1)) + && idx < self.questions.len() + { + self.state.selected_idx = Some(idx); + self.state + .ensure_visible(self.rows_len(), self.max_visible_rows()); + self.return_to_review = true; + self.go_to_question(idx, None); + } + } + KeyEvent { + code: KeyCode::Enter, + modifiers: KeyModifiers::NONE, + .. + } => { + let Some(idx) = self.state.selected_idx else { + return; + }; + + if idx < self.questions.len() { + self.return_to_review = true; + self.go_to_question(idx, None); + } else if idx == self.questions.len() { + self.submit_from_review(); + } else { + self.finish_cancelled(); + } + } + _ => {} + }, + } + } + + fn on_ctrl_c(&mut self) -> CancellationEvent { + self.finish_cancelled(); + CancellationEvent::Handled + } + + fn is_complete(&self) -> bool { + self.complete + } + + fn handle_paste(&mut self, pasted: String) -> bool { + if self.mode != Mode::OtherInput { + return false; + } + if pasted.is_empty() { + return false; + } + self.textarea.insert_str(&pasted); + true + } +} + +impl crate::render::renderable::Renderable for AskUserQuestionOverlay { + fn desired_height(&self, width: u16) -> u16 { + let header_height = self.header_lines(width).len() as u16; + let rows_height = measure_rows_height( + &self.build_rows(), + &self.state, + MAX_POPUP_ROWS, + width.saturating_sub(1).max(1), + ); + let footer_height = 1u16; + + let mut total = header_height + .saturating_add(1) + .saturating_add(rows_height) + .saturating_add(footer_height) + .saturating_add(2); + if self.mode == Mode::OtherInput { + total = total.saturating_add(6); + } + total + } + + fn cursor_pos(&self, area: Rect) -> Option<(u16, u16)> { + self.cursor_pos_for_other_input(area) + } + + fn render(&self, area: Rect, buf: &mut Buffer) { + if area.height == 0 || area.width == 0 { + return; + } + + 
Clear.render(area, buf); + Block::default() + .style(user_message_style()) + .render(area, buf); + + let [content_area, footer_area] = + Layout::vertical([Constraint::Fill(1), Constraint::Length(1)]).areas(area); + let inset = content_area.inset(Insets::vh(1, 2)); + + let header_lines = self.header_lines(inset.width); + let header_height = header_lines.len() as u16; + let [header_area, body_area] = + Layout::vertical([Constraint::Length(header_height), Constraint::Fill(1)]).areas(inset); + Paragraph::new(header_lines).render(header_area, buf); + + match self.mode { + Mode::Select => { + let rows = self.build_rows(); + let rows_height = measure_rows_height( + &rows, + &self.state, + MAX_POPUP_ROWS, + body_area.width.saturating_sub(1).max(1), + ); + let list_area = Rect { + x: body_area.x, + y: body_area.y, + width: body_area.width, + height: rows_height.min(body_area.height), + }; + render_rows( + list_area, + buf, + &rows, + &self.state, + MAX_POPUP_ROWS, + "no options", + ); + } + Mode::OtherInput => { + let label_area = Rect { + x: body_area.x, + y: body_area.y, + width: body_area.width, + height: 1, + }; + Paragraph::new(Line::from(vec![ + Span::from("Other response: ".to_string()).bold(), + "(press Enter to submit)".dim(), + ])) + .render(label_area, buf); + + let input_outer = Rect { + x: body_area.x, + y: body_area.y.saturating_add(1), + width: body_area.width, + height: body_area.height.saturating_sub(1).max(1), + }; + let textarea_rect = self.textarea_rect(input_outer); + let mut state = self.textarea_state.borrow_mut(); + StatefulWidgetRef::render_ref(&(&self.textarea), textarea_rect, buf, &mut state); + if self.textarea.text().is_empty() { + Paragraph::new(Line::from("Type your response…".dim())) + .render(textarea_rect, buf); + } + } + Mode::Review => { + let rows = self.build_rows(); + let rows_height = measure_rows_height( + &rows, + &self.state, + MAX_POPUP_ROWS, + body_area.width.saturating_sub(1).max(1), + ); + let list_area = Rect { + x: 
body_area.x, + y: body_area.y, + width: body_area.width, + height: rows_height.min(body_area.height), + }; + render_rows( + list_area, + buf, + &rows, + &self.state, + MAX_POPUP_ROWS, + "no questions", + ); + } + } + + let hint_area = Rect { + x: footer_area.x.saturating_add(2), + y: footer_area.y, + width: footer_area.width.saturating_sub(2), + height: 1, + }; + self.footer_hint().dim().render(hint_area, buf); + } +} + +#[cfg(test)] +mod tests { + use super::*; + + use pretty_assertions::assert_eq; + use tokio::sync::mpsc; + + fn option(label: &str) -> codex_core::protocol::AskUserQuestionOption { + codex_core::protocol::AskUserQuestionOption { + label: label.to_string(), + description: "".to_string(), + } + } + + fn question(header: &str, multi_select: bool, options: &[&str]) -> AskUserQuestion { + AskUserQuestion { + question: format!("Question {header}?"), + header: header.to_string(), + options: options.iter().copied().map(option).collect(), + multi_select, + } + } + + fn make_overlay( + questions: Vec, + ) -> (AskUserQuestionOverlay, mpsc::UnboundedReceiver) { + let (tx, rx) = mpsc::unbounded_channel(); + let app_event_tx = AppEventSender::new(tx); + let ev = AskUserQuestionRequestEvent { + call_id: "call-1".to_string(), + questions, + }; + ( + AskUserQuestionOverlay::new("ask-1".to_string(), ev, app_event_tx), + rx, + ) + } + + #[tokio::test] + async fn review_required_before_submit() { + let (mut overlay, mut rx) = make_overlay(vec![question("Q1", false, &["A", "B"])]); + + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('1'), KeyModifiers::NONE)); + assert_eq!(overlay.mode, Mode::Review); + assert!(rx.try_recv().is_err()); + + // Down to "Submit" row. 
+ overlay.handle_key_event(KeyEvent::new(KeyCode::Down, KeyModifiers::NONE)); + overlay.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + + let AppEvent::CodexOp(op) = rx.try_recv().expect("submit op") else { + panic!("expected CodexOp"); + }; + match op { + Op::ResolveAskUserQuestion { id, response } => { + assert_eq!(id, "ask-1"); + assert_eq!( + response, + AskUserQuestionResponse::Answered { + answers: HashMap::from([("Q1".to_string(), "A".to_string())]) + } + ); + } + other => panic!("unexpected op: {other:?}"), + } + } + + #[tokio::test] + async fn back_navigation_restores_previous_answer() { + let (mut overlay, mut rx) = make_overlay(vec![ + question("Q1", false, &["A", "B"]), + question("Q2", false, &["C", "D"]), + ]); + + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('2'), KeyModifiers::NONE)); + assert_eq!(overlay.current_idx, 1); + + overlay.handle_key_event(KeyEvent::new(KeyCode::BackTab, KeyModifiers::NONE)); + assert_eq!(overlay.current_idx, 0); + assert_eq!(overlay.selected, vec![false, true, false]); + assert!(rx.try_recv().is_err()); + } + + #[tokio::test] + async fn editing_from_review_updates_answer() { + let (mut overlay, mut rx) = make_overlay(vec![ + question("Q1", false, &["A", "B"]), + question("Q2", false, &["C", "D"]), + ]); + + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('1'), KeyModifiers::NONE)); + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('2'), KeyModifiers::NONE)); + assert_eq!(overlay.mode, Mode::Review); + assert!(rx.try_recv().is_err()); + + // Edit Q1 from review. + overlay.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + assert_eq!(overlay.current_idx, 0); + + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('2'), KeyModifiers::NONE)); + assert_eq!(overlay.mode, Mode::Review); + + // Submit. 
+ overlay.handle_key_event(KeyEvent::new(KeyCode::Down, KeyModifiers::NONE)); // Q2 + overlay.handle_key_event(KeyEvent::new(KeyCode::Down, KeyModifiers::NONE)); // Submit + overlay.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + + let AppEvent::CodexOp(op) = rx.try_recv().expect("submit op") else { + panic!("expected CodexOp"); + }; + match op { + Op::ResolveAskUserQuestion { response, .. } => { + assert_eq!( + response, + AskUserQuestionResponse::Answered { + answers: HashMap::from([ + ("Q1".to_string(), "B".to_string()), + ("Q2".to_string(), "D".to_string()) + ]) + } + ); + } + other => panic!("unexpected op: {other:?}"), + } + } +} diff --git a/codex-rs/tui/src/bottom_pane/feedback_view.rs b/codex-rs/tui/src/bottom_pane/feedback_view.rs index c563ab8e90b..ea062d62d51 100644 --- a/codex-rs/tui/src/bottom_pane/feedback_view.rs +++ b/codex-rs/tui/src/bottom_pane/feedback_view.rs @@ -27,8 +27,7 @@ use super::popup_consts::standard_popup_hint_line; use super::textarea::TextArea; use super::textarea::TextAreaState; -const BASE_BUG_ISSUE_URL: &str = - "https://github.com/openai/codex/issues/new?template=2-bug-report.yml"; +const BASE_BUG_ISSUE_URL: &str = "https://github.com/Ixe1/codexel/issues/new"; /// Minimal input overlay to collect an optional feedback note, then upload /// both logs and rollout with classification + metadata. 
@@ -338,7 +337,7 @@ fn feedback_classification(category: FeedbackCategory) -> &'static str { fn issue_url_for_category(category: FeedbackCategory, thread_id: &str) -> Option { match category { FeedbackCategory::Bug | FeedbackCategory::BadResult | FeedbackCategory::Other => Some( - format!("{BASE_BUG_ISSUE_URL}&steps=Uploaded%20thread:%20{thread_id}"), + format!("{BASE_BUG_ISSUE_URL}?steps=Uploaded%20thread:%20{thread_id}"), ), FeedbackCategory::GoodResult => None, } @@ -545,7 +544,7 @@ mod tests { assert!( bug_url .as_deref() - .is_some_and(|url| url.contains("template=2-bug-report")) + .is_some_and(|url| url.starts_with(BASE_BUG_ISSUE_URL)) ); let bad_result_url = issue_url_for_category(FeedbackCategory::BadResult, "thread-2"); diff --git a/codex-rs/tui/src/bottom_pane/mod.rs b/codex-rs/tui/src/bottom_pane/mod.rs index ea599698b96..21556463ce8 100644 --- a/codex-rs/tui/src/bottom_pane/mod.rs +++ b/codex-rs/tui/src/bottom_pane/mod.rs @@ -21,6 +21,7 @@ use std::time::Duration; mod approval_overlay; pub(crate) use approval_overlay::ApprovalOverlay; pub(crate) use approval_overlay::ApprovalRequest; +mod ask_user_question_overlay; mod bottom_pane_view; mod chat_composer; mod chat_composer_history; @@ -30,6 +31,8 @@ mod experimental_features_view; mod file_search_popup; mod footer; mod list_selection_view; +mod plan_approval_overlay; +mod plan_request_overlay; mod prompt_args; mod skill_popup; pub(crate) use list_selection_view::SelectionViewParams; @@ -56,10 +59,13 @@ pub(crate) use chat_composer::InputResult; use codex_protocol::custom_prompts::CustomPrompt; use crate::status_indicator_widget::StatusIndicatorWidget; +pub(crate) use ask_user_question_overlay::AskUserQuestionOverlay; pub(crate) use experimental_features_view::BetaFeatureItem; pub(crate) use experimental_features_view::ExperimentalFeaturesView; pub(crate) use list_selection_view::SelectionAction; pub(crate) use list_selection_view::SelectionItem; +pub(crate) use 
plan_approval_overlay::PlanApprovalOverlay; +pub(crate) use plan_request_overlay::PlanRequestOverlay; /// Pane displayed in the lower half of the chat UI. pub(crate) struct BottomPane { @@ -284,6 +290,23 @@ impl BottomPane { } } + pub(crate) fn update_status_detail_lines( + &mut self, + detail_lines: Vec>, + ) { + if let Some(status) = self.status.as_mut() { + status.set_detail_lines(detail_lines); + self.request_redraw(); + } + } + + pub(crate) fn clear_status_detail_lines(&mut self) { + if let Some(status) = self.status.as_mut() { + status.clear_detail_lines(); + self.request_redraw(); + } + } + pub(crate) fn show_ctrl_c_quit_hint(&mut self) { self.ctrl_c_quit_hint = true; self.composer diff --git a/codex-rs/tui/src/bottom_pane/plan_approval_overlay.rs b/codex-rs/tui/src/bottom_pane/plan_approval_overlay.rs new file mode 100644 index 00000000000..7490adc0c71 --- /dev/null +++ b/codex-rs/tui/src/bottom_pane/plan_approval_overlay.rs @@ -0,0 +1,622 @@ +use std::cell::RefCell; + +use crossterm::event::KeyCode; +use crossterm::event::KeyEvent; +use crossterm::event::KeyModifiers; +use ratatui::buffer::Buffer; +use ratatui::layout::Constraint; +use ratatui::layout::Layout; +use ratatui::layout::Rect; +use ratatui::style::Style; +use ratatui::style::Styled; +use ratatui::style::Stylize; +use ratatui::text::Line; +use ratatui::text::Span; +use ratatui::widgets::Block; +use ratatui::widgets::Clear; +use ratatui::widgets::Paragraph; +use ratatui::widgets::StatefulWidgetRef; +use ratatui::widgets::Widget; +use textwrap::wrap; +use unicode_width::UnicodeWidthStr; + +use codex_core::protocol::Op; +use codex_core::protocol::PlanApprovalRequestEvent; +use codex_core::protocol::PlanApprovalResponse; +use codex_core::protocol::PlanProposal; +use codex_protocol::plan_tool::PlanItemArg; +use codex_protocol::plan_tool::StepStatus; + +use crate::app_event::AppEvent; +use crate::app_event_sender::AppEventSender; +use crate::key_hint; +use crate::render::Insets; +use 
crate::render::RectExt as _; +use crate::render::line_utils::prefix_lines; +use crate::style::user_message_style; + +use super::CancellationEvent; +use super::bottom_pane_view::BottomPaneView; +use super::textarea::TextArea; +use super::textarea::TextAreaState; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum Mode { + Select, + FeedbackInput, +} + +const MAX_PLAN_APPROVAL_OVERLAY_ROWS: u16 = 44; +const FEEDBACK_BLOCK_HEIGHT: u16 = 8; + +pub(crate) struct PlanApprovalOverlay { + id: String, + proposal: PlanProposal, + mode: Mode, + scroll_top: usize, + selected_action: usize, + textarea: TextArea, + textarea_state: RefCell, + error: Option, + app_event_tx: AppEventSender, + complete: bool, +} + +impl PlanApprovalOverlay { + pub(crate) fn new( + id: String, + ev: PlanApprovalRequestEvent, + app_event_tx: AppEventSender, + ) -> Self { + Self { + id, + proposal: ev.proposal, + mode: Mode::Select, + scroll_top: 0, + selected_action: 0, + textarea: TextArea::new(), + textarea_state: RefCell::new(TextAreaState::default()), + error: None, + app_event_tx, + complete: false, + } + } + + fn finish(&mut self, response: PlanApprovalResponse) { + self.app_event_tx + .send(AppEvent::CodexOp(Op::ResolvePlanApproval { + id: self.id.clone(), + response, + })); + self.complete = true; + } + + fn other_text(&self) -> String { + self.textarea.text().trim().to_string() + } + + fn accept_selection(&mut self) { + match self.selected_action { + 0 => self.finish(PlanApprovalResponse::Approved), + 1 => { + self.mode = Mode::FeedbackInput; + self.error = None; + } + _ => self.finish(PlanApprovalResponse::Rejected), + } + } + + fn accept_feedback(&mut self) { + let feedback = self.other_text(); + if feedback.is_empty() { + self.error = Some("Feedback cannot be empty.".to_string()); + return; + } + self.finish(PlanApprovalResponse::Revised { feedback }); + } + + fn footer_hint(&self) -> Line<'static> { + match self.mode { + Mode::Select => Line::from(vec![ + "↑/↓ ".into(), + 
"scroll".bold(), + ", ".into(), + "←/→ ".into(), + "action".bold(), + ", ".into(), + key_hint::plain(KeyCode::Enter).into(), + " select, ".into(), + key_hint::plain(KeyCode::Esc).into(), + " reject".into(), + ]), + Mode::FeedbackInput => Line::from(vec![ + key_hint::plain(KeyCode::Enter).into(), + " submit, ".into(), + key_hint::plain(KeyCode::Esc).into(), + " back".into(), + ]), + } + } + + fn plan_lines(&self, width: u16) -> Vec> { + let usable_width = width.saturating_sub(4).max(1) as usize; + let mut lines = Vec::new(); + + lines.push( + vec![ + "[".into(), + "Plan".bold(), + "] ".into(), + self.proposal.title.clone().bold(), + ] + .into(), + ); + + let summary = self.proposal.summary.trim(); + if !summary.is_empty() { + lines.push(Line::from("")); + lines.push(Line::from("Summary:".bold())); + for raw_line in summary.lines() { + let raw_line = raw_line.trim_end(); + if raw_line.trim().is_empty() { + lines.push(Line::from("")); + continue; + } + for w in wrap(raw_line, usable_width) { + lines.push(Line::from(vec![" ".into(), w.into_owned().into()])); + } + } + } + + let explanation = self + .proposal + .plan + .explanation + .as_deref() + .unwrap_or_default() + .trim(); + if !explanation.is_empty() { + lines.push(Line::from("")); + lines.push(Line::from("Explanation:".bold())); + for raw_line in explanation.lines() { + let raw_line = raw_line.trim_end(); + if raw_line.trim().is_empty() { + lines.push(Line::from("")); + continue; + } + for w in wrap(raw_line, usable_width) { + lines.push(Line::from(vec![" ".into(), w.into_owned().into()])); + } + } + } + + lines.push(Line::from("")); + lines.push(Line::from("Steps:".bold())); + + let mut step_lines = Vec::new(); + if self.proposal.plan.plan.is_empty() { + step_lines.push(Line::from("(no steps provided)".dim().italic())); + } else { + for PlanItemArg { step, status } in &self.proposal.plan.plan { + step_lines.extend(render_step_lines(width, status, step.as_str())); + } + } + lines.extend(prefix_lines(step_lines, 
" ".into(), " ".into())); + + lines + } + + fn action_bar(&self) -> Line<'static> { + let selected = Style::default().cyan().bold(); + let normal = Style::default().dim(); + + let approve_style = if self.selected_action == 0 { + selected + } else { + normal + }; + let revise_style = if self.selected_action == 1 { + selected + } else { + normal + }; + let reject_style = if self.selected_action == 2 { + selected + } else { + normal + }; + + Line::from(vec![ + Span::from("[1] Approve").set_style(approve_style), + " ".into(), + Span::from("[2] Revise").set_style(revise_style), + " ".into(), + Span::from("[3] Reject").set_style(reject_style), + ]) + } + + fn move_action_left(&mut self) { + self.selected_action = self.selected_action.saturating_sub(1); + } + + fn move_action_right(&mut self) { + self.selected_action = (self.selected_action + 1).min(2); + } + + fn scroll_up(&mut self) { + self.scroll_top = self.scroll_top.saturating_sub(1); + } + + fn scroll_down(&mut self) { + self.scroll_top = self.scroll_top.saturating_add(1); + } + + fn page_up(&mut self) { + self.scroll_top = self.scroll_top.saturating_sub(8); + } + + fn page_down(&mut self) { + self.scroll_top = self.scroll_top.saturating_add(8); + } + + fn scroll_home(&mut self) { + self.scroll_top = 0; + } + + fn cursor_pos_for_feedback(&self, area: Rect) -> Option<(u16, u16)> { + if self.mode != Mode::FeedbackInput { + return None; + } + if area.height < 2 || area.width <= 2 { + return None; + } + let textarea_rect = self.textarea_rect(area); + let state = *self.textarea_state.borrow(); + self.textarea.cursor_pos_with_state(textarea_rect, state) + } + + fn textarea_rect(&self, area: Rect) -> Rect { + let inset = area.inset(Insets::vh(1, 2)); + Rect { + x: inset.x, + y: inset.y, + width: inset.width, + height: inset.height.clamp(1, 5), + } + } +} + +impl BottomPaneView for PlanApprovalOverlay { + fn handle_key_event(&mut self, key_event: KeyEvent) { + match self.mode { + Mode::Select => match key_event { + 
KeyEvent { + code: KeyCode::Left, + .. + } => self.move_action_left(), + KeyEvent { + code: KeyCode::Right, + .. + } => self.move_action_right(), + KeyEvent { + code: KeyCode::Up, .. + } + | KeyEvent { + code: KeyCode::Char('p'), + modifiers: KeyModifiers::CONTROL, + .. + } + | KeyEvent { + code: KeyCode::Char('\u{0010}'), + modifiers: KeyModifiers::NONE, + .. + } => self.scroll_up(), + KeyEvent { + code: KeyCode::Char('k'), + modifiers: KeyModifiers::NONE, + .. + } => self.scroll_up(), + KeyEvent { + code: KeyCode::Down, + .. + } + | KeyEvent { + code: KeyCode::Char('n'), + modifiers: KeyModifiers::CONTROL, + .. + } + | KeyEvent { + code: KeyCode::Char('\u{000e}'), + modifiers: KeyModifiers::NONE, + .. + } => self.scroll_down(), + KeyEvent { + code: KeyCode::Char('j'), + modifiers: KeyModifiers::NONE, + .. + } => self.scroll_down(), + KeyEvent { + code: KeyCode::PageUp, + .. + } => self.page_up(), + KeyEvent { + code: KeyCode::PageDown, + .. + } => self.page_down(), + KeyEvent { + code: KeyCode::Home, + .. + } => self.scroll_home(), + KeyEvent { + code: KeyCode::Esc, .. + } => { + self.finish(PlanApprovalResponse::Rejected); + } + KeyEvent { + code: KeyCode::Char(c), + modifiers, + .. + } if !modifiers.contains(KeyModifiers::CONTROL) + && !modifiers.contains(KeyModifiers::ALT) => + { + if let Some(idx) = c + .to_digit(10) + .map(|d| d as usize) + .and_then(|d| d.checked_sub(1)) + && idx <= 2 + { + self.selected_action = idx; + self.accept_selection(); + } + } + KeyEvent { + code: KeyCode::Enter, + modifiers: KeyModifiers::NONE, + .. + } => self.accept_selection(), + _ => {} + }, + Mode::FeedbackInput => match key_event { + KeyEvent { + code: KeyCode::Esc, .. + } => { + self.mode = Mode::Select; + self.error = None; + } + KeyEvent { + code: KeyCode::Enter, + modifiers: KeyModifiers::NONE, + .. + } => self.accept_feedback(), + KeyEvent { + code: KeyCode::Enter, + .. 
+ } => { + self.textarea.input(key_event); + } + other => { + self.textarea.input(other); + } + }, + } + } + + fn on_ctrl_c(&mut self) -> CancellationEvent { + self.finish(PlanApprovalResponse::Rejected); + CancellationEvent::Handled + } + + fn is_complete(&self) -> bool { + self.complete + } + + fn handle_paste(&mut self, pasted: String) -> bool { + if self.mode != Mode::FeedbackInput { + return false; + } + if pasted.is_empty() { + return false; + } + self.textarea.insert_str(&pasted); + true + } +} + +impl crate::render::renderable::Renderable for PlanApprovalOverlay { + fn desired_height(&self, width: u16) -> u16 { + let plan_lines = self.plan_lines(width); + let plan_height = u16::try_from(plan_lines.len()).unwrap_or(u16::MAX); + + let mut total = 2u16; // outer padding + total = total.saturating_add(1); // action bar + total = total.saturating_add(1); // footer hint + total = total.saturating_add(plan_height.max(4)); + if self.mode == Mode::FeedbackInput { + total = total.saturating_add(FEEDBACK_BLOCK_HEIGHT); + } + total.clamp(8, MAX_PLAN_APPROVAL_OVERLAY_ROWS) + } + + fn cursor_pos(&self, area: Rect) -> Option<(u16, u16)> { + self.cursor_pos_for_feedback(area) + } + + fn render(&self, area: Rect, buf: &mut Buffer) { + if area.height == 0 || area.width == 0 { + return; + } + + Clear.render(area, buf); + Block::default() + .style(user_message_style()) + .render(area, buf); + + let [content_area, footer_area] = + Layout::vertical([Constraint::Fill(1), Constraint::Length(1)]).areas(area); + let inset = content_area.inset(Insets::vh(1, 2)); + + match self.mode { + Mode::Select => { + let [plan_area, actions_area] = + Layout::vertical([Constraint::Fill(1), Constraint::Length(1)]).areas(inset); + + let plan_lines = self.plan_lines(plan_area.width); + let max_scroll = plan_lines.len().saturating_sub(plan_area.height as usize); + let scroll = self.scroll_top.min(max_scroll) as u16; + Paragraph::new(plan_lines) + .scroll((scroll, 0)) + .render(plan_area, buf); + + 
self.action_bar().render(actions_area, buf); + } + Mode::FeedbackInput => { + let [plan_area, feedback_area] = Layout::vertical([ + Constraint::Fill(1), + Constraint::Length(FEEDBACK_BLOCK_HEIGHT), + ]) + .areas(inset); + + let plan_lines = self.plan_lines(plan_area.width); + let max_scroll = plan_lines.len().saturating_sub(plan_area.height as usize); + let scroll = self.scroll_top.min(max_scroll) as u16; + Paragraph::new(plan_lines) + .scroll((scroll, 0)) + .render(plan_area, buf); + + let label_area = Rect { + x: feedback_area.x, + y: feedback_area.y, + width: feedback_area.width, + height: 1, + }; + Paragraph::new(Line::from(vec![ + Span::from("Feedback: ").bold(), + "(press Enter to submit)".dim(), + ])) + .render(label_area, buf); + + if let Some(err) = &self.error { + let err_area = Rect { + x: feedback_area.x, + y: feedback_area.y.saturating_add(1), + width: feedback_area.width, + height: 1, + }; + Line::from(err.clone().red()).render(err_area, buf); + } + + let input_outer = Rect { + x: feedback_area.x, + y: feedback_area.y.saturating_add(2), + width: feedback_area.width, + height: feedback_area.height.saturating_sub(2).max(1), + }; + let textarea_rect = self.textarea_rect(input_outer); + let mut state = self.textarea_state.borrow_mut(); + StatefulWidgetRef::render_ref(&(&self.textarea), textarea_rect, buf, &mut state); + if self.textarea.text().is_empty() { + Paragraph::new(Line::from("Type your feedback…".dim())) + .render(textarea_rect, buf); + } + } + } + + let hint_area = Rect { + x: footer_area.x.saturating_add(2), + y: footer_area.y, + width: footer_area.width.saturating_sub(2), + height: 1, + }; + self.footer_hint().dim().render(hint_area, buf); + } +} + +fn render_step_lines(width: u16, status: &StepStatus, text: &str) -> Vec> { + let (box_str, step_style) = match status { + StepStatus::Completed => ("[x] ", Style::default().crossed_out().dim()), + StepStatus::InProgress => ("[~] ", Style::default().cyan().bold()), + StepStatus::Pending => ("[ ] ", 
Style::default().dim()), + }; + let wrap_width = (width as usize) + .saturating_sub(4) + .saturating_sub(box_str.width()) + .max(1); + let parts = wrap(text, wrap_width); + let lines: Vec> = parts + .into_iter() + .map(|s| Line::from(Span::from(s.into_owned()).set_style(step_style))) + .collect(); + prefix_lines(lines, box_str.into(), " ".into()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::app_event::AppEvent; + use crate::render::renderable::Renderable; + use pretty_assertions::assert_eq; + use tokio::sync::mpsc::unbounded_channel; + + fn make_proposal(summary: &str, steps: usize) -> PlanProposal { + PlanProposal { + title: "Plan title".to_string(), + summary: summary.to_string(), + plan: codex_protocol::plan_tool::UpdatePlanArgs { + explanation: None, + plan: (0..steps) + .map(|i| PlanItemArg { + step: format!("step {}", i + 1), + status: StepStatus::Pending, + }) + .collect(), + }, + } + } + + fn render_to_lines(view: &PlanApprovalOverlay, width: u16) -> Vec { + let height = view.desired_height(width); + let mut buf = Buffer::empty(Rect::new(0, 0, width, height)); + view.render(Rect::new(0, 0, width, height), &mut buf); + (0..buf.area.height) + .map(|row| { + (0..buf.area.width) + .map(|col| buf[(col, row)].symbol().to_string()) + .collect() + }) + .collect() + } + + #[test] + fn summary_label_is_only_rendered_once() { + let (tx, _rx) = unbounded_channel::(); + let tx = AppEventSender::new(tx); + let ev = PlanApprovalRequestEvent { + call_id: "call-1".to_string(), + proposal: make_proposal( + "A summary that should wrap across multiple lines but should only show a single label.", + 1, + ), + }; + let view = PlanApprovalOverlay::new("id-1".to_string(), ev, tx); + + let rendered = render_to_lines(&view, 40); + let label_count = rendered + .iter() + .filter(|line| line.contains("Summary:")) + .count(); + assert_eq!(label_count, 1); + } + + #[test] + fn desired_height_clamps_to_max_rows_for_long_plans() { + let (tx, _rx) = unbounded_channel::(); + 
let tx = AppEventSender::new(tx); + let ev = PlanApprovalRequestEvent { + call_id: "call-1".to_string(), + proposal: make_proposal("short summary", 200), + }; + let view = PlanApprovalOverlay::new("id-1".to_string(), ev, tx); + + assert_eq!(view.desired_height(80), MAX_PLAN_APPROVAL_OVERLAY_ROWS); + } +} diff --git a/codex-rs/tui/src/bottom_pane/plan_request_overlay.rs b/codex-rs/tui/src/bottom_pane/plan_request_overlay.rs new file mode 100644 index 00000000000..e4c124ee3f7 --- /dev/null +++ b/codex-rs/tui/src/bottom_pane/plan_request_overlay.rs @@ -0,0 +1,269 @@ +use std::cell::RefCell; + +use crossterm::event::KeyCode; +use crossterm::event::KeyEvent; +use crossterm::event::KeyModifiers; +use ratatui::buffer::Buffer; +use ratatui::layout::Constraint; +use ratatui::layout::Layout; +use ratatui::layout::Rect; +use ratatui::style::Stylize; +use ratatui::text::Line; +use ratatui::widgets::Block; +use ratatui::widgets::Clear; +use ratatui::widgets::Paragraph; +use ratatui::widgets::StatefulWidgetRef; +use ratatui::widgets::Widget; + +use codex_core::protocol::Op; +use codex_core::protocol::PlanRequest; + +use crate::app_event::AppEvent; +use crate::app_event_sender::AppEventSender; +use crate::key_hint; +use crate::render::Insets; +use crate::render::RectExt as _; +use crate::style::user_message_style; + +use super::CancellationEvent; +use super::bottom_pane_view::BottomPaneView; +use super::textarea::TextArea; +use super::textarea::TextAreaState; + +struct PlanRequestOverlayLayout { + header_lines: Vec>, + header_area: Rect, + textarea_rect: Rect, + hint_area: Rect, +} + +pub(crate) struct PlanRequestOverlay { + textarea: TextArea, + textarea_state: RefCell, + error: Option, + app_event_tx: AppEventSender, + complete: bool, +} + +impl PlanRequestOverlay { + pub(crate) fn new(app_event_tx: AppEventSender) -> Self { + Self { + textarea: TextArea::new(), + textarea_state: RefCell::new(TextAreaState::default()), + error: None, + app_event_tx, + complete: false, + } + } + 
+ fn goal_text(&self) -> String { + self.textarea.text().trim().to_string() + } + + fn header_lines(&self) -> Vec> { + let mut lines = vec![Line::from(vec![ + "[".into(), + "Plan Mode".bold(), + "] ".into(), + "Describe what you want to do.".into(), + ])]; + if let Some(err) = &self.error { + lines.push(Line::from("")); + lines.push(Line::from(err.clone().red())); + } + lines + } + + fn layout(&self, area: Rect) -> PlanRequestOverlayLayout { + let [content_area, footer_area] = + Layout::vertical([Constraint::Fill(1), Constraint::Length(1)]).areas(area); + let inset = content_area.inset(Insets::vh(1, 2)); + + let header_lines = self.header_lines(); + let header_height = header_lines.len() as u16; + let [header_area, body_area] = + Layout::vertical([Constraint::Length(header_height), Constraint::Fill(1)]).areas(inset); + + let hint_area = Rect { + x: footer_area.x.saturating_add(2), + y: footer_area.y, + width: footer_area.width.saturating_sub(2), + height: 1, + }; + + PlanRequestOverlayLayout { + header_lines, + header_area, + textarea_rect: self.textarea_rect(body_area), + hint_area, + } + } + + fn submit(&mut self) { + let goal = self.goal_text(); + if goal.is_empty() { + self.error = Some("Goal cannot be empty.".to_string()); + return; + } + self.app_event_tx.send(AppEvent::CodexOp(Op::Plan { + plan_request: PlanRequest { goal }, + })); + self.complete = true; + } + + fn footer_hint(&self) -> Line<'static> { + Line::from(vec![ + key_hint::plain(KeyCode::Enter).into(), + " submit, ".into(), + key_hint::shift(KeyCode::Enter).into(), + "/".into(), + key_hint::ctrl(KeyCode::Char('j')).into(), + " newline, ".into(), + key_hint::plain(KeyCode::Esc).into(), + " cancel".into(), + ]) + } + + fn textarea_rect(&self, area: Rect) -> Rect { + let inset = area.inset(Insets::vh(1, 2)); + Rect { + x: inset.x, + y: inset.y, + width: inset.width, + height: inset.height.clamp(1, 6), + } + } +} + +impl BottomPaneView for PlanRequestOverlay { + fn handle_key_event(&mut self, 
key_event: KeyEvent) { + match key_event { + KeyEvent { + code: KeyCode::Esc, .. + } => { + self.complete = true; + } + KeyEvent { + code: KeyCode::Enter, + modifiers: KeyModifiers::NONE, + .. + } => self.submit(), + KeyEvent { + code: KeyCode::Enter, + .. + } => { + self.textarea.input(key_event); + } + other => { + self.textarea.input(other); + } + } + } + + fn on_ctrl_c(&mut self) -> CancellationEvent { + self.complete = true; + CancellationEvent::Handled + } + + fn is_complete(&self) -> bool { + self.complete + } + + fn handle_paste(&mut self, pasted: String) -> bool { + if pasted.is_empty() { + return false; + } + self.textarea.insert_str(&pasted); + true + } +} + +impl crate::render::renderable::Renderable for PlanRequestOverlay { + fn desired_height(&self, _width: u16) -> u16 { + 10 + } + + fn cursor_pos(&self, area: Rect) -> Option<(u16, u16)> { + if area.height < 2 || area.width <= 2 { + return None; + } + let textarea_rect = self.layout(area).textarea_rect; + let state = *self.textarea_state.borrow(); + self.textarea.cursor_pos_with_state(textarea_rect, state) + } + + fn render(&self, area: Rect, buf: &mut Buffer) { + if area.height == 0 || area.width == 0 { + return; + } + + let layout = self.layout(area); + + Clear.render(area, buf); + Block::default() + .style(user_message_style()) + .render(area, buf); + + Paragraph::new(layout.header_lines).render(layout.header_area, buf); + + let textarea_rect = layout.textarea_rect; + let mut state = self.textarea_state.borrow_mut(); + StatefulWidgetRef::render_ref(&(&self.textarea), textarea_rect, buf, &mut state); + if self.textarea.text().is_empty() { + Paragraph::new(Line::from( + "e.g. 
\"Add pagination to search results\"".dim(), + )) + .render(textarea_rect, buf); + } + + self.footer_hint().dim().render(layout.hint_area, buf); + } +} + +#[cfg(test)] +mod tests { + use crate::render::renderable::Renderable as _; + + use super::*; + + #[test] + fn cursor_pos_accounts_for_header_and_insets() { + let (app_event_tx, _app_event_rx) = tokio::sync::mpsc::unbounded_channel(); + let overlay = PlanRequestOverlay::new(AppEventSender::new(app_event_tx)); + assert_eq!(overlay.cursor_pos(Rect::new(0, 0, 80, 10)), Some((4, 3))); + } + + #[test] + fn cursor_pos_accounts_for_error_header_height() { + let (app_event_tx, _app_event_rx) = tokio::sync::mpsc::unbounded_channel(); + let mut overlay = PlanRequestOverlay::new(AppEventSender::new(app_event_tx)); + overlay.error = Some("Goal cannot be empty.".to_string()); + assert_eq!(overlay.cursor_pos(Rect::new(0, 0, 80, 10)), Some((4, 5))); + } + + #[test] + fn plan_request_overlay_supports_multiline_goal_entry() { + let (app_event_tx, mut app_event_rx) = tokio::sync::mpsc::unbounded_channel(); + let mut overlay = PlanRequestOverlay::new(AppEventSender::new(app_event_tx)); + + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('a'), KeyModifiers::NONE)); + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('j'), KeyModifiers::CONTROL)); + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('b'), KeyModifiers::NONE)); + overlay.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::SHIFT)); + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('c'), KeyModifiers::NONE)); + + assert_eq!(overlay.goal_text(), "a\nb\nc"); + assert!(!overlay.is_complete()); + + overlay.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + assert!(overlay.is_complete()); + + let ev = app_event_rx.try_recv().expect("plan op"); + match ev { + AppEvent::CodexOp(Op::Plan { plan_request }) => { + assert_eq!(plan_request.goal, "a\nb\nc"); + } + other => panic!("unexpected event: {other:?}"), + } + } +} diff --git 
a/codex-rs/tui/src/chatwidget.rs b/codex-rs/tui/src/chatwidget.rs index f38bcd783a7..6551707d356 100644 --- a/codex-rs/tui/src/chatwidget.rs +++ b/codex-rs/tui/src/chatwidget.rs @@ -2,6 +2,7 @@ use std::collections::HashMap; use std::collections::HashSet; use std::collections::VecDeque; use std::path::PathBuf; +use std::str::FromStr; use std::sync::Arc; use std::time::Duration; @@ -23,6 +24,7 @@ use codex_core::protocol::AgentReasoningEvent; use codex_core::protocol::AgentReasoningRawContentDeltaEvent; use codex_core::protocol::AgentReasoningRawContentEvent; use codex_core::protocol::ApplyPatchApprovalRequestEvent; +use codex_core::protocol::AskUserQuestionRequestEvent; use codex_core::protocol::BackgroundEventEvent; use codex_core::protocol::CreditsSnapshot; use codex_core::protocol::DeprecationNoticeEvent; @@ -33,6 +35,7 @@ use codex_core::protocol::ExecApprovalRequestEvent; use codex_core::protocol::ExecCommandBeginEvent; use codex_core::protocol::ExecCommandEndEvent; use codex_core::protocol::ExecCommandSource; +use codex_core::protocol::ExitedPlanModeEvent; use codex_core::protocol::ExitedReviewModeEvent; use codex_core::protocol::ListCustomPromptsResponseEvent; use codex_core::protocol::ListSkillsResponseEvent; @@ -44,6 +47,8 @@ use codex_core::protocol::McpToolCallBeginEvent; use codex_core::protocol::McpToolCallEndEvent; use codex_core::protocol::Op; use codex_core::protocol::PatchApplyBeginEvent; +use codex_core::protocol::PlanApprovalRequestEvent; +use codex_core::protocol::PlanRequest; use codex_core::protocol::RateLimitSnapshot; use codex_core::protocol::ReviewRequest; use codex_core::protocol::ReviewTarget; @@ -87,12 +92,15 @@ use tracing::debug; use crate::app_event::AppEvent; use crate::app_event_sender::AppEventSender; use crate::bottom_pane::ApprovalRequest; +use crate::bottom_pane::AskUserQuestionOverlay; use crate::bottom_pane::BetaFeatureItem; use crate::bottom_pane::BottomPane; use crate::bottom_pane::BottomPaneParams; use 
crate::bottom_pane::CancellationEvent; use crate::bottom_pane::ExperimentalFeaturesView; use crate::bottom_pane::InputResult; +use crate::bottom_pane::PlanApprovalOverlay; +use crate::bottom_pane::PlanRequestOverlay; use crate::bottom_pane::SelectionAction; use crate::bottom_pane::SelectionItem; use crate::bottom_pane::SelectionViewParams; @@ -315,6 +323,7 @@ pub(crate) struct ChatWidget { token_info: Option, rate_limit_snapshot: Option, plan_type: Option, + last_plan_update_key: Option, rate_limit_warnings: RateLimitWarningState, rate_limit_switch_prompt: RateLimitSwitchPromptState, rate_limit_poller: Option>, @@ -336,6 +345,7 @@ pub(crate) struct ChatWidget { current_status_header: String, // Previous status header to restore after a transient stream retry. retry_status_header: Option, + plan_variants_progress: Option, conversation_id: Option, frame_requester: FrameRequester, // Whether to include the initial welcome banner on session configured @@ -366,6 +376,127 @@ struct UserMessage { image_paths: Vec, } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum ProgressStatus { + Pending, + InProgress, + Completed, +} + +#[derive(Debug, Clone)] +struct PlanVariantsProgress { + total: usize, + steps: Vec, + durations: Vec>, + last_activity: Vec>, + tokens: Vec>, +} + +impl PlanVariantsProgress { + fn new(total: usize) -> Self { + Self { + total, + steps: vec![ProgressStatus::Pending; total], + durations: vec![None; total], + last_activity: vec![None; total], + tokens: vec![None; total], + } + } + + fn variant_label(&self, idx: usize) -> String { + if self.total == 3 { + match idx { + 0 => "Minimal".to_string(), + 1 => "Correctness".to_string(), + 2 => "DX".to_string(), + _ => format!("Variant {}/{}", idx + 1, self.total), + } + } else { + format!("Variant {}/{}", idx + 1, self.total) + } + } + + fn set_in_progress(&mut self, idx: usize) { + if idx < self.steps.len() { + self.steps[idx] = ProgressStatus::InProgress; + } + } + + fn set_completed(&mut self, idx: usize) 
{ + if idx < self.steps.len() { + self.steps[idx] = ProgressStatus::Completed; + } + } + + fn set_duration(&mut self, idx: usize, duration: Option) { + if idx < self.durations.len() { + self.durations[idx] = duration; + } + } + + fn set_activity(&mut self, idx: usize, activity: Option) { + if idx < self.last_activity.len() { + self.last_activity[idx] = activity; + } + } + + fn set_tokens(&mut self, idx: usize, tokens: Option) { + if idx < self.tokens.len() { + self.tokens[idx] = tokens; + } + } + + fn render_detail_lines(&self) -> Vec> { + use ratatui::style::Stylize; + let mut lines = Vec::with_capacity(self.total); + for (idx, status) in self.steps.iter().copied().enumerate() { + let label = self.variant_label(idx); + let status_span = match status { + ProgressStatus::Pending => "○".dim(), + ProgressStatus::InProgress => "●".cyan(), + ProgressStatus::Completed => "✓".green(), + }; + + let mut spans = vec![" ".into(), status_span, " ".into(), label.into()]; + let duration = self.durations.get(idx).and_then(|d| d.as_deref()); + let tokens = self.tokens.get(idx).and_then(|t| t.as_deref()); + if duration.is_some() || tokens.is_some() { + let mut meta = String::new(); + meta.push('('); + if let Some(duration) = duration { + meta.push_str(duration); + } + if let Some(tokens) = tokens { + if duration.is_some() { + meta.push_str(", "); + } + meta.push_str(tokens); + meta.push_str(" tok"); + } + meta.push(')'); + spans.push(" ".into()); + spans.push(meta.dim()); + } + if status == ProgressStatus::Completed { + spans.push(" ".into()); + spans.push("—".dim()); + spans.push(" ".into()); + spans.push("done".dim()); + } else if let Some(activity) = self.last_activity.get(idx).and_then(|a| a.as_deref()) { + let activity = activity.strip_prefix("shell ").unwrap_or(activity).trim(); + if !activity.is_empty() { + spans.push(" ".into()); + spans.push("—".dim()); + spans.push(" ".into()); + spans.push(activity.to_string().dim()); + } + } + lines.push(spans.into()); + } + lines + } 
+} + impl From for UserMessage { fn from(text: String) -> Self { Self { @@ -393,6 +524,25 @@ fn create_initial_user_message(text: String, image_paths: Vec) -> Optio } impl ChatWidget { + fn prepare_for_immediate_interrupt(&mut self) { + if self.stream_controller.is_some() { + self.flush_answer_stream_with_separator(); + } + if !self.interrupts.is_empty() { + self.flush_interrupt_queue(); + } + } + + fn prepare_for_immediate_interrupt_discard_stream(&mut self) { + if self.stream_controller.is_some() { + self.stream_controller = None; + self.app_event_tx.send(AppEvent::StopCommitAnimation); + } + if !self.interrupts.is_empty() { + self.flush_interrupt_queue(); + } + } + fn flush_answer_stream_with_separator(&mut self) { if let Some(mut controller) = self.stream_controller.take() && let Some(cell) = controller.finalize() @@ -402,10 +552,22 @@ impl ChatWidget { } fn set_status_header(&mut self, header: String) { + if self.plan_variants_progress.is_some() && header != "Planning plan variants" { + self.plan_variants_progress = None; + self.clear_status_detail_lines(); + } self.current_status_header = header.clone(); self.bottom_pane.update_status_header(header); } + fn set_status_detail_lines(&mut self, lines: Vec>) { + self.bottom_pane.update_status_detail_lines(lines); + } + + fn clear_status_detail_lines(&mut self) { + self.bottom_pane.clear_status_detail_lines(); + } + fn restore_retry_status_header_if_present(&mut self) { if let Some(header) = self.retry_status_header.take() && self.current_status_header != header @@ -548,6 +710,7 @@ impl ChatWidget { self.bottom_pane.clear_ctrl_c_quit_hint(); self.bottom_pane.set_task_running(true); self.retry_status_header = None; + self.plan_variants_progress = None; self.bottom_pane.set_interrupt_hint_visible(true); self.set_status_header(String::from("Working")); self.full_reasoning_buffer.clear(); @@ -825,10 +988,18 @@ impl ChatWidget { } fn on_plan_update(&mut self, update: UpdatePlanArgs) { + let update_key = 
serde_json::to_string(&update).ok(); + if let Some(key) = update_key.as_deref() + && self.last_plan_update_key.as_deref() == Some(key) + { + return; + } + self.last_plan_update_key = update_key; self.add_to_history(history_cell::new_plan_update(update)); } fn on_exec_approval_request(&mut self, id: String, ev: ExecApprovalRequestEvent) { + self.prepare_for_immediate_interrupt(); let id2 = id.clone(); let ev2 = ev.clone(); self.defer_or_handle( @@ -838,6 +1009,7 @@ impl ChatWidget { } fn on_apply_patch_approval_request(&mut self, id: String, ev: ApplyPatchApprovalRequestEvent) { + self.prepare_for_immediate_interrupt(); let id2 = id.clone(); let ev2 = ev.clone(); self.defer_or_handle( @@ -847,6 +1019,7 @@ impl ChatWidget { } fn on_elicitation_request(&mut self, ev: ElicitationRequestEvent) { + self.prepare_for_immediate_interrupt(); let ev2 = ev.clone(); self.defer_or_handle( |q| q.push_elicitation(ev), @@ -854,6 +1027,26 @@ impl ChatWidget { ); } + fn on_ask_user_question_request(&mut self, id: String, ev: AskUserQuestionRequestEvent) { + self.prepare_for_immediate_interrupt_discard_stream(); + let id2 = id.clone(); + let ev2 = ev.clone(); + self.defer_or_handle( + |q| q.push_ask_user_question(id, ev), + |s| s.handle_ask_user_question_request_now(id2, ev2), + ); + } + + fn on_plan_approval_request(&mut self, id: String, ev: PlanApprovalRequestEvent) { + self.prepare_for_immediate_interrupt_discard_stream(); + let id2 = id.clone(); + let ev2 = ev.clone(); + self.defer_or_handle( + |q| q.push_plan_approval(id, ev), + |s| s.handle_plan_approval_request_now(id2, ev2), + ); + } + fn on_exec_command_begin(&mut self, ev: ExecCommandBeginEvent) { self.flush_answer_stream_with_separator(); if is_unified_exec_source(ev.source) { @@ -977,10 +1170,7 @@ impl ChatWidget { fn on_web_search_end(&mut self, ev: WebSearchEndEvent) { self.flush_answer_stream_with_separator(); - self.add_to_history(history_cell::new_web_search_call(format!( - "Searched: {}", - ev.query - ))); + 
self.add_to_history(history_cell::new_web_search_call(ev.query)); } fn on_get_history_entry_response( @@ -1014,9 +1204,121 @@ impl ChatWidget { debug!("BackgroundEvent: {message}"); self.bottom_pane.ensure_status_indicator(); self.bottom_pane.set_interrupt_hint_visible(true); + + if let Some(progress) = self.maybe_update_plan_variants_progress(message.as_str()) { + self.plan_variants_progress = Some(progress); + self.set_status_header("Planning plan variants".to_string()); + self.set_status_detail_lines( + self.plan_variants_progress + .as_ref() + .map(PlanVariantsProgress::render_detail_lines) + .unwrap_or_default(), + ); + return; + } + + self.plan_variants_progress = None; + self.clear_status_detail_lines(); self.set_status_header(message); } + fn maybe_update_plan_variants_progress( + &mut self, + message: &str, + ) -> Option { + let message = message.trim(); + if message.starts_with("Plan variants:") { + // Expected shapes: + // - "Plan variants: generating 1/3…" + // - "Plan variants: finished 1/3 (12.3s)" + let tokens: Vec<&str> = message.split_whitespace().collect(); + if tokens.len() < 4 { + return None; + } + + let action = tokens.get(2).copied()?; + let fraction = tokens.get(3).copied()?; + let fraction = fraction.trim_end_matches('…'); + let (idx_str, total_str) = fraction.split_once('/')?; + let idx = usize::from_str(idx_str).ok()?.saturating_sub(1); + let total = usize::from_str(total_str).ok()?; + if total == 0 { + return None; + } + + let duration = message + .find('(') + .and_then(|start| message.rfind(')').map(|end| (start, end))) + .and_then(|(start, end)| { + if end > start + 1 { + Some(message[start + 1..end].to_string()) + } else { + None + } + }); + + let mut progress = self + .plan_variants_progress + .clone() + .filter(|p| p.total == total) + .unwrap_or_else(|| PlanVariantsProgress::new(total)); + + match action { + "generating" => { + progress.set_in_progress(idx); + progress.set_duration(idx, None); + } + "finished" => { + 
progress.set_completed(idx); + progress.set_duration(idx, duration); + progress.set_activity(idx, None); + } + _ => return None, + } + + return Some(progress); + } + + if let Some(rest) = message.strip_prefix("Plan variant ") { + // Expected shape: + // - "Plan variant 2/3: rg -n ..." + // - "Plan variant 2/3: shell rg -n ..." (legacy) + let (fraction, activity) = rest.split_once(':')?; + let fraction = fraction.trim(); + let (idx_str, total_str) = fraction.split_once('/')?; + let idx = usize::from_str(idx_str).ok()?.saturating_sub(1); + let total = usize::from_str(total_str).ok()?; + if total == 0 { + return None; + } + + let mut progress = self + .plan_variants_progress + .clone() + .filter(|p| p.total == total) + .unwrap_or_else(|| PlanVariantsProgress::new(total)); + + if idx < progress.steps.len() && progress.steps[idx] == ProgressStatus::Pending { + progress.set_in_progress(idx); + } + + let activity = activity.trim(); + if let Some(tokens) = activity.strip_prefix("tokens ") { + progress.set_tokens(idx, Some(tokens.trim().to_string())); + } else { + let activity = activity.strip_prefix("shell ").unwrap_or(activity).trim(); + if activity.is_empty() { + progress.set_activity(idx, None); + } else { + progress.set_activity(idx, Some(activity.to_string())); + } + } + return Some(progress); + } + + None + } + fn on_undo_started(&mut self, event: UndoStartedEvent) { self.bottom_pane.ensure_status_indicator(); self.bottom_pane.set_interrupt_hint_visible(false); @@ -1243,6 +1545,36 @@ impl ChatWidget { self.request_redraw(); } + pub(crate) fn handle_ask_user_question_request_now( + &mut self, + id: String, + ev: AskUserQuestionRequestEvent, + ) { + self.flush_answer_stream_with_separator(); + self.bottom_pane + .show_view(Box::new(AskUserQuestionOverlay::new( + id, + ev, + self.app_event_tx.clone(), + ))); + self.request_redraw(); + } + + pub(crate) fn handle_plan_approval_request_now( + &mut self, + id: String, + ev: PlanApprovalRequestEvent, + ) { + 
self.flush_answer_stream_with_separator(); + self.bottom_pane + .show_view(Box::new(PlanApprovalOverlay::new( + id, + ev, + self.app_event_tx.clone(), + ))); + self.request_redraw(); + } + pub(crate) fn handle_exec_begin_now(&mut self, ev: ExecCommandBeginEvent) { // Ensure the status indicator is visible while the command runs. self.running_commands.insert( @@ -1373,7 +1705,7 @@ impl ChatWidget { let placeholder = EXAMPLE_PROMPTS[rng.random_range(0..EXAMPLE_PROMPTS.len())].to_string(); let codex_op_tx = spawn_agent(config.clone(), app_event_tx.clone(), conversation_manager); - let mut widget = Self { + Self { app_event_tx: app_event_tx.clone(), frame_requester: frame_requester.clone(), codex_op_tx, @@ -1400,6 +1732,7 @@ impl ChatWidget { token_info: None, rate_limit_snapshot: None, plan_type: None, + last_plan_update_key: None, rate_limit_warnings: RateLimitWarningState::default(), rate_limit_switch_prompt: RateLimitSwitchPromptState::default(), rate_limit_poller: None, @@ -1413,8 +1746,9 @@ impl ChatWidget { interrupts: InterruptManager::new(), reasoning_buffer: String::new(), full_reasoning_buffer: String::new(), - current_status_header: String::from("Working"), + current_status_header: String::from("Ready"), retry_status_header: None, + plan_variants_progress: None, conversation_id: None, queued_user_messages: VecDeque::new(), show_welcome_banner: is_first_run, @@ -1426,11 +1760,7 @@ impl ChatWidget { last_rendered_width: std::cell::Cell::new(None), feedback, current_rollout_path: None, - }; - - widget.prefetch_rate_limits(); - - widget + } } /// Create a ChatWidget attached to an existing conversation (e.g., a fork). 
@@ -1459,7 +1789,7 @@ impl ChatWidget { let codex_op_tx = spawn_agent_from_existing(conversation, session_configured, app_event_tx.clone()); - let mut widget = Self { + Self { app_event_tx: app_event_tx.clone(), frame_requester: frame_requester.clone(), codex_op_tx, @@ -1486,6 +1816,7 @@ impl ChatWidget { token_info: None, rate_limit_snapshot: None, plan_type: None, + last_plan_update_key: None, rate_limit_warnings: RateLimitWarningState::default(), rate_limit_switch_prompt: RateLimitSwitchPromptState::default(), rate_limit_poller: None, @@ -1499,8 +1830,9 @@ impl ChatWidget { interrupts: InterruptManager::new(), reasoning_buffer: String::new(), full_reasoning_buffer: String::new(), - current_status_header: String::from("Working"), + current_status_header: String::from("Ready"), retry_status_header: None, + plan_variants_progress: None, conversation_id: None, queued_user_messages: VecDeque::new(), show_welcome_banner: false, @@ -1512,11 +1844,7 @@ impl ChatWidget { last_rendered_width: std::cell::Cell::new(None), feedback, current_rollout_path: None, - }; - - widget.prefetch_rate_limits(); - - widget + } } pub(crate) fn handle_key_event(&mut self, key_event: KeyEvent) { @@ -1653,9 +1981,17 @@ impl ChatWidget { SlashCommand::Review => { self.open_review_popup(); } + SlashCommand::Plan => { + self.bottom_pane + .show_view(Box::new(PlanRequestOverlay::new(self.app_event_tx.clone()))); + self.request_redraw(); + } SlashCommand::Model => { self.open_model_popup(); } + SlashCommand::PlanModel => { + self.open_plan_model_popup(); + } SlashCommand::Approvals => { self.open_approvals_popup(); } @@ -1852,6 +2188,7 @@ impl ChatWidget { } } + self.prefetch_rate_limits(); self.codex_op_tx .send(Op::UserInput { items }) .unwrap_or_else(|e| { @@ -1931,10 +2268,11 @@ impl ChatWidget { self.on_agent_reasoning_final(); } EventMsg::AgentReasoningSectionBreak(_) => self.on_reasoning_section_break(), - EventMsg::TaskStarted(_) => self.on_task_started(), - 
EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }) => { + EventMsg::TaskStarted(_) if !from_replay => self.on_task_started(), + EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }) if !from_replay => { self.on_task_complete(last_agent_message) } + EventMsg::TaskStarted(_) | EventMsg::TaskComplete(_) => {} EventMsg::TokenCount(ev) => { self.set_token_info(ev.info); self.on_rate_limit_snapshot(ev.rate_limits); @@ -1965,6 +2303,12 @@ impl ChatWidget { EventMsg::ElicitationRequest(ev) => { self.on_elicitation_request(ev); } + EventMsg::AskUserQuestionRequest(ev) => { + self.on_ask_user_question_request(id.unwrap_or_default(), ev) + } + EventMsg::PlanApprovalRequest(ev) => { + self.on_plan_approval_request(id.unwrap_or_default(), ev) + } EventMsg::ExecCommandBegin(ev) => self.on_exec_command_begin(ev), EventMsg::TerminalInteraction(delta) => self.on_terminal_interaction(delta), EventMsg::ExecCommandOutputDelta(delta) => self.on_exec_command_output_delta(delta), @@ -1989,14 +2333,15 @@ impl ChatWidget { EventMsg::ShutdownComplete => self.on_shutdown_complete(), EventMsg::TurnDiff(TurnDiffEvent { unified_diff }) => self.on_turn_diff(unified_diff), EventMsg::DeprecationNotice(ev) => self.on_deprecation_notice(ev), - EventMsg::BackgroundEvent(BackgroundEventEvent { message }) => { + EventMsg::BackgroundEvent(BackgroundEventEvent { message }) if !from_replay => { self.on_background_event(message) } - EventMsg::UndoStarted(ev) => self.on_undo_started(ev), + EventMsg::UndoStarted(ev) if !from_replay => self.on_undo_started(ev), EventMsg::UndoCompleted(ev) => self.on_undo_completed(ev), - EventMsg::StreamError(StreamErrorEvent { message, .. }) => { + EventMsg::StreamError(StreamErrorEvent { message, .. 
}) if !from_replay => { self.on_stream_error(message) } + EventMsg::BackgroundEvent(_) | EventMsg::UndoStarted(_) | EventMsg::StreamError(_) => {} EventMsg::UserMessage(ev) => { if from_replay { self.on_user_message_event(ev); @@ -2006,6 +2351,14 @@ impl ChatWidget { self.on_entered_review_mode(review_request) } EventMsg::ExitedReviewMode(review) => self.on_exited_review_mode(review), + EventMsg::EnteredPlanMode(request) => self.on_entered_plan_mode(request), + EventMsg::ExitedPlanMode(ev) => { + if from_replay { + self.on_exited_plan_mode_replay(ev); + } else { + self.on_exited_plan_mode(ev); + } + } EventMsg::ContextCompacted(_) => self.on_agent_message("Context compacted".to_owned()), EventMsg::RawResponseItem(_) | EventMsg::ItemStarted(_) @@ -2065,6 +2418,45 @@ impl ChatWidget { self.request_redraw(); } + fn on_entered_plan_mode(&mut self, request: PlanRequest) { + let goal = request.goal.trim(); + if goal.is_empty() { + self.add_info_message(">> Plan mode started <<".to_string(), None); + } else { + self.add_info_message(format!(">> Plan mode started: {goal} <<"), None); + } + self.request_redraw(); + } + + fn on_exited_plan_mode(&mut self, ev: ExitedPlanModeEvent) { + if ev.plan_output.is_some() { + self.add_info_message( + "<< Plan mode finished; executing approved plan >>".to_string(), + None, + ); + self.queue_user_message(UserMessage { + text: "Proceed with the approved plan.".to_string(), + image_paths: Vec::new(), + }); + } else { + self.add_info_message("<< Plan mode ended <<".to_string(), None); + } + self.request_redraw(); + } + + fn on_exited_plan_mode_replay(&mut self, ev: ExitedPlanModeEvent) { + if ev.plan_output.is_some() { + self.add_info_message( + "<< Plan mode finished; send 'Proceed with the approved plan.' 
to continue >>" + .to_string(), + None, + ); + } else { + self.add_info_message("<< Plan mode ended >>".to_string(), None); + } + self.request_redraw(); + } + fn on_user_message_event(&mut self, event: UserMessageEvent) { let message = event.message.trim(); if !message.is_empty() { @@ -2164,7 +2556,12 @@ impl ChatWidget { } fn prefetch_rate_limits(&mut self) { - self.stop_rate_limit_poller(); + if self.rate_limit_poller.is_some() { + return; + } + if tokio::runtime::Handle::try_current().is_err() { + return; + } let Some(auth) = self.auth_manager.auth() else { return; @@ -2235,7 +2632,9 @@ impl ChatWidget { approval_policy: None, sandbox_policy: None, model: Some(switch_model.clone()), + plan_model: None, effort: Some(Some(default_effort)), + plan_effort: None, summary: None, })); tx.send(AppEvent::UpdateModel(switch_model.clone())); @@ -2297,7 +2696,23 @@ impl ChatWidget { /// Open a popup to choose a quick auto model. Selecting "All models" /// opens the full picker with every available preset. 
pub(crate) fn open_model_popup(&mut self) { - let current_model = self.model_family.get_model_slug().to_string(); + self.open_model_popup_for_target(crate::app_event::ModelPickerTarget::Chat); + } + + pub(crate) fn open_plan_model_popup(&mut self) { + self.open_model_popup_for_target(crate::app_event::ModelPickerTarget::Plan); + } + + fn open_model_popup_for_target(&mut self, target: crate::app_event::ModelPickerTarget) { + let chat_model = self.model_family.get_model_slug(); + let current_model = match target { + crate::app_event::ModelPickerTarget::Chat => chat_model.to_string(), + crate::app_event::ModelPickerTarget::Plan => self + .config + .plan_model + .clone() + .unwrap_or_else(|| chat_model.to_string()), + }; let presets: Vec = // todo(aibrahim): make this async function match self.models_manager.try_list_models(&self.config) { @@ -2323,7 +2738,7 @@ impl ChatWidget { .partition(|preset| Self::is_auto_model(&preset.model)); if auto_presets.is_empty() { - self.open_all_models_popup(other_presets); + self.open_all_models_popup(target, other_presets); return; } @@ -2336,6 +2751,7 @@ impl ChatWidget { (!preset.description.is_empty()).then_some(preset.description.clone()); let model = preset.model.clone(); let actions = Self::model_selection_actions( + target, model.clone(), Some(preset.default_reasoning_effort), ); @@ -2356,13 +2772,23 @@ impl ChatWidget { let actions: Vec = vec![Box::new(move |tx| { tx.send(AppEvent::OpenAllModelsPopup { models: all_models.clone(), + target, }); })]; let is_current = !items.iter().any(|item| item.is_current); - let description = Some(format!( - "Choose a specific model and reasoning level (current: {current_label})" - )); + let description = Some(match target { + crate::app_event::ModelPickerTarget::Chat => { + format!( + "Choose a specific model and reasoning level (current: {current_label})" + ) + } + crate::app_event::ModelPickerTarget::Plan => { + format!( + "Choose a specific model and reasoning level for /plan (current: 
{current_label})" + ) + } + }); items.push(SelectionItem { name: "All models".to_string(), @@ -2375,8 +2801,18 @@ impl ChatWidget { } self.bottom_pane.show_selection_view(SelectionViewParams { - title: Some("Select Model".to_string()), - subtitle: Some("Pick a quick auto mode or browse all models.".to_string()), + title: Some(match target { + crate::app_event::ModelPickerTarget::Chat => "Select Model".to_string(), + crate::app_event::ModelPickerTarget::Plan => "Select Plan Model".to_string(), + }), + subtitle: Some(match target { + crate::app_event::ModelPickerTarget::Chat => { + "Pick a quick auto mode or browse all models.".to_string() + } + crate::app_event::ModelPickerTarget::Plan => { + "Pick a quick auto mode or browse all models for /plan.".to_string() + } + }), footer_hint: Some(standard_popup_hint_line()), items, ..Default::default() @@ -2396,7 +2832,11 @@ impl ChatWidget { } } - pub(crate) fn open_all_models_popup(&mut self, presets: Vec) { + pub(crate) fn open_all_models_popup( + &mut self, + target: crate::app_event::ModelPickerTarget, + presets: Vec, + ) { if presets.is_empty() { self.add_info_message( "No additional models are available right now.".to_string(), @@ -2405,7 +2845,15 @@ impl ChatWidget { return; } - let current_model = self.model_family.get_model_slug().to_string(); + let chat_model = self.model_family.get_model_slug(); + let current_model = match target { + crate::app_event::ModelPickerTarget::Chat => chat_model.to_string(), + crate::app_event::ModelPickerTarget::Plan => self + .config + .plan_model + .clone() + .unwrap_or_else(|| chat_model.to_string()), + }; let mut items: Vec = Vec::new(); for preset in presets.into_iter() { let description = @@ -2417,6 +2865,7 @@ impl ChatWidget { let preset_for_event = preset_for_action.clone(); tx.send(AppEvent::OpenReasoningPopup { model: preset_for_event, + target, }); })]; items.push(SelectionItem { @@ -2431,7 +2880,12 @@ impl ChatWidget { } 
self.bottom_pane.show_selection_view(SelectionViewParams { - title: Some("Select Model and Effort".to_string()), + title: Some(match target { + crate::app_event::ModelPickerTarget::Chat => "Select Model and Effort".to_string(), + crate::app_event::ModelPickerTarget::Plan => { + "Select Plan Model and Effort".to_string() + } + }), subtitle: Some( "Access legacy models by running codex -m or in your config.toml" .to_string(), @@ -2443,6 +2897,7 @@ impl ChatWidget { } fn model_selection_actions( + target: crate::app_event::ModelPickerTarget, model_for_action: String, effort_for_action: Option, ) -> Vec { @@ -2450,30 +2905,63 @@ impl ChatWidget { let effort_label = effort_for_action .map(|effort| effort.to_string()) .unwrap_or_else(|| "default".to_string()); - tx.send(AppEvent::CodexOp(Op::OverrideTurnContext { - cwd: None, - approval_policy: None, - sandbox_policy: None, - model: Some(model_for_action.clone()), - effort: Some(effort_for_action), - summary: None, - })); - tx.send(AppEvent::UpdateModel(model_for_action.clone())); - tx.send(AppEvent::UpdateReasoningEffort(effort_for_action)); - tx.send(AppEvent::PersistModelSelection { - model: model_for_action.clone(), - effort: effort_for_action, - }); - tracing::info!( - "Selected model: {}, Selected effort: {}", - model_for_action, - effort_label - ); + match target { + crate::app_event::ModelPickerTarget::Chat => { + tx.send(AppEvent::CodexOp(Op::OverrideTurnContext { + cwd: None, + approval_policy: None, + sandbox_policy: None, + model: Some(model_for_action.clone()), + plan_model: None, + effort: Some(effort_for_action), + plan_effort: None, + summary: None, + })); + tx.send(AppEvent::UpdateModel(model_for_action.clone())); + tx.send(AppEvent::UpdateReasoningEffort(effort_for_action)); + tx.send(AppEvent::PersistModelSelection { + model: model_for_action.clone(), + effort: effort_for_action, + }); + tracing::info!( + "Selected model: {}, Selected effort: {}", + model_for_action, + effort_label + ); + } + 
crate::app_event::ModelPickerTarget::Plan => { + tx.send(AppEvent::CodexOp(Op::OverrideTurnContext { + cwd: None, + approval_policy: None, + sandbox_policy: None, + model: None, + plan_model: Some(model_for_action.clone()), + effort: None, + plan_effort: Some(effort_for_action), + summary: None, + })); + tx.send(AppEvent::UpdatePlanModel(model_for_action.clone())); + tx.send(AppEvent::UpdatePlanReasoningEffort(effort_for_action)); + tx.send(AppEvent::PersistPlanModelSelection { + model: model_for_action.clone(), + effort: effort_for_action, + }); + tracing::info!( + "Selected plan model: {}, Selected effort: {}", + model_for_action, + effort_label + ); + } + } })] } /// Open a popup to choose the reasoning effort (stage 2) for the given model. - pub(crate) fn open_reasoning_popup(&mut self, preset: ModelPreset) { + pub(crate) fn open_reasoning_popup( + &mut self, + target: crate::app_event::ModelPickerTarget, + preset: ModelPreset, + ) { let default_effort: ReasoningEffortConfig = preset.default_reasoning_effort; let supported = preset.supported_reasoning_efforts; @@ -2520,9 +3008,9 @@ impl ChatWidget { if choices.len() == 1 { if let Some(effort) = choices.first().and_then(|c| c.stored) { - self.apply_model_and_effort(preset.model, Some(effort)); + self.apply_model_and_effort(target, preset.model, Some(effort)); } else { - self.apply_model_and_effort(preset.model, None); + self.apply_model_and_effort(target, preset.model, None); } return; } @@ -2536,9 +3024,25 @@ impl ChatWidget { .or(Some(default_effort)); let model_slug = preset.model.to_string(); - let is_current_model = self.model_family.get_model_slug() == preset.model; + let chat_model = self.model_family.get_model_slug(); + let effective_current_model = match target { + crate::app_event::ModelPickerTarget::Chat => chat_model, + crate::app_event::ModelPickerTarget::Plan => { + self.config.plan_model.as_deref().unwrap_or(chat_model) + } + }; + let is_current_model = effective_current_model == preset.model; let 
highlight_choice = if is_current_model { - self.config.model_reasoning_effort + match target { + crate::app_event::ModelPickerTarget::Chat => self.config.model_reasoning_effort, + crate::app_event::ModelPickerTarget::Plan => { + if self.config.plan_model.as_deref() == Some(preset.model.as_str()) { + self.config.plan_model_reasoning_effort + } else { + self.config.model_reasoning_effort + } + } + } } else { default_choice }; @@ -2581,7 +3085,7 @@ impl ChatWidget { }; let model_for_action = model_slug.clone(); - let actions = Self::model_selection_actions(model_for_action, choice.stored); + let actions = Self::model_selection_actions(target, model_for_action, choice.stored); items.push(SelectionItem { name: effort_label, @@ -2619,30 +3123,68 @@ impl ChatWidget { } } - fn apply_model_and_effort(&self, model: String, effort: Option) { - self.app_event_tx - .send(AppEvent::CodexOp(Op::OverrideTurnContext { - cwd: None, - approval_policy: None, - sandbox_policy: None, - model: Some(model.clone()), - effort: Some(effort), - summary: None, - })); - self.app_event_tx.send(AppEvent::UpdateModel(model.clone())); - self.app_event_tx - .send(AppEvent::UpdateReasoningEffort(effort)); - self.app_event_tx.send(AppEvent::PersistModelSelection { - model: model.clone(), - effort, - }); - tracing::info!( - "Selected model: {}, Selected effort: {}", - model, - effort - .map(|e| e.to_string()) - .unwrap_or_else(|| "default".to_string()) - ); + fn apply_model_and_effort( + &self, + target: crate::app_event::ModelPickerTarget, + model: String, + effort: Option, + ) { + let effort_label = effort + .map(|e| e.to_string()) + .unwrap_or_else(|| "default".to_string()); + match target { + crate::app_event::ModelPickerTarget::Chat => { + self.app_event_tx + .send(AppEvent::CodexOp(Op::OverrideTurnContext { + cwd: None, + approval_policy: None, + sandbox_policy: None, + model: Some(model.clone()), + plan_model: None, + effort: Some(effort), + plan_effort: None, + summary: None, + })); + 
self.app_event_tx.send(AppEvent::UpdateModel(model.clone())); + self.app_event_tx + .send(AppEvent::UpdateReasoningEffort(effort)); + self.app_event_tx.send(AppEvent::PersistModelSelection { + model: model.clone(), + effort, + }); + tracing::info!( + "Selected model: {}, Selected effort: {}", + model, + effort_label + ); + } + crate::app_event::ModelPickerTarget::Plan => { + self.app_event_tx + .send(AppEvent::CodexOp(Op::OverrideTurnContext { + cwd: None, + approval_policy: None, + sandbox_policy: None, + model: None, + plan_model: Some(model.clone()), + effort: None, + plan_effort: Some(effort), + summary: None, + })); + self.app_event_tx + .send(AppEvent::UpdatePlanModel(model.clone())); + self.app_event_tx + .send(AppEvent::UpdatePlanReasoningEffort(effort)); + self.app_event_tx.send(AppEvent::PersistPlanModelSelection { + model: model.clone(), + effort, + }); + tracing::info!( + "Selected plan model: {}, Selected effort: {}", + model, + effort_label + ); + } + } } /// Open a popup to choose the approvals mode (ask for approval policy + sandbox policy). @@ -2756,7 +3298,9 @@ impl ChatWidget { approval_policy: Some(approval), sandbox_policy: Some(sandbox_clone.clone()), model: None, + plan_model: None, effort: None, + plan_effort: None, summary: None, })); tx.send(AppEvent::UpdateAskForApprovalPolicy(approval)); @@ -3123,12 +3667,22 @@ impl ChatWidget { self.config.model_reasoning_effort = effort; } + /// Set the plan reasoning effort in the widget's config copy. + pub(crate) fn set_plan_reasoning_effort(&mut self, effort: Option) { + self.config.plan_model_reasoning_effort = effort; + } + /// Set the model in the widget's config copy. pub(crate) fn set_model(&mut self, model: &str, model_family: ModelFamily) { self.session_header.set_model(model); self.model_family = model_family; } + /// Set the plan model in the widget's config copy. 
+ pub(crate) fn set_plan_model(&mut self, model: &str) { + self.config.plan_model = Some(model.to_string()); + } + pub(crate) fn add_info_message(&mut self, message: String, hint: Option) { self.add_to_history(history_cell::new_info_event(message, hint)); self.request_redraw(); diff --git a/codex-rs/tui/src/chatwidget/interrupts.rs b/codex-rs/tui/src/chatwidget/interrupts.rs index dc1e683ea55..7a49547f181 100644 --- a/codex-rs/tui/src/chatwidget/interrupts.rs +++ b/codex-rs/tui/src/chatwidget/interrupts.rs @@ -1,12 +1,14 @@ use std::collections::VecDeque; use codex_core::protocol::ApplyPatchApprovalRequestEvent; +use codex_core::protocol::AskUserQuestionRequestEvent; use codex_core::protocol::ExecApprovalRequestEvent; use codex_core::protocol::ExecCommandBeginEvent; use codex_core::protocol::ExecCommandEndEvent; use codex_core::protocol::McpToolCallBeginEvent; use codex_core::protocol::McpToolCallEndEvent; use codex_core::protocol::PatchApplyEndEvent; +use codex_core::protocol::PlanApprovalRequestEvent; use codex_protocol::approvals::ElicitationRequestEvent; use super::ChatWidget; @@ -16,6 +18,8 @@ pub(crate) enum QueuedInterrupt { ExecApproval(String, ExecApprovalRequestEvent), ApplyPatchApproval(String, ApplyPatchApprovalRequestEvent), Elicitation(ElicitationRequestEvent), + AskUserQuestion(String, AskUserQuestionRequestEvent), + PlanApproval(String, PlanApprovalRequestEvent), ExecBegin(ExecCommandBeginEvent), ExecEnd(ExecCommandEndEvent), McpBegin(McpToolCallBeginEvent), @@ -57,6 +61,15 @@ impl InterruptManager { self.queue.push_back(QueuedInterrupt::Elicitation(ev)); } + pub(crate) fn push_ask_user_question(&mut self, id: String, ev: AskUserQuestionRequestEvent) { + self.queue + .push_back(QueuedInterrupt::AskUserQuestion(id, ev)); + } + + pub(crate) fn push_plan_approval(&mut self, id: String, ev: PlanApprovalRequestEvent) { + self.queue.push_back(QueuedInterrupt::PlanApproval(id, ev)); + } + pub(crate) fn push_exec_begin(&mut self, ev: ExecCommandBeginEvent) 
{ self.queue.push_back(QueuedInterrupt::ExecBegin(ev)); } @@ -85,6 +98,12 @@ impl InterruptManager { chat.handle_apply_patch_approval_now(id, ev) } QueuedInterrupt::Elicitation(ev) => chat.handle_elicitation_request_now(ev), + QueuedInterrupt::AskUserQuestion(id, ev) => { + chat.handle_ask_user_question_request_now(id, ev) + } + QueuedInterrupt::PlanApproval(id, ev) => { + chat.handle_plan_approval_request_now(id, ev) + } QueuedInterrupt::ExecBegin(ev) => chat.handle_exec_begin_now(ev), QueuedInterrupt::ExecEnd(ev) => chat.handle_exec_end_now(ev), QueuedInterrupt::McpBegin(ev) => chat.handle_mcp_begin_now(ev), diff --git a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__update_popup.snap b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__update_popup.snap index 6a49cb253c4..cb7c29c9506 100644 --- a/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__update_popup.snap +++ b/codex-rs/tui/src/chatwidget/snapshots/codex_tui__chatwidget__tests__update_popup.snap @@ -4,7 +4,7 @@ expression: terminal.backend().vt100().screen().contents() --- ✨ New version available! Would you like to update? - Full release notes: https://github.com/openai/codex/releases/latest + Full release notes: https://github.com/Ixe1/codexel/releases/latest › 1. 
Yes, update now diff --git a/codex-rs/tui/src/chatwidget/tests.rs b/codex-rs/tui/src/chatwidget/tests.rs index 55c12a34278..a94047daa4e 100644 --- a/codex-rs/tui/src/chatwidget/tests.rs +++ b/codex-rs/tui/src/chatwidget/tests.rs @@ -27,6 +27,7 @@ use codex_core::protocol::ExecCommandBeginEvent; use codex_core::protocol::ExecCommandEndEvent; use codex_core::protocol::ExecCommandSource; use codex_core::protocol::ExecPolicyAmendment; +use codex_core::protocol::ExitedPlanModeEvent; use codex_core::protocol::ExitedReviewModeEvent; use codex_core::protocol::FileChange; use codex_core::protocol::McpStartupStatus; @@ -34,6 +35,7 @@ use codex_core::protocol::McpStartupUpdateEvent; use codex_core::protocol::Op; use codex_core::protocol::PatchApplyBeginEvent; use codex_core::protocol::PatchApplyEndEvent; +use codex_core::protocol::PlanOutputEvent; use codex_core::protocol::RateLimitWindow; use codex_core::protocol::ReviewRequest; use codex_core::protocol::ReviewTarget; @@ -153,6 +155,90 @@ fn resumed_initial_messages_render_history() { ); } +#[test] +fn resumed_session_does_not_start_rate_limit_poller_until_input() { + let (mut chat, _rx, _ops) = make_chatwidget_manual(None); + set_chatgpt_auth(&mut chat); + + let conversation_id = ConversationId::new(); + let rollout_file = NamedTempFile::new().unwrap(); + let configured = codex_core::protocol::SessionConfiguredEvent { + session_id: conversation_id, + model: "test-model".to_string(), + model_provider_id: "test-provider".to_string(), + approval_policy: AskForApproval::Never, + sandbox_policy: SandboxPolicy::ReadOnly, + cwd: PathBuf::from("/home/user/project"), + reasoning_effort: Some(ReasoningEffortConfig::default()), + history_log_id: 0, + history_entry_count: 0, + initial_messages: None, + rollout_path: rollout_file.path().to_path_buf(), + }; + + chat.handle_codex_event(Event { + id: "initial".into(), + msg: EventMsg::SessionConfigured(configured), + }); + + assert!( + chat.rate_limit_poller.is_none(), + "expected no rate 
limit polling until user input" + ); +} + +#[test] +fn resumed_session_does_not_auto_execute_plan() { + let (mut chat, _rx, mut op_rx) = make_chatwidget_manual(None); + set_chatgpt_auth(&mut chat); + + let conversation_id = ConversationId::new(); + let rollout_file = NamedTempFile::new().unwrap(); + let configured = codex_core::protocol::SessionConfiguredEvent { + session_id: conversation_id, + model: "test-model".to_string(), + model_provider_id: "test-provider".to_string(), + approval_policy: AskForApproval::Never, + sandbox_policy: SandboxPolicy::ReadOnly, + cwd: PathBuf::from("/home/user/project"), + reasoning_effort: Some(ReasoningEffortConfig::default()), + history_log_id: 0, + history_entry_count: 0, + initial_messages: Some(vec![EventMsg::ExitedPlanMode(ExitedPlanModeEvent { + plan_output: Some(PlanOutputEvent { + title: "Example".to_string(), + summary: "Summary".to_string(), + plan: UpdatePlanArgs { + explanation: None, + plan: vec![PlanItemArg { + step: "Step 1".to_string(), + status: StepStatus::Pending, + }], + }, + }), + })]), + rollout_path: rollout_file.path().to_path_buf(), + }; + + chat.handle_codex_event(Event { + id: "initial".into(), + msg: EventMsg::SessionConfigured(configured), + }); + + let mut saw_user_turn = false; + while let Ok(op) = op_rx.try_recv() { + if matches!(op, Op::UserTurn { .. } | Op::UserInput { .. }) { + saw_user_turn = true; + break; + } + } + + assert!( + !saw_user_turn, + "expected no auto-execute user turn after resume replay" + ); +} + /// Entering review mode uses the hint provided by the review request. 
#[test] fn entered_review_mode_uses_request_hint() { @@ -338,6 +424,39 @@ async fn helpers_are_available_and_do_not_panic() { let _ = &mut w; } +#[test] +fn exiting_plan_mode_with_approved_output_auto_executes() { + let (mut chat, _app_event_rx, mut op_rx) = make_chatwidget_manual(None); + + chat.on_exited_plan_mode(ExitedPlanModeEvent { + plan_output: Some(PlanOutputEvent { + title: "Example".to_string(), + summary: "Summary".to_string(), + plan: UpdatePlanArgs { + explanation: None, + plan: vec![PlanItemArg { + step: "Step 1".to_string(), + status: StepStatus::Pending, + }], + }, + }), + }); + + let op = op_rx + .try_recv() + .expect("expected an auto-execute user turn"); + let items = match op { + Op::UserTurn { items, .. } | Op::UserInput { items } => items, + other => panic!("unexpected op: {other:?}"), + }; + assert_eq!( + items, + vec![codex_protocol::user_input::UserInput::Text { + text: "Proceed with the approved plan.".to_string(), + }] + ); +} + // --- Helpers for tests that need direct construction and event draining --- fn make_chatwidget_manual( model_override: Option<&str>, @@ -381,6 +500,7 @@ fn make_chatwidget_manual( token_info: None, rate_limit_snapshot: None, plan_type: None, + last_plan_update_key: None, rate_limit_warnings: RateLimitWarningState::default(), rate_limit_switch_prompt: RateLimitSwitchPromptState::default(), rate_limit_poller: None, @@ -394,8 +514,9 @@ fn make_chatwidget_manual( interrupts: InterruptManager::new(), reasoning_buffer: String::new(), full_reasoning_buffer: String::new(), - current_status_header: String::from("Working"), + current_status_header: String::from("Ready"), retry_status_header: None, + plan_variants_progress: None, conversation_id: None, frame_requester: FrameRequester::test_dummy(), show_welcome_banner: true, @@ -1956,7 +2077,7 @@ fn model_reasoning_selection_popup_snapshot() { chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::High); let preset = get_available_model(&chat, 
"gpt-5.1-codex-max"); - chat.open_reasoning_popup(preset); + chat.open_reasoning_popup(crate::app_event::ModelPickerTarget::Chat, preset); let popup = render_bottom_popup(&chat, 80); assert_snapshot!("model_reasoning_selection_popup", popup); @@ -1970,7 +2091,7 @@ fn model_reasoning_selection_popup_extra_high_warning_snapshot() { chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::XHigh); let preset = get_available_model(&chat, "gpt-5.1-codex-max"); - chat.open_reasoning_popup(preset); + chat.open_reasoning_popup(crate::app_event::ModelPickerTarget::Chat, preset); let popup = render_bottom_popup(&chat, 80); assert_snapshot!("model_reasoning_selection_popup_extra_high_warning", popup); @@ -1983,7 +2104,7 @@ fn reasoning_popup_shows_extra_high_with_space() { set_chatgpt_auth(&mut chat); let preset = get_available_model(&chat, "gpt-5.1-codex-max"); - chat.open_reasoning_popup(preset); + chat.open_reasoning_popup(crate::app_event::ModelPickerTarget::Chat, preset); let popup = render_bottom_popup(&chat, 120); assert!( @@ -2016,7 +2137,7 @@ fn single_reasoning_option_skips_selection() { show_in_picker: true, supported_in_api: true, }; - chat.open_reasoning_popup(preset); + chat.open_reasoning_popup(crate::app_event::ModelPickerTarget::Chat, preset); let popup = render_bottom_popup(&chat, 80); assert!( @@ -2065,7 +2186,7 @@ fn reasoning_popup_escape_returns_to_model_popup() { chat.open_model_popup(); let preset = get_available_model(&chat, "gpt-5.1-codex-max"); - chat.open_reasoning_popup(preset); + chat.open_reasoning_popup(crate::app_event::ModelPickerTarget::Chat, preset); let before_escape = render_bottom_popup(&chat, 80); assert!(before_escape.contains("Select Reasoning Level")); @@ -3110,6 +3231,36 @@ fn plan_update_renders_history_cell() { assert!(blob.contains("Write tests")); } +#[test] +fn plan_update_dedupes_identical_updates() { + let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None); + let update = UpdatePlanArgs { + explanation: 
Some("Updating plan".to_string()), + plan: vec![ + PlanItemArg { + step: "Explore codebase".into(), + status: StepStatus::Completed, + }, + PlanItemArg { + step: "Implement feature".into(), + status: StepStatus::InProgress, + }, + ], + }; + + chat.handle_codex_event(Event { + id: "sub-1".into(), + msg: EventMsg::PlanUpdate(update.clone()), + }); + chat.handle_codex_event(Event { + id: "sub-1".into(), + msg: EventMsg::PlanUpdate(update), + }); + + let cells = drain_insert_history(&mut rx); + assert_eq!(cells.len(), 1, "expected a single plan update cell"); +} + #[test] fn stream_error_updates_status_indicator() { let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None); diff --git a/codex-rs/tui/src/cli.rs b/codex-rs/tui/src/cli.rs index 2b19b4c0649..1a1a45c373d 100644 --- a/codex-rs/tui/src/cli.rs +++ b/codex-rs/tui/src/cli.rs @@ -15,8 +15,8 @@ pub struct Cli { #[arg(long = "image", short = 'i', value_name = "FILE", value_delimiter = ',', num_args = 1..)] pub images: Vec, - // Internal controls set by the top-level `codex resume` subcommand. - // These are not exposed as user flags on the base `codex` command. + // Internal controls set by the top-level `codexel resume` subcommand. + // These are not exposed as user flags on the base `codexel` command. #[clap(skip)] pub resume_picker: bool, @@ -24,7 +24,7 @@ pub struct Cli { pub resume_last: bool, /// Internal: resume a specific recorded session by id (UUID). Set by the - /// top-level `codex resume ` wrapper; not exposed as a public flag. + /// top-level `codexel resume ` wrapper; not exposed as a public flag. 
#[clap(skip)] pub resume_session_id: Option, diff --git a/codex-rs/tui/src/history_cell.rs b/codex-rs/tui/src/history_cell.rs index 2c0a37ecea5..bcab071ed0b 100644 --- a/codex-rs/tui/src/history_cell.rs +++ b/codex-rs/tui/src/history_cell.rs @@ -307,7 +307,7 @@ impl HistoryCell for UpdateAvailableHistoryCell { } else { line![ "See ", - "https://github.com/openai/codex".cyan().underlined(), + "https://github.com/Ixe1/codexel".cyan().underlined(), " for installation options." ] }; @@ -322,7 +322,7 @@ impl HistoryCell for UpdateAvailableHistoryCell { update_instruction, "", "See full release notes:", - "https://github.com/openai/codex/releases/latest" + "https://github.com/Ixe1/codexel/releases/latest" .cyan() .underlined(), ]; @@ -833,10 +833,10 @@ impl HistoryCell for SessionHeaderHistoryCell { let make_row = |spans: Vec>| Line::from(spans); - // Title line rendered inside the box: ">_ OpenAI Codex (vX)" + // Title line rendered inside the box: ">_ Codexel (vX)" let title_spans: Vec> = vec![ Span::from(">_ ").dim(), - Span::from("OpenAI Codex").bold(), + Span::from("Codexel").bold(), Span::from(" ").dim(), Span::from(format!("(v{})", self.version)).dim(), ]; @@ -1087,9 +1087,9 @@ pub(crate) fn new_active_mcp_tool_call( McpToolCallCell::new(call_id, invocation, animations_enabled) } -pub(crate) fn new_web_search_call(query: String) -> PlainHistoryCell { - let lines: Vec> = vec![Line::from(vec![padded_emoji("🌐").into(), query.into()])]; - PlainHistoryCell { lines } +pub(crate) fn new_web_search_call(query: String) -> PrefixedWrappedHistoryCell { + let text: Text<'static> = Line::from(vec!["Searched".bold(), " ".into(), query.into()]).into(); + PrefixedWrappedHistoryCell::new(text, "• ".dim(), " ") } /// If the first content is an image, return a new cell with the image. 
@@ -1179,7 +1179,8 @@ pub(crate) fn empty_mcp_output() -> PlainHistoryCell { " • No MCP servers configured.".italic().into(), Line::from(vec![ " See the ".into(), - "\u{1b}]8;;https://github.com/openai/codex/blob/main/docs/config.md#mcp_servers\u{7}MCP docs\u{1b}]8;;\u{7}".underlined(), + "\u{1b}]8;;https://github.com/Ixe1/codexel/blob/main/docs/config.md#mcp_servers\u{7}MCP docs\u{1b}]8;;\u{7}" + .underlined(), " to configure them.".into(), ]) .style(Style::default().add_modifier(Modifier::DIM)), @@ -1764,6 +1765,50 @@ mod tests { ); } + #[test] + fn web_search_history_cell_snapshot() { + let cell = new_web_search_call( + "example search query with several generic words to exercise wrapping".to_string(), + ); + let rendered = render_lines(&cell.display_lines(64)).join("\n"); + + insta::assert_snapshot!(rendered); + } + + #[test] + fn web_search_history_cell_wraps_with_indented_continuation() { + let cell = new_web_search_call( + "example search query with several generic words to exercise wrapping".to_string(), + ); + let rendered = render_lines(&cell.display_lines(64)); + + assert_eq!( + rendered, + vec![ + "• Searched example search query with several generic words to".to_string(), + " exercise wrapping".to_string(), + ] + ); + } + + #[test] + fn web_search_history_cell_short_query_does_not_wrap() { + let cell = new_web_search_call("short query".to_string()); + let rendered = render_lines(&cell.display_lines(64)); + + assert_eq!(rendered, vec!["• Searched short query".to_string()]); + } + + #[test] + fn web_search_history_cell_transcript_snapshot() { + let cell = new_web_search_call( + "example search query with several generic words to exercise wrapping".to_string(), + ); + let rendered = render_lines(&cell.transcript_lines(64)).join("\n"); + + insta::assert_snapshot!(rendered); + } + #[test] fn active_mcp_tool_call_snapshot() { let invocation = McpInvocation { diff --git a/codex-rs/tui/src/lib.rs b/codex-rs/tui/src/lib.rs index 005446c5f0b..ab2d76052f9 100644 
--- a/codex-rs/tui/src/lib.rs +++ b/codex-rs/tui/src/lib.rs @@ -241,7 +241,7 @@ pub async fn run_main( // Ensure the file is only readable and writable by the current user. // Doing the equivalent to `chmod 600` on Windows is quite a bit more code // and requires the Windows API crates, so we can reconsider that when - // Codex CLI is officially supported on Windows. + // Codexel is officially supported on Windows. #[cfg(unix)] { use std::os::unix::fs::OpenOptionsExt; @@ -429,7 +429,7 @@ async fn run_ratatui_app( let _ = tui.terminal.clear(); if let Err(err) = writeln!( std::io::stdout(), - "No saved session found with ID {id_str}. Run `codex resume` without an ID to choose from existing sessions." + "No saved session found with ID {id_str}. Run `codexel resume` without an ID to choose from existing sessions." ) { error!("Failed to write resume error message: {err}"); } diff --git a/codex-rs/tui/src/model_migration.rs b/codex-rs/tui/src/model_migration.rs index 5140a7a38ca..cf2208109aa 100644 --- a/codex-rs/tui/src/model_migration.rs +++ b/codex-rs/tui/src/model_migration.rs @@ -59,26 +59,36 @@ pub(crate) fn migration_copy_for_models( current_model: &str, target_model: &str, model_link: Option, + migration_copy: Option, target_display_name: String, target_description: Option, can_opt_out: bool, ) -> ModelMigrationCopy { - let heading_text = Span::from(format!("Try {target_display_name}")).bold(); - let description_line = target_description - .filter(|desc| !desc.is_empty()) - .map(Line::from) - .unwrap_or_else(|| { - Line::from(format!( - "{target_display_name} is recommended for better performance and reliability." - )) - }); - - let mut content = vec![ - Line::from(format!( + let heading_text = Span::from(format!( + "Codex just got an upgrade. Introducing {target_display_name}." 
+ )) + .bold(); + let description_line: Line<'static>; + if let Some(migration_copy) = &migration_copy { + description_line = Line::from(migration_copy.clone()); + } else { + description_line = target_description + .filter(|desc| !desc.is_empty()) + .map(Line::from) + .unwrap_or_else(|| { + Line::from(format!( + "{target_display_name} is recommended for better performance and reliability." + )) + }); + } + + let mut content = vec![]; + if migration_copy.is_none() { + content.push(Line::from(format!( "We recommend switching from {current_model} to {target_model}." - )), - Line::from(""), - ]; + ))); + content.push(Line::from("")); + } if let Some(model_link) = model_link { content.push(Line::from(vec![ @@ -364,6 +374,10 @@ mod tests { "gpt-5.1-codex-mini", "gpt-5.1-codex-max", None, + Some( + "Upgrade to gpt-5.2-codex for the latest and greatest agentic coding model." + .to_string(), + ), "gpt-5.1-codex-max".to_string(), Some("Codex-optimized flagship for deep and fast reasoning.".to_string()), true, @@ -391,6 +405,7 @@ mod tests { "gpt-5", "gpt-5.1", Some("https://www.codex.com/models/gpt-5.1".to_string()), + None, "gpt-5.1".to_string(), Some("Broad world knowledge with strong general reasoning.".to_string()), false, @@ -416,6 +431,7 @@ mod tests { "gpt-5-codex", "gpt-5.1-codex-max", Some("https://www.codex.com/models/gpt-5.1-codex-max".to_string()), + None, "gpt-5.1-codex-max".to_string(), Some("Codex-optimized flagship for deep and fast reasoning.".to_string()), false, @@ -441,6 +457,7 @@ mod tests { "gpt-5-codex-mini", "gpt-5.1-codex-mini", Some("https://www.codex.com/models/gpt-5.1-codex-mini".to_string()), + None, "gpt-5.1-codex-mini".to_string(), Some("Optimized for codex. 
Cheaper, faster, but less capable.".to_string()), false, @@ -462,6 +479,7 @@ mod tests { "gpt-old", "gpt-new", Some("https://www.codex.com/models/gpt-new".to_string()), + None, "gpt-new".to_string(), Some("Latest recommended model for better performance.".to_string()), true, @@ -489,6 +507,7 @@ mod tests { "gpt-old", "gpt-new", Some("https://www.codex.com/models/gpt-new".to_string()), + None, "gpt-new".to_string(), Some("Latest recommended model for better performance.".to_string()), true, diff --git a/codex-rs/tui/src/onboarding/auth.rs b/codex-rs/tui/src/onboarding/auth.rs index 6307e6e7dc9..5cdf925b14d 100644 --- a/codex-rs/tui/src/onboarding/auth.rs +++ b/codex-rs/tui/src/onboarding/auth.rs @@ -296,7 +296,8 @@ impl AuthModeWidget { " Decide how much autonomy you want to grant Codex".into(), Line::from(vec![ " For more details see the ".into(), - "\u{1b}]8;;https://github.com/openai/codex\u{7}Codex docs\u{1b}]8;;\u{7}".underlined(), + "\u{1b}]8;;https://github.com/Ixe1/codexel\u{7}Codexel docs\u{1b}]8;;\u{7}" + .underlined(), ]) .dim(), "".into(), diff --git a/codex-rs/tui/src/selection_list.rs b/codex-rs/tui/src/selection_list.rs index 4816735437d..25a6450febd 100644 --- a/codex-rs/tui/src/selection_list.rs +++ b/codex-rs/tui/src/selection_list.rs @@ -11,6 +11,15 @@ pub(crate) fn selection_option_row( index: usize, label: String, is_selected: bool, +) -> Box { + selection_option_row_with_dim(index, label, is_selected, false) +} + +pub(crate) fn selection_option_row_with_dim( + index: usize, + label: String, + is_selected: bool, + dim: bool, ) -> Box { let prefix = if is_selected { format!("› {}. 
", index + 1) @@ -19,6 +28,8 @@ pub(crate) fn selection_option_row( }; let style = if is_selected { Style::default().cyan() + } else if dim { + Style::default().dim() } else { Style::default() }; diff --git a/codex-rs/tui/src/slash_command.rs b/codex-rs/tui/src/slash_command.rs index bfc5616e264..fba062997a2 100644 --- a/codex-rs/tui/src/slash_command.rs +++ b/codex-rs/tui/src/slash_command.rs @@ -13,10 +13,12 @@ pub enum SlashCommand { // DO NOT ALPHA-SORT! Enum order is presentation order in the popup, so // more frequently used commands should be listed first. Model, + PlanModel, Approvals, Experimental, Skills, Review, + Plan, New, Resume, Init, @@ -43,6 +45,7 @@ impl SlashCommand { SlashCommand::Init => "create an AGENTS.md file with instructions for Codex", SlashCommand::Compact => "summarize conversation to prevent hitting the context limit", SlashCommand::Review => "review my current changes and find issues", + SlashCommand::Plan => "plan a task before making changes", SlashCommand::Resume => "resume a saved chat", SlashCommand::Undo => "ask Codex to undo a turn", SlashCommand::Quit | SlashCommand::Exit => "exit Codex", @@ -51,6 +54,7 @@ impl SlashCommand { SlashCommand::Skills => "use skills to improve how Codex performs specific tasks", SlashCommand::Status => "show current session configuration and token usage", SlashCommand::Model => "choose what model and reasoning effort to use", + SlashCommand::PlanModel => "choose what model and reasoning effort to use for /plan", SlashCommand::Approvals => "choose what Codex can do without approval", SlashCommand::Experimental => "toggle beta features", SlashCommand::Mcp => "list configured MCP tools", @@ -75,9 +79,11 @@ impl SlashCommand { | SlashCommand::Compact | SlashCommand::Undo | SlashCommand::Model + | SlashCommand::PlanModel | SlashCommand::Approvals | SlashCommand::Experimental | SlashCommand::Review + | SlashCommand::Plan | SlashCommand::Logout => false, SlashCommand::Diff | SlashCommand::Mention diff 
--git a/codex-rs/tui/src/snapshots/codex_tui__diff_render__tests__single_line_replacement_counts.snap b/codex-rs/tui/src/snapshots/codex_tui__diff_render__tests__single_line_replacement_counts.snap index d4a7bd8bf37..40942a71b38 100644 --- a/codex-rs/tui/src/snapshots/codex_tui__diff_render__tests__single_line_replacement_counts.snap +++ b/codex-rs/tui/src/snapshots/codex_tui__diff_render__tests__single_line_replacement_counts.snap @@ -4,8 +4,8 @@ assertion_line: 765 expression: terminal.backend() --- "• Proposed Change README.md (+1 -1) " -" 1 -# Codex CLI (Rust Implementation) " -" 1 +# Codex CLI (Rust Implementation) banana " +" 1 -# Codexel (Rust Implementation) " +" 1 +# Codexel (Rust Implementation) banana " " " " " " " diff --git a/codex-rs/tui/src/snapshots/codex_tui__history_cell__tests__web_search_history_cell_snapshot.snap b/codex-rs/tui/src/snapshots/codex_tui__history_cell__tests__web_search_history_cell_snapshot.snap new file mode 100644 index 00000000000..e119420f18c --- /dev/null +++ b/codex-rs/tui/src/snapshots/codex_tui__history_cell__tests__web_search_history_cell_snapshot.snap @@ -0,0 +1,6 @@ +--- +source: tui/src/history_cell.rs +expression: rendered +--- +• Searched example search query with several generic words to + exercise wrapping diff --git a/codex-rs/tui/src/snapshots/codex_tui__history_cell__tests__web_search_history_cell_transcript_snapshot.snap b/codex-rs/tui/src/snapshots/codex_tui__history_cell__tests__web_search_history_cell_transcript_snapshot.snap new file mode 100644 index 00000000000..e119420f18c --- /dev/null +++ b/codex-rs/tui/src/snapshots/codex_tui__history_cell__tests__web_search_history_cell_transcript_snapshot.snap @@ -0,0 +1,6 @@ +--- +source: tui/src/history_cell.rs +expression: rendered +--- +• Searched example search query with several generic words to + exercise wrapping diff --git a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap 
b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap index 702c9140fd1..313e80cbb7d 100644 --- a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap +++ b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt.snap @@ -3,12 +3,10 @@ source: tui/src/model_migration.rs expression: terminal.backend() --- -> Try gpt-5.1-codex-max +> Codex just got an upgrade. Introducing gpt-5.1-codex-max. - We recommend switching from gpt-5.1-codex-mini to - gpt-5.1-codex-max. - - Codex-optimized flagship for deep and fast reasoning. + Upgrade to gpt-5.2-codex for the latest and greatest + agentic coding model. You can continue using gpt-5.1-codex-mini if you prefer. diff --git a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt_gpt5_codex.snap b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt_gpt5_codex.snap index d71154c5529..391eda72e80 100644 --- a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt_gpt5_codex.snap +++ b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt_gpt5_codex.snap @@ -3,7 +3,7 @@ source: tui/src/model_migration.rs expression: terminal.backend() --- -> Try gpt-5.1-codex-max +> Codex just got an upgrade. Introducing gpt-5.1-codex-max. We recommend switching from gpt-5-codex to gpt-5.1-codex-max. 
diff --git a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt_gpt5_codex_mini.snap b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt_gpt5_codex_mini.snap index 489bf7c97dd..67cc2603e25 100644 --- a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt_gpt5_codex_mini.snap +++ b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt_gpt5_codex_mini.snap @@ -3,7 +3,7 @@ source: tui/src/model_migration.rs expression: terminal.backend() --- -> Try gpt-5.1-codex-mini +> Codex just got an upgrade. Introducing gpt-5.1-codex-mini. We recommend switching from gpt-5-codex-mini to gpt-5.1-codex-mini. diff --git a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt_gpt5_family.snap b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt_gpt5_family.snap index 5713c656bbd..47ba59aa774 100644 --- a/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt_gpt5_family.snap +++ b/codex-rs/tui/src/snapshots/codex_tui__model_migration__tests__model_migration_prompt_gpt5_family.snap @@ -3,7 +3,7 @@ source: tui/src/model_migration.rs expression: terminal.backend() --- -> Try gpt-5.1 +> Codex just got an upgrade. Introducing gpt-5.1. We recommend switching from gpt-5 to gpt-5.1. diff --git a/codex-rs/tui/src/snapshots/codex_tui__update_prompt__tests__update_prompt_modal.snap b/codex-rs/tui/src/snapshots/codex_tui__update_prompt__tests__update_prompt_modal.snap index 24d8831c956..0d0182552a6 100644 --- a/codex-rs/tui/src/snapshots/codex_tui__update_prompt__tests__update_prompt_modal.snap +++ b/codex-rs/tui/src/snapshots/codex_tui__update_prompt__tests__update_prompt_modal.snap @@ -4,9 +4,9 @@ expression: terminal.backend() --- ✨ Update available! 
0.0.0 -> 9.9.9 - Release notes: https://github.com/openai/codex/releases/latest + Release notes: https://github.com/Ixe1/codexel/releases/latest -› 1. Update now (runs `npm install -g @openai/codex@latest`) +› 1. Update now (runs `brew upgrade --cask codexel`) 2. Skip 3. Skip until next version diff --git a/codex-rs/tui/src/status/card.rs b/codex-rs/tui/src/status/card.rs index aac981c764e..2ba3bbdd7a1 100644 --- a/codex-rs/tui/src/status/card.rs +++ b/codex-rs/tui/src/status/card.rs @@ -300,7 +300,7 @@ impl HistoryCell for StatusHistoryCell { let mut lines: Vec> = Vec::new(); lines.push(Line::from(vec![ Span::from(format!("{}>_ ", FieldFormatter::INDENT)).dim(), - Span::from("OpenAI Codex").bold(), + Span::from("Codexel").bold(), Span::from(" ").dim(), Span::from(format!("(v{CODEX_CLI_VERSION})")).dim(), ])); @@ -319,7 +319,7 @@ impl HistoryCell for StatusHistoryCell { (None, None) => "ChatGPT".to_string(), }, StatusAccountDisplay::ApiKey => { - "API key configured (run codex login to use ChatGPT)".to_string() + "API key configured (run codexel login to use ChatGPT)".to_string() } }); diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap index dbb634bab1c..5867d2870ef 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭─────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git 
a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_credits_and_limits.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_credits_and_limits.snap index 1707a4c5fbc..5165aa956c7 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_credits_and_limits.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_credits_and_limits.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap index 3ecc4fa8ed2..4a62384064f 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭────────────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap index c22577407ee..e1d1c433160 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap +++ 
b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap index f0e6b734454..0197032c388 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap index f0e6b734454..0197032c388 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git 
a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap index a12be950bcc..2c61ece7e73 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap index 02ba1adec91..0c33b26fec8 100644 --- a/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap +++ b/codex-rs/tui/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui/src/status_indicator_widget.rs b/codex-rs/tui/src/status_indicator_widget.rs index 642b9ca2b71..f391b267ac9 100644 --- a/codex-rs/tui/src/status_indicator_widget.rs +++ b/codex-rs/tui/src/status_indicator_widget.rs @@ -23,6 +23,7 @@ use crate::tui::FrameRequester; pub(crate) struct StatusIndicatorWidget { /// Animated header text (defaults to "Working"). 
header: String, + detail_lines: Vec>, show_interrupt_hint: bool, elapsed_running: Duration, @@ -58,6 +59,7 @@ impl StatusIndicatorWidget { ) -> Self { Self { header: String::from("Working"), + detail_lines: Vec::new(), show_interrupt_hint: true, elapsed_running: Duration::ZERO, last_resume_at: Instant::now(), @@ -78,6 +80,14 @@ impl StatusIndicatorWidget { self.header = header; } + pub(crate) fn set_detail_lines(&mut self, lines: Vec>) { + self.detail_lines = lines; + } + + pub(crate) fn clear_detail_lines(&mut self) { + self.detail_lines.clear(); + } + #[cfg(test)] pub(crate) fn header(&self) -> &str { &self.header @@ -136,7 +146,7 @@ impl StatusIndicatorWidget { impl Renderable for StatusIndicatorWidget { fn desired_height(&self, _width: u16) -> u16 { - 1 + 1u16.saturating_add(self.detail_lines.len().try_into().unwrap_or(u16::MAX)) } fn render(&self, area: Rect, buf: &mut Buffer) { @@ -170,7 +180,23 @@ impl Renderable for StatusIndicatorWidget { spans.push(format!("({pretty_elapsed})").dim()); } - Line::from(spans).render_ref(area, buf); + let mut row = area; + row.height = 1; + Line::from(spans).render_ref(row, buf); + + for (idx, line) in self.detail_lines.iter().enumerate() { + let y = area.y.saturating_add((idx as u16).saturating_add(1)); + if y >= area.y.saturating_add(area.height) { + break; + } + let detail_area = Rect { + x: area.x, + y, + width: area.width, + height: 1, + }; + line.render_ref(detail_area, buf); + } } } diff --git a/codex-rs/tui/src/update_action.rs b/codex-rs/tui/src/update_action.rs index b5cf56a6b48..9f22f3578ab 100644 --- a/codex-rs/tui/src/update_action.rs +++ b/codex-rs/tui/src/update_action.rs @@ -1,11 +1,16 @@ +#[cfg(not(debug_assertions))] +const CODEX_MANAGED_BY_NPM_ENV_VAR: &str = "CODEX_MANAGED_BY_NPM"; +#[cfg(not(debug_assertions))] +const CODEX_MANAGED_BY_BUN_ENV_VAR: &str = "CODEX_MANAGED_BY_BUN"; + /// Update action the CLI should perform after the TUI exits. 
#[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum UpdateAction { - /// Update via `npm install -g @openai/codex@latest`. - NpmGlobalLatest, - /// Update via `bun install -g @openai/codex@latest`. - BunGlobalLatest, - /// Update via `brew upgrade codex`. + /// Update via `npm install -g @ixe1/codexel@latest`. + NpmUpgrade, + /// Update via `bun install -g @ixe1/codexel@latest`. + BunUpgrade, + /// Update via `brew upgrade --cask codexel`. BrewUpgrade, } @@ -13,9 +18,9 @@ impl UpdateAction { /// Returns the list of command-line arguments for invoking the update. pub fn command_args(self) -> (&'static str, &'static [&'static str]) { match self { - UpdateAction::NpmGlobalLatest => ("npm", &["install", "-g", "@openai/codex"]), - UpdateAction::BunGlobalLatest => ("bun", &["install", "-g", "@openai/codex"]), - UpdateAction::BrewUpgrade => ("brew", &["upgrade", "codex"]), + UpdateAction::NpmUpgrade => ("npm", &["install", "-g", "@ixe1/codexel@latest"]), + UpdateAction::BunUpgrade => ("bun", &["install", "-g", "@ixe1/codexel@latest"]), + UpdateAction::BrewUpgrade => ("brew", &["upgrade", "--cask", "codexel"]), } } @@ -30,29 +35,20 @@ impl UpdateAction { #[cfg(not(debug_assertions))] pub(crate) fn get_update_action() -> Option { let exe = std::env::current_exe().unwrap_or_default(); - let managed_by_npm = std::env::var_os("CODEX_MANAGED_BY_NPM").is_some(); - let managed_by_bun = std::env::var_os("CODEX_MANAGED_BY_BUN").is_some(); - detect_update_action( - cfg!(target_os = "macos"), - &exe, - managed_by_npm, - managed_by_bun, - ) + detect_update_action(cfg!(target_os = "macos"), &exe, ManagedBy::from_env()) } #[cfg(any(not(debug_assertions), test))] fn detect_update_action( is_macos: bool, current_exe: &std::path::Path, - managed_by_npm: bool, - managed_by_bun: bool, + managed_by: Option, ) -> Option { - if managed_by_npm { - Some(UpdateAction::NpmGlobalLatest) - } else if managed_by_bun { - Some(UpdateAction::BunGlobalLatest) - } else if is_macos + if let 
Some(managed_by) = managed_by { + return Some(managed_by.to_update_action()); + } + if is_macos && (current_exe.starts_with("/opt/homebrew") || current_exe.starts_with("/usr/local")) { Some(UpdateAction::BrewUpgrade) @@ -61,6 +57,34 @@ fn detect_update_action( } } +#[cfg(any(not(debug_assertions), test))] +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum ManagedBy { + Npm, + Bun, +} + +#[cfg(any(not(debug_assertions), test))] +impl ManagedBy { + #[cfg(not(debug_assertions))] + fn from_env() -> Option { + if std::env::var_os(CODEX_MANAGED_BY_BUN_ENV_VAR).is_some() { + return Some(Self::Bun); + } + if std::env::var_os(CODEX_MANAGED_BY_NPM_ENV_VAR).is_some() { + return Some(Self::Npm); + } + None + } + + fn to_update_action(self) -> UpdateAction { + match self { + ManagedBy::Npm => UpdateAction::NpmUpgrade, + ManagedBy::Bun => UpdateAction::BunUpgrade, + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -68,34 +92,48 @@ mod tests { #[test] fn detects_update_action_without_env_mutation() { assert_eq!( - detect_update_action(false, std::path::Path::new("/any/path"), false, false), + detect_update_action(false, std::path::Path::new("/any/path"), None), None ); assert_eq!( - detect_update_action(false, std::path::Path::new("/any/path"), true, false), - Some(UpdateAction::NpmGlobalLatest) + detect_update_action( + true, + std::path::Path::new("/opt/homebrew/bin/codexel"), + None + ), + Some(UpdateAction::BrewUpgrade) ); assert_eq!( - detect_update_action(false, std::path::Path::new("/any/path"), false, true), - Some(UpdateAction::BunGlobalLatest) + detect_update_action(true, std::path::Path::new("/usr/local/bin/codexel"), None), + Some(UpdateAction::BrewUpgrade) + ); + } + + #[test] + fn detects_update_action_from_package_manager() { + assert_eq!( + detect_update_action( + false, + std::path::Path::new("/any/path"), + Some(ManagedBy::Npm) + ), + Some(UpdateAction::NpmUpgrade) ); assert_eq!( detect_update_action( - true, - 
std::path::Path::new("/opt/homebrew/bin/codex"), false, - false + std::path::Path::new("/any/path"), + Some(ManagedBy::Bun) ), - Some(UpdateAction::BrewUpgrade) + Some(UpdateAction::BunUpgrade) ); assert_eq!( detect_update_action( true, - std::path::Path::new("/usr/local/bin/codex"), - false, - false + std::path::Path::new("/opt/homebrew/bin/codexel"), + Some(ManagedBy::Npm) ), - Some(UpdateAction::BrewUpgrade) + Some(UpdateAction::NpmUpgrade) ); } } diff --git a/codex-rs/tui/src/update_prompt.rs b/codex-rs/tui/src/update_prompt.rs index 43ee0dbd400..600e03191f8 100644 --- a/codex-rs/tui/src/update_prompt.rs +++ b/codex-rs/tui/src/update_prompt.rs @@ -204,7 +204,7 @@ impl WidgetRef for &UpdatePromptScreen { column.push( Line::from(vec![ "Release notes: ".dim(), - "https://github.com/openai/codex/releases/latest" + "https://github.com/Ixe1/codexel/releases/latest" .dim() .underlined(), ]) @@ -253,7 +253,7 @@ mod tests { UpdatePromptScreen::new( FrameRequester::test_dummy(), "9.9.9".into(), - UpdateAction::NpmGlobalLatest, + UpdateAction::BrewUpgrade, ) } diff --git a/codex-rs/tui/src/updates.rs b/codex-rs/tui/src/updates.rs index 89fd6f32f63..361b2cc024b 100644 --- a/codex-rs/tui/src/updates.rs +++ b/codex-rs/tui/src/updates.rs @@ -57,8 +57,8 @@ struct VersionInfo { const VERSION_FILENAME: &str = "version.json"; // We use the latest version from the cask if installation is via homebrew - homebrew does not immediately pick up the latest release and can lag behind. 
const HOMEBREW_CASK_URL: &str = - "https://raw.githubusercontent.com/Homebrew/homebrew-cask/HEAD/Casks/c/codex.rb"; -const LATEST_RELEASE_URL: &str = "https://api.github.com/repos/openai/codex/releases/latest"; + "https://raw.githubusercontent.com/Homebrew/homebrew-cask/HEAD/Casks/c/codexel.rb"; +const LATEST_RELEASE_URL: &str = "https://api.github.com/repos/Ixe1/codexel/releases/latest"; #[derive(Deserialize, Debug, Clone)] struct ReleaseInfo { @@ -192,7 +192,7 @@ mod tests { #[test] fn parses_version_from_cask_contents() { let cask = r#" - cask "codex" do + cask "codexel" do version "0.55.0" end "#; diff --git a/codex-rs/tui/src/version.rs b/codex-rs/tui/src/version.rs index 8c8d108dc61..bdbfedf1794 100644 --- a/codex-rs/tui/src/version.rs +++ b/codex-rs/tui/src/version.rs @@ -1,2 +1,2 @@ -/// The current Codex CLI version as embedded at compile time. +/// The current Codexel version as embedded at compile time. pub const CODEX_CLI_VERSION: &str = env!("CARGO_PKG_VERSION"); diff --git a/codex-rs/tui/tooltips.txt b/codex-rs/tui/tooltips.txt index b15bcdbd12c..94ad487bbb0 100644 --- a/codex-rs/tui/tooltips.txt +++ b/codex-rs/tui/tooltips.txt @@ -1,11 +1,11 @@ Use /compact when the conversation gets long to summarize history and free up context. Start a fresh idea with /new; the previous session stays in history. -If a turn went sideways, /undo asks Codex to revert the last changes. +If a turn went sideways, /undo asks Codexel to revert the last changes. Use /feedback to send logs to the maintainers when something looks off. Switch models or reasoning effort quickly with /model. -You can run any shell command from Codex using `!` (e.g. `!ls`) +You can run any shell command from Codexel using `!` (e.g. `!ls`) Type / to open the command popup; Tab autocompletes slash commands and saved prompts. You can define your own `/` commands with custom prompts. 
More info: https://developers.openai.com/codex/guides/slash-commands#create-your-own-slash-commands-with-custom-prompts When the composer is empty, press Esc to step back and edit your last message; Enter confirms. Paste an image with Ctrl+V to attach it to your next message. -You can resume a previous conversation by running `codex resume` +You can resume a previous conversation by running `codexel resume` diff --git a/codex-rs/tui2/src/app.rs b/codex-rs/tui2/src/app.rs index 1275c1a339f..0b435efe09e 100644 --- a/codex-rs/tui2/src/app.rs +++ b/codex-rs/tui2/src/app.rs @@ -108,7 +108,7 @@ fn session_summary( let usage_line = FinalOutput::from(token_usage).to_string(); let resume_command = - conversation_id.map(|conversation_id| format!("codex resume {conversation_id}")); + conversation_id.map(|conversation_id| format!("codexel resume {conversation_id}")); Some(SessionSummary { usage_line, resume_command, @@ -214,7 +214,7 @@ async fn handle_model_migration_prompt_if_needed( id: target_model, reasoning_effort_mapping, migration_config_key, - model_link: _, + .. 
}) = upgrade { if migration_prompt_hidden(config, migration_config_key.as_str()) { @@ -1605,11 +1605,19 @@ impl App { self.chat_widget.set_model(&model, model_family); self.current_model = model; } - AppEvent::OpenReasoningPopup { model } => { - self.chat_widget.open_reasoning_popup(model); + AppEvent::UpdatePlanModel(model) => { + self.config.plan_model = Some(model.clone()); + self.chat_widget.set_plan_model(&model); } - AppEvent::OpenAllModelsPopup { models } => { - self.chat_widget.open_all_models_popup(models); + AppEvent::UpdatePlanReasoningEffort(effort) => { + self.config.plan_model_reasoning_effort = effort; + self.chat_widget.set_plan_reasoning_effort(effort); + } + AppEvent::OpenReasoningPopup { model, target } => { + self.chat_widget.open_reasoning_popup(target, model); + } + AppEvent::OpenAllModelsPopup { models, target } => { + self.chat_widget.open_all_models_popup(target, models); } AppEvent::OpenFullAccessConfirmation { preset } => { self.chat_widget.open_full_access_confirmation(preset); @@ -1671,7 +1679,9 @@ impl App { approval_policy: Some(preset.approval), sandbox_policy: Some(preset.sandbox.clone()), model: None, + plan_model: None, effort: None, + plan_effort: None, summary: None, }, )); @@ -1738,6 +1748,45 @@ impl App { } } } + AppEvent::PersistPlanModelSelection { model, effort } => { + let profile = self.active_profile.as_deref(); + match ConfigEditsBuilder::new(&self.config.codex_home) + .with_profile(profile) + .set_plan_model(Some(model.as_str()), effort) + .apply() + .await + { + Ok(()) => { + let mut message = format!("Plan model changed to {model}"); + if let Some(label) = Self::reasoning_label_for(&model, effort) { + message.push(' '); + message.push_str(label); + } + message.push_str(" (used for /plan)"); + if let Some(profile) = profile { + message.push_str(" for "); + message.push_str(profile); + message.push_str(" profile"); + } + self.chat_widget.add_info_message(message, None); + } + Err(err) => { + tracing::error!( + error = 
%err, + "failed to persist plan model selection" + ); + if let Some(profile) = profile { + self.chat_widget.add_error_message(format!( + "Failed to save plan model for profile `{profile}`: {err}" + )); + } else { + self.chat_widget.add_error_message(format!( + "Failed to save default plan model: {err}" + )); + } + } + } + } AppEvent::UpdateAskForApprovalPolicy(policy) => { self.chat_widget.set_approval_policy(policy); } @@ -2500,7 +2549,7 @@ mod tests { ); assert_eq!( summary.resume_command, - Some("codex resume 123e4567-e89b-12d3-a456-426614174000".to_string()) + Some("codexel resume 123e4567-e89b-12d3-a456-426614174000".to_string()) ); } } diff --git a/codex-rs/tui2/src/app_event.rs b/codex-rs/tui2/src/app_event.rs index adb9c1308e8..374b1e2c37d 100644 --- a/codex-rs/tui2/src/app_event.rs +++ b/codex-rs/tui2/src/app_event.rs @@ -14,6 +14,12 @@ use codex_core::protocol::AskForApproval; use codex_core::protocol::SandboxPolicy; use codex_protocol::openai_models::ReasoningEffort; +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub(crate) enum ModelPickerTarget { + Chat, + Plan, +} + #[allow(clippy::large_enum_variant)] #[derive(Debug)] pub(crate) enum AppEvent { @@ -63,20 +69,34 @@ pub(crate) enum AppEvent { /// Update the current model slug in the running app and widget. UpdateModel(String), + /// Update the current plan model slug in the running app and widget. + UpdatePlanModel(String), + + /// Update the current plan reasoning effort in the running app and widget. + UpdatePlanReasoningEffort(Option), + /// Persist the selected model and reasoning effort to the appropriate config. PersistModelSelection { model: String, effort: Option, }, + /// Persist the selected plan model and reasoning effort to the appropriate config. + PersistPlanModelSelection { + model: String, + effort: Option, + }, + /// Open the reasoning selection popup after picking a model. 
OpenReasoningPopup { model: ModelPreset, + target: ModelPickerTarget, }, /// Open the full model picker (non-auto models). OpenAllModelsPopup { models: Vec, + target: ModelPickerTarget, }, /// Open the confirmation prompt before enabling full access mode. diff --git a/codex-rs/tui2/src/bottom_pane/ask_user_question_overlay.rs b/codex-rs/tui2/src/bottom_pane/ask_user_question_overlay.rs new file mode 100644 index 00000000000..59c69890ee8 --- /dev/null +++ b/codex-rs/tui2/src/bottom_pane/ask_user_question_overlay.rs @@ -0,0 +1,1002 @@ +use std::cell::RefCell; +use std::collections::HashMap; + +use crossterm::event::KeyCode; +use crossterm::event::KeyEvent; +use crossterm::event::KeyModifiers; +use ratatui::buffer::Buffer; +use ratatui::layout::Constraint; +use ratatui::layout::Layout; +use ratatui::layout::Rect; +use ratatui::style::Stylize; +use ratatui::text::Line; +use ratatui::text::Span; +use ratatui::widgets::Block; +use ratatui::widgets::Clear; +use ratatui::widgets::Paragraph; +use ratatui::widgets::StatefulWidgetRef; +use ratatui::widgets::Widget; +use textwrap::wrap; + +use codex_core::protocol::AskUserQuestion; +use codex_core::protocol::AskUserQuestionRequestEvent; +use codex_core::protocol::AskUserQuestionResponse; +use codex_core::protocol::Op; + +use crate::app_event::AppEvent; +use crate::app_event_sender::AppEventSender; +use crate::key_hint; +use crate::render::Insets; +use crate::render::RectExt as _; +use crate::style::user_message_style; + +use super::CancellationEvent; +use super::bottom_pane_view::BottomPaneView; +use super::popup_consts::MAX_POPUP_ROWS; +use super::scroll_state::ScrollState; +use super::selection_popup_common::GenericDisplayRow; +use super::selection_popup_common::measure_rows_height; +use super::selection_popup_common::render_rows; +use super::textarea::TextArea; +use super::textarea::TextAreaState; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum Mode { + Select, + OtherInput, + Review, +} + +fn 
normalize_choice_label(label: &str) -> String { + let trimmed = label.trim_start(); + + let mut chars = trimmed.char_indices().peekable(); + let mut saw_digit = false; + let mut after_digits = 0usize; + while let Some((idx, ch)) = chars.peek().copied() + && ch.is_ascii_digit() + { + saw_digit = true; + chars.next(); + after_digits = idx + ch.len_utf8(); + } + + if !saw_digit { + return trimmed.to_string(); + } + + // Only strip numeric prefixes when they look like enumeration: "1) Foo", "2. Bar", "3: Baz". + let Some((idx, ch)) = chars.peek().copied() else { + return trimmed.to_string(); + }; + if !matches!(ch, ')' | '.' | ':') { + return trimmed.to_string(); + } + + chars.next(); + let mut end = idx + ch.len_utf8(); + + while let Some((idx, ch)) = chars.peek().copied() + && ch.is_whitespace() + { + chars.next(); + end = idx + ch.len_utf8(); + } + + if end <= after_digits { + return trimmed.to_string(); + } + + let rest = trimmed[end..].trim_start(); + if rest.is_empty() { + trimmed.to_string() + } else { + rest.to_string() + } +} + +#[derive(Debug, Clone)] +struct AnswerDraft { + selected: Vec, + other_text: String, +} + +impl AnswerDraft { + fn new(q: &AskUserQuestion) -> Self { + Self { + selected: vec![false; q.options.len() + 1], // + Other + other_text: String::new(), + } + } + + fn any_selected(&self) -> bool { + self.selected.iter().any(|s| *s) + } + + fn other_selected(&self) -> bool { + self.selected.last().copied().unwrap_or(false) + } + + fn trimmed_other_text(&self) -> String { + self.other_text.trim().to_string() + } + + fn to_answer_string(&self, q: &AskUserQuestion) -> Option { + if !self.any_selected() { + return None; + } + + if self.other_selected() && self.trimmed_other_text().is_empty() { + return None; + } + + if q.multi_select { + let mut parts = Vec::new(); + for (idx, selected) in self.selected.iter().enumerate() { + if !*selected { + continue; + } + if idx == q.options.len() { + parts.push(self.trimmed_other_text()); + } else if let 
Some(opt) = q.options.get(idx) { + parts.push(normalize_choice_label(opt.label.as_str())); + } + } + Some(parts.join(", ")) + } else { + let (idx, _) = self.selected.iter().enumerate().find(|(_, s)| **s)?; + + if idx == q.options.len() { + let other = self.trimmed_other_text(); + if other.is_empty() { None } else { Some(other) } + } else { + q.options + .get(idx) + .map(|o| normalize_choice_label(o.label.as_str())) + } + } + } +} + +pub(crate) struct AskUserQuestionOverlay { + id: String, + questions: Vec, + current_idx: usize, + drafts: Vec, + + mode: Mode, + state: ScrollState, + multi_select: bool, + selected: Vec, + textarea: TextArea, + textarea_state: RefCell, + error: Option, + + return_to_review: bool, + + app_event_tx: AppEventSender, + complete: bool, +} + +impl AskUserQuestionOverlay { + pub(crate) fn new( + id: String, + ev: AskUserQuestionRequestEvent, + app_event_tx: AppEventSender, + ) -> Self { + let drafts = ev.questions.iter().map(AnswerDraft::new).collect(); + let mut overlay = Self { + id, + questions: ev.questions, + current_idx: 0, + drafts, + mode: Mode::Select, + state: ScrollState::new(), + multi_select: false, + selected: Vec::new(), + textarea: TextArea::new(), + textarea_state: RefCell::new(TextAreaState::default()), + error: None, + return_to_review: false, + app_event_tx, + complete: false, + }; + overlay.reset_for_current_question(); + overlay + } + + fn current_question(&self) -> Option<&AskUserQuestion> { + self.questions.get(self.current_idx) + } + + fn reset_for_current_question(&mut self) { + self.mode = Mode::Select; + self.error = None; + self.state.reset(); + self.textarea_state.replace(TextAreaState::default()); + + let Some(q) = self.current_question().cloned() else { + self.multi_select = false; + self.selected.clear(); + self.state.selected_idx = None; + self.textarea.set_text(""); + return; + }; + + self.multi_select = q.multi_select; + + let expected_len = q.options.len() + 1; + if let Some(draft) = 
self.drafts.get_mut(self.current_idx) + && draft.selected.len() != expected_len + { + *draft = AnswerDraft::new(&q); + } + + let draft = self + .drafts + .get(self.current_idx) + .cloned() + .unwrap_or_else(|| AnswerDraft::new(&q)); + + self.selected = draft.selected; + self.textarea.set_text(draft.other_text.as_str()); + self.state.selected_idx = Some(0); + } + + fn save_current_draft(&mut self) { + let Some(q) = self.current_question() else { + return; + }; + + let expected_len = q.options.len() + 1; + if self.selected.len() != expected_len { + self.selected = vec![false; expected_len]; + } + + if let Some(draft) = self.drafts.get_mut(self.current_idx) { + draft.selected.clone_from(&self.selected); + draft.other_text = self.textarea.text().to_string(); + } + } + + fn options_len(&self) -> usize { + self.current_question() + .map(|q| q.options.len() + 1) + .unwrap_or(0) + } + + fn is_other_idx(&self, idx: usize) -> bool { + self.current_question() + .map(|q| idx == q.options.len()) + .unwrap_or(false) + } + + fn move_up(&mut self) { + let len = self.rows_len(); + self.state.move_up_wrap(len); + self.state.ensure_visible(len, self.max_visible_rows()); + } + + fn move_down(&mut self) { + let len = self.rows_len(); + self.state.move_down_wrap(len); + self.state.ensure_visible(len, self.max_visible_rows()); + } + + fn max_visible_rows(&self) -> usize { + MAX_POPUP_ROWS.min(self.rows_len().max(1)) + } + + fn rows_len(&self) -> usize { + match self.mode { + Mode::Review => self.questions.len().saturating_add(2), // Submit, Cancel + Mode::Select | Mode::OtherInput => self.options_len(), + } + } + + fn toggle_current(&mut self) { + let Some(idx) = self.state.selected_idx else { + return; + }; + if let Some(flag) = self.selected.get_mut(idx) { + *flag = !*flag; + } + self.error = None; + } + + fn select_single(&mut self) { + let Some(idx) = self.state.selected_idx else { + return; + }; + self.selected.iter_mut().for_each(|s| *s = false); + if let Some(flag) = 
self.selected.get_mut(idx) { + *flag = true; + } + self.error = None; + } + + fn any_selected(&self) -> bool { + self.selected.iter().any(|s| *s) + } + + fn other_selected(&self) -> bool { + let Some(q) = self.current_question() else { + return false; + }; + self.selected.get(q.options.len()).copied().unwrap_or(false) + } + + fn other_text(&self) -> String { + self.textarea.text().trim().to_string() + } + + fn confirm_selection(&mut self) { + let Some(_) = self.current_question() else { + self.finish_answered(HashMap::new()); + return; + }; + + if self.multi_select { + if !self.any_selected() { + self.error = Some("Select at least one option.".to_string()); + return; + } + if self.other_selected() && self.other_text().is_empty() { + self.mode = Mode::OtherInput; + self.error = None; + return; + } + self.save_current_draft(); + self.advance_or_review(); + } else { + let Some((idx, _)) = self.selected.iter().enumerate().find(|(_, s)| **s) else { + self.error = Some("Select an option.".to_string()); + return; + }; + if self.is_other_idx(idx) { + if self.other_text().is_empty() { + self.mode = Mode::OtherInput; + self.error = None; + return; + } + self.save_current_draft(); + self.advance_or_review(); + return; + } + self.save_current_draft(); + self.advance_or_review(); + } + } + + fn accept_other_input(&mut self) { + if self.other_text().is_empty() { + self.error = Some("Other response cannot be empty.".to_string()); + return; + } + self.mode = Mode::Select; + self.confirm_selection(); + } + + fn advance_or_review(&mut self) { + if self.return_to_review || self.current_idx + 1 >= self.questions.len() { + self.enter_review(); + return; + } + + self.current_idx += 1; + self.reset_for_current_question(); + } + + fn enter_review(&mut self) { + self.save_current_draft(); + self.mode = Mode::Review; + self.error = None; + self.state.reset(); + self.state.selected_idx = Some(0); + self.return_to_review = true; + } + + fn submit_from_review(&mut self) { + let mut answers: 
HashMap = HashMap::new(); + for (idx, q) in self.questions.iter().enumerate() { + let Some(draft) = self.drafts.get(idx) else { + self.go_to_question( + idx, + Some("Please answer this question to submit.".to_string()), + ); + return; + }; + let Some(answer) = draft.to_answer_string(q) else { + self.go_to_question( + idx, + Some("Please answer this question to submit.".to_string()), + ); + return; + }; + answers.insert(q.header.clone(), answer); + } + + self.finish_answered(answers); + } + + fn go_to_question(&mut self, idx: usize, error: Option) { + if matches!(self.mode, Mode::Select | Mode::OtherInput) { + self.save_current_draft(); + } + self.current_idx = idx.min(self.questions.len().saturating_sub(1)); + self.reset_for_current_question(); + self.error = error; + } + + fn go_to_previous_question(&mut self) { + if self.current_idx == 0 { + return; + } + self.save_current_draft(); + self.current_idx -= 1; + self.reset_for_current_question(); + } + + fn finish_answered(&mut self, answers: HashMap) { + let response = AskUserQuestionResponse::Answered { answers }; + self.app_event_tx + .send(AppEvent::CodexOp(Op::ResolveAskUserQuestion { + id: self.id.clone(), + response, + })); + self.complete = true; + } + + fn finish_cancelled(&mut self) { + self.app_event_tx + .send(AppEvent::CodexOp(Op::ResolveAskUserQuestion { + id: self.id.clone(), + response: AskUserQuestionResponse::Cancelled, + })); + self.complete = true; + } + + fn build_rows(&self) -> Vec { + if self.mode == Mode::Review { + return self.build_review_rows(); + } + + let Some(q) = self.current_question() else { + return Vec::new(); + }; + + let mut rows = Vec::with_capacity(q.options.len() + 1); + for (idx, opt) in q.options.iter().enumerate() { + rows.push(GenericDisplayRow { + name: self.row_name(idx, opt.label.as_str()), + display_shortcut: None, + match_indices: None, + description: Some(opt.description.clone()), + wrap_indent: None, + }); + } + rows.push(GenericDisplayRow { + name: 
self.row_name(q.options.len(), "Other"), + display_shortcut: None, + match_indices: None, + description: Some("Provide custom text input.".to_string()), + wrap_indent: None, + }); + rows + } + + fn build_review_rows(&self) -> Vec { + let mut rows = Vec::with_capacity(self.questions.len() + 2); + for (idx, q) in self.questions.iter().enumerate() { + let answer = self + .drafts + .get(idx) + .and_then(|d| d.to_answer_string(q)) + .unwrap_or_else(|| "Unanswered".to_string()); + + rows.push(GenericDisplayRow { + name: format!("{}. {}", idx + 1, q.header), + display_shortcut: None, + match_indices: None, + description: Some(answer), + wrap_indent: Some(4), + }); + } + + rows.push(GenericDisplayRow { + name: "Submit".to_string(), + display_shortcut: None, + match_indices: None, + description: Some("Send answers.".to_string()), + wrap_indent: Some(4), + }); + + rows.push(GenericDisplayRow { + name: "Cancel".to_string(), + display_shortcut: None, + match_indices: None, + description: Some("Cancel without sending.".to_string()), + wrap_indent: Some(4), + }); + + rows + } + + fn row_name(&self, idx: usize, label: &str) -> String { + let n = idx + 1; + let label = normalize_choice_label(label); + if self.multi_select { + let checked = self.selected.get(idx).copied().unwrap_or(false); + let box_mark = if checked { "[x]" } else { "[ ]" }; + format!("{n}. {box_mark} {label}") + } else { + format!("{n}. 
{label}") + } + } + + fn footer_hint(&self) -> Line<'static> { + match self.mode { + Mode::Select => { + if self.multi_select { + Line::from(vec![ + "Space".into(), + " toggle, ".into(), + key_hint::plain(KeyCode::Enter).into(), + " next, ".into(), + key_hint::plain(KeyCode::BackTab).into(), + " back, ".into(), + key_hint::plain(KeyCode::Esc).into(), + " cancel".into(), + ]) + } else { + Line::from(vec![ + key_hint::plain(KeyCode::Enter).into(), + " choose, ".into(), + key_hint::plain(KeyCode::BackTab).into(), + " back, ".into(), + key_hint::plain(KeyCode::Esc).into(), + " cancel".into(), + ]) + } + } + Mode::OtherInput => Line::from(vec![ + key_hint::plain(KeyCode::Enter).into(), + " submit, ".into(), + key_hint::ctrl(KeyCode::Char('b')).into(), + " back, ".into(), + key_hint::plain(KeyCode::Esc).into(), + " cancel".into(), + ]), + Mode::Review => Line::from(vec![ + key_hint::plain(KeyCode::Enter).into(), + " edit/submit, ".into(), + key_hint::plain(KeyCode::Esc).into(), + " cancel".into(), + ]), + } + } + + fn header_lines(&self, width: u16) -> Vec> { + if self.mode == Mode::Review { + let progress = format!("Review ({})", self.questions.len()); + let mut lines = vec![Line::from(vec!["[".into(), progress.bold(), "]".into()])]; + lines.push(Line::from("Select a question to edit, then submit.")); + return lines; + } + + let Some(q) = self.current_question() else { + return vec![Line::from("No questions.".dim())]; + }; + + let usable_width = width.saturating_sub(4).max(1) as usize; + let progress = format!( + "{} ({}/{})", + q.header, + self.current_idx + 1, + self.questions.len() + ); + + let mut lines = vec![Line::from(vec!["[".into(), progress.bold(), "]".into()])]; + + for w in wrap(q.question.as_str(), usable_width) { + lines.push(Line::from(w.into_owned())); + } + + if let Some(err) = &self.error { + lines.push(Line::from(vec!["".into()])); + lines.push(Line::from(err.clone().red())); + } + + lines + } + + fn cursor_pos_for_other_input(&self, area: Rect) -> 
Option<(u16, u16)> { + if self.mode != Mode::OtherInput { + return None; + } + if area.height < 2 || area.width <= 2 { + return None; + } + let textarea_rect = self.textarea_rect(area); + let state = *self.textarea_state.borrow(); + self.textarea.cursor_pos_with_state(textarea_rect, state) + } + + fn textarea_rect(&self, area: Rect) -> Rect { + let inset = area.inset(Insets::vh(1, 2)); + Rect { + x: inset.x, + y: inset.y, + width: inset.width, + height: inset.height.clamp(1, 5), + } + } +} + +impl BottomPaneView for AskUserQuestionOverlay { + fn handle_key_event(&mut self, key_event: KeyEvent) { + match self.mode { + Mode::Select => match key_event { + KeyEvent { + code: KeyCode::Up, .. + } + | KeyEvent { + code: KeyCode::Char('p'), + modifiers: KeyModifiers::CONTROL, + .. + } + | KeyEvent { + code: KeyCode::Char('\u{0010}'), + modifiers: KeyModifiers::NONE, + .. + } /* ^P */ => self.move_up(), + KeyEvent { + code: KeyCode::Char('k'), + modifiers: KeyModifiers::NONE, + .. + } => self.move_up(), + KeyEvent { + code: KeyCode::Down, + .. + } + | KeyEvent { + code: KeyCode::Char('n'), + modifiers: KeyModifiers::CONTROL, + .. + } + | KeyEvent { + code: KeyCode::Char('\u{000e}'), + modifiers: KeyModifiers::NONE, + .. + } /* ^N */ => self.move_down(), + KeyEvent { + code: KeyCode::Char('j'), + modifiers: KeyModifiers::NONE, + .. + } => self.move_down(), + KeyEvent { + code: KeyCode::BackTab, + .. + } + | KeyEvent { + code: KeyCode::Left, + .. + } + | KeyEvent { + code: KeyCode::Char('h'), + modifiers: KeyModifiers::NONE, + .. + } + | KeyEvent { + code: KeyCode::Char('b'), + modifiers: KeyModifiers::CONTROL, + .. + } => self.go_to_previous_question(), + KeyEvent { + code: KeyCode::Esc, .. + } => { + self.on_ctrl_c(); + } + KeyEvent { + code: KeyCode::Char(' '), + modifiers: KeyModifiers::NONE, + .. + } if self.multi_select => { + self.toggle_current(); + } + KeyEvent { + code: KeyCode::Char(c), + modifiers, + .. 
+ } if !modifiers.contains(KeyModifiers::CONTROL) + && !modifiers.contains(KeyModifiers::ALT) => + { + if let Some(idx) = c + .to_digit(10) + .map(|d| d as usize) + .and_then(|d| d.checked_sub(1)) + && idx < self.options_len() + { + self.state.selected_idx = Some(idx); + self.state.ensure_visible(self.options_len(), self.max_visible_rows()); + if self.multi_select { + self.toggle_current(); + } else { + self.select_single(); + self.confirm_selection(); + } + } + } + KeyEvent { + code: KeyCode::Enter, + modifiers: KeyModifiers::NONE, + .. + } => { + if self.multi_select { + self.confirm_selection(); + } else { + self.select_single(); + self.confirm_selection(); + } + } + _ => {} + }, + Mode::OtherInput => match key_event { + KeyEvent { + code: KeyCode::Esc, .. + } => { + self.on_ctrl_c(); + } + KeyEvent { + code: KeyCode::Char('b'), + modifiers: KeyModifiers::CONTROL, + .. + } => { + self.error = None; + self.mode = Mode::Select; + } + KeyEvent { + code: KeyCode::Enter, + modifiers: KeyModifiers::NONE, + .. + } => { + self.accept_other_input(); + } + KeyEvent { + code: KeyCode::Enter, + .. + } => { + self.textarea.input(key_event); + } + other => { + self.textarea.input(other); + } + }, + Mode::Review => match key_event { + KeyEvent { + code: KeyCode::Up, .. + } + | KeyEvent { + code: KeyCode::Char('p'), + modifiers: KeyModifiers::CONTROL, + .. + } => self.move_up(), + KeyEvent { + code: KeyCode::Char('k'), + modifiers: KeyModifiers::NONE, + .. + } => self.move_up(), + KeyEvent { + code: KeyCode::Down, + .. + } + | KeyEvent { + code: KeyCode::Char('n'), + modifiers: KeyModifiers::CONTROL, + .. + } => self.move_down(), + KeyEvent { + code: KeyCode::Char('j'), + modifiers: KeyModifiers::NONE, + .. + } => self.move_down(), + KeyEvent { + code: KeyCode::Esc, .. + } => { + self.on_ctrl_c(); + } + KeyEvent { + code: KeyCode::Char(c), + modifiers, + .. 
+ } if !modifiers.contains(KeyModifiers::CONTROL) + && !modifiers.contains(KeyModifiers::ALT) => + { + if let Some(idx) = c + .to_digit(10) + .map(|d| d as usize) + .and_then(|d| d.checked_sub(1)) + && idx < self.questions.len() + { + self.state.selected_idx = Some(idx); + self.state + .ensure_visible(self.rows_len(), self.max_visible_rows()); + self.return_to_review = true; + self.go_to_question(idx, None); + } + } + KeyEvent { + code: KeyCode::Enter, + modifiers: KeyModifiers::NONE, + .. + } => { + let Some(idx) = self.state.selected_idx else { + return; + }; + + if idx < self.questions.len() { + self.return_to_review = true; + self.go_to_question(idx, None); + } else if idx == self.questions.len() { + self.submit_from_review(); + } else { + self.finish_cancelled(); + } + } + _ => {} + }, + } + } + + fn on_ctrl_c(&mut self) -> CancellationEvent { + self.finish_cancelled(); + CancellationEvent::Handled + } + + fn is_complete(&self) -> bool { + self.complete + } + + fn handle_paste(&mut self, pasted: String) -> bool { + if self.mode != Mode::OtherInput { + return false; + } + if pasted.is_empty() { + return false; + } + self.textarea.insert_str(&pasted); + true + } +} + +impl crate::render::renderable::Renderable for AskUserQuestionOverlay { + fn desired_height(&self, width: u16) -> u16 { + let header_height = self.header_lines(width).len() as u16; + let rows_height = measure_rows_height( + &self.build_rows(), + &self.state, + MAX_POPUP_ROWS, + width.saturating_sub(1).max(1), + ); + let footer_height = 1u16; + + let mut total = header_height + .saturating_add(1) + .saturating_add(rows_height) + .saturating_add(footer_height) + .saturating_add(2); + if self.mode == Mode::OtherInput { + total = total.saturating_add(6); + } + total + } + + fn cursor_pos(&self, area: Rect) -> Option<(u16, u16)> { + self.cursor_pos_for_other_input(area) + } + + fn render(&self, area: Rect, buf: &mut Buffer) { + if area.height == 0 || area.width == 0 { + return; + } + + 
Clear.render(area, buf); + Block::default() + .style(user_message_style()) + .render(area, buf); + + let [content_area, footer_area] = + Layout::vertical([Constraint::Fill(1), Constraint::Length(1)]).areas(area); + let inset = content_area.inset(Insets::vh(1, 2)); + + let header_lines = self.header_lines(inset.width); + let header_height = header_lines.len() as u16; + let [header_area, body_area] = + Layout::vertical([Constraint::Length(header_height), Constraint::Fill(1)]).areas(inset); + Paragraph::new(header_lines).render(header_area, buf); + + match self.mode { + Mode::Select => { + let rows = self.build_rows(); + let rows_height = measure_rows_height( + &rows, + &self.state, + MAX_POPUP_ROWS, + body_area.width.saturating_sub(1).max(1), + ); + let list_area = Rect { + x: body_area.x, + y: body_area.y, + width: body_area.width, + height: rows_height.min(body_area.height), + }; + render_rows( + list_area, + buf, + &rows, + &self.state, + MAX_POPUP_ROWS, + "no options", + ); + } + Mode::OtherInput => { + let label_area = Rect { + x: body_area.x, + y: body_area.y, + width: body_area.width, + height: 1, + }; + Paragraph::new(Line::from(vec![ + Span::from("Other response: ".to_string()).bold(), + "(press Enter to submit)".dim(), + ])) + .render(label_area, buf); + + let input_outer = Rect { + x: body_area.x, + y: body_area.y.saturating_add(1), + width: body_area.width, + height: body_area.height.saturating_sub(1).max(1), + }; + let textarea_rect = self.textarea_rect(input_outer); + let mut state = self.textarea_state.borrow_mut(); + StatefulWidgetRef::render_ref(&(&self.textarea), textarea_rect, buf, &mut state); + if self.textarea.text().is_empty() { + Paragraph::new(Line::from("Type your response…".dim())) + .render(textarea_rect, buf); + } + } + Mode::Review => { + let rows = self.build_rows(); + let rows_height = measure_rows_height( + &rows, + &self.state, + MAX_POPUP_ROWS, + body_area.width.saturating_sub(1).max(1), + ); + let list_area = Rect { + x: 
body_area.x, + y: body_area.y, + width: body_area.width, + height: rows_height.min(body_area.height), + }; + render_rows( + list_area, + buf, + &rows, + &self.state, + MAX_POPUP_ROWS, + "no questions", + ); + } + } + + let hint_area = Rect { + x: footer_area.x.saturating_add(2), + y: footer_area.y, + width: footer_area.width.saturating_sub(2), + height: 1, + }; + self.footer_hint().dim().render(hint_area, buf); + } +} diff --git a/codex-rs/tui2/src/bottom_pane/feedback_view.rs b/codex-rs/tui2/src/bottom_pane/feedback_view.rs index c563ab8e90b..ea062d62d51 100644 --- a/codex-rs/tui2/src/bottom_pane/feedback_view.rs +++ b/codex-rs/tui2/src/bottom_pane/feedback_view.rs @@ -27,8 +27,7 @@ use super::popup_consts::standard_popup_hint_line; use super::textarea::TextArea; use super::textarea::TextAreaState; -const BASE_BUG_ISSUE_URL: &str = - "https://github.com/openai/codex/issues/new?template=2-bug-report.yml"; +const BASE_BUG_ISSUE_URL: &str = "https://github.com/Ixe1/codexel/issues/new"; /// Minimal input overlay to collect an optional feedback note, then upload /// both logs and rollout with classification + metadata. 
@@ -338,7 +337,7 @@ fn feedback_classification(category: FeedbackCategory) -> &'static str { fn issue_url_for_category(category: FeedbackCategory, thread_id: &str) -> Option { match category { FeedbackCategory::Bug | FeedbackCategory::BadResult | FeedbackCategory::Other => Some( - format!("{BASE_BUG_ISSUE_URL}&steps=Uploaded%20thread:%20{thread_id}"), + format!("{BASE_BUG_ISSUE_URL}?steps=Uploaded%20thread:%20{thread_id}"), ), FeedbackCategory::GoodResult => None, } @@ -545,7 +544,7 @@ mod tests { assert!( bug_url .as_deref() - .is_some_and(|url| url.contains("template=2-bug-report")) + .is_some_and(|url| url.starts_with(BASE_BUG_ISSUE_URL)) ); let bad_result_url = issue_url_for_category(FeedbackCategory::BadResult, "thread-2"); diff --git a/codex-rs/tui2/src/bottom_pane/mod.rs b/codex-rs/tui2/src/bottom_pane/mod.rs index fbab5e14a2a..87141380c5d 100644 --- a/codex-rs/tui2/src/bottom_pane/mod.rs +++ b/codex-rs/tui2/src/bottom_pane/mod.rs @@ -20,6 +20,7 @@ use std::time::Duration; mod approval_overlay; pub(crate) use approval_overlay::ApprovalOverlay; pub(crate) use approval_overlay::ApprovalRequest; +mod ask_user_question_overlay; mod bottom_pane_view; mod chat_composer; mod chat_composer_history; @@ -28,6 +29,8 @@ pub mod custom_prompt_view; mod file_search_popup; mod footer; mod list_selection_view; +mod plan_approval_overlay; +mod plan_request_overlay; mod prompt_args; mod skill_popup; pub(crate) use list_selection_view::SelectionViewParams; @@ -53,8 +56,11 @@ pub(crate) use chat_composer::InputResult; use codex_protocol::custom_prompts::CustomPrompt; use crate::status_indicator_widget::StatusIndicatorWidget; +pub(crate) use ask_user_question_overlay::AskUserQuestionOverlay; pub(crate) use list_selection_view::SelectionAction; pub(crate) use list_selection_view::SelectionItem; +pub(crate) use plan_approval_overlay::PlanApprovalOverlay; +pub(crate) use plan_request_overlay::PlanRequestOverlay; /// Pane displayed in the lower half of the chat UI. 
pub(crate) struct BottomPane { @@ -276,6 +282,23 @@ impl BottomPane { } } + pub(crate) fn update_status_detail_lines( + &mut self, + detail_lines: Vec>, + ) { + if let Some(status) = self.status.as_mut() { + status.set_detail_lines(detail_lines); + self.request_redraw(); + } + } + + pub(crate) fn clear_status_detail_lines(&mut self) { + if let Some(status) = self.status.as_mut() { + status.clear_detail_lines(); + self.request_redraw(); + } + } + pub(crate) fn show_ctrl_c_quit_hint(&mut self) { self.ctrl_c_quit_hint = true; self.composer diff --git a/codex-rs/tui2/src/bottom_pane/plan_approval_overlay.rs b/codex-rs/tui2/src/bottom_pane/plan_approval_overlay.rs new file mode 100644 index 00000000000..7490adc0c71 --- /dev/null +++ b/codex-rs/tui2/src/bottom_pane/plan_approval_overlay.rs @@ -0,0 +1,622 @@ +use std::cell::RefCell; + +use crossterm::event::KeyCode; +use crossterm::event::KeyEvent; +use crossterm::event::KeyModifiers; +use ratatui::buffer::Buffer; +use ratatui::layout::Constraint; +use ratatui::layout::Layout; +use ratatui::layout::Rect; +use ratatui::style::Style; +use ratatui::style::Styled; +use ratatui::style::Stylize; +use ratatui::text::Line; +use ratatui::text::Span; +use ratatui::widgets::Block; +use ratatui::widgets::Clear; +use ratatui::widgets::Paragraph; +use ratatui::widgets::StatefulWidgetRef; +use ratatui::widgets::Widget; +use textwrap::wrap; +use unicode_width::UnicodeWidthStr; + +use codex_core::protocol::Op; +use codex_core::protocol::PlanApprovalRequestEvent; +use codex_core::protocol::PlanApprovalResponse; +use codex_core::protocol::PlanProposal; +use codex_protocol::plan_tool::PlanItemArg; +use codex_protocol::plan_tool::StepStatus; + +use crate::app_event::AppEvent; +use crate::app_event_sender::AppEventSender; +use crate::key_hint; +use crate::render::Insets; +use crate::render::RectExt as _; +use crate::render::line_utils::prefix_lines; +use crate::style::user_message_style; + +use super::CancellationEvent; +use 
super::bottom_pane_view::BottomPaneView; +use super::textarea::TextArea; +use super::textarea::TextAreaState; + +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum Mode { + Select, + FeedbackInput, +} + +const MAX_PLAN_APPROVAL_OVERLAY_ROWS: u16 = 44; +const FEEDBACK_BLOCK_HEIGHT: u16 = 8; + +pub(crate) struct PlanApprovalOverlay { + id: String, + proposal: PlanProposal, + mode: Mode, + scroll_top: usize, + selected_action: usize, + textarea: TextArea, + textarea_state: RefCell, + error: Option, + app_event_tx: AppEventSender, + complete: bool, +} + +impl PlanApprovalOverlay { + pub(crate) fn new( + id: String, + ev: PlanApprovalRequestEvent, + app_event_tx: AppEventSender, + ) -> Self { + Self { + id, + proposal: ev.proposal, + mode: Mode::Select, + scroll_top: 0, + selected_action: 0, + textarea: TextArea::new(), + textarea_state: RefCell::new(TextAreaState::default()), + error: None, + app_event_tx, + complete: false, + } + } + + fn finish(&mut self, response: PlanApprovalResponse) { + self.app_event_tx + .send(AppEvent::CodexOp(Op::ResolvePlanApproval { + id: self.id.clone(), + response, + })); + self.complete = true; + } + + fn other_text(&self) -> String { + self.textarea.text().trim().to_string() + } + + fn accept_selection(&mut self) { + match self.selected_action { + 0 => self.finish(PlanApprovalResponse::Approved), + 1 => { + self.mode = Mode::FeedbackInput; + self.error = None; + } + _ => self.finish(PlanApprovalResponse::Rejected), + } + } + + fn accept_feedback(&mut self) { + let feedback = self.other_text(); + if feedback.is_empty() { + self.error = Some("Feedback cannot be empty.".to_string()); + return; + } + self.finish(PlanApprovalResponse::Revised { feedback }); + } + + fn footer_hint(&self) -> Line<'static> { + match self.mode { + Mode::Select => Line::from(vec![ + "↑/↓ ".into(), + "scroll".bold(), + ", ".into(), + "←/→ ".into(), + "action".bold(), + ", ".into(), + key_hint::plain(KeyCode::Enter).into(), + " select, ".into(), + 
key_hint::plain(KeyCode::Esc).into(), + " reject".into(), + ]), + Mode::FeedbackInput => Line::from(vec![ + key_hint::plain(KeyCode::Enter).into(), + " submit, ".into(), + key_hint::plain(KeyCode::Esc).into(), + " back".into(), + ]), + } + } + + fn plan_lines(&self, width: u16) -> Vec> { + let usable_width = width.saturating_sub(4).max(1) as usize; + let mut lines = Vec::new(); + + lines.push( + vec![ + "[".into(), + "Plan".bold(), + "] ".into(), + self.proposal.title.clone().bold(), + ] + .into(), + ); + + let summary = self.proposal.summary.trim(); + if !summary.is_empty() { + lines.push(Line::from("")); + lines.push(Line::from("Summary:".bold())); + for raw_line in summary.lines() { + let raw_line = raw_line.trim_end(); + if raw_line.trim().is_empty() { + lines.push(Line::from("")); + continue; + } + for w in wrap(raw_line, usable_width) { + lines.push(Line::from(vec![" ".into(), w.into_owned().into()])); + } + } + } + + let explanation = self + .proposal + .plan + .explanation + .as_deref() + .unwrap_or_default() + .trim(); + if !explanation.is_empty() { + lines.push(Line::from("")); + lines.push(Line::from("Explanation:".bold())); + for raw_line in explanation.lines() { + let raw_line = raw_line.trim_end(); + if raw_line.trim().is_empty() { + lines.push(Line::from("")); + continue; + } + for w in wrap(raw_line, usable_width) { + lines.push(Line::from(vec![" ".into(), w.into_owned().into()])); + } + } + } + + lines.push(Line::from("")); + lines.push(Line::from("Steps:".bold())); + + let mut step_lines = Vec::new(); + if self.proposal.plan.plan.is_empty() { + step_lines.push(Line::from("(no steps provided)".dim().italic())); + } else { + for PlanItemArg { step, status } in &self.proposal.plan.plan { + step_lines.extend(render_step_lines(width, status, step.as_str())); + } + } + lines.extend(prefix_lines(step_lines, " ".into(), " ".into())); + + lines + } + + fn action_bar(&self) -> Line<'static> { + let selected = Style::default().cyan().bold(); + let normal = 
Style::default().dim(); + + let approve_style = if self.selected_action == 0 { + selected + } else { + normal + }; + let revise_style = if self.selected_action == 1 { + selected + } else { + normal + }; + let reject_style = if self.selected_action == 2 { + selected + } else { + normal + }; + + Line::from(vec![ + Span::from("[1] Approve").set_style(approve_style), + " ".into(), + Span::from("[2] Revise").set_style(revise_style), + " ".into(), + Span::from("[3] Reject").set_style(reject_style), + ]) + } + + fn move_action_left(&mut self) { + self.selected_action = self.selected_action.saturating_sub(1); + } + + fn move_action_right(&mut self) { + self.selected_action = (self.selected_action + 1).min(2); + } + + fn scroll_up(&mut self) { + self.scroll_top = self.scroll_top.saturating_sub(1); + } + + fn scroll_down(&mut self) { + self.scroll_top = self.scroll_top.saturating_add(1); + } + + fn page_up(&mut self) { + self.scroll_top = self.scroll_top.saturating_sub(8); + } + + fn page_down(&mut self) { + self.scroll_top = self.scroll_top.saturating_add(8); + } + + fn scroll_home(&mut self) { + self.scroll_top = 0; + } + + fn cursor_pos_for_feedback(&self, area: Rect) -> Option<(u16, u16)> { + if self.mode != Mode::FeedbackInput { + return None; + } + if area.height < 2 || area.width <= 2 { + return None; + } + let textarea_rect = self.textarea_rect(area); + let state = *self.textarea_state.borrow(); + self.textarea.cursor_pos_with_state(textarea_rect, state) + } + + fn textarea_rect(&self, area: Rect) -> Rect { + let inset = area.inset(Insets::vh(1, 2)); + Rect { + x: inset.x, + y: inset.y, + width: inset.width, + height: inset.height.clamp(1, 5), + } + } +} + +impl BottomPaneView for PlanApprovalOverlay { + fn handle_key_event(&mut self, key_event: KeyEvent) { + match self.mode { + Mode::Select => match key_event { + KeyEvent { + code: KeyCode::Left, + .. + } => self.move_action_left(), + KeyEvent { + code: KeyCode::Right, + .. 
+ } => self.move_action_right(), + KeyEvent { + code: KeyCode::Up, .. + } + | KeyEvent { + code: KeyCode::Char('p'), + modifiers: KeyModifiers::CONTROL, + .. + } + | KeyEvent { + code: KeyCode::Char('\u{0010}'), + modifiers: KeyModifiers::NONE, + .. + } => self.scroll_up(), + KeyEvent { + code: KeyCode::Char('k'), + modifiers: KeyModifiers::NONE, + .. + } => self.scroll_up(), + KeyEvent { + code: KeyCode::Down, + .. + } + | KeyEvent { + code: KeyCode::Char('n'), + modifiers: KeyModifiers::CONTROL, + .. + } + | KeyEvent { + code: KeyCode::Char('\u{000e}'), + modifiers: KeyModifiers::NONE, + .. + } => self.scroll_down(), + KeyEvent { + code: KeyCode::Char('j'), + modifiers: KeyModifiers::NONE, + .. + } => self.scroll_down(), + KeyEvent { + code: KeyCode::PageUp, + .. + } => self.page_up(), + KeyEvent { + code: KeyCode::PageDown, + .. + } => self.page_down(), + KeyEvent { + code: KeyCode::Home, + .. + } => self.scroll_home(), + KeyEvent { + code: KeyCode::Esc, .. + } => { + self.finish(PlanApprovalResponse::Rejected); + } + KeyEvent { + code: KeyCode::Char(c), + modifiers, + .. + } if !modifiers.contains(KeyModifiers::CONTROL) + && !modifiers.contains(KeyModifiers::ALT) => + { + if let Some(idx) = c + .to_digit(10) + .map(|d| d as usize) + .and_then(|d| d.checked_sub(1)) + && idx <= 2 + { + self.selected_action = idx; + self.accept_selection(); + } + } + KeyEvent { + code: KeyCode::Enter, + modifiers: KeyModifiers::NONE, + .. + } => self.accept_selection(), + _ => {} + }, + Mode::FeedbackInput => match key_event { + KeyEvent { + code: KeyCode::Esc, .. + } => { + self.mode = Mode::Select; + self.error = None; + } + KeyEvent { + code: KeyCode::Enter, + modifiers: KeyModifiers::NONE, + .. + } => self.accept_feedback(), + KeyEvent { + code: KeyCode::Enter, + .. 
+ } => { + self.textarea.input(key_event); + } + other => { + self.textarea.input(other); + } + }, + } + } + + fn on_ctrl_c(&mut self) -> CancellationEvent { + self.finish(PlanApprovalResponse::Rejected); + CancellationEvent::Handled + } + + fn is_complete(&self) -> bool { + self.complete + } + + fn handle_paste(&mut self, pasted: String) -> bool { + if self.mode != Mode::FeedbackInput { + return false; + } + if pasted.is_empty() { + return false; + } + self.textarea.insert_str(&pasted); + true + } +} + +impl crate::render::renderable::Renderable for PlanApprovalOverlay { + fn desired_height(&self, width: u16) -> u16 { + let plan_lines = self.plan_lines(width); + let plan_height = u16::try_from(plan_lines.len()).unwrap_or(u16::MAX); + + let mut total = 2u16; // outer padding + total = total.saturating_add(1); // action bar + total = total.saturating_add(1); // footer hint + total = total.saturating_add(plan_height.max(4)); + if self.mode == Mode::FeedbackInput { + total = total.saturating_add(FEEDBACK_BLOCK_HEIGHT); + } + total.clamp(8, MAX_PLAN_APPROVAL_OVERLAY_ROWS) + } + + fn cursor_pos(&self, area: Rect) -> Option<(u16, u16)> { + self.cursor_pos_for_feedback(area) + } + + fn render(&self, area: Rect, buf: &mut Buffer) { + if area.height == 0 || area.width == 0 { + return; + } + + Clear.render(area, buf); + Block::default() + .style(user_message_style()) + .render(area, buf); + + let [content_area, footer_area] = + Layout::vertical([Constraint::Fill(1), Constraint::Length(1)]).areas(area); + let inset = content_area.inset(Insets::vh(1, 2)); + + match self.mode { + Mode::Select => { + let [plan_area, actions_area] = + Layout::vertical([Constraint::Fill(1), Constraint::Length(1)]).areas(inset); + + let plan_lines = self.plan_lines(plan_area.width); + let max_scroll = plan_lines.len().saturating_sub(plan_area.height as usize); + let scroll = self.scroll_top.min(max_scroll) as u16; + Paragraph::new(plan_lines) + .scroll((scroll, 0)) + .render(plan_area, buf); + + 
self.action_bar().render(actions_area, buf); + } + Mode::FeedbackInput => { + let [plan_area, feedback_area] = Layout::vertical([ + Constraint::Fill(1), + Constraint::Length(FEEDBACK_BLOCK_HEIGHT), + ]) + .areas(inset); + + let plan_lines = self.plan_lines(plan_area.width); + let max_scroll = plan_lines.len().saturating_sub(plan_area.height as usize); + let scroll = self.scroll_top.min(max_scroll) as u16; + Paragraph::new(plan_lines) + .scroll((scroll, 0)) + .render(plan_area, buf); + + let label_area = Rect { + x: feedback_area.x, + y: feedback_area.y, + width: feedback_area.width, + height: 1, + }; + Paragraph::new(Line::from(vec![ + Span::from("Feedback: ").bold(), + "(press Enter to submit)".dim(), + ])) + .render(label_area, buf); + + if let Some(err) = &self.error { + let err_area = Rect { + x: feedback_area.x, + y: feedback_area.y.saturating_add(1), + width: feedback_area.width, + height: 1, + }; + Line::from(err.clone().red()).render(err_area, buf); + } + + let input_outer = Rect { + x: feedback_area.x, + y: feedback_area.y.saturating_add(2), + width: feedback_area.width, + height: feedback_area.height.saturating_sub(2).max(1), + }; + let textarea_rect = self.textarea_rect(input_outer); + let mut state = self.textarea_state.borrow_mut(); + StatefulWidgetRef::render_ref(&(&self.textarea), textarea_rect, buf, &mut state); + if self.textarea.text().is_empty() { + Paragraph::new(Line::from("Type your feedback…".dim())) + .render(textarea_rect, buf); + } + } + } + + let hint_area = Rect { + x: footer_area.x.saturating_add(2), + y: footer_area.y, + width: footer_area.width.saturating_sub(2), + height: 1, + }; + self.footer_hint().dim().render(hint_area, buf); + } +} + +fn render_step_lines(width: u16, status: &StepStatus, text: &str) -> Vec> { + let (box_str, step_style) = match status { + StepStatus::Completed => ("[x] ", Style::default().crossed_out().dim()), + StepStatus::InProgress => ("[~] ", Style::default().cyan().bold()), + StepStatus::Pending => ("[ ] ", 
Style::default().dim()), + }; + let wrap_width = (width as usize) + .saturating_sub(4) + .saturating_sub(box_str.width()) + .max(1); + let parts = wrap(text, wrap_width); + let lines: Vec> = parts + .into_iter() + .map(|s| Line::from(Span::from(s.into_owned()).set_style(step_style))) + .collect(); + prefix_lines(lines, box_str.into(), " ".into()) +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::app_event::AppEvent; + use crate::render::renderable::Renderable; + use pretty_assertions::assert_eq; + use tokio::sync::mpsc::unbounded_channel; + + fn make_proposal(summary: &str, steps: usize) -> PlanProposal { + PlanProposal { + title: "Plan title".to_string(), + summary: summary.to_string(), + plan: codex_protocol::plan_tool::UpdatePlanArgs { + explanation: None, + plan: (0..steps) + .map(|i| PlanItemArg { + step: format!("step {}", i + 1), + status: StepStatus::Pending, + }) + .collect(), + }, + } + } + + fn render_to_lines(view: &PlanApprovalOverlay, width: u16) -> Vec { + let height = view.desired_height(width); + let mut buf = Buffer::empty(Rect::new(0, 0, width, height)); + view.render(Rect::new(0, 0, width, height), &mut buf); + (0..buf.area.height) + .map(|row| { + (0..buf.area.width) + .map(|col| buf[(col, row)].symbol().to_string()) + .collect() + }) + .collect() + } + + #[test] + fn summary_label_is_only_rendered_once() { + let (tx, _rx) = unbounded_channel::(); + let tx = AppEventSender::new(tx); + let ev = PlanApprovalRequestEvent { + call_id: "call-1".to_string(), + proposal: make_proposal( + "A summary that should wrap across multiple lines but should only show a single label.", + 1, + ), + }; + let view = PlanApprovalOverlay::new("id-1".to_string(), ev, tx); + + let rendered = render_to_lines(&view, 40); + let label_count = rendered + .iter() + .filter(|line| line.contains("Summary:")) + .count(); + assert_eq!(label_count, 1); + } + + #[test] + fn desired_height_clamps_to_max_rows_for_long_plans() { + let (tx, _rx) = unbounded_channel::(); + 
let tx = AppEventSender::new(tx); + let ev = PlanApprovalRequestEvent { + call_id: "call-1".to_string(), + proposal: make_proposal("short summary", 200), + }; + let view = PlanApprovalOverlay::new("id-1".to_string(), ev, tx); + + assert_eq!(view.desired_height(80), MAX_PLAN_APPROVAL_OVERLAY_ROWS); + } +} diff --git a/codex-rs/tui2/src/bottom_pane/plan_request_overlay.rs b/codex-rs/tui2/src/bottom_pane/plan_request_overlay.rs new file mode 100644 index 00000000000..e4c124ee3f7 --- /dev/null +++ b/codex-rs/tui2/src/bottom_pane/plan_request_overlay.rs @@ -0,0 +1,269 @@ +use std::cell::RefCell; + +use crossterm::event::KeyCode; +use crossterm::event::KeyEvent; +use crossterm::event::KeyModifiers; +use ratatui::buffer::Buffer; +use ratatui::layout::Constraint; +use ratatui::layout::Layout; +use ratatui::layout::Rect; +use ratatui::style::Stylize; +use ratatui::text::Line; +use ratatui::widgets::Block; +use ratatui::widgets::Clear; +use ratatui::widgets::Paragraph; +use ratatui::widgets::StatefulWidgetRef; +use ratatui::widgets::Widget; + +use codex_core::protocol::Op; +use codex_core::protocol::PlanRequest; + +use crate::app_event::AppEvent; +use crate::app_event_sender::AppEventSender; +use crate::key_hint; +use crate::render::Insets; +use crate::render::RectExt as _; +use crate::style::user_message_style; + +use super::CancellationEvent; +use super::bottom_pane_view::BottomPaneView; +use super::textarea::TextArea; +use super::textarea::TextAreaState; + +struct PlanRequestOverlayLayout { + header_lines: Vec>, + header_area: Rect, + textarea_rect: Rect, + hint_area: Rect, +} + +pub(crate) struct PlanRequestOverlay { + textarea: TextArea, + textarea_state: RefCell, + error: Option, + app_event_tx: AppEventSender, + complete: bool, +} + +impl PlanRequestOverlay { + pub(crate) fn new(app_event_tx: AppEventSender) -> Self { + Self { + textarea: TextArea::new(), + textarea_state: RefCell::new(TextAreaState::default()), + error: None, + app_event_tx, + complete: false, + } + 
} + + fn goal_text(&self) -> String { + self.textarea.text().trim().to_string() + } + + fn header_lines(&self) -> Vec> { + let mut lines = vec![Line::from(vec![ + "[".into(), + "Plan Mode".bold(), + "] ".into(), + "Describe what you want to do.".into(), + ])]; + if let Some(err) = &self.error { + lines.push(Line::from("")); + lines.push(Line::from(err.clone().red())); + } + lines + } + + fn layout(&self, area: Rect) -> PlanRequestOverlayLayout { + let [content_area, footer_area] = + Layout::vertical([Constraint::Fill(1), Constraint::Length(1)]).areas(area); + let inset = content_area.inset(Insets::vh(1, 2)); + + let header_lines = self.header_lines(); + let header_height = header_lines.len() as u16; + let [header_area, body_area] = + Layout::vertical([Constraint::Length(header_height), Constraint::Fill(1)]).areas(inset); + + let hint_area = Rect { + x: footer_area.x.saturating_add(2), + y: footer_area.y, + width: footer_area.width.saturating_sub(2), + height: 1, + }; + + PlanRequestOverlayLayout { + header_lines, + header_area, + textarea_rect: self.textarea_rect(body_area), + hint_area, + } + } + + fn submit(&mut self) { + let goal = self.goal_text(); + if goal.is_empty() { + self.error = Some("Goal cannot be empty.".to_string()); + return; + } + self.app_event_tx.send(AppEvent::CodexOp(Op::Plan { + plan_request: PlanRequest { goal }, + })); + self.complete = true; + } + + fn footer_hint(&self) -> Line<'static> { + Line::from(vec![ + key_hint::plain(KeyCode::Enter).into(), + " submit, ".into(), + key_hint::shift(KeyCode::Enter).into(), + "/".into(), + key_hint::ctrl(KeyCode::Char('j')).into(), + " newline, ".into(), + key_hint::plain(KeyCode::Esc).into(), + " cancel".into(), + ]) + } + + fn textarea_rect(&self, area: Rect) -> Rect { + let inset = area.inset(Insets::vh(1, 2)); + Rect { + x: inset.x, + y: inset.y, + width: inset.width, + height: inset.height.clamp(1, 6), + } + } +} + +impl BottomPaneView for PlanRequestOverlay { + fn handle_key_event(&mut self, 
key_event: KeyEvent) { + match key_event { + KeyEvent { + code: KeyCode::Esc, .. + } => { + self.complete = true; + } + KeyEvent { + code: KeyCode::Enter, + modifiers: KeyModifiers::NONE, + .. + } => self.submit(), + KeyEvent { + code: KeyCode::Enter, + .. + } => { + self.textarea.input(key_event); + } + other => { + self.textarea.input(other); + } + } + } + + fn on_ctrl_c(&mut self) -> CancellationEvent { + self.complete = true; + CancellationEvent::Handled + } + + fn is_complete(&self) -> bool { + self.complete + } + + fn handle_paste(&mut self, pasted: String) -> bool { + if pasted.is_empty() { + return false; + } + self.textarea.insert_str(&pasted); + true + } +} + +impl crate::render::renderable::Renderable for PlanRequestOverlay { + fn desired_height(&self, _width: u16) -> u16 { + 10 + } + + fn cursor_pos(&self, area: Rect) -> Option<(u16, u16)> { + if area.height < 2 || area.width <= 2 { + return None; + } + let textarea_rect = self.layout(area).textarea_rect; + let state = *self.textarea_state.borrow(); + self.textarea.cursor_pos_with_state(textarea_rect, state) + } + + fn render(&self, area: Rect, buf: &mut Buffer) { + if area.height == 0 || area.width == 0 { + return; + } + + let layout = self.layout(area); + + Clear.render(area, buf); + Block::default() + .style(user_message_style()) + .render(area, buf); + + Paragraph::new(layout.header_lines).render(layout.header_area, buf); + + let textarea_rect = layout.textarea_rect; + let mut state = self.textarea_state.borrow_mut(); + StatefulWidgetRef::render_ref(&(&self.textarea), textarea_rect, buf, &mut state); + if self.textarea.text().is_empty() { + Paragraph::new(Line::from( + "e.g. 
\"Add pagination to search results\"".dim(), + )) + .render(textarea_rect, buf); + } + + self.footer_hint().dim().render(layout.hint_area, buf); + } +} + +#[cfg(test)] +mod tests { + use crate::render::renderable::Renderable as _; + + use super::*; + + #[test] + fn cursor_pos_accounts_for_header_and_insets() { + let (app_event_tx, _app_event_rx) = tokio::sync::mpsc::unbounded_channel(); + let overlay = PlanRequestOverlay::new(AppEventSender::new(app_event_tx)); + assert_eq!(overlay.cursor_pos(Rect::new(0, 0, 80, 10)), Some((4, 3))); + } + + #[test] + fn cursor_pos_accounts_for_error_header_height() { + let (app_event_tx, _app_event_rx) = tokio::sync::mpsc::unbounded_channel(); + let mut overlay = PlanRequestOverlay::new(AppEventSender::new(app_event_tx)); + overlay.error = Some("Goal cannot be empty.".to_string()); + assert_eq!(overlay.cursor_pos(Rect::new(0, 0, 80, 10)), Some((4, 5))); + } + + #[test] + fn plan_request_overlay_supports_multiline_goal_entry() { + let (app_event_tx, mut app_event_rx) = tokio::sync::mpsc::unbounded_channel(); + let mut overlay = PlanRequestOverlay::new(AppEventSender::new(app_event_tx)); + + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('a'), KeyModifiers::NONE)); + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('j'), KeyModifiers::CONTROL)); + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('b'), KeyModifiers::NONE)); + overlay.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::SHIFT)); + overlay.handle_key_event(KeyEvent::new(KeyCode::Char('c'), KeyModifiers::NONE)); + + assert_eq!(overlay.goal_text(), "a\nb\nc"); + assert!(!overlay.is_complete()); + + overlay.handle_key_event(KeyEvent::new(KeyCode::Enter, KeyModifiers::NONE)); + assert!(overlay.is_complete()); + + let ev = app_event_rx.try_recv().expect("plan op"); + match ev { + AppEvent::CodexOp(Op::Plan { plan_request }) => { + assert_eq!(plan_request.goal, "a\nb\nc"); + } + other => panic!("unexpected event: {other:?}"), + } + } +} diff --git 
a/codex-rs/tui2/src/chatwidget.rs b/codex-rs/tui2/src/chatwidget.rs index 62d026d61ce..a4e7f76f7aa 100644 --- a/codex-rs/tui2/src/chatwidget.rs +++ b/codex-rs/tui2/src/chatwidget.rs @@ -2,6 +2,7 @@ use std::collections::HashMap; use std::collections::HashSet; use std::collections::VecDeque; use std::path::PathBuf; +use std::str::FromStr; use std::sync::Arc; use std::time::Duration; @@ -21,6 +22,7 @@ use codex_core::protocol::AgentReasoningEvent; use codex_core::protocol::AgentReasoningRawContentDeltaEvent; use codex_core::protocol::AgentReasoningRawContentEvent; use codex_core::protocol::ApplyPatchApprovalRequestEvent; +use codex_core::protocol::AskUserQuestionRequestEvent; use codex_core::protocol::BackgroundEventEvent; use codex_core::protocol::CreditsSnapshot; use codex_core::protocol::DeprecationNoticeEvent; @@ -31,6 +33,7 @@ use codex_core::protocol::ExecApprovalRequestEvent; use codex_core::protocol::ExecCommandBeginEvent; use codex_core::protocol::ExecCommandEndEvent; use codex_core::protocol::ExecCommandSource; +use codex_core::protocol::ExitedPlanModeEvent; use codex_core::protocol::ExitedReviewModeEvent; use codex_core::protocol::ListCustomPromptsResponseEvent; use codex_core::protocol::ListSkillsResponseEvent; @@ -42,6 +45,8 @@ use codex_core::protocol::McpToolCallBeginEvent; use codex_core::protocol::McpToolCallEndEvent; use codex_core::protocol::Op; use codex_core::protocol::PatchApplyBeginEvent; +use codex_core::protocol::PlanApprovalRequestEvent; +use codex_core::protocol::PlanRequest; use codex_core::protocol::RateLimitSnapshot; use codex_core::protocol::ReviewRequest; use codex_core::protocol::ReviewTarget; @@ -85,10 +90,13 @@ use tracing::debug; use crate::app_event::AppEvent; use crate::app_event_sender::AppEventSender; use crate::bottom_pane::ApprovalRequest; +use crate::bottom_pane::AskUserQuestionOverlay; use crate::bottom_pane::BottomPane; use crate::bottom_pane::BottomPaneParams; use crate::bottom_pane::CancellationEvent; use 
crate::bottom_pane::InputResult; +use crate::bottom_pane::PlanApprovalOverlay; +use crate::bottom_pane::PlanRequestOverlay; use crate::bottom_pane::SelectionAction; use crate::bottom_pane::SelectionItem; use crate::bottom_pane::SelectionViewParams; @@ -291,6 +299,7 @@ pub(crate) struct ChatWidget { token_info: Option, rate_limit_snapshot: Option, plan_type: Option, + last_plan_update_key: Option, rate_limit_warnings: RateLimitWarningState, rate_limit_switch_prompt: RateLimitSwitchPromptState, rate_limit_poller: Option>, @@ -311,6 +320,7 @@ pub(crate) struct ChatWidget { current_status_header: String, // Previous status header to restore after a transient stream retry. retry_status_header: Option, + plan_variants_progress: Option, conversation_id: Option, frame_requester: FrameRequester, // Whether to include the initial welcome banner on session configured @@ -341,6 +351,127 @@ struct UserMessage { image_paths: Vec, } +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +enum ProgressStatus { + Pending, + InProgress, + Completed, +} + +#[derive(Debug, Clone)] +struct PlanVariantsProgress { + total: usize, + steps: Vec, + durations: Vec>, + last_activity: Vec>, + tokens: Vec>, +} + +impl PlanVariantsProgress { + fn new(total: usize) -> Self { + Self { + total, + steps: vec![ProgressStatus::Pending; total], + durations: vec![None; total], + last_activity: vec![None; total], + tokens: vec![None; total], + } + } + + fn variant_label(&self, idx: usize) -> String { + if self.total == 3 { + match idx { + 0 => "Minimal".to_string(), + 1 => "Correctness".to_string(), + 2 => "DX".to_string(), + _ => format!("Variant {}/{}", idx + 1, self.total), + } + } else { + format!("Variant {}/{}", idx + 1, self.total) + } + } + + fn set_in_progress(&mut self, idx: usize) { + if idx < self.steps.len() { + self.steps[idx] = ProgressStatus::InProgress; + } + } + + fn set_completed(&mut self, idx: usize) { + if idx < self.steps.len() { + self.steps[idx] = ProgressStatus::Completed; + } + } + + fn 
set_duration(&mut self, idx: usize, duration: Option) { + if idx < self.durations.len() { + self.durations[idx] = duration; + } + } + + fn set_activity(&mut self, idx: usize, activity: Option) { + if idx < self.last_activity.len() { + self.last_activity[idx] = activity; + } + } + + fn set_tokens(&mut self, idx: usize, tokens: Option) { + if idx < self.tokens.len() { + self.tokens[idx] = tokens; + } + } + + fn render_detail_lines(&self) -> Vec> { + use ratatui::style::Stylize; + let mut lines = Vec::with_capacity(self.total); + for (idx, status) in self.steps.iter().copied().enumerate() { + let label = self.variant_label(idx); + let status_span = match status { + ProgressStatus::Pending => "○".dim(), + ProgressStatus::InProgress => "●".cyan(), + ProgressStatus::Completed => "✓".green(), + }; + + let mut spans = vec![" ".into(), status_span, " ".into(), label.into()]; + let duration = self.durations.get(idx).and_then(|d| d.as_deref()); + let tokens = self.tokens.get(idx).and_then(|t| t.as_deref()); + if duration.is_some() || tokens.is_some() { + let mut meta = String::new(); + meta.push('('); + if let Some(duration) = duration { + meta.push_str(duration); + } + if let Some(tokens) = tokens { + if duration.is_some() { + meta.push_str(", "); + } + meta.push_str(tokens); + meta.push_str(" tok"); + } + meta.push(')'); + spans.push(" ".into()); + spans.push(meta.dim()); + } + if status == ProgressStatus::Completed { + spans.push(" ".into()); + spans.push("—".dim()); + spans.push(" ".into()); + spans.push("done".dim()); + } else if let Some(activity) = self.last_activity.get(idx).and_then(|a| a.as_deref()) { + let activity = activity.strip_prefix("shell ").unwrap_or(activity).trim(); + if !activity.is_empty() { + spans.push(" ".into()); + spans.push("—".dim()); + spans.push(" ".into()); + spans.push(activity.to_string().dim()); + } + } + lines.push(spans.into()); + } + lines + } +} + impl From for UserMessage { fn from(text: String) -> Self { Self { @@ -368,6 +499,25 @@ fn 
create_initial_user_message(text: String, image_paths: Vec) -> Optio } impl ChatWidget { + fn prepare_for_immediate_interrupt(&mut self) { + if self.stream_controller.is_some() { + self.flush_answer_stream_with_separator(); + } + if !self.interrupts.is_empty() { + self.flush_interrupt_queue(); + } + } + + fn prepare_for_immediate_interrupt_discard_stream(&mut self) { + if self.stream_controller.is_some() { + self.stream_controller = None; + self.app_event_tx.send(AppEvent::StopCommitAnimation); + } + if !self.interrupts.is_empty() { + self.flush_interrupt_queue(); + } + } + fn flush_answer_stream_with_separator(&mut self) { if let Some(mut controller) = self.stream_controller.take() && let Some(cell) = controller.finalize() @@ -377,10 +527,22 @@ impl ChatWidget { } fn set_status_header(&mut self, header: String) { + if self.plan_variants_progress.is_some() && header != "Planning plan variants" { + self.plan_variants_progress = None; + self.clear_status_detail_lines(); + } self.current_status_header = header.clone(); self.bottom_pane.update_status_header(header); } + fn set_status_detail_lines(&mut self, lines: Vec>) { + self.bottom_pane.update_status_detail_lines(lines); + } + + fn clear_status_detail_lines(&mut self) { + self.bottom_pane.clear_status_detail_lines(); + } + fn restore_retry_status_header_if_present(&mut self) { if let Some(header) = self.retry_status_header.take() && self.current_status_header != header @@ -523,6 +685,7 @@ impl ChatWidget { self.bottom_pane.clear_ctrl_c_quit_hint(); self.bottom_pane.set_task_running(true); self.retry_status_header = None; + self.plan_variants_progress = None; self.bottom_pane.set_interrupt_hint_visible(true); self.set_status_header(String::from("Working")); self.full_reasoning_buffer.clear(); @@ -800,10 +963,18 @@ impl ChatWidget { } fn on_plan_update(&mut self, update: UpdatePlanArgs) { + let update_key = serde_json::to_string(&update).ok(); + if let Some(key) = update_key.as_deref() + && 
self.last_plan_update_key.as_deref() == Some(key) + { + return; + } + self.last_plan_update_key = update_key; self.add_to_history(history_cell::new_plan_update(update)); } fn on_exec_approval_request(&mut self, id: String, ev: ExecApprovalRequestEvent) { + self.prepare_for_immediate_interrupt(); let id2 = id.clone(); let ev2 = ev.clone(); self.defer_or_handle( @@ -813,6 +984,7 @@ impl ChatWidget { } fn on_apply_patch_approval_request(&mut self, id: String, ev: ApplyPatchApprovalRequestEvent) { + self.prepare_for_immediate_interrupt(); let id2 = id.clone(); let ev2 = ev.clone(); self.defer_or_handle( @@ -822,6 +994,7 @@ impl ChatWidget { } fn on_elicitation_request(&mut self, ev: ElicitationRequestEvent) { + self.prepare_for_immediate_interrupt(); let ev2 = ev.clone(); self.defer_or_handle( |q| q.push_elicitation(ev), @@ -829,6 +1002,26 @@ impl ChatWidget { ); } + fn on_ask_user_question_request(&mut self, id: String, ev: AskUserQuestionRequestEvent) { + self.prepare_for_immediate_interrupt_discard_stream(); + let id2 = id.clone(); + let ev2 = ev.clone(); + self.defer_or_handle( + |q| q.push_ask_user_question(id, ev), + |s| s.handle_ask_user_question_request_now(id2, ev2), + ); + } + + fn on_plan_approval_request(&mut self, id: String, ev: PlanApprovalRequestEvent) { + self.prepare_for_immediate_interrupt_discard_stream(); + let id2 = id.clone(); + let ev2 = ev.clone(); + self.defer_or_handle( + |q| q.push_plan_approval(id, ev), + |s| s.handle_plan_approval_request_now(id2, ev2), + ); + } + fn on_exec_command_begin(&mut self, ev: ExecCommandBeginEvent) { self.flush_answer_stream_with_separator(); let ev2 = ev.clone(); @@ -891,10 +1084,7 @@ impl ChatWidget { fn on_web_search_end(&mut self, ev: WebSearchEndEvent) { self.flush_answer_stream_with_separator(); - self.add_to_history(history_cell::new_web_search_call(format!( - "Searched: {}", - ev.query - ))); + self.add_to_history(history_cell::new_web_search_call(ev.query)); } fn on_get_history_entry_response( @@ -928,9 
+1118,121 @@ impl ChatWidget { debug!("BackgroundEvent: {message}"); self.bottom_pane.ensure_status_indicator(); self.bottom_pane.set_interrupt_hint_visible(true); + + if let Some(progress) = self.maybe_update_plan_variants_progress(message.as_str()) { + self.plan_variants_progress = Some(progress); + self.set_status_header("Planning plan variants".to_string()); + self.set_status_detail_lines( + self.plan_variants_progress + .as_ref() + .map(PlanVariantsProgress::render_detail_lines) + .unwrap_or_default(), + ); + return; + } + + self.plan_variants_progress = None; + self.clear_status_detail_lines(); self.set_status_header(message); } + fn maybe_update_plan_variants_progress( + &mut self, + message: &str, + ) -> Option { + let message = message.trim(); + if message.starts_with("Plan variants:") { + // Expected shapes: + // - "Plan variants: generating 1/3…" + // - "Plan variants: finished 1/3 (12.3s)" + let tokens: Vec<&str> = message.split_whitespace().collect(); + if tokens.len() < 4 { + return None; + } + + let action = tokens.get(2).copied()?; + let fraction = tokens.get(3).copied()?; + let fraction = fraction.trim_end_matches('…'); + let (idx_str, total_str) = fraction.split_once('/')?; + let idx = usize::from_str(idx_str).ok()?.saturating_sub(1); + let total = usize::from_str(total_str).ok()?; + if total == 0 { + return None; + } + + let duration = message + .find('(') + .and_then(|start| message.rfind(')').map(|end| (start, end))) + .and_then(|(start, end)| { + if end > start + 1 { + Some(message[start + 1..end].to_string()) + } else { + None + } + }); + + let mut progress = self + .plan_variants_progress + .clone() + .filter(|p| p.total == total) + .unwrap_or_else(|| PlanVariantsProgress::new(total)); + + match action { + "generating" => { + progress.set_in_progress(idx); + progress.set_duration(idx, None); + } + "finished" => { + progress.set_completed(idx); + progress.set_duration(idx, duration); + progress.set_activity(idx, None); + } + _ => return None, 
+ } + + return Some(progress); + } + + if let Some(rest) = message.strip_prefix("Plan variant ") { + // Expected shape: + // - "Plan variant 2/3: rg -n ..." + // - "Plan variant 2/3: shell rg -n ..." (legacy) + let (fraction, activity) = rest.split_once(':')?; + let fraction = fraction.trim(); + let (idx_str, total_str) = fraction.split_once('/')?; + let idx = usize::from_str(idx_str).ok()?.saturating_sub(1); + let total = usize::from_str(total_str).ok()?; + if total == 0 { + return None; + } + + let mut progress = self + .plan_variants_progress + .clone() + .filter(|p| p.total == total) + .unwrap_or_else(|| PlanVariantsProgress::new(total)); + + if idx < progress.steps.len() && progress.steps[idx] == ProgressStatus::Pending { + progress.set_in_progress(idx); + } + + let activity = activity.trim(); + if let Some(tokens) = activity.strip_prefix("tokens ") { + progress.set_tokens(idx, Some(tokens.trim().to_string())); + } else { + let activity = activity.strip_prefix("shell ").unwrap_or(activity).trim(); + if activity.is_empty() { + progress.set_activity(idx, None); + } else { + progress.set_activity(idx, Some(activity.to_string())); + } + } + return Some(progress); + } + + None + } + fn on_undo_started(&mut self, event: UndoStartedEvent) { self.bottom_pane.ensure_status_indicator(); self.bottom_pane.set_interrupt_hint_visible(false); @@ -1157,6 +1459,36 @@ impl ChatWidget { self.request_redraw(); } + pub(crate) fn handle_ask_user_question_request_now( + &mut self, + id: String, + ev: AskUserQuestionRequestEvent, + ) { + self.flush_answer_stream_with_separator(); + self.bottom_pane + .show_view(Box::new(AskUserQuestionOverlay::new( + id, + ev, + self.app_event_tx.clone(), + ))); + self.request_redraw(); + } + + pub(crate) fn handle_plan_approval_request_now( + &mut self, + id: String, + ev: PlanApprovalRequestEvent, + ) { + self.flush_answer_stream_with_separator(); + self.bottom_pane + .show_view(Box::new(PlanApprovalOverlay::new( + id, + ev, + 
self.app_event_tx.clone(), + ))); + self.request_redraw(); + } + pub(crate) fn handle_exec_begin_now(&mut self, ev: ExecCommandBeginEvent) { // Ensure the status indicator is visible while the command runs. self.running_commands.insert( @@ -1287,7 +1619,7 @@ impl ChatWidget { let placeholder = EXAMPLE_PROMPTS[rng.random_range(0..EXAMPLE_PROMPTS.len())].to_string(); let codex_op_tx = spawn_agent(config.clone(), app_event_tx.clone(), conversation_manager); - let mut widget = Self { + Self { app_event_tx: app_event_tx.clone(), frame_requester: frame_requester.clone(), codex_op_tx, @@ -1314,6 +1646,7 @@ impl ChatWidget { token_info: None, rate_limit_snapshot: None, plan_type: None, + last_plan_update_key: None, rate_limit_warnings: RateLimitWarningState::default(), rate_limit_switch_prompt: RateLimitSwitchPromptState::default(), rate_limit_poller: None, @@ -1326,8 +1659,9 @@ impl ChatWidget { interrupts: InterruptManager::new(), reasoning_buffer: String::new(), full_reasoning_buffer: String::new(), - current_status_header: String::from("Working"), + current_status_header: String::from("Ready"), retry_status_header: None, + plan_variants_progress: None, conversation_id: None, queued_user_messages: VecDeque::new(), show_welcome_banner: is_first_run, @@ -1339,11 +1673,7 @@ impl ChatWidget { last_rendered_width: std::cell::Cell::new(None), feedback, current_rollout_path: None, - }; - - widget.prefetch_rate_limits(); - - widget + } } /// Create a ChatWidget attached to an existing conversation (e.g., a fork). 
@@ -1372,7 +1702,7 @@ impl ChatWidget { let codex_op_tx = spawn_agent_from_existing(conversation, session_configured, app_event_tx.clone()); - let mut widget = Self { + Self { app_event_tx: app_event_tx.clone(), frame_requester: frame_requester.clone(), codex_op_tx, @@ -1399,6 +1729,7 @@ impl ChatWidget { token_info: None, rate_limit_snapshot: None, plan_type: None, + last_plan_update_key: None, rate_limit_warnings: RateLimitWarningState::default(), rate_limit_switch_prompt: RateLimitSwitchPromptState::default(), rate_limit_poller: None, @@ -1411,8 +1742,9 @@ impl ChatWidget { interrupts: InterruptManager::new(), reasoning_buffer: String::new(), full_reasoning_buffer: String::new(), - current_status_header: String::from("Working"), + current_status_header: String::from("Ready"), retry_status_header: None, + plan_variants_progress: None, conversation_id: None, queued_user_messages: VecDeque::new(), show_welcome_banner: false, @@ -1424,11 +1756,7 @@ impl ChatWidget { last_rendered_width: std::cell::Cell::new(None), feedback, current_rollout_path: None, - }; - - widget.prefetch_rate_limits(); - - widget + } } pub(crate) fn handle_key_event(&mut self, key_event: KeyEvent) { @@ -1565,9 +1893,17 @@ impl ChatWidget { SlashCommand::Review => { self.open_review_popup(); } + SlashCommand::Plan => { + self.bottom_pane + .show_view(Box::new(PlanRequestOverlay::new(self.app_event_tx.clone()))); + self.request_redraw(); + } SlashCommand::Model => { self.open_model_popup(); } + SlashCommand::PlanModel => { + self.open_plan_model_popup(); + } SlashCommand::Approvals => { self.open_approvals_popup(); } @@ -1761,6 +2097,7 @@ impl ChatWidget { } } + self.prefetch_rate_limits(); self.codex_op_tx .send(Op::UserInput { items }) .unwrap_or_else(|e| { @@ -1840,10 +2177,11 @@ impl ChatWidget { self.on_agent_reasoning_final(); } EventMsg::AgentReasoningSectionBreak(_) => self.on_reasoning_section_break(), - EventMsg::TaskStarted(_) => self.on_task_started(), - 
EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }) => { + EventMsg::TaskStarted(_) if !from_replay => self.on_task_started(), + EventMsg::TaskComplete(TaskCompleteEvent { last_agent_message }) if !from_replay => { self.on_task_complete(last_agent_message) } + EventMsg::TaskStarted(_) | EventMsg::TaskComplete(_) => {} EventMsg::TokenCount(ev) => { self.set_token_info(ev.info); self.on_rate_limit_snapshot(ev.rate_limits); @@ -1874,6 +2212,12 @@ impl ChatWidget { EventMsg::ElicitationRequest(ev) => { self.on_elicitation_request(ev); } + EventMsg::AskUserQuestionRequest(ev) => { + self.on_ask_user_question_request(id.unwrap_or_default(), ev) + } + EventMsg::PlanApprovalRequest(ev) => { + self.on_plan_approval_request(id.unwrap_or_default(), ev) + } EventMsg::ExecCommandBegin(ev) => self.on_exec_command_begin(ev), EventMsg::TerminalInteraction(delta) => self.on_terminal_interaction(delta), EventMsg::ExecCommandOutputDelta(delta) => self.on_exec_command_output_delta(delta), @@ -1898,14 +2242,15 @@ impl ChatWidget { EventMsg::ShutdownComplete => self.on_shutdown_complete(), EventMsg::TurnDiff(TurnDiffEvent { unified_diff }) => self.on_turn_diff(unified_diff), EventMsg::DeprecationNotice(ev) => self.on_deprecation_notice(ev), - EventMsg::BackgroundEvent(BackgroundEventEvent { message }) => { + EventMsg::BackgroundEvent(BackgroundEventEvent { message }) if !from_replay => { self.on_background_event(message) } - EventMsg::UndoStarted(ev) => self.on_undo_started(ev), + EventMsg::UndoStarted(ev) if !from_replay => self.on_undo_started(ev), EventMsg::UndoCompleted(ev) => self.on_undo_completed(ev), - EventMsg::StreamError(StreamErrorEvent { message, .. }) => { + EventMsg::StreamError(StreamErrorEvent { message, .. 
}) if !from_replay => { self.on_stream_error(message) } + EventMsg::BackgroundEvent(_) | EventMsg::UndoStarted(_) | EventMsg::StreamError(_) => {} EventMsg::UserMessage(ev) => { if from_replay { self.on_user_message_event(ev); @@ -1915,6 +2260,14 @@ impl ChatWidget { self.on_entered_review_mode(review_request) } EventMsg::ExitedReviewMode(review) => self.on_exited_review_mode(review), + EventMsg::EnteredPlanMode(request) => self.on_entered_plan_mode(request), + EventMsg::ExitedPlanMode(ev) => { + if from_replay { + self.on_exited_plan_mode_replay(ev); + } else { + self.on_exited_plan_mode(ev); + } + } EventMsg::ContextCompacted(_) => self.on_agent_message("Context compacted".to_owned()), EventMsg::RawResponseItem(_) | EventMsg::ItemStarted(_) @@ -1974,6 +2327,45 @@ impl ChatWidget { self.request_redraw(); } + fn on_entered_plan_mode(&mut self, request: PlanRequest) { + let goal = request.goal.trim(); + if goal.is_empty() { + self.add_info_message(">> Plan mode started <<".to_string(), None); + } else { + self.add_info_message(format!(">> Plan mode started: {goal} <<"), None); + } + self.request_redraw(); + } + + fn on_exited_plan_mode(&mut self, ev: ExitedPlanModeEvent) { + if ev.plan_output.is_some() { + self.add_info_message( + "<< Plan mode finished; executing approved plan >>".to_string(), + None, + ); + self.queue_user_message(UserMessage { + text: "Proceed with the approved plan.".to_string(), + image_paths: Vec::new(), + }); + } else { + self.add_info_message("<< Plan mode ended <<".to_string(), None); + } + self.request_redraw(); + } + + fn on_exited_plan_mode_replay(&mut self, ev: ExitedPlanModeEvent) { + if ev.plan_output.is_some() { + self.add_info_message( + "<< Plan mode finished; send 'Proceed with the approved plan.' 
to continue >>" + .to_string(), + None, + ); + } else { + self.add_info_message("<< Plan mode ended >>".to_string(), None); + } + self.request_redraw(); + } + fn on_user_message_event(&mut self, event: UserMessageEvent) { let message = event.message.trim(); if !message.is_empty() { @@ -2073,7 +2465,12 @@ impl ChatWidget { } fn prefetch_rate_limits(&mut self) { - self.stop_rate_limit_poller(); + if self.rate_limit_poller.is_some() { + return; + } + if tokio::runtime::Handle::try_current().is_err() { + return; + } let Some(auth) = self.auth_manager.auth() else { return; @@ -2144,7 +2541,9 @@ impl ChatWidget { approval_policy: None, sandbox_policy: None, model: Some(switch_model.clone()), + plan_model: None, effort: Some(Some(default_effort)), + plan_effort: None, summary: None, })); tx.send(AppEvent::UpdateModel(switch_model.clone())); @@ -2206,7 +2605,23 @@ impl ChatWidget { /// Open a popup to choose a quick auto model. Selecting "All models" /// opens the full picker with every available preset. 
pub(crate) fn open_model_popup(&mut self) { - let current_model = self.model_family.get_model_slug().to_string(); + self.open_model_popup_for_target(crate::app_event::ModelPickerTarget::Chat); + } + + pub(crate) fn open_plan_model_popup(&mut self) { + self.open_model_popup_for_target(crate::app_event::ModelPickerTarget::Plan); + } + + fn open_model_popup_for_target(&mut self, target: crate::app_event::ModelPickerTarget) { + let chat_model = self.model_family.get_model_slug(); + let current_model = match target { + crate::app_event::ModelPickerTarget::Chat => chat_model.to_string(), + crate::app_event::ModelPickerTarget::Plan => self + .config + .plan_model + .clone() + .unwrap_or_else(|| chat_model.to_string()), + }; let presets: Vec = // todo(aibrahim): make this async function match self.models_manager.try_list_models(&self.config) { @@ -2232,7 +2647,7 @@ impl ChatWidget { .partition(|preset| Self::is_auto_model(&preset.model)); if auto_presets.is_empty() { - self.open_all_models_popup(other_presets); + self.open_all_models_popup(target, other_presets); return; } @@ -2245,6 +2660,7 @@ impl ChatWidget { (!preset.description.is_empty()).then_some(preset.description.clone()); let model = preset.model.clone(); let actions = Self::model_selection_actions( + target, model.clone(), Some(preset.default_reasoning_effort), ); @@ -2265,13 +2681,23 @@ impl ChatWidget { let actions: Vec = vec![Box::new(move |tx| { tx.send(AppEvent::OpenAllModelsPopup { models: all_models.clone(), + target, }); })]; let is_current = !items.iter().any(|item| item.is_current); - let description = Some(format!( - "Choose a specific model and reasoning level (current: {current_label})" - )); + let description = Some(match target { + crate::app_event::ModelPickerTarget::Chat => { + format!( + "Choose a specific model and reasoning level (current: {current_label})" + ) + } + crate::app_event::ModelPickerTarget::Plan => { + format!( + "Choose a specific model and reasoning level for /plan (current: 
{current_label})" + ) + } + }); items.push(SelectionItem { name: "All models".to_string(), @@ -2284,8 +2710,18 @@ impl ChatWidget { } self.bottom_pane.show_selection_view(SelectionViewParams { - title: Some("Select Model".to_string()), - subtitle: Some("Pick a quick auto mode or browse all models.".to_string()), + title: Some(match target { + crate::app_event::ModelPickerTarget::Chat => "Select Model".to_string(), + crate::app_event::ModelPickerTarget::Plan => "Select Plan Model".to_string(), + }), + subtitle: Some(match target { + crate::app_event::ModelPickerTarget::Chat => { + "Pick a quick auto mode or browse all models.".to_string() + } + crate::app_event::ModelPickerTarget::Plan => { + "Pick a quick auto mode or browse all models for /plan.".to_string() + } + }), footer_hint: Some(standard_popup_hint_line()), items, ..Default::default() @@ -2305,7 +2741,11 @@ impl ChatWidget { } } - pub(crate) fn open_all_models_popup(&mut self, presets: Vec) { + pub(crate) fn open_all_models_popup( + &mut self, + target: crate::app_event::ModelPickerTarget, + presets: Vec, + ) { if presets.is_empty() { self.add_info_message( "No additional models are available right now.".to_string(), @@ -2314,7 +2754,15 @@ impl ChatWidget { return; } - let current_model = self.model_family.get_model_slug().to_string(); + let chat_model = self.model_family.get_model_slug(); + let current_model = match target { + crate::app_event::ModelPickerTarget::Chat => chat_model.to_string(), + crate::app_event::ModelPickerTarget::Plan => self + .config + .plan_model + .clone() + .unwrap_or_else(|| chat_model.to_string()), + }; let mut items: Vec = Vec::new(); for preset in presets.into_iter() { let description = @@ -2326,6 +2774,7 @@ impl ChatWidget { let preset_for_event = preset_for_action.clone(); tx.send(AppEvent::OpenReasoningPopup { model: preset_for_event, + target, }); })]; items.push(SelectionItem { @@ -2340,7 +2789,12 @@ impl ChatWidget { } 
self.bottom_pane.show_selection_view(SelectionViewParams { - title: Some("Select Model and Effort".to_string()), + title: Some(match target { + crate::app_event::ModelPickerTarget::Chat => "Select Model and Effort".to_string(), + crate::app_event::ModelPickerTarget::Plan => { + "Select Plan Model and Effort".to_string() + } + }), subtitle: Some( "Access legacy models by running codex -m or in your config.toml" .to_string(), @@ -2352,6 +2806,7 @@ impl ChatWidget { } fn model_selection_actions( + target: crate::app_event::ModelPickerTarget, model_for_action: String, effort_for_action: Option, ) -> Vec { @@ -2359,30 +2814,63 @@ impl ChatWidget { let effort_label = effort_for_action .map(|effort| effort.to_string()) .unwrap_or_else(|| "default".to_string()); - tx.send(AppEvent::CodexOp(Op::OverrideTurnContext { - cwd: None, - approval_policy: None, - sandbox_policy: None, - model: Some(model_for_action.clone()), - effort: Some(effort_for_action), - summary: None, - })); - tx.send(AppEvent::UpdateModel(model_for_action.clone())); - tx.send(AppEvent::UpdateReasoningEffort(effort_for_action)); - tx.send(AppEvent::PersistModelSelection { - model: model_for_action.clone(), - effort: effort_for_action, - }); - tracing::info!( - "Selected model: {}, Selected effort: {}", - model_for_action, - effort_label - ); + match target { + crate::app_event::ModelPickerTarget::Chat => { + tx.send(AppEvent::CodexOp(Op::OverrideTurnContext { + cwd: None, + approval_policy: None, + sandbox_policy: None, + model: Some(model_for_action.clone()), + plan_model: None, + effort: Some(effort_for_action), + plan_effort: None, + summary: None, + })); + tx.send(AppEvent::UpdateModel(model_for_action.clone())); + tx.send(AppEvent::UpdateReasoningEffort(effort_for_action)); + tx.send(AppEvent::PersistModelSelection { + model: model_for_action.clone(), + effort: effort_for_action, + }); + tracing::info!( + "Selected model: {}, Selected effort: {}", + model_for_action, + effort_label + ); + } + 
crate::app_event::ModelPickerTarget::Plan => { + tx.send(AppEvent::CodexOp(Op::OverrideTurnContext { + cwd: None, + approval_policy: None, + sandbox_policy: None, + model: None, + plan_model: Some(model_for_action.clone()), + effort: None, + plan_effort: Some(effort_for_action), + summary: None, + })); + tx.send(AppEvent::UpdatePlanModel(model_for_action.clone())); + tx.send(AppEvent::UpdatePlanReasoningEffort(effort_for_action)); + tx.send(AppEvent::PersistPlanModelSelection { + model: model_for_action.clone(), + effort: effort_for_action, + }); + tracing::info!( + "Selected plan model: {}, Selected effort: {}", + model_for_action, + effort_label + ); + } + } })] } /// Open a popup to choose the reasoning effort (stage 2) for the given model. - pub(crate) fn open_reasoning_popup(&mut self, preset: ModelPreset) { + pub(crate) fn open_reasoning_popup( + &mut self, + target: crate::app_event::ModelPickerTarget, + preset: ModelPreset, + ) { let default_effort: ReasoningEffortConfig = preset.default_reasoning_effort; let supported = preset.supported_reasoning_efforts; @@ -2429,9 +2917,9 @@ impl ChatWidget { if choices.len() == 1 { if let Some(effort) = choices.first().and_then(|c| c.stored) { - self.apply_model_and_effort(preset.model, Some(effort)); + self.apply_model_and_effort(target, preset.model, Some(effort)); } else { - self.apply_model_and_effort(preset.model, None); + self.apply_model_and_effort(target, preset.model, None); } return; } @@ -2445,9 +2933,25 @@ impl ChatWidget { .or(Some(default_effort)); let model_slug = preset.model.to_string(); - let is_current_model = self.model_family.get_model_slug() == preset.model; + let chat_model = self.model_family.get_model_slug(); + let effective_current_model = match target { + crate::app_event::ModelPickerTarget::Chat => chat_model, + crate::app_event::ModelPickerTarget::Plan => { + self.config.plan_model.as_deref().unwrap_or(chat_model) + } + }; + let is_current_model = effective_current_model == preset.model; let 
highlight_choice = if is_current_model { - self.config.model_reasoning_effort + match target { + crate::app_event::ModelPickerTarget::Chat => self.config.model_reasoning_effort, + crate::app_event::ModelPickerTarget::Plan => { + if self.config.plan_model.as_deref() == Some(preset.model.as_str()) { + self.config.plan_model_reasoning_effort + } else { + self.config.model_reasoning_effort + } + } + } } else { default_choice }; @@ -2490,7 +2994,7 @@ impl ChatWidget { }; let model_for_action = model_slug.clone(); - let actions = Self::model_selection_actions(model_for_action, choice.stored); + let actions = Self::model_selection_actions(target, model_for_action, choice.stored); items.push(SelectionItem { name: effort_label, @@ -2528,30 +3032,68 @@ impl ChatWidget { } } - fn apply_model_and_effort(&self, model: String, effort: Option) { - self.app_event_tx - .send(AppEvent::CodexOp(Op::OverrideTurnContext { - cwd: None, - approval_policy: None, - sandbox_policy: None, - model: Some(model.clone()), - effort: Some(effort), - summary: None, - })); - self.app_event_tx.send(AppEvent::UpdateModel(model.clone())); - self.app_event_tx - .send(AppEvent::UpdateReasoningEffort(effort)); - self.app_event_tx.send(AppEvent::PersistModelSelection { - model: model.clone(), - effort, - }); - tracing::info!( - "Selected model: {}, Selected effort: {}", - model, - effort - .map(|e| e.to_string()) - .unwrap_or_else(|| "default".to_string()) - ); + fn apply_model_and_effort( + &self, + target: crate::app_event::ModelPickerTarget, + model: String, + effort: Option, + ) { + let effort_label = effort + .map(|e| e.to_string()) + .unwrap_or_else(|| "default".to_string()); + match target { + crate::app_event::ModelPickerTarget::Chat => { + self.app_event_tx + .send(AppEvent::CodexOp(Op::OverrideTurnContext { + cwd: None, + approval_policy: None, + sandbox_policy: None, + model: Some(model.clone()), + plan_model: None, + effort: Some(effort), + plan_effort: None, + summary: None, + })); + 
self.app_event_tx.send(AppEvent::UpdateModel(model.clone())); + self.app_event_tx + .send(AppEvent::UpdateReasoningEffort(effort)); + self.app_event_tx.send(AppEvent::PersistModelSelection { + model: model.clone(), + effort, + }); + tracing::info!( + "Selected model: {}, Selected effort: {}", + model, + effort_label + ); + } + crate::app_event::ModelPickerTarget::Plan => { + self.app_event_tx + .send(AppEvent::CodexOp(Op::OverrideTurnContext { + cwd: None, + approval_policy: None, + sandbox_policy: None, + model: None, + plan_model: Some(model.clone()), + effort: None, + plan_effort: Some(effort), + summary: None, + })); + self.app_event_tx + .send(AppEvent::UpdatePlanModel(model.clone())); + self.app_event_tx + .send(AppEvent::UpdatePlanReasoningEffort(effort)); + self.app_event_tx.send(AppEvent::PersistPlanModelSelection { + model: model.clone(), + effort, + }); + tracing::info!( + "Selected plan model: {}, Selected effort: {}", + model, + effort_label + ); + } + } } /// Open a popup to choose the approvals mode (ask for approval policy + sandbox policy). @@ -2642,7 +3184,9 @@ impl ChatWidget { approval_policy: Some(approval), sandbox_policy: Some(sandbox_clone.clone()), model: None, + plan_model: None, effort: None, + plan_effort: None, summary: None, })); tx.send(AppEvent::UpdateAskForApprovalPolicy(approval)); @@ -3001,12 +3545,22 @@ impl ChatWidget { self.config.model_reasoning_effort = effort; } + /// Set the plan reasoning effort in the widget's config copy. + pub(crate) fn set_plan_reasoning_effort(&mut self, effort: Option) { + self.config.plan_model_reasoning_effort = effort; + } + /// Set the model in the widget's config copy. pub(crate) fn set_model(&mut self, model: &str, model_family: ModelFamily) { self.session_header.set_model(model); self.model_family = model_family; } + /// Set the plan model in the widget's config copy. 
+ pub(crate) fn set_plan_model(&mut self, model: &str) { + self.config.plan_model = Some(model.to_string()); + } + pub(crate) fn add_info_message(&mut self, message: String, hint: Option) { self.add_to_history(history_cell::new_info_event(message, hint)); self.request_redraw(); diff --git a/codex-rs/tui2/src/chatwidget/interrupts.rs b/codex-rs/tui2/src/chatwidget/interrupts.rs index dc1e683ea55..7a49547f181 100644 --- a/codex-rs/tui2/src/chatwidget/interrupts.rs +++ b/codex-rs/tui2/src/chatwidget/interrupts.rs @@ -1,12 +1,14 @@ use std::collections::VecDeque; use codex_core::protocol::ApplyPatchApprovalRequestEvent; +use codex_core::protocol::AskUserQuestionRequestEvent; use codex_core::protocol::ExecApprovalRequestEvent; use codex_core::protocol::ExecCommandBeginEvent; use codex_core::protocol::ExecCommandEndEvent; use codex_core::protocol::McpToolCallBeginEvent; use codex_core::protocol::McpToolCallEndEvent; use codex_core::protocol::PatchApplyEndEvent; +use codex_core::protocol::PlanApprovalRequestEvent; use codex_protocol::approvals::ElicitationRequestEvent; use super::ChatWidget; @@ -16,6 +18,8 @@ pub(crate) enum QueuedInterrupt { ExecApproval(String, ExecApprovalRequestEvent), ApplyPatchApproval(String, ApplyPatchApprovalRequestEvent), Elicitation(ElicitationRequestEvent), + AskUserQuestion(String, AskUserQuestionRequestEvent), + PlanApproval(String, PlanApprovalRequestEvent), ExecBegin(ExecCommandBeginEvent), ExecEnd(ExecCommandEndEvent), McpBegin(McpToolCallBeginEvent), @@ -57,6 +61,15 @@ impl InterruptManager { self.queue.push_back(QueuedInterrupt::Elicitation(ev)); } + pub(crate) fn push_ask_user_question(&mut self, id: String, ev: AskUserQuestionRequestEvent) { + self.queue + .push_back(QueuedInterrupt::AskUserQuestion(id, ev)); + } + + pub(crate) fn push_plan_approval(&mut self, id: String, ev: PlanApprovalRequestEvent) { + self.queue.push_back(QueuedInterrupt::PlanApproval(id, ev)); + } + pub(crate) fn push_exec_begin(&mut self, ev: 
ExecCommandBeginEvent) { self.queue.push_back(QueuedInterrupt::ExecBegin(ev)); } @@ -85,6 +98,12 @@ impl InterruptManager { chat.handle_apply_patch_approval_now(id, ev) } QueuedInterrupt::Elicitation(ev) => chat.handle_elicitation_request_now(ev), + QueuedInterrupt::AskUserQuestion(id, ev) => { + chat.handle_ask_user_question_request_now(id, ev) + } + QueuedInterrupt::PlanApproval(id, ev) => { + chat.handle_plan_approval_request_now(id, ev) + } QueuedInterrupt::ExecBegin(ev) => chat.handle_exec_begin_now(ev), QueuedInterrupt::ExecEnd(ev) => chat.handle_exec_end_now(ev), QueuedInterrupt::McpBegin(ev) => chat.handle_mcp_begin_now(ev), diff --git a/codex-rs/tui2/src/chatwidget/snapshots/codex_tui__chatwidget__tests__update_popup.snap b/codex-rs/tui2/src/chatwidget/snapshots/codex_tui__chatwidget__tests__update_popup.snap index 6a49cb253c4..cb7c29c9506 100644 --- a/codex-rs/tui2/src/chatwidget/snapshots/codex_tui__chatwidget__tests__update_popup.snap +++ b/codex-rs/tui2/src/chatwidget/snapshots/codex_tui__chatwidget__tests__update_popup.snap @@ -4,7 +4,7 @@ expression: terminal.backend().vt100().screen().contents() --- ✨ New version available! Would you like to update? - Full release notes: https://github.com/openai/codex/releases/latest + Full release notes: https://github.com/Ixe1/codexel/releases/latest › 1. 
Yes, update now diff --git a/codex-rs/tui2/src/chatwidget/tests.rs b/codex-rs/tui2/src/chatwidget/tests.rs index b90cc6e9695..2477466dd41 100644 --- a/codex-rs/tui2/src/chatwidget/tests.rs +++ b/codex-rs/tui2/src/chatwidget/tests.rs @@ -26,6 +26,7 @@ use codex_core::protocol::ExecCommandBeginEvent; use codex_core::protocol::ExecCommandEndEvent; use codex_core::protocol::ExecCommandSource; use codex_core::protocol::ExecPolicyAmendment; +use codex_core::protocol::ExitedPlanModeEvent; use codex_core::protocol::ExitedReviewModeEvent; use codex_core::protocol::FileChange; use codex_core::protocol::McpStartupStatus; @@ -33,6 +34,7 @@ use codex_core::protocol::McpStartupUpdateEvent; use codex_core::protocol::Op; use codex_core::protocol::PatchApplyBeginEvent; use codex_core::protocol::PatchApplyEndEvent; +use codex_core::protocol::PlanOutputEvent; use codex_core::protocol::RateLimitWindow; use codex_core::protocol::ReviewRequest; use codex_core::protocol::ReviewTarget; @@ -152,6 +154,90 @@ fn resumed_initial_messages_render_history() { ); } +#[test] +fn resumed_session_does_not_start_rate_limit_poller_until_input() { + let (mut chat, _rx, _ops) = make_chatwidget_manual(None); + set_chatgpt_auth(&mut chat); + + let conversation_id = ConversationId::new(); + let rollout_file = NamedTempFile::new().unwrap(); + let configured = codex_core::protocol::SessionConfiguredEvent { + session_id: conversation_id, + model: "test-model".to_string(), + model_provider_id: "test-provider".to_string(), + approval_policy: AskForApproval::Never, + sandbox_policy: SandboxPolicy::ReadOnly, + cwd: PathBuf::from("/home/user/project"), + reasoning_effort: Some(ReasoningEffortConfig::default()), + history_log_id: 0, + history_entry_count: 0, + initial_messages: None, + rollout_path: rollout_file.path().to_path_buf(), + }; + + chat.handle_codex_event(Event { + id: "initial".into(), + msg: EventMsg::SessionConfigured(configured), + }); + + assert!( + chat.rate_limit_poller.is_none(), + "expected no 
rate limit polling until user input" + ); +} + +#[test] +fn resumed_session_does_not_auto_execute_plan() { + let (mut chat, _rx, mut op_rx) = make_chatwidget_manual(None); + set_chatgpt_auth(&mut chat); + + let conversation_id = ConversationId::new(); + let rollout_file = NamedTempFile::new().unwrap(); + let configured = codex_core::protocol::SessionConfiguredEvent { + session_id: conversation_id, + model: "test-model".to_string(), + model_provider_id: "test-provider".to_string(), + approval_policy: AskForApproval::Never, + sandbox_policy: SandboxPolicy::ReadOnly, + cwd: PathBuf::from("/home/user/project"), + reasoning_effort: Some(ReasoningEffortConfig::default()), + history_log_id: 0, + history_entry_count: 0, + initial_messages: Some(vec![EventMsg::ExitedPlanMode(ExitedPlanModeEvent { + plan_output: Some(PlanOutputEvent { + title: "Example".to_string(), + summary: "Summary".to_string(), + plan: UpdatePlanArgs { + explanation: None, + plan: vec![PlanItemArg { + step: "Step 1".to_string(), + status: StepStatus::Pending, + }], + }, + }), + })]), + rollout_path: rollout_file.path().to_path_buf(), + }; + + chat.handle_codex_event(Event { + id: "initial".into(), + msg: EventMsg::SessionConfigured(configured), + }); + + let mut saw_user_turn = false; + while let Ok(op) = op_rx.try_recv() { + if matches!(op, Op::UserTurn { .. } | Op::UserInput { .. }) { + saw_user_turn = true; + break; + } + } + + assert!( + !saw_user_turn, + "expected no auto-execute user turn after resume replay" + ); +} + /// Entering review mode uses the hint provided by the review request. 
#[test] fn entered_review_mode_uses_request_hint() { @@ -380,6 +466,7 @@ fn make_chatwidget_manual( token_info: None, rate_limit_snapshot: None, plan_type: None, + last_plan_update_key: None, rate_limit_warnings: RateLimitWarningState::default(), rate_limit_switch_prompt: RateLimitSwitchPromptState::default(), rate_limit_poller: None, @@ -392,8 +479,9 @@ fn make_chatwidget_manual( interrupts: InterruptManager::new(), reasoning_buffer: String::new(), full_reasoning_buffer: String::new(), - current_status_header: String::from("Working"), + current_status_header: String::from("Ready"), retry_status_header: None, + plan_variants_progress: None, conversation_id: None, frame_requester: FrameRequester::test_dummy(), show_welcome_banner: true, @@ -1848,7 +1936,7 @@ fn model_reasoning_selection_popup_snapshot() { chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::High); let preset = get_available_model(&chat, "gpt-5.1-codex-max"); - chat.open_reasoning_popup(preset); + chat.open_reasoning_popup(crate::app_event::ModelPickerTarget::Chat, preset); let popup = render_bottom_popup(&chat, 80); assert_snapshot!("model_reasoning_selection_popup", popup); @@ -1862,7 +1950,7 @@ fn model_reasoning_selection_popup_extra_high_warning_snapshot() { chat.config.model_reasoning_effort = Some(ReasoningEffortConfig::XHigh); let preset = get_available_model(&chat, "gpt-5.1-codex-max"); - chat.open_reasoning_popup(preset); + chat.open_reasoning_popup(crate::app_event::ModelPickerTarget::Chat, preset); let popup = render_bottom_popup(&chat, 80); assert_snapshot!("model_reasoning_selection_popup_extra_high_warning", popup); @@ -1875,7 +1963,7 @@ fn reasoning_popup_shows_extra_high_with_space() { set_chatgpt_auth(&mut chat); let preset = get_available_model(&chat, "gpt-5.1-codex-max"); - chat.open_reasoning_popup(preset); + chat.open_reasoning_popup(crate::app_event::ModelPickerTarget::Chat, preset); let popup = render_bottom_popup(&chat, 120); assert!( @@ -1908,7 +1996,7 @@ fn 
single_reasoning_option_skips_selection() { show_in_picker: true, supported_in_api: true, }; - chat.open_reasoning_popup(preset); + chat.open_reasoning_popup(crate::app_event::ModelPickerTarget::Chat, preset); let popup = render_bottom_popup(&chat, 80); assert!( @@ -1957,7 +2045,7 @@ fn reasoning_popup_escape_returns_to_model_popup() { chat.open_model_popup(); let preset = get_available_model(&chat, "gpt-5.1-codex-max"); - chat.open_reasoning_popup(preset); + chat.open_reasoning_popup(crate::app_event::ModelPickerTarget::Chat, preset); let before_escape = render_bottom_popup(&chat, 80); assert!(before_escape.contains("Select Reasoning Level")); @@ -2884,6 +2972,36 @@ fn plan_update_renders_history_cell() { assert!(blob.contains("Write tests")); } +#[test] +fn plan_update_dedupes_identical_updates() { + let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None); + let update = UpdatePlanArgs { + explanation: Some("Updating plan".to_string()), + plan: vec![ + PlanItemArg { + step: "Explore codebase".into(), + status: StepStatus::Completed, + }, + PlanItemArg { + step: "Implement feature".into(), + status: StepStatus::InProgress, + }, + ], + }; + + chat.handle_codex_event(Event { + id: "sub-1".into(), + msg: EventMsg::PlanUpdate(update.clone()), + }); + chat.handle_codex_event(Event { + id: "sub-1".into(), + msg: EventMsg::PlanUpdate(update), + }); + + let cells = drain_insert_history(&mut rx); + assert_eq!(cells.len(), 1, "expected a single plan update cell"); +} + #[test] fn stream_error_updates_status_indicator() { let (mut chat, mut rx, _op_rx) = make_chatwidget_manual(None); diff --git a/codex-rs/tui2/src/cli.rs b/codex-rs/tui2/src/cli.rs index b0daa447701..50b806a5ece 100644 --- a/codex-rs/tui2/src/cli.rs +++ b/codex-rs/tui2/src/cli.rs @@ -15,8 +15,8 @@ pub struct Cli { #[arg(long = "image", short = 'i', value_name = "FILE", value_delimiter = ',', num_args = 1..)] pub images: Vec, - // Internal controls set by the top-level `codex resume` subcommand. 
- // These are not exposed as user flags on the base `codex` command. + // Internal controls set by the top-level `codexel resume` subcommand. + // These are not exposed as user flags on the base `codexel` command. #[clap(skip)] pub resume_picker: bool, @@ -24,7 +24,7 @@ pub struct Cli { pub resume_last: bool, /// Internal: resume a specific recorded session by id (UUID). Set by the - /// top-level `codex resume ` wrapper; not exposed as a public flag. + /// top-level `codexel resume ` wrapper; not exposed as a public flag. #[clap(skip)] pub resume_session_id: Option, diff --git a/codex-rs/tui2/src/history_cell.rs b/codex-rs/tui2/src/history_cell.rs index 5440040f6b5..6862557a7ac 100644 --- a/codex-rs/tui2/src/history_cell.rs +++ b/codex-rs/tui2/src/history_cell.rs @@ -307,7 +307,7 @@ impl HistoryCell for UpdateAvailableHistoryCell { } else { line![ "See ", - "https://github.com/openai/codex".cyan().underlined(), + "https://github.com/Ixe1/codexel".cyan().underlined(), " for installation options." 
] }; @@ -322,7 +322,7 @@ impl HistoryCell for UpdateAvailableHistoryCell { update_instruction, "", "See full release notes:", - "https://github.com/openai/codex/releases/latest" + "https://github.com/Ixe1/codexel/releases/latest" .cyan() .underlined(), ]; @@ -767,10 +767,10 @@ impl HistoryCell for SessionHeaderHistoryCell { let make_row = |spans: Vec>| Line::from(spans); - // Title line rendered inside the box: ">_ OpenAI Codex (vX)" + // Title line rendered inside the box: ">_ Codexel (vX)" let title_spans: Vec> = vec![ Span::from(">_ ").dim(), - Span::from("OpenAI Codex").bold(), + Span::from("Codexel").bold(), Span::from(" ").dim(), Span::from(format!("(v{})", self.version)).dim(), ]; @@ -1021,9 +1021,9 @@ pub(crate) fn new_active_mcp_tool_call( McpToolCallCell::new(call_id, invocation, animations_enabled) } -pub(crate) fn new_web_search_call(query: String) -> PlainHistoryCell { - let lines: Vec> = vec![Line::from(vec![padded_emoji("🌐").into(), query.into()])]; - PlainHistoryCell { lines } +pub(crate) fn new_web_search_call(query: String) -> PrefixedWrappedHistoryCell { + let text: Text<'static> = Line::from(vec!["Searched".bold(), " ".into(), query.into()]).into(); + PrefixedWrappedHistoryCell::new(text, "• ".dim(), " ") } /// If the first content is an image, return a new cell with the image. 
@@ -1113,7 +1113,8 @@ pub(crate) fn empty_mcp_output() -> PlainHistoryCell { " • No MCP servers configured.".italic().into(), Line::from(vec![ " See the ".into(), - "\u{1b}]8;;https://github.com/openai/codex/blob/main/docs/config.md#mcp_servers\u{7}MCP docs\u{1b}]8;;\u{7}".underlined(), + "\u{1b}]8;;https://github.com/Ixe1/codexel/blob/main/docs/config.md#mcp_servers\u{7}MCP docs\u{1b}]8;;\u{7}" + .underlined(), " to configure them.".into(), ]) .style(Style::default().add_modifier(Modifier::DIM)), @@ -1673,6 +1674,50 @@ mod tests { ); } + #[test] + fn web_search_history_cell_snapshot() { + let cell = new_web_search_call( + "example search query with several generic words to exercise wrapping".to_string(), + ); + let rendered = render_lines(&cell.display_lines(64)).join("\n"); + + insta::assert_snapshot!(rendered); + } + + #[test] + fn web_search_history_cell_wraps_with_indented_continuation() { + let cell = new_web_search_call( + "example search query with several generic words to exercise wrapping".to_string(), + ); + let rendered = render_lines(&cell.display_lines(64)); + + assert_eq!( + rendered, + vec![ + "• Searched example search query with several generic words to".to_string(), + " exercise wrapping".to_string(), + ] + ); + } + + #[test] + fn web_search_history_cell_short_query_does_not_wrap() { + let cell = new_web_search_call("short query".to_string()); + let rendered = render_lines(&cell.display_lines(64)); + + assert_eq!(rendered, vec!["• Searched short query".to_string()]); + } + + #[test] + fn web_search_history_cell_transcript_snapshot() { + let cell = new_web_search_call( + "example search query with several generic words to exercise wrapping".to_string(), + ); + let rendered = render_lines(&cell.transcript_lines(64)).join("\n"); + + insta::assert_snapshot!(rendered); + } + #[test] fn active_mcp_tool_call_snapshot() { let invocation = McpInvocation { diff --git a/codex-rs/tui2/src/lib.rs b/codex-rs/tui2/src/lib.rs index a9b34c495cf..cb3ac87c787 
100644 --- a/codex-rs/tui2/src/lib.rs +++ b/codex-rs/tui2/src/lib.rs @@ -247,7 +247,7 @@ pub async fn run_main( // Ensure the file is only readable and writable by the current user. // Doing the equivalent to `chmod 600` on Windows is quite a bit more code // and requires the Windows API crates, so we can reconsider that when - // Codex CLI is officially supported on Windows. + // Codexel is officially supported on Windows. #[cfg(unix)] { use std::os::unix::fs::OpenOptionsExt; @@ -437,7 +437,7 @@ async fn run_ratatui_app( let _ = tui.terminal.clear(); if let Err(err) = writeln!( std::io::stdout(), - "No saved session found with ID {id_str}. Run `codex resume` without an ID to choose from existing sessions." + "No saved session found with ID {id_str}. Run `codexel resume` without an ID to choose from existing sessions." ) { error!("Failed to write resume error message: {err}"); } diff --git a/codex-rs/tui2/src/onboarding/auth.rs b/codex-rs/tui2/src/onboarding/auth.rs index 6307e6e7dc9..5cdf925b14d 100644 --- a/codex-rs/tui2/src/onboarding/auth.rs +++ b/codex-rs/tui2/src/onboarding/auth.rs @@ -296,7 +296,8 @@ impl AuthModeWidget { " Decide how much autonomy you want to grant Codex".into(), Line::from(vec![ " For more details see the ".into(), - "\u{1b}]8;;https://github.com/openai/codex\u{7}Codex docs\u{1b}]8;;\u{7}".underlined(), + "\u{1b}]8;;https://github.com/Ixe1/codexel\u{7}Codexel docs\u{1b}]8;;\u{7}" + .underlined(), ]) .dim(), "".into(), diff --git a/codex-rs/tui2/src/slash_command.rs b/codex-rs/tui2/src/slash_command.rs index e0c676812c8..df759a40ccd 100644 --- a/codex-rs/tui2/src/slash_command.rs +++ b/codex-rs/tui2/src/slash_command.rs @@ -13,9 +13,11 @@ pub enum SlashCommand { // DO NOT ALPHA-SORT! Enum order is presentation order in the popup, so // more frequently used commands should be listed first. 
Model, + PlanModel, Approvals, Skills, Review, + Plan, New, Resume, Init, @@ -42,6 +44,7 @@ impl SlashCommand { SlashCommand::Init => "create an AGENTS.md file with instructions for Codex", SlashCommand::Compact => "summarize conversation to prevent hitting the context limit", SlashCommand::Review => "review my current changes and find issues", + SlashCommand::Plan => "plan a task before making changes", SlashCommand::Resume => "resume a saved chat", SlashCommand::Undo => "ask Codex to undo a turn", SlashCommand::Quit | SlashCommand::Exit => "exit Codex", @@ -50,6 +53,7 @@ impl SlashCommand { SlashCommand::Skills => "use skills to improve how Codex performs specific tasks", SlashCommand::Status => "show current session configuration and token usage", SlashCommand::Model => "choose what model and reasoning effort to use", + SlashCommand::PlanModel => "choose what model and reasoning effort to use for /plan", SlashCommand::Approvals => "choose what Codex can do without approval", SlashCommand::Mcp => "list configured MCP tools", SlashCommand::Logout => "log out of Codex", @@ -73,8 +77,10 @@ impl SlashCommand { | SlashCommand::Compact | SlashCommand::Undo | SlashCommand::Model + | SlashCommand::PlanModel | SlashCommand::Approvals | SlashCommand::Review + | SlashCommand::Plan | SlashCommand::Logout => false, SlashCommand::Diff | SlashCommand::Mention diff --git a/codex-rs/tui2/src/snapshots/codex_tui2__history_cell__tests__web_search_history_cell_snapshot.snap b/codex-rs/tui2/src/snapshots/codex_tui2__history_cell__tests__web_search_history_cell_snapshot.snap new file mode 100644 index 00000000000..5b365e31781 --- /dev/null +++ b/codex-rs/tui2/src/snapshots/codex_tui2__history_cell__tests__web_search_history_cell_snapshot.snap @@ -0,0 +1,6 @@ +--- +source: tui2/src/history_cell.rs +expression: rendered +--- +• Searched example search query with several generic words to + exercise wrapping diff --git 
a/codex-rs/tui2/src/snapshots/codex_tui2__history_cell__tests__web_search_history_cell_transcript_snapshot.snap b/codex-rs/tui2/src/snapshots/codex_tui2__history_cell__tests__web_search_history_cell_transcript_snapshot.snap new file mode 100644 index 00000000000..5b365e31781 --- /dev/null +++ b/codex-rs/tui2/src/snapshots/codex_tui2__history_cell__tests__web_search_history_cell_transcript_snapshot.snap @@ -0,0 +1,6 @@ +--- +source: tui2/src/history_cell.rs +expression: rendered +--- +• Searched example search query with several generic words to + exercise wrapping diff --git a/codex-rs/tui2/src/snapshots/codex_tui__diff_render__tests__single_line_replacement_counts.snap b/codex-rs/tui2/src/snapshots/codex_tui__diff_render__tests__single_line_replacement_counts.snap index d4a7bd8bf37..40942a71b38 100644 --- a/codex-rs/tui2/src/snapshots/codex_tui__diff_render__tests__single_line_replacement_counts.snap +++ b/codex-rs/tui2/src/snapshots/codex_tui__diff_render__tests__single_line_replacement_counts.snap @@ -4,8 +4,8 @@ assertion_line: 765 expression: terminal.backend() --- "• Proposed Change README.md (+1 -1) " -" 1 -# Codex CLI (Rust Implementation) " -" 1 +# Codex CLI (Rust Implementation) banana " +" 1 -# Codexel (Rust Implementation) " +" 1 +# Codexel (Rust Implementation) banana " " " " " " " diff --git a/codex-rs/tui2/src/snapshots/codex_tui__update_prompt__tests__update_prompt_modal.snap b/codex-rs/tui2/src/snapshots/codex_tui__update_prompt__tests__update_prompt_modal.snap index 24d8831c956..0d0182552a6 100644 --- a/codex-rs/tui2/src/snapshots/codex_tui__update_prompt__tests__update_prompt_modal.snap +++ b/codex-rs/tui2/src/snapshots/codex_tui__update_prompt__tests__update_prompt_modal.snap @@ -4,9 +4,9 @@ expression: terminal.backend() --- ✨ Update available! 0.0.0 -> 9.9.9 - Release notes: https://github.com/openai/codex/releases/latest + Release notes: https://github.com/Ixe1/codexel/releases/latest -› 1. 
Update now (runs `npm install -g @openai/codex@latest`) +› 1. Update now (runs `brew upgrade --cask codexel`) 2. Skip 3. Skip until next version diff --git a/codex-rs/tui2/src/status/card.rs b/codex-rs/tui2/src/status/card.rs index aac981c764e..2ba3bbdd7a1 100644 --- a/codex-rs/tui2/src/status/card.rs +++ b/codex-rs/tui2/src/status/card.rs @@ -300,7 +300,7 @@ impl HistoryCell for StatusHistoryCell { let mut lines: Vec> = Vec::new(); lines.push(Line::from(vec![ Span::from(format!("{}>_ ", FieldFormatter::INDENT)).dim(), - Span::from("OpenAI Codex").bold(), + Span::from("Codexel").bold(), Span::from(" ").dim(), Span::from(format!("(v{CODEX_CLI_VERSION})")).dim(), ])); @@ -319,7 +319,7 @@ impl HistoryCell for StatusHistoryCell { (None, None) => "ChatGPT".to_string(), }, StatusAccountDisplay::ApiKey => { - "API key configured (run codex login to use ChatGPT)".to_string() + "API key configured (run codexel login to use ChatGPT)".to_string() } }); diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap index 5c805561461..427d0313119 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭─────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_credits_and_limits.snap 
b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_credits_and_limits.snap index 7a914837399..d1854d6d55c 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_credits_and_limits.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_credits_and_limits.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_monthly_limit.snap b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_monthly_limit.snap index 61701111155..eac859dcf40 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_monthly_limit.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_monthly_limit.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭────────────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_reasoning_details.snap b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_reasoning_details.snap index 1e88139cc43..a13b5ecf559 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_reasoning_details.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_includes_reasoning_details.snap @@ -5,7 +5,7 @@ expression: sanitized 
/status ╭───────────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_empty_limits_message.snap b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_empty_limits_message.snap index ac824827e3a..35bb4febb26 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_empty_limits_message.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_empty_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_missing_limits_message.snap b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_missing_limits_message.snap index ac824827e3a..35bb4febb26 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_missing_limits_message.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_missing_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_stale_limits_message.snap 
b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_stale_limits_message.snap index ffdb825bac6..82403d290e7 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_stale_limits_message.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_shows_stale_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_truncates_in_narrow_terminal.snap b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_truncates_in_narrow_terminal.snap index 1762b1b715f..6e9e8158717 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_truncates_in_narrow_terminal.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui2__status__tests__status_snapshot_truncates_in_narrow_terminal.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.1.2) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap index dbb634bab1c..932d4fa901a 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap +++ 
b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_cached_limits_hide_credits_without_flag.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭─────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.0.0) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ @@ -22,3 +22,4 @@ expression: sanitized │ Weekly limit: [█████████████░░░░░░░] 65% left (resets 11:52) │ │ Warning: limits may be stale - start new turn to refresh. │ ╰─────────────────────────────────────────────────────────────────────╯ + diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_credits_and_limits.snap b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_credits_and_limits.snap index 1707a4c5fbc..3c6837834ba 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_credits_and_limits.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_credits_and_limits.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.0.0) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ @@ -22,3 +22,4 @@ expression: sanitized │ Weekly limit: [██████████████░░░░░░] 70% left (resets 09:55) │ │ Credits: 38 credits │ ╰───────────────────────────────────────────────────────────────────╯ + diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap index 3ecc4fa8ed2..37470165e34 100644 --- 
a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_monthly_limit.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭────────────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.0.0) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ @@ -20,3 +20,4 @@ expression: sanitized │ Context window: 100% left (1.2K used / 272K) │ │ Monthly limit: [██████████████████░░] 88% left (resets 07:08 on 7 May) │ ╰────────────────────────────────────────────────────────────────────────────╯ + diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap index c22577407ee..964409e7ace 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_includes_reasoning_details.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.0.0) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ @@ -21,3 +21,4 @@ expression: sanitized │ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │ │ Weekly limit: [███████████░░░░░░░░░] 55% left (resets 03:24) │ ╰───────────────────────────────────────────────────────────────────────────╯ + diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap 
b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap index f0e6b734454..9af9033b633 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_empty_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.0.0) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ @@ -20,3 +20,4 @@ expression: sanitized │ Context window: 100% left (750 used / 272K) │ │ Limits: data not available yet │ ╰───────────────────────────────────────────────────────────────────────╯ + diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap index f0e6b734454..9af9033b633 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_missing_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.0.0) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ @@ -20,3 +20,4 @@ expression: sanitized │ Context window: 100% left (750 used / 272K) │ │ Limits: data not available yet │ ╰───────────────────────────────────────────────────────────────────────╯ + diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap 
b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap index a12be950bcc..b173cfe7438 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_shows_stale_limits_message.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭───────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.0.0) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ @@ -22,3 +22,4 @@ expression: sanitized │ Weekly limit: [████████████░░░░░░░░] 60% left (resets 03:34) │ │ Warning: limits may be stale - start new turn to refresh. │ ╰───────────────────────────────────────────────────────────────────────╯ + diff --git a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap index 02ba1adec91..792905520c5 100644 --- a/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap +++ b/codex-rs/tui2/src/status/snapshots/codex_tui__status__tests__status_snapshot_truncates_in_narrow_terminal.snap @@ -5,7 +5,7 @@ expression: sanitized /status ╭────────────────────────────────────────────────────────────────────╮ -│ >_ OpenAI Codex (v0.0.0) │ +│ >_ Codexel (v0.0.0) │ │ │ │ Visit https://chatgpt.com/codex/settings/usage for up-to-date │ │ information on rate limits and credits │ @@ -20,3 +20,4 @@ expression: sanitized │ Context window: 100% left (2.25K used / 272K) │ │ 5h limit: [██████░░░░░░░░░░░░░░] 28% left (resets 03:14) │ ╰────────────────────────────────────────────────────────────────────╯ + diff --git a/codex-rs/tui2/src/status_indicator_widget.rs 
b/codex-rs/tui2/src/status_indicator_widget.rs index 642b9ca2b71..f391b267ac9 100644 --- a/codex-rs/tui2/src/status_indicator_widget.rs +++ b/codex-rs/tui2/src/status_indicator_widget.rs @@ -23,6 +23,7 @@ use crate::tui::FrameRequester; pub(crate) struct StatusIndicatorWidget { /// Animated header text (defaults to "Working"). header: String, + detail_lines: Vec>, show_interrupt_hint: bool, elapsed_running: Duration, @@ -58,6 +59,7 @@ impl StatusIndicatorWidget { ) -> Self { Self { header: String::from("Working"), + detail_lines: Vec::new(), show_interrupt_hint: true, elapsed_running: Duration::ZERO, last_resume_at: Instant::now(), @@ -78,6 +80,14 @@ impl StatusIndicatorWidget { self.header = header; } + pub(crate) fn set_detail_lines(&mut self, lines: Vec>) { + self.detail_lines = lines; + } + + pub(crate) fn clear_detail_lines(&mut self) { + self.detail_lines.clear(); + } + #[cfg(test)] pub(crate) fn header(&self) -> &str { &self.header @@ -136,7 +146,7 @@ impl StatusIndicatorWidget { impl Renderable for StatusIndicatorWidget { fn desired_height(&self, _width: u16) -> u16 { - 1 + 1u16.saturating_add(self.detail_lines.len().try_into().unwrap_or(u16::MAX)) } fn render(&self, area: Rect, buf: &mut Buffer) { @@ -170,7 +180,23 @@ impl Renderable for StatusIndicatorWidget { spans.push(format!("({pretty_elapsed})").dim()); } - Line::from(spans).render_ref(area, buf); + let mut row = area; + row.height = 1; + Line::from(spans).render_ref(row, buf); + + for (idx, line) in self.detail_lines.iter().enumerate() { + let y = area.y.saturating_add((idx as u16).saturating_add(1)); + if y >= area.y.saturating_add(area.height) { + break; + } + let detail_area = Rect { + x: area.x, + y, + width: area.width, + height: 1, + }; + line.render_ref(detail_area, buf); + } } } diff --git a/codex-rs/tui2/src/update_action.rs b/codex-rs/tui2/src/update_action.rs index e2cb7b67334..e29e0f80f2f 100644 --- a/codex-rs/tui2/src/update_action.rs +++ b/codex-rs/tui2/src/update_action.rs @@ -1,23 
+1,24 @@ +#[cfg(not(debug_assertions))] +const CODEX_MANAGED_BY_NPM_ENV_VAR: &str = "CODEX_MANAGED_BY_NPM"; +#[cfg(not(debug_assertions))] +const CODEX_MANAGED_BY_BUN_ENV_VAR: &str = "CODEX_MANAGED_BY_BUN"; + /// Update action the CLI should perform after the TUI exits. #[derive(Debug, Clone, Copy, PartialEq, Eq)] pub enum UpdateAction { - /// Update via `npm install -g @openai/codex@latest`. - NpmGlobalLatest, - /// Update via `bun install -g @openai/codex@latest`. - BunGlobalLatest, - /// Update via `brew upgrade codex`. + /// Update via `npm install -g @ixe1/codexel@latest`. + NpmUpgrade, + /// Update via `bun install -g @ixe1/codexel@latest`. + BunUpgrade, + /// Update via `brew upgrade --cask codexel`. BrewUpgrade, } impl From for codex_tui::update_action::UpdateAction { fn from(action: UpdateAction) -> Self { match action { - UpdateAction::NpmGlobalLatest => { - codex_tui::update_action::UpdateAction::NpmGlobalLatest - } - UpdateAction::BunGlobalLatest => { - codex_tui::update_action::UpdateAction::BunGlobalLatest - } + UpdateAction::NpmUpgrade => codex_tui::update_action::UpdateAction::NpmUpgrade, + UpdateAction::BunUpgrade => codex_tui::update_action::UpdateAction::BunUpgrade, UpdateAction::BrewUpgrade => codex_tui::update_action::UpdateAction::BrewUpgrade, } } @@ -27,9 +28,9 @@ impl UpdateAction { /// Returns the list of command-line arguments for invoking the update. 
pub fn command_args(self) -> (&'static str, &'static [&'static str]) {
         match self {
-            UpdateAction::NpmGlobalLatest => ("npm", &["install", "-g", "@openai/codex"]),
-            UpdateAction::BunGlobalLatest => ("bun", &["install", "-g", "@openai/codex"]),
-            UpdateAction::BrewUpgrade => ("brew", &["upgrade", "codex"]),
+            UpdateAction::NpmUpgrade => ("npm", &["install", "-g", "@ixe1/codexel@latest"]),
+            UpdateAction::BunUpgrade => ("bun", &["install", "-g", "@ixe1/codexel@latest"]),
+            UpdateAction::BrewUpgrade => ("brew", &["upgrade", "--cask", "codexel"]),
         }
     }
 
@@ -44,29 +45,20 @@ impl UpdateAction {
 
 #[cfg(not(debug_assertions))]
 pub(crate) fn get_update_action() -> Option<UpdateAction> {
     let exe = std::env::current_exe().unwrap_or_default();
-    let managed_by_npm = std::env::var_os("CODEX_MANAGED_BY_NPM").is_some();
-    let managed_by_bun = std::env::var_os("CODEX_MANAGED_BY_BUN").is_some();
-    detect_update_action(
-        cfg!(target_os = "macos"),
-        &exe,
-        managed_by_npm,
-        managed_by_bun,
-    )
+    detect_update_action(cfg!(target_os = "macos"), &exe, ManagedBy::from_env())
 }
 
 #[cfg(any(not(debug_assertions), test))]
 fn detect_update_action(
     is_macos: bool,
     current_exe: &std::path::Path,
-    managed_by_npm: bool,
-    managed_by_bun: bool,
+    managed_by: Option<ManagedBy>,
 ) -> Option<UpdateAction> {
-    if managed_by_npm {
-        Some(UpdateAction::NpmGlobalLatest)
-    } else if managed_by_bun {
-        Some(UpdateAction::BunGlobalLatest)
-    } else if is_macos
+    if let Some(managed_by) = managed_by {
+        return Some(managed_by.to_update_action());
+    }
+    if is_macos
         && (current_exe.starts_with("/opt/homebrew") || current_exe.starts_with("/usr/local"))
     {
         Some(UpdateAction::BrewUpgrade)
@@ -75,6 +67,34 @@ fn detect_update_action(
     }
 }
 
+#[cfg(any(not(debug_assertions), test))]
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+enum ManagedBy {
+    Npm,
+    Bun,
+}
+
+#[cfg(any(not(debug_assertions), test))]
+impl ManagedBy {
+    #[cfg(not(debug_assertions))]
+    fn from_env() -> Option<Self> {
+        if std::env::var_os(CODEX_MANAGED_BY_BUN_ENV_VAR).is_some() {
+            return 
Some(Self::Bun); + } + if std::env::var_os(CODEX_MANAGED_BY_NPM_ENV_VAR).is_some() { + return Some(Self::Npm); + } + None + } + + fn to_update_action(self) -> UpdateAction { + match self { + ManagedBy::Npm => UpdateAction::NpmUpgrade, + ManagedBy::Bun => UpdateAction::BunUpgrade, + } + } +} + #[cfg(test)] mod tests { use super::*; @@ -82,34 +102,48 @@ mod tests { #[test] fn detects_update_action_without_env_mutation() { assert_eq!( - detect_update_action(false, std::path::Path::new("/any/path"), false, false), + detect_update_action(false, std::path::Path::new("/any/path"), None), None ); assert_eq!( - detect_update_action(false, std::path::Path::new("/any/path"), true, false), - Some(UpdateAction::NpmGlobalLatest) + detect_update_action( + true, + std::path::Path::new("/opt/homebrew/bin/codexel"), + None + ), + Some(UpdateAction::BrewUpgrade) ); assert_eq!( - detect_update_action(false, std::path::Path::new("/any/path"), false, true), - Some(UpdateAction::BunGlobalLatest) + detect_update_action(true, std::path::Path::new("/usr/local/bin/codexel"), None), + Some(UpdateAction::BrewUpgrade) + ); + } + + #[test] + fn detects_update_action_from_package_manager() { + assert_eq!( + detect_update_action( + false, + std::path::Path::new("/any/path"), + Some(ManagedBy::Npm) + ), + Some(UpdateAction::NpmUpgrade) ); assert_eq!( detect_update_action( - true, - std::path::Path::new("/opt/homebrew/bin/codex"), false, - false + std::path::Path::new("/any/path"), + Some(ManagedBy::Bun) ), - Some(UpdateAction::BrewUpgrade) + Some(UpdateAction::BunUpgrade) ); assert_eq!( detect_update_action( true, - std::path::Path::new("/usr/local/bin/codex"), - false, - false + std::path::Path::new("/opt/homebrew/bin/codexel"), + Some(ManagedBy::Npm) ), - Some(UpdateAction::BrewUpgrade) + Some(UpdateAction::NpmUpgrade) ); } } diff --git a/codex-rs/tui2/src/update_prompt.rs b/codex-rs/tui2/src/update_prompt.rs index 822b0f24d55..3d8a73d5b0c 100644 --- a/codex-rs/tui2/src/update_prompt.rs +++ 
b/codex-rs/tui2/src/update_prompt.rs @@ -205,7 +205,7 @@ impl WidgetRef for &UpdatePromptScreen { column.push( Line::from(vec![ "Release notes: ".dim(), - "https://github.com/openai/codex/releases/latest" + "https://github.com/Ixe1/codexel/releases/latest" .dim() .underlined(), ]) @@ -254,7 +254,7 @@ mod tests { UpdatePromptScreen::new( FrameRequester::test_dummy(), "9.9.9".into(), - UpdateAction::NpmGlobalLatest, + UpdateAction::BrewUpgrade, ) } diff --git a/codex-rs/tui2/src/updates.rs b/codex-rs/tui2/src/updates.rs index 89fd6f32f63..361b2cc024b 100644 --- a/codex-rs/tui2/src/updates.rs +++ b/codex-rs/tui2/src/updates.rs @@ -57,8 +57,8 @@ struct VersionInfo { const VERSION_FILENAME: &str = "version.json"; // We use the latest version from the cask if installation is via homebrew - homebrew does not immediately pick up the latest release and can lag behind. const HOMEBREW_CASK_URL: &str = - "https://raw.githubusercontent.com/Homebrew/homebrew-cask/HEAD/Casks/c/codex.rb"; -const LATEST_RELEASE_URL: &str = "https://api.github.com/repos/openai/codex/releases/latest"; + "https://raw.githubusercontent.com/Homebrew/homebrew-cask/HEAD/Casks/c/codexel.rb"; +const LATEST_RELEASE_URL: &str = "https://api.github.com/repos/Ixe1/codexel/releases/latest"; #[derive(Deserialize, Debug, Clone)] struct ReleaseInfo { @@ -192,7 +192,7 @@ mod tests { #[test] fn parses_version_from_cask_contents() { let cask = r#" - cask "codex" do + cask "codexel" do version "0.55.0" end "#; diff --git a/codex-rs/tui2/src/version.rs b/codex-rs/tui2/src/version.rs index 8c8d108dc61..bdbfedf1794 100644 --- a/codex-rs/tui2/src/version.rs +++ b/codex-rs/tui2/src/version.rs @@ -1,2 +1,2 @@ -/// The current Codex CLI version as embedded at compile time. +/// The current Codexel version as embedded at compile time. 
pub const CODEX_CLI_VERSION: &str = env!("CARGO_PKG_VERSION"); diff --git a/codex-rs/tui2/tooltips.txt b/codex-rs/tui2/tooltips.txt index b15bcdbd12c..94ad487bbb0 100644 --- a/codex-rs/tui2/tooltips.txt +++ b/codex-rs/tui2/tooltips.txt @@ -1,11 +1,11 @@ Use /compact when the conversation gets long to summarize history and free up context. Start a fresh idea with /new; the previous session stays in history. -If a turn went sideways, /undo asks Codex to revert the last changes. +If a turn went sideways, /undo asks Codexel to revert the last changes. Use /feedback to send logs to the maintainers when something looks off. Switch models or reasoning effort quickly with /model. -You can run any shell command from Codex using `!` (e.g. `!ls`) +You can run any shell command from Codexel using `!` (e.g. `!ls`) Type / to open the command popup; Tab autocompletes slash commands and saved prompts. You can define your own `/` commands with custom prompts. More info: https://developers.openai.com/codex/guides/slash-commands#create-your-own-slash-commands-with-custom-prompts When the composer is empty, press Esc to step back and edit your last message; Enter confirms. Paste an image with Ctrl+V to attach it to your next message. -You can resume a previous conversation by running `codex resume` +You can resume a previous conversation by running `codexel resume` diff --git a/codex-rs/windows-sandbox-rs/sandbox_smoketests.py b/codex-rs/windows-sandbox-rs/sandbox_smoketests.py index 0fc049d4edc..a1ce727e00d 100644 --- a/codex-rs/windows-sandbox-rs/sandbox_smoketests.py +++ b/codex-rs/windows-sandbox-rs/sandbox_smoketests.py @@ -1,5 +1,5 @@ # sandbox_smoketests.py -# Run a suite of smoke tests against the Windows sandbox via the Codex CLI +# Run a suite of smoke tests against the Windows sandbox via Codexel # Requires: Python 3.8+ on Windows. No pip requirements. 
import os @@ -10,35 +10,35 @@ from typing import List, Optional, Tuple def _resolve_codex_cmd() -> List[str]: - """Resolve the Codex CLI to invoke `codex sandbox windows`. + """Resolve Codexel to invoke `codexel sandbox windows`. Prefer local builds (debug first), then fall back to PATH. - Returns the argv prefix to run Codex. + Returns the argv prefix to run Codexel. """ root = Path(__file__).parent ws_root = root.parent cargo_target = os.environ.get("CARGO_TARGET_DIR") candidates = [ - ws_root / "target" / "debug" / "codex.exe", - ws_root / "target" / "release" / "codex.exe", + ws_root / "target" / "debug" / "codexel.exe", + ws_root / "target" / "release" / "codexel.exe", ] if cargo_target: cargo_base = Path(cargo_target) candidates.extend([ - cargo_base / "debug" / "codex.exe", - cargo_base / "release" / "codex.exe", + cargo_base / "debug" / "codexel.exe", + cargo_base / "release" / "codexel.exe", ]) for candidate in candidates: if candidate.exists(): return [str(candidate)] - if shutil.which("codex"): - return ["codex"] + if shutil.which("codexel"): + return ["codexel"] raise FileNotFoundError( - "Codex CLI not found. Build it first, e.g.\n" + "Codexel not found. 
Build it first, e.g.\n" " cargo build -p codex-cli --release\n" "or for debug:\n" " cargo build -p codex-cli\n" @@ -69,7 +69,7 @@ def run_sbx( env.update(ENV_BASE) if env_extra: env.update(env_extra) - # Map policy to codex CLI flags + # Map policy to Codexel CLI flags # read-only => default; workspace-write => --full-auto if policy not in ("read-only", "workspace-write"): raise ValueError(f"unknown policy: {policy}") diff --git a/codex-rs/windows-sandbox-rs/src/setup_main_win.rs b/codex-rs/windows-sandbox-rs/src/setup_main_win.rs index 9a2f5ec245b..0ed0adea031 100644 --- a/codex-rs/windows-sandbox-rs/src/setup_main_win.rs +++ b/codex-rs/windows-sandbox-rs/src/setup_main_win.rs @@ -680,7 +680,11 @@ pub fn main() -> Result<()> { let ret = real_main(); if let Err(e) = &ret { // Best-effort: log unexpected top-level errors. - if let Ok(codex_home) = std::env::var("CODEX_HOME") { + if let Some(codex_home) = std::env::var("CODEXEL_HOME") + .or_else(|_| std::env::var("CODEX_HOME")) + .ok() + .filter(|val| !val.is_empty()) + { let sbx_dir = sandbox_dir(Path::new(&codex_home)); let _ = std::fs::create_dir_all(&sbx_dir); let log_path = sbx_dir.join(LOG_FILE_NAME); diff --git a/docs/CLA.md b/docs/CLA.md index 804f202c0d3..33581396015 100644 --- a/docs/CLA.md +++ b/docs/CLA.md @@ -4,7 +4,7 @@ _Based on the Apache Software Foundation Individual CLA v 2.2._ By commenting **“I have read the CLA Document and I hereby sign the CLA”** on a Pull Request, **you (“Contributor”) agree to the following terms** for any -past and future “Contributions” submitted to the **OpenAI Codex CLI project +past and future “Contributions” submitted to the **OpenAI Codexel project (the “Project”)**. 
--- diff --git a/docs/advanced.md b/docs/advanced.md index 26ffca8a92e..50988e6c0c7 100644 --- a/docs/advanced.md +++ b/docs/advanced.md @@ -1,6 +1,6 @@ ## Advanced -If you already lean on Codex every day and just need a little more control, this page collects the knobs you are most likely to reach for: tweak defaults in [Config](./config.md), add extra tools through [Model Context Protocol support](#model-context-protocol), and script full runs with [`codex exec`](./exec.md). Jump to the section you need and keep building. +If you already lean on Codexel every day and just need a little more control, this page collects the knobs you are most likely to reach for: tweak defaults in [Config](./config.md), add extra tools through [Model Context Protocol support](#model-context-protocol), and script full runs with [`codexel exec`](./exec.md). Jump to the section you need and keep building. ## Config quickstart @@ -8,62 +8,62 @@ Most day-to-day tuning lives in `config.toml`: set approval + sandbox presets, p ## Tracing / verbose logging -Because Codex is written in Rust, it honors the `RUST_LOG` environment variable to configure its logging behavior. +Because Codexel is written in Rust, it honors the `RUST_LOG` environment variable to configure its logging behavior. 
-The TUI defaults to `RUST_LOG=codex_core=info,codex_tui=info,codex_rmcp_client=info` and log messages are written to `~/.codex/log/codex-tui.log`, so you can leave the following running in a separate terminal to monitor log messages as they are written: +The TUI defaults to `RUST_LOG=codex_core=info,codex_tui=info,codex_rmcp_client=info` and log messages are written to `~/.codexel/log/codex-tui.log`, so you can leave the following running in a separate terminal to monitor log messages as they are written: ```bash -tail -F ~/.codex/log/codex-tui.log +tail -F ~/.codexel/log/codex-tui.log ``` -By comparison, the non-interactive mode (`codex exec`) defaults to `RUST_LOG=error`, but messages are printed inline, so there is no need to monitor a separate file. +By comparison, the non-interactive mode (`codexel exec`) defaults to `RUST_LOG=error`, but messages are printed inline, so there is no need to monitor a separate file. See the Rust documentation on [`RUST_LOG`](https://docs.rs/env_logger/latest/env_logger/#enabling-logging) for more information on the configuration options. ## Model Context Protocol (MCP) -The Codex CLI and IDE extension is a MCP client which means that it can be configured to connect to MCP servers. For more information, refer to the [`config docs`](./config.md#mcp-integration). +Codexel and the IDE extension are MCP clients which means they can be configured to connect to MCP servers. For more information, refer to the [`config docs`](./config.md#mcp-integration). -## Using Codex as an MCP Server +## Using Codexel as an MCP Server -The Codex CLI can also be run as an MCP _server_ via `codex mcp-server`. For example, you can use `codex mcp-server` to make Codex available as a tool inside of a multi-agent framework like the OpenAI [Agents SDK](https://platform.openai.com/docs/guides/agents). Use `codex mcp` separately to add/list/get/remove MCP server launchers in your configuration. 
+Codexel can also be run as an MCP _server_ via `codexel mcp-server`. For example, you can use `codexel mcp-server` to make Codexel available as a tool inside of a multi-agent framework like the OpenAI [Agents SDK](https://platform.openai.com/docs/guides/agents). Use `codexel mcp` separately to add/list/get/remove MCP server launchers in your configuration. -### Codex MCP Server Quickstart +### Codexel MCP Server Quickstart You can launch a Codex MCP server with the [Model Context Protocol Inspector](https://modelcontextprotocol.io/legacy/tools/inspector): ```bash -npx @modelcontextprotocol/inspector codex mcp-server +npx @modelcontextprotocol/inspector codexel mcp-server ``` Send a `tools/list` request and you will see that there are two tools available: -**`codex`** - Run a Codex session. Accepts configuration parameters matching the Codex Config struct. The `codex` tool takes the following properties: +**`codex`** - Run a Codexel session. Accepts configuration parameters matching the Codexel config schema. The `codex` tool takes the following properties: -| Property | Type | Description | -| ----------------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------------ | -| **`prompt`** (required) | string | The initial user prompt to start the Codex conversation. | -| `approval-policy` | string | Approval policy for shell commands generated by the model: `untrusted`, `on-failure`, `on-request`, `never`. | -| `base-instructions` | string | The set of instructions to use instead of the default ones. | -| `config` | object | Individual [config settings](https://github.com/openai/codex/blob/main/docs/config.md#config) that will override what is in `$CODEX_HOME/config.toml`. | -| `cwd` | string | Working directory for the session. If relative, resolved against the server process's current directory. 
| -| `model` | string | Optional override for the model name (e.g. `o3`, `o4-mini`). | -| `profile` | string | Configuration profile from `config.toml` to specify default options. | -| `sandbox` | string | Sandbox mode: `read-only`, `workspace-write`, or `danger-full-access`. | +| Property | Type | Description | +| ----------------------- | ------ | ------------------------------------------------------------------------------------------------------------------------------------------------- | +| **`prompt`** (required) | string | The initial user prompt to start the Codexel conversation. | +| `approval-policy` | string | Approval policy for shell commands generated by the model: `untrusted`, `on-failure`, `on-request`, `never`. | +| `base-instructions` | string | The set of instructions to use instead of the default ones. | +| `config` | object | Individual [config settings](./config.md#config) that will override what is in `$CODEXEL_HOME/config.toml` (or legacy `$CODEX_HOME/config.toml`). | +| `cwd` | string | Working directory for the session. If relative, resolved against the server process's current directory. | +| `model` | string | Optional override for the model name (e.g. `o3`, `o4-mini`). | +| `profile` | string | Configuration profile from `config.toml` to specify default options. | +| `sandbox` | string | Sandbox mode: `read-only`, `workspace-write`, or `danger-full-access`. | -**`codex-reply`** - Continue a Codex session by providing the conversation id and prompt. The `codex-reply` tool takes the following properties: +**`codex-reply`** - Continue a Codexel session by providing the conversation id and prompt. The `codex-reply` tool takes the following properties: -| Property | Type | Description | -| ------------------------------- | ------ | -------------------------------------------------------- | -| **`prompt`** (required) | string | The next user prompt to continue the Codex conversation. 
| -| **`conversationId`** (required) | string | The id of the conversation to continue. | +| Property | Type | Description | +| ------------------------------- | ------ | ---------------------------------------------------------- | +| **`prompt`** (required) | string | The next user prompt to continue the Codexel conversation. | +| **`conversationId`** (required) | string | The id of the conversation to continue. | ### Trying it Out > [!TIP] > Codex often takes a few minutes to run. To accommodate this, adjust the MCP inspector's Request and Total timeouts to 600000ms (10 minutes) under ⛭ Configuration. -Use the MCP inspector and `codex mcp-server` to build a simple tic-tac-toe game with the following settings: +Use the MCP inspector and `codexel mcp-server` to build a simple tic-tac-toe game with the following settings: **approval-policy:** never diff --git a/docs/agents_md.md b/docs/agents_md.md index ff2243a0ca1..46c113d7f0b 100644 --- a/docs/agents_md.md +++ b/docs/agents_md.md @@ -1,38 +1,38 @@ # AGENTS.md Discovery -Codex uses [`AGENTS.md`](https://agents.md/) files to gather helpful guidance before it starts assisting you. This page explains how those files are discovered and combined, so you can decide where to place your instructions. +Codexel uses [`AGENTS.md`](https://agents.md/) files to gather helpful guidance before it starts assisting you. This page explains how those files are discovered and combined, so you can decide where to place your instructions. -## Global Instructions (`~/.codex`) +## Global Instructions (`~/.codexel`) -- Codex looks for global guidance in your Codex home directory (usually `~/.codex`; set `CODEX_HOME` to change it). For a quick overview, see the [Memory with AGENTS.md section](../docs/getting-started.md#memory-with-agentsmd) in the getting started guide. -- If an `AGENTS.override.md` file exists there, it takes priority. If not, Codex falls back to `AGENTS.md`. -- Only the first non-empty file is used. 
Other filenames, such as `instructions.md`, have no effect unless Codex is specifically instructed to use them. -- Whatever Codex finds here stays active for the whole session, and Codex combines it with any project-specific instructions it discovers. +- Codexel looks for global guidance in your Codexel home directory (usually `~/.codexel`; set `CODEXEL_HOME` to change it, or legacy `CODEX_HOME`). For a quick overview, see the [Memory with AGENTS.md section](../docs/getting-started.md#memory-with-agentsmd) in the getting started guide. +- If an `AGENTS.override.md` file exists there, it takes priority. If not, Codexel falls back to `AGENTS.md`. +- Only the first non-empty file is used. Other filenames, such as `instructions.md`, have no effect unless Codexel is specifically instructed to use them. +- Whatever Codexel finds here stays active for the whole session, and Codexel combines it with any project-specific instructions it discovers. ## Project Instructions (per-repository) -When you work inside a project, Codex builds on those global instructions by collecting project docs: +When you work inside a project, Codexel builds on those global instructions by collecting project docs: - The search starts at the repository root and continues down to your current directory. If a Git root is not found, only the current directory is checked. -- In each directory along that path, Codex looks for `AGENTS.override.md` first, then `AGENTS.md`, and then any fallback names listed in your Codex configuration (see [`project_doc_fallback_filenames`](../docs/config.md#project_doc_fallback_filenames)). At most one file per directory is included. +- In each directory along that path, Codexel looks for `AGENTS.override.md` first, then `AGENTS.md`, and then any fallback names listed in your Codexel configuration (see [`project_doc_fallback_filenames`](../docs/config.md#project_doc_fallback_filenames)). At most one file per directory is included. 
- Files are read in order from root to leaf and joined together with blank lines. Empty files are skipped, and very large files are truncated once the combined size reaches 32 KiB (the default [`project_doc_max_bytes`](../docs/config.md#project_doc_max_bytes) limit). If you need more space, split guidance across nested directories or raise the limit in your configuration. ## How They Come Together -Before Codex gets to work, the instructions are ingested in precedence order: global guidance from `~/.codex` comes first, then each project doc from the repository root down to your current directory. Guidance in deeper directories overrides earlier layers, so the most specific file controls the final behavior. +Before Codexel gets to work, the instructions are ingested in precedence order: global guidance from `~/.codexel` comes first, then each project doc from the repository root down to your current directory. Guidance in deeper directories overrides earlier layers, so the most specific file controls the final behavior. ### Priority Summary 1. Global `AGENTS.override.md` (if present), otherwise global `AGENTS.md`. 2. For each directory from the repository root to your working directory: `AGENTS.override.md`, then `AGENTS.md`, then configured fallback names. -Only these filenames are considered. To use a different name, add it to the fallback list in your Codex configuration or rename the file accordingly. +Only these filenames are considered. To use a different name, add it to the fallback list in your Codexel configuration or rename the file accordingly. ## Fallback Filenames -Codex can look for additional instruction filenames beyond the two defaults if you add them to `project_doc_fallback_filenames` in your Codex configuration. Each fallback is checked after `AGENTS.override.md` and `AGENTS.md` in every directory along the search path. 
+Codexel can look for additional instruction filenames beyond the two defaults if you add them to `project_doc_fallback_filenames` in your Codexel configuration. Each fallback is checked after `AGENTS.override.md` and `AGENTS.md` in every directory along the search path. -Example: suppose your configuration lists `["TEAM_GUIDE.md", ".agents.md"]`. Inside each directory Codex will look in this order: +Example: suppose your configuration lists `["TEAM_GUIDE.md", ".agents.md"]`. Inside each directory Codexel will look in this order: 1. `AGENTS.override.md` 2. `AGENTS.md` @@ -41,7 +41,7 @@ Example: suppose your configuration lists `["TEAM_GUIDE.md", ".agents.md"]`. Ins If the repository root contains `TEAM_GUIDE.md` and the `backend/` directory contains `AGENTS.override.md`, the overall instructions will combine the root `TEAM_GUIDE.md` (because no override or default file was present there) with the `backend/AGENTS.override.md` file (which takes precedence over the fallback names). -You can configure those fallbacks in `~/.codex/config.toml` (or another profile) like this: +You can configure those fallbacks in `~/.codexel/config.toml` (or another profile) like this: ```toml project_doc_fallback_filenames = ["TEAM_GUIDE.md", ".agents.md"] diff --git a/docs/authentication.md b/docs/authentication.md index 617161f6488..2b67e93f978 100644 --- a/docs/authentication.md +++ b/docs/authentication.md @@ -5,13 +5,13 @@ If you prefer to pay-as-you-go, you can still authenticate with your OpenAI API key: ```shell -printenv OPENAI_API_KEY | codex login --with-api-key +printenv OPENAI_API_KEY | codexel login --with-api-key ``` Alternatively, read from a file: ```shell -codex login --with-api-key < my_key.txt +codexel login --with-api-key < my_key.txt ``` The legacy `--api-key` flag now exits with an error instructing you to use `--with-api-key` so that the key never appears in shell history or process listings. 
@@ -20,11 +20,11 @@ This key must, at minimum, have write access to the Responses API. ## Migrating to ChatGPT login from API key -If you've used the Codex CLI before with usage-based billing via an API key and want to switch to using your ChatGPT plan, follow these steps: +If you've used Codexel before with usage-based billing via an API key and want to switch to using your ChatGPT plan, follow these steps: -1. Update the CLI and ensure `codex --version` is `0.20.0` or later -2. Delete `~/.codex/auth.json` (on Windows: `C:\\Users\\USERNAME\\.codex\\auth.json`) -3. Run `codex login` again +1. Update the CLI and ensure `codexel --version` is `0.20.0` or later +2. Delete `~/.codexel/auth.json` (on Windows: `C:\\Users\\USERNAME\\.codexel\\auth.json`) +3. Run `codexel login` again ## Connecting on a "Headless" Machine @@ -32,37 +32,37 @@ Today, the login process entails running a server on `localhost:1455`. If you ar ### Authenticate locally and copy your credentials to the "headless" machine -The easiest solution is likely to run through the `codex login` process on your local machine such that `localhost:1455` _is_ accessible in your web browser. When you complete the authentication process, an `auth.json` file should be available at `$CODEX_HOME/auth.json` (on Mac/Linux, `$CODEX_HOME` defaults to `~/.codex` whereas on Windows, it defaults to `%USERPROFILE%\\.codex`). +The easiest solution is likely to run through the `codexel login` process on your local machine such that `localhost:1455` _is_ accessible in your web browser. When you complete the authentication process, an `auth.json` file should be available at `$CODEXEL_HOME/auth.json` (on Mac/Linux, `$CODEXEL_HOME` defaults to `~/.codexel` whereas on Windows, it defaults to `%USERPROFILE%\\.codexel`). For compatibility, Codexel also supports the legacy `$CODEX_HOME` environment variable. 
-Because the `auth.json` file is not tied to a specific host, once you complete the authentication flow locally, you can copy the `$CODEX_HOME/auth.json` file to the headless machine and then `codex` should "just work" on that machine. Note to copy a file to a Docker container, you can do: +Because the `auth.json` file is not tied to a specific host, once you complete the authentication flow locally, you can copy the `$CODEXEL_HOME/auth.json` file to the headless machine and then `codexel` should "just work" on that machine. Note to copy a file to a Docker container, you can do: ```shell # substitute MY_CONTAINER with the name or id of your Docker container: CONTAINER_HOME=$(docker exec MY_CONTAINER printenv HOME) -docker exec MY_CONTAINER mkdir -p "$CONTAINER_HOME/.codex" -docker cp auth.json MY_CONTAINER:"$CONTAINER_HOME/.codex/auth.json" +docker exec MY_CONTAINER mkdir -p "$CONTAINER_HOME/.codexel" +docker cp auth.json MY_CONTAINER:"$CONTAINER_HOME/.codexel/auth.json" ``` whereas if you are `ssh`'d into a remote machine, you likely want to use [`scp`](https://en.wikipedia.org/wiki/Secure_copy_protocol): ```shell -ssh user@remote 'mkdir -p ~/.codex' -scp ~/.codex/auth.json user@remote:~/.codex/auth.json +ssh user@remote 'mkdir -p ~/.codexel' +scp ~/.codexel/auth.json user@remote:~/.codexel/auth.json ``` or try this one-liner: ```shell -ssh user@remote 'mkdir -p ~/.codex && cat > ~/.codex/auth.json' < ~/.codex/auth.json +ssh user@remote 'mkdir -p ~/.codexel && cat > ~/.codexel/auth.json' < ~/.codexel/auth.json ``` ### Connecting through VPS or remote -If you run Codex on a remote machine (VPS/server) without a local browser, the login helper starts a server on `localhost:1455` on the remote host. To complete login in your local browser, forward that port to your machine before starting the login flow: +If you run Codexel on a remote machine (VPS/server) without a local browser, the login helper starts a server on `localhost:1455` on the remote host. 
To complete login in your local browser, forward that port to your machine before starting the login flow: ```bash # From your local machine ssh -L 1455:localhost:1455 @ ``` -Then, in that SSH session, run `codex` and select "Sign in with ChatGPT". When prompted, open the printed URL (it will be `http://localhost:1455/...`) in your local browser. The traffic will be tunneled to the remote server. +Then, in that SSH session, run `codexel` and select "Sign in with ChatGPT". When prompted, open the printed URL (it will be `http://localhost:1455/...`) in your local browser. The traffic will be tunneled to the remote server. diff --git a/docs/config.md b/docs/config.md index 8d4cfe349ed..bf8dd9ecad1 100644 --- a/docs/config.md +++ b/docs/config.md @@ -1,6 +1,6 @@ # Config -Codex configuration gives you fine-grained control over the model, execution environment, and integrations available to the CLI. Use this guide alongside the workflows in [`codex exec`](./exec.md), the guardrails in [Sandbox & approvals](./sandbox.md), and project guidance from [AGENTS.md discovery](./agents_md.md). +Codexel configuration gives you fine-grained control over the model, execution environment, and integrations available to the CLI. Use this guide alongside the workflows in [`codexel exec`](./exec.md), the guardrails in [Sandbox & approvals](./sandbox.md), and project guidance from [AGENTS.md discovery](./agents_md.md). ## Quick navigation @@ -18,18 +18,18 @@ Codex supports several mechanisms for setting config values: - A generic `-c`/`--config` flag that takes a `key=value` pair, such as `--config model="o3"`. - The key can contain dots to set a value deeper than the root, e.g. `--config model_providers.openai.wire_api="chat"`. - For consistency with `config.toml`, values are a string in TOML format rather than JSON format, so use `key='{a = 1, b = 2}'` rather than `key='{"a": 1, "b": 2}'`. 
- - The quotes around the value are necessary, as without them your shell would split the config argument on spaces, resulting in `codex` receiving `-c key={a` with (invalid) additional arguments `=`, `1,`, `b`, `=`, `2}`. + - The quotes around the value are necessary, as without them your shell would split the config argument on spaces, resulting in `codexel` receiving `-c key={a` with (invalid) additional arguments `=`, `1,`, `b`, `=`, `2}`. - Values can contain any TOML object, such as `--config shell_environment_policy.include_only='["PATH", "HOME", "USER"]'`. - If `value` cannot be parsed as a valid TOML value, it is treated as a string value. This means that `-c model='"o3"'` and `-c model=o3` are equivalent. - In the first case, the value is the TOML string `"o3"`, while in the second the value is `o3`, which is not valid TOML and therefore treated as the TOML string `"o3"`. - Because quotes are interpreted by one's shell, `-c key="true"` will be correctly interpreted in TOML as `key = true` (a boolean) and not `key = "true"` (a string). If for some reason you needed the string `"true"`, you would need to use `-c key='"true"'` (note the two sets of quotes). -- The `$CODEX_HOME/config.toml` configuration file where the `CODEX_HOME` environment value defaults to `~/.codex`. (Note `CODEX_HOME` will also be where logs and other Codex-related information are stored.) +- The `$CODEXEL_HOME/config.toml` configuration file where the `CODEXEL_HOME` environment value defaults to `~/.codexel`. (For compatibility, `CODEX_HOME` is also supported; when set, it overrides the default.) Both the `--config` flag and the `config.toml` file support the following options: ## Feature flags -Optional and experimental capabilities are toggled via the `[features]` table in `$CODEX_HOME/config.toml`. If you see a deprecation notice mentioning a legacy key (for example `experimental_use_exec_command_tool`), move the setting into `[features]` or pass `--enable `. 
+Optional and experimental capabilities are toggled via the `[features]` table in `$CODEXEL_HOME/config.toml` (or legacy `$CODEX_HOME/config.toml`). If you see a deprecation notice mentioning a legacy key (for example `experimental_use_exec_command_tool`), move the setting into `[features]` or pass `--enable `. ```toml [features] @@ -66,6 +66,15 @@ The model that Codex should use. model = "gpt-5.1" # overrides the default ("gpt-5.1-codex-max" across platforms) ``` +### plan_model + +Optional model to use for planning flows such as `/plan` (and plan-variant subagents). When unset, planning uses the active `model`. + +```toml +# Use a cheaper/faster model for planning, while keeping a stronger model for coding turns. +plan_model = "gpt-5.1-codex" +``` + ### model_providers This option lets you add to the default set of model providers bundled with Codex. The map key becomes the value you use with `model_provider` to select the provider. @@ -97,7 +106,7 @@ wire_api = "chat" query_params = {} ``` -Note this makes it possible to use Codex CLI with non-OpenAI models, so long as they use a wire API that is compatible with the OpenAI chat completions API. For example, you could define the following provider to use Codex CLI with Ollama running locally: +Note this makes it possible to use Codexel with non-OpenAI models, so long as they use a wire API that is compatible with the OpenAI chat completions API. For example, you could define the following provider to use Codexel with Ollama running locally: ```toml [model_providers.ollama] @@ -200,6 +209,10 @@ If the selected model is known to support reasoning (for example: `o3`, `o4-mini Note: to minimize reasoning, choose `"minimal"`. +### plan_model_reasoning_effort + +Optional reasoning effort to use for planning flows such as `/plan`. When unset, planning uses `model_reasoning_effort`. 
+ ### model_reasoning_summary If the model name starts with `"o"` (as in `"o3"` or `"o4-mini"`) or `"codex"`, reasoning is enabled by default when using the Responses API. As explained in the [OpenAI Platform documentation](https://platform.openai.com/docs/guides/reasoning?api-mode=responses#reasoning-summaries), this can be set to: @@ -245,7 +258,7 @@ model_supports_reasoning_summaries = true The size of the context window for the model, in tokens. -In general, Codex knows the context window for the most common OpenAI models, but if you are using a new model with an old version of the Codex CLI, then you can use `model_context_window` to tell Codex what value to use to determine how much context is left during a conversation. +In general, Codexel knows the context window for the most common OpenAI models, but if you are using a new model with an old version of Codexel, then you can use `model_context_window` to tell Codexel what value to use to determine how much context is left during a conversation. ### oss_provider @@ -472,7 +485,7 @@ Streamable HTTP connections always use the experimental Rust MCP client under th rmcp_client = true ``` -After enabling it, run `codex mcp login ` when the server supports OAuth. +After enabling it, run `codexel mcp login ` when the server supports OAuth. 
#### Other configuration options @@ -495,27 +508,27 @@ When both `enabled_tools` and `disabled_tools` are specified, Codex first restri ```shell # List all available commands -codex mcp --help +codexel mcp --help # Add a server (env can be repeated; `--` separates the launcher command) -codex mcp add docs -- docs-server --port 4000 +codexel mcp add docs -- docs-server --port 4000 # List configured servers (pretty table or JSON) -codex mcp list -codex mcp list --json +codexel mcp list +codexel mcp list --json # Show one server (table or JSON) -codex mcp get docs -codex mcp get docs --json +codexel mcp get docs +codexel mcp get docs --json # Remove a server -codex mcp remove docs +codexel mcp remove docs # Log in to a streamable HTTP server that supports oauth -codex mcp login SERVER_NAME +codexel mcp login SERVER_NAME # Log out from a streamable HTTP server that supports oauth -codex mcp logout SERVER_NAME +codexel mcp logout SERVER_NAME ``` ### Examples of useful MCPs @@ -633,7 +646,7 @@ Set `otel.exporter` to control where events go: ``` Both OTLP exporters accept an optional `tls` block so you can trust a custom CA -or enable mutual TLS. Relative paths are resolved against `~/.codex/`: +or enable mutual TLS. 
Relative paths are resolved against `~/.codexel/`: ```toml [otel.exporter."otlp-http"] @@ -645,8 +658,8 @@ protocol = "binary" [otel.exporter."otlp-http".tls] ca-certificate = "certs/otel-ca.pem" -client-certificate = "/etc/codex/certs/client.pem" -client-private-key = "/etc/codex/certs/client-key.pem" +client-certificate = "/etc/codexel/certs/client.pem" +client-private-key = "/etc/codexel/certs/client-key.pem" ``` If the exporter is `none` nothing is written anywhere; otherwise you must run or point to your @@ -737,10 +750,10 @@ if __name__ == "__main__": sys.exit(main()) ``` -To have Codex use this script for notifications, you would configure it via `notify` in `~/.codex/config.toml` using the appropriate path to `notify.py` on your computer: +To have Codexel use this script for notifications, you would configure it via `notify` in `~/.codexel/config.toml` using the appropriate path to `notify.py` on your computer: ```toml -notify = ["python3", "/Users/mbolin/.codex/notify.py"] +notify = ["python3", "/Users/mbolin/.codexel/notify.py"] ``` > [!NOTE] @@ -818,11 +831,11 @@ Users can specify config values at multiple levels. Order of precedence is as fo 1. custom command-line argument, e.g., `--model o3` 2. as part of a profile, where the `--profile` is specified via a CLI (or in the config file itself) 3. as an entry in `config.toml`, e.g., `model = "o3"` -4. the default value that comes with Codex CLI (i.e., Codex CLI defaults to `gpt-5.1-codex-max`) +4. the default value that comes with Codexel (i.e., Codexel defaults to `gpt-5.1-codex-max`) ### history -By default, Codex CLI records messages sent to the model in `$CODEX_HOME/history.jsonl`. Note that on UNIX, the file permissions are set to `o600`, so it should only be readable and writable by the owner. +By default, Codexel records messages sent to the model in `$CODEXEL_HOME/history.jsonl` (or legacy `$CODEX_HOME/history.jsonl`). 
Note that on UNIX, the file permissions are set to `o600`, so it should only be readable and writable by the owner. To disable this behavior, configure `[history]` as follows: @@ -918,13 +931,13 @@ cli_auth_credentials_store = "keyring" Valid values: -- `file` (default) – Store credentials in `auth.json` under `$CODEX_HOME`. +- `file` (default) – Store credentials in `auth.json` under `$CODEXEL_HOME` (or legacy `$CODEX_HOME`). - `keyring` – Store credentials in the operating system keyring via the [`keyring` crate](https://crates.io/crates/keyring); the CLI reports an error if secure storage is unavailable. Backends by OS: - macOS: macOS Keychain - Windows: Windows Credential Manager - Linux: DBus‑based Secret Service, the kernel keyutils, or a combination - FreeBSD/OpenBSD: DBus‑based Secret Service -- `auto` – Save credentials to the operating system keyring when available; otherwise, fall back to `auth.json` under `$CODEX_HOME`. +- `auto` – Save credentials to the operating system keyring when available; otherwise, fall back to `auth.json` under `$CODEXEL_HOME` (or legacy `$CODEX_HOME`). ## Config reference @@ -979,6 +992,8 @@ Valid values: | `check_for_update_on_startup` | boolean | Check for Codex updates on startup (default: true). Set to `false` only if updates are centrally managed. | | `show_raw_agent_reasoning` | boolean | Show raw reasoning (when available). | | `model_reasoning_effort` | `minimal` \| `low` \| `medium` \| `high`\|`xhigh` | Responses API reasoning effort. | +| `plan_model` | string | Optional model for planning flows (defaults to `model`). | +| `plan_model_reasoning_effort` | `minimal` \| `low` \| `medium` \| `high`\|`xhigh` | Optional reasoning effort for planning flows (defaults to `model_reasoning_effort`). | | `model_reasoning_summary` | `auto` \| `concise` \| `detailed` \| `none` | Reasoning summaries. | | `model_verbosity` | `low` \| `medium` \| `high` | GPT‑5 text verbosity (Responses API). 
| | `model_supports_reasoning_summaries` | boolean | Force‑enable reasoning summaries. | diff --git a/docs/contributing.md b/docs/contributing.md index ec188631d1b..149edbb4c14 100644 --- a/docs/contributing.md +++ b/docs/contributing.md @@ -14,11 +14,22 @@ If you want to add a new feature or change the behavior of an existing one, plea - Keep your changes focused. Multiple unrelated fixes should be opened as separate PRs. - Ensure your change is free of lint warnings and test failures. +### Changelog (Codexel fork) + +- The changelog tracks Codexel-only changes (commits not in `upstream/main`). +- Refresh generated Details blocks with `scripts/gen-changelog.ps1` (Windows) or + `bash scripts/gen-changelog.sh` (macOS/Linux). +- Use `--check` in CI to ensure the changelog is up to date. +- When cutting a release, pin the release commit and upstream baseline in + `CHANGELOG.md`, then update the generated range for that release section. +- Rollback is just reverting `CHANGELOG.md`, `cliff.toml`, and the generator + scripts if the changelog workflow needs to be removed. + ### Writing high-impact code changes 1. **Start with an issue.** Open a new one or comment on an existing discussion so we can agree on the solution before code is written. 2. **Add or update tests.** Every new feature or bug-fix should come with test coverage that fails before your change and passes afterwards. 100% coverage is not required, but aim for meaningful assertions. -3. **Document behaviour.** If your change affects user-facing behaviour, update the README, inline help (`codex --help`), or relevant example projects. +3. **Document behaviour.** If your change affects user-facing behaviour, update the README, inline help (`codexel --help`), or relevant example projects. Avoid changing the system prompt unless absolutely necessary; prefer adding developer-instruction guidance that is easy to merge and iterate on. 4. **Keep commits atomic.** Each commit should compile and the tests should pass. 
This makes reviews and potential rollbacks easier. ### Opening a pull request @@ -46,7 +57,7 @@ If you want to add a new feature or change the behavior of an existing one, plea If you run into problems setting up the project, would like feedback on an idea, or just want to say _hi_ - please open a Discussion or jump into the relevant issue. We are happy to help. -Together we can make Codex CLI an incredible tool. **Happy hacking!** :rocket: +Together we can make Codexel an incredible tool. **Happy hacking!** :rocket: ### Contributor license agreement (CLA) diff --git a/docs/example-config.md b/docs/example-config.md index fd69faddde8..2a7d8bd4697 100644 --- a/docs/example-config.md +++ b/docs/example-config.md @@ -1,11 +1,11 @@ # Example config.toml -Use this example configuration as a starting point. For an explanation of each field and additional context, see [Configuration](./config.md). Copy the snippet below to `~/.codex/config.toml` and adjust values as needed. +Use this example configuration as a starting point. For an explanation of each field and additional context, see [Configuration](./config.md). Copy the snippet below to `~/.codexel/config.toml` and adjust values as needed. ```toml -# Codex example configuration (config.toml) +# Codexel example configuration (config.toml) # -# This file lists all keys Codex reads from config.toml, their default values, +# This file lists all keys Codexel reads from config.toml, their default values, # and concise explanations. Values here mirror the effective defaults compiled # into the CLI. Adjust as needed. # @@ -18,16 +18,19 @@ Use this example configuration as a starting point. For an explanation of each f # Core Model Selection ################################################################################ -# Primary model used by Codex. Default: "gpt-5.1-codex-max" on all platforms. +# Primary model used by Codexel. Default: "gpt-5.1-codex-max" on all platforms. 
model = "gpt-5.1-codex-max" +# Optional model used for `/plan` (defaults to `model` when unset). +# plan_model = "gpt-5.1-codex" + # Model used by the /review feature (code reviews). Default: "gpt-5.1-codex-max". review_model = "gpt-5.1-codex-max" # Provider id selected from [model_providers]. Default: "openai". model_provider = "openai" -# Optional manual model metadata. When unset, Codex auto-detects from model. +# Optional manual model metadata. When unset, Codexel auto-detects from model. # Uncomment to force values. # model_context_window = 128000 # tokens; default: auto for model # model_auto_compact_token_limit = 0 # disable/override auto; default: model family specific @@ -40,6 +43,9 @@ model_provider = "openai" # Reasoning effort: minimal | low | medium | high | xhigh (default: medium; xhigh on gpt-5.1-codex-max and gpt-5.2) model_reasoning_effort = "medium" +# Optional reasoning effort for `/plan` (defaults to `model_reasoning_effort` when unset). +# plan_model_reasoning_effort = "medium" + # Reasoning summary: auto | concise | detailed | none (default: auto) model_reasoning_summary = "auto" @@ -362,6 +368,6 @@ exporter = "none" # [otel.exporter."otlp-http".tls] # ca-certificate = "certs/otel-ca.pem" -# client-certificate = "/etc/codex/certs/client.pem" -# client-private-key = "/etc/codex/certs/client-key.pem" +# client-certificate = "/etc/codexel/certs/client.pem" +# client-private-key = "/etc/codexel/certs/client-key.pem" ``` diff --git a/docs/exec.md b/docs/exec.md index 5a17155a829..35a457c78e9 100644 --- a/docs/exec.md +++ b/docs/exec.md @@ -1,24 +1,28 @@ ## Non-interactive mode -Use Codex in non-interactive mode to automate common workflows. +Use Codexel in non-interactive mode to automate common workflows. ```shell -codex exec "count the total number of lines of code in this project" +codexel exec "count the total number of lines of code in this project" ``` -In non-interactive mode, Codex does not ask for command or edit approvals. 
By default it runs in `read-only` mode, so it cannot edit files or run commands that require network access. +In non-interactive mode, Codexel does not ask for command or edit approvals. By default it runs in `read-only` mode, so it cannot edit files or run commands that require network access. -Use `codex exec --full-auto` to allow file edits. Use `codex exec --sandbox danger-full-access` to allow edits and networked commands. +Use `codexel exec --full-auto` to allow file edits. Use `codexel exec --sandbox danger-full-access` to allow edits and networked commands. + +### Interactive prompts are disabled + +`codexel exec` is non-interactive. If the agent attempts to ask an interactive multiple-choice question (AskUserQuestion), the request is automatically cancelled. Plan approval prompts are automatically rejected. ### Default output mode -By default, Codex streams its activity to stderr and only writes the final message from the agent to stdout. This makes it easier to pipe `codex exec` into another tool without extra filtering. +By default, Codexel streams its activity to stderr and only writes the final message from the agent to stdout. This makes it easier to pipe `codexel exec` into another tool without extra filtering. -To write the output of `codex exec` to a file, in addition to using a shell redirect like `>`, there is also a dedicated flag to specify an output file: `-o`/`--output-last-message`. +To write the output of `codexel exec` to a file, in addition to using a shell redirect like `>`, there is also a dedicated flag to specify an output file: `-o`/`--output-last-message`. ### JSON output mode -`codex exec` supports a `--json` mode that streams events to stdout as JSON Lines (JSONL) while the agent runs. +`codexel exec` supports a `--json` mode that streams events to stdout as JSON Lines (JSONL) while the agent runs. 
Supported event types: @@ -75,40 +79,40 @@ Sample schema: ``` ```shell -codex exec "Extract details of the project" --output-schema ~/schema.json +codexel exec "Extract details of the project" --output-schema ~/schema.json ... -{"project_name":"Codex CLI","programming_languages":["Rust","TypeScript","Shell"]} +{"project_name":"Codexel","programming_languages":["Rust","TypeScript","Shell"]} ``` Combine `--output-schema` with `-o` to only print the final JSON output. You can also pass a file path to `-o` to save the JSON output to a file. ### Git repository requirement -Codex requires a Git repository to avoid destructive changes. To disable this check, use `codex exec --skip-git-repo-check`. +Codexel requires a Git repository to avoid destructive changes. To disable this check, use `codexel exec --skip-git-repo-check`. ### Resuming non-interactive sessions -Resume a previous non-interactive session with `codex exec resume ` or `codex exec resume --last`. This preserves conversation context so you can ask follow-up questions or give new tasks to the agent. +Resume a previous non-interactive session with `codexel exec resume ` or `codexel exec resume --last`. This preserves conversation context so you can ask follow-up questions or give new tasks to the agent. ```shell -codex exec "Review the change, look for use-after-free issues" -codex exec resume --last "Fix use-after-free issues" +codexel exec "Review the change, look for use-after-free issues" +codexel exec resume --last "Fix use-after-free issues" ``` -Only the conversation context is preserved; you must still provide flags to customize Codex behavior. +Only the conversation context is preserved; you must still provide flags to customize Codexel behavior. 
```shell -codex exec --model gpt-5.1-codex-max --json "Review the change, look for use-after-free issues" -codex exec --model gpt-5.1 --json resume --last "Fix use-after-free issues" +codexel exec --model gpt-5.1-codex-max --json "Review the change, look for use-after-free issues" +codexel exec --model gpt-5.1 --json resume --last "Fix use-after-free issues" ``` ## Authentication -By default, `codex exec` will use the same authentication method as Codex CLI and VSCode extension. You can override the api key by setting the `CODEX_API_KEY` environment variable. +By default, `codexel exec` will use the same authentication method as Codexel and the VSCode extension. You can override the api key by setting the `CODEX_API_KEY` environment variable. ```shell -CODEX_API_KEY=your-api-key-here codex exec "Fix merge conflict" +CODEX_API_KEY=your-api-key-here codexel exec "Fix merge conflict" ``` -NOTE: `CODEX_API_KEY` is only supported in `codex exec`. +NOTE: `CODEX_API_KEY` is only supported in `codexel exec`. diff --git a/docs/execpolicy.md b/docs/execpolicy.md index ecc79f33d20..235ce15425b 100644 --- a/docs/execpolicy.md +++ b/docs/execpolicy.md @@ -1,23 +1,23 @@ # Execpolicy quickstart -Codex can enforce your own rules-based execution policy before it runs shell commands. Policies live in `.rules` files under `~/.codex/rules`. +Codexel can enforce your own rules-based execution policy before it runs shell commands. Policies live in `.rules` files under `~/.codexel/rules`. ## How to create and edit rules ### TUI interactions -Codex CLI will present the option to whitelist commands when a command causes a prompt. +Codexel will present the option to whitelist commands when a command causes a prompt. Screenshot 2025-12-04 at 9 23 54 AM Whitelisted commands will no longer require your permission to run in current and subsequent sessions. -Under the hood, when you approve and whitelist a command, codex will edit `~/.codex/rules/default.rules`. 
+Under the hood, when you approve and whitelist a command, Codexel will edit `~/.codexel/rules/default.rules`. ### Editing `.rules` files -1. Create a policy directory: `mkdir -p ~/.codex/rules`. -2. Add one or more `.rules` files in that folder. Codex automatically loads every `.rules` file in there on startup. +1. Create a policy directory: `mkdir -p ~/.codexel/rules`. +2. Add one or more `.rules` files in that folder. Codexel automatically loads every `.rules` file in there on startup. 3. Write `prefix_rule` entries to describe the commands you want to allow, prompt, or block: ```starlark @@ -37,10 +37,10 @@ In this example rule, if Codex wants to run commands with the prefix `git push` ## Preview decisions -Use the `codex execpolicy check` subcommand to preview decisions before you save a rule (see the [`codex-execpolicy` README](../codex-rs/execpolicy/README.md) for syntax details): +Use the `codexel execpolicy check` subcommand to preview decisions before you save a rule (see the [`codex-execpolicy` README](../codex-rs/execpolicy/README.md) for syntax details): ```shell -codex execpolicy check --rules ~/.codex/rules/default.rules git push origin main +codexel execpolicy check --rules ~/.codexel/rules/default.rules git push origin main ``` Pass multiple `--rules` flags to test how several files combine, and use `--pretty` for formatted JSON output. See the [`codex-rs/execpolicy` README](../codex-rs/execpolicy/README.md) for a more detailed walkthrough of the available syntax. diff --git a/docs/experimental.md b/docs/experimental.md index 48e307030b5..9c53f4d6b7c 100644 --- a/docs/experimental.md +++ b/docs/experimental.md @@ -1,10 +1,10 @@ ## Experimental technology disclaimer -Codex CLI is an experimental project under active development. It is not yet stable, may contain bugs, incomplete features, or undergo breaking changes. We're building it in the open with the community and welcome: +Codexel is an experimental project under active development. 
It is not yet stable, may contain bugs, incomplete features, or undergo breaking changes. We're building it in the open with the community and welcome: - Bug reports - Feature requests - Pull requests - Good vibes -Help us improve by filing issues or submitting PRs (see [docs/contributing.md](docs/contributing.md) for guidance)! +Help us improve by filing issues or submitting PRs (see [contributing.md](./contributing.md) for guidance)! diff --git a/docs/faq.md b/docs/faq.md index 93776b957a0..909c72ed2d1 100644 --- a/docs/faq.md +++ b/docs/faq.md @@ -4,13 +4,13 @@ This FAQ highlights the most common questions and points you to the right deep-d ### OpenAI released a model called Codex in 2021 - is this related? -In 2021, OpenAI released Codex, an AI system designed to generate code from natural language prompts. That original Codex model was deprecated as of March 2023 and is separate from the CLI tool. +In 2021, OpenAI released Codex, an AI system designed to generate code from natural language prompts. That original Codex model was deprecated as of March 2023 and is separate from Codexel (the CLI tool). ### Which models are supported? -We recommend using Codex with GPT-5.1 Codex Max, our best coding model. The default reasoning level is medium, and you can upgrade to high or xhigh (where supported, e.g. `gpt-5.1-codex-max` and `gpt-5.2`) for complex tasks with the `/model` command. +We recommend using Codexel with GPT-5.1 Codex Max, our best coding model. The default reasoning level is medium, and you can upgrade to high or xhigh (where supported, e.g. `gpt-5.1-codex-max` and `gpt-5.2`) for complex tasks with the `/model` command. -You can also use older models by using API-based auth and launching codex with the `--model` flag. +You can also use older models by using API-based auth and launching `codexel` with the `--model` flag. ### How do approvals and sandbox modes work together? 
@@ -18,11 +18,11 @@ Approvals are the mechanism Codex uses to ask before running a tool call with el ### Can I automate tasks without the TUI? -Yes. [`codex exec`](./exec.md) runs Codex in non-interactive mode with streaming logs, JSONL output, and structured schema support. The command respects the same sandbox and approval settings you configure in the [Config guide](./config.md). +Yes. [`codexel exec`](./exec.md) runs Codexel in non-interactive mode with streaming logs, JSONL output, and structured schema support. The command respects the same sandbox and approval settings you configure in the [Config guide](./config.md). ### How do I stop Codex from editing my files? -By default, Codex can modify files in your current working directory (Auto mode). To prevent edits, run `codex` in read-only mode with the CLI flag `--sandbox read-only`. Alternatively, you can change the approval level mid-conversation with `/approvals`. +By default, Codexel can modify files in your current working directory (Auto mode). To prevent edits, run `codexel` in read-only mode with the CLI flag `--sandbox read-only`. Alternatively, you can change the approval level mid-conversation with `/approvals`. ### How do I connect Codex to MCP servers? @@ -32,7 +32,7 @@ Configure MCP servers through your `config.toml` using the examples in [Config - Confirm your setup in three steps: -1. Walk through the auth flows in [Authentication](./authentication.md) to ensure the correct credentials are present in `~/.codex/auth.json`. +1. Walk through the auth flows in [Authentication](./authentication.md) to ensure the correct credentials are present in `~/.codexel/auth.json` (or legacy `~/.codex/auth.json`). 2. If you're on a headless or remote machine, make sure port-forwarding is configured as described in [Authentication -> Connecting on a "Headless" Machine](./authentication.md#connecting-on-a-headless-machine). ### Does it work on Windows? 
@@ -43,13 +43,13 @@ Running Codex directly on Windows may work, but is not officially supported. We
 
 Follow the quick setup in [Install & build](./install.md) and then jump into [Getting started](./getting-started.md) for interactive usage tips, prompt examples, and AGENTS.md guidance.
 
-### `brew upgrade codex` isn't upgrading me
+### `brew upgrade codexel` isn't upgrading me
 
-If you're running Codex v0.46.0 or older, `brew upgrade codex` will not move you to the latest version because we migrated from a Homebrew formula to a cask. To upgrade, uninstall the existing oudated formula and then install the new cask:
+If you're running Codexel v0.46.0 or older, `brew upgrade codexel` will not move you to the latest version because we migrated from a Homebrew formula to a cask. To upgrade, uninstall the existing outdated formula and then install the new cask:
 
 ```bash
-brew uninstall --formula codex
-brew install --cask codex
+brew uninstall --formula codexel
+brew install --cask codexel
 ```
 
-After reinstalling, `brew upgrade --cask codex` will keep future releases up to date.
+After reinstalling, `brew upgrade --cask codexel` will keep future releases up to date.
diff --git a/docs/getting-started.md b/docs/getting-started.md
index 923eb095682..25223e523f5 100644
--- a/docs/getting-started.md
+++ b/docs/getting-started.md
@@ -3,69 +3,69 @@
 Looking for something specific? Jump ahead:
 
 - [Tips & shortcuts](#tips--shortcuts) – hotkeys, resume flow, prompts
-- [Non-interactive runs](./exec.md) – automate with `codex exec`
+- [Non-interactive runs](./exec.md) – automate with `codexel exec`
 - Ready for deeper customization? 
Head to [`advanced.md`](./advanced.md) ### CLI usage -| Command | Purpose | Example | -| ------------------ | ---------------------------------- | ------------------------------- | -| `codex` | Interactive TUI | `codex` | -| `codex "..."` | Initial prompt for interactive TUI | `codex "fix lint errors"` | -| `codex exec "..."` | Non-interactive "automation mode" | `codex exec "explain utils.ts"` | +| Command | Purpose | Example | +| -------------------- | ---------------------------------- | --------------------------------- | +| `codexel` | Interactive TUI | `codexel` | +| `codexel "..."` | Initial prompt for interactive TUI | `codexel "fix lint errors"` | +| `codexel exec "..."` | Non-interactive "automation mode" | `codexel exec "explain utils.ts"` | Key flags: `--model/-m`, `--ask-for-approval/-a`. ### Resuming interactive sessions -- Run `codex resume` to display the session picker UI -- Resume most recent: `codex resume --last` -- Resume by id: `codex resume ` (You can get session ids from /status or `~/.codex/sessions/`) +- Run `codexel resume` to display the session picker UI +- Resume most recent: `codexel resume --last` +- Resume by id: `codexel resume ` (You can get session ids from /status or `~/.codexel/sessions/`) - The picker shows the session's recorded Git branch when available. -- To show the session's original working directory (CWD), run `codex resume --all` (this also disables cwd filtering and adds a `CWD` column). +- To show the session's original working directory (CWD), run `codexel resume --all` (this also disables cwd filtering and adds a `CWD` column). 
Examples: ```shell # Open a picker of recent sessions -codex resume +codexel resume # Resume the most recent session -codex resume --last +codexel resume --last # Resume a specific session by id -codex resume 7f9f9a2e-1b3c-4c7a-9b0e-123456789abc +codexel resume 7f9f9a2e-1b3c-4c7a-9b0e-123456789abc ``` ### Running with a prompt as input -You can also run Codex CLI with a prompt as input: +You can also run Codexel with a prompt as input: ```shell -codex "explain this codebase to me" +codexel "explain this codebase to me" ``` ### Example prompts Below are a few bite-size examples you can copy-paste. Replace the text in quotes with your own task. -| ✨ | What you type | What happens | -| --- | ------------------------------------------------------------------------------- | -------------------------------------------------------------------------- | -| 1 | `codex "Refactor the Dashboard component to React Hooks"` | Codex rewrites the class component, runs `npm test`, and shows the diff. | -| 2 | `codex "Generate SQL migrations for adding a users table"` | Infers your ORM, creates migration files, and runs them in a sandboxed DB. | -| 3 | `codex "Write unit tests for utils/date.ts"` | Generates tests, executes them, and iterates until they pass. | -| 4 | `codex "Bulk-rename *.jpeg -> *.jpg with git mv"` | Safely renames files and updates imports/usages. | -| 5 | `codex "Explain what this regex does: ^(?=.*[A-Z]).{8,}$"` | Outputs a step-by-step human explanation. | -| 6 | `codex "Carefully review this repo, and propose 3 high impact well-scoped PRs"` | Suggests impactful PRs in the current codebase. | -| 7 | `codex "Look for vulnerabilities and create a security review report"` | Finds and explains security bugs. 
| +| ✨ | What you type | What happens | +| --- | --------------------------------------------------------------------------------- | -------------------------------------------------------------------------- | +| 1 | `codexel "Refactor the Dashboard component to React Hooks"` | Codexel rewrites the class component, runs `npm test`, and shows the diff. | +| 2 | `codexel "Generate SQL migrations for adding a users table"` | Infers your ORM, creates migration files, and runs them in a sandboxed DB. | +| 3 | `codexel "Write unit tests for utils/date.ts"` | Generates tests, executes them, and iterates until they pass. | +| 4 | `codexel "Bulk-rename *.jpeg -> *.jpg with git mv"` | Safely renames files and updates imports/usages. | +| 5 | `codexel "Explain what this regex does: ^(?=.*[A-Z]).{8,}$"` | Outputs a step-by-step human explanation. | +| 6 | `codexel "Carefully review this repo, and propose 3 high impact well-scoped PRs"` | Suggests impactful PRs in the current codebase. | +| 7 | `codexel "Look for vulnerabilities and create a security review report"` | Finds and explains security bugs. | Looking to reuse your own instructions? Create slash commands with [custom prompts](./prompts.md). ### Memory with AGENTS.md -You can give Codex extra instructions and guidance using `AGENTS.md` files. Codex looks for them in the following places, and merges them top-down: +You can give Codexel extra instructions and guidance using `AGENTS.md` files. Codexel looks for them in the following places, and merges them top-down: -1. `~/.codex/AGENTS.md` - personal global guidance +1. `~/.codexel/AGENTS.md` - personal global guidance 2. Every directory from the repository root down to your current working directory (inclusive). In each directory, Codex first looks for `AGENTS.override.md` and uses it if present; otherwise it falls back to `AGENTS.md`. Use the override form when you want to replace inherited instructions for that directory. 
For more information on how to use AGENTS.md, see the [official AGENTS.md documentation](https://agents.md/). @@ -76,7 +76,19 @@ For more information on how to use AGENTS.md, see the [official AGENTS.md docume Typing `@` triggers a fuzzy-filename search over the workspace root. Use up/down to select among the results and Tab or Enter to replace the `@` with the selected path. You can use Esc to cancel the search. -#### Esc–Esc to edit a previous message +#### Answer interactive questions + +When Codexel needs a decision mid-run, it may pause and show an interactive question picker instead of continuing. + +- Use arrow keys to move, Enter to choose/confirm, and Esc to cancel. +- Some questions support multi-select (Space toggles selections). +- A free-text option is always available for custom input (you do not need to type it as an explicit option). + +#### Plan with `/plan` + +Use `/plan` to create a plan and approve it before making changes. + +#### Esc—Esc to edit a previous message When the chat composer is empty, press Esc to prime “backtrack” mode. Press Esc again to open a transcript preview highlighting the last user message; press Esc repeatedly to step to older user messages. Press Enter to confirm and Codex will fork the conversation from that point, trim the visible transcript accordingly, and pre‑fill the composer with the selected user message so you can edit and resubmit it. @@ -84,14 +96,14 @@ In the transcript preview, the footer shows an `Esc edit prev` hint while editin #### `--cd`/`-C` flag -Sometimes it is not convenient to `cd` to the directory you want Codex to use as the "working root" before running Codex. Fortunately, `codex` supports a `--cd` option so you can specify whatever folder you want. You can confirm that Codex is honoring `--cd` by double-checking the **workdir** it reports in the TUI at the start of a new session. 
+Sometimes it is not convenient to `cd` to the directory you want Codexel to use as the "working root" before running Codexel. Fortunately, `codexel` supports a `--cd` option so you can specify whatever folder you want. You can confirm that Codexel is honoring `--cd` by double-checking the **workdir** it reports in the TUI at the start of a new session. #### `--add-dir` flag Need to work across multiple projects in one run? Pass `--add-dir` one or more times to expose extra directories as writable roots for the current session while keeping the main working directory unchanged. For example: ```shell -codex --cd apps/frontend --add-dir ../backend --add-dir ../shared +codexel --cd apps/frontend --add-dir ../backend --add-dir ../shared ``` Codex can then inspect and edit files in each listed directory without leaving the primary workspace. @@ -101,9 +113,9 @@ Codex can then inspect and edit files in each listed directory without leaving t Generate shell completion scripts via: ```shell -codex completion bash -codex completion zsh -codex completion fish +codexel completion bash +codexel completion zsh +codexel completion fish ``` #### Image input @@ -111,10 +123,10 @@ codex completion fish Paste images directly into the composer (Ctrl+V / Cmd+V) to attach them to your prompt. You can also attach files via the CLI using `-i/--image` (comma‑separated): ```bash -codex -i screenshot.png "Explain this error" -codex --image img1.png,img2.jpg "Summarize these diagrams" +codexel -i screenshot.png "Explain this error" +codexel --image img1.png,img2.jpg "Summarize these diagrams" ``` #### Environment variables and executables -Make sure your environment is already set up before launching Codex so it does not spend tokens probing what to activate. For example, source your Python virtualenv (or other language runtimes), start any required daemons, and export the env vars you expect to use ahead of time. 
+Make sure your environment is already set up before launching Codexel so it does not spend tokens probing what to activate. For example, source your Python virtualenv (or other language runtimes), start any required daemons, and export the env vars you expect to use ahead of time. diff --git a/docs/install.md b/docs/install.md index b54b74f16c1..b7e6f6e8ccf 100644 --- a/docs/install.md +++ b/docs/install.md @@ -10,14 +10,14 @@ ### DotSlash -The GitHub Release also contains a [DotSlash](https://dotslash-cli.com/) file for the Codex CLI named `codex`. Using a DotSlash file makes it possible to make a lightweight commit to source control to ensure all contributors use the same version of an executable, regardless of what platform they use for development. +The GitHub Release also contains a [DotSlash](https://dotslash-cli.com/) file for Codexel named `codexel`. Using a DotSlash file makes it possible to make a lightweight commit to source control to ensure all contributors use the same version of an executable, regardless of what platform they use for development. ### Build from source ```bash # Clone the repository and navigate to the root of the Cargo workspace. -git clone https://github.com/openai/codex.git -cd codex/codex-rs +git clone https://github.com/Ixe1/codexel.git +cd codexel/codex-rs # Install the Rust toolchain, if necessary. curl --proto '=https' --tlsv1.2 -sSf https://sh.rustup.rs | sh -s -- -y @@ -29,11 +29,11 @@ cargo install just # Optional: install nextest for the `just test` helper (or use `cargo test --all-features` as a fallback) cargo install cargo-nextest -# Build Codex. +# Build Codexel. cargo build # Launch the TUI with a sample prompt. 
-cargo run --bin codex -- "explain this codebase to me" +cargo run --bin codexel -- "explain this codebase to me" # After making changes, use the root justfile helpers (they default to codex-rs): just fmt diff --git a/docs/open-source-fund.md b/docs/open-source-fund.md index 2da0cdce3e5..c64b412e4bf 100644 --- a/docs/open-source-fund.md +++ b/docs/open-source-fund.md @@ -1,6 +1,6 @@ -## Codex open source fund +## Codexel open source fund -We're excited to launch a **$1 million initiative** supporting open source projects that use Codex CLI and other OpenAI models. +We're excited to launch a **$1 million initiative** supporting open source projects that use Codexel and other OpenAI models. - Grants are awarded up to **$25,000** API credits. - Applications are reviewed **on a rolling basis**. diff --git a/docs/prompts.md b/docs/prompts.md index c995cb912f6..be31cde3a2f 100644 --- a/docs/prompts.md +++ b/docs/prompts.md @@ -1,13 +1,13 @@ ## Custom Prompts -Custom prompts turn your repeatable instructions into reusable slash commands, so you can trigger them without retyping or copy/pasting. Each prompt is a Markdown file that Codex expands into the conversation the moment you run it. +Custom prompts turn your repeatable instructions into reusable slash commands, so you can trigger them without retyping or copy/pasting. Each prompt is a Markdown file that Codexel expands into the conversation the moment you run it. ### Where prompts live -- Location: store prompts in `$CODEX_HOME/prompts/` (defaults to `~/.codex/prompts/`). Set `CODEX_HOME` if you want to use a different folder. +- Location: store prompts in `$CODEXEL_HOME/prompts/` (defaults to `~/.codexel/prompts/`). Set `CODEXEL_HOME` if you want to use a different folder (legacy `CODEX_HOME` is also supported). - File type: Codex only loads `.md` files. Non-Markdown files are ignored. Both regular files and symlinks to Markdown files are supported. - Naming: The filename (without `.md`) becomes the prompt name. 
A file called `review.md` registers the prompt `review`. -- Refresh: Prompts are loaded when a session starts. Restart Codex (or start a new session) after adding or editing files. +- Refresh: Prompts are loaded when a session starts. Restart Codexel (or start a new session) after adding or editing files. - Conflicts: Files whose names collide with built-in commands (like `init`) stay hidden in the slash popup, but you can still invoke them with `/prompts:`. ### File format @@ -27,24 +27,24 @@ Custom prompts turn your repeatable instructions into reusable slash commands, s ### Placeholders and arguments -- Numeric placeholders: `$1`–`$9` insert the first nine positional arguments you type after the command. `$ARGUMENTS` inserts all positional arguments joined by a single space. Use `$$` to emit a literal dollar sign (Codex leaves `$$` untouched). +- Numeric placeholders: `$1`–`$9` insert the first nine positional arguments you type after the command. `$ARGUMENTS` inserts all positional arguments joined by a single space. Use `$$` to emit a literal dollar sign (Codexel leaves `$$` untouched). - Named placeholders: Tokens such as `$FILE` or `$TICKET_ID` expand from `KEY=value` pairs you supply. Keys are case-sensitive—use the same uppercase name in the command (for example, `FILE=...`). - Quoted arguments: Double-quote any value that contains spaces, e.g. `TICKET_TITLE="Fix logging"`. - Invocation syntax: Run prompts via `/prompts: ...`. When the slash popup is open, typing either `prompts:` or the bare prompt name will surface `/prompts:` suggestions. -- Error handling: If a prompt contains named placeholders, Codex requires them all. You will see a validation message if any are missing or malformed. +- Error handling: If a prompt contains named placeholders, Codexel requires them all. You will see a validation message if any are missing or malformed. ### Running a prompt -1. Start a new Codex session (ensures the prompt list is fresh). +1. 
Start a new Codexel session (ensures the prompt list is fresh). 2. In the composer, type `/` to open the slash popup. 3. Type `prompts:` (or start typing the prompt name) and select it with ↑/↓. -4. Provide any required arguments, press Enter, and Codex sends the expanded content. +4. Provide any required arguments, press Enter, and Codexel sends the expanded content. ### Examples ### Example 1: Basic named arguments -**File**: `~/.codex/prompts/ticket.md` +**File**: `~/.codexel/prompts/ticket.md` ```markdown --- @@ -61,17 +61,17 @@ Please write a concise commit message for ticket $TICKET_ID: $TICKET_TITLE /prompts:ticket TICKET_ID=JIRA-1234 TICKET_TITLE="Fix login bug" ``` -**Expanded prompt sent to Codex**: +**Expanded prompt sent to Codexel**: ``` Please write a concise commit message for ticket JIRA-1234: Fix login bug ``` -**Note**: Both `TICKET_ID` and `TICKET_TITLE` are required. If either is missing, Codex will show a validation error. Values with spaces must be double-quoted. +**Note**: Both `TICKET_ID` and `TICKET_TITLE` are required. If either is missing, Codexel will show a validation error. Values with spaces must be double-quoted. ### Example 2: Mixed positional and named arguments -**File**: `~/.codex/prompts/review.md` +**File**: `~/.codexel/prompts/review.md` ```markdown --- diff --git a/docs/releasing.md b/docs/releasing.md new file mode 100644 index 00000000000..5387052e9e5 --- /dev/null +++ b/docs/releasing.md @@ -0,0 +1,27 @@ +## Releasing Codexel (npm) + +Codexel is published as `@ixe1/codexel` with prebuilt native binaries bundled in +`codex-cli/vendor/`. Publishing is handled by GitHub Actions. + +### Release checklist + +- Update `codex-cli/package.json` to the release version (no `-dev` suffix). +- Merge the release commit to `main`. 
+- Create and push a tag: + - Stable: `codexel-vX.Y.Z` + - Pre-release: `codexel-vX.Y.Z-alpha.N` or `codexel-vX.Y.Z-beta.N` + +### What the workflow does + +The `npm-publish-codexel` workflow: + +- Builds the `codexel` binary for all supported targets. +- Assembles `codex-cli/vendor/<target>/codex/codexel(.exe)`. +- Packs an npm tarball and runs a smoke test (`codexel --help`). +- Creates a GitHub Release containing the per-target binaries and npm tarball. +- Publishes `@ixe1/codexel` using npm Trusted Publishing (OIDC). + +### One-time setup + +Before the first publish, configure npm Trusted Publishing for `@ixe1/codexel` +to trust this repository and the `npm-publish-codexel` workflow in the npm UI. diff --git a/docs/sandbox.md b/docs/sandbox.md index 94f9c8280cf..738ad079206 100644 --- a/docs/sandbox.md +++ b/docs/sandbox.md @@ -1,28 +1,28 @@ ## Sandbox & approvals -What Codex is allowed to do is governed by a combination of **sandbox modes** (what Codex is allowed to do without supervision) and **approval policies** (when you must confirm an action). This page explains the options, how they interact, and how the sandbox behaves on each platform. +What Codexel is allowed to do is governed by a combination of **sandbox modes** (what Codexel is allowed to do without supervision) and **approval policies** (when you must confirm an action). This page explains the options, how they interact, and how the sandbox behaves on each platform. ### Approval policies -Codex starts conservatively. Until you explicitly tell it a working directory is trusted, the CLI defaults to **read-only**. Codex can inspect files and answer questions, but every edit or command requires approval. +Codexel starts conservatively. Until you explicitly tell it a working directory is trusted, the CLI defaults to **read-only**. Codexel can inspect files and answer questions, but every edit or command requires approval.
-When you mark a working directory as trusted (for example via the onboarding prompt or `/approvals` → “Trust this directory”), Codex upgrades the default preset to **Agent**, which allows writes inside the workspace. Codex only interrupts you when it needs to leave the workspace or rerun something outside the sandbox. Note that the workspace includes the working directory plus temporary directories like `/tmp`. Use `/status` to confirm the exact writable roots. +When you mark a working directory as trusted (for example via the onboarding prompt or `/approvals` → “Trust this directory”), Codexel upgrades the default preset to **Agent**, which allows writes inside the workspace. Codexel only interrupts you when it needs to leave the workspace or rerun something outside the sandbox. Note that the workspace includes the working directory plus temporary directories like `/tmp`. Use `/status` to confirm the exact writable roots. If you want maximum guardrails for a trusted repo, switch back to Read Only from the `/approvals` picker. If you truly need hands-off automation, use `Full Access`—but be deliberate, because that skips both the sandbox and approvals. ### Can I run without ANY approvals? -Yes, you can disable all approval prompts with `--ask-for-approval never`. This option works with all `--sandbox` modes, so you still have full control over Codex's level of autonomy. It will make its best attempt with whatever constraints you provide. +Yes, you can disable all approval prompts with `--ask-for-approval never`. This option works with all `--sandbox` modes, so you still have full control over Codexel's level of autonomy. It will make its best attempt with whatever constraints you provide. 
### Common sandbox + approvals combinations -| Intent | Flags | Effect | -| ---------------------------------- | ------------------------------------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------- | -| Safe read-only browsing | `--sandbox read-only --ask-for-approval on-request` | Codex can read files and answer questions. Codex requires approval to make edits, run commands, or access network. | -| Read-only non-interactive (CI) | `--sandbox read-only --ask-for-approval never` | Reads only; never escalates | -| Let it edit the repo, ask if risky | `--sandbox workspace-write --ask-for-approval on-request` | Codex can read files, make edits, and run commands in the workspace. Codex requires approval for actions outside the workspace or for network access. | -| Auto (preset; trusted repos) | `--full-auto` (equivalent to `--sandbox workspace-write` + `--ask-for-approval on-request`) | Codex runs sandboxed commands that can write inside the workspace without prompting. Escalates only when it must leave the sandbox. | -| YOLO (not recommended) | `--dangerously-bypass-approvals-and-sandbox` (alias: `--yolo`) | No sandbox; no prompts | +| Intent | Flags | Effect | +| ---------------------------------- | ------------------------------------------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------------- | +| Safe read-only browsing | `--sandbox read-only --ask-for-approval on-request` | Codexel can read files and answer questions. Codexel requires approval to make edits, run commands, or access network. 
| +| Read-only non-interactive (CI) | `--sandbox read-only --ask-for-approval never` | Reads only; never escalates | +| Let it edit the repo, ask if risky | `--sandbox workspace-write --ask-for-approval on-request` | Codexel can read files, make edits, and run commands in the workspace. Codexel requires approval for actions outside the workspace or for network access. | +| Auto (preset; trusted repos) | `--full-auto` (equivalent to `--sandbox workspace-write` + `--ask-for-approval on-request`) | Codexel runs sandboxed commands that can write inside the workspace without prompting. Escalates only when it must leave the sandbox. | +| YOLO (not recommended) | `--dangerously-bypass-approvals-and-sandbox` (alias: `--yolo`) | No sandbox; no prompts | > Note: In `workspace-write`, network is disabled by default unless enabled in config (`[sandbox_workspace_write].network_access = true`). @@ -56,17 +56,17 @@ sandbox_mode = "read-only" ### Sandbox mechanics by platform -The mechanism Codex uses to enforce the sandbox policy depends on your OS: +The mechanism Codexel uses to enforce the sandbox policy depends on your OS: #### macOS 12+ -Uses **Apple Seatbelt**. Codex invokes `sandbox-exec` with a profile that corresponds to the selected `--sandbox` mode, constraining filesystem and network access at the OS level. +Uses **Apple Seatbelt**. Codexel invokes `sandbox-exec` with a profile that corresponds to the selected `--sandbox` mode, constraining filesystem and network access at the OS level. #### Linux Combines **Landlock** and **seccomp** APIs to approximate the same guarantees. Kernel support is required; older kernels may not expose the necessary features. -In containerized Linux environments (for example Docker), sandboxing may not work when the host or container configuration does not expose Landlock/seccomp. 
In those cases, configure the container to provide the isolation you need and run Codex with `--sandbox danger-full-access` (or the shorthand `--dangerously-bypass-approvals-and-sandbox`) inside that container. +In containerized Linux environments (for example Docker), sandboxing may not work when the host or container configuration does not expose Landlock/seccomp. In those cases, configure the container to provide the isolation you need and run Codexel with `--sandbox danger-full-access` (or the shorthand `--dangerously-bypass-approvals-and-sandbox`) inside that container. #### Windows @@ -79,18 +79,18 @@ Windows sandbox support remains experimental. How it works: Its primary limitation is that it cannot prevent file writes, deletions, or creations in any directory where the Everyone SID already has write permissions (for example, world-writable folders). See more discussion and limitations at [Windows Sandbox Security Details](./windows_sandbox_security.md). -## Experimenting with the Codex Sandbox +## Experimenting with the Codexel Sandbox -To test how commands behave under Codex's sandbox, use the CLI helpers: +To test how commands behave under Codexel's sandbox, use the CLI helpers: ``` # macOS -codex sandbox macos [--full-auto] [COMMAND]... +codexel sandbox macos [--full-auto] [COMMAND]... # Linux -codex sandbox linux [--full-auto] [COMMAND]... +codexel sandbox linux [--full-auto] [COMMAND]... # Legacy aliases -codex debug seatbelt [--full-auto] [COMMAND]... -codex debug landlock [--full-auto] [COMMAND]... +codexel debug seatbelt [--full-auto] [COMMAND]... +codexel debug landlock [--full-auto] [COMMAND]... ``` diff --git a/docs/skills.md b/docs/skills.md index 5a9f17f0b07..47d515af80f 100644 --- a/docs/skills.md +++ b/docs/skills.md @@ -2,24 +2,24 @@ > **Warning:** This is an experimental and non-stable feature. If you depend on it, please expect breaking changes over the coming weeks and understand that there is currently no guarantee that this works well. 
Use at your own risk! -Codex can automatically discover reusable "skills" you keep on disk. A skill is a small bundle with a name, a short description (what it does and when to use it), and an optional body of instructions you can open when needed. Codex injects only the name, description, and file path into the runtime context; the body stays on disk. +Codexel can automatically discover reusable "skills" you keep on disk. A skill is a small bundle with a name, a short description (what it does and when to use it), and an optional body of instructions you can open when needed. Codexel injects only the name, description, and file path into the runtime context; the body stays on disk. ## Enable skills Skills are behind the experimental `skills` feature flag and are disabled by default. -- Enable in config (preferred): add the following to `$CODEX_HOME/config.toml` (usually `~/.codex/config.toml`) and restart Codex: +- Enable in config (preferred): add the following to `$CODEXEL_HOME/config.toml` (usually `~/.codexel/config.toml`, or legacy `~/.codex/config.toml`) and restart Codexel: ```toml [features] skills = true ``` -- Enable for a single run: launch Codex with `codex --enable skills` +- Enable for a single run: launch Codexel with `codexel --enable skills` ## Where skills live -- Location (v1): `~/.codex/skills/**/SKILL.md` (recursive). Hidden entries and symlinks are skipped. Only files named exactly `SKILL.md` count. +- Location (v1): `~/.codexel/skills/**/SKILL.md` (recursive). Hidden entries and symlinks are skipped. Only files named exactly `SKILL.md` count. - Sorting: rendered by name, then path for stability. ## File format @@ -33,7 +33,7 @@ Skills are behind the experimental `skills` feature flag and are disabled by def ## Loading and rendering - Loaded once at startup. -- If valid skills exist, Codex appends a runtime-only `## Skills` section after `AGENTS.md`, one bullet per skill: `- : (file: /absolute/path/to/SKILL.md)`. 
+- If valid skills exist, Codexel appends a runtime-only `## Skills` section after `AGENTS.md`, one bullet per skill: `- <name>: <description> (file: /absolute/path/to/SKILL.md)`. - If no valid skills exist, the section is omitted. On-disk files are never modified. ## Using skills @@ -47,7 +47,7 @@ Skills are behind the experimental `skills` feature flag and are disabled by def ## Create a skill -1. Create `~/.codexel/skills/<skill-name>/`. 2. Add `SKILL.md`: ``` @@ -61,13 +61,13 @@ Skills are behind the experimental `skills` feature flag and are disabled by def ``` 3. Keep `name`/`description` within the limits; avoid newlines in those fields. -4. Restart Codex to load the new skill. +4. Restart Codexel to load the new skill. ## Example ``` -mkdir -p ~/.codex/skills/pdf-processing -cat <<'SKILL_EXAMPLE' > ~/.codex/skills/pdf-processing/SKILL.md +mkdir -p ~/.codexel/skills/pdf-processing +cat <<'SKILL_EXAMPLE' > ~/.codexel/skills/pdf-processing/SKILL.md --- name: pdf-processing description: Extract text and tables from PDFs; use when PDFs, forms, or document extraction are mentioned. diff --git a/docs/slash_commands.md b/docs/slash_commands.md index c1f9daf9d4c..130aff0851c 100644 --- a/docs/slash_commands.md +++ b/docs/slash_commands.md @@ -13,8 +13,10 @@ Control Codex’s behavior during an interactive session with slash commands.
| Command | Purpose | | --------------- | -------------------------------------------------------------------------- | | `/model` | choose what model and reasoning effort to use | +| `/plan-model` | choose what model and reasoning effort to use for `/plan` | | `/approvals` | choose what Codex can do without approval | | `/review` | review my current changes and find issues | +| `/plan` | create and approve a plan before making changes | | `/new` | start a new chat during a conversation | | `/resume` | resume an old chat | | `/init` | create an AGENTS.md file with instructions for Codex | diff --git a/docs/windows_sandbox_security.md b/docs/windows_sandbox_security.md index 79f8f781b11..508d2085b19 100644 --- a/docs/windows_sandbox_security.md +++ b/docs/windows_sandbox_security.md @@ -1,10 +1,10 @@ # Windows Sandbox Security Details -For overall context on sandboxing in Codex, see [sandbox.md](./sandbox.md). +For overall context on sandboxing in Codexel, see [sandbox.md](./sandbox.md). ## Implementation Overview -When commands run via `codex sandbox windows …` (or when the CLI/TUI calls into the same crate in-process for sandboxed turns), the launcher configures a restricted Windows token and an allowlist policy scoped to the declared workspace roots. Writes are blocked everywhere except inside those roots (plus `%TEMP%` when workspace-write mode is requested), and common escape vectors such as alternate data streams, UNC paths, and device handles are denied proactively. The CLI also injects stub executables (for example, wrapping `ssh`) ahead of the host PATH so we can intercept dangerous tools before they ever leave the sandbox. +When commands run via `codexel sandbox windows …` (or when the CLI/TUI calls into the same crate in-process for sandboxed turns), the launcher configures a restricted Windows token and an allowlist policy scoped to the declared workspace roots. 
Writes are blocked everywhere except inside those roots (plus `%TEMP%` when workspace-write mode is requested), and common escape vectors such as alternate data streams, UNC paths, and device handles are denied proactively. The CLI also injects stub executables (for example, wrapping `ssh`) ahead of the host PATH so we can intercept dangerous tools before they ever leave the sandbox. ## Known Security Limitations @@ -19,4 +19,4 @@ Running `python windows-sandbox-rs/sandbox_smoketests.py` with full filesystem a ## Want to Help? -If you are a security-minded Windows user, help us get these tests passing! Improved implementations that make these smoke tests pass meaningfully reduce Codex's escape surface. After iterating, rerun `python windows-sandbox-rs/sandbox_smoketests.py` to validate the fixes and help us drive the suite toward 41/41. +If you are a security-minded Windows user, help us get these tests passing! Improved implementations that make these smoke tests pass meaningfully reduce Codexel's escape surface. After iterating, rerun `python windows-sandbox-rs/sandbox_smoketests.py` to validate the fixes and help us drive the suite toward 41/41. diff --git a/docs/zdr.md b/docs/zdr.md index d030e8d07fb..f193756c6d7 100644 --- a/docs/zdr.md +++ b/docs/zdr.md @@ -1,3 +1,3 @@ ## Zero data retention (ZDR) usage -Codex CLI natively supports OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled. +Codexel natively supports OpenAI organizations with [Zero Data Retention (ZDR)](https://platform.openai.com/docs/guides/your-data#zero-data-retention) enabled. 
diff --git a/flake.nix b/flake.nix index b331c443bbf..62d81144a13 100644 --- a/flake.nix +++ b/flake.nix @@ -1,5 +1,5 @@ { - description = "Development Nix flake for OpenAI Codex CLI"; + description = "Development Nix flake for Codexel"; inputs.nixpkgs.url = "github:NixOS/nixpkgs/nixos-unstable"; diff --git a/justfile b/justfile index 79b691e0a02..65ac51dfc6b 100644 --- a/justfile +++ b/justfile @@ -5,18 +5,18 @@ set positional-arguments help: just -l -# `codex` -alias c := codex -codex *args: - cargo run --bin codex -- "$@" +# `codexel` +alias c := codexel +codexel *args: + cargo run --bin codexel -- "$@" -# `codex exec` +# `codexel exec` exec *args: - cargo run --bin codex -- exec "$@" + cargo run --bin codexel -- exec "$@" -# `codex tui` +# `codexel tui` tui *args: - cargo run --bin codex -- tui "$@" + cargo run --bin codexel -- tui "$@" # Run the CLI version of the file-search crate. file-search *args: @@ -25,12 +25,19 @@ file-search *args: # Build the CLI and run the app-server test client app-server-test-client *args: cargo build -p codex-cli - cargo run -p codex-app-server-test-client -- --codex-bin ./target/debug/codex "$@" + cargo run -p codex-app-server-test-client -- --codex-bin ./target/debug/codexel "$@" # format code fmt: cargo fmt -- --config imports_granularity=Item +# Changelog (run from repo root) +changelog: + ../scripts/gen-changelog.sh + +changelog-check: + ../scripts/gen-changelog.sh --check + fix *args: cargo clippy --fix --all-features --tests --allow-dirty "$@" diff --git a/scripts/gen-changelog.ps1 b/scripts/gen-changelog.ps1 new file mode 100644 index 00000000000..b65b5960ba7 --- /dev/null +++ b/scripts/gen-changelog.ps1 @@ -0,0 +1,64 @@ +param( + [switch]$Check +) + +Set-StrictMode -Version Latest +$ErrorActionPreference = "Stop" + +$repoRoot = Resolve-Path (Join-Path $PSScriptRoot "..") +$changelogPath = Join-Path $repoRoot "CHANGELOG.md" +$configPath = Join-Path $repoRoot "cliff.toml" + +function Require-Command([string]$Name) { + if 
(-not (Get-Command $Name -ErrorAction SilentlyContinue)) { + throw "Missing required command: $Name" + } +} + +Require-Command git +Require-Command git-cliff + +if (-not (Test-Path $changelogPath)) { + throw "CHANGELOG.md not found at $changelogPath" +} + +$text = Get-Content -Raw -Path $changelogPath +$newline = if ($text -match "`r`n") { "`r`n" } else { "`n" } + +$pattern = '\s*(?.*?)\s*' +$matches = [regex]::Matches($text, $pattern, [System.Text.RegularExpressions.RegexOptions]::Singleline) +if ($matches.Count -eq 0) { + throw "No generated details blocks found in CHANGELOG.md." +} + +$updated = [regex]::Replace($text, $pattern, { + param($match) + $range = $match.Groups["range"].Value + $details = & git-cliff -c $configPath -- $range | Out-String + if ($LASTEXITCODE -ne 0) { + throw "git-cliff failed for range $range" + } + $details = $details -replace "\r\n|\r|\n", $newline + $details = $details.Trim() + if ([string]::IsNullOrWhiteSpace($details)) { + $details = "_No fork-only changes yet._" + } + return "$newline$details$newline" +}, [System.Text.RegularExpressions.RegexOptions]::Singleline) + +if ($updated -eq $text) { + if ($Check) { + Write-Host "CHANGELOG.md is up to date." + } else { + Write-Host "No changelog updates needed." + } + exit 0 +} + +if ($Check) { + Write-Host "CHANGELOG.md is out of date. Run scripts/gen-changelog.ps1." + exit 1 +} + +Set-Content -Path $changelogPath -Value $updated -NoNewline +Write-Host "Updated CHANGELOG.md" diff --git a/scripts/gen-changelog.sh b/scripts/gen-changelog.sh new file mode 100644 index 00000000000..f4b935fb34a --- /dev/null +++ b/scripts/gen-changelog.sh @@ -0,0 +1,69 @@ +#!/usr/bin/env bash +set -euo pipefail + +repo_root="$(cd "$(dirname "${BASH_SOURCE[0]}")/.." && pwd)" +changelog="${repo_root}/CHANGELOG.md" +config="${repo_root}/cliff.toml" + +check="false" +if [[ "${1:-}" == "--check" ]]; then + check="true" +fi + +if ! 
command -v git >/dev/null 2>&1; then + echo "Missing required command: git" >&2 + exit 1 +fi + +if ! command -v git-cliff >/dev/null 2>&1; then + echo "Missing required command: git-cliff" >&2 + exit 1 +fi + +python3 - "$changelog" "$config" "$check" <<'PY' +import pathlib +import re +import subprocess +import sys + +changelog, config, check = sys.argv[1], sys.argv[2], sys.argv[3] == "true" +text = pathlib.Path(changelog).read_text() +newline = "\r\n" if "\r\n" in text else "\n" + +pattern = re.compile( + r"\s*(?P.*?)\s*", + re.S, +) + +if not pattern.search(text): + print("No generated details blocks found in CHANGELOG.md.", file=sys.stderr) + sys.exit(1) + +def render(match: re.Match[str]) -> str: + range_ = match.group("range") + result = subprocess.run( + ["git-cliff", "-c", config, "--", range_], + capture_output=True, + text=True, + ) + if result.returncode != 0: + sys.stderr.write(result.stderr) + raise SystemExit(f"git-cliff failed for range {range_}") + details = result.stdout.replace("\r\n", "\n").replace("\r", "\n").strip() + if not details: + details = "_No fork-only changes yet._" + details = details.replace("\n", newline) + return f"{newline}{details}{newline}" + +updated = pattern.sub(render, text) +if updated == text: + print("CHANGELOG.md is up to date." if check else "No changelog updates needed.") + sys.exit(0) + +if check: + print("CHANGELOG.md is out of date. 
Run scripts/gen-changelog.sh.") + sys.exit(1) + +pathlib.Path(changelog).write_text(updated) +print("Updated CHANGELOG.md") +PY diff --git a/scripts/stage_npm_packages.py b/scripts/stage_npm_packages.py index f87a75815fa..d1fc1c3a865 100755 --- a/scripts/stage_npm_packages.py +++ b/scripts/stage_npm_packages.py @@ -17,7 +17,7 @@ BUILD_SCRIPT = REPO_ROOT / "codex-cli" / "scripts" / "build_npm_package.py" INSTALL_NATIVE_DEPS = REPO_ROOT / "codex-cli" / "scripts" / "install_native_deps.py" WORKFLOW_NAME = ".github/workflows/rust-release.yml" -GITHUB_REPO = "openai/codex" +GITHUB_REPO = "Ixe1/codexel" _SPEC = importlib.util.spec_from_file_location("codex_build_npm_package", BUILD_SCRIPT) if _SPEC is None or _SPEC.loader is None: diff --git a/sdk/typescript/README.md b/sdk/typescript/README.md index 09e8a513d3b..e433a3b5c22 100644 --- a/sdk/typescript/README.md +++ b/sdk/typescript/README.md @@ -2,12 +2,12 @@ Embed the Codex agent in your workflows and apps. -The TypeScript SDK wraps the bundled `codex` binary. It spawns the CLI and exchanges JSONL events over stdin/stdout. +The TypeScript SDK wraps the bundled `codexel` binary. It spawns the CLI and exchanges JSONL events over stdin/stdout. ## Installation ```bash -npm install @openai/codex-sdk +npm install @ixe1/codexel-sdk ``` Requires Node.js 18+. @@ -15,7 +15,7 @@ Requires Node.js 18+. ## Quickstart ```typescript -import { Codex } from "@openai/codex-sdk"; +import { Codex } from "@ixe1/codexel-sdk"; const codex = new Codex(); const thread = codex.startThread(); @@ -85,7 +85,7 @@ console.log(turn.finalResponse); ### Attaching images -Provide structured input entries when you need to include images alongside text. Text entries are concatenated into the final prompt while image entries are passed to the Codex CLI via `--image`. +Provide structured input entries when you need to include images alongside text. Text entries are concatenated into the final prompt while image entries are passed to Codexel via `--image`. 
```typescript const turn = await thread.run([ @@ -97,7 +97,7 @@ const turn = await thread.run([ ### Resuming an existing thread -Threads are persisted in `~/.codex/sessions`. If you lose the in-memory `Thread` object, reconstruct it with `resumeThread()` and keep going. +Threads are persisted in `~/.codexel/sessions` (or legacy `~/.codex/sessions`). If you lose the in-memory `Thread` object, reconstruct it with `resumeThread()` and keep going. ```typescript const savedThreadId = process.env.CODEX_THREAD_ID!; @@ -107,7 +107,7 @@ await thread.run("Implement the fix"); ### Working directory controls -Codex runs in the current working directory by default. To avoid unrecoverable errors, Codex requires the working directory to be a Git repository. You can skip the Git repository check by passing the `skipGitRepoCheck` option when creating a thread. +Codexel runs in the current working directory by default. To avoid unrecoverable errors, Codexel requires the working directory to be a Git repository. You can skip the Git repository check by passing the `skipGitRepoCheck` option when creating a thread. ```typescript const thread = codex.startThread({ @@ -116,9 +116,9 @@ const thread = codex.startThread({ }); ``` -### Controlling the Codex CLI environment +### Controlling the Codexel environment -By default, the Codex CLI inherits the Node.js process environment. Provide the optional `env` parameter when instantiating the +By default, Codexel inherits the Node.js process environment. Provide the optional `env` parameter when instantiating the `Codex` client to fully control which variables the CLI receives—useful for sandboxed hosts like Electron apps. 
```typescript diff --git a/sdk/typescript/package.json b/sdk/typescript/package.json index 55ecd1abf38..a0b929e380d 100644 --- a/sdk/typescript/package.json +++ b/sdk/typescript/package.json @@ -1,10 +1,10 @@ { - "name": "@openai/codex-sdk", + "name": "@ixe1/codexel-sdk", "version": "0.0.0-dev", "description": "TypeScript SDK for Codex APIs.", "repository": { "type": "git", - "url": "git+https://github.com/openai/codex.git", + "url": "git+https://github.com/Ixe1/codexel.git", "directory": "sdk/typescript" }, "keywords": [ diff --git a/sdk/typescript/samples/basic_streaming.ts b/sdk/typescript/samples/basic_streaming.ts index f9ccbe40d13..78b8c264d86 100755 --- a/sdk/typescript/samples/basic_streaming.ts +++ b/sdk/typescript/samples/basic_streaming.ts @@ -3,8 +3,8 @@ import { createInterface } from "node:readline/promises"; import { stdin as input, stdout as output } from "node:process"; -import { Codex } from "@openai/codex-sdk"; -import type { ThreadEvent, ThreadItem } from "@openai/codex-sdk"; +import { Codex } from "@ixe1/codexel-sdk"; +import type { ThreadEvent, ThreadItem } from "@ixe1/codexel-sdk"; import { codexPathOverride } from "./helpers.ts"; const codex = new Codex({ codexPathOverride: codexPathOverride() }); diff --git a/sdk/typescript/samples/structured_output.ts b/sdk/typescript/samples/structured_output.ts index 60063c10fa3..4a800a3b8e0 100755 --- a/sdk/typescript/samples/structured_output.ts +++ b/sdk/typescript/samples/structured_output.ts @@ -1,6 +1,6 @@ #!/usr/bin/env -S NODE_NO_WARNINGS=1 pnpm ts-node-esm --files -import { Codex } from "@openai/codex-sdk"; +import { Codex } from "@ixe1/codexel-sdk"; import { codexPathOverride } from "./helpers.ts"; diff --git a/sdk/typescript/samples/structured_output_zod.ts b/sdk/typescript/samples/structured_output_zod.ts index 917bc39114f..91ee773eb6c 100755 --- a/sdk/typescript/samples/structured_output_zod.ts +++ b/sdk/typescript/samples/structured_output_zod.ts @@ -1,6 +1,6 @@ #!/usr/bin/env -S 
NODE_NO_WARNINGS=1 pnpm ts-node-esm --files -import { Codex } from "@openai/codex-sdk"; +import { Codex } from "@ixe1/codexel-sdk"; import { codexPathOverride } from "./helpers.ts"; import z from "zod"; import zodToJsonSchema from "zod-to-json-schema"; diff --git a/sdk/typescript/src/codex.ts b/sdk/typescript/src/codex.ts index a42159232ef..d2d03c57083 100644 --- a/sdk/typescript/src/codex.ts +++ b/sdk/typescript/src/codex.ts @@ -27,7 +27,7 @@ export class Codex { /** * Resumes a conversation with an agent based on the thread id. - * Threads are persisted in ~/.codex/sessions. + * Threads are persisted in ~/.codexel/sessions (or legacy ~/.codex/sessions). * * @param id The id of the thread to resume. * @returns A new thread instance. diff --git a/sdk/typescript/src/codexOptions.ts b/sdk/typescript/src/codexOptions.ts index 31fb637d4ce..c31eadfc968 100644 --- a/sdk/typescript/src/codexOptions.ts +++ b/sdk/typescript/src/codexOptions.ts @@ -3,7 +3,7 @@ export type CodexOptions = { baseUrl?: string; apiKey?: string; /** - * Environment variables passed to the Codex CLI process. When provided, the SDK + * Environment variables passed to the Codexel process. When provided, the SDK * will not inherit variables from `process.env`. */ env?: Record<string, string>; diff --git a/sdk/typescript/src/events.ts b/sdk/typescript/src/events.ts index b8adcfb4b0b..f8404d964be 100644 --- a/sdk/typescript/src/events.ts +++ b/sdk/typescript/src/events.ts @@ -68,7 +68,7 @@ export type ThreadErrorEvent = { message: string; }; -/** Top-level JSONL events emitted by codex exec. */ +/** Top-level JSONL events emitted by codexel exec. 
*/ export type ThreadEvent = | ThreadStartedEvent | TurnStartedEvent diff --git a/sdk/typescript/src/exec.ts b/sdk/typescript/src/exec.ts index fb7ed54ad18..e162ae35037 100644 --- a/sdk/typescript/src/exec.ts +++ b/sdk/typescript/src/exec.ts @@ -171,7 +171,7 @@ export class CodexExec { } else { const stderrBuffer = Buffer.concat(stderrChunks); reject( - new Error(`Codex Exec exited with code ${code}: ${stderrBuffer.toString("utf8")}`), + new Error(`Codexel exec exited with code ${code}: ${stderrBuffer.toString("utf8")}`), ); } }); @@ -246,8 +246,8 @@ function findCodexPath() { const vendorRoot = path.join(scriptDirName, "..", "vendor"); const archRoot = path.join(vendorRoot, targetTriple); - const codexBinaryName = process.platform === "win32" ? "codex.exe" : "codex"; - const binaryPath = path.join(archRoot, "codex", codexBinaryName); + const codexelBinaryName = process.platform === "win32" ? "codexel.exe" : "codexel"; + const binaryPath = path.join(archRoot, "codex", codexelBinaryName); return binaryPath; } diff --git a/sdk/typescript/tests/abort.test.ts b/sdk/typescript/tests/abort.test.ts index d79319d654f..114312b8b24 100644 --- a/sdk/typescript/tests/abort.test.ts +++ b/sdk/typescript/tests/abort.test.ts @@ -14,7 +14,16 @@ import { startResponsesTestProxy, } from "./responsesProxy"; -const codexExecPath = path.join(process.cwd(), "..", "..", "codex-rs", "target", "debug", "codex"); +const codexExecBinary = process.platform === "win32" ? 
"codexel.exe" : "codexel"; +const codexExecPath = path.join( + process.cwd(), + "..", + "..", + "codex-rs", + "target", + "debug", + codexExecBinary, +); function* infiniteShellCall(): Generator { while (true) { diff --git a/sdk/typescript/tests/run.test.ts b/sdk/typescript/tests/run.test.ts index fcd9fea8381..ce344f5c699 100644 --- a/sdk/typescript/tests/run.test.ts +++ b/sdk/typescript/tests/run.test.ts @@ -17,7 +17,16 @@ import { SseResponseBody, } from "./responsesProxy"; -const codexExecPath = path.join(process.cwd(), "..", "..", "codex-rs", "target", "debug", "codex"); +const codexExecBinary = process.platform === "win32" ? "codexel.exe" : "codexel"; +const codexExecPath = path.join( + process.cwd(), + "..", + "..", + "codex-rs", + "target", + "debug", + codexExecBinary, +); describe("Codex", () => { it("returns thread events", async () => { @@ -348,7 +357,7 @@ describe("Codex", () => { } }); - it("allows overriding the env passed to the Codex CLI", async () => { + it("allows overriding the env passed to Codexel", async () => { const { url, close } = await startResponsesTestProxy({ statusCode: 200, responseBodies: [ diff --git a/sdk/typescript/tests/runStreamed.test.ts b/sdk/typescript/tests/runStreamed.test.ts index 6cdf22fea5c..919cdd9446c 100644 --- a/sdk/typescript/tests/runStreamed.test.ts +++ b/sdk/typescript/tests/runStreamed.test.ts @@ -13,7 +13,16 @@ import { startResponsesTestProxy, } from "./responsesProxy"; -const codexExecPath = path.join(process.cwd(), "..", "..", "codex-rs", "target", "debug", "codex"); +const codexExecBinary = process.platform === "win32" ? 
"codexel.exe" : "codexel"; +const codexExecPath = path.join( + process.cwd(), + "..", + "..", + "codex-rs", + "target", + "debug", + codexExecBinary, +); describe("Codex", () => { it("returns thread events", async () => { diff --git a/shell-tool-mcp/README.md b/shell-tool-mcp/README.md index 16a8492656e..09b6b89d942 100644 --- a/shell-tool-mcp/README.md +++ b/shell-tool-mcp/README.md @@ -1,8 +1,8 @@ -# @openai/codex-shell-tool-mcp +# @ixe1/codexel-shell-tool-mcp -**Note: This MCP server is still experimental. When using it with Codex CLI, ensure the CLI version matches the MCP server version.** +**Note: This MCP server is still experimental. When using it with Codexel, ensure the CLI version matches the MCP server version.** -`@openai/codex-shell-tool-mcp` is an MCP server that provides a tool named `shell` that runs a shell command inside a sandboxed instance of Bash. This special instance of Bash intercepts requests to spawn new processes (specifically, [`execve(2)`](https://man7.org/linux/man-pages/man2/execve.2.html) calls). For each call, it makes a request back to the MCP server to determine whether to allow the proposed command to execute. It also has the option of _escalating_ the command to run unprivileged outside of the sandbox governing the Bash process. +`@ixe1/codexel-shell-tool-mcp` is an MCP server that provides a tool named `shell` that runs a shell command inside a sandboxed instance of Bash. This special instance of Bash intercepts requests to spawn new processes (specifically, [`execve(2)`](https://man7.org/linux/man-pages/man2/execve.2.html) calls). For each call, it makes a request back to the MCP server to determine whether to allow the proposed command to execute. It also has the option of _escalating_ the command to run unprivileged outside of the sandbox governing the Bash process. The user can use [Codex `.rules`](https://developers.openai.com/codex/local-config#rules-preview) files to define how a command should be handled. 
The action to take is determined by the `decision` parameter of a matching rule as follows: @@ -19,24 +19,24 @@ When a software agent asks if it is safe to run a command like `ls`, without mor - There could be another executable named `ls` that appears before `/bin/ls` on the `$PATH`. - `ls` could be mapped to a shell alias or function. -Because `@openai/codex-shell-tool-mcp` intercepts `execve(2)` calls directly, it _always_ knows the full path to the program being executed. In turn, this makes it possible to provide stronger guarantees on how [Codex `.rules`](https://developers.openai.com/codex/local-config#rules-preview) are enforced. +Because `@ixe1/codexel-shell-tool-mcp` intercepts `execve(2)` calls directly, it _always_ knows the full path to the program being executed. In turn, this makes it possible to provide stronger guarantees on how [Codex `.rules`](https://developers.openai.com/codex/local-config#rules-preview) are enforced. ## Usage First, verify that you can download and run the MCP executable: ```bash -npx -y @openai/codex-shell-tool-mcp --version +npx -y @ixe1/codexel-shell-tool-mcp --version ``` -To test out the MCP with a one-off invocation of Codex CLI, it is important to _disable_ the default shell tool in addition to enabling the MCP so Codex has exactly one shell-like tool available to it: +To test out the MCP with a one-off invocation of Codexel, it is important to _disable_ the default shell tool in addition to enabling the MCP so Codexel has exactly one shell-like tool available to it: ```bash -codex --disable shell_tool \ - --config 'mcp_servers.bash={command = "npx", args = ["-y", "@openai/codex-shell-tool-mcp"]}' +codexel --disable shell_tool \ + --config 'mcp_servers.bash={command = "npx", args = ["-y", "@ixe1/codexel-shell-tool-mcp"]}' ``` -To configure this permanently so you can use the MCP while running `codex` without additional command-line flags, add the following to your `~/.codex/config.toml`: +To configure this permanently 
so you can use the MCP while running `codexel` without additional command-line flags, add the following to your `~/.codexel/config.toml`: ```toml [features] @@ -44,10 +44,10 @@ shell_tool = false [mcp_servers.shell-tool] command = "npx" -args = ["-y", "@openai/codex-shell-tool-mcp"] +args = ["-y", "@ixe1/codexel-shell-tool-mcp"] ``` -Note when the `@openai/codex-shell-tool-mcp` launcher runs, it selects the appropriate native binary to run based on the host OS/architecture. For the Bash wrapper, it inspects `/etc/os-release` on Linux or the Darwin major version on macOS to try to find the best match it has available. See [`bashSelection.ts`](https://github.com/openai/codex/blob/main/shell-tool-mcp/src/bashSelection.ts) for details. +Note when the `@ixe1/codexel-shell-tool-mcp` launcher runs, it selects the appropriate native binary to run based on the host OS/architecture. For the Bash wrapper, it inspects `/etc/os-release` on Linux or the Darwin major version on macOS to try to find the best match it has available. See `shell-tool-mcp/src/bashSelection.ts` for details. ## MCP Client Requirements @@ -82,14 +82,14 @@ This capability means the MCP server honors notifications like the following to } ``` -The Codex harness (used by the CLI and the VS Code extension) sends such notifications to MCP servers that declare the `codex/sandbox-state` capability. +The Codexel harness (used by the CLI and the VS Code extension) sends such notifications to MCP servers that declare the `codex/sandbox-state` capability. ## Package Contents -This package wraps the `codex-exec-mcp-server` binary and its helpers so that the shell MCP can be invoked via `npx -y @openai/codex-shell-tool-mcp`. It bundles: +This package wraps the `codex-exec-mcp-server` binary and its helpers so that the shell MCP can be invoked via `npx -y @ixe1/codexel-shell-tool-mcp`. It bundles: - `codex-exec-mcp-server` and `codex-execve-wrapper` built for macOS (arm64, x64) and Linux (musl arm64, musl x64). 
- A patched Bash that honors `BASH_EXEC_WRAPPER`, built for multiple glibc baselines (Ubuntu 24.04/22.04/20.04, Debian 12/11, CentOS-like 9) and macOS (15/14/13). - A launcher (`bin/mcp-server.js`) that picks the correct binaries for the current `process.platform` / `process.arch`, specifying `--execve` and `--bash` for the MCP, as appropriate. -See [the README in the Codex repo](https://github.com/openai/codex/blob/main/codex-rs/exec-server/README.md) for details. +See [the exec-server README](../codex-rs/exec-server/README.md) for details. diff --git a/shell-tool-mcp/package.json b/shell-tool-mcp/package.json index d27c0a0c59d..09eb9a09a97 100644 --- a/shell-tool-mcp/package.json +++ b/shell-tool-mcp/package.json @@ -1,5 +1,5 @@ { - "name": "@openai/codex-shell-tool-mcp", + "name": "@ixe1/codexel-shell-tool-mcp", "version": "0.0.0-dev", "description": "Codex MCP server for the shell tool with patched Bash and exec wrappers.", "license": "Apache-2.0", @@ -16,7 +16,7 @@ ], "repository": { "type": "git", - "url": "git+https://github.com/openai/codex.git", + "url": "git+https://github.com/Ixe1/codexel.git", "directory": "shell-tool-mcp" }, "scripts": {