Compare commits
60 Commits
cli-imp
...
050e00345a
| Author | SHA1 | Date | |
|---|---|---|---|
|
|
050e00345a | ||
|
|
90c8b43267 | ||
|
|
c5b7f4c864 | ||
|
|
28ce63f818 | ||
|
|
eb5b464d03 | ||
|
|
4664e0cfe3 | ||
|
|
63bd58c9b4 | ||
|
|
714c8c2623 | ||
|
|
171260a772 | ||
|
|
a1bca10408 | ||
|
|
491dc52864 | ||
|
|
b9063aa17a | ||
|
|
fc0d9cb1d3 | ||
|
|
c8b47bf8f8 | ||
|
|
a570327a6b | ||
|
|
eef73decb5 | ||
|
|
bb6660178c | ||
|
|
64e73b1cab | ||
|
|
361757568f | ||
|
|
8572f6cc04 | ||
|
|
d0744039ef | ||
|
|
4b372dfb38 | ||
|
|
af8fc4af76 | ||
|
|
96b288ccdd | ||
|
|
d710403567 | ||
|
|
ebf64816c9 | ||
|
|
450951dee1 | ||
|
|
81f049a7fa | ||
|
|
dd00a2b840 | ||
|
|
c6a5461d41 | ||
|
|
a7f86b26e4 | ||
|
|
5ee8b0841c | ||
|
|
7062a3f1fd | ||
|
|
159c490ad7 | ||
|
|
e0041ed4d9 | ||
|
|
a34751bd47 | ||
|
|
0aecbf33c0 | ||
|
|
c10471ddb9 | ||
|
|
cbce4c9f59 | ||
|
|
94435c37f0 | ||
|
|
59f65b127a | ||
|
|
f36e900570 | ||
|
|
e2efc61beb | ||
|
|
2da1a228b3 | ||
|
|
0e65202778 | ||
|
|
f439c42b3d | ||
|
|
4f3ec72923 | ||
|
|
e6771709f1 | ||
|
|
8c86b0dfd7 | ||
|
|
6e55b2470d | ||
|
|
b05922d60b | ||
|
|
11fe02fac9 | ||
|
|
48fbd4bfdb | ||
|
|
9786ef27f5 | ||
|
|
7e0e6a91f2 | ||
|
|
5c2df3df3b | ||
|
|
94c8613420 | ||
|
|
ad4dd6e855 | ||
|
|
83cd16c918 | ||
|
|
fda9cd8835 |
295
.beads/.br_history/issues.20260212_171003.jsonl
Normal file
295
.beads/.br_history/issues.20260212_171003.jsonl
Normal file
File diff suppressed because one or more lines are too long
304
.beads/.br_history/issues.20260212_171103.jsonl
Normal file
304
.beads/.br_history/issues.20260212_171103.jsonl
Normal file
File diff suppressed because one or more lines are too long
312
.beads/.br_history/issues.20260212_211122.jsonl
Normal file
312
.beads/.br_history/issues.20260212_211122.jsonl
Normal file
File diff suppressed because one or more lines are too long
File diff suppressed because one or more lines are too long
@@ -1 +1 @@
|
||||
bd-xsgw
|
||||
bd-2kr0
|
||||
|
||||
99
.claude/plan.md
Normal file
99
.claude/plan.md
Normal file
@@ -0,0 +1,99 @@
|
||||
# Plan: Add Colors to Sync Command Output
|
||||
|
||||
## Current State
|
||||
|
||||
The sync output has three layers, each needing color treatment:
|
||||
|
||||
### Layer 1: Stage Lines (during sync)
|
||||
```
|
||||
✓ Issues 10 issues from 2 projects 4.2s
|
||||
✓ Status 3 statuses updated · 5 seen 4.2s
|
||||
vs/typescript-code 2 issues · 1 statuses updated
|
||||
✓ MRs 5 merge requests from 2 projects 12.3s
|
||||
vs/python-code 3 MRs · 10 discussions
|
||||
✓ Docs 1,200 documents generated 8.1s
|
||||
✓ Embed 3,400 chunks embedded 45.2s
|
||||
```
|
||||
|
||||
**What's uncolored:** icons, labels, numbers, elapsed times, sub-row project paths, failure counts in parentheses.
|
||||
|
||||
### Layer 2: Summary (after sync)
|
||||
```
|
||||
Synced 10 issues and 5 MRs in 42.3s
|
||||
120 discussions · 45 events · 12 diffs · 3 statuses updated
|
||||
1,200 docs regenerated · 3,400 embedded
|
||||
```
|
||||
|
||||
**What's already colored:** headline ("Synced" = green bold, "Sync completed with issues" = warning bold), issue/MR counts (bold), error line (red). Detail lines are all dim.
|
||||
|
||||
### Layer 3: Timing breakdown (`-t` flag)
|
||||
```
|
||||
── Timing ──────────────────────
|
||||
issues .............. 4.2s
|
||||
merge_requests ...... 12.3s
|
||||
```
|
||||
|
||||
**What's already colored:** dots (dim), time (bold), errors (red), rate limits (warning).
|
||||
|
||||
---
|
||||
|
||||
## Color Plan
|
||||
|
||||
Using only existing `Theme` methods — no new colors needed.
|
||||
|
||||
### Stage Lines (`format_stage_line` + callers in sync.rs)
|
||||
|
||||
| Element | Current | Proposed | Theme method |
|
||||
|---------|---------|----------|-------------|
|
||||
| Icon (✓/⚠) | plain | green for success, yellow for warning | `Theme::success()` / `Theme::warning()` |
|
||||
| Label ("Issues", "MRs", etc.) | plain | bold | `Theme::bold()` |
|
||||
| Numbers in summary text | plain | bold | `Theme::bold()` (just the count) |
|
||||
| Elapsed time | plain | muted gray | `Theme::timing()` |
|
||||
| Failure text in parens | plain | warning/error color | `Theme::warning()` |
|
||||
|
||||
### Sub-rows (project breakdown lines)
|
||||
|
||||
| Element | Current | Proposed |
|
||||
|---------|---------|----------|
|
||||
| Project path | dim | `Theme::muted()` (slightly brighter than dim) |
|
||||
| Counts (numbers only) | dim | `Theme::dim()` but numbers in normal weight |
|
||||
| Error/failure counts | dim | `Theme::warning()` |
|
||||
| Middle dots | dim | keep dim (they're separators, should recede) |
|
||||
|
||||
### Summary (`print_sync`)
|
||||
|
||||
| Element | Current | Proposed |
|
||||
|---------|---------|----------|
|
||||
| Issue/MR counts in headline | bold only | `Theme::info()` + bold (cyan numbers pop) |
|
||||
| Time in headline | plain | `Theme::timing()` |
|
||||
| Detail line numbers | all dim | numbers in `Theme::info()`, rest stays dim |
|
||||
| Doc line numbers | all dim | numbers in `Theme::info()`, rest stays dim |
|
||||
| "Already up to date" time | plain | `Theme::timing()` |
|
||||
|
||||
---
|
||||
|
||||
## Files to Change
|
||||
|
||||
1. **`src/cli/progress.rs`** — `format_stage_line()`: apply color to icon, bold to label, `Theme::timing()` to elapsed
|
||||
2. **`src/cli/commands/sync.rs`** —
|
||||
- Pass colored icons to `format_stage_line` / `emit_stage_line` / `emit_stage_block`
|
||||
- Color failure text in `append_failures()`
|
||||
- Color numbers and time in `print_sync()`
|
||||
- Color error/failure counts in sub-row functions (`issue_sub_rows`, `mr_sub_rows`, `status_sub_rows`)
|
||||
|
||||
## Approach
|
||||
|
||||
- `format_stage_line` already receives the icon string — color it before passing
|
||||
- Add a `color_icon` helper that applies success/warning color to the icon glyph
|
||||
- Bold the label in `format_stage_line`
|
||||
- Apply `Theme::timing()` to elapsed in `format_stage_line`
|
||||
- In `append_failures`, wrap failure text in `Theme::warning()`
|
||||
- In `print_sync`, wrap count numbers with `Theme::info().bold()`
|
||||
- In sub-row functions, apply `Theme::warning()` to error/failure parts only (keep rest dim)
|
||||
|
||||
## Non-goals
|
||||
|
||||
- No changes to robot mode (JSON output)
|
||||
- No changes to dry-run output (already reasonably colored)
|
||||
- No new Theme colors — use existing palette
|
||||
- No changes to timing breakdown (already colored)
|
||||
21
.github/workflows/roam.yml
vendored
Normal file
21
.github/workflows/roam.yml
vendored
Normal file
@@ -0,0 +1,21 @@
|
||||
name: Roam Code Analysis
|
||||
on:
|
||||
pull_request:
|
||||
branches: [main, master]
|
||||
permissions:
|
||||
contents: read
|
||||
pull-requests: write
|
||||
jobs:
|
||||
roam:
|
||||
runs-on: ubuntu-latest
|
||||
steps:
|
||||
- uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 0
|
||||
- uses: actions/setup-python@v5
|
||||
with:
|
||||
python-version: "3.12"
|
||||
- run: pip install roam-code
|
||||
- run: roam index
|
||||
- run: roam fitness
|
||||
- run: roam pr-risk --json
|
||||
24
.gitignore
vendored
24
.gitignore
vendored
@@ -1,11 +1,6 @@
|
||||
# Dependencies
|
||||
node_modules/
|
||||
|
||||
# Build output
|
||||
dist/
|
||||
|
||||
# Test coverage
|
||||
coverage/
|
||||
# Rust build output
|
||||
/target
|
||||
**/target/
|
||||
|
||||
# IDE
|
||||
.idea/
|
||||
@@ -25,14 +20,11 @@ Thumbs.db
|
||||
|
||||
# Logs
|
||||
*.log
|
||||
npm-debug.log*
|
||||
yarn-debug.log*
|
||||
yarn-error.log*
|
||||
|
||||
# Local config files
|
||||
lore.config.json
|
||||
|
||||
# beads
|
||||
# beads viewer cache
|
||||
.bv/
|
||||
|
||||
# SQLite databases (local development)
|
||||
@@ -40,7 +32,15 @@ lore.config.json
|
||||
*.db-wal
|
||||
*.db-shm
|
||||
|
||||
# Mock seed data
|
||||
tools/mock-seed/
|
||||
|
||||
# Added by cargo
|
||||
|
||||
/target
|
||||
|
||||
# Profiling / benchmarks
|
||||
perf.data
|
||||
perf.data.old
|
||||
flamegraph.svg
|
||||
*.profraw
|
||||
|
||||
11
.roam/fitness.yaml
Normal file
11
.roam/fitness.yaml
Normal file
@@ -0,0 +1,11 @@
|
||||
rules:
|
||||
- name: No circular imports in core
|
||||
type: dependency
|
||||
source: "src/**"
|
||||
forbidden_target: "tests/**"
|
||||
reason: "Production code should not import test modules"
|
||||
- name: Complexity threshold
|
||||
type: metric
|
||||
metric: cognitive_complexity
|
||||
threshold: 30
|
||||
reason: "Functions above 30 cognitive complexity need refactoring"
|
||||
35
AGENTS.md
35
AGENTS.md
@@ -16,43 +16,10 @@ If I tell you to do something, even if it goes against what follows below, YOU M
|
||||
|
||||
## Version Control: jj-First (CRITICAL)
|
||||
|
||||
**ALWAYS prefer jj (Jujutsu) over git for VCS mutations** (commit, describe, rebase, push, bookmark, undo). This is a colocated repo with both `.jj/` and `.git/`. Only fall back to raw `git` for things jj cannot do (hooks, LFS, submodules, `gh` CLI interop).
|
||||
|
||||
**Exception — read-only inspection:** Use `git status`, `git diff`, `git log` instead of their jj equivalents. In a colocated repo these see accurate data, and unlike jj, they don't create operations that cause divergences when multiple agents run concurrently. See "Parallel Agent VCS Protocol" below.
|
||||
**ALWAYS prefer jj (Jujutsu) over git for all VCS operations.** This is a colocated repo with both `.jj/` and `.git/`. When instructed to use git by anything — even later in this file — use the best jj replacement commands instead. Only fall back to raw `git` for things jj cannot do (hooks, LFS, submodules, `gh` CLI interop).
|
||||
|
||||
See `~/.claude/rules/jj-vcs/` for the full command reference, translation table, revsets, patterns, and recovery recipes.
|
||||
|
||||
### Parallel Agent VCS Protocol (CRITICAL)
|
||||
|
||||
Multiple agents often run concurrently in separate terminal panes, sharing the same repo directory. This requires care because jj's auto-snapshot creates operations on EVERY command — even read-only ones like `jj status`. Concurrent jj commands fork from the same parent operation and create **divergent changes**.
|
||||
|
||||
**The rule: use git for reads, jj for writes.**
|
||||
|
||||
In a colocated repo, git reads see accurate data because jj keeps `.git/` in sync.
|
||||
|
||||
| Operation | Use | Why |
|
||||
|-----------|-----|-----|
|
||||
| Check status | `git status` | No jj operation created |
|
||||
| View diff | `git diff` | No jj operation created |
|
||||
| Browse history | `git log` | No jj operation created |
|
||||
| Commit work | `jj commit -m "msg"` | jj mutation (better UX) |
|
||||
| Update description | `jj describe -m "msg"` | jj mutation |
|
||||
| Rebase | `jj rebase -d trunk()` | jj mutation |
|
||||
| Push | `jj git push -b <name>` | jj mutation |
|
||||
| Manage bookmarks | `jj bookmark set ...` | jj mutation |
|
||||
| Undo a mistake | `jj undo` | jj mutation |
|
||||
|
||||
**NEVER run `jj status`, `jj diff`, `jj log`, or `jj show` when other agents may be active** — these trigger snapshots that cause divergences.
|
||||
|
||||
**If using Claude Code's built-in agent teams:** Only the team lead runs ANY VCS commands (git or jj). Workers only edit files via Edit/Write tools and do NOT run "Landing the Plane".
|
||||
|
||||
**Resolving divergences if they occur:**
|
||||
|
||||
```bash
|
||||
jj log -r 'divergent()' # Find divergent changes
|
||||
jj abandon <unwanted-commit-id> # Keep the version you want
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Irreversible Git & Filesystem Actions — DO NOT EVER BREAK GLASS
|
||||
|
||||
173
Cargo.lock
generated
173
Cargo.lock
generated
@@ -169,6 +169,23 @@ version = "1.0.4"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801"
|
||||
|
||||
[[package]]
|
||||
name = "charmed-lipgloss"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "45e10db01f5eaea11d98ca5c5cffd8cc4add7ac56d0128d91ba1f2a3757b6c5a"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"colored",
|
||||
"crossterm",
|
||||
"serde",
|
||||
"serde_json",
|
||||
"thiserror",
|
||||
"toml",
|
||||
"tracing",
|
||||
"unicode-width 0.1.14",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "chrono"
|
||||
version = "0.4.43"
|
||||
@@ -239,14 +256,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75"
|
||||
|
||||
[[package]]
|
||||
name = "comfy-table"
|
||||
version = "7.2.2"
|
||||
name = "colored"
|
||||
version = "2.2.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "958c5d6ecf1f214b4c2bbbbf6ab9523a864bd136dcf71a7e8904799acfe1ad47"
|
||||
checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c"
|
||||
dependencies = [
|
||||
"crossterm",
|
||||
"unicode-segmentation",
|
||||
"unicode-width",
|
||||
"lazy_static",
|
||||
"windows-sys 0.52.0",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
@@ -258,10 +274,19 @@ dependencies = [
|
||||
"encode_unicode",
|
||||
"libc",
|
||||
"once_cell",
|
||||
"unicode-width",
|
||||
"unicode-width 0.2.2",
|
||||
"windows-sys 0.61.2",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "convert_case"
|
||||
version = "0.10.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "633458d4ef8c78b72454de2d54fd6ab2e60f9e02be22f3c6104cdc8a4e0fceb9"
|
||||
dependencies = [
|
||||
"unicode-segmentation",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "core-foundation"
|
||||
version = "0.9.4"
|
||||
@@ -319,9 +344,13 @@ checksum = "d8b9f2e4c67f833b660cdb0a3523065869fb35570177239812ed4c905aeff87b"
|
||||
dependencies = [
|
||||
"bitflags",
|
||||
"crossterm_winapi",
|
||||
"derive_more",
|
||||
"document-features",
|
||||
"mio",
|
||||
"parking_lot",
|
||||
"rustix",
|
||||
"signal-hook",
|
||||
"signal-hook-mio",
|
||||
"winapi",
|
||||
]
|
||||
|
||||
@@ -371,6 +400,28 @@ dependencies = [
|
||||
"powerfmt",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_more"
|
||||
version = "2.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d751e9e49156b02b44f9c1815bcb94b984cdcc4396ecc32521c739452808b134"
|
||||
dependencies = [
|
||||
"derive_more-impl",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "derive_more-impl"
|
||||
version = "2.1.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "799a97264921d8623a957f6c3b9011f3b5492f557bbb7a5a19b7fa6d06ba8dcb"
|
||||
dependencies = [
|
||||
"convert_case",
|
||||
"proc-macro2",
|
||||
"quote",
|
||||
"rustc_version",
|
||||
"syn",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "dialoguer"
|
||||
version = "0.12.0"
|
||||
@@ -976,7 +1027,7 @@ checksum = "9375e112e4b463ec1b1c6c011953545c65a30164fbab5b581df32b3abf0dcb88"
|
||||
dependencies = [
|
||||
"console",
|
||||
"portable-atomic",
|
||||
"unicode-width",
|
||||
"unicode-width 0.2.2",
|
||||
"unit-prefix",
|
||||
"web-time",
|
||||
]
|
||||
@@ -1106,13 +1157,13 @@ checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897"
|
||||
|
||||
[[package]]
|
||||
name = "lore"
|
||||
version = "0.6.2"
|
||||
version = "0.8.3"
|
||||
dependencies = [
|
||||
"async-stream",
|
||||
"charmed-lipgloss",
|
||||
"chrono",
|
||||
"clap",
|
||||
"clap_complete",
|
||||
"comfy-table",
|
||||
"console",
|
||||
"dialoguer",
|
||||
"dirs",
|
||||
@@ -1181,6 +1232,7 @@ source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "a69bcab0ad47271a0234d9422b131806bf3968021e5dc9328caf2d4cd58557fc"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"log",
|
||||
"wasi",
|
||||
"windows-sys 0.61.2",
|
||||
]
|
||||
@@ -1574,6 +1626,15 @@ dependencies = [
|
||||
"sqlite-wasm-rs",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustc_version"
|
||||
version = "0.4.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "cfcb3a22ef46e85b45de6ee7e79d063319ebb6594faafcf1c225ea92ab6e9b92"
|
||||
dependencies = [
|
||||
"semver",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "rustix"
|
||||
version = "1.1.3"
|
||||
@@ -1670,6 +1731,12 @@ dependencies = [
|
||||
"libc",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "semver"
|
||||
version = "1.0.27"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d767eb0aabc880b29956c35734170f26ed551a859dbd361d140cdbeca61ab1e2"
|
||||
|
||||
[[package]]
|
||||
name = "serde"
|
||||
version = "1.0.228"
|
||||
@@ -1713,6 +1780,15 @@ dependencies = [
|
||||
"zmij",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_spanned"
|
||||
version = "0.6.9"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "serde_urlencoded"
|
||||
version = "0.7.1"
|
||||
@@ -1757,6 +1833,27 @@ version = "1.3.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "0fda2ff0d084019ba4d7c6f371c95d8fd75ce3524c3cb8fb653a3023f6323e64"
|
||||
|
||||
[[package]]
|
||||
name = "signal-hook"
|
||||
version = "0.3.18"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d881a16cf4426aa584979d30bd82cb33429027e42122b169753d6ef1085ed6e2"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"signal-hook-registry",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "signal-hook-mio"
|
||||
version = "0.2.5"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "b75a19a7a740b25bc7944bdee6172368f988763b744e3d4dfe753f6b4ece40cc"
|
||||
dependencies = [
|
||||
"libc",
|
||||
"mio",
|
||||
"signal-hook",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "signal-hook-registry"
|
||||
version = "1.4.5"
|
||||
@@ -2028,6 +2125,47 @@ dependencies = [
|
||||
"tokio",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml"
|
||||
version = "0.8.23"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362"
|
||||
dependencies = [
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
"toml_edit",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_datetime"
|
||||
version = "0.6.11"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c"
|
||||
dependencies = [
|
||||
"serde",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_edit"
|
||||
version = "0.22.27"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a"
|
||||
dependencies = [
|
||||
"indexmap",
|
||||
"serde",
|
||||
"serde_spanned",
|
||||
"toml_datetime",
|
||||
"toml_write",
|
||||
"winnow",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "toml_write"
|
||||
version = "0.1.2"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801"
|
||||
|
||||
[[package]]
|
||||
name = "tower"
|
||||
version = "0.5.3"
|
||||
@@ -2183,6 +2321,12 @@ version = "1.12.0"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-width"
|
||||
version = "0.1.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af"
|
||||
|
||||
[[package]]
|
||||
name = "unicode-width"
|
||||
version = "0.2.2"
|
||||
@@ -2611,6 +2755,15 @@ version = "0.53.1"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650"
|
||||
|
||||
[[package]]
|
||||
name = "winnow"
|
||||
version = "0.7.14"
|
||||
source = "registry+https://github.com/rust-lang/crates.io-index"
|
||||
checksum = "5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829"
|
||||
dependencies = [
|
||||
"memchr",
|
||||
]
|
||||
|
||||
[[package]]
|
||||
name = "wiremock"
|
||||
version = "0.6.5"
|
||||
|
||||
@@ -1,6 +1,6 @@
|
||||
[package]
|
||||
name = "lore"
|
||||
version = "0.6.2"
|
||||
version = "0.8.3"
|
||||
edition = "2024"
|
||||
description = "Gitlore - Local GitLab data management with semantic search"
|
||||
authors = ["Taylor Eernisse"]
|
||||
@@ -25,7 +25,7 @@ clap_complete = "4"
|
||||
dialoguer = "0.12"
|
||||
console = "0.16"
|
||||
indicatif = "0.18"
|
||||
comfy-table = "7"
|
||||
lipgloss = { package = "charmed-lipgloss", version = "0.1", default-features = false, features = ["native"] }
|
||||
open = "5"
|
||||
|
||||
# HTTP
|
||||
|
||||
425
PROPOSED_CODE_FILE_REORGANIZATION_PLAN.md
Normal file
425
PROPOSED_CODE_FILE_REORGANIZATION_PLAN.md
Normal file
@@ -0,0 +1,425 @@
|
||||
# Proposed Code File Reorganization Plan
|
||||
|
||||
## Executive Summary
|
||||
|
||||
The codebase is 79 Rust source files / 46K lines across 7 top-level modules. Most modules (`gitlab/`, `embedding/`, `search/`, `documents/`, `ingestion/`) are well-organized. The pain points are:
|
||||
|
||||
1. **`core/` is a grab-bag** — 22 files mixing infrastructure, domain logic, DB operations, and an entire timeline pipeline
|
||||
2. **`main.rs` is 2713 lines** — ~30 handler functions that bridge CLI args to commands
|
||||
3. **`cli/mod.rs` is 949 lines** — every clap argument struct is packed into one file
|
||||
4. **Giant command files** — `who.rs` (6067 lines), `list.rs` (2931 lines) are unwieldy
|
||||
|
||||
This plan is organized into **three tiers** based on impact-to-risk ratio. Tier 1 changes are "no-brainers" — they reduce confusion with minimal import churn. Tier 2 changes are valuable but involve more cross-cutting import updates. Tier 3 changes are "maybe later" — they'd be nice but the juice might not be worth the squeeze right now.
|
||||
|
||||
---
|
||||
|
||||
## Current Structure (Annotated)
|
||||
|
||||
```
|
||||
src/
|
||||
├── main.rs (2713 lines) ← dispatch + ~30 handler functions + error helpers
|
||||
├── lib.rs (9 lines)
|
||||
├── cli/
|
||||
│ ├── mod.rs (949 lines) ← ALL clap arg structs crammed here
|
||||
│ ├── autocorrect.rs (945 lines)
|
||||
│ ├── progress.rs (92 lines)
|
||||
│ ├── robot.rs (111 lines)
|
||||
│ └── commands/
|
||||
│ ├── mod.rs (50 lines) — re-exports
|
||||
│ ├── auth_test.rs
|
||||
│ ├── count.rs (406 lines)
|
||||
│ ├── doctor.rs (576 lines)
|
||||
│ ├── drift.rs (642 lines)
|
||||
│ ├── embed.rs
|
||||
│ ├── generate_docs.rs (320 lines)
|
||||
│ ├── ingest.rs (1064 lines)
|
||||
│ ├── init.rs (174 lines)
|
||||
│ ├── list.rs (2931 lines) ← handles issues, MRs, AND notes listing
|
||||
│ ├── search.rs (418 lines)
|
||||
│ ├── show.rs (1377 lines)
|
||||
│ ├── stats.rs (505 lines)
|
||||
│ ├── sync_status.rs (454 lines)
|
||||
│ ├── sync.rs (576 lines)
|
||||
│ ├── timeline.rs (488 lines)
|
||||
│ └── who.rs (6067 lines) ← 5 sub-modes: expert, workload, active, overlap, reviews
|
||||
├── core/
|
||||
│ ├── mod.rs (25 lines)
|
||||
│ ├── backoff.rs ← retry logic (used by ingestion)
|
||||
│ ├── config.rs (789 lines) ← configuration types
|
||||
│ ├── db.rs (970 lines) ← connection + 22 migrations
|
||||
│ ├── dependent_queue.rs (330 lines) ← job queue (used by ingestion orchestrator)
|
||||
│ ├── error.rs (295 lines) ← error enum + exit codes
|
||||
│ ├── events_db.rs (199 lines) ← resource event upserts (used by ingestion)
|
||||
│ ├── lock.rs (228 lines) ← filesystem sync lock
|
||||
│ ├── logging.rs (179 lines) ← tracing filter builders
|
||||
│ ├── metrics.rs (566 lines) ← tracing-based stage timing
|
||||
│ ├── note_parser.rs (563 lines) ← cross-ref extraction from note bodies
|
||||
│ ├── paths.rs ← config/db/log file path resolution
|
||||
│ ├── payloads.rs (204 lines) ← raw JSON payload storage
|
||||
│ ├── project.rs (274 lines) ← fuzzy project resolution from DB
|
||||
│ ├── references.rs (551 lines) ← entity cross-reference extraction
|
||||
│ ├── shutdown.rs ← graceful shutdown via tokio signal
|
||||
│ ├── sync_run.rs (218 lines) ← sync run recording to DB
|
||||
│ ├── time.rs ← time conversion utilities
|
||||
│ ├── timeline.rs (284 lines) ← timeline types + EntityRef
|
||||
│ ├── timeline_collect.rs (695 lines) ← Stage 4: collect events from DB
|
||||
│ ├── timeline_expand.rs (557 lines) ← Stage 3: expand via cross-refs
|
||||
│ └── timeline_seed.rs (552 lines) ← Stage 1: FTS search seeding
|
||||
├── documents/ ← well-organized, 3 focused files
|
||||
├── embedding/ ← well-organized, 6 focused files
|
||||
├── gitlab/ ← well-organized, with transformers/ subdir
|
||||
├── ingestion/ ← well-organized, 8 focused files
|
||||
└── search/ ← well-organized, 5 focused files
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Tier 1: No-Brainers (Do First)
|
||||
|
||||
### 1.1 Extract `timeline/` from `core/`
|
||||
|
||||
**What:** Move the 4 timeline files into their own top-level module `src/timeline/`.
|
||||
|
||||
**Current location:**
|
||||
- `core/timeline.rs` (284 lines) — types: `EntityRef`, `ExpandedEntityRef`, `TimelineEvent`, `TimelineEventType`, etc.
|
||||
- `core/timeline_seed.rs` (552 lines) — Stage 1: FTS-based seeding
|
||||
- `core/timeline_expand.rs` (557 lines) — Stage 3: cross-reference expansion
|
||||
- `core/timeline_collect.rs` (695 lines) — Stage 4: event collection from DB
|
||||
|
||||
**New structure:**
|
||||
```
|
||||
src/timeline/
|
||||
├── mod.rs ← types (from timeline.rs) + re-exports
|
||||
├── seed.rs ← from timeline_seed.rs
|
||||
├── expand.rs ← from timeline_expand.rs
|
||||
└── collect.rs ← from timeline_collect.rs
|
||||
```
|
||||
|
||||
**Rationale:** These 4 files form a cohesive 5-stage pipeline (SEED→HYDRATE→EXPAND→COLLECT→RENDER). They have nothing to do with "core" infrastructure like `db.rs`, `config.rs`, or `error.rs`. They only import from `core::error`, `core::time`, and `search::fts` — all of which remain accessible via `crate::core::*` and `crate::search::*` after the move.
|
||||
|
||||
**Import changes needed:**
|
||||
- `cli/commands/timeline.rs`: `use crate::core::timeline::*` → `use crate::timeline::*`, same for `timeline_seed`, `timeline_expand`, `timeline_collect`
|
||||
- `core/mod.rs`: remove the 4 `pub mod timeline*` lines
|
||||
- `lib.rs`: add `pub mod timeline;`
|
||||
|
||||
**Risk: LOW** — Only 1 consumer (`cli/commands/timeline.rs`) + internal cross-references between the 4 files.
|
||||
|
||||
---
|
||||
|
||||
### 1.2 Extract `xref/` (cross-reference extraction) from `core/`
|
||||
|
||||
**What:** Move `note_parser.rs` and `references.rs` into `src/xref/`.
|
||||
|
||||
**Current location:**
|
||||
- `core/note_parser.rs` (563 lines) — parses note bodies for "mentioned in group/repo#123" patterns, persists to `note_cross_references` table
|
||||
- `core/references.rs` (551 lines) — extracts entity references from state events and closing MRs, writes to `entity_references` table
|
||||
|
||||
**New structure:**
|
||||
```
|
||||
src/xref/
|
||||
├── mod.rs ← re-exports
|
||||
├── note_parser.rs ← from core/note_parser.rs
|
||||
└── references.rs ← from core/references.rs
|
||||
```
|
||||
|
||||
**Rationale:** These files implement a specific domain concept — extracting and persisting cross-references between issues and MRs. They are not "core infrastructure." They're consumed by `ingestion/orchestrator.rs` for the cross-reference extraction phase, and the data they produce is consumed by the timeline pipeline. Putting them in their own module makes the data flow clearer: `ingestion → xref → timeline`.
|
||||
|
||||
**Import changes needed:**
|
||||
- `ingestion/orchestrator.rs`: `use crate::core::references::*` → `use crate::xref::references::*`
|
||||
- `ingestion/orchestrator.rs`: `use crate::core::note_parser::*` (if used directly — needs verification) → `use crate::xref::*`
|
||||
- `core/mod.rs`: remove `pub mod note_parser; pub mod references;`
|
||||
- `lib.rs`: add `pub mod xref;`
|
||||
- Internal: the files use `super::error::Result` and `super::time::now_ms` which become `crate::core::error::Result` and `crate::core::time::now_ms`
|
||||
|
||||
**Risk: LOW** — 2-3 consumers at most. The files already use `super::` internally which just needs updating to `crate::core::`.
|
||||
|
||||
---
|
||||
|
||||
## Tier 2: Good Improvements (Do After Tier 1)
|
||||
|
||||
### 2.1 Group ingestion-adjacent DB operations
|
||||
|
||||
**What:** Move `events_db.rs`, `dependent_queue.rs`, `payloads.rs`, and `sync_run.rs` from `core/` into `ingestion/` since they exclusively serve the ingestion pipeline.
|
||||
|
||||
**Current consumers:**
|
||||
- `events_db.rs` → written during ingestion (resource event upserts); also read by `cli/commands/count.rs` (for event counts)
|
||||
- `dependent_queue.rs` → only used by `ingestion/orchestrator.rs` and `main.rs` (to release locked jobs)
|
||||
- `payloads.rs` → only used by `ingestion/discussions.rs`, `ingestion/issues.rs`, `ingestion/merge_requests.rs`, `ingestion/mr_discussions.rs`
|
||||
- `sync_run.rs` → only used by `cli/commands/sync.rs` and `cli/commands/sync_status.rs`
|
||||
|
||||
**New structure:**
|
||||
```
|
||||
src/ingestion/
|
||||
├── (existing files...)
|
||||
├── events_db.rs ← from core/events_db.rs
|
||||
├── dependent_queue.rs ← from core/dependent_queue.rs
|
||||
├── payloads.rs ← from core/payloads.rs
|
||||
└── sync_run.rs ← from core/sync_run.rs
|
||||
```
|
||||
|
||||
**Rationale:** All 4 files exist to support the ingestion pipeline:
|
||||
- `events_db.rs` upserts resource state/label/milestone events fetched during ingestion
|
||||
- `dependent_queue.rs` manages the job queue that drives incremental discussion fetching
|
||||
- `payloads.rs` stores the raw JSON payloads fetched from GitLab
|
||||
- `sync_run.rs` records when syncs start/finish and their metrics
|
||||
|
||||
When you're looking for "how does ingestion work?", you'd naturally look in `ingestion/`. Having these scattered in `core/` requires knowing the hidden dependency.
|
||||
|
||||
**Import changes needed:**
|
||||
- `events_db.rs`: 1 consumer in `cli/commands/count.rs` changes from `crate::core::events_db` → `crate::ingestion::events_db`
|
||||
- `dependent_queue.rs`: 2 consumers — `ingestion/orchestrator.rs` (becomes `super::dependent_queue`) and `main.rs`
|
||||
- `payloads.rs`: 4 consumers in `ingestion/*.rs` (become `super::payloads`)
|
||||
- `sync_run.rs`: 2 consumers in `cli/commands/sync.rs` and `sync_status.rs`
|
||||
- Internal references change from `super::error` / `super::time` to `crate::core::error` / `crate::core::time`
|
||||
|
||||
**Risk: MEDIUM** — More import changes, but all straightforward. The internal `super::` references need the most attention.
|
||||
|
||||
**Alternatively:** If moving feels like too much churn, a lighter option is to create `core/ingestion_db.rs` that re-exports from these 4 files, making the grouping visible without moving files. But I think the move is cleaner.
|
||||
|
||||
---
|
||||
|
||||
### 2.2 Split `cli/mod.rs` — move arg structs to their command files
|
||||
|
||||
**What:** Move each `*Args` struct from `cli/mod.rs` into the corresponding `cli/commands/*.rs` file. Keep `Cli` struct, `Commands` enum, and `detect_robot_mode_from_env()` in `cli/mod.rs`.
|
||||
|
||||
**Currently `cli/mod.rs` (949 lines) contains:**
|
||||
- `Cli` struct (81 lines) — the root clap parser
|
||||
- `Commands` enum (193 lines) — all subcommand variants
|
||||
- `IssuesArgs` (86 lines) → move to `commands/list.rs` or stay near issues handling
|
||||
- `MrsArgs` (93 lines) → move to `commands/list.rs` or stay near MRs handling
|
||||
- `NotesArgs` (99 lines) → move to `commands/list.rs`
|
||||
- `IngestArgs` (33 lines) → move to `commands/ingest.rs`
|
||||
- `StatsArgs` (19 lines) → move to `commands/stats.rs`
|
||||
- `SearchArgs` (58 lines) → move to `commands/search.rs`
|
||||
- `GenerateDocsArgs` (9 lines) → move to `commands/generate_docs.rs`
|
||||
- `SyncArgs` (39 lines) → move to `commands/sync.rs`
|
||||
- `EmbedArgs` (15 lines) → move to `commands/embed.rs`
|
||||
- `TimelineArgs` (53 lines) → move to `commands/timeline.rs`
|
||||
- `WhoArgs` (76 lines) → move to `commands/who.rs`
|
||||
- `CountArgs` (9 lines) → move to `commands/count.rs`
|
||||
|
||||
**After refactoring, `cli/mod.rs` shrinks to ~300 lines** (just `Cli` + `Commands` + the inlined variants like `Init`, `Drift`, `Backup`, `Reset`).
|
||||
|
||||
**Rationale:** When adding a new flag to the `who` command, you currently have to edit `cli/mod.rs` (the args struct), `cli/commands/who.rs` (the implementation), and `main.rs` (the dispatch). If the args struct lives in `commands/who.rs`, you only need two files. This is the standard pattern in mature clap-based Rust CLIs.
|
||||
|
||||
**Import changes needed:**
|
||||
- `main.rs` currently does `use lore::cli::{..., WhoArgs, ...}` — these would become `use lore::cli::commands::{..., WhoArgs, ...}` or the `commands/mod.rs` re-exports them
|
||||
- Each `commands/*.rs` gets its own `#[derive(Parser)]` struct
|
||||
- `Commands` enum in `cli/mod.rs` keeps using the types but imports from `commands::*`
|
||||
|
||||
**Risk: MEDIUM** — Lots of `use` path changes in `main.rs`, but purely mechanical. No logic changes.
|
||||
|
||||
---
|
||||
|
||||
## Tier 3: Consider Later
|
||||
|
||||
### 3.1 Split `main.rs` (2713 lines)
|
||||
|
||||
**The problem:** `main.rs` contains `main()`, ~30 `handle_*` functions, error handling, clap error formatting, fuzzy command matching, and the `robot-docs` JSON manifest (a 400+ line inline JSON literal).
|
||||
|
||||
**Possible approach:**
|
||||
- Extract `handle_*` functions into `cli/dispatch.rs` (the routing layer)
|
||||
- Extract error handling into `cli/errors.rs`
|
||||
- Extract `handle_robot_docs` + the JSON manifest into `cli/robot_docs.rs`
|
||||
- Keep `main()` in `main.rs` at ~150 lines (just the tracing setup + dispatch call)
|
||||
|
||||
**Why Tier 3:** This is the messiest split. The handler functions depend on the `cli::commands::*` functions AND the `cli::robot::*` helpers AND direct `std::process::exit` calls. Making this work cleanly requires careful thought about the error boundary between `main.rs` (binary) and `lib.rs` (library).
|
||||
|
||||
**Risk: HIGH** — Every handler function touches `robot_mode`, constructs its own timer, opens the DB, and manages error display. The boilerplate is high but consistent, so splitting would just move it around without reducing complexity.
|
||||
|
||||
---
|
||||
|
||||
### 3.2 Split `cli/commands/who.rs` (6067 lines)
|
||||
|
||||
**The problem:** This file implements 5 distinct modes (expert, workload, active, overlap, reviews), each with its own query, scoring model, and output formatting. It also includes the time-decay scoring model (~500 lines) and per-MR detail breakdown logic.
|
||||
|
||||
**Possible split:**
|
||||
```
|
||||
src/cli/commands/who/
|
||||
├── mod.rs ← WhoRun dispatcher, shared types
|
||||
├── expert.rs ← expert mode (path-based file expertise lookup)
|
||||
├── workload.rs ← workload mode (user's assigned issues/MRs)
|
||||
├── active.rs ← active discussions mode
|
||||
├── overlap.rs ← file overlap between users
|
||||
├── reviews.rs ← review pattern analysis
|
||||
└── scoring.rs ← time-decay expert scoring model
|
||||
```
|
||||
|
||||
**Why Tier 3:** The 5 modes share many helper functions, database connection patterns, and output formatting logic. Splitting would require carefully identifying the shared helpers and deciding where they live. The file is big but internally consistent — the modes use a shared dispatcher pattern and common types.
|
||||
|
||||
---
|
||||
|
||||
### 3.3 Split `cli/commands/list.rs` (2931 lines)
|
||||
|
||||
**The problem:** This file handles issue listing, MR listing, AND note listing — three related but distinct operations with separate query builders, output formatters, and test suites.
|
||||
|
||||
**Possible split:**
|
||||
```
|
||||
src/cli/commands/
|
||||
├── list_issues.rs ← issue listing + query builder
|
||||
├── list_mrs.rs ← MR listing + query builder
|
||||
├── list_notes.rs ← note listing + query builder
|
||||
└── list.rs ← shared types (ListFilters, etc.) + re-exports
|
||||
```
|
||||
|
||||
**Why Tier 3:** Same issue as `who.rs` — the three listing modes share query building patterns, field selection logic, and sorting code. Splitting requires identifying and extracting the shared pieces first.
|
||||
|
||||
---
|
||||
|
||||
## Files NOT Recommended to Move
|
||||
|
||||
These files belong exactly where they are:
|
||||
|
||||
| File | Why it belongs in `core/` |
|
||||
|------|--------------------------|
|
||||
| `config.rs` | Config types used by nearly everything |
|
||||
| `db.rs` | Database connection + migrations — foundational |
|
||||
| `error.rs` | Error types used by every module |
|
||||
| `paths.rs` | File path resolution — infrastructure |
|
||||
| `logging.rs` | Tracing setup — infrastructure |
|
||||
| `lock.rs` | Filesystem sync lock — infrastructure |
|
||||
| `shutdown.rs` | Graceful shutdown signal — infrastructure |
|
||||
| `backoff.rs` | Retry math — infrastructure |
|
||||
| `time.rs` | Time conversion — used everywhere |
|
||||
| `metrics.rs` | Tracing metrics layer — infrastructure |
|
||||
| `project.rs` | Fuzzy project resolution — used by 8+ consumers across modules |
|
||||
|
||||
These files are legitimate "core infrastructure" used across multiple modules. Moving them would create import churn with no clarity gain.
|
||||
|
||||
---
|
||||
|
||||
## Files NOT Recommended to Split/Merge
|
||||
|
||||
| File | Why leave it alone |
|
||||
|------|-------------------|
|
||||
| `documents/extractor.rs` (2341 lines) | One cohesive extractor per entity type — the size comes from per-type formatting logic, not mixed concerns |
|
||||
| `ingestion/orchestrator.rs` (1703 lines) | Single orchestration flow — splitting would scatter the pipeline |
|
||||
| `gitlab/graphql.rs` (1293 lines) | GraphQL client with adaptive paging — cohesive |
|
||||
| `gitlab/client.rs` (851 lines) | REST client with all endpoints — cohesive |
|
||||
| `cli/autocorrect.rs` (945 lines) | Correction registry + fuzzy matching — splitting gains nothing |
|
||||
|
||||
---
|
||||
|
||||
## Proposed Final Structure (Tiers 1+2)
|
||||
|
||||
```
|
||||
src/
|
||||
├── main.rs (2713 lines — unchanged for now)
|
||||
├── lib.rs (adds: pub mod timeline; pub mod xref;)
|
||||
├── cli/
|
||||
│ ├── mod.rs (~300 lines — Cli + Commands only, args moved out)
|
||||
│ ├── autocorrect.rs (unchanged)
|
||||
│ ├── progress.rs (unchanged)
|
||||
│ ├── robot.rs (unchanged)
|
||||
│ └── commands/
|
||||
│ ├── mod.rs (re-exports + WhoArgs, IssuesArgs, etc.)
|
||||
│ ├── (all existing files — unchanged but with args structs moved in)
|
||||
│ └── ...
|
||||
├── core/ (slimmed: 22 files → 12 files, infrastructure only)
|
||||
│ ├── mod.rs
|
||||
│ ├── backoff.rs
|
||||
│ ├── config.rs
|
||||
│ ├── db.rs
|
||||
│ ├── error.rs
|
||||
│ ├── lock.rs
|
||||
│ ├── logging.rs
|
||||
│ ├── metrics.rs
|
||||
│ ├── paths.rs
|
||||
│ ├── project.rs
|
||||
│ ├── shutdown.rs
|
||||
│ └── time.rs
|
||||
├── timeline/ (NEW — extracted from core/)
|
||||
│ ├── mod.rs (types from core/timeline.rs)
|
||||
│ ├── seed.rs (from core/timeline_seed.rs)
|
||||
│ ├── expand.rs (from core/timeline_expand.rs)
|
||||
│ └── collect.rs (from core/timeline_collect.rs)
|
||||
├── xref/ (NEW — extracted from core/)
|
||||
│ ├── mod.rs
|
||||
│ ├── note_parser.rs (from core/note_parser.rs)
|
||||
│ └── references.rs (from core/references.rs)
|
||||
├── ingestion/ (gains 4 files from core/)
|
||||
│ ├── (existing files...)
|
||||
│ ├── events_db.rs (from core/events_db.rs)
|
||||
│ ├── dependent_queue.rs (from core/dependent_queue.rs)
|
||||
│ ├── payloads.rs (from core/payloads.rs)
|
||||
│ └── sync_run.rs (from core/sync_run.rs)
|
||||
├── documents/ (unchanged)
|
||||
├── embedding/ (unchanged)
|
||||
├── gitlab/ (unchanged)
|
||||
└── search/ (unchanged)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
## Import Change Tracking
|
||||
|
||||
### Tier 1.1: Timeline extraction
|
||||
|
||||
| Consumer file | Old import | New import |
|
||||
|---------------|-----------|------------|
|
||||
| `cli/commands/timeline.rs:10-15` | `crate::core::timeline::*` | `crate::timeline::*` |
|
||||
| `cli/commands/timeline.rs:13` | `crate::core::timeline_collect::collect_events` | `crate::timeline::collect_events` (or `crate::timeline::collect::collect_events`) |
|
||||
| `cli/commands/timeline.rs:14` | `crate::core::timeline_expand::expand_timeline` | `crate::timeline::expand_timeline` |
|
||||
| `cli/commands/timeline.rs:15` | `crate::core::timeline_seed::seed_timeline` | `crate::timeline::seed_timeline` |
|
||||
| `core/timeline_seed.rs:7-8` | `super::timeline::*` | `super::*` (or `crate::timeline::*` depending on structure) |
|
||||
| `core/timeline_expand.rs:6` | `super::timeline::*` | `super::*` |
|
||||
| `core/timeline_collect.rs:4` | `super::timeline::*` | `super::*` |
|
||||
| `core/timeline_seed.rs:8` | `crate::search::*` | `crate::search::*` (no change) |
|
||||
| `core/timeline_seed.rs:6-7` | `super::error::Result` | `crate::core::error::Result` |
|
||||
| `core/timeline_expand.rs:5` | `super::error::Result` | `crate::core::error::Result` |
|
||||
| `core/timeline_collect.rs:3` | `super::error::*` | `crate::core::error::*` |
|
||||
|
||||
### Tier 1.2: Cross-reference extraction
|
||||
|
||||
| Consumer file | Old import | New import |
|
||||
|---------------|-----------|------------|
|
||||
| `ingestion/orchestrator.rs:10-12` | `crate::core::references::*` | `crate::xref::references::*` |
|
||||
| `core/note_parser.rs:7-8` | `super::error::Result`, `super::time::now_ms` | `crate::core::error::Result`, `crate::core::time::now_ms` |
|
||||
| `core/references.rs:4-5` | `super::error::Result`, `super::time::now_ms` | `crate::core::error::Result`, `crate::core::time::now_ms` |
|
||||
|
||||
### Tier 2.1: Ingestion-adjacent DB ops
|
||||
|
||||
| Consumer file | Old import | New import |
|
||||
|---------------|-----------|------------|
|
||||
| `cli/commands/count.rs:9` | `crate::core::events_db::*` | `crate::ingestion::events_db::*` |
|
||||
| `ingestion/orchestrator.rs:6-8` | `crate::core::dependent_queue::*` | `super::dependent_queue::*` |
|
||||
| `main.rs:37` | `crate::core::dependent_queue::release_all_locked_jobs` | `crate::ingestion::dependent_queue::release_all_locked_jobs` |
|
||||
| `ingestion/discussions.rs:7` | `crate::core::payloads::*` | `super::payloads::*` |
|
||||
| `ingestion/issues.rs:9` | `crate::core::payloads::*` | `super::payloads::*` |
|
||||
| `ingestion/merge_requests.rs:8` | `crate::core::payloads::*` | `super::payloads::*` |
|
||||
| `ingestion/mr_discussions.rs:7` | `crate::core::payloads::*` | `super::payloads::*` |
|
||||
| `cli/commands/sync.rs` | (uses `crate::core::sync_run::*`) | `crate::ingestion::sync_run::*` |
|
||||
| `cli/commands/sync_status.rs` | (uses `crate::core::sync_run::*` or `crate::core::metrics::*`) | check and update |
|
||||
| Internal: `events_db.rs:4-5` | `super::error::*`, `super::time::*` | `crate::core::error::*`, `crate::core::time::*` |
|
||||
| Internal: `dependent_queue.rs:5-6` | `super::error::Result`, `super::time::now_ms` | `crate::core::error::Result`, `crate::core::time::now_ms` |
|
||||
| Internal: `payloads.rs:9-10` | `super::error::Result`, `super::time::now_ms` | `crate::core::error::Result`, `crate::core::time::now_ms` |
|
||||
| Internal: `sync_run.rs:2-4` | `super::error::*`, `super::metrics::*`, `super::time::*` | `crate::core::error::*`, `crate::core::metrics::*`, `crate::core::time::*` |
|
||||
|
||||
---
|
||||
|
||||
## Execution Order
|
||||
|
||||
1. **Tier 1.1** — Extract timeline → `src/timeline/` (LOW risk, 1 consumer)
|
||||
2. **Tier 1.2** — Extract xref → `src/xref/` (LOW risk, 2-3 consumers)
|
||||
3. **Cargo check + clippy + test** after each tier
|
||||
4. **Tier 2.1** — Move ingestion DB ops (MEDIUM risk, more consumers)
|
||||
5. **Cargo check + clippy + test**
|
||||
6. **Tier 2.2** — Split `cli/mod.rs` args (MEDIUM risk, mostly mechanical)
|
||||
7. **Cargo check + clippy + test + fmt**
|
||||
|
||||
Each tier should be its own commit for easy rollback.
|
||||
|
||||
---
|
||||
|
||||
## What This Achieves
|
||||
|
||||
**Before:** A developer looking at `core/` sees 22 files and has to mentally sort "infrastructure vs. domain logic vs. pipeline stage." The timeline pipeline is invisible unless you know to look in `core/`.
|
||||
|
||||
**After:**
|
||||
- `core/` has 12 files, all clearly infrastructure (`mod.rs` plus db, config, error, paths, logging, lock, shutdown, backoff, time, metrics, project)
|
||||
- `timeline/` is a discoverable first-class module showing the 5-stage pipeline
|
||||
- `xref/` makes the cross-reference extraction domain visible
|
||||
- `ingestion/` contains everything related to data fetching: the orchestrator, entity ingestors, AND their supporting DB operations
|
||||
- `cli/mod.rs` is lean — just the top-level Cli struct and Commands enum
|
||||
|
||||
A new developer (or coding agent) can now answer "where is the timeline code?" → `src/timeline/`, "where is ingestion?" → `src/ingestion/`, "where is cross-reference extraction?" → `src/xref/`, without needing institutional knowledge.
|
||||
174
README.md
174
README.md
@@ -19,7 +19,10 @@ Local GitLab data management with semantic search, people intelligence, and temp
|
||||
- **Cross-reference tracking**: Automatic extraction of "closes", "mentioned" relationships between MRs and issues
|
||||
- **Work item status enrichment**: Fetches issue statuses (e.g., "To do", "In progress", "Done") from GitLab's GraphQL API with adaptive page sizing, color-coded display, and case-insensitive filtering
|
||||
- **Resource event history**: Tracks state changes, label events, and milestone events for issues and MRs
|
||||
- **Note querying**: Rich filtering over discussion notes by author, type, path, resolution status, time range, and body content
|
||||
- **Discussion drift detection**: Semantic analysis of how discussions diverge from original issue intent
|
||||
- **Robot mode**: Machine-readable JSON output with structured errors, meaningful exit codes, and actionable recovery steps
|
||||
- **Error tolerance**: Auto-corrects common CLI mistakes (case, typos, single-dash flags, value casing) with teaching feedback
|
||||
- **Observability**: Verbosity controls, JSON log format, structured metrics, and stage timing
|
||||
|
||||
## Installation
|
||||
@@ -71,6 +74,12 @@ lore who @asmith
|
||||
# Timeline of events related to deployments
|
||||
lore timeline "deployment"
|
||||
|
||||
# Timeline for a specific issue
|
||||
lore timeline issue:42
|
||||
|
||||
# Query notes by author
|
||||
lore notes --author alice --since 7d
|
||||
|
||||
# Robot mode (machine-readable JSON)
|
||||
lore -J issues -n 5 | jq .
|
||||
```
|
||||
@@ -109,6 +118,15 @@ Configuration is stored in `~/.config/lore/config.json` (or `$XDG_CONFIG_HOME/lo
|
||||
"model": "nomic-embed-text",
|
||||
"baseUrl": "http://localhost:11434",
|
||||
"concurrency": 4
|
||||
},
|
||||
"scoring": {
|
||||
"authorWeight": 25,
|
||||
"reviewerWeight": 10,
|
||||
"noteBonus": 1,
|
||||
"authorHalfLifeDays": 180,
|
||||
"reviewerHalfLifeDays": 90,
|
||||
"noteHalfLifeDays": 45,
|
||||
"excludedUsernames": ["bot-user"]
|
||||
}
|
||||
}
|
||||
```
|
||||
@@ -135,6 +153,15 @@ Configuration is stored in `~/.config/lore/config.json` (or `$XDG_CONFIG_HOME/lo
|
||||
| `embedding` | `model` | `nomic-embed-text` | Model name for embeddings |
|
||||
| `embedding` | `baseUrl` | `http://localhost:11434` | Ollama server URL |
|
||||
| `embedding` | `concurrency` | `4` | Concurrent embedding requests |
|
||||
| `scoring` | `authorWeight` | `25` | Points per MR where the user authored code touching the path |
|
||||
| `scoring` | `reviewerWeight` | `10` | Points per MR where the user reviewed code touching the path |
|
||||
| `scoring` | `noteBonus` | `1` | Bonus per inline review comment (DiffNote) |
|
||||
| `scoring` | `reviewerAssignmentWeight` | `3` | Points per MR where the user was assigned as reviewer |
|
||||
| `scoring` | `authorHalfLifeDays` | `180` | Half-life in days for author contribution decay |
|
||||
| `scoring` | `reviewerHalfLifeDays` | `90` | Half-life in days for reviewer contribution decay |
|
||||
| `scoring` | `noteHalfLifeDays` | `45` | Half-life in days for note/comment decay |
|
||||
| `scoring` | `closedMrMultiplier` | `0.5` | Score multiplier for closed (not merged) MRs |
|
||||
| `scoring` | `excludedUsernames` | `[]` | Usernames excluded from expert results (e.g., bots) |
|
||||
|
||||
### Config File Resolution
|
||||
|
||||
@@ -262,18 +289,21 @@ lore search "login flow" --mode semantic # Vector similarity only
|
||||
lore search "auth" --type issue # Filter by source type
|
||||
lore search "auth" --type mr # MR documents only
|
||||
lore search "auth" --type discussion # Discussion documents only
|
||||
lore search "auth" --type note # Individual notes only
|
||||
lore search "deploy" --author username # Filter by author
|
||||
lore search "deploy" -p group/repo # Filter by project
|
||||
lore search "deploy" --label backend # Filter by label (AND logic)
|
||||
lore search "deploy" --path src/ # Filter by file path (trailing / for prefix)
|
||||
lore search "deploy" --after 7d # Created after (7d, 2w, 1m, or YYYY-MM-DD)
|
||||
lore search "deploy" --updated-after 2w # Updated after
|
||||
lore search "deploy" --since 7d # Created since (7d, 2w, 1m, or YYYY-MM-DD)
|
||||
lore search "deploy" --updated-since 2w # Updated since
|
||||
lore search "deploy" -n 50 # Limit results (default 20, max 100)
|
||||
lore search "deploy" --explain # Show ranking explanation per result
|
||||
lore search "deploy" --fts-mode raw # Raw FTS5 query syntax (advanced)
|
||||
```
|
||||
|
||||
The `--fts-mode` flag defaults to `safe`, which sanitizes user input into valid FTS5 queries with automatic fallback. Use `raw` for advanced FTS5 query syntax (AND, OR, NOT, phrase matching, prefix queries).
|
||||
The `--fts-mode` flag defaults to `safe`, which sanitizes user input into valid FTS5 queries with automatic fallback. FTS5 boolean operators (`AND`, `OR`, `NOT`, `NEAR`) are passed through in safe mode, so queries like `"switch AND health"` work without switching to raw mode. Use `raw` for advanced FTS5 query syntax (phrase matching, column filters, prefix queries).
|
||||
|
||||
A progress spinner displays during search, showing the active mode (e.g., `Searching (hybrid)...`). In robot mode, spinners are suppressed for clean JSON output.
|
||||
|
||||
Requires `lore generate-docs` (or `lore sync`) to have been run at least once. Semantic and hybrid modes require `lore embed` (or `lore sync`) to have generated vector embeddings via Ollama.
|
||||
|
||||
@@ -283,7 +313,7 @@ People intelligence: discover experts, analyze workloads, review patterns, activ
|
||||
|
||||
#### Expert Mode
|
||||
|
||||
Find who has expertise in a code area based on authoring and reviewing history (DiffNote analysis).
|
||||
Find who has expertise in a code area based on authoring and reviewing history (DiffNote analysis). Scores use exponential half-life decay so recent contributions count more than older ones. Scoring weights and half-life periods are configurable via the `scoring` config section.
|
||||
|
||||
```bash
|
||||
lore who src/features/auth/ # Who knows about this directory?
|
||||
@@ -292,6 +322,9 @@ lore who --path README.md # Root files need --path flag
|
||||
lore who --path Makefile # Dotless root files too
|
||||
lore who src/ --since 3m # Limit to recent 3 months
|
||||
lore who src/ -p group/repo # Scope to project
|
||||
lore who src/ --explain-score # Show per-component score breakdown
|
||||
lore who src/ --as-of 30d # Score as if "now" was 30 days ago
|
||||
lore who src/ --include-bots # Include bot users in results
|
||||
```
|
||||
|
||||
The target is auto-detected as a path when it contains `/`. For root files without `/` (e.g., `README.md`), use the `--path` flag. Default time window: 6 months.
|
||||
@@ -348,21 +381,32 @@ Shows: users with touch counts (author vs. review), linked MR references. Defaul
|
||||
| `-p` / `--project` | Scope to a project (fuzzy match) |
|
||||
| `--since` | Time window (7d, 2w, 6m, YYYY-MM-DD). Default varies by mode. |
|
||||
| `-n` / `--limit` | Max results per section (1-500, default 20) |
|
||||
| `--all-history` | Remove the default time window, query all history |
|
||||
| `--detail` | Show per-MR detail breakdown (expert mode only) |
|
||||
| `--explain-score` | Show per-component score breakdown (expert mode only) |
|
||||
| `--as-of` | Score as if "now" is a past date (ISO 8601 or duration like 30d, expert mode only) |
|
||||
| `--include-bots` | Include bot users normally excluded via `scoring.excludedUsernames` |
|
||||
|
||||
### `lore timeline`
|
||||
|
||||
Reconstruct a chronological timeline of events matching a keyword query. The pipeline discovers related entities through cross-reference graph traversal and assembles a unified, time-ordered event stream.
|
||||
|
||||
```bash
|
||||
lore timeline "deployment" # Events related to deployments
|
||||
lore timeline "deployment" # Search-based seeding (hybrid search)
|
||||
lore timeline issue:42 # Direct entity seeding by issue IID
|
||||
lore timeline i:42 # Shorthand for issue:42
|
||||
lore timeline mr:99 # Direct entity seeding by MR IID
|
||||
lore timeline m:99 # Shorthand for mr:99
|
||||
lore timeline "auth" -p group/repo # Scoped to a project
|
||||
lore timeline "auth" --since 30d # Only recent events
|
||||
lore timeline "migration" --depth 2 # Deeper cross-reference expansion
|
||||
lore timeline "migration" --expand-mentions # Follow 'mentioned' edges (high fan-out)
|
||||
lore timeline "migration" --no-mentions # Skip 'mentioned' edges (reduces fan-out)
|
||||
lore timeline "deploy" -n 50 # Limit event count
|
||||
lore timeline "auth" --max-seeds 5 # Fewer seed entities
|
||||
```
|
||||
|
||||
The query can be either a search string (hybrid search finds matching entities) or an entity reference (`issue:N`, `i:N`, `mr:N`, `m:N`) which directly seeds the timeline from a specific entity and its cross-references.
|
||||
|
||||
#### Flags
|
||||
|
||||
| Flag | Default | Description |
|
||||
@@ -370,18 +414,21 @@ lore timeline "auth" --max-seeds 5 # Fewer seed entities
|
||||
| `-p` / `--project` | all | Scope to a specific project (fuzzy match) |
|
||||
| `--since` | none | Only events after this date (7d, 2w, 6m, YYYY-MM-DD) |
|
||||
| `--depth` | `1` | Cross-reference expansion depth (0 = seeds only) |
|
||||
| `--expand-mentions` | off | Also follow "mentioned" edges during expansion |
|
||||
| `--no-mentions` | off | Skip "mentioned" edges during expansion (reduces fan-out) |
|
||||
| `-n` / `--limit` | `100` | Maximum events to display |
|
||||
| `--max-seeds` | `10` | Maximum seed entities from search |
|
||||
| `--max-entities` | `50` | Maximum entities discovered via cross-references |
|
||||
| `--max-evidence` | `10` | Maximum evidence notes included |
|
||||
| `--fields` | all | Select output fields (comma-separated, or 'minimal' preset) |
|
||||
|
||||
#### Pipeline Stages
|
||||
|
||||
1. **SEED** -- Full-text search identifies the most relevant issues and MRs matching the query. Documents are ranked by BM25 relevance.
|
||||
2. **HYDRATE** -- Evidence notes are extracted: the top FTS-matched discussion notes with 200-character snippets explaining *why* each entity was surfaced.
|
||||
3. **EXPAND** -- Breadth-first traversal over the `entity_references` graph discovers related entities via "closes", "related", and optionally "mentioned" references up to the configured depth.
|
||||
4. **COLLECT** -- Events are gathered for all discovered entities. Event types include: creation, state changes, label adds/removes, milestone assignments, merge events, and evidence notes. Events are sorted chronologically with stable tiebreaking.
|
||||
Each stage displays a numbered progress spinner (e.g., `[1/3] Seeding timeline...`). In robot mode, spinners are suppressed for clean JSON output.
|
||||
|
||||
1. **SEED** -- Hybrid search (FTS5 lexical + Ollama vector similarity via Reciprocal Rank Fusion) identifies the most relevant issues and MRs. Falls back to lexical-only if Ollama is unavailable. Discussion notes matching the query are also discovered and attached to their parent entities.
|
||||
2. **HYDRATE** -- Evidence notes are extracted: the top search-matched discussion notes with 200-character snippets explaining *why* each entity was surfaced. Matched discussions are collected as full thread candidates.
|
||||
3. **EXPAND** -- Breadth-first traversal over the `entity_references` graph discovers related entities via "closes", "related", and "mentioned" references up to the configured depth. Use `--no-mentions` to exclude "mentioned" edges and reduce fan-out.
|
||||
4. **COLLECT** -- Events are gathered for all discovered entities. Event types include: creation, state changes, label adds/removes, milestone assignments, merge events, evidence notes, and full discussion threads. Events are sorted chronologically with stable tiebreaking.
|
||||
5. **RENDER** -- Events are formatted as human-readable text or structured JSON (robot mode).
|
||||
|
||||
#### Event Types
|
||||
@@ -395,13 +442,70 @@ lore timeline "auth" --max-seeds 5 # Fewer seed entities
|
||||
| `MilestoneSet` | Milestone assigned |
|
||||
| `MilestoneRemoved` | Milestone removed |
|
||||
| `Merged` | MR merged (deduplicated against state events) |
|
||||
| `NoteEvidence` | Discussion note matched by FTS, with snippet |
|
||||
| `NoteEvidence` | Discussion note matched by search, with snippet |
|
||||
| `DiscussionThread` | Full discussion thread with all non-system notes |
|
||||
| `CrossReferenced` | Reference to another entity |
|
||||
|
||||
#### Unresolved References
|
||||
|
||||
When graph expansion encounters cross-project references to entities not yet synced locally, these are collected as unresolved references in the output. This enables discovery of external dependencies and can inform future sync targets.
|
||||
|
||||
### `lore notes`
|
||||
|
||||
Query individual notes from discussions with rich filtering options.
|
||||
|
||||
```bash
|
||||
lore notes # List 50 most recent notes
|
||||
lore notes --author alice --since 7d # Notes by alice in last 7 days
|
||||
lore notes --for-issue 42 -p group/repo # Notes on issue #42
|
||||
lore notes --for-mr 99 -p group/repo # Notes on MR !99
|
||||
lore notes --path src/ --resolution unresolved # Unresolved diff notes in src/
|
||||
lore notes --note-type DiffNote # Only inline code review comments
|
||||
lore notes --contains "TODO" # Substring search in note body
|
||||
lore notes --include-system # Include system-generated notes
|
||||
lore notes --since 2w --until 2024-12-31 # Time-bounded range
|
||||
lore notes --sort updated --asc # Sort by update time, ascending
|
||||
lore notes --format csv # CSV output
|
||||
lore notes --format jsonl # Line-delimited JSON
|
||||
lore notes -o # Open first result in browser
|
||||
|
||||
# Field selection (robot mode)
|
||||
lore -J notes --fields minimal # Compact: id, author_username, body, created_at_iso
|
||||
```
|
||||
|
||||
#### Filters
|
||||
|
||||
| Flag | Description |
|
||||
|------|-------------|
|
||||
| `-a` / `--author` | Filter by note author username |
|
||||
| `--note-type` | Filter by note type (DiffNote, DiscussionNote) |
|
||||
| `--contains` | Substring search in note body |
|
||||
| `--note-id` | Filter by internal note ID |
|
||||
| `--gitlab-note-id` | Filter by GitLab note ID |
|
||||
| `--discussion-id` | Filter by discussion ID |
|
||||
| `--include-system` | Include system notes (excluded by default) |
|
||||
| `--for-issue` | Notes on a specific issue IID (requires `-p`) |
|
||||
| `--for-mr` | Notes on a specific MR IID (requires `-p`) |
|
||||
| `-p` / `--project` | Scope to a project (fuzzy match) |
|
||||
| `--since` | Notes created since (7d, 2w, 1m, or YYYY-MM-DD) |
|
||||
| `--until` | Notes created until (YYYY-MM-DD, inclusive end-of-day) |
|
||||
| `--path` | Filter by file path (DiffNotes only; trailing `/` for prefix match) |
|
||||
| `--resolution` | Filter by resolution status (`any`, `unresolved`, `resolved`) |
|
||||
| `--sort` | Sort by `created` (default) or `updated` |
|
||||
| `--asc` | Sort ascending (default: descending) |
|
||||
| `--format` | Output format: `table` (default), `json`, `jsonl`, `csv` |
|
||||
| `-o` / `--open` | Open first result in browser |
|
||||
|
||||
### `lore drift`
|
||||
|
||||
Detect discussion divergence from the original intent of an issue by comparing the semantic similarity of discussion content against the issue description.
|
||||
|
||||
```bash
|
||||
lore drift issues 42 # Check divergence on issue #42
|
||||
lore drift issues 42 --threshold 0.6 # Higher threshold (stricter)
|
||||
lore drift issues 42 -p group/repo # Scope to project
|
||||
```
|
||||
|
||||
### `lore sync`
|
||||
|
||||
Run the full sync pipeline: ingest from GitLab (including work item status enrichment via GraphQL), generate searchable documents, and compute embeddings.
|
||||
@@ -413,6 +517,7 @@ lore sync --force # Override stale lock
|
||||
lore sync --no-embed # Skip embedding step
|
||||
lore sync --no-docs # Skip document regeneration
|
||||
lore sync --no-events # Skip resource event fetching
|
||||
lore sync --no-file-changes # Skip MR file change fetching
|
||||
lore sync --dry-run # Preview what would be synced
|
||||
```
|
||||
|
||||
@@ -571,6 +676,7 @@ Machine-readable command manifest for agent self-discovery. Returns a JSON schem
|
||||
```bash
|
||||
lore robot-docs # Pretty-printed JSON
|
||||
lore --robot robot-docs # Compact JSON for parsing
|
||||
lore robot-docs --brief # Omit response_schema (~60% smaller)
|
||||
```
|
||||
|
||||
### `lore version`
|
||||
@@ -622,7 +728,7 @@ The `actions` array contains executable shell commands an agent can run to recov
|
||||
|
||||
### Field Selection
|
||||
|
||||
The `--fields` flag on `issues` and `mrs` list commands controls which fields appear in the JSON response, reducing token usage for AI agent workflows:
|
||||
The `--fields` flag controls which fields appear in the JSON response, reducing token usage for AI agent workflows. Supported on `issues`, `mrs`, `notes`, `search`, `timeline`, and `who` list commands:
|
||||
|
||||
```bash
|
||||
# Minimal preset (~60% fewer tokens)
|
||||
@@ -639,6 +745,48 @@ Valid fields for issues: `iid`, `title`, `state`, `author_username`, `labels`, `
|
||||
|
||||
Valid fields for MRs: `iid`, `title`, `state`, `author_username`, `labels`, `draft`, `target_branch`, `source_branch`, `discussion_count`, `unresolved_count`, `created_at_iso`, `updated_at_iso`, `web_url`, `project_path`, `reviewers`
|
||||
|
||||
### Error Tolerance
|
||||
|
||||
The CLI auto-corrects common mistakes before parsing, emitting a teaching note to stderr. Corrections work in both human and robot modes:
|
||||
|
||||
| Correction | Example | Mode |
|
||||
|-----------|---------|------|
|
||||
| Single-dash long flag | `-robot` -> `--robot` | All |
|
||||
| Case normalization | `--Robot` -> `--robot` | All |
|
||||
| Flag prefix expansion | `--proj` -> `--project` (unambiguous only) | All |
|
||||
| Fuzzy flag match | `--projct` -> `--project` | All (threshold 0.9 in robot, 0.8 in human) |
|
||||
| Subcommand alias | `merge_requests` -> `mrs`, `robotdocs` -> `robot-docs` | All |
|
||||
| Value normalization | `--state Opened` -> `--state opened` | All |
|
||||
| Value fuzzy match | `--state opend` -> `--state opened` | All |
|
||||
| Subcommand prefix | `lore iss` -> `lore issues` (unambiguous only, via clap) | All |
|
||||
|
||||
In robot mode, corrections emit structured JSON to stderr:
|
||||
|
||||
```json
|
||||
{"warning":{"type":"ARG_CORRECTED","corrections":[...],"teaching":["Use double-dash for long flags: --robot (not -robot)"]}}
|
||||
```
|
||||
|
||||
When a command or flag is still unrecognized after corrections, the error response includes a fuzzy suggestion and, for enum-like flags, lists valid values:
|
||||
|
||||
```json
|
||||
{"error":{"code":"UNKNOWN_COMMAND","message":"...","suggestion":"Did you mean 'lore issues'? Example: lore --robot issues -n 10. Run 'lore robot-docs' for all commands"}}
|
||||
```
|
||||
|
||||
### Command Aliases
|
||||
|
||||
Commands accept aliases for common variations:
|
||||
|
||||
| Primary | Aliases |
|
||||
|---------|---------|
|
||||
| `issues` | `issue` |
|
||||
| `mrs` | `mr`, `merge-requests`, `merge-request` |
|
||||
| `notes` | `note` |
|
||||
| `search` | `find`, `query` |
|
||||
| `stats` | `stat` |
|
||||
| `status` | `st` |
|
||||
|
||||
Unambiguous prefixes also work via subcommand inference (e.g., `lore iss` -> `lore issues`, `lore time` -> `lore timeline`).
|
||||
|
||||
### Agent Self-Discovery
|
||||
|
||||
The `robot-docs` command provides a complete machine-readable manifest including response schemas for every command:
|
||||
|
||||
3252
crates/lore-tui/Cargo.lock
generated
Normal file
3252
crates/lore-tui/Cargo.lock
generated
Normal file
File diff suppressed because it is too large
Load Diff
46
crates/lore-tui/Cargo.toml
Normal file
46
crates/lore-tui/Cargo.toml
Normal file
@@ -0,0 +1,46 @@
|
||||
[package]
|
||||
name = "lore-tui"
|
||||
version = "0.1.0"
|
||||
edition = "2024"
|
||||
description = "Terminal UI for Gitlore — local GitLab data explorer"
|
||||
authors = ["Taylor Eernisse"]
|
||||
license = "MIT"
|
||||
|
||||
[[bin]]
|
||||
name = "lore-tui"
|
||||
path = "src/main.rs"
|
||||
|
||||
[dependencies]
|
||||
# FrankenTUI (Elm-architecture TUI framework)
|
||||
ftui = "0.1.1"
|
||||
|
||||
# Lore library (config, db, ingestion, search, etc.)
|
||||
lore = { path = "../.." }
|
||||
|
||||
# CLI
|
||||
clap = { version = "4", features = ["derive", "env"] }
|
||||
|
||||
# Error handling
|
||||
anyhow = "1"
|
||||
|
||||
# Time
|
||||
chrono = { version = "0.4", features = ["serde"] }
|
||||
|
||||
# Paths
|
||||
dirs = "6"
|
||||
|
||||
# Database (read-only queries from TUI)
|
||||
rusqlite = { version = "0.38", features = ["bundled"] }
|
||||
|
||||
# Terminal (crossterm for raw mode + event reading, used by ftui runtime)
|
||||
crossterm = "0.28"
|
||||
|
||||
# Serialization (crash context NDJSON dumps)
|
||||
serde = { version = "1", features = ["derive"] }
|
||||
serde_json = "1"
|
||||
|
||||
# Regex (used by safety module for PII/secret redaction)
|
||||
regex = "1"
|
||||
|
||||
[dev-dependencies]
|
||||
tempfile = "3"
|
||||
4
crates/lore-tui/rust-toolchain.toml
Normal file
4
crates/lore-tui/rust-toolchain.toml
Normal file
@@ -0,0 +1,4 @@
|
||||
[toolchain]
|
||||
channel = "nightly-2026-02-08"
|
||||
profile = "minimal"
|
||||
components = ["rustfmt", "clippy"]
|
||||
2835
crates/lore-tui/src/action.rs
Normal file
2835
crates/lore-tui/src/action.rs
Normal file
File diff suppressed because it is too large
Load Diff
73
crates/lore-tui/src/app/mod.rs
Normal file
73
crates/lore-tui/src/app/mod.rs
Normal file
@@ -0,0 +1,73 @@
|
||||
#![allow(dead_code)] // Phase 1: methods consumed as screens are implemented
|
||||
|
||||
//! Full FrankenTUI Model implementation for the lore TUI.
|
||||
//!
|
||||
//! LoreApp is the central coordinator: it owns all state, dispatches
|
||||
//! messages through a 5-stage key pipeline, records crash context
|
||||
//! breadcrumbs, manages async tasks via the supervisor, and routes
|
||||
//! view() to per-screen render functions.
|
||||
|
||||
mod tests;
|
||||
mod update;
|
||||
|
||||
use crate::clock::{Clock, SystemClock};
|
||||
use crate::commands::{CommandRegistry, build_registry};
|
||||
use crate::crash_context::CrashContext;
|
||||
use crate::db::DbManager;
|
||||
use crate::message::InputMode;
|
||||
use crate::navigation::NavigationStack;
|
||||
use crate::state::AppState;
|
||||
use crate::task_supervisor::TaskSupervisor;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// LoreApp
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Root model for the lore TUI.
///
/// Owns all state and implements the FrankenTUI Model trait. The
/// update() method is the single entry point for all state transitions.
pub struct LoreApp {
    /// All screen/domain state (issue lists, search, palette, toasts,
    /// per-screen load states, terminal size).
    pub state: AppState,
    /// Back/forward history of visited screens; Dashboard is the root.
    pub navigation: NavigationStack,
    /// Generation-tracked async task bookkeeping; results carrying a
    /// superseded generation are dropped on arrival.
    pub supervisor: TaskSupervisor,
    /// Breadcrumb recorder (key presses, messages) for post-crash dumps.
    pub crash_context: CrashContext,
    /// Key-binding and command lookup table (single keys, g-prefix
    /// sequences, palette entries).
    pub command_registry: CommandRegistry,
    /// Current key-routing mode: Normal, Text, Palette, or GoPrefix.
    pub input_mode: InputMode,
    /// Injected time source; `SystemClock` in production, `FakeClock`
    /// in tests so prefix timeouts can be driven deterministically.
    pub clock: Box<dyn Clock>,
    /// Read-only database handle; `None` until attached separately.
    pub db: Option<DbManager>,
}
|
||||
|
||||
impl LoreApp {
|
||||
/// Create a new LoreApp with default state.
|
||||
///
|
||||
/// Uses a real system clock and no DB connection (set separately).
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
state: AppState::default(),
|
||||
navigation: NavigationStack::new(),
|
||||
supervisor: TaskSupervisor::new(),
|
||||
crash_context: CrashContext::new(),
|
||||
command_registry: build_registry(),
|
||||
input_mode: InputMode::Normal,
|
||||
clock: Box::new(SystemClock),
|
||||
db: None,
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a LoreApp for testing with a custom clock.
|
||||
#[cfg(test)]
|
||||
fn with_clock(clock: Box<dyn Clock>) -> Self {
|
||||
Self {
|
||||
clock,
|
||||
..Self::new()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for LoreApp {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
330
crates/lore-tui/src/app/tests.rs
Normal file
330
crates/lore-tui/src/app/tests.rs
Normal file
@@ -0,0 +1,330 @@
|
||||
//! Tests for LoreApp.
|
||||
|
||||
#![cfg(test)]
|
||||
|
||||
use chrono::TimeDelta;
|
||||
use ftui::{Cmd, Event, KeyCode, KeyEvent, Model, Modifiers};
|
||||
|
||||
use crate::clock::FakeClock;
|
||||
use crate::message::{InputMode, Msg, Screen};
|
||||
|
||||
use super::LoreApp;
|
||||
|
||||
// Build an app under test with a deterministic fake clock so
// time-dependent behavior (the g-prefix timeout) can be driven
// explicitly via FakeClock::advance.
fn test_app() -> LoreApp {
    LoreApp::with_clock(Box::new(FakeClock::new(chrono::Utc::now())))
}

/// Verify that `App::fullscreen(LoreApp::new()).run()` compiles.
// Compile-time-only check: never called, exists so a signature change
// in the ftui App builder breaks the build here rather than at runtime.
fn _assert_app_fullscreen_compiles() {
    fn _inner() {
        use ftui::App;
        let _app_builder = App::fullscreen(LoreApp::new());
    }
}

/// Verify that `App::inline(LoreApp::new(), 12).run()` compiles.
// Same compile-time-only pattern for the inline (12-row) variant.
fn _assert_app_inline_compiles() {
    fn _inner() {
        use ftui::App;
        let _app_builder = App::inline(LoreApp::new(), 12);
    }
}
|
||||
|
||||
#[test]
fn test_lore_app_init_returns_none() {
    // init() performs no startup work yet, so it must not schedule a Cmd.
    let mut app = test_app();
    let cmd = app.init();
    assert!(matches!(cmd, Cmd::None));
}

#[test]
fn test_lore_app_quit_returns_quit_cmd() {
    // Msg::Quit maps directly to Cmd::Quit, terminating the runtime loop.
    let mut app = test_app();
    let cmd = app.update(Msg::Quit);
    assert!(matches!(cmd, Cmd::Quit));
}

#[test]
fn test_lore_app_tick_returns_none() {
    // Periodic ticks are currently no-ops at the model level.
    let mut app = test_app();
    let cmd = app.update(Msg::Tick);
    assert!(matches!(cmd, Cmd::None));
}

#[test]
fn test_lore_app_navigate_to_updates_nav_stack() {
    let mut app = test_app();
    let cmd = app.update(Msg::NavigateTo(Screen::IssueList));
    assert!(matches!(cmd, Cmd::None));
    assert!(app.navigation.is_at(&Screen::IssueList));
    // Depth 2 = Dashboard root plus the newly pushed IssueList screen.
    assert_eq!(app.navigation.depth(), 2);
}

#[test]
fn test_lore_app_go_back() {
    // GoBack pops back to the root Dashboard screen.
    let mut app = test_app();
    app.update(Msg::NavigateTo(Screen::IssueList));
    app.update(Msg::GoBack);
    assert!(app.navigation.is_at(&Screen::Dashboard));
}

#[test]
fn test_lore_app_go_forward() {
    // Back then forward restores the previously visited screen.
    let mut app = test_app();
    app.update(Msg::NavigateTo(Screen::IssueList));
    app.update(Msg::GoBack);
    app.update(Msg::GoForward);
    assert!(app.navigation.is_at(&Screen::IssueList));
}
|
||||
|
||||
#[test]
fn test_ctrl_c_always_quits() {
    // Stage 1 of the key pipeline: Ctrl+C quits regardless of input mode.
    let mut app = test_app();
    let key = KeyEvent::new(KeyCode::Char('c')).with_modifiers(Modifiers::CTRL);
    let cmd = app.update(Msg::RawEvent(Event::Key(key)));
    assert!(matches!(cmd, Cmd::Quit));
}

#[test]
fn test_q_key_quits_in_normal_mode() {
    // Unmodified 'q' is the Normal-mode quit shortcut.
    let mut app = test_app();
    let key = KeyEvent::new(KeyCode::Char('q'));
    let cmd = app.update(Msg::RawEvent(Event::Key(key)));
    assert!(matches!(cmd, Cmd::Quit));
}

#[test]
fn test_q_key_blocked_in_text_mode() {
    // While typing, 'q' belongs to the text widget, not the quit binding.
    let mut app = test_app();
    app.input_mode = InputMode::Text;
    let key = KeyEvent::new(KeyCode::Char('q'));
    let cmd = app.update(Msg::RawEvent(Event::Key(key)));
    // q in text mode should NOT quit.
    assert!(matches!(cmd, Cmd::None));
}

#[test]
fn test_esc_blurs_text_mode() {
    // Esc in Text mode blurs the focused input and returns to Normal.
    let mut app = test_app();
    app.input_mode = InputMode::Text;
    app.state.search.query_focused = true;

    let key = KeyEvent::new(KeyCode::Escape);
    app.update(Msg::RawEvent(Event::Key(key)));

    assert!(matches!(app.input_mode, InputMode::Normal));
    assert!(!app.state.has_text_focus());
}

#[test]
fn test_g_prefix_enters_go_mode() {
    // 'g' is a sequence starter; it arms the GoPrefix mode with a timestamp.
    let mut app = test_app();
    let key = KeyEvent::new(KeyCode::Char('g'));
    app.update(Msg::RawEvent(Event::Key(key)));
    assert!(matches!(app.input_mode, InputMode::GoPrefix { .. }));
}

#[test]
fn test_g_then_i_navigates_to_issues() {
    // Completed 'g' 'i' sequence navigates to the issue list.
    let mut app = test_app();

    // First key: 'g'
    let key_g = KeyEvent::new(KeyCode::Char('g'));
    app.update(Msg::RawEvent(Event::Key(key_g)));

    // Second key: 'i'
    let key_i = KeyEvent::new(KeyCode::Char('i'));
    app.update(Msg::RawEvent(Event::Key(key_i)));

    assert!(app.navigation.is_at(&Screen::IssueList));
}

#[test]
fn test_go_prefix_timeout_cancels() {
    // A second key arriving after GO_PREFIX_TIMEOUT (500ms) must not
    // complete the sequence; the prefix silently expires.
    let clock = FakeClock::new(chrono::Utc::now());
    let mut app = LoreApp::with_clock(Box::new(clock.clone()));

    // Press 'g'.
    let key_g = KeyEvent::new(KeyCode::Char('g'));
    app.update(Msg::RawEvent(Event::Key(key_g)));
    assert!(matches!(app.input_mode, InputMode::GoPrefix { .. }));

    // Advance clock past timeout.
    clock.advance(TimeDelta::milliseconds(600));

    // Press 'i' after timeout — should NOT navigate to issues.
    let key_i = KeyEvent::new(KeyCode::Char('i'));
    app.update(Msg::RawEvent(Event::Key(key_i)));

    // Should still be at Dashboard (no navigation happened).
    assert!(app.navigation.is_at(&Screen::Dashboard));
    assert!(matches!(app.input_mode, InputMode::Normal));
}
|
||||
|
||||
#[test]
fn test_show_help_toggles() {
    // Msg::ShowHelp flips the help overlay flag each time it arrives.
    let mut app = test_app();
    assert!(!app.state.show_help);

    app.update(Msg::ShowHelp);
    assert!(app.state.show_help);

    app.update(Msg::ShowHelp);
    assert!(!app.state.show_help);
}

#[test]
fn test_error_msg_sets_toast() {
    // Errors surface as a human-readable toast string in state.
    let mut app = test_app();
    app.update(Msg::Error(crate::message::AppError::DbBusy));
    assert!(app.state.error_toast.is_some());
    assert!(app.state.error_toast.as_ref().unwrap().contains("busy"));
}

#[test]
fn test_resize_updates_terminal_size() {
    // Resize events are mirrored into state for layout decisions.
    let mut app = test_app();
    app.update(Msg::Resize {
        width: 120,
        height: 40,
    });
    assert_eq!(app.state.terminal_size, (120, 40));
}

#[test]
fn test_stale_result_dropped() {
    // Generation tracking: only results carrying the supervisor's
    // latest generation for a task key are applied; earlier ones are
    // ignored so a slow first load can't overwrite a newer one.
    use crate::message::Screen;
    use crate::task_supervisor::TaskKey;

    let mut app = test_app();

    // Submit two tasks for IssueList — second supersedes first.
    let gen1 = app
        .supervisor
        .submit(TaskKey::LoadScreen(Screen::IssueList))
        .generation;
    let gen2 = app
        .supervisor
        .submit(TaskKey::LoadScreen(Screen::IssueList))
        .generation;

    // Stale result with gen1 should be ignored.
    app.update(Msg::IssueListLoaded {
        generation: gen1,
        page: crate::state::issue_list::IssueListPage {
            rows: vec![crate::state::issue_list::IssueListRow {
                project_path: "group/project".into(),
                iid: 1,
                title: "stale".into(),
                state: "opened".into(),
                author: "taylor".into(),
                labels: vec![],
                updated_at: 1_700_000_000_000,
            }],
            next_cursor: None,
            total_count: 1,
        },
    });
    assert!(app.state.issue_list.rows.is_empty());

    // Current result with gen2 should be applied.
    app.update(Msg::IssueListLoaded {
        generation: gen2,
        page: crate::state::issue_list::IssueListPage {
            rows: vec![crate::state::issue_list::IssueListRow {
                project_path: "group/project".into(),
                iid: 2,
                title: "fresh".into(),
                state: "opened".into(),
                author: "taylor".into(),
                labels: vec![],
                updated_at: 1_700_000_000_000,
            }],
            next_cursor: None,
            total_count: 1,
        },
    });
    assert_eq!(app.state.issue_list.rows.len(), 1);
    assert_eq!(app.state.issue_list.rows[0].title, "fresh");
}

#[test]
fn test_crash_context_records_events() {
    // Every processed message leaves a breadcrumb in the crash context.
    let mut app = test_app();
    app.update(Msg::Tick);
    app.update(Msg::NavigateTo(Screen::IssueList));

    // Should have recorded at least 2 events.
    assert!(app.crash_context.len() >= 2);
}
|
||||
|
||||
#[test]
fn test_navigate_sets_loading_initial_on_first_visit() {
    use crate::state::LoadState;

    let mut app = test_app();
    app.update(Msg::NavigateTo(Screen::IssueList));
    // First visit should show full-screen spinner (LoadingInitial).
    assert_eq!(
        *app.state.load_state.get(&Screen::IssueList),
        LoadState::LoadingInitial
    );
}

#[test]
fn test_navigate_sets_refreshing_on_revisit() {
    use crate::state::LoadState;

    let mut app = test_app();
    // First visit → LoadingInitial.
    app.update(Msg::NavigateTo(Screen::IssueList));
    // Simulate load completing.
    app.state.set_loading(Screen::IssueList, LoadState::Idle);
    // Go back, then revisit.
    app.update(Msg::GoBack);
    app.update(Msg::NavigateTo(Screen::IssueList));
    // Second visit should show corner spinner (Refreshing) because
    // stale-but-usable data is already on screen.
    assert_eq!(
        *app.state.load_state.get(&Screen::IssueList),
        LoadState::Refreshing
    );
}

#[test]
fn test_command_palette_opens_from_ctrl_p() {
    // Ctrl+P switches to Palette mode and focuses the palette query.
    let mut app = test_app();
    let key = KeyEvent::new(KeyCode::Char('p')).with_modifiers(Modifiers::CTRL);
    app.update(Msg::RawEvent(Event::Key(key)));
    assert!(matches!(app.input_mode, InputMode::Palette));
    assert!(app.state.command_palette.query_focused);
}

#[test]
fn test_esc_closes_palette() {
    // Esc dismisses the palette back to Normal mode.
    let mut app = test_app();
    app.input_mode = InputMode::Palette;

    let key = KeyEvent::new(KeyCode::Escape);
    app.update(Msg::RawEvent(Event::Key(key)));

    assert!(matches!(app.input_mode, InputMode::Normal));
}

#[test]
fn test_blur_text_input_msg() {
    // Msg::BlurTextInput is the message-level equivalent of pressing Esc
    // while a text input has focus.
    let mut app = test_app();
    app.input_mode = InputMode::Text;
    app.state.search.query_focused = true;

    app.update(Msg::BlurTextInput);

    assert!(matches!(app.input_mode, InputMode::Normal));
    assert!(!app.state.has_text_focus());
}

#[test]
fn test_default_is_new() {
    // Default must match new(): Dashboard root screen, Normal mode.
    let app = LoreApp::default();
    assert!(app.navigation.is_at(&Screen::Dashboard));
    assert!(matches!(app.input_mode, InputMode::Normal));
}
|
||||
419
crates/lore-tui/src/app/update.rs
Normal file
419
crates/lore-tui/src/app/update.rs
Normal file
@@ -0,0 +1,419 @@
|
||||
//! Model trait impl and key dispatch for LoreApp.
|
||||
|
||||
use chrono::TimeDelta;
|
||||
use ftui::{Cmd, Event, Frame, KeyCode, KeyEvent, Model, Modifiers};
|
||||
|
||||
use crate::crash_context::CrashEvent;
|
||||
use crate::message::{InputMode, Msg, Screen};
|
||||
use crate::state::LoadState;
|
||||
use crate::task_supervisor::TaskKey;
|
||||
|
||||
use super::LoreApp;
|
||||
|
||||
/// Timeout for the g-prefix key sequence.
///
/// If the second key arrives more than this long after 'g', the prefix
/// is cancelled and the key is re-processed as a Normal-mode key
/// (checked in `interpret_key`, Stage 2).
const GO_PREFIX_TIMEOUT: TimeDelta = TimeDelta::milliseconds(500);
|
||||
|
||||
impl LoreApp {
|
||||
// -----------------------------------------------------------------------
|
||||
// Key dispatch
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
/// Normalize terminal key variants for cross-terminal consistency.
|
||||
fn normalize_key(key: &mut KeyEvent) {
|
||||
// BackTab -> Shift+Tab canonical form.
|
||||
if key.code == KeyCode::BackTab {
|
||||
key.code = KeyCode::Tab;
|
||||
key.modifiers |= Modifiers::SHIFT;
|
||||
}
|
||||
}
|
||||
|
||||
    /// 5-stage key dispatch pipeline.
    ///
    /// Stages, in order:
    /// 1. Quit check — Ctrl+C quits regardless of input mode.
    /// 2. InputMode routing — Text/Palette/GoPrefix modes consume the
    ///    key; an expired GoPrefix resets to Normal and lets the key
    ///    fall through to the stages below.
    /// 3. Global shortcuts (Normal mode) — 'q' quits, sequence starters
    ///    arm the g-prefix, then the registry resolves single-key
    ///    bindings for the current screen.
    /// 4. Screen-local keys — placeholder for future phases.
    /// 5. Fallback — unhandled keys are silently dropped.
    ///
    /// Every key press is recorded as a crash-context breadcrumb before
    /// dispatch, so a post-crash dump shows what led up to the failure.
    ///
    /// Returns the Cmd to execute (Quit, None, or a task command).
    pub(crate) fn interpret_key(&mut self, mut key: KeyEvent) -> Cmd<Msg> {
        // Fold terminal-specific variants (BackTab) into canonical form
        // before any lookup, so bindings behave identically everywhere.
        Self::normalize_key(&mut key);

        let screen = self.navigation.current().clone();

        // Record key press in crash context.
        self.crash_context.push(CrashEvent::KeyPress {
            key: format!("{:?}", key.code),
            mode: format!("{:?}", self.input_mode),
            screen: screen.label().to_string(),
        });

        // --- Stage 1: Quit check ---
        // Ctrl+C always quits regardless of mode.
        if key.code == KeyCode::Char('c') && key.modifiers.contains(Modifiers::CTRL) {
            return Cmd::quit();
        }

        // --- Stage 2: InputMode routing ---
        match &self.input_mode {
            InputMode::Text => {
                return self.handle_text_mode_key(&key, &screen);
            }
            InputMode::Palette => {
                return self.handle_palette_mode_key(&key, &screen);
            }
            InputMode::GoPrefix { started_at } => {
                let elapsed = self.clock.now().signed_duration_since(*started_at);
                if elapsed > GO_PREFIX_TIMEOUT {
                    // Timeout expired — cancel prefix and re-process as normal.
                    // Deliberately no `return` here: the key continues
                    // into Stages 3-5 as an ordinary Normal-mode key.
                    self.input_mode = InputMode::Normal;
                } else {
                    return self.handle_go_prefix_key(&key, &screen);
                }
            }
            InputMode::Normal => {}
        }

        // --- Stage 3: Global shortcuts (Normal mode) ---
        // 'q' quits. Exact-modifier match so e.g. Ctrl+Q is not caught.
        if key.code == KeyCode::Char('q') && key.modifiers == Modifiers::NONE {
            return Cmd::quit();
        }

        // 'g' starts prefix sequence. The start time is stamped from the
        // injected clock so tests can expire the prefix deterministically.
        if self
            .command_registry
            .is_sequence_starter(&key.code, &key.modifiers)
        {
            self.input_mode = InputMode::GoPrefix {
                started_at: self.clock.now(),
            };
            return Cmd::none();
        }

        // Registry-based single-key lookup, scoped to the current screen
        // and input mode.
        if let Some(cmd_def) =
            self.command_registry
                .lookup_key(&key.code, &key.modifiers, &screen, &self.input_mode)
        {
            return self.execute_command(cmd_def.id, &screen);
        }

        // --- Stage 4: Screen-local keys ---
        // Delegated to AppState::interpret_screen_key in future phases.

        // --- Stage 5: Fallback (unhandled) ---
        Cmd::none()
    }
|
||||
|
||||
/// Handle keys in Text input mode.
|
||||
///
|
||||
/// Only Esc and Ctrl+P pass through; everything else is consumed by
|
||||
/// the focused text widget (handled in future phases).
|
||||
fn handle_text_mode_key(&mut self, key: &KeyEvent, screen: &Screen) -> Cmd<Msg> {
|
||||
// Esc blurs the text input.
|
||||
if key.code == KeyCode::Escape {
|
||||
self.state.blur_text_focus();
|
||||
self.input_mode = InputMode::Normal;
|
||||
return Cmd::none();
|
||||
}
|
||||
|
||||
// Ctrl+P opens palette even in text mode.
|
||||
if let Some(cmd_def) =
|
||||
self.command_registry
|
||||
.lookup_key(&key.code, &key.modifiers, screen, &InputMode::Text)
|
||||
{
|
||||
return self.execute_command(cmd_def.id, screen);
|
||||
}
|
||||
|
||||
// All other keys consumed by text widget (future).
|
||||
Cmd::none()
|
||||
}
|
||||
|
||||
/// Handle keys in Palette mode.
|
||||
fn handle_palette_mode_key(&mut self, key: &KeyEvent, _screen: &Screen) -> Cmd<Msg> {
|
||||
if key.code == KeyCode::Escape {
|
||||
self.input_mode = InputMode::Normal;
|
||||
return Cmd::none();
|
||||
}
|
||||
// Palette key dispatch will be expanded in the palette widget phase.
|
||||
Cmd::none()
|
||||
}
|
||||
|
||||
/// Handle the second key of a g-prefix sequence.
|
||||
fn handle_go_prefix_key(&mut self, key: &KeyEvent, screen: &Screen) -> Cmd<Msg> {
|
||||
self.input_mode = InputMode::Normal;
|
||||
|
||||
if let Some(cmd_def) = self.command_registry.complete_sequence(
|
||||
&KeyCode::Char('g'),
|
||||
&Modifiers::NONE,
|
||||
&key.code,
|
||||
&key.modifiers,
|
||||
screen,
|
||||
) {
|
||||
return self.execute_command(cmd_def.id, screen);
|
||||
}
|
||||
|
||||
// Invalid second key — cancel prefix silently.
|
||||
Cmd::none()
|
||||
}
|
||||
|
||||
/// Execute a command by ID.
|
||||
fn execute_command(&mut self, id: &str, _screen: &Screen) -> Cmd<Msg> {
|
||||
match id {
|
||||
"quit" => Cmd::quit(),
|
||||
"go_back" => {
|
||||
self.navigation.pop();
|
||||
Cmd::none()
|
||||
}
|
||||
"show_help" => {
|
||||
self.state.show_help = !self.state.show_help;
|
||||
Cmd::none()
|
||||
}
|
||||
"command_palette" => {
|
||||
self.input_mode = InputMode::Palette;
|
||||
self.state.command_palette.query_focused = true;
|
||||
Cmd::none()
|
||||
}
|
||||
"open_in_browser" => {
|
||||
// Will dispatch OpenInBrowser msg in future phase.
|
||||
Cmd::none()
|
||||
}
|
||||
"show_cli" => {
|
||||
// Will show CLI equivalent in future phase.
|
||||
Cmd::none()
|
||||
}
|
||||
"go_home" => self.navigate_to(Screen::Dashboard),
|
||||
"go_issues" => self.navigate_to(Screen::IssueList),
|
||||
"go_mrs" => self.navigate_to(Screen::MrList),
|
||||
"go_search" => self.navigate_to(Screen::Search),
|
||||
"go_timeline" => self.navigate_to(Screen::Timeline),
|
||||
"go_who" => self.navigate_to(Screen::Who),
|
||||
"go_sync" => self.navigate_to(Screen::Sync),
|
||||
"jump_back" => {
|
||||
self.navigation.jump_back();
|
||||
Cmd::none()
|
||||
}
|
||||
"jump_forward" => {
|
||||
self.navigation.jump_forward();
|
||||
Cmd::none()
|
||||
}
|
||||
"move_down" | "move_up" | "select_item" | "focus_filter" | "scroll_to_top" => {
|
||||
// Screen-specific actions — delegated in future phases.
|
||||
Cmd::none()
|
||||
}
|
||||
_ => Cmd::none(),
|
||||
}
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Navigation helpers
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
/// Navigate to a screen, pushing the nav stack and starting a data load.
|
||||
fn navigate_to(&mut self, screen: Screen) -> Cmd<Msg> {
|
||||
let screen_label = screen.label().to_string();
|
||||
let current_label = self.navigation.current().label().to_string();
|
||||
|
||||
self.crash_context.push(CrashEvent::StateTransition {
|
||||
from: current_label,
|
||||
to: screen_label,
|
||||
});
|
||||
|
||||
self.navigation.push(screen.clone());
|
||||
|
||||
// First visit → full-screen spinner; revisit → corner spinner over stale data.
|
||||
let load_state = if self.state.load_state.was_visited(&screen) {
|
||||
LoadState::Refreshing
|
||||
} else {
|
||||
LoadState::LoadingInitial
|
||||
};
|
||||
self.state.set_loading(screen.clone(), load_state);
|
||||
|
||||
// Spawn supervised task for data loading (placeholder — actual DB
|
||||
// query dispatch comes in Phase 2 screen implementations).
|
||||
let _handle = self.supervisor.submit(TaskKey::LoadScreen(screen));
|
||||
|
||||
Cmd::none()
|
||||
}
|
||||
|
||||
// -----------------------------------------------------------------------
|
||||
// Message dispatch (non-key)
|
||||
// -----------------------------------------------------------------------
|
||||
|
||||
    /// Handle non-key messages.
    ///
    /// Flow: every message is first recorded in the crash context (variant
    /// name plus the current screen) so panics can report what was in
    /// flight, then dispatched. All `*Loaded` results are stale-guarded:
    /// they only apply while the supervisor still considers their
    /// generation current, so a slow task finishing after a re-navigation
    /// cannot clobber fresher state.
    pub(crate) fn handle_msg(&mut self, msg: Msg) -> Cmd<Msg> {
        // Record in crash context.
        self.crash_context.push(CrashEvent::MsgDispatched {
            // Keep only the variant name by truncating debug output at the
            // first '('. NOTE(review): struct-style variants format with
            // '{', not '(', so their payload may leak into msg_name —
            // confirm whether splitting on whitespace was intended.
            msg_name: format!("{msg:?}")
                .split('(')
                .next()
                .unwrap_or("?")
                .to_string(),
            screen: self.navigation.current().label().to_string(),
        });

        match msg {
            Msg::Quit => Cmd::quit(),

            // --- Navigation ---
            Msg::NavigateTo(screen) => self.navigate_to(screen),
            Msg::GoBack => {
                self.navigation.pop();
                Cmd::none()
            }
            Msg::GoForward => {
                self.navigation.go_forward();
                Cmd::none()
            }
            Msg::GoHome => self.navigate_to(Screen::Dashboard),
            Msg::JumpBack(_) => {
                // Payload currently ignored; jump list position is owned
                // by the navigation component.
                self.navigation.jump_back();
                Cmd::none()
            }
            Msg::JumpForward(_) => {
                self.navigation.jump_forward();
                Cmd::none()
            }

            // --- Error ---
            Msg::Error(err) => {
                self.state.set_error(err.to_string());
                Cmd::none()
            }

            // --- Help / UI ---
            Msg::ShowHelp => {
                // Toggle so the same message opens and closes the overlay.
                self.state.show_help = !self.state.show_help;
                Cmd::none()
            }
            Msg::BlurTextInput => {
                self.state.blur_text_focus();
                self.input_mode = InputMode::Normal;
                Cmd::none()
            }

            // --- Terminal ---
            Msg::Resize { width, height } => {
                self.state.terminal_size = (width, height);
                Cmd::none()
            }
            Msg::Tick => Cmd::none(),

            // --- Loaded results (stale guard) ---
            Msg::IssueListLoaded { generation, page } => {
                if self
                    .supervisor
                    .is_current(&TaskKey::LoadScreen(Screen::IssueList), generation)
                {
                    // Apply data and clear the spinner before marking the
                    // task complete, so completion implies fresh state.
                    self.state.issue_list.apply_page(page);
                    self.state.set_loading(Screen::IssueList, LoadState::Idle);
                    self.supervisor
                        .complete(&TaskKey::LoadScreen(Screen::IssueList), generation);
                }
                Cmd::none()
            }
            Msg::MrListLoaded { generation, page } => {
                if self
                    .supervisor
                    .is_current(&TaskKey::LoadScreen(Screen::MrList), generation)
                {
                    self.state.mr_list.apply_page(page);
                    self.state.set_loading(Screen::MrList, LoadState::Idle);
                    self.supervisor
                        .complete(&TaskKey::LoadScreen(Screen::MrList), generation);
                }
                Cmd::none()
            }
            Msg::DashboardLoaded { generation, data } => {
                if self
                    .supervisor
                    .is_current(&TaskKey::LoadScreen(Screen::Dashboard), generation)
                {
                    // `data` is boxed in the message; deref to move it in.
                    self.state.dashboard.update(*data);
                    self.state.set_loading(Screen::Dashboard, LoadState::Idle);
                    self.supervisor
                        .complete(&TaskKey::LoadScreen(Screen::Dashboard), generation);
                }
                Cmd::none()
            }

            // --- Issue detail ---
            Msg::IssueDetailLoaded {
                generation,
                key,
                data,
            } => {
                let screen = Screen::IssueDetail(key.clone());
                if self
                    .supervisor
                    .is_current(&TaskKey::LoadScreen(screen.clone()), generation)
                {
                    self.state.issue_detail.apply_metadata(*data);
                    self.state.set_loading(screen.clone(), LoadState::Idle);
                    self.supervisor
                        .complete(&TaskKey::LoadScreen(screen), generation);
                }
                Cmd::none()
            }
            Msg::DiscussionsLoaded {
                generation,
                key,
                discussions,
            } => {
                // Discussions arrive separately from metadata; presumably
                // the metadata load owns the Idle transition, so none is
                // performed here — confirm against the loader.
                let screen = Screen::IssueDetail(key.clone());
                if self
                    .supervisor
                    .is_current(&TaskKey::LoadScreen(screen.clone()), generation)
                {
                    self.state.issue_detail.apply_discussions(discussions);
                }
                Cmd::none()
            }

            // --- MR detail ---
            Msg::MrDetailLoaded {
                generation,
                key,
                data,
            } => {
                let screen = Screen::MrDetail(key.clone());
                if self
                    .supervisor
                    .is_current(&TaskKey::LoadScreen(screen.clone()), generation)
                {
                    self.state.mr_detail.apply_metadata(*data);
                    self.state.set_loading(screen.clone(), LoadState::Idle);
                    self.supervisor
                        .complete(&TaskKey::LoadScreen(screen), generation);
                }
                Cmd::none()
            }

            // All other message variants: no-op for now.
            // Future phases will fill these in as screens are implemented.
            _ => Cmd::none(),
        }
    }
|
||||
}
|
||||
|
||||
impl Model for LoreApp {
|
||||
type Message = Msg;
|
||||
|
||||
fn init(&mut self) -> Cmd<Self::Message> {
|
||||
// Install crash context panic hook.
|
||||
crate::crash_context::CrashContext::install_panic_hook(&self.crash_context);
|
||||
crate::crash_context::CrashContext::prune_crash_files();
|
||||
|
||||
// Navigate to dashboard (will trigger data load in future phase).
|
||||
Cmd::none()
|
||||
}
|
||||
|
||||
fn update(&mut self, msg: Self::Message) -> Cmd<Self::Message> {
|
||||
// Route raw key events through the 5-stage pipeline.
|
||||
if let Msg::RawEvent(Event::Key(key)) = msg {
|
||||
return self.interpret_key(key);
|
||||
}
|
||||
|
||||
// Everything else goes through message dispatch.
|
||||
self.handle_msg(msg)
|
||||
}
|
||||
|
||||
fn view(&self, frame: &mut Frame) {
|
||||
crate::view::render_screen(frame, self);
|
||||
}
|
||||
}
|
||||
165
crates/lore-tui/src/clock.rs
Normal file
165
crates/lore-tui/src/clock.rs
Normal file
@@ -0,0 +1,165 @@
|
||||
//! Injected clock for deterministic time in tests and consistent frame timestamps.
|
||||
//!
|
||||
//! All relative-time rendering (e.g., "3h ago") uses [`Clock::now()`] rather
|
||||
//! than wall-clock time directly. This enables:
|
||||
//! - Deterministic snapshot tests via [`FakeClock`]
|
||||
//! - Consistent timestamps within a single frame render pass
|
||||
|
||||
use std::sync::{Arc, Mutex};
|
||||
|
||||
use chrono::{DateTime, TimeDelta, Utc};
|
||||
|
||||
/// Trait for obtaining the current time.
///
/// Inject via `Arc<dyn Clock>` to allow swapping between real and fake clocks.
/// `Send + Sync` bounds let the clock cross thread/task boundaries.
pub trait Clock: Send + Sync {
    /// Returns the current time.
    fn now(&self) -> DateTime<Utc>;

    /// Returns the current time as milliseconds since the Unix epoch.
    ///
    /// Provided method — derived from [`Clock::now`], so implementors only
    /// need to supply `now`.
    fn now_ms(&self) -> i64 {
        self.now().timestamp_millis()
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// SystemClock
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Real wall-clock time via `chrono::Utc::now()`.
///
/// Zero-sized and `Copy`; the production counterpart to [`FakeClock`].
#[derive(Debug, Clone, Copy)]
pub struct SystemClock;

impl Clock for SystemClock {
    fn now(&self) -> DateTime<Utc> {
        Utc::now()
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// FakeClock
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A controllable clock for tests. Returns a frozen time that can be
|
||||
/// advanced or set explicitly.
|
||||
///
|
||||
/// `FakeClock` is `Clone` (shares the inner `Arc`) and `Send + Sync`
|
||||
/// for use across `Cmd::task` threads.
|
||||
#[derive(Debug, Clone)]
|
||||
pub struct FakeClock {
|
||||
inner: Arc<Mutex<DateTime<Utc>>>,
|
||||
}
|
||||
|
||||
impl FakeClock {
|
||||
/// Create a fake clock frozen at the given time.
|
||||
#[must_use]
|
||||
pub fn new(time: DateTime<Utc>) -> Self {
|
||||
Self {
|
||||
inner: Arc::new(Mutex::new(time)),
|
||||
}
|
||||
}
|
||||
|
||||
/// Create a fake clock frozen at the given millisecond epoch timestamp.
|
||||
///
|
||||
/// Convenience for action tests that work with raw epoch milliseconds.
|
||||
#[must_use]
|
||||
pub fn from_ms(epoch_ms: i64) -> Self {
|
||||
let time = DateTime::from_timestamp_millis(epoch_ms).expect("valid millisecond timestamp");
|
||||
Self::new(time)
|
||||
}
|
||||
|
||||
/// Advance the clock by `duration`. Uses `checked_add` to handle overflow
|
||||
/// gracefully — if the addition would overflow, the time is not changed.
|
||||
pub fn advance(&self, duration: TimeDelta) {
|
||||
let mut guard = self.inner.lock().expect("FakeClock mutex poisoned");
|
||||
if let Some(advanced) = guard.checked_add_signed(duration) {
|
||||
*guard = advanced;
|
||||
}
|
||||
}
|
||||
|
||||
/// Set the clock to an exact time.
|
||||
pub fn set(&self, time: DateTime<Utc>) {
|
||||
let mut guard = self.inner.lock().expect("FakeClock mutex poisoned");
|
||||
*guard = time;
|
||||
}
|
||||
}
|
||||
|
||||
impl Clock for FakeClock {
|
||||
fn now(&self) -> DateTime<Utc> {
|
||||
*self.inner.lock().expect("FakeClock mutex poisoned")
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    // All imports at the top of the module (the original parked
    // `use chrono::Datelike;` at the bottom, after the tests).
    use chrono::{Datelike, TimeZone};

    /// Fixed reference instant used across tests: 2026-02-12 12:00:00 UTC.
    fn fixed_time() -> DateTime<Utc> {
        Utc.with_ymd_and_hms(2026, 2, 12, 12, 0, 0).unwrap()
    }

    #[test]
    fn test_fake_clock_frozen() {
        let clock = FakeClock::new(fixed_time());
        let t1 = clock.now();
        let t2 = clock.now();
        assert_eq!(t1, t2);
        assert_eq!(t1, fixed_time());
    }

    #[test]
    fn test_fake_clock_advance() {
        let clock = FakeClock::new(fixed_time());
        clock.advance(TimeDelta::hours(3));
        let expected = Utc.with_ymd_and_hms(2026, 2, 12, 15, 0, 0).unwrap();
        assert_eq!(clock.now(), expected);
    }

    #[test]
    fn test_fake_clock_set() {
        let clock = FakeClock::new(fixed_time());
        let new_time = Utc.with_ymd_and_hms(2030, 1, 1, 0, 0, 0).unwrap();
        clock.set(new_time);
        assert_eq!(clock.now(), new_time);
    }

    #[test]
    fn test_fake_clock_clone_shares_state() {
        let clock1 = FakeClock::new(fixed_time());
        let clock2 = clock1.clone();
        clock1.advance(TimeDelta::minutes(30));
        // Both clones see the advanced time.
        assert_eq!(clock1.now(), clock2.now());
    }

    #[test]
    fn test_system_clock_returns_reasonable_time() {
        let clock = SystemClock;
        let now = clock.now();
        // Sanity: time should be after 2025.
        assert!(now.year() >= 2025);
    }

    #[test]
    fn test_fake_clock_is_send_sync() {
        fn assert_send_sync<T: Send + Sync>() {}
        assert_send_sync::<FakeClock>();
        assert_send_sync::<SystemClock>();
    }

    #[test]
    fn test_clock_trait_object_works() {
        let fake: Arc<dyn Clock> = Arc::new(FakeClock::new(fixed_time()));
        assert_eq!(fake.now(), fixed_time());

        let real: Arc<dyn Clock> = Arc::new(SystemClock);
        let _ = real.now(); // Just verify it doesn't panic.
    }
}
|
||||
807
crates/lore-tui/src/commands.rs.bak
Normal file
807
crates/lore-tui/src/commands.rs.bak
Normal file
@@ -0,0 +1,807 @@
|
||||
#![allow(dead_code)] // Phase 1: consumed by LoreApp in bd-6pmy
|
||||
|
||||
//! Command registry — single source of truth for all TUI actions.
|
||||
//!
|
||||
//! Every keybinding, palette entry, help text, CLI equivalent, and
|
||||
//! status hint is generated from [`CommandRegistry`]. No hardcoded
|
||||
//! duplicate maps exist in view/state modules.
|
||||
//!
|
||||
//! Supports single-key and two-key sequences (g-prefix vim bindings).
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use ftui::{KeyCode, Modifiers};
|
||||
|
||||
use crate::message::{InputMode, Screen};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Key formatting
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Format a key code + modifiers as a human-readable string.
|
||||
fn format_key(code: KeyCode, modifiers: Modifiers) -> String {
|
||||
let mut parts = Vec::new();
|
||||
if modifiers.contains(Modifiers::CTRL) {
|
||||
parts.push("Ctrl");
|
||||
}
|
||||
if modifiers.contains(Modifiers::ALT) {
|
||||
parts.push("Alt");
|
||||
}
|
||||
if modifiers.contains(Modifiers::SHIFT) {
|
||||
parts.push("Shift");
|
||||
}
|
||||
let key_name = match code {
|
||||
KeyCode::Char(c) => c.to_string(),
|
||||
KeyCode::Enter => "Enter".to_string(),
|
||||
KeyCode::Escape => "Esc".to_string(),
|
||||
KeyCode::Tab => "Tab".to_string(),
|
||||
KeyCode::Backspace => "Backspace".to_string(),
|
||||
KeyCode::Delete => "Del".to_string(),
|
||||
KeyCode::Up => "Up".to_string(),
|
||||
KeyCode::Down => "Down".to_string(),
|
||||
KeyCode::Left => "Left".to_string(),
|
||||
KeyCode::Right => "Right".to_string(),
|
||||
KeyCode::Home => "Home".to_string(),
|
||||
KeyCode::End => "End".to_string(),
|
||||
KeyCode::PageUp => "PgUp".to_string(),
|
||||
KeyCode::PageDown => "PgDn".to_string(),
|
||||
KeyCode::F(n) => format!("F{n}"),
|
||||
_ => "?".to_string(),
|
||||
};
|
||||
parts.push(&key_name);
|
||||
// We need to own the joined string.
|
||||
let joined: String = parts.join("+");
|
||||
joined
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// KeyCombo
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A keybinding: either a single key or a two-key sequence.
|
||||
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
|
||||
pub enum KeyCombo {
|
||||
/// Single key press (e.g., `q`, `Esc`, `Ctrl+P`).
|
||||
Single { code: KeyCode, modifiers: Modifiers },
|
||||
/// Two-key sequence (e.g., `g` then `i` for go-to-issues).
|
||||
Sequence {
|
||||
first_code: KeyCode,
|
||||
first_modifiers: Modifiers,
|
||||
second_code: KeyCode,
|
||||
second_modifiers: Modifiers,
|
||||
},
|
||||
}
|
||||
|
||||
impl KeyCombo {
|
||||
/// Convenience: single key with no modifiers.
|
||||
#[must_use]
|
||||
pub const fn key(code: KeyCode) -> Self {
|
||||
Self::Single {
|
||||
code,
|
||||
modifiers: Modifiers::NONE,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convenience: single key with Ctrl modifier.
|
||||
#[must_use]
|
||||
pub const fn ctrl(code: KeyCode) -> Self {
|
||||
Self::Single {
|
||||
code,
|
||||
modifiers: Modifiers::CTRL,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convenience: g-prefix sequence (g + char).
|
||||
#[must_use]
|
||||
pub const fn g_then(c: char) -> Self {
|
||||
Self::Sequence {
|
||||
first_code: KeyCode::Char('g'),
|
||||
first_modifiers: Modifiers::NONE,
|
||||
second_code: KeyCode::Char(c),
|
||||
second_modifiers: Modifiers::NONE,
|
||||
}
|
||||
}
|
||||
|
||||
/// Human-readable display string for this key combo.
|
||||
#[must_use]
|
||||
pub fn display(&self) -> String {
|
||||
match self {
|
||||
Self::Single { code, modifiers } => format_key(*code, *modifiers),
|
||||
Self::Sequence {
|
||||
first_code,
|
||||
first_modifiers,
|
||||
second_code,
|
||||
second_modifiers,
|
||||
} => {
|
||||
let first = format_key(*first_code, *first_modifiers);
|
||||
let second = format_key(*second_code, *second_modifiers);
|
||||
format!("{first} {second}")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether this combo starts with the given key.
|
||||
#[must_use]
|
||||
pub fn starts_with(&self, code: &KeyCode, modifiers: &Modifiers) -> bool {
|
||||
match self {
|
||||
Self::Single {
|
||||
code: c,
|
||||
modifiers: m,
|
||||
} => c == code && m == modifiers,
|
||||
Self::Sequence {
|
||||
first_code,
|
||||
first_modifiers,
|
||||
..
|
||||
} => first_code == code && first_modifiers == modifiers,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// ScreenFilter
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Specifies which screens a command is available on.
|
||||
#[derive(Debug, Clone)]
|
||||
pub enum ScreenFilter {
|
||||
/// Available on all screens.
|
||||
Global,
|
||||
/// Available only on specific screens.
|
||||
Only(Vec<Screen>),
|
||||
}
|
||||
|
||||
impl ScreenFilter {
|
||||
/// Whether the command is available on the given screen.
|
||||
#[must_use]
|
||||
pub fn matches(&self, screen: &Screen) -> bool {
|
||||
match self {
|
||||
Self::Global => true,
|
||||
Self::Only(screens) => screens.contains(screen),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// CommandDef
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Unique command identifier.
///
/// A `&'static str` (e.g., `"quit"`, `"go_issues"`); dispatch matches on
/// the string, so IDs must stay stable between the registry and the app.
pub type CommandId = &'static str;

/// A registered command with its keybinding, help text, and scope.
///
/// Instances are declared once in [`build_registry`]; keybindings, palette
/// entries, help lines, and status-bar hints are all derived from these
/// fields rather than duplicated elsewhere.
#[derive(Debug, Clone)]
pub struct CommandDef {
    /// Unique identifier (e.g., "quit", "go_issues").
    pub id: CommandId,
    /// Human-readable label for palette and help overlay.
    pub label: &'static str,
    /// Keybinding (if any). `None` means palette/programmatic access only.
    pub keybinding: Option<KeyCombo>,
    /// Equivalent `lore` CLI command (for "Show CLI equivalent" feature).
    pub cli_equivalent: Option<&'static str>,
    /// Description for help overlay.
    pub help_text: &'static str,
    /// Short hint for status bar (e.g., "q:quit"); empty string hides it.
    pub status_hint: &'static str,
    /// Which screens this command is available on.
    pub available_in: ScreenFilter,
    /// Whether this command works in Text input mode.
    pub available_in_text_mode: bool,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// CommandRegistry
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Single source of truth for all TUI commands.
///
/// Built once at startup via [`build_registry`]. Provides O(1) lookup
/// by keybinding and per-screen filtering.
pub struct CommandRegistry {
    /// All registered commands; the index maps below point into this Vec.
    commands: Vec<CommandDef>,
    /// Single key -> indices into `commands` whose binding starts with that
    /// key. A Vec because the same key can be bound on different screens.
    by_single_key: HashMap<(KeyCode, Modifiers), Vec<usize>>,
    /// Full sequence -> command index (for two-key combos).
    by_sequence: HashMap<KeyCombo, usize>,
}
|
||||
|
||||
impl CommandRegistry {
|
||||
/// Look up a command by a single key press on a given screen and input mode.
|
||||
///
|
||||
/// Returns `None` if no matching command is found. For sequence starters
|
||||
/// (like 'g'), returns `None` — use [`is_sequence_starter`] to detect
|
||||
/// that case.
|
||||
#[must_use]
|
||||
pub fn lookup_key(
|
||||
&self,
|
||||
code: &KeyCode,
|
||||
modifiers: &Modifiers,
|
||||
screen: &Screen,
|
||||
mode: &InputMode,
|
||||
) -> Option<&CommandDef> {
|
||||
let is_text = matches!(mode, InputMode::Text);
|
||||
let key = (*code, *modifiers);
|
||||
|
||||
let indices = self.by_single_key.get(&key)?;
|
||||
for &idx in indices {
|
||||
let cmd = &self.commands[idx];
|
||||
if !cmd.available_in.matches(screen) {
|
||||
continue;
|
||||
}
|
||||
if is_text && !cmd.available_in_text_mode {
|
||||
continue;
|
||||
}
|
||||
// Only match Single combos here, not sequence starters.
|
||||
if let Some(KeyCombo::Single { .. }) = &cmd.keybinding {
|
||||
return Some(cmd);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Complete a two-key sequence.
|
||||
///
|
||||
/// Called after the first key of a sequence is detected (e.g., after 'g').
|
||||
#[must_use]
|
||||
pub fn complete_sequence(
|
||||
&self,
|
||||
first_code: &KeyCode,
|
||||
first_modifiers: &Modifiers,
|
||||
second_code: &KeyCode,
|
||||
second_modifiers: &Modifiers,
|
||||
screen: &Screen,
|
||||
) -> Option<&CommandDef> {
|
||||
let combo = KeyCombo::Sequence {
|
||||
first_code: *first_code,
|
||||
first_modifiers: *first_modifiers,
|
||||
second_code: *second_code,
|
||||
second_modifiers: *second_modifiers,
|
||||
};
|
||||
let &idx = self.by_sequence.get(&combo)?;
|
||||
let cmd = &self.commands[idx];
|
||||
if cmd.available_in.matches(screen) {
|
||||
Some(cmd)
|
||||
} else {
|
||||
None
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether a key starts a multi-key sequence (e.g., 'g').
|
||||
#[must_use]
|
||||
pub fn is_sequence_starter(&self, code: &KeyCode, modifiers: &Modifiers) -> bool {
|
||||
self.by_sequence
|
||||
.keys()
|
||||
.any(|combo| combo.starts_with(code, modifiers))
|
||||
}
|
||||
|
||||
/// Commands available for the command palette on a given screen.
|
||||
///
|
||||
/// Returned sorted by label.
|
||||
#[must_use]
|
||||
pub fn palette_entries(&self, screen: &Screen) -> Vec<&CommandDef> {
|
||||
let mut entries: Vec<&CommandDef> = self
|
||||
.commands
|
||||
.iter()
|
||||
.filter(|c| c.available_in.matches(screen))
|
||||
.collect();
|
||||
entries.sort_by_key(|c| c.label);
|
||||
entries
|
||||
}
|
||||
|
||||
/// Commands for the help overlay on a given screen.
|
||||
#[must_use]
|
||||
pub fn help_entries(&self, screen: &Screen) -> Vec<&CommandDef> {
|
||||
self.commands
|
||||
.iter()
|
||||
.filter(|c| c.available_in.matches(screen))
|
||||
.filter(|c| c.keybinding.is_some())
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Status bar hints for the current screen.
|
||||
#[must_use]
|
||||
pub fn status_hints(&self, screen: &Screen) -> Vec<&str> {
|
||||
self.commands
|
||||
.iter()
|
||||
.filter(|c| c.available_in.matches(screen))
|
||||
.filter(|c| !c.status_hint.is_empty())
|
||||
.map(|c| c.status_hint)
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Total number of registered commands.
|
||||
#[must_use]
|
||||
pub fn len(&self) -> usize {
|
||||
self.commands.len()
|
||||
}
|
||||
|
||||
/// Whether the registry has no commands.
|
||||
#[must_use]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.commands.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// build_registry
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Build the command registry with all TUI commands.
|
||||
///
|
||||
/// This is the single source of truth — every keybinding, help text,
|
||||
/// and palette entry originates here.
|
||||
#[must_use]
|
||||
pub fn build_registry() -> CommandRegistry {
|
||||
let commands = vec![
|
||||
// --- Global commands ---
|
||||
CommandDef {
|
||||
id: "quit",
|
||||
label: "Quit",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Char('q'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Exit the TUI",
|
||||
status_hint: "q:quit",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "go_back",
|
||||
label: "Go Back",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Escape)),
|
||||
cli_equivalent: None,
|
||||
help_text: "Go back to previous screen",
|
||||
status_hint: "esc:back",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: true,
|
||||
},
|
||||
CommandDef {
|
||||
id: "show_help",
|
||||
label: "Help",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Char('?'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Show keybinding help overlay",
|
||||
status_hint: "?:help",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "command_palette",
|
||||
label: "Command Palette",
|
||||
keybinding: Some(KeyCombo::ctrl(KeyCode::Char('p'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Open command palette",
|
||||
status_hint: "C-p:palette",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: true,
|
||||
},
|
||||
CommandDef {
|
||||
id: "open_in_browser",
|
||||
label: "Open in Browser",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Char('o'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Open current entity in browser",
|
||||
status_hint: "o:browser",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "show_cli",
|
||||
label: "Show CLI Equivalent",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Char('!'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Show equivalent lore CLI command",
|
||||
status_hint: "",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
// --- Navigation: g-prefix sequences ---
|
||||
CommandDef {
|
||||
id: "go_home",
|
||||
label: "Go to Dashboard",
|
||||
keybinding: Some(KeyCombo::g_then('h')),
|
||||
cli_equivalent: None,
|
||||
help_text: "Jump to dashboard",
|
||||
status_hint: "gh:home",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "go_issues",
|
||||
label: "Go to Issues",
|
||||
keybinding: Some(KeyCombo::g_then('i')),
|
||||
cli_equivalent: Some("lore issues"),
|
||||
help_text: "Jump to issue list",
|
||||
status_hint: "gi:issues",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "go_mrs",
|
||||
label: "Go to Merge Requests",
|
||||
keybinding: Some(KeyCombo::g_then('m')),
|
||||
cli_equivalent: Some("lore mrs"),
|
||||
help_text: "Jump to MR list",
|
||||
status_hint: "gm:mrs",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "go_search",
|
||||
label: "Go to Search",
|
||||
keybinding: Some(KeyCombo::g_then('/')),
|
||||
cli_equivalent: Some("lore search"),
|
||||
help_text: "Jump to search",
|
||||
status_hint: "g/:search",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "go_timeline",
|
||||
label: "Go to Timeline",
|
||||
keybinding: Some(KeyCombo::g_then('t')),
|
||||
cli_equivalent: Some("lore timeline"),
|
||||
help_text: "Jump to timeline",
|
||||
status_hint: "gt:timeline",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "go_who",
|
||||
label: "Go to Who",
|
||||
keybinding: Some(KeyCombo::g_then('w')),
|
||||
cli_equivalent: Some("lore who"),
|
||||
help_text: "Jump to people intelligence",
|
||||
status_hint: "gw:who",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "go_sync",
|
||||
label: "Go to Sync",
|
||||
keybinding: Some(KeyCombo::g_then('s')),
|
||||
cli_equivalent: Some("lore sync"),
|
||||
help_text: "Jump to sync status",
|
||||
status_hint: "gs:sync",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
// --- Vim-style jump list ---
|
||||
CommandDef {
|
||||
id: "jump_back",
|
||||
label: "Jump Back",
|
||||
keybinding: Some(KeyCombo::ctrl(KeyCode::Char('o'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Jump backward through visited detail views",
|
||||
status_hint: "C-o:jump back",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "jump_forward",
|
||||
label: "Jump Forward",
|
||||
keybinding: Some(KeyCombo::ctrl(KeyCode::Char('i'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Jump forward through visited detail views",
|
||||
status_hint: "",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
// --- List navigation ---
|
||||
CommandDef {
|
||||
id: "move_down",
|
||||
label: "Move Down",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Char('j'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Move cursor down",
|
||||
status_hint: "j:down",
|
||||
available_in: ScreenFilter::Only(vec![
|
||||
Screen::IssueList,
|
||||
Screen::MrList,
|
||||
Screen::Search,
|
||||
Screen::Timeline,
|
||||
]),
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "move_up",
|
||||
label: "Move Up",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Char('k'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Move cursor up",
|
||||
status_hint: "k:up",
|
||||
available_in: ScreenFilter::Only(vec![
|
||||
Screen::IssueList,
|
||||
Screen::MrList,
|
||||
Screen::Search,
|
||||
Screen::Timeline,
|
||||
]),
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "select_item",
|
||||
label: "Select",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Enter)),
|
||||
cli_equivalent: None,
|
||||
help_text: "Open selected item",
|
||||
status_hint: "enter:open",
|
||||
available_in: ScreenFilter::Only(vec![
|
||||
Screen::IssueList,
|
||||
Screen::MrList,
|
||||
Screen::Search,
|
||||
]),
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
// --- Filter ---
|
||||
CommandDef {
|
||||
id: "focus_filter",
|
||||
label: "Filter",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Char('/'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Focus the filter input",
|
||||
status_hint: "/:filter",
|
||||
available_in: ScreenFilter::Only(vec![Screen::IssueList, Screen::MrList]),
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
// --- Scroll ---
|
||||
CommandDef {
|
||||
id: "scroll_to_top",
|
||||
label: "Scroll to Top",
|
||||
keybinding: Some(KeyCombo::g_then('g')),
|
||||
cli_equivalent: None,
|
||||
help_text: "Scroll to the top of the current view",
|
||||
status_hint: "",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
];
|
||||
|
||||
build_from_defs(commands)
|
||||
}
|
||||
|
||||
/// Build index maps from a list of command definitions.
|
||||
fn build_from_defs(commands: Vec<CommandDef>) -> CommandRegistry {
|
||||
let mut by_single_key: HashMap<(KeyCode, Modifiers), Vec<usize>> = HashMap::new();
|
||||
let mut by_sequence: HashMap<KeyCombo, usize> = HashMap::new();
|
||||
|
||||
for (idx, cmd) in commands.iter().enumerate() {
|
||||
if let Some(combo) = &cmd.keybinding {
|
||||
match combo {
|
||||
KeyCombo::Single { code, modifiers } => {
|
||||
by_single_key
|
||||
.entry((*code, *modifiers))
|
||||
.or_default()
|
||||
.push(idx);
|
||||
}
|
||||
KeyCombo::Sequence { .. } => {
|
||||
by_sequence.insert(combo.clone(), idx);
|
||||
// Also index the first key so is_sequence_starter works via by_single_key.
|
||||
if let KeyCombo::Sequence {
|
||||
first_code,
|
||||
first_modifiers,
|
||||
..
|
||||
} = combo
|
||||
{
|
||||
by_single_key
|
||||
.entry((*first_code, *first_modifiers))
|
||||
.or_default()
|
||||
.push(idx);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
CommandRegistry {
|
||||
commands,
|
||||
by_single_key,
|
||||
by_sequence,
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Utc;

    #[test]
    fn test_registry_builds_successfully() {
        let reg = build_registry();
        assert!(!reg.is_empty());
        assert!(reg.len() >= 15);
    }

    #[test]
    fn test_registry_lookup_quit() {
        let reg = build_registry();
        let cmd = reg.lookup_key(
            &KeyCode::Char('q'),
            &Modifiers::NONE,
            &Screen::Dashboard,
            &InputMode::Normal,
        );
        assert!(cmd.is_some());
        assert_eq!(cmd.unwrap().id, "quit");
    }

    #[test]
    fn test_registry_lookup_quit_blocked_in_text_mode() {
        let reg = build_registry();
        let cmd = reg.lookup_key(
            &KeyCode::Char('q'),
            &Modifiers::NONE,
            &Screen::Dashboard,
            &InputMode::Text,
        );
        assert!(cmd.is_none());
    }

    #[test]
    fn test_registry_esc_works_in_text_mode() {
        let reg = build_registry();
        let cmd = reg.lookup_key(
            &KeyCode::Escape,
            &Modifiers::NONE,
            &Screen::IssueList,
            &InputMode::Text,
        );
        assert!(cmd.is_some());
        assert_eq!(cmd.unwrap().id, "go_back");
    }

    #[test]
    fn test_registry_ctrl_p_works_in_text_mode() {
        let reg = build_registry();
        let cmd = reg.lookup_key(
            &KeyCode::Char('p'),
            &Modifiers::CTRL,
            &Screen::Search,
            &InputMode::Text,
        );
        assert!(cmd.is_some());
        assert_eq!(cmd.unwrap().id, "command_palette");
    }

    #[test]
    fn test_g_is_sequence_starter() {
        let reg = build_registry();
        assert!(reg.is_sequence_starter(&KeyCode::Char('g'), &Modifiers::NONE));
        assert!(!reg.is_sequence_starter(&KeyCode::Char('x'), &Modifiers::NONE));
    }

    #[test]
    fn test_complete_sequence_gi() {
        let reg = build_registry();
        let cmd = reg.complete_sequence(
            &KeyCode::Char('g'),
            &Modifiers::NONE,
            &KeyCode::Char('i'),
            &Modifiers::NONE,
            &Screen::Dashboard,
        );
        assert!(cmd.is_some());
        assert_eq!(cmd.unwrap().id, "go_issues");
    }

    #[test]
    fn test_complete_sequence_invalid_second_key() {
        let reg = build_registry();
        let cmd = reg.complete_sequence(
            &KeyCode::Char('g'),
            &Modifiers::NONE,
            &KeyCode::Char('x'),
            &Modifiers::NONE,
            &Screen::Dashboard,
        );
        assert!(cmd.is_none());
    }

    #[test]
    fn test_screen_specific_command() {
        let reg = build_registry();
        // 'j' (move_down) should work on IssueList
        let cmd = reg.lookup_key(
            &KeyCode::Char('j'),
            &Modifiers::NONE,
            &Screen::IssueList,
            &InputMode::Normal,
        );
        assert!(cmd.is_some());
        assert_eq!(cmd.unwrap().id, "move_down");

        // 'j' should NOT match on Dashboard (move_down is list-only).
        let cmd = reg.lookup_key(
            &KeyCode::Char('j'),
            &Modifiers::NONE,
            &Screen::Dashboard,
            &InputMode::Normal,
        );
        assert!(cmd.is_none());
    }

    #[test]
    fn test_palette_entries_sorted_by_label() {
        let reg = build_registry();
        let entries = reg.palette_entries(&Screen::Dashboard);
        let labels: Vec<&str> = entries.iter().map(|c| c.label).collect();
        let mut sorted = labels.clone();
        sorted.sort();
        assert_eq!(labels, sorted);
    }

    #[test]
    fn test_help_entries_only_include_keybindings() {
        let reg = build_registry();
        let entries = reg.help_entries(&Screen::Dashboard);
        for entry in &entries {
            assert!(
                entry.keybinding.is_some(),
                "help entry without keybinding: {}",
                entry.id
            );
        }
    }

    #[test]
    fn test_status_hints_non_empty() {
        let reg = build_registry();
        let hints = reg.status_hints(&Screen::Dashboard);
        assert!(!hints.is_empty());
        // All returned hints should be non-empty strings.
        for hint in &hints {
            assert!(!hint.is_empty());
        }
    }

    #[test]
    fn test_cli_equivalents_populated() {
        let reg = build_registry();
        let with_cli: Vec<&CommandDef> = reg
            .commands
            .iter()
            .filter(|c| c.cli_equivalent.is_some())
            .collect();
        assert!(
            with_cli.len() >= 5,
            "expected at least 5 commands with cli_equivalent, got {}",
            with_cli.len()
        );
    }

    #[test]
    fn test_go_prefix_timeout_detection() {
        let reg = build_registry();
        // Simulate GoPrefix mode entering: 'g' detected as sequence starter.
        assert!(reg.is_sequence_starter(&KeyCode::Char('g'), &Modifiers::NONE));

        // Simulate InputMode::GoPrefix with timeout check.
        let started = Utc::now();
        let mode = InputMode::GoPrefix {
            started_at: started,
        };
        // In GoPrefix mode, normal lookup should still work for non-sequence keys.
        let cmd = reg.lookup_key(
            &KeyCode::Char('q'),
            &Modifiers::NONE,
            &Screen::Dashboard,
            &mode,
        );
        assert!(cmd.is_some());
        assert_eq!(cmd.unwrap().id, "quit");
    }

    #[test]
    fn test_all_commands_have_nonempty_help() {
        let reg = build_registry();
        // Fixed: `&reg` had been mangled into the `®` character by entity
        // mis-encoding, which is not valid Rust.
        for cmd in &reg.commands {
            assert!(
                !cmd.help_text.is_empty(),
                "command {} has empty help_text",
                cmd.id
            );
        }
    }
}
|
||||
180
crates/lore-tui/src/commands/defs.rs
Normal file
180
crates/lore-tui/src/commands/defs.rs
Normal file
@@ -0,0 +1,180 @@
|
||||
//! Command definitions — types for keybindings, screen filtering, and command metadata.
|
||||
|
||||
use ftui::{KeyCode, Modifiers};
|
||||
|
||||
use crate::message::Screen;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Key formatting
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Format a key code + modifiers as a human-readable string.
|
||||
pub(crate) fn format_key(code: KeyCode, modifiers: Modifiers) -> String {
|
||||
let mut parts = Vec::new();
|
||||
if modifiers.contains(Modifiers::CTRL) {
|
||||
parts.push("Ctrl");
|
||||
}
|
||||
if modifiers.contains(Modifiers::ALT) {
|
||||
parts.push("Alt");
|
||||
}
|
||||
if modifiers.contains(Modifiers::SHIFT) {
|
||||
parts.push("Shift");
|
||||
}
|
||||
let key_name = match code {
|
||||
KeyCode::Char(c) => c.to_string(),
|
||||
KeyCode::Enter => "Enter".to_string(),
|
||||
KeyCode::Escape => "Esc".to_string(),
|
||||
KeyCode::Tab => "Tab".to_string(),
|
||||
KeyCode::Backspace => "Backspace".to_string(),
|
||||
KeyCode::Delete => "Del".to_string(),
|
||||
KeyCode::Up => "Up".to_string(),
|
||||
KeyCode::Down => "Down".to_string(),
|
||||
KeyCode::Left => "Left".to_string(),
|
||||
KeyCode::Right => "Right".to_string(),
|
||||
KeyCode::Home => "Home".to_string(),
|
||||
KeyCode::End => "End".to_string(),
|
||||
KeyCode::PageUp => "PgUp".to_string(),
|
||||
KeyCode::PageDown => "PgDn".to_string(),
|
||||
KeyCode::F(n) => format!("F{n}"),
|
||||
_ => "?".to_string(),
|
||||
};
|
||||
parts.push(&key_name);
|
||||
// We need to own the joined string.
|
||||
let joined: String = parts.join("+");
|
||||
joined
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// KeyCombo
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A keybinding: either a single key or a two-key sequence.
///
/// Derives `Hash`/`Eq` so a full combo can be used directly as a
/// `HashMap` key in the registry's sequence index.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum KeyCombo {
    /// Single key press (e.g., `q`, `Esc`, `Ctrl+P`).
    Single { code: KeyCode, modifiers: Modifiers },
    /// Two-key sequence (e.g., `g` then `i` for go-to-issues).
    /// Each keypress carries its own modifier set.
    Sequence {
        first_code: KeyCode,
        first_modifiers: Modifiers,
        second_code: KeyCode,
        second_modifiers: Modifiers,
    },
}
|
||||
|
||||
impl KeyCombo {
|
||||
/// Convenience: single key with no modifiers.
|
||||
#[must_use]
|
||||
pub const fn key(code: KeyCode) -> Self {
|
||||
Self::Single {
|
||||
code,
|
||||
modifiers: Modifiers::NONE,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convenience: single key with Ctrl modifier.
|
||||
#[must_use]
|
||||
pub const fn ctrl(code: KeyCode) -> Self {
|
||||
Self::Single {
|
||||
code,
|
||||
modifiers: Modifiers::CTRL,
|
||||
}
|
||||
}
|
||||
|
||||
/// Convenience: g-prefix sequence (g + char).
|
||||
#[must_use]
|
||||
pub const fn g_then(c: char) -> Self {
|
||||
Self::Sequence {
|
||||
first_code: KeyCode::Char('g'),
|
||||
first_modifiers: Modifiers::NONE,
|
||||
second_code: KeyCode::Char(c),
|
||||
second_modifiers: Modifiers::NONE,
|
||||
}
|
||||
}
|
||||
|
||||
/// Human-readable display string for this key combo.
|
||||
#[must_use]
|
||||
pub fn display(&self) -> String {
|
||||
match self {
|
||||
Self::Single { code, modifiers } => format_key(*code, *modifiers),
|
||||
Self::Sequence {
|
||||
first_code,
|
||||
first_modifiers,
|
||||
second_code,
|
||||
second_modifiers,
|
||||
} => {
|
||||
let first = format_key(*first_code, *first_modifiers);
|
||||
let second = format_key(*second_code, *second_modifiers);
|
||||
format!("{first} {second}")
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether this combo starts with the given key.
|
||||
#[must_use]
|
||||
pub fn starts_with(&self, code: &KeyCode, modifiers: &Modifiers) -> bool {
|
||||
match self {
|
||||
Self::Single {
|
||||
code: c,
|
||||
modifiers: m,
|
||||
} => c == code && m == modifiers,
|
||||
Self::Sequence {
|
||||
first_code,
|
||||
first_modifiers,
|
||||
..
|
||||
} => first_code == code && first_modifiers == modifiers,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// ScreenFilter
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Specifies which screens a command is available on.
///
/// Used by the registry to filter lookups, palette entries, help
/// entries, and status hints per-screen.
#[derive(Debug, Clone)]
pub enum ScreenFilter {
    /// Available on all screens.
    Global,
    /// Available only on the listed screens.
    Only(Vec<Screen>),
}
|
||||
|
||||
impl ScreenFilter {
|
||||
/// Whether the command is available on the given screen.
|
||||
#[must_use]
|
||||
pub fn matches(&self, screen: &Screen) -> bool {
|
||||
match self {
|
||||
Self::Global => true,
|
||||
Self::Only(screens) => screens.contains(screen),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// CommandDef
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Unique command identifier.
///
/// A `&'static str` slug (e.g. `"quit"`, `"go_issues"`); stable across
/// runs, suitable for matching in dispatch code and tests.
pub type CommandId = &'static str;

/// A registered command with its keybinding, help text, and scope.
///
/// All fields are `'static` data: the command list is a compile-time
/// constant assembled once by `build_registry`.
#[derive(Debug, Clone)]
pub struct CommandDef {
    /// Unique identifier (e.g., "quit", "go_issues").
    pub id: CommandId,
    /// Human-readable label for palette and help overlay.
    pub label: &'static str,
    /// Keybinding (if any). `None` means palette/dispatch-only.
    pub keybinding: Option<KeyCombo>,
    /// Equivalent `lore` CLI command (for "Show CLI equivalent" feature).
    pub cli_equivalent: Option<&'static str>,
    /// Description for help overlay.
    pub help_text: &'static str,
    /// Short hint for status bar (e.g., "q:quit"); empty string hides it.
    pub status_hint: &'static str,
    /// Which screens this command is available on.
    pub available_in: ScreenFilter,
    /// Whether this command works in Text input mode.
    pub available_in_text_mode: bool,
}
|
||||
227
crates/lore-tui/src/commands/mod.rs
Normal file
227
crates/lore-tui/src/commands/mod.rs
Normal file
@@ -0,0 +1,227 @@
|
||||
#![allow(dead_code)] // Phase 1: consumed by LoreApp in bd-6pmy
|
||||
|
||||
//! Command registry — single source of truth for all TUI actions.
|
||||
//!
|
||||
//! Every keybinding, palette entry, help text, CLI equivalent, and
|
||||
//! status hint is generated from [`CommandRegistry`]. No hardcoded
|
||||
//! duplicate maps exist in view/state modules.
|
||||
//!
|
||||
//! Supports single-key and two-key sequences (g-prefix vim bindings).
|
||||
|
||||
mod defs;
|
||||
mod registry;
|
||||
|
||||
// Re-export public API — preserves `crate::commands::{CommandRegistry, build_registry, ...}`.
|
||||
pub use defs::{CommandDef, CommandId, KeyCombo, ScreenFilter};
|
||||
pub use registry::{CommandRegistry, build_registry};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use chrono::Utc;
    use ftui::{KeyCode, Modifiers};

    use crate::message::{InputMode, Screen};

    #[test]
    fn test_registry_builds_successfully() {
        let reg = build_registry();
        assert!(!reg.is_empty());
        assert!(reg.len() >= 15);
    }

    #[test]
    fn test_registry_lookup_quit() {
        let reg = build_registry();
        let cmd = reg.lookup_key(
            &KeyCode::Char('q'),
            &Modifiers::NONE,
            &Screen::Dashboard,
            &InputMode::Normal,
        );
        assert!(cmd.is_some());
        assert_eq!(cmd.unwrap().id, "quit");
    }

    #[test]
    fn test_registry_lookup_quit_blocked_in_text_mode() {
        let reg = build_registry();
        let cmd = reg.lookup_key(
            &KeyCode::Char('q'),
            &Modifiers::NONE,
            &Screen::Dashboard,
            &InputMode::Text,
        );
        assert!(cmd.is_none());
    }

    #[test]
    fn test_registry_esc_works_in_text_mode() {
        let reg = build_registry();
        let cmd = reg.lookup_key(
            &KeyCode::Escape,
            &Modifiers::NONE,
            &Screen::IssueList,
            &InputMode::Text,
        );
        assert!(cmd.is_some());
        assert_eq!(cmd.unwrap().id, "go_back");
    }

    #[test]
    fn test_registry_ctrl_p_works_in_text_mode() {
        let reg = build_registry();
        let cmd = reg.lookup_key(
            &KeyCode::Char('p'),
            &Modifiers::CTRL,
            &Screen::Search,
            &InputMode::Text,
        );
        assert!(cmd.is_some());
        assert_eq!(cmd.unwrap().id, "command_palette");
    }

    #[test]
    fn test_g_is_sequence_starter() {
        let reg = build_registry();
        assert!(reg.is_sequence_starter(&KeyCode::Char('g'), &Modifiers::NONE));
        assert!(!reg.is_sequence_starter(&KeyCode::Char('x'), &Modifiers::NONE));
    }

    #[test]
    fn test_complete_sequence_gi() {
        let reg = build_registry();
        let cmd = reg.complete_sequence(
            &KeyCode::Char('g'),
            &Modifiers::NONE,
            &KeyCode::Char('i'),
            &Modifiers::NONE,
            &Screen::Dashboard,
        );
        assert!(cmd.is_some());
        assert_eq!(cmd.unwrap().id, "go_issues");
    }

    #[test]
    fn test_complete_sequence_invalid_second_key() {
        let reg = build_registry();
        let cmd = reg.complete_sequence(
            &KeyCode::Char('g'),
            &Modifiers::NONE,
            &KeyCode::Char('x'),
            &Modifiers::NONE,
            &Screen::Dashboard,
        );
        assert!(cmd.is_none());
    }

    #[test]
    fn test_screen_specific_command() {
        let reg = build_registry();
        // 'j' (move_down) should work on IssueList
        let cmd = reg.lookup_key(
            &KeyCode::Char('j'),
            &Modifiers::NONE,
            &Screen::IssueList,
            &InputMode::Normal,
        );
        assert!(cmd.is_some());
        assert_eq!(cmd.unwrap().id, "move_down");

        // 'j' should NOT match on Dashboard (move_down is list-only).
        let cmd = reg.lookup_key(
            &KeyCode::Char('j'),
            &Modifiers::NONE,
            &Screen::Dashboard,
            &InputMode::Normal,
        );
        assert!(cmd.is_none());
    }

    #[test]
    fn test_palette_entries_sorted_by_label() {
        let reg = build_registry();
        let entries = reg.palette_entries(&Screen::Dashboard);
        let labels: Vec<&str> = entries.iter().map(|c| c.label).collect();
        let mut sorted = labels.clone();
        sorted.sort();
        assert_eq!(labels, sorted);
    }

    #[test]
    fn test_help_entries_only_include_keybindings() {
        let reg = build_registry();
        let entries = reg.help_entries(&Screen::Dashboard);
        for entry in &entries {
            assert!(
                entry.keybinding.is_some(),
                "help entry without keybinding: {}",
                entry.id
            );
        }
    }

    #[test]
    fn test_status_hints_non_empty() {
        let reg = build_registry();
        let hints = reg.status_hints(&Screen::Dashboard);
        assert!(!hints.is_empty());
        // All returned hints should be non-empty strings.
        for hint in &hints {
            assert!(!hint.is_empty());
        }
    }

    #[test]
    fn test_cli_equivalents_populated() {
        let reg = build_registry();
        let with_cli: Vec<&CommandDef> = reg
            .commands
            .iter()
            .filter(|c| c.cli_equivalent.is_some())
            .collect();
        assert!(
            with_cli.len() >= 5,
            "expected at least 5 commands with cli_equivalent, got {}",
            with_cli.len()
        );
    }

    #[test]
    fn test_go_prefix_timeout_detection() {
        let reg = build_registry();
        // Simulate GoPrefix mode entering: 'g' detected as sequence starter.
        assert!(reg.is_sequence_starter(&KeyCode::Char('g'), &Modifiers::NONE));

        // Simulate InputMode::GoPrefix with timeout check.
        let started = Utc::now();
        let mode = InputMode::GoPrefix {
            started_at: started,
        };
        // In GoPrefix mode, normal lookup should still work for non-sequence keys.
        let cmd = reg.lookup_key(
            &KeyCode::Char('q'),
            &Modifiers::NONE,
            &Screen::Dashboard,
            &mode,
        );
        assert!(cmd.is_some());
        assert_eq!(cmd.unwrap().id, "quit");
    }

    #[test]
    fn test_all_commands_have_nonempty_help() {
        let reg = build_registry();
        // Fixed: `&reg` had been mangled into the `®` character by entity
        // mis-encoding, which is not valid Rust.
        for cmd in &reg.commands {
            assert!(
                !cmd.help_text.is_empty(),
                "command {} has empty help_text",
                cmd.id
            );
        }
    }
}
|
||||
418
crates/lore-tui/src/commands/registry.rs
Normal file
418
crates/lore-tui/src/commands/registry.rs
Normal file
@@ -0,0 +1,418 @@
|
||||
//! Command registry — lookup, indexing, and the canonical command list.
|
||||
|
||||
use std::collections::HashMap;
|
||||
|
||||
use ftui::{KeyCode, Modifiers};
|
||||
|
||||
use crate::message::{InputMode, Screen};
|
||||
|
||||
use super::defs::{CommandDef, KeyCombo, ScreenFilter};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// CommandRegistry
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Single source of truth for all TUI commands.
///
/// Built once at startup via [`build_registry`]. Provides O(1) lookup
/// by keybinding and per-screen filtering.
pub struct CommandRegistry {
    // pub(crate) so the test module can inspect the full command list.
    pub(crate) commands: Vec<CommandDef>,
    /// Single-key -> indices into `commands` of every command whose
    /// binding starts with this key (Single combos and Sequence starters).
    by_single_key: HashMap<(KeyCode, Modifiers), Vec<usize>>,
    /// Full sequence -> command index (for two-key combos).
    by_sequence: HashMap<KeyCombo, usize>,
}
|
||||
|
||||
impl CommandRegistry {
    /// Look up a command by a single key press on a given screen and input mode.
    ///
    /// Returns `None` if no matching command is found. For sequence starters
    /// (like 'g'), returns `None` — use [`is_sequence_starter`] to detect
    /// that case. When several commands share a key, the first one (in
    /// registration order) that passes the screen/text-mode filters wins.
    #[must_use]
    pub fn lookup_key(
        &self,
        code: &KeyCode,
        modifiers: &Modifiers,
        screen: &Screen,
        mode: &InputMode,
    ) -> Option<&CommandDef> {
        // Text mode suppresses most bindings so typing isn't hijacked.
        let is_text = matches!(mode, InputMode::Text);
        let key = (*code, *modifiers);

        let indices = self.by_single_key.get(&key)?;
        for &idx in indices {
            let cmd = &self.commands[idx];
            if !cmd.available_in.matches(screen) {
                continue;
            }
            if is_text && !cmd.available_in_text_mode {
                continue;
            }
            // Only match Single combos here, not sequence starters
            // (the same index map also holds first keys of sequences).
            if let Some(KeyCombo::Single { .. }) = &cmd.keybinding {
                return Some(cmd);
            }
        }
        None
    }

    /// Complete a two-key sequence.
    ///
    /// Called after the first key of a sequence is detected (e.g., after 'g').
    /// Returns `None` when the pair is unbound or the command is not
    /// available on `screen`.
    #[must_use]
    pub fn complete_sequence(
        &self,
        first_code: &KeyCode,
        first_modifiers: &Modifiers,
        second_code: &KeyCode,
        second_modifiers: &Modifiers,
        screen: &Screen,
    ) -> Option<&CommandDef> {
        // Rebuild the full combo to use it as the sequence-map key.
        let combo = KeyCombo::Sequence {
            first_code: *first_code,
            first_modifiers: *first_modifiers,
            second_code: *second_code,
            second_modifiers: *second_modifiers,
        };
        let &idx = self.by_sequence.get(&combo)?;
        let cmd = &self.commands[idx];
        if cmd.available_in.matches(screen) {
            Some(cmd)
        } else {
            None
        }
    }

    /// Whether a key starts a multi-key sequence (e.g., 'g').
    #[must_use]
    pub fn is_sequence_starter(&self, code: &KeyCode, modifiers: &Modifiers) -> bool {
        self.by_sequence
            .keys()
            .any(|combo| combo.starts_with(code, modifiers))
    }

    /// Commands available for the command palette on a given screen.
    ///
    /// Returned sorted by label (the palette shows an alphabetical list).
    #[must_use]
    pub fn palette_entries(&self, screen: &Screen) -> Vec<&CommandDef> {
        let mut entries: Vec<&CommandDef> = self
            .commands
            .iter()
            .filter(|c| c.available_in.matches(screen))
            .collect();
        entries.sort_by_key(|c| c.label);
        entries
    }

    /// Commands for the help overlay on a given screen.
    ///
    /// Only commands that actually have a keybinding are listed.
    #[must_use]
    pub fn help_entries(&self, screen: &Screen) -> Vec<&CommandDef> {
        self.commands
            .iter()
            .filter(|c| c.available_in.matches(screen))
            .filter(|c| c.keybinding.is_some())
            .collect()
    }

    /// Status bar hints for the current screen.
    ///
    /// Commands with an empty `status_hint` are omitted.
    #[must_use]
    pub fn status_hints(&self, screen: &Screen) -> Vec<&str> {
        self.commands
            .iter()
            .filter(|c| c.available_in.matches(screen))
            .filter(|c| !c.status_hint.is_empty())
            .map(|c| c.status_hint)
            .collect()
    }

    /// Total number of registered commands.
    #[must_use]
    pub fn len(&self) -> usize {
        self.commands.len()
    }

    /// Whether the registry has no commands.
    #[must_use]
    pub fn is_empty(&self) -> bool {
        self.commands.is_empty()
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// build_registry
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Build the command registry with all TUI commands.
|
||||
///
|
||||
/// This is the single source of truth — every keybinding, help text,
|
||||
/// and palette entry originates here.
|
||||
#[must_use]
|
||||
pub fn build_registry() -> CommandRegistry {
|
||||
let commands = vec![
|
||||
// --- Global commands ---
|
||||
CommandDef {
|
||||
id: "quit",
|
||||
label: "Quit",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Char('q'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Exit the TUI",
|
||||
status_hint: "q:quit",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "go_back",
|
||||
label: "Go Back",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Escape)),
|
||||
cli_equivalent: None,
|
||||
help_text: "Go back to previous screen",
|
||||
status_hint: "esc:back",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: true,
|
||||
},
|
||||
CommandDef {
|
||||
id: "show_help",
|
||||
label: "Help",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Char('?'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Show keybinding help overlay",
|
||||
status_hint: "?:help",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "command_palette",
|
||||
label: "Command Palette",
|
||||
keybinding: Some(KeyCombo::ctrl(KeyCode::Char('p'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Open command palette",
|
||||
status_hint: "C-p:palette",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: true,
|
||||
},
|
||||
CommandDef {
|
||||
id: "open_in_browser",
|
||||
label: "Open in Browser",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Char('o'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Open current entity in browser",
|
||||
status_hint: "o:browser",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "show_cli",
|
||||
label: "Show CLI Equivalent",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Char('!'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Show equivalent lore CLI command",
|
||||
status_hint: "",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
// --- Navigation: g-prefix sequences ---
|
||||
CommandDef {
|
||||
id: "go_home",
|
||||
label: "Go to Dashboard",
|
||||
keybinding: Some(KeyCombo::g_then('h')),
|
||||
cli_equivalent: None,
|
||||
help_text: "Jump to dashboard",
|
||||
status_hint: "gh:home",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "go_issues",
|
||||
label: "Go to Issues",
|
||||
keybinding: Some(KeyCombo::g_then('i')),
|
||||
cli_equivalent: Some("lore issues"),
|
||||
help_text: "Jump to issue list",
|
||||
status_hint: "gi:issues",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "go_mrs",
|
||||
label: "Go to Merge Requests",
|
||||
keybinding: Some(KeyCombo::g_then('m')),
|
||||
cli_equivalent: Some("lore mrs"),
|
||||
help_text: "Jump to MR list",
|
||||
status_hint: "gm:mrs",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "go_search",
|
||||
label: "Go to Search",
|
||||
keybinding: Some(KeyCombo::g_then('/')),
|
||||
cli_equivalent: Some("lore search"),
|
||||
help_text: "Jump to search",
|
||||
status_hint: "g/:search",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "go_timeline",
|
||||
label: "Go to Timeline",
|
||||
keybinding: Some(KeyCombo::g_then('t')),
|
||||
cli_equivalent: Some("lore timeline"),
|
||||
help_text: "Jump to timeline",
|
||||
status_hint: "gt:timeline",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "go_who",
|
||||
label: "Go to Who",
|
||||
keybinding: Some(KeyCombo::g_then('w')),
|
||||
cli_equivalent: Some("lore who"),
|
||||
help_text: "Jump to people intelligence",
|
||||
status_hint: "gw:who",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "go_sync",
|
||||
label: "Go to Sync",
|
||||
keybinding: Some(KeyCombo::g_then('s')),
|
||||
cli_equivalent: Some("lore sync"),
|
||||
help_text: "Jump to sync status",
|
||||
status_hint: "gs:sync",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
// --- Vim-style jump list ---
|
||||
CommandDef {
|
||||
id: "jump_back",
|
||||
label: "Jump Back",
|
||||
keybinding: Some(KeyCombo::ctrl(KeyCode::Char('o'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Jump backward through visited detail views",
|
||||
status_hint: "C-o:jump back",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "jump_forward",
|
||||
label: "Jump Forward",
|
||||
keybinding: Some(KeyCombo::ctrl(KeyCode::Char('i'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Jump forward through visited detail views",
|
||||
status_hint: "",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
// --- List navigation ---
|
||||
CommandDef {
|
||||
id: "move_down",
|
||||
label: "Move Down",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Char('j'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Move cursor down",
|
||||
status_hint: "j:down",
|
||||
available_in: ScreenFilter::Only(vec![
|
||||
Screen::IssueList,
|
||||
Screen::MrList,
|
||||
Screen::Search,
|
||||
Screen::Timeline,
|
||||
]),
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "move_up",
|
||||
label: "Move Up",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Char('k'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Move cursor up",
|
||||
status_hint: "k:up",
|
||||
available_in: ScreenFilter::Only(vec![
|
||||
Screen::IssueList,
|
||||
Screen::MrList,
|
||||
Screen::Search,
|
||||
Screen::Timeline,
|
||||
]),
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
CommandDef {
|
||||
id: "select_item",
|
||||
label: "Select",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Enter)),
|
||||
cli_equivalent: None,
|
||||
help_text: "Open selected item",
|
||||
status_hint: "enter:open",
|
||||
available_in: ScreenFilter::Only(vec![
|
||||
Screen::IssueList,
|
||||
Screen::MrList,
|
||||
Screen::Search,
|
||||
]),
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
// --- Filter ---
|
||||
CommandDef {
|
||||
id: "focus_filter",
|
||||
label: "Filter",
|
||||
keybinding: Some(KeyCombo::key(KeyCode::Char('/'))),
|
||||
cli_equivalent: None,
|
||||
help_text: "Focus the filter input",
|
||||
status_hint: "/:filter",
|
||||
available_in: ScreenFilter::Only(vec![Screen::IssueList, Screen::MrList]),
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
// --- Scroll ---
|
||||
CommandDef {
|
||||
id: "scroll_to_top",
|
||||
label: "Scroll to Top",
|
||||
keybinding: Some(KeyCombo::g_then('g')),
|
||||
cli_equivalent: None,
|
||||
help_text: "Scroll to the top of the current view",
|
||||
status_hint: "",
|
||||
available_in: ScreenFilter::Global,
|
||||
available_in_text_mode: false,
|
||||
},
|
||||
];
|
||||
|
||||
build_from_defs(commands)
|
||||
}
|
||||
|
||||
/// Build index maps from a list of command definitions.
|
||||
fn build_from_defs(commands: Vec<CommandDef>) -> CommandRegistry {
|
||||
let mut by_single_key: HashMap<(KeyCode, Modifiers), Vec<usize>> = HashMap::new();
|
||||
let mut by_sequence: HashMap<KeyCombo, usize> = HashMap::new();
|
||||
|
||||
for (idx, cmd) in commands.iter().enumerate() {
|
||||
if let Some(combo) = &cmd.keybinding {
|
||||
match combo {
|
||||
KeyCombo::Single { code, modifiers } => {
|
||||
by_single_key
|
||||
.entry((*code, *modifiers))
|
||||
.or_default()
|
||||
.push(idx);
|
||||
}
|
||||
KeyCombo::Sequence { .. } => {
|
||||
by_sequence.insert(combo.clone(), idx);
|
||||
// Also index the first key so is_sequence_starter works via by_single_key.
|
||||
if let KeyCombo::Sequence {
|
||||
first_code,
|
||||
first_modifiers,
|
||||
..
|
||||
} = combo
|
||||
{
|
||||
by_single_key
|
||||
.entry((*first_code, *first_modifiers))
|
||||
.or_default()
|
||||
.push(idx);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
CommandRegistry {
|
||||
commands,
|
||||
by_single_key,
|
||||
by_sequence,
|
||||
}
|
||||
}
|
||||
450
crates/lore-tui/src/crash_context.rs
Normal file
450
crates/lore-tui/src/crash_context.rs
Normal file
@@ -0,0 +1,450 @@
|
||||
#![allow(dead_code)] // Phase 1: consumed by LoreApp in bd-6pmy
|
||||
|
||||
//! Ring buffer of recent app events for post-mortem crash diagnostics.
|
||||
//!
|
||||
//! The TUI pushes every key press, message dispatch, and state transition
|
||||
//! into [`CrashContext`]. On panic the installed hook dumps the last 2000
|
||||
//! events to `~/.local/share/lore/crash-<timestamp>.json` as NDJSON.
|
||||
//!
|
||||
//! Retention: only the 5 most recent crash files are kept.
|
||||
|
||||
use std::collections::VecDeque;
|
||||
use std::io::{self, BufWriter, Write};
|
||||
use std::path::{Path, PathBuf};
|
||||
|
||||
use serde::Serialize;
|
||||
|
||||
/// Maximum number of events retained in the ring buffer.
|
||||
const MAX_EVENTS: usize = 2000;
|
||||
|
||||
/// Maximum number of crash files to keep on disk.
|
||||
const MAX_CRASH_FILES: usize = 5;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// CrashEvent
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A single event recorded for crash diagnostics.
///
/// Serialized as NDJSON with an internal `"type"` tag naming the variant,
/// e.g. `{"type":"KeyPress","key":"j",...}`.
#[derive(Debug, Clone, Serialize)]
#[serde(tag = "type")]
pub enum CrashEvent {
    /// A key was pressed.
    KeyPress {
        // Human-readable key name, e.g. "j".
        key: String,
        // Input mode active at the time of the press.
        mode: String,
        // Screen that received the key.
        screen: String,
    },
    /// A message was dispatched through update().
    MsgDispatched { msg_name: String, screen: String },
    /// Navigation changed screens.
    StateTransition { from: String, to: String },
    /// An error occurred.
    Error { message: String },
    /// Catch-all for ad-hoc diagnostic breadcrumbs.
    Custom { tag: String, detail: String },
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// CrashContext
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Ring buffer of recent app events for panic diagnostics.
///
/// Holds at most [`MAX_EVENTS`] entries. When full, the oldest event
/// is evicted on each push.
pub struct CrashContext {
    // Oldest event at the front, newest at the back.
    events: VecDeque<CrashEvent>,
}
|
||||
|
||||
impl CrashContext {
|
||||
/// Create an empty crash context with pre-allocated capacity.
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
events: VecDeque::with_capacity(MAX_EVENTS),
|
||||
}
|
||||
}
|
||||
|
||||
/// Record an event. Evicts the oldest when the buffer is full.
|
||||
pub fn push(&mut self, event: CrashEvent) {
|
||||
if self.events.len() == MAX_EVENTS {
|
||||
self.events.pop_front();
|
||||
}
|
||||
self.events.push_back(event);
|
||||
}
|
||||
|
||||
/// Number of events currently stored.
|
||||
#[must_use]
|
||||
pub fn len(&self) -> usize {
|
||||
self.events.len()
|
||||
}
|
||||
|
||||
/// Whether the buffer is empty.
|
||||
#[must_use]
|
||||
pub fn is_empty(&self) -> bool {
|
||||
self.events.is_empty()
|
||||
}
|
||||
|
||||
/// Iterate over stored events (oldest first).
|
||||
pub fn iter(&self) -> impl Iterator<Item = &CrashEvent> {
|
||||
self.events.iter()
|
||||
}
|
||||
|
||||
/// Dump all events to a file as newline-delimited JSON.
|
||||
///
|
||||
/// Creates parent directories if they don't exist.
|
||||
/// Returns `Ok(())` on success, `Err` on I/O failure.
|
||||
pub fn dump_to_file(&self, path: &Path) -> io::Result<()> {
|
||||
if let Some(parent) = path.parent() {
|
||||
std::fs::create_dir_all(parent)?;
|
||||
}
|
||||
let file = std::fs::File::create(path)?;
|
||||
let mut writer = BufWriter::new(file);
|
||||
for event in &self.events {
|
||||
match serde_json::to_string(event) {
|
||||
Ok(json) => {
|
||||
writeln!(writer, "{json}")?;
|
||||
}
|
||||
Err(_) => {
|
||||
// Fallback to debug format if serialization fails.
|
||||
writeln!(
|
||||
writer,
|
||||
"{{\"type\":\"SerializationError\",\"debug\":\"{event:?}\"}}"
|
||||
)?;
|
||||
}
|
||||
}
|
||||
}
|
||||
writer.flush()?;
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Default crash directory: `~/.local/share/lore/`.
|
||||
#[must_use]
|
||||
pub fn crash_dir() -> Option<PathBuf> {
|
||||
dirs::data_local_dir().map(|d| d.join("lore"))
|
||||
}
|
||||
|
||||
/// Generate a timestamped crash file path.
|
||||
#[must_use]
|
||||
pub fn crash_file_path() -> Option<PathBuf> {
|
||||
let dir = Self::crash_dir()?;
|
||||
let timestamp = chrono::Utc::now().format("%Y%m%d-%H%M%S%.3f");
|
||||
Some(dir.join(format!("crash-{timestamp}.json")))
|
||||
}
|
||||
|
||||
/// Remove old crash files, keeping only the most recent [`MAX_CRASH_FILES`].
|
||||
///
|
||||
/// Best-effort: silently ignores I/O errors on individual deletions.
|
||||
pub fn prune_crash_files() {
|
||||
let Some(dir) = Self::crash_dir() else {
|
||||
return;
|
||||
};
|
||||
let Ok(entries) = std::fs::read_dir(&dir) else {
|
||||
return;
|
||||
};
|
||||
|
||||
let mut crash_files: Vec<PathBuf> = entries
|
||||
.filter_map(Result::ok)
|
||||
.map(|e| e.path())
|
||||
.filter(|p| {
|
||||
p.file_name()
|
||||
.and_then(|n| n.to_str())
|
||||
.is_some_and(|n| n.starts_with("crash-") && n.ends_with(".json"))
|
||||
})
|
||||
.collect();
|
||||
|
||||
// Sort ascending by filename (timestamps sort lexicographically).
|
||||
crash_files.sort();
|
||||
|
||||
if crash_files.len() > MAX_CRASH_FILES {
|
||||
let to_remove = crash_files.len() - MAX_CRASH_FILES;
|
||||
for path in &crash_files[..to_remove] {
|
||||
let _ = std::fs::remove_file(path);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Install a panic hook that dumps the crash context to disk.
|
||||
///
|
||||
/// Captures the current events via a snapshot. The hook chains with
|
||||
/// the default panic handler so backtraces are still printed.
|
||||
///
|
||||
/// FIXME: This snapshots events at install time, which is typically
|
||||
/// during init() when the buffer is empty. The crash dump will only
|
||||
/// contain the panic itself, not the preceding key presses and state
|
||||
/// transitions. Fix requires CrashContext to use interior mutability
|
||||
/// (Arc<Mutex<VecDeque<CrashEvent>>>) so the panic hook reads live
|
||||
/// state instead of a stale snapshot.
|
||||
pub fn install_panic_hook(ctx: &Self) {
|
||||
let snapshot: Vec<CrashEvent> = ctx.events.iter().cloned().collect();
|
||||
let prev_hook = std::panic::take_hook();
|
||||
|
||||
std::panic::set_hook(Box::new(move |info| {
|
||||
// Best-effort dump — never panic inside the panic hook.
|
||||
if let Some(path) = Self::crash_file_path() {
|
||||
let mut dump = CrashContext::new();
|
||||
for event in &snapshot {
|
||||
dump.push(event.clone());
|
||||
}
|
||||
// Add the panic info itself as the final event.
|
||||
dump.push(CrashEvent::Error {
|
||||
message: format!("{info}"),
|
||||
});
|
||||
let _ = dump.dump_to_file(&path);
|
||||
}
|
||||
|
||||
// Chain to the previous hook (prints backtrace, etc.).
|
||||
prev_hook(info);
|
||||
}));
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for CrashContext {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::io::BufRead;

    /// Helper: create a numbered Custom event.
    fn event(n: usize) -> CrashEvent {
        CrashEvent::Custom {
            tag: "test".into(),
            detail: format!("event-{n}"),
        }
    }

    #[test]
    fn test_ring_buffer_evicts_oldest() {
        let mut ctx = CrashContext::new();
        // 2500 pushes exceed MAX_EVENTS (2000), forcing 500 evictions.
        for i in 0..2500 {
            ctx.push(event(i));
        }
        assert_eq!(ctx.len(), MAX_EVENTS);

        // First retained event should be #500 (0..499 evicted).
        let first = ctx.iter().next().unwrap();
        match first {
            CrashEvent::Custom { detail, .. } => assert_eq!(detail, "event-500"),
            other => panic!("unexpected variant: {other:?}"),
        }

        // Last retained event should be #2499.
        let last = ctx.iter().last().unwrap();
        match last {
            CrashEvent::Custom { detail, .. } => assert_eq!(detail, "event-2499"),
            other => panic!("unexpected variant: {other:?}"),
        }
    }

    #[test]
    fn test_new_is_empty() {
        let ctx = CrashContext::new();
        assert!(ctx.is_empty());
        assert_eq!(ctx.len(), 0);
    }

    #[test]
    fn test_push_increments_len() {
        let mut ctx = CrashContext::new();
        ctx.push(event(1));
        ctx.push(event(2));
        assert_eq!(ctx.len(), 2);
    }

    #[test]
    fn test_push_does_not_evict_below_capacity() {
        let mut ctx = CrashContext::new();
        // Fill to exactly capacity — no eviction should occur yet.
        for i in 0..MAX_EVENTS {
            ctx.push(event(i));
        }
        assert_eq!(ctx.len(), MAX_EVENTS);

        // First should still be event-0.
        match ctx.iter().next().unwrap() {
            CrashEvent::Custom { detail, .. } => assert_eq!(detail, "event-0"),
            other => panic!("unexpected: {other:?}"),
        }
    }

    #[test]
    fn test_dump_to_file_writes_ndjson() {
        // One event of each variant, so the dump exercises every serializer.
        let mut ctx = CrashContext::new();
        ctx.push(CrashEvent::KeyPress {
            key: "j".into(),
            mode: "Normal".into(),
            screen: "Dashboard".into(),
        });
        ctx.push(CrashEvent::MsgDispatched {
            msg_name: "NavigateTo".into(),
            screen: "Dashboard".into(),
        });
        ctx.push(CrashEvent::StateTransition {
            from: "Dashboard".into(),
            to: "IssueList".into(),
        });
        ctx.push(CrashEvent::Error {
            message: "db busy".into(),
        });
        ctx.push(CrashEvent::Custom {
            tag: "test".into(),
            detail: "hello".into(),
        });

        let dir = tempfile::tempdir().unwrap();
        let path = dir.path().join("test-crash.json");
        ctx.dump_to_file(&path).unwrap();

        // Verify: each line is valid JSON, total lines == 5.
        let file = std::fs::File::open(&path).unwrap();
        let reader = io::BufReader::new(file);
        let lines: Vec<String> = reader.lines().map(Result::unwrap).collect();
        assert_eq!(lines.len(), 5);

        // Each line must parse as JSON.
        for line in &lines {
            let val: serde_json::Value = serde_json::from_str(line).unwrap();
            assert!(val.get("type").is_some(), "missing 'type' field: {line}");
        }

        // Spot check first line: KeyPress with correct fields.
        let first: serde_json::Value = serde_json::from_str(&lines[0]).unwrap();
        assert_eq!(first["type"], "KeyPress");
        assert_eq!(first["key"], "j");
        assert_eq!(first["mode"], "Normal");
        assert_eq!(first["screen"], "Dashboard");
    }

    #[test]
    fn test_dump_creates_parent_directories() {
        let dir = tempfile::tempdir().unwrap();
        // Three levels of nonexistent directories below the temp root.
        let nested = dir.path().join("a").join("b").join("c").join("crash.json");

        let mut ctx = CrashContext::new();
        ctx.push(event(1));
        ctx.dump_to_file(&nested).unwrap();

        assert!(nested.exists());
    }

    #[test]
    fn test_dump_empty_context_creates_empty_file() {
        let dir = tempfile::tempdir().unwrap();
        let path = dir.path().join("empty.json");

        let ctx = CrashContext::new();
        ctx.dump_to_file(&path).unwrap();

        let content = std::fs::read_to_string(&path).unwrap();
        assert!(content.is_empty());
    }

    #[test]
    fn test_prune_keeps_newest_files() {
        let dir = tempfile::tempdir().unwrap();
        let crash_dir = dir.path();

        // Create 8 crash files with ordered timestamps.
        let filenames: Vec<String> = (0..8)
            .map(|i| format!("crash-2026010{i}-120000.000.json"))
            .collect();
        for name in &filenames {
            std::fs::write(crash_dir.join(name), "{}").unwrap();
        }

        // Prune, pointing at our temp dir.
        prune_crash_files_in(crash_dir);

        let remaining: Vec<String> = std::fs::read_dir(crash_dir)
            .unwrap()
            .filter_map(Result::ok)
            .map(|e| e.file_name().to_string_lossy().into_owned())
            .filter(|n| n.starts_with("crash-") && n.ends_with(".json"))
            .collect();

        assert_eq!(remaining.len(), MAX_CRASH_FILES);
        // Oldest 3 should be gone.
        for name in filenames.iter().take(3) {
            assert!(!remaining.contains(name));
        }
        // Newest 5 should remain.
        for name in filenames.iter().skip(3) {
            assert!(remaining.contains(name));
        }
    }

    #[test]
    fn test_all_event_variants_serialize() {
        let events = vec![
            CrashEvent::KeyPress {
                key: "q".into(),
                mode: "Normal".into(),
                screen: "Dashboard".into(),
            },
            CrashEvent::MsgDispatched {
                msg_name: "Quit".into(),
                screen: "Dashboard".into(),
            },
            CrashEvent::StateTransition {
                from: "Dashboard".into(),
                to: "IssueList".into(),
            },
            CrashEvent::Error {
                message: "oops".into(),
            },
            CrashEvent::Custom {
                tag: "debug".into(),
                detail: "trace".into(),
            },
        ];

        // Every variant must round-trip through serde with a "type" tag.
        for event in events {
            let json = serde_json::to_string(&event).unwrap();
            let parsed: serde_json::Value = serde_json::from_str(&json).unwrap();
            assert!(parsed.get("type").is_some());
        }
    }

    #[test]
    fn test_default_is_new() {
        let ctx = CrashContext::default();
        assert!(ctx.is_empty());
    }

    // -----------------------------------------------------------------------
    // Test helper: prune files in a specific directory (not the real path).
    // -----------------------------------------------------------------------

    // NOTE(review): this duplicates CrashContext::prune_crash_files with the
    // directory made injectable; keep the two in sync if the filter changes.
    fn prune_crash_files_in(dir: &Path) {
        let Ok(entries) = std::fs::read_dir(dir) else {
            return;
        };

        let mut crash_files: Vec<PathBuf> = entries
            .filter_map(Result::ok)
            .map(|e| e.path())
            .filter(|p| {
                p.file_name()
                    .and_then(|n| n.to_str())
                    .is_some_and(|n| n.starts_with("crash-") && n.ends_with(".json"))
            })
            .collect();

        crash_files.sort();

        if crash_files.len() > MAX_CRASH_FILES {
            let to_remove = crash_files.len() - MAX_CRASH_FILES;
            for path in &crash_files[..to_remove] {
                let _ = std::fs::remove_file(path);
            }
        }
    }
}
|
||||
270
crates/lore-tui/src/db.rs
Normal file
270
crates/lore-tui/src/db.rs
Normal file
@@ -0,0 +1,270 @@
|
||||
#![allow(dead_code)] // Phase 0: types defined now, consumed in Phase 1+
|
||||
|
||||
//! Database access layer for the TUI.
|
||||
//!
|
||||
//! Provides a read pool (3 connections, round-robin) plus a dedicated writer
|
||||
//! connection. All connections use WAL mode and busy_timeout for concurrency.
|
||||
//!
|
||||
//! The TUI operates read-heavy: parallel queries for dashboard, list views,
|
||||
//! and prefetch. Writes are rare (TUI-local state: scroll positions, bookmarks).
|
||||
|
||||
use std::path::Path;
|
||||
use std::sync::Mutex;
|
||||
use std::sync::atomic::{AtomicUsize, Ordering};
|
||||
|
||||
use anyhow::{Context, Result};
|
||||
use rusqlite::Connection;
|
||||
|
||||
/// Number of reader connections in the pool.
|
||||
const READER_COUNT: usize = 3;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// DbManager
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Manages a pool of read-only connections plus a dedicated writer.
///
/// Designed for `Arc<DbManager>` sharing across FrankenTUI's `Cmd::task`
/// background threads. Each reader is individually `Mutex`-protected so
/// concurrent tasks can query different readers without blocking.
pub struct DbManager {
    // Fixed pool of READER_COUNT connections, each opened with
    // `query_only = ON` so accidental writes fail fast.
    readers: Vec<Mutex<Connection>>,
    // Single connection for all mutations; serialized by its Mutex.
    writer: Mutex<Connection>,
    // Monotonic round-robin cursor into `readers` (taken mod READER_COUNT).
    next_reader: AtomicUsize,
}
|
||||
|
||||
impl DbManager {
|
||||
/// Open a database at `path` with 3 reader + 1 writer connections.
|
||||
///
|
||||
/// All connections get WAL mode, 5000ms busy_timeout, and foreign keys.
|
||||
/// Reader connections additionally set `query_only = ON` as a safety guard.
|
||||
pub fn open(path: &Path) -> Result<Self> {
|
||||
let mut readers = Vec::with_capacity(READER_COUNT);
|
||||
for i in 0..READER_COUNT {
|
||||
let conn =
|
||||
open_connection(path).with_context(|| format!("opening reader connection {i}"))?;
|
||||
conn.pragma_update(None, "query_only", "ON")
|
||||
.context("setting query_only on reader")?;
|
||||
readers.push(Mutex::new(conn));
|
||||
}
|
||||
|
||||
let writer = open_connection(path).context("opening writer connection")?;
|
||||
|
||||
Ok(Self {
|
||||
readers,
|
||||
writer: Mutex::new(writer),
|
||||
next_reader: AtomicUsize::new(0),
|
||||
})
|
||||
}
|
||||
|
||||
/// Execute a read-only query against the pool.
|
||||
///
|
||||
/// Selects the next reader via round-robin. The connection is borrowed
|
||||
/// for the duration of `f` and cannot leak outside.
|
||||
pub fn with_reader<F, T>(&self, f: F) -> Result<T>
|
||||
where
|
||||
F: FnOnce(&Connection) -> Result<T>,
|
||||
{
|
||||
let idx = self.next_reader.fetch_add(1, Ordering::Relaxed) % READER_COUNT;
|
||||
let conn = self.readers[idx].lock().expect("reader mutex poisoned");
|
||||
f(&conn)
|
||||
}
|
||||
|
||||
/// Execute a write operation against the dedicated writer.
|
||||
///
|
||||
/// Serialized via a single `Mutex`. The TUI writes infrequently
|
||||
/// (bookmarks, scroll state) so contention is negligible.
|
||||
pub fn with_writer<F, T>(&self, f: F) -> Result<T>
|
||||
where
|
||||
F: FnOnce(&Connection) -> Result<T>,
|
||||
{
|
||||
let conn = self.writer.lock().expect("writer mutex poisoned");
|
||||
f(&conn)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Connection setup
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Open a single SQLite connection with TUI-appropriate pragmas.
///
/// Mirrors lore's `create_connection` pragmas (WAL, busy_timeout, etc.)
/// but skips the sqlite-vec extension registration — the TUI reads standard
/// tables only, never vec0 virtual tables.
fn open_connection(path: &Path) -> Result<Connection> {
    let conn = Connection::open(path).context("opening SQLite database")?;

    // WAL lets readers run concurrently with the single writer.
    conn.pragma_update(None, "journal_mode", "WAL")?;
    conn.pragma_update(None, "synchronous", "NORMAL")?;
    conn.pragma_update(None, "foreign_keys", "ON")?;
    // busy_timeout is in milliseconds: retry for up to 5s on lock contention.
    conn.pragma_update(None, "busy_timeout", 5000)?;
    conn.pragma_update(None, "temp_store", "MEMORY")?;

    Ok(conn)
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use std::sync::Arc;

    /// Create a temporary database file for testing.
    ///
    /// Uses an atomic counter + thread ID to guarantee unique paths even
    /// when tests run in parallel.
    // NOTE(review): files are never deleted here; we rely on OS temp cleanup.
    fn test_db_path() -> std::path::PathBuf {
        use std::sync::atomic::AtomicU64;
        static COUNTER: AtomicU64 = AtomicU64::new(0);
        let n = COUNTER.fetch_add(1, Ordering::Relaxed);
        let dir = std::env::temp_dir().join("lore-tui-tests");
        std::fs::create_dir_all(&dir).expect("create test dir");
        dir.join(format!(
            "test-{}-{:?}-{n}.db",
            std::process::id(),
            std::thread::current().id(),
        ))
    }

    // Minimal schema shared by every test in this module.
    fn create_test_table(conn: &Connection) {
        conn.execute_batch(
            "CREATE TABLE IF NOT EXISTS test_items (id INTEGER PRIMARY KEY, name TEXT);",
        )
        .expect("create test table");
    }

    #[test]
    fn test_dbmanager_opens_successfully() {
        let path = test_db_path();
        let db = DbManager::open(&path).expect("open");
        // Writer creates the test table
        db.with_writer(|conn| {
            create_test_table(conn);
            Ok(())
        })
        .expect("create table via writer");
    }

    #[test]
    fn test_reader_is_query_only() {
        let path = test_db_path();
        let db = DbManager::open(&path).expect("open");

        // Create table via writer first
        db.with_writer(|conn| {
            create_test_table(conn);
            Ok(())
        })
        .unwrap();

        // Attempt INSERT via reader — should fail
        let result = db.with_reader(|conn| {
            conn.execute("INSERT INTO test_items (name) VALUES ('boom')", [])
                .map_err(|e| anyhow::anyhow!(e))?;
            Ok(())
        });
        assert!(result.is_err(), "reader should reject writes");
    }

    #[test]
    fn test_writer_allows_mutations() {
        let path = test_db_path();
        let db = DbManager::open(&path).expect("open");

        db.with_writer(|conn| {
            create_test_table(conn);
            conn.execute("INSERT INTO test_items (name) VALUES ('hello')", [])?;
            let count: i64 = conn.query_row("SELECT COUNT(*) FROM test_items", [], |r| r.get(0))?;
            assert_eq!(count, 1);
            Ok(())
        })
        .expect("writer should allow mutations");
    }

    #[test]
    fn test_round_robin_rotates_readers() {
        let path = test_db_path();
        let db = DbManager::open(&path).expect("open");

        // Call with_reader 6 times — should cycle through readers 0,1,2,0,1,2
        for expected_cycle in 0..2 {
            for expected_idx in 0..READER_COUNT {
                // Inspect the cursor before each call; with_reader bumps it.
                let current = db.next_reader.load(Ordering::Relaxed);
                assert_eq!(
                    current % READER_COUNT,
                    (expected_cycle * READER_COUNT + expected_idx) % READER_COUNT,
                );
                db.with_reader(|_conn| Ok(())).unwrap();
            }
        }
    }

    #[test]
    fn test_reader_can_read_writer_data() {
        let path = test_db_path();
        let db = DbManager::open(&path).expect("open");

        db.with_writer(|conn| {
            create_test_table(conn);
            conn.execute("INSERT INTO test_items (name) VALUES ('visible')", [])?;
            Ok(())
        })
        .unwrap();

        let name: String = db
            .with_reader(|conn| {
                let n: String =
                    conn.query_row("SELECT name FROM test_items WHERE id = 1", [], |r| r.get(0))?;
                Ok(n)
            })
            .expect("reader should see writer's data");

        assert_eq!(name, "visible");
    }

    #[test]
    fn test_dbmanager_is_send_sync() {
        // Compile-time check: DbManager must be shareable across threads.
        fn assert_send_sync<T: Send + Sync>() {}
        assert_send_sync::<DbManager>();
    }

    #[test]
    fn test_concurrent_reads() {
        let path = test_db_path();
        let db = Arc::new(DbManager::open(&path).expect("open"));

        db.with_writer(|conn| {
            create_test_table(conn);
            for i in 0..10 {
                conn.execute(
                    "INSERT INTO test_items (name) VALUES (?1)",
                    [format!("item-{i}")],
                )?;
            }
            Ok(())
        })
        .unwrap();

        // Six threads over a three-reader pool forces slot reuse.
        let mut handles = Vec::new();
        for _ in 0..6 {
            let db = Arc::clone(&db);
            handles.push(std::thread::spawn(move || {
                db.with_reader(|conn| {
                    let count: i64 =
                        conn.query_row("SELECT COUNT(*) FROM test_items", [], |r| r.get(0))?;
                    assert_eq!(count, 10);
                    Ok(())
                })
                .expect("concurrent read should succeed");
            }));
        }

        for h in handles {
            h.join().expect("thread should not panic");
        }
    }
}
|
||||
316
crates/lore-tui/src/filter_dsl.rs
Normal file
316
crates/lore-tui/src/filter_dsl.rs
Normal file
@@ -0,0 +1,316 @@
|
||||
#![allow(dead_code)] // Phase 2: consumed by filter_bar widget
|
||||
|
||||
//! Filter DSL parser for entity list screens.
|
||||
//!
|
||||
//! Parses a compact filter string into structured tokens:
|
||||
//! - `field:value` — typed field filter (e.g., `state:opened`, `author:taylor`)
|
||||
//! - `-field:value` — negation filter (exclude matches)
|
||||
//! - `"quoted value"` — preserved as a single free-text token
|
||||
//! - bare words — free-text search terms
|
||||
//!
|
||||
//! The DSL is intentionally simple: no boolean operators, no nesting.
|
||||
//! Filters are AND-combined at the query layer.
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Token types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A single parsed filter token.
///
/// Produced by `parse_filter_tokens`; tokens are AND-combined downstream.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum FilterToken {
    /// `field:value` — match entities where `field` equals `value`.
    FieldValue { field: String, value: String },
    /// `-field:value` — exclude entities where `field` equals `value`.
    Negation { field: String, value: String },
    /// Bare word(s) used as free-text search.
    FreeText(String),
    /// `"quoted value"` — preserved as a single search term.
    QuotedValue(String),
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Known fields per entity type
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Known filter fields for issues.
///
/// Used with `is_known_field` / `unknown_fields` to validate parsed tokens.
pub const ISSUE_FIELDS: &[&str] = &[
    "state",
    "author",
    "assignee",
    "label",
    "milestone",
    "status",
];

/// Known filter fields for merge requests.
///
/// Same validation role as [`ISSUE_FIELDS`], for the MR list screen.
pub const MR_FIELDS: &[&str] = &[
    "state",
    "author",
    "reviewer",
    "target_branch",
    "source_branch",
    "label",
    "draft",
];
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Parser
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Parse a filter input string into a sequence of tokens.
|
||||
///
|
||||
/// Empty input returns an empty vec (no-op filter = show all).
|
||||
pub fn parse_filter_tokens(input: &str) -> Vec<FilterToken> {
|
||||
let input = input.trim();
|
||||
if input.is_empty() {
|
||||
return Vec::new();
|
||||
}
|
||||
|
||||
let mut tokens = Vec::new();
|
||||
let mut chars = input.chars().peekable();
|
||||
|
||||
while chars.peek().is_some() {
|
||||
// Skip whitespace between tokens.
|
||||
while chars.peek().is_some_and(|c| c.is_whitespace()) {
|
||||
chars.next();
|
||||
}
|
||||
|
||||
match chars.peek() {
|
||||
None => break,
|
||||
Some('"') => {
|
||||
// Quoted value — consume until closing quote or end.
|
||||
chars.next(); // consume opening "
|
||||
let value: String = consume_until(&mut chars, '"');
|
||||
if chars.peek() == Some(&'"') {
|
||||
chars.next(); // consume closing "
|
||||
}
|
||||
if !value.is_empty() {
|
||||
tokens.push(FilterToken::QuotedValue(value));
|
||||
}
|
||||
}
|
||||
Some('-') => {
|
||||
// Could be negation prefix or just a free-text word starting with -.
|
||||
chars.next(); // consume -
|
||||
let word = consume_word(&mut chars);
|
||||
if let Some((field, value)) = word.split_once(':') {
|
||||
tokens.push(FilterToken::Negation {
|
||||
field: field.to_string(),
|
||||
value: value.to_string(),
|
||||
});
|
||||
} else if !word.is_empty() {
|
||||
// Bare negation without field:value — treat as free text with -.
|
||||
tokens.push(FilterToken::FreeText(format!("-{word}")));
|
||||
}
|
||||
}
|
||||
Some(_) => {
|
||||
let word = consume_word(&mut chars);
|
||||
if let Some((field, value)) = word.split_once(':') {
|
||||
tokens.push(FilterToken::FieldValue {
|
||||
field: field.to_string(),
|
||||
value: value.to_string(),
|
||||
});
|
||||
} else if !word.is_empty() {
|
||||
tokens.push(FilterToken::FreeText(word));
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
tokens
|
||||
}
|
||||
|
||||
/// Validate that a field name is known for the given entity type.
///
/// Comparison is exact (case-sensitive). Returns `true` if the field is in
/// the known set, `false` otherwise.
pub fn is_known_field(field: &str, known_fields: &[&str]) -> bool {
    known_fields.iter().any(|known| *known == field)
}
|
||||
|
||||
/// Extract all unknown fields from a token list.
|
||||
pub fn unknown_fields<'a>(tokens: &'a [FilterToken], known_fields: &[&str]) -> Vec<&'a str> {
|
||||
tokens
|
||||
.iter()
|
||||
.filter_map(|t| match t {
|
||||
FilterToken::FieldValue { field, .. } | FilterToken::Negation { field, .. } => {
|
||||
if is_known_field(field, known_fields) {
|
||||
None
|
||||
} else {
|
||||
Some(field.as_str())
|
||||
}
|
||||
}
|
||||
_ => None,
|
||||
})
|
||||
.collect()
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Consume characters until `delim` is found (exclusive) or end of input.
///
/// The delimiter itself is left in the iterator for the caller to handle.
fn consume_until(chars: &mut std::iter::Peekable<std::str::Chars<'_>>, delim: char) -> String {
    // `next_if` advances only while the peeked char is not the delimiter,
    // so the delimiter (if present) remains un-consumed.
    std::iter::from_fn(|| chars.next_if(|&c| c != delim)).collect()
}
|
||||
|
||||
/// Consume a non-whitespace word.
///
/// Stops before whitespace, and before a `"` once at least one character
/// has been collected (so quoted values are tokenized separately). A word
/// that *starts* with `"` keeps the quote as an ordinary character.
fn consume_word(chars: &mut std::iter::Peekable<std::str::Chars<'_>>) -> String {
    let mut word = String::new();
    while let Some(&c) = chars.peek() {
        let quote_boundary = c == '"' && !word.is_empty();
        if c.is_whitespace() || quote_boundary {
            break;
        }
        word.push(c);
        chars.next();
    }
    word
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // -- TDD Anchor: basic field:value parsing --

    #[test]
    fn test_parse_filter_basic() {
        let tokens = parse_filter_tokens("state:opened author:taylor");
        assert_eq!(tokens.len(), 2);
        assert_eq!(
            tokens[0],
            FilterToken::FieldValue {
                field: "state".into(),
                value: "opened".into()
            }
        );
        assert_eq!(
            tokens[1],
            FilterToken::FieldValue {
                field: "author".into(),
                value: "taylor".into()
            }
        );
    }

    #[test]
    fn test_parse_quoted_value() {
        let tokens = parse_filter_tokens("\"in progress\"");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0], FilterToken::QuotedValue("in progress".into()));
    }

    #[test]
    fn test_parse_negation() {
        let tokens = parse_filter_tokens("-state:closed");
        assert_eq!(tokens.len(), 1);
        assert_eq!(
            tokens[0],
            FilterToken::Negation {
                field: "state".into(),
                value: "closed".into()
            }
        );
    }

    // All three token kinds in a single input, order preserved.
    #[test]
    fn test_parse_mixed() {
        let tokens = parse_filter_tokens("state:opened \"bug fix\" -label:wontfix");
        assert_eq!(tokens.len(), 3);
        assert_eq!(
            tokens[0],
            FilterToken::FieldValue {
                field: "state".into(),
                value: "opened".into()
            }
        );
        assert_eq!(tokens[1], FilterToken::QuotedValue("bug fix".into()));
        assert_eq!(
            tokens[2],
            FilterToken::Negation {
                field: "label".into(),
                value: "wontfix".into()
            }
        );
    }

    // Empty / whitespace-only input is a no-op filter.
    #[test]
    fn test_parse_empty_returns_empty() {
        assert!(parse_filter_tokens("").is_empty());
        assert!(parse_filter_tokens("   ").is_empty());
    }

    #[test]
    fn test_parse_free_text() {
        let tokens = parse_filter_tokens("authentication bug");
        assert_eq!(tokens.len(), 2);
        assert_eq!(tokens[0], FilterToken::FreeText("authentication".into()));
        assert_eq!(tokens[1], FilterToken::FreeText("bug".into()));
    }

    // A '-' word without a colon keeps its dash and stays free text.
    #[test]
    fn test_parse_bare_negation_as_free_text() {
        let tokens = parse_filter_tokens("-wontfix");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0], FilterToken::FreeText("-wontfix".into()));
    }

    // Tokenization is char-based, so multi-byte UTF-8 must round-trip.
    #[test]
    fn test_parse_unicode() {
        let tokens = parse_filter_tokens("author:田中 \"認証バグ\"");
        assert_eq!(tokens.len(), 2);
        assert_eq!(
            tokens[0],
            FilterToken::FieldValue {
                field: "author".into(),
                value: "田中".into()
            }
        );
        assert_eq!(tokens[1], FilterToken::QuotedValue("認証バグ".into()));
    }

    // A missing closing quote still yields the collected value.
    #[test]
    fn test_parse_unclosed_quote() {
        let tokens = parse_filter_tokens("\"open ended");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0], FilterToken::QuotedValue("open ended".into()));
    }

    // -- Field validation --

    #[test]
    fn test_known_field_issues() {
        assert!(is_known_field("state", ISSUE_FIELDS));
        assert!(is_known_field("author", ISSUE_FIELDS));
        assert!(!is_known_field("reviewer", ISSUE_FIELDS));
        assert!(!is_known_field("bogus", ISSUE_FIELDS));
    }

    #[test]
    fn test_known_field_mrs() {
        assert!(is_known_field("draft", MR_FIELDS));
        assert!(is_known_field("reviewer", MR_FIELDS));
        assert!(!is_known_field("assignee", MR_FIELDS));
    }

    #[test]
    fn test_unknown_fields_detection() {
        let tokens = parse_filter_tokens("state:opened bogus:val author:taylor unknown:x");
        let unknown = unknown_fields(&tokens, ISSUE_FIELDS);
        assert_eq!(unknown, vec!["bogus", "unknown"]);
    }
}
|
||||
102
crates/lore-tui/src/layout.rs
Normal file
102
crates/lore-tui/src/layout.rs
Normal file
@@ -0,0 +1,102 @@
|
||||
#![allow(clippy::module_name_repetitions)]
|
||||
|
||||
//! Responsive layout helpers for the Lore TUI.
|
||||
//!
|
||||
//! Wraps [`ftui::layout::Breakpoint`] and [`ftui::layout::Breakpoints`] with
|
||||
//! Lore-specific configuration: breakpoint thresholds, column counts per
|
||||
//! breakpoint, and preview-pane visibility rules.
|
||||
|
||||
use ftui::layout::{Breakpoint, Breakpoints};
|
||||
|
||||
/// Lore-specific breakpoint thresholds.
///
/// Uses the ftui defaults: Sm=60, Md=90, Lg=120, Xl=160 columns.
/// Kept as a named constant so every layout helper in this module (and any
/// future overrides) reads from a single source of truth.
pub const LORE_BREAKPOINTS: Breakpoints = Breakpoints::DEFAULT;
|
||||
|
||||
/// Classify a terminal width (in columns) into a [`Breakpoint`].
#[inline]
pub fn classify_width(width: u16) -> Breakpoint {
    // Delegate to the shared thresholds so all screens agree on sizing.
    LORE_BREAKPOINTS.classify_width(width)
}
|
||||
|
||||
/// Number of dashboard columns for a given breakpoint.
|
||||
///
|
||||
/// - `Xs` / `Sm`: 1 column (narrow terminals)
|
||||
/// - `Md`: 2 columns (standard width)
|
||||
/// - `Lg` / `Xl`: 3 columns (wide terminals)
|
||||
#[inline]
|
||||
pub const fn dashboard_columns(bp: Breakpoint) -> u16 {
|
||||
match bp {
|
||||
Breakpoint::Xs | Breakpoint::Sm => 1,
|
||||
Breakpoint::Md => 2,
|
||||
Breakpoint::Lg | Breakpoint::Xl => 3,
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether the preview pane should be visible at a given breakpoint.
|
||||
///
|
||||
/// Preview requires at least `Md` width to avoid cramping the main list.
|
||||
#[inline]
|
||||
pub const fn show_preview_pane(bp: Breakpoint) -> bool {
|
||||
match bp {
|
||||
Breakpoint::Md | Breakpoint::Lg | Breakpoint::Xl => true,
|
||||
Breakpoint::Xs | Breakpoint::Sm => false,
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    // Pins the exact ftui default thresholds this module relies on.
    #[test]
    fn test_classify_width_boundaries() {
        // Xs: 0..59
        assert_eq!(classify_width(59), Breakpoint::Xs);
        // Sm: 60..89
        assert_eq!(classify_width(60), Breakpoint::Sm);
        assert_eq!(classify_width(89), Breakpoint::Sm);
        // Md: 90..119
        assert_eq!(classify_width(90), Breakpoint::Md);
        assert_eq!(classify_width(119), Breakpoint::Md);
        // Lg: 120..159
        assert_eq!(classify_width(120), Breakpoint::Lg);
        assert_eq!(classify_width(159), Breakpoint::Lg);
        // Xl: 160+
        assert_eq!(classify_width(160), Breakpoint::Xl);
    }

    #[test]
    fn test_dashboard_columns_per_breakpoint() {
        assert_eq!(dashboard_columns(Breakpoint::Xs), 1);
        assert_eq!(dashboard_columns(Breakpoint::Sm), 1);
        assert_eq!(dashboard_columns(Breakpoint::Md), 2);
        assert_eq!(dashboard_columns(Breakpoint::Lg), 3);
        assert_eq!(dashboard_columns(Breakpoint::Xl), 3);
    }

    #[test]
    fn test_show_preview_pane_per_breakpoint() {
        assert!(!show_preview_pane(Breakpoint::Xs));
        assert!(!show_preview_pane(Breakpoint::Sm));
        assert!(show_preview_pane(Breakpoint::Md));
        assert!(show_preview_pane(Breakpoint::Lg));
        assert!(show_preview_pane(Breakpoint::Xl));
    }

    #[test]
    fn test_edge_cases() {
        // Width 0 must not panic, should classify as Xs
        assert_eq!(classify_width(0), Breakpoint::Xs);
        // Very wide terminal
        assert_eq!(classify_width(300), Breakpoint::Xl);
    }

    // Guards against LORE_BREAKPOINTS drifting from the ftui defaults
    // without the boundary test above being updated to match.
    #[test]
    fn test_lore_breakpoints_matches_defaults() {
        assert_eq!(LORE_BREAKPOINTS, Breakpoints::DEFAULT);
    }
}
|
||||
71
crates/lore-tui/src/lib.rs
Normal file
71
crates/lore-tui/src/lib.rs
Normal file
@@ -0,0 +1,71 @@
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
//! Gitlore TUI — terminal interface for exploring GitLab data locally.
|
||||
//!
|
||||
//! Built on FrankenTUI (Elm architecture): Model, update, view.
|
||||
//! The `lore` CLI spawns `lore-tui` via PATH lookup at runtime.
|
||||
|
||||
use anyhow::Result;
|
||||
|
||||
// Phase 0 modules.
|
||||
pub mod clock; // Clock trait: SystemClock + FakeClock (bd-2lg6)
|
||||
pub mod message; // Msg, Screen, EntityKey, AppError, InputMode (bd-c9gk)
|
||||
|
||||
pub mod safety; // Terminal safety: sanitize + URL policy + redact (bd-3ir1)
|
||||
|
||||
pub mod db; // DbManager: read pool + dedicated writer (bd-2kop)
|
||||
pub mod theme; // Flexoki theme: build_theme, state_color, label_style (bd-5ofk)
|
||||
|
||||
pub mod app; // LoreApp Model trait impl (Phase 0 proof: bd-2emv, full: bd-6pmy)
|
||||
|
||||
// Phase 1 modules.
|
||||
pub mod commands; // CommandRegistry: keybindings, help, palette (bd-38lb)
|
||||
pub mod crash_context; // CrashContext ring buffer + panic hook (bd-2fr7)
|
||||
pub mod layout; // Responsive layout: breakpoints, columns, preview pane (bd-1pzj)
|
||||
pub mod navigation; // NavigationStack: back/forward/jump list (bd-1qpp)
|
||||
pub mod state; // AppState, LoadState, ScreenIntent, per-screen states (bd-1v9m)
|
||||
pub mod task_supervisor; // TaskSupervisor: dedup + cancel + generation IDs (bd-3le2)
|
||||
pub mod view; // View layer: render_screen + common widgets (bd-26f2)
|
||||
|
||||
// Phase 2 modules.
|
||||
pub mod action; // Data-fetching actions for TUI screens (bd-35g5+)
|
||||
pub mod filter_dsl; // Filter DSL tokenizer for list screen filter bars (bd-18qs)
|
||||
|
||||
/// Options controlling how the TUI launches.
///
/// Built by `main.rs` from `TuiCli` flags; consumed by [`launch_tui`] and
/// [`launch_sync_tui`].
#[derive(Debug, Clone)]
pub struct LaunchOptions {
    /// Path to lore config file. `None` means the default location.
    pub config_path: Option<String>,
    /// Run a background sync before displaying data.
    pub sync_on_start: bool,
    /// Clear cached TUI state and start fresh.
    pub fresh: bool,
    /// Render backend: "crossterm" or "native".
    // NOTE(review): stringly-typed; an enum would reject bad values at the
    // CLI boundary — left as-is to match the clap default in main.rs.
    pub render_mode: String,
    /// Use ASCII-only box drawing characters.
    pub ascii: bool,
    /// Disable alternate screen (render inline).
    pub no_alt_screen: bool,
}
|
||||
|
||||
/// Launch the TUI in browse mode (no sync).
|
||||
///
|
||||
/// Loads config from `options.config_path` (or default location),
|
||||
/// opens the database read-only, and enters the FrankenTUI event loop.
|
||||
pub fn launch_tui(options: LaunchOptions) -> Result<()> {
|
||||
let _options = options;
|
||||
// Phase 1 will wire this to LoreApp + App::fullscreen().run()
|
||||
eprintln!("lore-tui: browse mode not yet implemented (Phase 1)");
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Launch the TUI with an initial sync pass.
|
||||
///
|
||||
/// Runs `lore sync` in the background while displaying a progress screen,
|
||||
/// then transitions to browse mode once sync completes.
|
||||
pub fn launch_sync_tui(options: LaunchOptions) -> Result<()> {
|
||||
let _options = options;
|
||||
// Phase 2 will implement the sync progress screen
|
||||
eprintln!("lore-tui: sync mode not yet implemented (Phase 2)");
|
||||
Ok(())
|
||||
}
|
||||
53
crates/lore-tui/src/main.rs
Normal file
53
crates/lore-tui/src/main.rs
Normal file
@@ -0,0 +1,53 @@
|
||||
#![forbid(unsafe_code)]
|
||||
|
||||
use anyhow::Result;
|
||||
use clap::Parser;
|
||||
use lore_tui::LaunchOptions;
|
||||
|
||||
/// Terminal UI for Gitlore — explore GitLab issues, MRs, and search locally.
#[derive(Parser, Debug)]
#[command(name = "lore-tui", version, about)]
struct TuiCli {
    /// Path to lore config file (default: ~/.config/lore/config.json).
    // Also readable from the LORE_CONFIG_PATH environment variable.
    #[arg(short, long, env = "LORE_CONFIG_PATH")]
    config: Option<String>,

    /// Run a sync before launching the TUI.
    #[arg(long)]
    sync: bool,

    /// Clear cached state and start fresh.
    #[arg(long)]
    fresh: bool,

    /// Render mode: "crossterm" (default) or "native".
    // NOTE(review): free-form string; values other than the two documented
    // ones are accepted by clap — presumably validated downstream. Confirm.
    #[arg(long, default_value = "crossterm")]
    render_mode: String,

    /// Use ASCII-only drawing characters (no Unicode box drawing).
    #[arg(long)]
    ascii: bool,

    /// Disable alternate screen (render inline).
    #[arg(long)]
    no_alt_screen: bool,
}
|
||||
|
||||
fn main() -> Result<()> {
|
||||
let cli = TuiCli::parse();
|
||||
|
||||
let options = LaunchOptions {
|
||||
config_path: cli.config,
|
||||
sync_on_start: cli.sync,
|
||||
fresh: cli.fresh,
|
||||
render_mode: cli.render_mode,
|
||||
ascii: cli.ascii,
|
||||
no_alt_screen: cli.no_alt_screen,
|
||||
};
|
||||
|
||||
if options.sync_on_start {
|
||||
lore_tui::launch_sync_tui(options)
|
||||
} else {
|
||||
lore_tui::launch_tui(options)
|
||||
}
|
||||
}
|
||||
503
crates/lore-tui/src/message.rs
Normal file
503
crates/lore-tui/src/message.rs
Normal file
@@ -0,0 +1,503 @@
|
||||
#![allow(dead_code)] // Phase 0: types defined now, consumed in Phase 1+
|
||||
|
||||
//! Core types for the lore-tui Elm architecture.
|
||||
//!
|
||||
//! - [`Msg`] — every user action and async result flows through this enum.
|
||||
//! - [`Screen`] — navigation targets.
|
||||
//! - [`EntityKey`] — safe cross-project entity identity.
|
||||
//! - [`AppError`] — structured error display in the TUI.
|
||||
//! - [`InputMode`] — controls key dispatch routing.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use chrono::{DateTime, Utc};
|
||||
use ftui::Event;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// EntityKind
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Distinguishes issue vs merge request in an [`EntityKey`].
///
/// `Copy` + `Hash` so keys stay cheap to compare and usable in sets/maps.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)]
pub enum EntityKind {
    Issue,
    MergeRequest,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// EntityKey
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Uniquely identifies an entity (issue or MR) across projects.
///
/// Bare `iid` is unsafe in multi-project datasets — equality requires
/// project_id + iid + kind.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub struct EntityKey {
    // GitLab project id the entity belongs to.
    pub project_id: i64,
    // GitLab internal id, unique only *within* a project and kind.
    pub iid: i64,
    // Issue vs merge request (same iid can exist as both).
    pub kind: EntityKind,
}
|
||||
|
||||
impl EntityKey {
|
||||
#[must_use]
|
||||
pub fn issue(project_id: i64, iid: i64) -> Self {
|
||||
Self {
|
||||
project_id,
|
||||
iid,
|
||||
kind: EntityKind::Issue,
|
||||
}
|
||||
}
|
||||
|
||||
#[must_use]
|
||||
pub fn mr(project_id: i64, iid: i64) -> Self {
|
||||
Self {
|
||||
project_id,
|
||||
iid,
|
||||
kind: EntityKind::MergeRequest,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl fmt::Display for EntityKey {
|
||||
fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
|
||||
let prefix = match self.kind {
|
||||
EntityKind::Issue => "#",
|
||||
EntityKind::MergeRequest => "!",
|
||||
};
|
||||
write!(f, "p{}:{}{}", self.project_id, prefix, self.iid)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Screen
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Navigation targets within the TUI.
///
/// Detail variants carry the [`EntityKey`] of the entity being shown; all
/// other screens are singletons. `Hash`/`Eq` let screens act as map keys
/// and navigation-stack entries.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum Screen {
    Dashboard,
    IssueList,
    // Detail view for one issue.
    IssueDetail(EntityKey),
    MrList,
    // Detail view for one merge request.
    MrDetail(EntityKey),
    Search,
    Timeline,
    // People intelligence ("who") screen.
    Who,
    Sync,
    Stats,
    Doctor,
    Bootstrap,
}
|
||||
|
||||
impl Screen {
|
||||
/// Human-readable label for breadcrumbs and status bar.
|
||||
#[must_use]
|
||||
pub fn label(&self) -> &str {
|
||||
match self {
|
||||
Self::Dashboard => "Dashboard",
|
||||
Self::IssueList => "Issues",
|
||||
Self::IssueDetail(_) => "Issue",
|
||||
Self::MrList => "Merge Requests",
|
||||
Self::MrDetail(_) => "Merge Request",
|
||||
Self::Search => "Search",
|
||||
Self::Timeline => "Timeline",
|
||||
Self::Who => "Who",
|
||||
Self::Sync => "Sync",
|
||||
Self::Stats => "Stats",
|
||||
Self::Doctor => "Doctor",
|
||||
Self::Bootstrap => "Bootstrap",
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether this screen shows a specific entity detail view.
|
||||
#[must_use]
|
||||
pub fn is_detail_or_entity(&self) -> bool {
|
||||
matches!(self, Self::IssueDetail(_) | Self::MrDetail(_))
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// AppError
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Structured error types for user-facing display in the TUI.
///
/// Each variant maps to one human-readable message in the `Display` impl;
/// payloads carry the detail string interpolated into that message.
#[derive(Debug, Clone)]
pub enum AppError {
    /// Database is busy (WAL contention).
    DbBusy,
    /// Database corruption detected; payload is the diagnostic detail.
    DbCorruption(String),
    /// GitLab rate-limited; retry after N seconds (if header present).
    NetworkRateLimited { retry_after_secs: Option<u64> },
    /// Network unavailable.
    NetworkUnavailable,
    /// GitLab authentication failed.
    AuthFailed,
    /// Data parsing error; payload is the diagnostic detail.
    ParseError(String),
    /// Internal / unexpected error; payload is the diagnostic detail.
    Internal(String),
}
|
||||
|
||||
impl fmt::Display for AppError {
    /// One-line, user-facing message per variant (shown in the TUI status
    /// area). Detail payloads are interpolated verbatim.
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        match self {
            Self::DbBusy => f.write_str("Database is busy — another process holds the lock"),
            Self::DbCorruption(detail) => write!(f, "Database corruption: {detail}"),
            // Split on whether the Retry-After header gave us a duration.
            Self::NetworkRateLimited { retry_after_secs } => match retry_after_secs {
                Some(secs) => write!(f, "Rate limited by GitLab — retry in {secs}s"),
                None => f.write_str("Rate limited by GitLab — try again shortly"),
            },
            Self::NetworkUnavailable => f.write_str("Network unavailable — working offline"),
            Self::AuthFailed => f.write_str("GitLab authentication failed — check your token"),
            Self::ParseError(detail) => write!(f, "Parse error: {detail}"),
            Self::Internal(detail) => write!(f, "Internal error: {detail}"),
        }
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// InputMode
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Controls how keystrokes are routed through the key dispatch pipeline.
///
/// Defaults to [`InputMode::Normal`].
#[derive(Debug, Clone, Default)]
pub enum InputMode {
    /// Standard navigation mode — keys dispatch to screen-specific handlers.
    #[default]
    Normal,
    /// Text input focused (filter bar, search box); keys go to the widget.
    Text,
    /// Command palette is open; keys go to the palette.
    Palette,
    /// "g" prefix pressed — waiting for second key (500ms timeout).
    /// `started_at` lets the update loop expire the prefix on Tick.
    GoPrefix { started_at: DateTime<Utc> },
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Msg
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Every user action and async result flows through this enum.
///
/// Generation fields (`generation: u64`) on async result variants enable
/// stale-response detection: if the generation doesn't match the current
/// request generation, the result is silently dropped.
#[derive(Debug)]
pub enum Msg {
    // --- Terminal events ---
    /// Raw terminal event (key, mouse, paste, focus, clipboard).
    RawEvent(Event),
    /// Periodic tick from runtime subscription.
    Tick,
    /// Terminal resized.
    Resize {
        width: u16,
        height: u16,
    },

    // --- Navigation ---
    /// Navigate to a specific screen.
    NavigateTo(Screen),
    /// Go back in navigation history.
    GoBack,
    /// Go forward in navigation history.
    GoForward,
    /// Jump to the dashboard.
    GoHome,
    /// Jump back N screens in history.
    JumpBack(usize),
    /// Jump forward N screens in history.
    JumpForward(usize),

    // --- Command palette ---
    /// Open the command palette overlay.
    OpenCommandPalette,
    /// Dismiss the command palette overlay.
    CloseCommandPalette,
    /// Palette query text changed (full current input).
    CommandPaletteInput(String),
    /// A palette entry was chosen (command identifier).
    CommandPaletteSelect(String),

    // --- Issue list ---
    /// Async result: a page of issues finished loading.
    IssueListLoaded {
        generation: u64,
        page: crate::state::issue_list::IssueListPage,
    },
    /// Filter bar text changed on the issue list.
    IssueListFilterChanged(String),
    /// Cycle the issue list sort order.
    IssueListSortChanged,
    /// An issue row was selected.
    IssueSelected(EntityKey),

    // --- MR list ---
    /// Async result: a page of merge requests finished loading.
    MrListLoaded {
        generation: u64,
        page: crate::state::mr_list::MrListPage,
    },
    /// Filter bar text changed on the MR list.
    MrListFilterChanged(String),
    /// An MR row was selected.
    MrSelected(EntityKey),

    // --- Issue detail ---
    /// Async result: full detail payload for one issue.
    // Boxed to keep Msg small (see clippy::large_enum_variant).
    IssueDetailLoaded {
        generation: u64,
        key: EntityKey,
        data: Box<crate::state::issue_detail::IssueDetailData>,
    },

    // --- MR detail ---
    /// Async result: full detail payload for one merge request.
    MrDetailLoaded {
        generation: u64,
        key: EntityKey,
        data: Box<crate::state::mr_detail::MrDetailData>,
    },

    // --- Discussions (shared by issue + MR detail) ---
    /// Async result: discussion threads for the entity `key`.
    DiscussionsLoaded {
        generation: u64,
        key: EntityKey,
        discussions: Vec<crate::view::common::discussion_tree::DiscussionNode>,
    },

    // --- Search ---
    /// Search box text changed.
    SearchQueryChanged(String),
    /// A search request was dispatched for `query`.
    SearchRequestStarted {
        generation: u64,
        query: String,
    },
    /// Async result: search results arrived.
    SearchExecuted {
        generation: u64,
        results: Vec<SearchResult>,
    },
    /// A search result row was selected.
    SearchResultSelected(EntityKey),
    /// Cycle the search mode.
    SearchModeChanged,
    /// Async result: search capabilities probe finished.
    SearchCapabilitiesLoaded,

    // --- Timeline ---
    /// Async result: timeline events arrived.
    TimelineLoaded {
        generation: u64,
        events: Vec<TimelineEvent>,
    },
    /// A timeline entry's entity was selected.
    TimelineEntitySelected(EntityKey),

    // --- Who (people) ---
    /// Async result: people-intelligence query finished.
    WhoResultLoaded {
        generation: u64,
        result: Box<WhoResult>,
    },
    /// Cycle the who-screen mode.
    WhoModeChanged,

    // --- Sync ---
    /// A sync run began.
    SyncStarted,
    /// Progress update for one sync stage (current/total items).
    SyncProgress {
        stage: String,
        current: u64,
        total: u64,
    },
    /// Coalesced progress update covering `batch_size` items.
    SyncProgressBatch {
        stage: String,
        batch_size: u64,
    },
    /// One log line emitted by the sync process.
    SyncLogLine(String),
    /// A progress/log message was dropped due to backpressure.
    SyncBackpressureDrop,
    /// Sync finished successfully.
    SyncCompleted {
        elapsed_ms: u64,
    },
    /// Sync was cancelled by the user.
    SyncCancelled,
    /// Sync failed; payload is the error text.
    SyncFailed(String),
    /// Throughput stats for the sync stream.
    SyncStreamStats {
        bytes: u64,
        items: u64,
    },

    // --- Search debounce ---
    /// A debounce timer was armed for the given request generation.
    SearchDebounceArmed {
        generation: u64,
    },
    /// The debounce timer fired; execute the search if still current.
    SearchDebounceFired {
        generation: u64,
    },

    // --- Dashboard ---
    /// Async result: dashboard summary data arrived.
    DashboardLoaded {
        generation: u64,
        data: Box<crate::state::dashboard::DashboardData>,
    },

    // --- Global actions ---
    /// Surface a structured error to the user.
    Error(AppError),
    /// Show the help overlay.
    ShowHelp,
    /// Show the CLI-equivalent command for the current view.
    ShowCliEquivalent,
    /// Open the current entity in the system browser.
    OpenInBrowser,
    /// Remove focus from the active text input.
    BlurTextInput,
    /// Scroll the current screen back to the top.
    ScrollToTopCurrentScreen,
    /// Exit the application.
    Quit,
}
|
||||
|
||||
/// Convert terminal events into messages.
|
||||
///
|
||||
/// FrankenTUI requires `From<Event>` on the message type so the runtime
|
||||
/// can inject terminal events into the model's update loop.
|
||||
impl From<Event> for Msg {
|
||||
fn from(event: Event) -> Self {
|
||||
match event {
|
||||
Event::Resize { width, height } => Self::Resize { width, height },
|
||||
Event::Tick => Self::Tick,
|
||||
other => Self::RawEvent(other),
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Placeholder data types (will be fleshed out in Phase 1+)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Placeholder for issue detail payload.
// NOTE(review): superseded by crate::state::issue_detail::IssueDetailData
// in Msg — confirm whether this type is still referenced before removal.
#[derive(Debug, Clone)]
pub struct IssueDetail {
    pub key: EntityKey,
    pub title: String,
    pub description: String,
}

/// Placeholder for MR detail payload.
// NOTE(review): superseded by crate::state::mr_detail::MrDetailData in Msg.
#[derive(Debug, Clone)]
pub struct MrDetail {
    pub key: EntityKey,
    pub title: String,
    pub description: String,
}

/// Placeholder for a discussion thread.
#[derive(Debug, Clone)]
pub struct Discussion {
    pub id: String,
    // Raw note bodies; structure to be fleshed out in Phase 1+.
    pub notes: Vec<String>,
}

/// Placeholder for a search result.
#[derive(Debug, Clone)]
pub struct SearchResult {
    pub key: EntityKey,
    pub title: String,
    // Relevance score; scale/semantics not yet defined.
    pub score: f64,
}

/// Placeholder for a timeline event.
#[derive(Debug, Clone)]
pub struct TimelineEvent {
    // Stringly-typed for now; a DateTime is the likely Phase 1+ shape.
    pub timestamp: String,
    pub description: String,
}

/// Placeholder for who/people intelligence result.
#[derive(Debug, Clone)]
pub struct WhoResult {
    pub experts: Vec<String>,
}
|
||||
|
||||
// DashboardData moved to crate::state::dashboard (enriched with
|
||||
// EntityCounts, ProjectSyncInfo, RecentActivityItem, LastSyncInfo).
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_entity_key_equality() {
        assert_eq!(EntityKey::issue(1, 42), EntityKey::issue(1, 42));
        // Same project + iid but different kind must NOT be equal.
        assert_ne!(EntityKey::issue(1, 42), EntityKey::mr(1, 42));
    }

    // Guards the multi-project safety property: iid alone is not identity.
    #[test]
    fn test_entity_key_different_projects() {
        assert_ne!(EntityKey::issue(1, 42), EntityKey::issue(2, 42));
    }

    #[test]
    fn test_entity_key_display() {
        assert_eq!(EntityKey::issue(5, 123).to_string(), "p5:#123");
        assert_eq!(EntityKey::mr(5, 456).to_string(), "p5:!456");
    }

    #[test]
    fn test_entity_key_hash_is_usable_in_collections() {
        use std::collections::HashSet;
        let mut set = HashSet::new();
        set.insert(EntityKey::issue(1, 1));
        set.insert(EntityKey::issue(1, 1)); // duplicate
        set.insert(EntityKey::mr(1, 1));
        assert_eq!(set.len(), 2);
    }

    #[test]
    fn test_screen_labels() {
        assert_eq!(Screen::Dashboard.label(), "Dashboard");
        assert_eq!(Screen::IssueList.label(), "Issues");
        assert_eq!(Screen::MrList.label(), "Merge Requests");
        assert_eq!(Screen::Search.label(), "Search");
    }

    #[test]
    fn test_screen_is_detail_or_entity() {
        assert!(Screen::IssueDetail(EntityKey::issue(1, 1)).is_detail_or_entity());
        assert!(Screen::MrDetail(EntityKey::mr(1, 1)).is_detail_or_entity());
        assert!(!Screen::Dashboard.is_detail_or_entity());
        assert!(!Screen::IssueList.is_detail_or_entity());
        assert!(!Screen::Search.is_detail_or_entity());
    }

    // Spot-checks keywords rather than full strings so copy edits don't
    // break the suite.
    #[test]
    fn test_app_error_display() {
        let err = AppError::DbBusy;
        assert!(err.to_string().contains("busy"));

        let err = AppError::NetworkRateLimited {
            retry_after_secs: Some(30),
        };
        assert!(err.to_string().contains("30s"));

        let err = AppError::NetworkRateLimited {
            retry_after_secs: None,
        };
        assert!(err.to_string().contains("shortly"));

        let err = AppError::AuthFailed;
        assert!(err.to_string().contains("token"));
    }

    #[test]
    fn test_input_mode_default_is_normal() {
        assert!(matches!(InputMode::default(), InputMode::Normal));
    }

    #[test]
    fn test_msg_from_event_resize() {
        let event = Event::Resize {
            width: 80,
            height: 24,
        };
        let msg = Msg::from(event);
        assert!(matches!(
            msg,
            Msg::Resize {
                width: 80,
                height: 24
            }
        ));
    }

    #[test]
    fn test_msg_from_event_tick() {
        let msg = Msg::from(Event::Tick);
        assert!(matches!(msg, Msg::Tick));
    }

    // Any event without a dedicated variant must round-trip via RawEvent.
    #[test]
    fn test_msg_from_event_focus_wraps_raw() {
        let msg = Msg::from(Event::Focus(true));
        assert!(matches!(msg, Msg::RawEvent(Event::Focus(true))));
    }
}
|
||||
350
crates/lore-tui/src/navigation.rs
Normal file
350
crates/lore-tui/src/navigation.rs
Normal file
@@ -0,0 +1,350 @@
|
||||
#![allow(dead_code)] // Phase 1: consumed by LoreApp in bd-6pmy
|
||||
|
||||
//! Browser-like navigation stack with vim-style jump list.
|
||||
//!
|
||||
//! Supports back/forward (browser), jump back/forward (vim Ctrl+O/Ctrl+I),
|
||||
//! and breadcrumb generation. State is preserved when navigating away —
|
||||
//! screens are never cleared on pop.
|
||||
|
||||
use crate::message::Screen;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// NavigationStack
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Browser-like navigation with back/forward stacks and a vim jump list.
///
/// The jump list only records "significant" hops — detail views and
/// cross-references — skipping list/dashboard screens that users
/// visit briefly during drilling.
pub struct NavigationStack {
    // Screens behind the current one (most recent last).
    back_stack: Vec<Screen>,
    // The screen currently displayed; never empty (starts at Dashboard).
    current: Screen,
    // Screens ahead of the current one after going back (most recent last).
    forward_stack: Vec<Screen>,
    // Significant hops only (detail screens); see `push`.
    jump_list: Vec<Screen>,
    // Position within jump_list; sits at jump_list.len() after a fresh
    // push (i.e. one past the last recorded entry).
    jump_index: usize,
}
|
||||
|
||||
impl NavigationStack {
|
||||
/// Create a new stack starting at the Dashboard.
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
back_stack: Vec::new(),
|
||||
current: Screen::Dashboard,
|
||||
forward_stack: Vec::new(),
|
||||
jump_list: Vec::new(),
|
||||
jump_index: 0,
|
||||
}
|
||||
}
|
||||
|
||||
/// The currently displayed screen.
|
||||
#[must_use]
|
||||
pub fn current(&self) -> &Screen {
|
||||
&self.current
|
||||
}
|
||||
|
||||
/// Whether the current screen matches the given screen.
|
||||
#[must_use]
|
||||
pub fn is_at(&self, screen: &Screen) -> bool {
|
||||
&self.current == screen
|
||||
}
|
||||
|
||||
/// Navigate to a new screen.
|
||||
///
|
||||
/// Pushes current to back_stack, clears forward_stack (browser behavior),
|
||||
/// and records detail hops in the jump list.
|
||||
pub fn push(&mut self, screen: Screen) {
|
||||
let old = std::mem::replace(&mut self.current, screen);
|
||||
self.back_stack.push(old);
|
||||
self.forward_stack.clear();
|
||||
|
||||
// Record significant hops in jump list (vim behavior):
|
||||
// Keep entries up to and including the current position, discard
|
||||
// any forward entries beyond it, then append the new destination.
|
||||
if self.current.is_detail_or_entity() {
|
||||
self.jump_list.truncate(self.jump_index.saturating_add(1));
|
||||
self.jump_list.push(self.current.clone());
|
||||
self.jump_index = self.jump_list.len();
|
||||
}
|
||||
}
|
||||
|
||||
/// Go back to the previous screen.
|
||||
///
|
||||
/// Returns `None` at root (can't pop past the initial screen).
|
||||
pub fn pop(&mut self) -> Option<&Screen> {
|
||||
let prev = self.back_stack.pop()?;
|
||||
let old = std::mem::replace(&mut self.current, prev);
|
||||
self.forward_stack.push(old);
|
||||
Some(&self.current)
|
||||
}
|
||||
|
||||
/// Go forward (redo a pop).
|
||||
///
|
||||
/// Returns `None` if there's nothing to go forward to.
|
||||
pub fn go_forward(&mut self) -> Option<&Screen> {
|
||||
let next = self.forward_stack.pop()?;
|
||||
let old = std::mem::replace(&mut self.current, next);
|
||||
self.back_stack.push(old);
|
||||
Some(&self.current)
|
||||
}
|
||||
|
||||
/// Jump backward through the jump list (vim Ctrl+O).
|
||||
///
|
||||
/// Only visits detail/entity screens. Skips entries matching the
|
||||
/// current screen so the first press always produces a visible change.
|
||||
pub fn jump_back(&mut self) -> Option<&Screen> {
|
||||
while self.jump_index > 0 {
|
||||
self.jump_index -= 1;
|
||||
if let Some(target) = self.jump_list.get(self.jump_index).cloned()
|
||||
&& target != self.current
|
||||
{
|
||||
self.current = target;
|
||||
return Some(&self.current);
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Jump forward through the jump list (vim Ctrl+I).
|
||||
///
|
||||
/// Skips entries matching the current screen.
|
||||
pub fn jump_forward(&mut self) -> Option<&Screen> {
|
||||
while self.jump_index < self.jump_list.len() {
|
||||
if let Some(target) = self.jump_list.get(self.jump_index).cloned() {
|
||||
self.jump_index += 1;
|
||||
if target != self.current {
|
||||
self.current = target;
|
||||
return Some(&self.current);
|
||||
}
|
||||
} else {
|
||||
break;
|
||||
}
|
||||
}
|
||||
None
|
||||
}
|
||||
|
||||
/// Reset to a single screen, clearing all history.
|
||||
pub fn reset_to(&mut self, screen: Screen) {
|
||||
self.current = screen;
|
||||
self.back_stack.clear();
|
||||
self.forward_stack.clear();
|
||||
self.jump_list.clear();
|
||||
self.jump_index = 0;
|
||||
}
|
||||
|
||||
/// Breadcrumb labels for the current navigation path.
|
||||
///
|
||||
/// Returns the back stack labels plus the current screen label.
|
||||
#[must_use]
|
||||
pub fn breadcrumbs(&self) -> Vec<&str> {
|
||||
self.back_stack
|
||||
.iter()
|
||||
.chain(std::iter::once(&self.current))
|
||||
.map(Screen::label)
|
||||
.collect()
|
||||
}
|
||||
|
||||
/// Navigation depth (1 = at root, 2 = one push deep, etc.).
|
||||
#[must_use]
|
||||
pub fn depth(&self) -> usize {
|
||||
self.back_stack.len() + 1
|
||||
}
|
||||
|
||||
/// Whether there's anything to go back to.
|
||||
#[must_use]
|
||||
pub fn can_go_back(&self) -> bool {
|
||||
!self.back_stack.is_empty()
|
||||
}
|
||||
|
||||
/// Whether there's anything to go forward to.
|
||||
#[must_use]
|
||||
pub fn can_go_forward(&self) -> bool {
|
||||
!self.forward_stack.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for NavigationStack {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    // Unit tests for the NavigationStack invariants: browser-style
    // back/forward behavior, vim-style jump list recording/truncation,
    // reset, and breadcrumb generation.
    use super::*;
    use crate::message::EntityKey;

    #[test]
    fn test_new_starts_at_dashboard() {
        let nav = NavigationStack::new();
        assert!(nav.is_at(&Screen::Dashboard));
        assert_eq!(nav.depth(), 1);
    }

    #[test]
    fn test_push_pop_preserves_order() {
        let mut nav = NavigationStack::new();
        nav.push(Screen::IssueList);
        nav.push(Screen::IssueDetail(EntityKey::issue(1, 42)));

        assert!(nav.is_at(&Screen::IssueDetail(EntityKey::issue(1, 42))));
        assert_eq!(nav.depth(), 3);

        nav.pop();
        assert!(nav.is_at(&Screen::IssueList));

        nav.pop();
        assert!(nav.is_at(&Screen::Dashboard));
    }

    #[test]
    fn test_pop_at_root_returns_none() {
        let mut nav = NavigationStack::new();
        assert!(nav.pop().is_none());
        assert!(nav.is_at(&Screen::Dashboard));
    }

    #[test]
    fn test_forward_stack_cleared_on_new_push() {
        let mut nav = NavigationStack::new();
        nav.push(Screen::IssueList);
        nav.push(Screen::Search);
        nav.pop(); // back to IssueList, Search in forward
        assert!(nav.can_go_forward());

        nav.push(Screen::Timeline); // new push clears forward
        assert!(!nav.can_go_forward());
    }

    #[test]
    fn test_go_forward_restores() {
        let mut nav = NavigationStack::new();
        nav.push(Screen::IssueList);
        nav.push(Screen::Search);
        nav.pop(); // back to IssueList

        let screen = nav.go_forward();
        assert!(screen.is_some());
        assert!(nav.is_at(&Screen::Search));
    }

    #[test]
    fn test_go_forward_returns_none_when_empty() {
        let mut nav = NavigationStack::new();
        assert!(nav.go_forward().is_none());
    }

    #[test]
    fn test_jump_list_skips_list_screens() {
        let mut nav = NavigationStack::new();
        nav.push(Screen::IssueList); // not a detail — skip
        nav.push(Screen::IssueDetail(EntityKey::issue(1, 1))); // detail — record
        nav.push(Screen::MrList); // not a detail — skip
        nav.push(Screen::MrDetail(EntityKey::mr(1, 2))); // detail — record

        assert_eq!(nav.jump_list.len(), 2);
    }

    #[test]
    fn test_jump_back_and_forward() {
        let mut nav = NavigationStack::new();
        let issue = Screen::IssueDetail(EntityKey::issue(1, 1));
        let mr = Screen::MrDetail(EntityKey::mr(1, 2));

        nav.push(Screen::IssueList);
        nav.push(issue.clone());
        nav.push(Screen::MrList);
        nav.push(mr.clone());

        // Current is MrDetail. jump_list = [IssueDetail, MrDetail], index = 2.
        // First jump_back skips MrDetail (== current) and lands on IssueDetail.
        let prev = nav.jump_back();
        assert_eq!(prev, Some(&issue));
        assert!(nav.is_at(&issue));

        // Already at beginning of jump list.
        assert!(nav.jump_back().is_none());

        // jump_forward skips IssueDetail (== current) and lands on MrDetail.
        let next = nav.jump_forward();
        assert_eq!(next, Some(&mr));
        assert!(nav.is_at(&mr));

        // At end of jump list.
        assert!(nav.jump_forward().is_none());
    }

    #[test]
    fn test_jump_list_truncates_on_new_push() {
        let mut nav = NavigationStack::new();
        nav.push(Screen::IssueDetail(EntityKey::issue(1, 1)));
        nav.push(Screen::IssueDetail(EntityKey::issue(1, 2)));
        nav.push(Screen::IssueDetail(EntityKey::issue(1, 3)));

        // jump back twice — lands on issue(1,1), jump_index = 0
        nav.jump_back();
        nav.jump_back();

        // new detail push truncates forward entries
        nav.push(Screen::MrDetail(EntityKey::mr(1, 99)));

        // should have issue(1,1) and mr(1,99), not issue(1,2) or issue(1,3)
        assert_eq!(nav.jump_list.len(), 2);
        assert_eq!(nav.jump_list[1], Screen::MrDetail(EntityKey::mr(1, 99)));
    }

    #[test]
    fn test_reset_clears_all_history() {
        let mut nav = NavigationStack::new();
        nav.push(Screen::IssueList);
        nav.push(Screen::Search);
        nav.push(Screen::IssueDetail(EntityKey::issue(1, 1)));

        nav.reset_to(Screen::Dashboard);

        assert!(nav.is_at(&Screen::Dashboard));
        assert_eq!(nav.depth(), 1);
        assert!(!nav.can_go_back());
        assert!(!nav.can_go_forward());
        assert!(nav.jump_list.is_empty());
    }

    #[test]
    fn test_breadcrumbs_reflect_stack() {
        let mut nav = NavigationStack::new();
        assert_eq!(nav.breadcrumbs(), vec!["Dashboard"]);

        nav.push(Screen::IssueList);
        assert_eq!(nav.breadcrumbs(), vec!["Dashboard", "Issues"]);

        nav.push(Screen::IssueDetail(EntityKey::issue(1, 42)));
        assert_eq!(nav.breadcrumbs(), vec!["Dashboard", "Issues", "Issue"]);
    }

    #[test]
    fn test_default_is_new() {
        let nav = NavigationStack::default();
        assert!(nav.is_at(&Screen::Dashboard));
        assert_eq!(nav.depth(), 1);
    }

    #[test]
    fn test_can_go_back_and_forward() {
        let mut nav = NavigationStack::new();
        assert!(!nav.can_go_back());
        assert!(!nav.can_go_forward());

        nav.push(Screen::IssueList);
        assert!(nav.can_go_back());
        assert!(!nav.can_go_forward());

        nav.pop();
        assert!(!nav.can_go_back());
        assert!(nav.can_go_forward());
    }
}
|
||||
587
crates/lore-tui/src/safety.rs
Normal file
587
crates/lore-tui/src/safety.rs
Normal file
@@ -0,0 +1,587 @@
|
||||
//! Terminal safety: sanitize untrusted text, URL policy, credential redaction.
|
||||
//!
|
||||
//! GitLab content can contain ANSI escapes, bidi overrides, OSC hyperlinks,
|
||||
//! and C1 control codes that could corrupt terminal rendering. This module
|
||||
//! strips dangerous sequences while preserving a safe SGR subset for readability.
|
||||
|
||||
use std::fmt::Write;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// UrlPolicy
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Controls how OSC 8 hyperlinks in input are handled.
///
/// Defaults to [`UrlPolicy::Strip`], the safe choice for untrusted input.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum UrlPolicy {
    /// Remove OSC 8 hyperlinks entirely, keeping only the link text.
    #[default]
    Strip,
    /// Convert hyperlinks to numbered footnotes: `text [1]` with URL list appended.
    Footnote,
    /// Pass hyperlinks through unchanged (only for trusted content).
    Passthrough,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// RedactPattern
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Common patterns for PII/secret redaction.
#[derive(Debug, Clone)]
pub struct RedactPattern {
    /// Compiled regexes; every match is replaced with `[REDACTED]`.
    patterns: Vec<regex::Regex>,
}
|
||||
|
||||
impl RedactPattern {
|
||||
/// Create a default set of redaction patterns (tokens, emails, etc.).
|
||||
#[must_use]
|
||||
pub fn defaults() -> Self {
|
||||
let patterns = vec![
|
||||
// GitLab personal access tokens
|
||||
regex::Regex::new(r"glpat-[A-Za-z0-9_\-]{20,}").expect("valid regex"),
|
||||
// Generic bearer/API tokens (long hex or base64-ish strings after common prefixes)
|
||||
regex::Regex::new(r"(?i)(token|bearer|api[_-]?key)[\s:=]+\S{8,}").expect("valid regex"),
|
||||
// Email addresses
|
||||
regex::Regex::new(r"[a-zA-Z0-9._%+\-]+@[a-zA-Z0-9.\-]+\.[a-zA-Z]{2,}")
|
||||
.expect("valid regex"),
|
||||
];
|
||||
Self { patterns }
|
||||
}
|
||||
|
||||
/// Apply all redaction patterns to the input string.
|
||||
#[must_use]
|
||||
pub fn redact(&self, input: &str) -> String {
|
||||
let mut result = input.to_string();
|
||||
for pattern in &self.patterns {
|
||||
result = pattern.replace_all(&result, "[REDACTED]").into_owned();
|
||||
}
|
||||
result
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// sanitize_for_terminal
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Sanitize untrusted text for safe terminal display.
///
/// - Strips C1 control codes (0x80-0x9F)
/// - Strips OSC sequences (ESC ] ... ST)
/// - Strips cursor movement CSI sequences (CSI n A/B/C/D/E/F/G/H/J/K)
/// - Strips bidi overrides (U+202A-U+202E, U+2066-U+2069)
/// - Preserves safe SGR subset (bold, italic, underline, reset, standard colors)
///
/// `url_policy` controls handling of OSC 8 hyperlinks.
#[must_use]
pub fn sanitize_for_terminal(input: &str, url_policy: UrlPolicy) -> String {
    let mut output = String::with_capacity(input.len());
    let mut footnotes: Vec<String> = Vec::new();
    // Scan over a char vector so sequence parsers can look ahead by index.
    let chars: Vec<char> = input.chars().collect();
    let len = chars.len();
    let mut i = 0;

    while i < len {
        let ch = chars[i];

        // --- Bidi overrides ---
        if is_bidi_override(ch) {
            i += 1;
            continue;
        }

        // --- C1 control codes (U+0080-U+009F) ---
        if ('\u{0080}'..='\u{009F}').contains(&ch) {
            i += 1;
            continue;
        }

        // --- C0 control codes except tab, newline, carriage return ---
        // ESC (0x1B) is deliberately excluded here: it is parsed as the
        // start of an escape sequence in the branch below.
        if ch.is_ascii_control() && ch != '\t' && ch != '\n' && ch != '\r' && ch != '\x1B' {
            i += 1;
            continue;
        }

        // --- ESC sequences ---
        if ch == '\x1B' {
            if i + 1 < len {
                match chars[i + 1] {
                    // CSI sequence: ESC [
                    '[' => {
                        let (consumed, safe_seq) = parse_csi(&chars, i);
                        // Only a safe SGR subset survives; everything else
                        // (cursor movement, erase, ...) is dropped.
                        if let Some(seq) = safe_seq {
                            output.push_str(&seq);
                        }
                        i += consumed;
                        continue;
                    }
                    // OSC sequence: ESC ]
                    ']' => {
                        let (consumed, link_text, link_url) = parse_osc(&chars, i);
                        match url_policy {
                            UrlPolicy::Strip => {
                                if let Some(text) = link_text {
                                    output.push_str(&text);
                                }
                            }
                            UrlPolicy::Footnote => {
                                // Footnote numbers are 1-based: push first,
                                // then use the new length as the number.
                                if let (Some(text), Some(url)) = (link_text, link_url) {
                                    footnotes.push(url);
                                    let _ = write!(output, "{text} [{n}]", n = footnotes.len());
                                }
                            }
                            UrlPolicy::Passthrough => {
                                // Reproduce the raw OSC sequence
                                for &ch_raw in &chars[i..len.min(i + consumed)] {
                                    output.push(ch_raw);
                                }
                            }
                        }
                        i += consumed;
                        continue;
                    }
                    _ => {
                        // Unknown ESC sequence — skip ESC + next char
                        i += 2;
                        continue;
                    }
                }
            } else {
                // Trailing ESC at end of input
                i += 1;
                continue;
            }
        }

        // --- Normal character ---
        output.push(ch);
        i += 1;
    }

    // Append footnotes if any
    if !footnotes.is_empty() {
        output.push('\n');
        for (idx, url) in footnotes.iter().enumerate() {
            let _ = write!(output, "\n[{}] {url}", idx + 1);
        }
    }

    output
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Bidi check
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Whether `ch` is a Unicode bidi control character.
///
/// Covers the explicit embedding/override controls U+202A..=U+202E
/// (LRE, RLE, PDF, LRO, RLO) and the isolate controls U+2066..=U+2069
/// (LRI, RLI, FSI, PDI) — the same nine characters the enumerated
/// version listed, expressed as two contiguous ranges.
fn is_bidi_override(ch: char) -> bool {
    ('\u{202A}'..='\u{202E}').contains(&ch) || ('\u{2066}'..='\u{2069}').contains(&ch)
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// CSI parser
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Parse a CSI sequence starting at `chars[start]` (which should be ESC).
///
/// Returns `(chars_consumed, Option<safe_sequence_string>)`.
/// If the CSI is a safe SGR, returns the full sequence string to preserve.
/// Otherwise returns None (strip it).
fn parse_csi(chars: &[char], start: usize) -> (usize, Option<String>) {
    // Minimum: ESC [ <final_byte>
    debug_assert!(chars[start] == '\x1B');
    debug_assert!(start + 1 < chars.len() && chars[start + 1] == '[');

    let mut i = start + 2; // skip ESC [
    let len = chars.len();

    // Parameter bytes (0x30-0x3F): digits, ';', ':', '<', '=', '>', '?'.
    // Per ECMA-48 these come first; the previous version accepted the whole
    // 0x20-0x3F range here, which swallowed intermediate bytes as parameters
    // and left the intermediate loop below unreachable.
    let param_start = i;
    while i < len && (0x30..=0x3F).contains(&(chars[i] as u32)) {
        i += 1;
    }
    let param_end = i;

    // Intermediate bytes (0x20-0x2F). SGR has none; if any are present the
    // sequence is not plain SGR and is stripped below.
    let intermediate_start = i;
    while i < len && (0x20..=0x2F).contains(&(chars[i] as u32)) {
        i += 1;
    }
    let has_intermediates = i > intermediate_start;

    // Final byte (0x40-0x7E)
    if i >= len || !(0x40..=0x7E).contains(&(chars[i] as u32)) {
        // Malformed — consume what we've seen and strip
        return (i.saturating_sub(start).max(2), None);
    }

    let final_byte = chars[i];
    let consumed = i + 1 - start;

    // Only preserve SGR sequences (final byte 'm', no intermediates)
    if final_byte == 'm' && !has_intermediates {
        let param_str: String = chars[param_start..param_end].iter().collect();
        if is_safe_sgr(&param_str) {
            let full_seq: String = chars[start..start + consumed].iter().collect();
            return (consumed, Some(full_seq));
        }
    }

    // Anything else (cursor movement A-H, erase J/K, etc.) is stripped
    (consumed, None)
}

/// Check if all SGR parameters in a sequence are in the safe subset.
///
/// Safe: 0 (reset), 1 (bold), 3 (italic), 4 (underline), 22 (normal intensity),
/// 23 (not italic), 24 (not underline), 39 (default fg), 49 (default bg),
/// 30-37 (standard fg), 40-47 (standard bg), 90-97 (bright fg), 100-107 (bright bg).
fn is_safe_sgr(params: &str) -> bool {
    // An empty parameter string (`ESC[m`) and empty parameters between
    // semicolons are both treated as 0 (reset) by terminals, so they pass.
    params.split(';').all(|param| {
        let param = param.trim();
        param.is_empty() || param.parse::<u32>().map_or(false, is_safe_sgr_code)
    })
}

/// Whether a single numeric SGR parameter is in the safe subset.
fn is_safe_sgr_code(n: u32) -> bool {
    matches!(
        n,
        0 // reset
        | 1 // bold
        | 3 // italic
        | 4 // underline
        | 22 // normal intensity (turn off bold)
        | 23 // not italic
        | 24 // not underline
        | 39 // default foreground
        | 49 // default background
        | 30..=37 // standard foreground colors
        | 40..=47 // standard background colors
        | 90..=97 // bright foreground colors
        | 100..=107 // bright background colors
    )
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// OSC parser
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Parse an OSC sequence starting at `chars[start]` (ESC ]).
///
/// Returns `(chars_consumed, link_text, link_url)`.
/// For OSC 8 hyperlinks: `ESC ] 8 ; params ; url ST text ESC ] 8 ; ; ST`
/// For other OSC: consumed without extracting link data.
fn parse_osc(chars: &[char], start: usize) -> (usize, Option<String>, Option<String>) {
    debug_assert!(chars[start] == '\x1B');
    debug_assert!(start + 1 < chars.len() && chars[start + 1] == ']');

    let len = chars.len();
    let i = start + 2; // skip ESC ]

    // Find ST (String Terminator): ESC \ or BEL (0x07)
    // osc_end.0 = index where the terminator begins;
    // osc_end.1 = chars consumed from `i` through the end of the terminator.
    let osc_end = find_st(chars, i);

    // Check if this is OSC 8 (hyperlink)
    if i < len && chars[i] == '8' && i + 1 < len && chars[i + 1] == ';' {
        // OSC 8 hyperlink: ESC ] 8 ; params ; url ST ... ESC ] 8 ; ; ST
        let osc_content: String = chars[i..osc_end.0].iter().collect();
        let first_consumed = osc_end.1;

        // Extract URL from "8;params;url"
        let url = extract_osc8_url(&osc_content);

        // Now find the link text (between first ST and second OSC 8)
        let after_first_st = start + 2 + first_consumed;
        let mut text = String::new();
        let mut j = after_first_st;

        // Collect text until we hit the closing OSC 8 or end of input
        while j < len {
            if j + 1 < len && chars[j] == '\x1B' && chars[j + 1] == ']' {
                // Found another OSC — this should be the closing OSC 8
                let close_end = find_st(chars, j + 2);
                // Total consumed relative to `start`: text up to `j`,
                // plus "ESC ]" (2 chars), plus the closing body + ST.
                return (
                    j + close_end.1 - start + 2,
                    Some(text),
                    url.map(String::from),
                );
            }
            text.push(chars[j]);
            j += 1;
        }

        // Reached end without closing OSC 8
        return (j - start, Some(text), url.map(String::from));
    }

    // Non-OSC-8: just consume and strip.
    // Total consumed = 2 ("ESC ]") + content-and-terminator length.
    (osc_end.1 + (start + 2 - start), None, None)
}
|
||||
|
||||
/// Locate the String Terminator (ST) of an OSC sequence.
///
/// ST is either ESC `\` (two chars) or BEL (0x07).
/// Returns `(content_end_index, total_consumed_from_content_start)`.
/// An unterminated sequence consumes the remainder of the input.
fn find_st(chars: &[char], from: usize) -> (usize, usize) {
    let len = chars.len();
    for i in from..len {
        match chars[i] {
            // BEL terminator — one char long.
            '\x07' => return (i, i - from + 1),
            // ESC \ terminator — two chars long.
            '\x1B' if chars.get(i + 1) == Some(&'\\') => return (i, i - from + 2),
            _ => {}
        }
    }
    // Unterminated — consume everything
    (len, len - from)
}
|
||||
|
||||
/// Extract the URL from OSC 8 content of the form `"8;params;url"`.
///
/// Returns `None` when the content is not OSC 8, has no second separator,
/// or carries an empty URL (the closing `"8;;"` of a hyperlink pair).
fn extract_osc8_url(content: &str) -> Option<&str> {
    let rest = content.strip_prefix("8;")?;
    // Everything after the first ';' following the params is the URL.
    let (_params, url) = rest.split_once(';')?;
    if url.is_empty() { None } else { Some(url) }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    // Tests for terminal sanitization: CSI stripping/preservation,
    // C1/bidi removal, OSC handling under each UrlPolicy, redaction,
    // and edge cases including a fuzz-style no-panic sweep.
    use super::*;

    // --- CSI / cursor movement ---

    #[test]
    fn test_strips_cursor_movement() {
        // CSI 5A = cursor up 5
        let input = "before\x1B[5Aafter";
        let result = sanitize_for_terminal(input, UrlPolicy::Strip);
        assert_eq!(result, "beforeafter");
    }

    #[test]
    fn test_strips_cursor_movement_all_directions() {
        for dir in ['A', 'B', 'C', 'D', 'E', 'F', 'G', 'H'] {
            let input = format!("x\x1B[3{dir}y");
            let result = sanitize_for_terminal(&input, UrlPolicy::Strip);
            assert_eq!(result, "xy", "failed for direction {dir}");
        }
    }

    #[test]
    fn test_strips_erase_sequences() {
        // CSI 2J = erase display
        let input = "before\x1B[2Jafter";
        let result = sanitize_for_terminal(input, UrlPolicy::Strip);
        assert_eq!(result, "beforeafter");
    }

    // --- SGR preservation ---

    #[test]
    fn test_preserves_bold_italic_underline_reset() {
        let input = "\x1B[1mbold\x1B[0m \x1B[3mitalic\x1B[0m \x1B[4munderline\x1B[0m";
        let result = sanitize_for_terminal(input, UrlPolicy::Strip);
        assert_eq!(result, input);
    }

    #[test]
    fn test_preserves_standard_colors() {
        // Red foreground, green background
        let input = "\x1B[31mred\x1B[42m on green\x1B[0m";
        let result = sanitize_for_terminal(input, UrlPolicy::Strip);
        assert_eq!(result, input);
    }

    #[test]
    fn test_preserves_bright_colors() {
        let input = "\x1B[91mbright red\x1B[0m";
        let result = sanitize_for_terminal(input, UrlPolicy::Strip);
        assert_eq!(result, input);
    }

    #[test]
    fn test_preserves_combined_safe_sgr() {
        // Bold + red foreground in one sequence
        let input = "\x1B[1;31mbold red\x1B[0m";
        let result = sanitize_for_terminal(input, UrlPolicy::Strip);
        assert_eq!(result, input);
    }

    #[test]
    fn test_strips_unsafe_sgr() {
        // SGR 8 = hidden text (not in safe list)
        let input = "\x1B[8mhidden\x1B[0m";
        let result = sanitize_for_terminal(input, UrlPolicy::Strip);
        // SGR 8 stripped, SGR 0 preserved
        assert_eq!(result, "hidden\x1B[0m");
    }

    // --- C1 control codes ---

    #[test]
    fn test_strips_c1_control_codes() {
        // U+008D = Reverse Index, U+009B = CSI (8-bit)
        let input = format!("before{}middle{}after", '\u{008D}', '\u{009B}');
        let result = sanitize_for_terminal(&input, UrlPolicy::Strip);
        assert_eq!(result, "beforemiddleafter");
    }

    // --- Bidi overrides ---

    #[test]
    fn test_strips_bidi_overrides() {
        let input = format!(
            "normal{}reversed{}end",
            '\u{202E}', // RLO
            '\u{202C}' // PDF
        );
        let result = sanitize_for_terminal(&input, UrlPolicy::Strip);
        assert_eq!(result, "normalreversedend");
    }

    #[test]
    fn test_strips_all_bidi_chars() {
        let bidi_chars = [
            '\u{202A}', '\u{202B}', '\u{202C}', '\u{202D}', '\u{202E}', '\u{2066}', '\u{2067}',
            '\u{2068}', '\u{2069}',
        ];
        for ch in bidi_chars {
            let input = format!("a{ch}b");
            let result = sanitize_for_terminal(&input, UrlPolicy::Strip);
            assert_eq!(result, "ab", "failed for U+{:04X}", ch as u32);
        }
    }

    // --- OSC sequences ---

    #[test]
    fn test_strips_osc_sequences() {
        // OSC 0 (set title): ESC ] 0 ; title BEL
        let input = "before\x1B]0;My Title\x07after";
        let result = sanitize_for_terminal(input, UrlPolicy::Strip);
        assert_eq!(result, "beforeafter");
    }

    // --- OSC 8 hyperlinks ---

    #[test]
    fn test_url_policy_strip() {
        // OSC 8 hyperlink: ESC]8;;url ST text ESC]8;; ST
        let input = "click \x1B]8;;https://example.com\x07here\x1B]8;;\x07 done";
        let result = sanitize_for_terminal(input, UrlPolicy::Strip);
        assert_eq!(result, "click here done");
    }

    #[test]
    fn test_url_policy_footnote() {
        let input = "click \x1B]8;;https://example.com\x07here\x1B]8;;\x07 done";
        let result = sanitize_for_terminal(input, UrlPolicy::Footnote);
        assert!(result.contains("here [1]"));
        assert!(result.contains("[1] https://example.com"));
    }

    // --- Redaction ---

    #[test]
    fn test_redact_gitlab_token() {
        let redactor = RedactPattern::defaults();
        let input = "My token is glpat-AbCdEfGhIjKlMnOpQrStUvWx";
        let result = redactor.redact(input);
        assert_eq!(result, "My token is [REDACTED]");
    }

    #[test]
    fn test_redact_email() {
        let redactor = RedactPattern::defaults();
        let input = "Contact user@example.com for details";
        let result = redactor.redact(input);
        assert_eq!(result, "Contact [REDACTED] for details");
    }

    #[test]
    fn test_redact_bearer_token() {
        let redactor = RedactPattern::defaults();
        let input = "Authorization: Bearer eyJhbGciOiJSUzI1NiIsInR5cCI";
        let result = redactor.redact(input);
        assert!(result.contains("[REDACTED]"));
        assert!(!result.contains("eyJ"));
    }

    // --- Edge cases ---

    #[test]
    fn test_empty_input() {
        assert_eq!(sanitize_for_terminal("", UrlPolicy::Strip), "");
    }

    #[test]
    fn test_safe_content_passthrough() {
        let input = "Hello, world! This is normal text.\nWith newlines\tand tabs.";
        assert_eq!(sanitize_for_terminal(input, UrlPolicy::Strip), input);
    }

    #[test]
    fn test_trailing_esc() {
        let input = "text\x1B";
        let result = sanitize_for_terminal(input, UrlPolicy::Strip);
        assert_eq!(result, "text");
    }

    #[test]
    fn test_malformed_csi_does_not_eat_text() {
        // ESC [ without a valid final byte before next printable
        let input = "a\x1B[b";
        let result = sanitize_for_terminal(input, UrlPolicy::Strip);
        // The malformed CSI is consumed but shouldn't eat "b" as text
        // ESC[ is start, 'b' is final byte (0x62 is in 0x40-0x7E range)
        // So this is CSI with final byte 'b' (cursor back) — gets stripped
        assert_eq!(result, "a");
    }

    #[test]
    fn test_utf8_adjacent_to_escapes() {
        let input = "\x1B[1m日本語\x1B[0m text";
        let result = sanitize_for_terminal(input, UrlPolicy::Strip);
        assert_eq!(result, "\x1B[1m日本語\x1B[0m text");
    }

    #[test]
    fn test_fuzz_no_panic() {
        // 1000 random-ish byte sequences — must not panic
        for seed in 0u16..1000 {
            let mut bytes = Vec::new();
            for j in 0..50 {
                bytes.push(((seed.wrapping_mul(31).wrapping_add(j)) & 0xFF) as u8);
            }
            // Best-effort UTF-8
            let input = String::from_utf8_lossy(&bytes);
            let _ = sanitize_for_terminal(&input, UrlPolicy::Strip);
        }
    }
}
|
||||
11
crates/lore-tui/src/state/command_palette.rs
Normal file
11
crates/lore-tui/src/state/command_palette.rs
Normal file
@@ -0,0 +1,11 @@
|
||||
#![allow(dead_code)]

//! Command palette state.

/// State for the command palette overlay.
#[derive(Debug, Default)]
pub struct CommandPaletteState {
    /// Current query text for filtering palette entries.
    pub query: String,
    /// Whether keyboard input is routed to the query field.
    pub query_focused: bool,
    /// Index of the currently highlighted entry.
    pub selected_index: usize,
}
|
||||
255
crates/lore-tui/src/state/dashboard.rs
Normal file
255
crates/lore-tui/src/state/dashboard.rs
Normal file
@@ -0,0 +1,255 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
//! Dashboard screen state.
|
||||
//!
|
||||
//! The dashboard is the home screen — entity counts, per-project sync
|
||||
//! status, recent activity, and the last sync summary.
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// EntityCounts
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Aggregated entity counts from the local database.
#[derive(Debug, Clone, Default, PartialEq, Eq)]
pub struct EntityCounts {
    /// Issues currently open.
    pub issues_open: u64,
    /// All issues regardless of state.
    pub issues_total: u64,
    /// Merge requests currently open.
    pub mrs_open: u64,
    /// All merge requests regardless of state.
    pub mrs_total: u64,
    /// Discussion threads.
    pub discussions: u64,
    /// All notes.
    pub notes_total: u64,
    /// Percentage of notes that are system-generated (0-100).
    pub notes_system_pct: u8,
    /// Document count.
    pub documents: u64,
    /// Embedding count.
    pub embeddings: u64,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// ProjectSyncInfo
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Per-project sync freshness.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct ProjectSyncInfo {
    /// Full project path (e.g. "group/project").
    pub path: String,
    /// Minutes elapsed since this project was last synced.
    pub minutes_since_sync: u64,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// RecentActivityItem
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A recently-updated entity for the activity feed.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct RecentActivityItem {
    /// "issue" or "mr".
    pub entity_type: String,
    /// Project-scoped IID of the entity.
    pub iid: u64,
    /// Entity title.
    pub title: String,
    /// Entity state (e.g. "opened", "merged").
    pub state: String,
    /// Minutes elapsed since the entity was last updated.
    pub minutes_ago: u64,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// LastSyncInfo
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Summary of the most recent sync run.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct LastSyncInfo {
    /// Outcome of the run (free-form status string; exact values are
    /// produced by the sync writer — confirm against it).
    pub status: String,
    /// Milliseconds epoch UTC.
    pub finished_at: Option<i64>,
    /// The sync command that was executed.
    pub command: String,
    /// Error message, if the run failed.
    pub error: Option<String>,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// DashboardData
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Data returned by the `fetch_dashboard` action.
///
/// Pure data transfer — no rendering or display logic.
#[derive(Debug, Clone, Default)]
pub struct DashboardData {
    /// Aggregated entity counts.
    pub counts: EntityCounts,
    /// Per-project sync freshness rows.
    pub projects: Vec<ProjectSyncInfo>,
    /// Recently-updated entities for the activity feed.
    pub recent: Vec<RecentActivityItem>,
    /// Summary of the most recent sync run, if any.
    pub last_sync: Option<LastSyncInfo>,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// DashboardState
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// State for the dashboard summary screen.
///
/// Mirrors [`DashboardData`] plus the UI-only scroll offset, which is
/// preserved (and clamped) across data refreshes.
#[derive(Debug, Default)]
pub struct DashboardState {
    /// Aggregated entity counts.
    pub counts: EntityCounts,
    /// Per-project sync freshness rows.
    pub projects: Vec<ProjectSyncInfo>,
    /// Recently-updated entities for the activity feed.
    pub recent: Vec<RecentActivityItem>,
    /// Summary of the most recent sync run, if any.
    pub last_sync: Option<LastSyncInfo>,
    /// Scroll offset for the recent activity list.
    pub scroll_offset: usize,
}
|
||||
|
||||
impl DashboardState {
|
||||
/// Apply fresh data from a `fetch_dashboard` result.
|
||||
///
|
||||
/// Preserves scroll offset (clamped to new data bounds).
|
||||
pub fn update(&mut self, data: DashboardData) {
|
||||
self.counts = data.counts;
|
||||
self.projects = data.projects;
|
||||
self.last_sync = data.last_sync;
|
||||
self.recent = data.recent;
|
||||
// Clamp scroll offset if the list shrunk.
|
||||
if !self.recent.is_empty() {
|
||||
self.scroll_offset = self.scroll_offset.min(self.recent.len() - 1);
|
||||
} else {
|
||||
self.scroll_offset = 0;
|
||||
}
|
||||
}
|
||||
|
||||
/// Scroll the recent activity list down by one.
|
||||
pub fn scroll_down(&mut self) {
|
||||
if !self.recent.is_empty() {
|
||||
self.scroll_offset = (self.scroll_offset + 1).min(self.recent.len() - 1);
|
||||
}
|
||||
}
|
||||
|
||||
/// Scroll the recent activity list up by one.
|
||||
pub fn scroll_up(&mut self) {
|
||||
self.scroll_offset = self.scroll_offset.saturating_sub(1);
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_dashboard_state_default() {
        // Default state: zero counts, no scroll, empty activity feed.
        let state = DashboardState::default();
        assert_eq!(state.counts.issues_total, 0);
        assert_eq!(state.scroll_offset, 0);
        assert!(state.recent.is_empty());
    }

    #[test]
    fn test_dashboard_state_update_applies_data() {
        let mut state = DashboardState::default();
        let data = DashboardData {
            counts: EntityCounts {
                issues_open: 3,
                issues_total: 5,
                ..Default::default()
            },
            projects: vec![ProjectSyncInfo {
                path: "group/project".into(),
                minutes_since_sync: 42,
            }],
            recent: vec![RecentActivityItem {
                entity_type: "issue".into(),
                iid: 1,
                title: "Fix bug".into(),
                state: "opened".into(),
                minutes_ago: 10,
            }],
            last_sync: None,
        };

        // Every data field should be copied into state verbatim.
        state.update(data);
        assert_eq!(state.counts.issues_open, 3);
        assert_eq!(state.counts.issues_total, 5);
        assert_eq!(state.projects.len(), 1);
        assert_eq!(state.recent.len(), 1);
    }

    #[test]
    fn test_dashboard_state_update_clamps_scroll() {
        // Offset starts beyond the incoming single-row list.
        let mut state = DashboardState {
            scroll_offset: 10,
            ..Default::default()
        };

        let data = DashboardData {
            recent: vec![RecentActivityItem {
                entity_type: "issue".into(),
                iid: 1,
                title: "Only item".into(),
                state: "opened".into(),
                minutes_ago: 5,
            }],
            ..Default::default()
        };

        state.update(data);
        assert_eq!(state.scroll_offset, 0); // Clamped to len-1 = 0
    }

    #[test]
    fn test_dashboard_state_update_empty_resets_scroll() {
        let mut state = DashboardState {
            scroll_offset: 5,
            ..Default::default()
        };

        // Empty data — offset must reset to 0, not stay stale.
        state.update(DashboardData::default());
        assert_eq!(state.scroll_offset, 0);
    }

    #[test]
    fn test_scroll_down_and_up() {
        let mut state = DashboardState::default();
        // Five synthetic rows so the offset has room to move.
        state.recent = (0..5)
            .map(|i| RecentActivityItem {
                entity_type: "issue".into(),
                iid: i,
                title: format!("Item {i}"),
                state: "opened".into(),
                minutes_ago: i,
            })
            .collect();

        assert_eq!(state.scroll_offset, 0);
        state.scroll_down();
        assert_eq!(state.scroll_offset, 1);
        state.scroll_down();
        assert_eq!(state.scroll_offset, 2);
        state.scroll_up();
        assert_eq!(state.scroll_offset, 1);
        state.scroll_up();
        assert_eq!(state.scroll_offset, 0);
        state.scroll_up(); // Can't go below 0
        assert_eq!(state.scroll_offset, 0);
    }

    #[test]
    fn test_scroll_down_stops_at_end() {
        let mut state = DashboardState::default();
        state.recent = vec![RecentActivityItem {
            entity_type: "mr".into(),
            iid: 1,
            title: "Only".into(),
            state: "merged".into(),
            minutes_ago: 0,
        }];

        state.scroll_down();
        assert_eq!(state.scroll_offset, 0); // Can't scroll past single item
    }

    #[test]
    fn test_scroll_on_empty_is_noop() {
        // Scrolling with no rows must neither panic nor move the offset.
        let mut state = DashboardState::default();
        state.scroll_down();
        assert_eq!(state.scroll_offset, 0);
        state.scroll_up();
        assert_eq!(state.scroll_offset, 0);
    }
}
|
||||
284
crates/lore-tui/src/state/issue_detail.rs
Normal file
284
crates/lore-tui/src/state/issue_detail.rs
Normal file
@@ -0,0 +1,284 @@
|
||||
#![allow(dead_code)] // Phase 2: consumed by Issue Detail screen
|
||||
|
||||
//! Issue detail screen state.
|
||||
//!
|
||||
//! Holds metadata, discussions, cross-references, and UI state for
|
||||
//! viewing a single issue. Supports progressive hydration: metadata
|
||||
//! loads first, discussions load async in a second phase.
|
||||
|
||||
use crate::message::EntityKey;
|
||||
use crate::view::common::cross_ref::{CrossRef, CrossRefState};
|
||||
use crate::view::common::discussion_tree::{DiscussionNode, DiscussionTreeState};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// IssueMetadata
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Full metadata for a single issue, fetched from the local DB.
///
/// Timestamps are milliseconds since the Unix epoch (UTC assumed —
/// confirm against the sync writer).
#[derive(Debug, Clone)]
pub struct IssueMetadata {
    /// Issue IID (project-scoped).
    pub iid: i64,
    /// Project path (e.g., "group/project").
    pub project_path: String,
    /// Issue title.
    pub title: String,
    /// Issue description (markdown).
    pub description: String,
    /// Current state: "opened" or "closed".
    pub state: String,
    /// Author username.
    pub author: String,
    /// Assigned usernames.
    pub assignees: Vec<String>,
    /// Label names.
    pub labels: Vec<String>,
    /// Milestone title (if set).
    pub milestone: Option<String>,
    /// Due date (if set, "YYYY-MM-DD").
    pub due_date: Option<String>,
    /// Created timestamp (ms epoch).
    pub created_at: i64,
    /// Updated timestamp (ms epoch).
    pub updated_at: i64,
    /// GitLab web URL for "open in browser".
    pub web_url: String,
    /// Discussion count (for display before discussions load).
    pub discussion_count: usize,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// IssueDetailData
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Bundle returned by the metadata fetch action.
///
/// Metadata + cross-refs load in Phase 1 (fast). Discussions load
/// separately in Phase 2.
#[derive(Debug, Clone)]
pub struct IssueDetailData {
    /// Full issue metadata.
    pub metadata: IssueMetadata,
    /// Cross-references discovered alongside the metadata.
    pub cross_refs: Vec<CrossRef>,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// DetailSection
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Which section of the detail view has keyboard focus.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum DetailSection {
    /// Description area (scrollable text).
    #[default]
    Description,
    /// Discussion tree.
    Discussions,
    /// Cross-references list.
    CrossRefs,
}

impl DetailSection {
    /// Cycle to the next section
    /// (Description → Discussions → CrossRefs → Description).
    #[must_use]
    pub fn next(self) -> Self {
        match self {
            Self::Description => Self::Discussions,
            Self::Discussions => Self::CrossRefs,
            Self::CrossRefs => Self::Description,
        }
    }

    /// Cycle to the previous section (inverse of [`Self::next`]).
    #[must_use]
    pub fn prev(self) -> Self {
        // In a 3-cycle, stepping forward twice equals stepping back once.
        self.next().next()
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// IssueDetailState
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// State for the issue detail screen.
///
/// Hydrates progressively: metadata and cross-refs arrive in Phase 1,
/// discussions in Phase 2 (see [`IssueDetailData`]).
#[derive(Debug, Default)]
pub struct IssueDetailState {
    /// Entity key for the currently displayed issue.
    pub current_key: Option<EntityKey>,
    /// Issue metadata (Phase 1 load).
    pub metadata: Option<IssueMetadata>,
    /// Discussion nodes (Phase 2 async load).
    pub discussions: Vec<DiscussionNode>,
    /// Whether discussions have finished loading.
    pub discussions_loaded: bool,
    /// Cross-references (loaded with metadata in Phase 1).
    pub cross_refs: Vec<CrossRef>,
    /// Discussion tree UI state (expand/collapse, selection).
    pub tree_state: DiscussionTreeState,
    /// Cross-reference list UI state.
    pub cross_ref_state: CrossRefState,
    /// Description scroll offset.
    pub description_scroll: usize,
    /// Active section for keyboard focus.
    pub active_section: DetailSection,
}
|
||||
|
||||
impl IssueDetailState {
|
||||
/// Reset state for a new issue.
|
||||
pub fn load_new(&mut self, key: EntityKey) {
|
||||
self.current_key = Some(key);
|
||||
self.metadata = None;
|
||||
self.discussions.clear();
|
||||
self.discussions_loaded = false;
|
||||
self.cross_refs.clear();
|
||||
self.tree_state = DiscussionTreeState::default();
|
||||
self.cross_ref_state = CrossRefState::default();
|
||||
self.description_scroll = 0;
|
||||
self.active_section = DetailSection::Description;
|
||||
}
|
||||
|
||||
/// Apply Phase 1 data (metadata + cross-refs).
|
||||
pub fn apply_metadata(&mut self, data: IssueDetailData) {
|
||||
self.metadata = Some(data.metadata);
|
||||
self.cross_refs = data.cross_refs;
|
||||
}
|
||||
|
||||
/// Apply Phase 2 data (discussions).
|
||||
pub fn apply_discussions(&mut self, discussions: Vec<DiscussionNode>) {
|
||||
self.discussions = discussions;
|
||||
self.discussions_loaded = true;
|
||||
}
|
||||
|
||||
/// Whether we have metadata loaded for the current key.
|
||||
#[must_use]
|
||||
pub fn has_metadata(&self) -> bool {
|
||||
self.metadata.is_some()
|
||||
}
|
||||
|
||||
/// Cycle to the next section.
|
||||
pub fn next_section(&mut self) {
|
||||
self.active_section = self.active_section.next();
|
||||
}
|
||||
|
||||
/// Cycle to the previous section.
|
||||
pub fn prev_section(&mut self) {
|
||||
self.active_section = self.active_section.prev();
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::view::common::cross_ref::CrossRefKind;

    #[test]
    fn test_issue_detail_state_default() {
        // Fresh state: nothing loaded, focus starts on the description.
        let state = IssueDetailState::default();
        assert!(state.current_key.is_none());
        assert!(state.metadata.is_none());
        assert!(state.discussions.is_empty());
        assert!(!state.discussions_loaded);
        assert!(state.cross_refs.is_empty());
        assert_eq!(state.active_section, DetailSection::Description);
    }

    #[test]
    fn test_load_new_resets_state() {
        // Seed with non-default UI state to prove load_new clears it.
        let mut state = IssueDetailState {
            discussions_loaded: true,
            description_scroll: 10,
            active_section: DetailSection::CrossRefs,
            ..IssueDetailState::default()
        };

        state.load_new(EntityKey::issue(1, 42));
        assert_eq!(state.current_key, Some(EntityKey::issue(1, 42)));
        assert!(state.metadata.is_none());
        assert!(!state.discussions_loaded);
        assert_eq!(state.description_scroll, 0);
        assert_eq!(state.active_section, DetailSection::Description);
    }

    #[test]
    fn test_apply_metadata() {
        let mut state = IssueDetailState::default();
        state.load_new(EntityKey::issue(1, 42));

        let data = IssueDetailData {
            metadata: IssueMetadata {
                iid: 42,
                project_path: "group/proj".into(),
                title: "Fix auth".into(),
                description: "Description here".into(),
                state: "opened".into(),
                author: "alice".into(),
                assignees: vec!["bob".into()],
                labels: vec!["backend".into()],
                milestone: Some("v1.0".into()),
                due_date: None,
                created_at: 1_700_000_000_000,
                updated_at: 1_700_000_060_000,
                web_url: "https://gitlab.com/group/proj/-/issues/42".into(),
                discussion_count: 3,
            },
            cross_refs: vec![CrossRef {
                kind: CrossRefKind::ClosingMr,
                entity_key: EntityKey::mr(1, 10),
                label: "Fix auth MR".into(),
                navigable: true,
            }],
        };

        // Phase 1 apply: metadata present, cross-refs copied over.
        state.apply_metadata(data);
        assert!(state.has_metadata());
        assert_eq!(state.metadata.as_ref().unwrap().iid, 42);
        assert_eq!(state.cross_refs.len(), 1);
    }

    #[test]
    fn test_apply_discussions() {
        let mut state = IssueDetailState::default();
        assert!(!state.discussions_loaded);

        let discussions = vec![DiscussionNode {
            discussion_id: "d1".into(),
            notes: vec![],
            resolvable: false,
            resolved: false,
        }];

        // Phase 2 apply flips the loaded flag as well as storing the nodes.
        state.apply_discussions(discussions);
        assert!(state.discussions_loaded);
        assert_eq!(state.discussions.len(), 1);
    }

    #[test]
    fn test_detail_section_cycling() {
        // next() and prev() walk the 3-cycle in opposite directions.
        let section = DetailSection::Description;
        assert_eq!(section.next(), DetailSection::Discussions);
        assert_eq!(section.next().next(), DetailSection::CrossRefs);
        assert_eq!(section.next().next().next(), DetailSection::Description);

        assert_eq!(section.prev(), DetailSection::CrossRefs);
        assert_eq!(section.prev().prev(), DetailSection::Discussions);
    }

    #[test]
    fn test_section_next_prev_round_trip() {
        let mut state = IssueDetailState::default();
        assert_eq!(state.active_section, DetailSection::Description);

        state.next_section();
        assert_eq!(state.active_section, DetailSection::Discussions);

        state.prev_section();
        assert_eq!(state.active_section, DetailSection::Description);
    }
}
|
||||
376
crates/lore-tui/src/state/issue_list.rs
Normal file
376
crates/lore-tui/src/state/issue_list.rs
Normal file
@@ -0,0 +1,376 @@
|
||||
#![allow(dead_code)] // Phase 2: consumed by LoreApp and view/issue_list
|
||||
|
||||
//! Issue list screen state.
|
||||
//!
|
||||
//! Uses keyset pagination with a snapshot fence for stable ordering
|
||||
//! under concurrent sync writes. Filter changes reset the pagination
|
||||
//! cursor and snapshot fence.
|
||||
|
||||
use std::hash::{Hash, Hasher};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Cursor (keyset pagination boundary)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Keyset pagination cursor — (updated_at, iid) boundary.
///
/// The next page query uses `WHERE (updated_at, iid) < (cursor.updated_at, cursor.iid)`
/// to avoid OFFSET instability.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct IssueCursor {
    /// `updated_at` (ms epoch) of the last row on the previous page.
    pub updated_at: i64,
    /// IID of the last row on the previous page (tie-breaker).
    pub iid: i64,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Filter
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Structured filter for issue list queries.
///
/// Every field is optional; `None` means "don't filter on this axis".
/// `Hash` is derived so [`IssueFilter::hash_value`] automatically covers
/// any field added later — the previous field-by-field hashing had to be
/// kept in sync with the struct by hand and could silently drift.
#[derive(Debug, Clone, Default, PartialEq, Eq, Hash)]
pub struct IssueFilter {
    pub state: Option<String>,
    pub author: Option<String>,
    pub assignee: Option<String>,
    pub label: Option<String>,
    pub milestone: Option<String>,
    pub status: Option<String>,
    pub free_text: Option<String>,
    pub project_id: Option<i64>,
}

impl IssueFilter {
    /// Compute a hash for change detection.
    ///
    /// Uses the derived `Hash` impl over all fields, so two filters with
    /// identical contents always hash equal within a process run.
    pub fn hash_value(&self) -> u64 {
        let mut hasher = std::collections::hash_map::DefaultHasher::new();
        self.hash(&mut hasher);
        hasher.finish()
    }

    /// Whether any filter is active.
    ///
    /// Equivalent to "any field is `Some`": the default filter is all-`None`,
    /// so comparing against it covers every field (including future ones).
    pub fn is_active(&self) -> bool {
        *self != Self::default()
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Row
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A single row in the issue list.
#[derive(Debug, Clone)]
pub struct IssueListRow {
    /// Project path (e.g. "group/project").
    pub project_path: String,
    /// Issue IID (project-scoped).
    pub iid: i64,
    /// Issue title.
    pub title: String,
    /// Issue state (e.g. "opened").
    pub state: String,
    /// Author username.
    pub author: String,
    /// Label names.
    pub labels: Vec<String>,
    /// Updated timestamp (ms epoch); primary keyset sort key.
    pub updated_at: i64,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Page result
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Result from a paginated issue list query.
#[derive(Debug, Clone)]
pub struct IssueListPage {
    /// Rows in this page.
    pub rows: Vec<IssueListRow>,
    /// Cursor for the following page; `None` when this is the last page.
    pub next_cursor: Option<IssueCursor>,
    /// Total count of rows matching the filter (across all pages).
    pub total_count: u64,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Sort
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Fields available for sorting.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum SortField {
    /// Most-recently-updated first is the default ordering.
    #[default]
    UpdatedAt,
    Iid,
    Title,
    State,
    Author,
}

/// Sort direction.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum SortOrder {
    /// Descending is the default (newest/highest first).
    #[default]
    Desc,
    Asc,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// IssueListState
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// State for the issue list screen.
///
/// Pagination is keyset-based ([`IssueCursor`]); `snapshot_fence` pins
/// the ordering against concurrent sync writes, and a filter change
/// resets both (see [`IssueListState::check_filter_change`]).
#[derive(Debug, Default)]
pub struct IssueListState {
    /// Current page of issue rows.
    pub rows: Vec<IssueListRow>,
    /// Total count of matching issues.
    pub total_count: u64,
    /// Selected row index (within current window).
    pub selected_index: usize,
    /// Scroll offset for the entity table.
    pub scroll_offset: usize,
    /// Cursor for the next page.
    pub next_cursor: Option<IssueCursor>,
    /// Whether a prefetch is in flight.
    pub prefetch_in_flight: bool,
    /// Current filter.
    pub filter: IssueFilter,
    /// Raw filter input text.
    pub filter_input: String,
    /// Whether the filter bar has focus.
    pub filter_focused: bool,
    /// Sort field.
    pub sort_field: SortField,
    /// Sort direction.
    pub sort_order: SortOrder,
    /// Snapshot fence: max updated_at from initial load.
    pub snapshot_fence: Option<i64>,
    /// Hash of the current filter for change detection.
    pub filter_hash: u64,
    /// Whether Quick Peek is visible.
    pub peek_visible: bool,
}
|
||||
|
||||
impl IssueListState {
|
||||
/// Reset pagination state (called when filter changes or on refresh).
|
||||
pub fn reset_pagination(&mut self) {
|
||||
self.rows.clear();
|
||||
self.next_cursor = None;
|
||||
self.selected_index = 0;
|
||||
self.scroll_offset = 0;
|
||||
self.snapshot_fence = None;
|
||||
self.total_count = 0;
|
||||
self.prefetch_in_flight = false;
|
||||
}
|
||||
|
||||
/// Apply a new page of results.
|
||||
pub fn apply_page(&mut self, page: IssueListPage) {
|
||||
// Set snapshot fence on first page load.
|
||||
if self.snapshot_fence.is_none() {
|
||||
self.snapshot_fence = page.rows.first().map(|r| r.updated_at);
|
||||
}
|
||||
self.rows.extend(page.rows);
|
||||
self.next_cursor = page.next_cursor;
|
||||
self.total_count = page.total_count;
|
||||
self.prefetch_in_flight = false;
|
||||
}
|
||||
|
||||
/// Check if filter changed and reset if needed.
|
||||
pub fn check_filter_change(&mut self) -> bool {
|
||||
let new_hash = self.filter.hash_value();
|
||||
if new_hash != self.filter_hash {
|
||||
self.filter_hash = new_hash;
|
||||
self.reset_pagination();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether the user has scrolled near the end of current data (80% threshold).
|
||||
pub fn should_prefetch(&self) -> bool {
|
||||
if self.prefetch_in_flight || self.next_cursor.is_none() {
|
||||
return false;
|
||||
}
|
||||
if self.rows.is_empty() {
|
||||
return false;
|
||||
}
|
||||
let threshold = (self.rows.len() * 4) / 5; // 80%
|
||||
self.selected_index >= threshold
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a page of `count` rows in descending (updated_at, iid) order;
    /// when `has_next` is set, the cursor points at the last row and the
    /// total count pretends another page of equal size exists.
    fn sample_page(count: usize, has_next: bool) -> IssueListPage {
        let rows: Vec<IssueListRow> = (0..count)
            .map(|i| IssueListRow {
                project_path: "group/project".into(),
                iid: (count - i) as i64,
                title: format!("Issue {}", count - i),
                state: "opened".into(),
                author: "taylor".into(),
                labels: vec![],
                updated_at: 1_700_000_000_000 - (i as i64 * 60_000),
            })
            .collect();

        let next_cursor = if has_next {
            rows.last().map(|r| IssueCursor {
                updated_at: r.updated_at,
                iid: r.iid,
            })
        } else {
            None
        };

        IssueListPage {
            rows,
            next_cursor,
            total_count: if has_next {
                (count * 2) as u64
            } else {
                count as u64
            },
        }
    }

    #[test]
    fn test_apply_page_sets_snapshot_fence() {
        let mut state = IssueListState::default();
        let page = sample_page(5, false);
        state.apply_page(page);

        // Fence comes from the first (newest) row of the first page.
        assert_eq!(state.rows.len(), 5);
        assert!(state.snapshot_fence.is_some());
        assert_eq!(state.snapshot_fence.unwrap(), 1_700_000_000_000);
    }

    #[test]
    fn test_apply_page_appends() {
        let mut state = IssueListState::default();
        state.apply_page(sample_page(5, true));
        assert_eq!(state.rows.len(), 5);

        // Second page extends, never replaces, the loaded window.
        state.apply_page(sample_page(3, false));
        assert_eq!(state.rows.len(), 8);
    }

    #[test]
    fn test_reset_pagination_clears_state() {
        let mut state = IssueListState::default();
        state.apply_page(sample_page(5, true));
        state.selected_index = 3;

        state.reset_pagination();

        assert!(state.rows.is_empty());
        assert_eq!(state.selected_index, 0);
        assert!(state.next_cursor.is_none());
        assert!(state.snapshot_fence.is_none());
    }

    #[test]
    fn test_check_filter_change_detects_change() {
        let mut state = IssueListState::default();
        state.filter_hash = state.filter.hash_value();

        state.filter.state = Some("opened".into());
        assert!(state.check_filter_change());
    }

    #[test]
    fn test_check_filter_change_no_change() {
        let mut state = IssueListState::default();
        state.filter_hash = state.filter.hash_value();
        assert!(!state.check_filter_change());
    }

    #[test]
    fn test_should_prefetch() {
        let mut state = IssueListState::default();
        state.apply_page(sample_page(10, true));

        state.selected_index = 4; // 40% — no prefetch
        assert!(!state.should_prefetch());

        state.selected_index = 8; // 80% — prefetch
        assert!(state.should_prefetch());
    }

    #[test]
    fn test_should_prefetch_no_next_page() {
        // No next cursor means nothing to prefetch, even at the end.
        let mut state = IssueListState::default();
        state.apply_page(sample_page(10, false));
        state.selected_index = 9;
        assert!(!state.should_prefetch());
    }

    #[test]
    fn test_should_prefetch_already_in_flight() {
        // An in-flight prefetch suppresses a duplicate request.
        let mut state = IssueListState::default();
        state.apply_page(sample_page(10, true));
        state.selected_index = 9;
        state.prefetch_in_flight = true;
        assert!(!state.should_prefetch());
    }

    #[test]
    fn test_issue_filter_is_active() {
        let empty = IssueFilter::default();
        assert!(!empty.is_active());

        let active = IssueFilter {
            state: Some("opened".into()),
            ..Default::default()
        };
        assert!(active.is_active());
    }

    #[test]
    fn test_issue_filter_hash_deterministic() {
        let f1 = IssueFilter {
            state: Some("opened".into()),
            author: Some("taylor".into()),
            ..Default::default()
        };
        let f2 = f1.clone();
        assert_eq!(f1.hash_value(), f2.hash_value());
    }

    #[test]
    fn test_issue_filter_hash_differs() {
        let f1 = IssueFilter {
            state: Some("opened".into()),
            ..Default::default()
        };
        let f2 = IssueFilter {
            state: Some("closed".into()),
            ..Default::default()
        };
        assert_ne!(f1.hash_value(), f2.hash_value());
    }

    #[test]
    fn test_snapshot_fence_not_overwritten_on_second_page() {
        let mut state = IssueListState::default();
        state.apply_page(sample_page(5, true));
        let fence = state.snapshot_fence;

        state.apply_page(sample_page(3, false));
        assert_eq!(
            state.snapshot_fence, fence,
            "Fence should not change on second page"
        );
    }
}
|
||||
344
crates/lore-tui/src/state/mod.rs
Normal file
344
crates/lore-tui/src/state/mod.rs
Normal file
@@ -0,0 +1,344 @@
|
||||
#![allow(dead_code)] // Phase 1: consumed by LoreApp in bd-6pmy
|
||||
|
||||
//! Top-level state composition for the TUI.
|
||||
//!
|
||||
//! Each screen has its own state struct. State is preserved when
|
||||
//! navigating away — screens are never cleared on pop.
|
||||
//!
|
||||
//! [`LoadState`] enables stale-while-revalidate: screens show the last
|
||||
//! available data during a refresh, with a spinner indicating the load.
|
||||
//!
|
||||
//! [`ScreenIntent`] is the pure return type from state handlers — they
|
||||
//! never spawn async tasks directly. The intent is interpreted by
|
||||
//! [`LoreApp`](crate::app::LoreApp) which dispatches through the
|
||||
//! [`TaskSupervisor`](crate::task_supervisor::TaskSupervisor).
|
||||
|
||||
pub mod command_palette;
|
||||
pub mod dashboard;
|
||||
pub mod issue_detail;
|
||||
pub mod issue_list;
|
||||
pub mod mr_detail;
|
||||
pub mod mr_list;
|
||||
pub mod search;
|
||||
pub mod sync;
|
||||
pub mod timeline;
|
||||
pub mod who;
|
||||
|
||||
use std::collections::{HashMap, HashSet};
|
||||
|
||||
use crate::message::Screen;
|
||||
|
||||
// Re-export screen states for convenience.
|
||||
pub use command_palette::CommandPaletteState;
|
||||
pub use dashboard::DashboardState;
|
||||
pub use issue_detail::IssueDetailState;
|
||||
pub use issue_list::IssueListState;
|
||||
pub use mr_detail::MrDetailState;
|
||||
pub use mr_list::MrListState;
|
||||
pub use search::SearchState;
|
||||
pub use sync::SyncState;
|
||||
pub use timeline::TimelineState;
|
||||
pub use who::WhoState;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// LoadState
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Loading state for a screen's data.
///
/// Enables stale-while-revalidate: screens render their last data while
/// showing a spinner when `Refreshing`.
#[derive(Debug, Clone, PartialEq, Eq, Default)]
pub enum LoadState {
    /// No load in progress, data is current (or screen was never loaded).
    #[default]
    Idle,
    /// First load — no data to show yet, display a full-screen spinner.
    LoadingInitial,
    /// Background refresh — show existing data with a spinner indicator.
    Refreshing,
    /// Load failed — display the error alongside any stale data.
    Error(String),
}

impl LoadState {
    /// Whether data is currently being loaded (initial load or refresh).
    #[must_use]
    pub fn is_loading(&self) -> bool {
        // Exhaustive match: adding a variant forces this site to be revisited.
        match self {
            Self::LoadingInitial | Self::Refreshing => true,
            Self::Idle | Self::Error(_) => false,
        }
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// ScreenLoadStateMap
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Tracks per-screen load state.
|
||||
///
|
||||
/// Returns [`LoadState::Idle`] for screens that haven't been tracked.
|
||||
/// Automatically removes entries set to `Idle` to prevent unbounded growth.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct ScreenLoadStateMap {
|
||||
map: HashMap<Screen, LoadState>,
|
||||
/// Screens that have had a load state set at least once.
|
||||
visited: HashSet<Screen>,
|
||||
}
|
||||
|
||||
impl ScreenLoadStateMap {
|
||||
/// Get the load state for a screen (defaults to `Idle`).
|
||||
#[must_use]
|
||||
pub fn get(&self, screen: &Screen) -> &LoadState {
|
||||
static IDLE: LoadState = LoadState::Idle;
|
||||
self.map.get(screen).unwrap_or(&IDLE)
|
||||
}
|
||||
|
||||
/// Set the load state for a screen.
|
||||
///
|
||||
/// Setting to `Idle` removes the entry to prevent map growth.
|
||||
pub fn set(&mut self, screen: Screen, state: LoadState) {
|
||||
self.visited.insert(screen.clone());
|
||||
if state == LoadState::Idle {
|
||||
self.map.remove(&screen);
|
||||
} else {
|
||||
self.map.insert(screen, state);
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether this screen has ever had a load initiated.
|
||||
#[must_use]
|
||||
pub fn was_visited(&self, screen: &Screen) -> bool {
|
||||
self.visited.contains(screen)
|
||||
}
|
||||
|
||||
/// Whether any screen is currently loading.
|
||||
#[must_use]
|
||||
pub fn any_loading(&self) -> bool {
|
||||
self.map.values().any(LoadState::is_loading)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// ScreenIntent
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Pure return type from screen state handlers.
///
/// State handlers must never spawn async work directly — they return
/// an intent that [`LoreApp`] interprets and dispatches through the
/// [`TaskSupervisor`].
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum ScreenIntent {
    /// No action needed.
    None,
    /// Navigate to a new screen.
    Navigate(Screen),
    /// Screen data needs re-querying (e.g., filter changed).
    RequeryNeeded(Screen),
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// ScopeContext
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Global scope filters applied across all screens.
///
/// When a project filter is active, all data queries scope to that
/// project. The TUI shows the active scope in the status bar.
#[derive(Debug, Default)]
pub struct ScopeContext {
    /// Active project filter (project_id). `None` means no project scope.
    pub project_id: Option<i64>,
    /// Human-readable project name for display (kept alongside the id so
    /// the status bar never needs a lookup).
    pub project_name: Option<String>,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// AppState
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Top-level state composition for the TUI.
|
||||
///
|
||||
/// Each field holds one screen's state. State is preserved when
|
||||
/// navigating away and restored on return.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct AppState {
|
||||
// Per-screen states.
|
||||
pub dashboard: DashboardState,
|
||||
pub issue_list: IssueListState,
|
||||
pub issue_detail: IssueDetailState,
|
||||
pub mr_list: MrListState,
|
||||
pub mr_detail: MrDetailState,
|
||||
pub search: SearchState,
|
||||
pub timeline: TimelineState,
|
||||
pub who: WhoState,
|
||||
pub sync: SyncState,
|
||||
pub command_palette: CommandPaletteState,
|
||||
|
||||
// Cross-cutting state.
|
||||
pub global_scope: ScopeContext,
|
||||
pub load_state: ScreenLoadStateMap,
|
||||
pub error_toast: Option<String>,
|
||||
pub show_help: bool,
|
||||
pub terminal_size: (u16, u16),
|
||||
}
|
||||
|
||||
impl AppState {
|
||||
/// Set a screen's load state.
|
||||
pub fn set_loading(&mut self, screen: Screen, state: LoadState) {
|
||||
self.load_state.set(screen, state);
|
||||
}
|
||||
|
||||
/// Set the global error toast.
|
||||
pub fn set_error(&mut self, msg: String) {
|
||||
self.error_toast = Some(msg);
|
||||
}
|
||||
|
||||
/// Clear the global error toast.
|
||||
pub fn clear_error(&mut self) {
|
||||
self.error_toast = None;
|
||||
}
|
||||
|
||||
/// Whether any text input is currently focused.
|
||||
#[must_use]
|
||||
pub fn has_text_focus(&self) -> bool {
|
||||
self.issue_list.filter_focused
|
||||
|| self.mr_list.filter_focused
|
||||
|| self.search.query_focused
|
||||
|| self.command_palette.query_focused
|
||||
}
|
||||
|
||||
/// Remove focus from all text inputs.
|
||||
pub fn blur_text_focus(&mut self) {
|
||||
self.issue_list.filter_focused = false;
|
||||
self.mr_list.filter_focused = false;
|
||||
self.search.query_focused = false;
|
||||
self.command_palette.query_focused = false;
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    // Unit tests for LoadState, ScreenLoadStateMap, ScreenIntent,
    // ScopeContext, and AppState composition.
    use super::*;

    #[test]
    fn test_load_state_default_idle() {
        let map = ScreenLoadStateMap::default();
        assert_eq!(*map.get(&Screen::Dashboard), LoadState::Idle);
        assert_eq!(*map.get(&Screen::IssueList), LoadState::Idle);
    }

    #[test]
    fn test_load_state_set_and_get() {
        let mut map = ScreenLoadStateMap::default();
        map.set(Screen::Dashboard, LoadState::LoadingInitial);
        assert_eq!(*map.get(&Screen::Dashboard), LoadState::LoadingInitial);
        assert_eq!(*map.get(&Screen::IssueList), LoadState::Idle);
    }

    #[test]
    fn test_load_state_set_idle_removes_entry() {
        let mut map = ScreenLoadStateMap::default();
        map.set(Screen::Dashboard, LoadState::Refreshing);
        assert_eq!(map.map.len(), 1);

        // Setting back to Idle must drop the entry (anti-growth invariant).
        map.set(Screen::Dashboard, LoadState::Idle);
        assert_eq!(map.map.len(), 0);
        assert_eq!(*map.get(&Screen::Dashboard), LoadState::Idle);
    }

    #[test]
    fn test_any_loading() {
        let mut map = ScreenLoadStateMap::default();
        assert!(!map.any_loading());

        map.set(Screen::Dashboard, LoadState::LoadingInitial);
        assert!(map.any_loading());

        // Error is a terminal state, not a loading state.
        map.set(Screen::Dashboard, LoadState::Error("oops".into()));
        assert!(!map.any_loading());
    }

    #[test]
    fn test_load_state_is_loading() {
        assert!(!LoadState::Idle.is_loading());
        assert!(LoadState::LoadingInitial.is_loading());
        assert!(LoadState::Refreshing.is_loading());
        assert!(!LoadState::Error("x".into()).is_loading());
    }

    #[test]
    fn test_app_state_default_compiles() {
        let state = AppState::default();
        assert!(!state.show_help);
        assert!(state.error_toast.is_none());
        assert_eq!(state.terminal_size, (0, 0));
    }

    #[test]
    fn test_app_state_set_error_and_clear() {
        let mut state = AppState::default();
        state.set_error("db busy".into());
        assert_eq!(state.error_toast.as_deref(), Some("db busy"));

        state.clear_error();
        assert!(state.error_toast.is_none());
    }

    #[test]
    fn test_app_state_has_text_focus() {
        let mut state = AppState::default();
        assert!(!state.has_text_focus());

        state.search.query_focused = true;
        assert!(state.has_text_focus());
    }

    #[test]
    fn test_app_state_blur_text_focus() {
        let mut state = AppState::default();
        state.issue_list.filter_focused = true;
        state.mr_list.filter_focused = true;
        state.search.query_focused = true;
        state.command_palette.query_focused = true;

        state.blur_text_focus();

        assert!(!state.has_text_focus());
        assert!(!state.issue_list.filter_focused);
        assert!(!state.mr_list.filter_focused);
        assert!(!state.search.query_focused);
        assert!(!state.command_palette.query_focused);
    }

    #[test]
    fn test_app_state_set_loading() {
        let mut state = AppState::default();
        state.set_loading(Screen::IssueList, LoadState::Refreshing);
        assert_eq!(
            *state.load_state.get(&Screen::IssueList),
            LoadState::Refreshing
        );
    }

    #[test]
    fn test_screen_intent_variants() {
        let none = ScreenIntent::None;
        let nav = ScreenIntent::Navigate(Screen::IssueList);
        let requery = ScreenIntent::RequeryNeeded(Screen::Search);

        assert_eq!(none, ScreenIntent::None);
        assert_eq!(nav, ScreenIntent::Navigate(Screen::IssueList));
        assert_eq!(requery, ScreenIntent::RequeryNeeded(Screen::Search));
    }

    #[test]
    fn test_scope_context_default() {
        let scope = ScopeContext::default();
        assert!(scope.project_id.is_none());
        assert!(scope.project_name.is_none());
    }
}
|
||||
387
crates/lore-tui/src/state/mr_detail.rs
Normal file
387
crates/lore-tui/src/state/mr_detail.rs
Normal file
@@ -0,0 +1,387 @@
|
||||
#![allow(dead_code)] // Phase 2: consumed by MR Detail screen
|
||||
|
||||
//! Merge request detail screen state.
|
||||
//!
|
||||
//! Holds MR metadata, file changes, discussions, cross-references,
|
||||
//! and UI state. Supports progressive hydration identical to
|
||||
//! Issue Detail: metadata loads first, discussions load async.
|
||||
|
||||
use crate::message::EntityKey;
|
||||
use crate::view::common::cross_ref::{CrossRef, CrossRefState};
|
||||
use crate::view::common::discussion_tree::{DiscussionNode, DiscussionTreeState};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// MrMetadata
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Full metadata for a single merge request, fetched from the local DB.
#[derive(Debug, Clone)]
pub struct MrMetadata {
    /// MR IID (project-scoped).
    pub iid: i64,
    /// Project path (e.g., "group/project").
    pub project_path: String,
    /// MR title.
    pub title: String,
    /// MR description (markdown).
    pub description: String,
    /// Current state: "opened", "merged", "closed", "locked".
    pub state: String,
    /// Whether this is a draft/WIP MR.
    pub draft: bool,
    /// Author username.
    pub author: String,
    /// Assigned usernames.
    pub assignees: Vec<String>,
    /// Reviewer usernames.
    pub reviewers: Vec<String>,
    /// Label names.
    pub labels: Vec<String>,
    /// Source branch name.
    pub source_branch: String,
    /// Target branch name.
    pub target_branch: String,
    /// Detailed merge status (e.g., "mergeable", "checking").
    pub merge_status: String,
    /// Created timestamp (ms epoch).
    pub created_at: i64,
    /// Updated timestamp (ms epoch).
    pub updated_at: i64,
    /// Merged timestamp (ms epoch), if merged.
    pub merged_at: Option<i64>,
    /// GitLab web URL.
    pub web_url: String,
    /// Discussion count (for display before discussions load).
    pub discussion_count: usize,
    /// File change count.
    pub file_change_count: usize,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// FileChange
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A file changed in the merge request.
#[derive(Debug, Clone)]
pub struct FileChange {
    /// Previous file path (if renamed).
    pub old_path: Option<String>,
    /// New/current file path.
    pub new_path: String,
    /// Type of change.
    pub change_type: FileChangeType,
}

/// The type of file change in an MR.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum FileChangeType {
    Added,
    Modified,
    Deleted,
    Renamed,
}

impl FileChangeType {
    /// Short icon for display.
    ///
    /// Returns `&'static str` (the literals live in the binary) instead of
    /// a borrow tied to `self`, so callers can keep the icon after the
    /// value is gone. Widening the lifetime is backward compatible.
    #[must_use]
    pub const fn icon(&self) -> &'static str {
        match self {
            Self::Added => "+",
            Self::Modified => "~",
            Self::Deleted => "-",
            Self::Renamed => "R",
        }
    }

    /// Parse from DB string.
    ///
    /// Recognizes "added", "deleted", and "renamed"; anything else —
    /// including "modified" — falls back to [`Self::Modified`].
    #[must_use]
    pub fn parse_db(s: &str) -> Self {
        match s {
            "added" => Self::Added,
            "deleted" => Self::Deleted,
            "renamed" => Self::Renamed,
            _ => Self::Modified,
        }
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// MrDetailData
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Bundle returned by the metadata fetch action.
///
/// Metadata + cross-refs + file changes load in Phase 1 (fast).
/// Discussions load separately in Phase 2.
#[derive(Debug, Clone)]
pub struct MrDetailData {
    /// MR metadata from the local DB.
    pub metadata: MrMetadata,
    /// Cross-references (related issues/MRs) loaded with metadata.
    pub cross_refs: Vec<CrossRef>,
    /// Files touched by the MR, loaded with metadata.
    pub file_changes: Vec<FileChange>,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// MrTab
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Active tab in the MR detail view.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum MrTab {
    /// Overview: description + cross-refs.
    #[default]
    Overview,
    /// File changes list.
    Files,
    /// Discussions (general + diff).
    Discussions,
}

impl MrTab {
    /// Tab display order; `next`/`prev` walk this ring.
    const CYCLE: [Self; 3] = [Self::Overview, Self::Files, Self::Discussions];

    /// Index of this tab within [`Self::CYCLE`].
    const fn position(self) -> usize {
        match self {
            Self::Overview => 0,
            Self::Files => 1,
            Self::Discussions => 2,
        }
    }

    /// Cycle to the next tab (wraps around).
    #[must_use]
    pub fn next(self) -> Self {
        Self::CYCLE[(self.position() + 1) % Self::CYCLE.len()]
    }

    /// Cycle to the previous tab (wraps around).
    #[must_use]
    pub fn prev(self) -> Self {
        Self::CYCLE[(self.position() + Self::CYCLE.len() - 1) % Self::CYCLE.len()]
    }

    /// Human-readable label.
    #[must_use]
    pub const fn label(&self) -> &str {
        match self {
            Self::Overview => "Overview",
            Self::Files => "Files",
            Self::Discussions => "Discussions",
        }
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// MrDetailState
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// State for the MR detail screen.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct MrDetailState {
|
||||
/// Entity key for the currently displayed MR.
|
||||
pub current_key: Option<EntityKey>,
|
||||
/// MR metadata (Phase 1 load).
|
||||
pub metadata: Option<MrMetadata>,
|
||||
/// File changes (loaded with metadata in Phase 1).
|
||||
pub file_changes: Vec<FileChange>,
|
||||
/// Discussion nodes (Phase 2 async load).
|
||||
pub discussions: Vec<DiscussionNode>,
|
||||
/// Whether discussions have finished loading.
|
||||
pub discussions_loaded: bool,
|
||||
/// Cross-references (loaded with metadata in Phase 1).
|
||||
pub cross_refs: Vec<CrossRef>,
|
||||
/// Discussion tree UI state.
|
||||
pub tree_state: DiscussionTreeState,
|
||||
/// Cross-reference list UI state.
|
||||
pub cross_ref_state: CrossRefState,
|
||||
/// Description scroll offset.
|
||||
pub description_scroll: usize,
|
||||
/// File list selected index.
|
||||
pub file_selected: usize,
|
||||
/// File list scroll offset.
|
||||
pub file_scroll: usize,
|
||||
/// Active tab.
|
||||
pub active_tab: MrTab,
|
||||
}
|
||||
|
||||
impl MrDetailState {
|
||||
/// Reset state for a new MR.
|
||||
pub fn load_new(&mut self, key: EntityKey) {
|
||||
self.current_key = Some(key);
|
||||
self.metadata = None;
|
||||
self.file_changes.clear();
|
||||
self.discussions.clear();
|
||||
self.discussions_loaded = false;
|
||||
self.cross_refs.clear();
|
||||
self.tree_state = DiscussionTreeState::default();
|
||||
self.cross_ref_state = CrossRefState::default();
|
||||
self.description_scroll = 0;
|
||||
self.file_selected = 0;
|
||||
self.file_scroll = 0;
|
||||
self.active_tab = MrTab::Overview;
|
||||
}
|
||||
|
||||
/// Apply Phase 1 data (metadata + cross-refs + file changes).
|
||||
pub fn apply_metadata(&mut self, data: MrDetailData) {
|
||||
self.metadata = Some(data.metadata);
|
||||
self.cross_refs = data.cross_refs;
|
||||
self.file_changes = data.file_changes;
|
||||
}
|
||||
|
||||
/// Apply Phase 2 data (discussions).
|
||||
pub fn apply_discussions(&mut self, discussions: Vec<DiscussionNode>) {
|
||||
self.discussions = discussions;
|
||||
self.discussions_loaded = true;
|
||||
}
|
||||
|
||||
/// Whether we have metadata loaded.
|
||||
#[must_use]
|
||||
pub fn has_metadata(&self) -> bool {
|
||||
self.metadata.is_some()
|
||||
}
|
||||
|
||||
/// Switch to the next tab.
|
||||
pub fn next_tab(&mut self) {
|
||||
self.active_tab = self.active_tab.next();
|
||||
}
|
||||
|
||||
/// Switch to the previous tab.
|
||||
pub fn prev_tab(&mut self) {
|
||||
self.active_tab = self.active_tab.prev();
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    // Unit tests for MrDetailState, MrTab cycling, and FileChangeType.
    use super::*;
    use crate::view::common::cross_ref::CrossRefKind;

    #[test]
    fn test_mr_detail_state_default() {
        let state = MrDetailState::default();
        assert!(state.current_key.is_none());
        assert!(state.metadata.is_none());
        assert!(state.discussions.is_empty());
        assert!(!state.discussions_loaded);
        assert!(state.file_changes.is_empty());
        assert_eq!(state.active_tab, MrTab::Overview);
    }

    #[test]
    fn test_load_new_resets_state() {
        // Start from a dirtied state to prove load_new resets everything.
        let mut state = MrDetailState {
            discussions_loaded: true,
            description_scroll: 10,
            active_tab: MrTab::Files,
            ..MrDetailState::default()
        };

        state.load_new(EntityKey::mr(1, 42));
        assert_eq!(state.current_key, Some(EntityKey::mr(1, 42)));
        assert!(state.metadata.is_none());
        assert!(!state.discussions_loaded);
        assert_eq!(state.description_scroll, 0);
        assert_eq!(state.active_tab, MrTab::Overview);
    }

    #[test]
    fn test_apply_metadata() {
        let mut state = MrDetailState::default();
        state.load_new(EntityKey::mr(1, 42));

        let data = MrDetailData {
            metadata: MrMetadata {
                iid: 42,
                project_path: "group/proj".into(),
                title: "Fix auth".into(),
                description: "MR description".into(),
                state: "opened".into(),
                draft: false,
                author: "alice".into(),
                assignees: vec!["bob".into()],
                reviewers: vec!["carol".into()],
                labels: vec!["backend".into()],
                source_branch: "fix-auth".into(),
                target_branch: "main".into(),
                merge_status: "mergeable".into(),
                created_at: 1_700_000_000_000,
                updated_at: 1_700_000_060_000,
                merged_at: None,
                web_url: "https://gitlab.com/group/proj/-/merge_requests/42".into(),
                discussion_count: 2,
                file_change_count: 3,
            },
            cross_refs: vec![CrossRef {
                kind: CrossRefKind::RelatedIssue,
                entity_key: EntityKey::issue(1, 10),
                label: "Related issue".into(),
                navigable: true,
            }],
            file_changes: vec![FileChange {
                old_path: None,
                new_path: "src/auth.rs".into(),
                change_type: FileChangeType::Modified,
            }],
        };

        state.apply_metadata(data);
        assert!(state.has_metadata());
        assert_eq!(state.metadata.as_ref().unwrap().iid, 42);
        assert_eq!(state.cross_refs.len(), 1);
        assert_eq!(state.file_changes.len(), 1);
    }

    #[test]
    fn test_tab_cycling() {
        let tab = MrTab::Overview;
        assert_eq!(tab.next(), MrTab::Files);
        assert_eq!(tab.next().next(), MrTab::Discussions);
        assert_eq!(tab.next().next().next(), MrTab::Overview);

        assert_eq!(tab.prev(), MrTab::Discussions);
        assert_eq!(tab.prev().prev(), MrTab::Files);
    }

    #[test]
    fn test_tab_labels() {
        assert_eq!(MrTab::Overview.label(), "Overview");
        assert_eq!(MrTab::Files.label(), "Files");
        assert_eq!(MrTab::Discussions.label(), "Discussions");
    }

    #[test]
    fn test_file_change_type_icon() {
        assert_eq!(FileChangeType::Added.icon(), "+");
        assert_eq!(FileChangeType::Modified.icon(), "~");
        assert_eq!(FileChangeType::Deleted.icon(), "-");
        assert_eq!(FileChangeType::Renamed.icon(), "R");
    }

    #[test]
    fn test_file_change_type_parse_db() {
        assert_eq!(FileChangeType::parse_db("added"), FileChangeType::Added);
        assert_eq!(FileChangeType::parse_db("deleted"), FileChangeType::Deleted);
        assert_eq!(FileChangeType::parse_db("renamed"), FileChangeType::Renamed);
        assert_eq!(
            FileChangeType::parse_db("modified"),
            FileChangeType::Modified
        );
        // Unknown values fall back to Modified.
        assert_eq!(
            FileChangeType::parse_db("unknown"),
            FileChangeType::Modified
        );
    }

    #[test]
    fn test_next_prev_tab_on_state() {
        let mut state = MrDetailState::default();
        assert_eq!(state.active_tab, MrTab::Overview);

        state.next_tab();
        assert_eq!(state.active_tab, MrTab::Files);

        state.prev_tab();
        assert_eq!(state.active_tab, MrTab::Overview);
    }
}
|
||||
422
crates/lore-tui/src/state/mr_list.rs
Normal file
422
crates/lore-tui/src/state/mr_list.rs
Normal file
@@ -0,0 +1,422 @@
|
||||
#![allow(dead_code)] // Phase 2: consumed by LoreApp and view/mr_list
|
||||
|
||||
//! Merge request list screen state.
|
||||
//!
|
||||
//! Mirrors the issue list pattern with MR-specific filter fields
|
||||
//! (draft, reviewer, target/source branch). Uses the same keyset
|
||||
//! pagination with snapshot fence for stable ordering.
|
||||
|
||||
use std::hash::{Hash, Hasher};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Cursor (keyset pagination boundary)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Keyset pagination cursor — (updated_at, iid) boundary.
///
/// The pair uniquely orders rows even when several MRs share the same
/// `updated_at` millisecond.
#[derive(Debug, Clone, PartialEq, Eq)]
pub struct MrCursor {
    /// Boundary row's updated timestamp (ms epoch).
    pub updated_at: i64,
    /// Boundary row's project-scoped IID (tie-breaker).
    pub iid: i64,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Filter
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Structured filter for MR list queries.
///
/// `Hash` is derived so [`hash_value`](Self::hash_value) covers every
/// field automatically — a newly added field can no longer be silently
/// missed by filter-change detection.
#[derive(Debug, Clone, Default, PartialEq, Eq, Hash)]
pub struct MrFilter {
    pub state: Option<String>,
    pub author: Option<String>,
    pub reviewer: Option<String>,
    pub target_branch: Option<String>,
    pub source_branch: Option<String>,
    pub label: Option<String>,
    pub draft: Option<bool>,
    pub free_text: Option<String>,
    pub project_id: Option<i64>,
}

impl MrFilter {
    /// Compute a hash over all fields for change detection.
    ///
    /// Uses the derived `Hash` impl, which hashes fields in declaration
    /// order — identical to the previous hand-written enumeration, but
    /// immune to drift when fields are added or reordered together.
    #[must_use]
    pub fn hash_value(&self) -> u64 {
        let mut hasher = std::collections::hash_map::DefaultHasher::new();
        self.hash(&mut hasher);
        hasher.finish()
    }

    /// Whether any filter is active.
    ///
    /// Equivalent to "any field is `Some`": the default filter is all
    /// `None`, so comparing against it stays correct as fields are added.
    #[must_use]
    pub fn is_active(&self) -> bool {
        *self != Self::default()
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Row
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A single row in the MR list.
#[derive(Debug, Clone)]
pub struct MrListRow {
    /// Project path (e.g., "group/project").
    pub project_path: String,
    /// MR IID (project-scoped).
    pub iid: i64,
    /// MR title.
    pub title: String,
    /// Current state (e.g., "opened", "merged").
    pub state: String,
    /// Author username.
    pub author: String,
    /// Target branch name.
    pub target_branch: String,
    /// Label names.
    pub labels: Vec<String>,
    /// Updated timestamp (ms epoch); also the pagination sort key.
    pub updated_at: i64,
    /// Whether this is a draft/WIP MR.
    pub draft: bool,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Page result
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Result from a paginated MR list query.
#[derive(Debug, Clone)]
pub struct MrListPage {
    /// Rows in this page.
    pub rows: Vec<MrListRow>,
    /// Cursor for fetching the next page; `None` when this is the last page.
    pub next_cursor: Option<MrCursor>,
    /// Total count of MRs matching the filter (across all pages).
    pub total_count: u64,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Sort
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Fields available for sorting.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum MrSortField {
    /// Default: most recently updated first (with `MrSortOrder::Desc`).
    #[default]
    UpdatedAt,
    Iid,
    Title,
    State,
    Author,
    TargetBranch,
}

/// Sort direction.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum MrSortOrder {
    /// Default direction: descending.
    #[default]
    Desc,
    Asc,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// MrListState
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// State for the MR list screen.
|
||||
#[derive(Debug, Default)]
|
||||
pub struct MrListState {
|
||||
/// Current page of MR rows.
|
||||
pub rows: Vec<MrListRow>,
|
||||
/// Total count of matching MRs.
|
||||
pub total_count: u64,
|
||||
/// Selected row index (within current window).
|
||||
pub selected_index: usize,
|
||||
/// Scroll offset for the entity table.
|
||||
pub scroll_offset: usize,
|
||||
/// Cursor for the next page.
|
||||
pub next_cursor: Option<MrCursor>,
|
||||
/// Whether a prefetch is in flight.
|
||||
pub prefetch_in_flight: bool,
|
||||
/// Current filter.
|
||||
pub filter: MrFilter,
|
||||
/// Raw filter input text.
|
||||
pub filter_input: String,
|
||||
/// Whether the filter bar has focus.
|
||||
pub filter_focused: bool,
|
||||
/// Sort field.
|
||||
pub sort_field: MrSortField,
|
||||
/// Sort direction.
|
||||
pub sort_order: MrSortOrder,
|
||||
/// Snapshot fence: max updated_at from initial load.
|
||||
pub snapshot_fence: Option<i64>,
|
||||
/// Hash of the current filter for change detection.
|
||||
pub filter_hash: u64,
|
||||
/// Whether Quick Peek is visible.
|
||||
pub peek_visible: bool,
|
||||
}
|
||||
|
||||
impl MrListState {
|
||||
/// Reset pagination state (called when filter changes or on refresh).
|
||||
pub fn reset_pagination(&mut self) {
|
||||
self.rows.clear();
|
||||
self.next_cursor = None;
|
||||
self.selected_index = 0;
|
||||
self.scroll_offset = 0;
|
||||
self.snapshot_fence = None;
|
||||
self.total_count = 0;
|
||||
self.prefetch_in_flight = false;
|
||||
}
|
||||
|
||||
/// Apply a new page of results.
|
||||
pub fn apply_page(&mut self, page: MrListPage) {
|
||||
// Set snapshot fence on first page load.
|
||||
if self.snapshot_fence.is_none() {
|
||||
self.snapshot_fence = page.rows.first().map(|r| r.updated_at);
|
||||
}
|
||||
self.rows.extend(page.rows);
|
||||
self.next_cursor = page.next_cursor;
|
||||
self.total_count = page.total_count;
|
||||
self.prefetch_in_flight = false;
|
||||
}
|
||||
|
||||
/// Check if filter changed and reset if needed.
|
||||
pub fn check_filter_change(&mut self) -> bool {
|
||||
let new_hash = self.filter.hash_value();
|
||||
if new_hash != self.filter_hash {
|
||||
self.filter_hash = new_hash;
|
||||
self.reset_pagination();
|
||||
true
|
||||
} else {
|
||||
false
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether the user has scrolled near the end of current data (80% threshold).
|
||||
pub fn should_prefetch(&self) -> bool {
|
||||
if self.prefetch_in_flight || self.next_cursor.is_none() {
|
||||
return false;
|
||||
}
|
||||
if self.rows.is_empty() {
|
||||
return false;
|
||||
}
|
||||
let threshold = (self.rows.len() * 4) / 5; // 80%
|
||||
self.selected_index >= threshold
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    /// Build a deterministic page of `count` rows.
    ///
    /// Rows are newest-first: IIDs descend from `count` to 1 and
    /// `updated_at` descends one minute per row from a fixed epoch
    /// (1_700_000_000_000 ms). Every third row is a draft. When
    /// `has_next` is true, the cursor points at the last (oldest) row
    /// and `total_count` is doubled to simulate more server-side data.
    fn sample_page(count: usize, has_next: bool) -> MrListPage {
        let rows: Vec<MrListRow> = (0..count)
            .map(|i| MrListRow {
                project_path: "group/project".into(),
                iid: (count - i) as i64,
                title: format!("MR {}", count - i),
                state: "opened".into(),
                author: "taylor".into(),
                target_branch: "main".into(),
                labels: vec![],
                updated_at: 1_700_000_000_000 - (i as i64 * 60_000),
                draft: i % 3 == 0,
            })
            .collect();

        // Keyset cursor mirrors the last row of the page, if more follow.
        let next_cursor = if has_next {
            rows.last().map(|r| MrCursor {
                updated_at: r.updated_at,
                iid: r.iid,
            })
        } else {
            None
        };

        MrListPage {
            rows,
            next_cursor,
            total_count: if has_next {
                (count * 2) as u64
            } else {
                count as u64
            },
        }
    }

    #[test]
    fn test_apply_page_sets_snapshot_fence() {
        let mut state = MrListState::default();
        let page = sample_page(5, false);
        state.apply_page(page);

        assert_eq!(state.rows.len(), 5);
        assert!(state.snapshot_fence.is_some());
        // Fence equals the newest row's updated_at (row i = 0).
        assert_eq!(state.snapshot_fence.unwrap(), 1_700_000_000_000);
    }

    #[test]
    fn test_apply_page_appends() {
        let mut state = MrListState::default();
        state.apply_page(sample_page(5, true));
        assert_eq!(state.rows.len(), 5);

        // Second page extends rather than replaces.
        state.apply_page(sample_page(3, false));
        assert_eq!(state.rows.len(), 8);
    }

    #[test]
    fn test_reset_pagination_clears_state() {
        let mut state = MrListState::default();
        state.apply_page(sample_page(5, true));
        state.selected_index = 3;

        state.reset_pagination();

        // Everything page-related is back to its default.
        assert!(state.rows.is_empty());
        assert_eq!(state.selected_index, 0);
        assert!(state.next_cursor.is_none());
        assert!(state.snapshot_fence.is_none());
    }

    #[test]
    fn test_check_filter_change_detects_change() {
        let mut state = MrListState::default();
        state.filter_hash = state.filter.hash_value();

        // Mutating the filter changes its hash, so a change is reported.
        state.filter.state = Some("opened".into());
        assert!(state.check_filter_change());
    }

    #[test]
    fn test_check_filter_change_no_change() {
        let mut state = MrListState::default();
        state.filter_hash = state.filter.hash_value();
        assert!(!state.check_filter_change());
    }

    #[test]
    fn test_should_prefetch() {
        let mut state = MrListState::default();
        state.apply_page(sample_page(10, true));

        state.selected_index = 4; // 40% -- no prefetch
        assert!(!state.should_prefetch());

        state.selected_index = 8; // 80% -- prefetch
        assert!(state.should_prefetch());
    }

    #[test]
    fn test_should_prefetch_no_next_page() {
        let mut state = MrListState::default();
        // has_next = false -> no cursor -> never prefetch.
        state.apply_page(sample_page(10, false));
        state.selected_index = 9;
        assert!(!state.should_prefetch());
    }

    #[test]
    fn test_should_prefetch_already_in_flight() {
        let mut state = MrListState::default();
        state.apply_page(sample_page(10, true));
        state.selected_index = 9;
        // An in-flight prefetch suppresses another request.
        state.prefetch_in_flight = true;
        assert!(!state.should_prefetch());
    }

    #[test]
    fn test_mr_filter_is_active() {
        let empty = MrFilter::default();
        assert!(!empty.is_active());

        let active = MrFilter {
            state: Some("opened".into()),
            ..Default::default()
        };
        assert!(active.is_active());

        // The draft flag alone also counts as an active filter.
        let draft_active = MrFilter {
            draft: Some(true),
            ..Default::default()
        };
        assert!(draft_active.is_active());
    }

    #[test]
    fn test_mr_filter_hash_deterministic() {
        let f1 = MrFilter {
            state: Some("opened".into()),
            author: Some("taylor".into()),
            ..Default::default()
        };
        let f2 = f1.clone();
        // Equal filters must hash equal (change detection relies on this).
        assert_eq!(f1.hash_value(), f2.hash_value());
    }

    #[test]
    fn test_mr_filter_hash_differs() {
        let f1 = MrFilter {
            state: Some("opened".into()),
            ..Default::default()
        };
        let f2 = MrFilter {
            state: Some("merged".into()),
            ..Default::default()
        };
        assert_ne!(f1.hash_value(), f2.hash_value());
    }

    #[test]
    fn test_snapshot_fence_not_overwritten_on_second_page() {
        let mut state = MrListState::default();
        state.apply_page(sample_page(5, true));
        let fence = state.snapshot_fence;

        state.apply_page(sample_page(3, false));
        assert_eq!(
            state.snapshot_fence, fence,
            "Fence should not change on second page"
        );
    }

    #[test]
    fn test_mr_filter_reviewer_field() {
        let f = MrFilter {
            reviewer: Some("alice".into()),
            ..Default::default()
        };
        assert!(f.is_active());
        // Reviewer must participate in the hash, else changes go unnoticed.
        assert_ne!(f.hash_value(), MrFilter::default().hash_value());
    }

    #[test]
    fn test_mr_filter_target_branch_field() {
        let f = MrFilter {
            target_branch: Some("main".into()),
            ..Default::default()
        };
        assert!(f.is_active());
    }

    #[test]
    fn test_mr_list_row_draft_field() {
        let row = MrListRow {
            project_path: "g/p".into(),
            iid: 1,
            title: "Draft MR".into(),
            state: "opened".into(),
            author: "taylor".into(),
            target_branch: "main".into(),
            labels: vec![],
            updated_at: 0,
            draft: true,
        };
        assert!(row.draft);
    }
}
|
||||
14
crates/lore-tui/src/state/search.rs
Normal file
14
crates/lore-tui/src/state/search.rs
Normal file
@@ -0,0 +1,14 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
//! Search screen state.
|
||||
|
||||
use crate::message::SearchResult;
|
||||
|
||||
/// State for the search screen.
#[derive(Debug, Default)]
pub struct SearchState {
    /// Current search query text.
    pub query: String,
    /// Whether the query input has keyboard focus.
    pub query_focused: bool,
    /// Results for the most recent query.
    pub results: Vec<SearchResult>,
    /// Index of the currently highlighted result.
    pub selected_index: usize,
}
|
||||
15
crates/lore-tui/src/state/sync.rs
Normal file
15
crates/lore-tui/src/state/sync.rs
Normal file
@@ -0,0 +1,15 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
//! Sync screen state.
|
||||
|
||||
/// State for the sync progress/summary screen.
#[derive(Debug, Default)]
pub struct SyncState {
    /// Name of the current sync stage.
    pub stage: String,
    /// Units completed within the current stage.
    pub current: u64,
    /// Total units expected for the current stage.
    pub total: u64,
    /// Log lines accumulated while syncing.
    pub log_lines: Vec<String>,
    /// True once the sync has finished.
    pub completed: bool,
    /// Total elapsed time in milliseconds, once known.
    pub elapsed_ms: Option<u64>,
    /// Error message if the sync failed.
    pub error: Option<String>,
}
|
||||
12
crates/lore-tui/src/state/timeline.rs
Normal file
12
crates/lore-tui/src/state/timeline.rs
Normal file
@@ -0,0 +1,12 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
//! Timeline screen state.
|
||||
|
||||
use crate::message::TimelineEvent;
|
||||
|
||||
/// State for the timeline screen.
#[derive(Debug, Default)]
pub struct TimelineState {
    /// Events to display.
    pub events: Vec<TimelineEvent>,
    /// Vertical scroll offset, in terminal rows.
    pub scroll_offset: u16,
}
|
||||
12
crates/lore-tui/src/state/who.rs
Normal file
12
crates/lore-tui/src/state/who.rs
Normal file
@@ -0,0 +1,12 @@
|
||||
#![allow(dead_code)]
|
||||
|
||||
//! Who (people intelligence) screen state.
|
||||
|
||||
use crate::message::WhoResult;
|
||||
|
||||
/// State for the who/people screen.
#[derive(Debug, Default)]
pub struct WhoState {
    /// Loaded result to display, if a query has completed.
    pub result: Option<WhoResult>,
    /// Vertical scroll offset, in terminal rows.
    pub scroll_offset: u16,
}
|
||||
380
crates/lore-tui/src/task_supervisor.rs
Normal file
380
crates/lore-tui/src/task_supervisor.rs
Normal file
@@ -0,0 +1,380 @@
|
||||
#![allow(dead_code)] // Phase 1: consumed by LoreApp in bd-6pmy
|
||||
|
||||
//! Centralized background task management with dedup and cancellation.
|
||||
//!
|
||||
//! All background work (DB queries, sync, search) flows through
|
||||
//! [`TaskSupervisor`]. Submitting a task with a key that already has an
|
||||
//! active handle cancels the previous task via its [`CancelToken`] and
|
||||
//! bumps the generation counter.
|
||||
//!
|
||||
//! Generation IDs enable stale-result detection: when an async result
|
||||
//! arrives, [`is_current`] checks whether the result's generation
|
||||
//! matches the latest submission for that key.
|
||||
|
||||
use std::collections::HashMap;
|
||||
use std::sync::Arc;
|
||||
use std::sync::atomic::{AtomicBool, AtomicU64, Ordering};
|
||||
|
||||
use crate::message::Screen;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// TaskKey
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Deduplication key for background tasks.
///
/// Two tasks with the same key cannot run concurrently — submitting a
/// new task with an existing key cancels the previous one.
#[derive(Debug, Clone, PartialEq, Eq, Hash)]
pub enum TaskKey {
    /// Load data for a specific screen.
    LoadScreen(Screen),
    /// Global search query.
    Search,
    /// Sync stream (only one at a time).
    SyncStream,
    /// Re-query after filter change on a specific screen.
    ///
    /// Kept distinct from [`TaskKey::LoadScreen`] so a filter re-query
    /// and an initial load do not cancel each other's dedup slot.
    FilterRequery(Screen),
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// TaskPriority
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Priority levels for task scheduling.
///
/// Lower numeric value = higher priority; the derived `Ord` follows the
/// explicit discriminants below.
#[derive(Debug, Clone, Copy, PartialEq, Eq, PartialOrd, Ord)]
pub enum TaskPriority {
    /// User-initiated input (highest priority).
    Input = 0,
    /// Navigation-triggered data load.
    Navigation = 1,
    /// Background refresh / prefetch (lowest priority).
    Background = 2,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// CancelToken
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Thread-safe cooperative cancellation flag.
///
/// Background tasks poll [`is_cancelled`] periodically and exit early
/// when it returns `true`. The flag is one-way: once set it is never
/// cleared.
#[derive(Debug, Default)]
pub struct CancelToken {
    // AtomicBool::default() is `false`, so the derived Default yields a
    // non-cancelled token.
    flag: AtomicBool,
}

impl CancelToken {
    /// Create a new, non-cancelled token.
    #[must_use]
    pub fn new() -> Self {
        Self::default()
    }

    /// Signal cancellation.
    ///
    /// Relaxed ordering suffices: the flag carries no data dependencies,
    /// it is only a stop signal polled by the task.
    pub fn cancel(&self) {
        self.flag.store(true, Ordering::Relaxed);
    }

    /// Check whether cancellation has been requested.
    #[must_use]
    pub fn is_cancelled(&self) -> bool {
        self.flag.load(Ordering::Relaxed)
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// InterruptHandle
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Opaque handle for interrupting a rusqlite operation.
///
/// Wraps the rusqlite `InterruptHandle` so the supervisor can cancel
/// long-running queries. This is only set for tasks that lease a reader
/// connection from [`DbManager`](crate::db::DbManager).
pub struct InterruptHandle {
    handle: rusqlite::InterruptHandle,
}

// Manual Debug: the inner rusqlite handle is opaque, so we show only the
// type name (non-exhaustive) rather than deriving.
impl std::fmt::Debug for InterruptHandle {
    fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {
        f.debug_struct("InterruptHandle").finish_non_exhaustive()
    }
}

impl InterruptHandle {
    /// Wrap a rusqlite interrupt handle.
    #[must_use]
    pub fn new(handle: rusqlite::InterruptHandle) -> Self {
        Self { handle }
    }

    /// Interrupt the associated SQLite operation.
    ///
    /// Delegates to rusqlite; safe to call regardless of whether a
    /// query is currently running on the connection.
    pub fn interrupt(&self) {
        self.handle.interrupt();
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// TaskHandle
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Handle returned when a task is submitted.
///
/// Callers use this to pass the generation ID into async work so
/// results can be tagged and checked for staleness.
#[derive(Debug)]
pub struct TaskHandle {
    /// Dedup key for this task.
    pub key: TaskKey,
    /// Monotonically increasing generation for stale detection.
    pub generation: u64,
    /// Cooperative cancellation token (shared with the supervisor).
    pub cancel: Arc<CancelToken>,
    /// Optional SQLite interrupt handle for long queries.
    pub interrupt: Option<InterruptHandle>,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// TaskSupervisor
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Manages background tasks with deduplication and cancellation.
|
||||
///
|
||||
/// Only one task per [`TaskKey`] can be active. Submitting a new task
|
||||
/// with an existing key cancels the previous one (via its cancel token
|
||||
/// and optional interrupt handle) before registering the new handle.
|
||||
pub struct TaskSupervisor {
|
||||
active: HashMap<TaskKey, TaskHandle>,
|
||||
next_generation: AtomicU64,
|
||||
}
|
||||
|
||||
impl TaskSupervisor {
|
||||
/// Create a new supervisor with no active tasks.
|
||||
#[must_use]
|
||||
pub fn new() -> Self {
|
||||
Self {
|
||||
active: HashMap::new(),
|
||||
next_generation: AtomicU64::new(1),
|
||||
}
|
||||
}
|
||||
|
||||
/// Submit a new task, cancelling any existing task with the same key.
|
||||
///
|
||||
/// Returns a [`TaskHandle`] with a fresh generation ID and a shared
|
||||
/// cancel token. The caller clones the `Arc<CancelToken>` and passes
|
||||
/// it into the async work.
|
||||
pub fn submit(&mut self, key: TaskKey) -> &TaskHandle {
|
||||
// Cancel existing task with this key, if any.
|
||||
if let Some(old) = self.active.remove(&key) {
|
||||
old.cancel.cancel();
|
||||
if let Some(interrupt) = &old.interrupt {
|
||||
interrupt.interrupt();
|
||||
}
|
||||
}
|
||||
|
||||
let generation = self.next_generation.fetch_add(1, Ordering::Relaxed);
|
||||
let cancel = Arc::new(CancelToken::new());
|
||||
|
||||
let handle = TaskHandle {
|
||||
key: key.clone(),
|
||||
generation,
|
||||
cancel,
|
||||
interrupt: None,
|
||||
};
|
||||
|
||||
self.active.insert(key.clone(), handle);
|
||||
self.active.get(&key).expect("just inserted")
|
||||
}
|
||||
|
||||
/// Check whether a generation is current for a given key.
|
||||
///
|
||||
/// Returns `true` only if the key has an active handle with the
|
||||
/// specified generation.
|
||||
#[must_use]
|
||||
pub fn is_current(&self, key: &TaskKey, generation: u64) -> bool {
|
||||
self.active
|
||||
.get(key)
|
||||
.is_some_and(|h| h.generation == generation)
|
||||
}
|
||||
|
||||
/// Mark a task as complete, removing its handle.
|
||||
///
|
||||
/// Only removes the handle if the generation matches the active one.
|
||||
/// This prevents a late-arriving completion from removing a newer
|
||||
/// task's handle.
|
||||
pub fn complete(&mut self, key: &TaskKey, generation: u64) {
|
||||
if self.is_current(key, generation) {
|
||||
self.active.remove(key);
|
||||
}
|
||||
}
|
||||
|
||||
/// Cancel all active tasks.
|
||||
///
|
||||
/// Used during shutdown to ensure background work stops promptly.
|
||||
pub fn cancel_all(&mut self) {
|
||||
for (_, handle) in self.active.drain() {
|
||||
handle.cancel.cancel();
|
||||
if let Some(interrupt) = &handle.interrupt {
|
||||
interrupt.interrupt();
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Number of currently active tasks.
|
||||
#[must_use]
|
||||
pub fn active_count(&self) -> usize {
|
||||
self.active.len()
|
||||
}
|
||||
}
|
||||
|
||||
impl Default for TaskSupervisor {
|
||||
fn default() -> Self {
|
||||
Self::new()
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_submit_cancels_previous() {
        let mut sup = TaskSupervisor::new();

        let gen1 = sup.submit(TaskKey::Search).generation;
        // Keep a clone of the first task's token to observe cancellation.
        let cancel1 = sup.active.get(&TaskKey::Search).unwrap().cancel.clone();

        let gen2 = sup.submit(TaskKey::Search).generation;

        // First task's token should be cancelled.
        assert!(cancel1.is_cancelled());
        // Second task should have a different (higher) generation.
        assert!(gen2 > gen1);
        // Only one active task for this key.
        assert_eq!(sup.active_count(), 1);
    }

    #[test]
    fn test_is_current_after_supersede() {
        let mut sup = TaskSupervisor::new();

        let gen1 = sup.submit(TaskKey::Search).generation;
        let gen2 = sup.submit(TaskKey::Search).generation;

        // Only the newest submission for a key is "current".
        assert!(!sup.is_current(&TaskKey::Search, gen1));
        assert!(sup.is_current(&TaskKey::Search, gen2));
    }

    #[test]
    fn test_complete_removes_handle() {
        let mut sup = TaskSupervisor::new();
        let generation = sup.submit(TaskKey::Search).generation;

        assert_eq!(sup.active_count(), 1);
        sup.complete(&TaskKey::Search, generation);
        assert_eq!(sup.active_count(), 0);
    }

    #[test]
    fn test_complete_ignores_stale() {
        let mut sup = TaskSupervisor::new();

        let gen1 = sup.submit(TaskKey::Search).generation;
        let gen2 = sup.submit(TaskKey::Search).generation;

        // Completing with old generation should NOT remove the newer handle.
        sup.complete(&TaskKey::Search, gen1);
        assert_eq!(sup.active_count(), 1);
        assert!(sup.is_current(&TaskKey::Search, gen2));
    }

    #[test]
    fn test_generation_monotonic() {
        let mut sup = TaskSupervisor::new();

        // Generations increase across keys, not per key.
        let g1 = sup.submit(TaskKey::Search).generation;
        let g2 = sup.submit(TaskKey::SyncStream).generation;
        let g3 = sup.submit(TaskKey::Search).generation;

        assert!(g1 < g2);
        assert!(g2 < g3);
    }

    #[test]
    fn test_different_keys_coexist() {
        let mut sup = TaskSupervisor::new();

        sup.submit(TaskKey::Search);
        sup.submit(TaskKey::SyncStream);
        sup.submit(TaskKey::LoadScreen(Screen::Dashboard));

        assert_eq!(sup.active_count(), 3);
    }

    #[test]
    fn test_cancel_all() {
        let mut sup = TaskSupervisor::new();

        // Hold token clones so cancellation is observable after drain.
        let cancel_search = {
            sup.submit(TaskKey::Search);
            sup.active.get(&TaskKey::Search).unwrap().cancel.clone()
        };
        let cancel_sync = {
            sup.submit(TaskKey::SyncStream);
            sup.active.get(&TaskKey::SyncStream).unwrap().cancel.clone()
        };

        sup.cancel_all();

        assert!(cancel_search.is_cancelled());
        assert!(cancel_sync.is_cancelled());
        assert_eq!(sup.active_count(), 0);
    }

    #[test]
    fn test_cancel_token_default_is_not_cancelled() {
        let token = CancelToken::new();
        assert!(!token.is_cancelled());
        token.cancel();
        assert!(token.is_cancelled());
    }

    #[test]
    fn test_cancel_token_is_send_sync() {
        // Compile-time check: tokens must cross thread boundaries.
        fn assert_send_sync<T: Send + Sync>() {}
        assert_send_sync::<CancelToken>();
        assert_send_sync::<Arc<CancelToken>>();
    }

    #[test]
    fn test_task_supervisor_default() {
        let sup = TaskSupervisor::default();
        assert_eq!(sup.active_count(), 0);
    }

    #[test]
    fn test_filter_requery_key_distinct_per_screen() {
        let mut sup = TaskSupervisor::new();

        // Same variant, different screens: must not dedup each other.
        sup.submit(TaskKey::FilterRequery(Screen::IssueList));
        sup.submit(TaskKey::FilterRequery(Screen::MrList));

        assert_eq!(sup.active_count(), 2);
    }
}
|
||||
251
crates/lore-tui/src/theme.rs
Normal file
251
crates/lore-tui/src/theme.rs
Normal file
@@ -0,0 +1,251 @@
|
||||
#![allow(dead_code)] // Phase 0: types defined now, consumed in Phase 1+
|
||||
|
||||
//! Flexoki-based theme for the lore TUI.
|
||||
//!
|
||||
//! Uses FrankenTUI's `AdaptiveColor::adaptive(light, dark)` for automatic
|
||||
//! light/dark mode switching. The palette is [Flexoki](https://stephango.com/flexoki)
|
||||
//! by Steph Ango, designed in Oklab perceptual color space for balanced contrast.
|
||||
|
||||
use ftui::{AdaptiveColor, Color, PackedRgba, Style, Theme};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Flexoki palette constants
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Base tones — Flexoki neutral ramp, lightest (Paper) to darkest (Black).
const PAPER: Color = Color::rgb(0xFF, 0xFC, 0xF0);
const BASE_50: Color = Color::rgb(0xF2, 0xF0, 0xE5);
const BASE_100: Color = Color::rgb(0xE6, 0xE4, 0xD9);
const BASE_200: Color = Color::rgb(0xCE, 0xCD, 0xC3);
const BASE_300: Color = Color::rgb(0xB7, 0xB5, 0xAC);
const BASE_400: Color = Color::rgb(0x9F, 0x9D, 0x96);
const BASE_500: Color = Color::rgb(0x87, 0x85, 0x80);
const BASE_600: Color = Color::rgb(0x6F, 0x6E, 0x69);
const BASE_700: Color = Color::rgb(0x57, 0x56, 0x53);
const BASE_800: Color = Color::rgb(0x40, 0x3E, 0x3C);
const BASE_850: Color = Color::rgb(0x34, 0x33, 0x31);
const BASE_900: Color = Color::rgb(0x28, 0x27, 0x26);
const BLACK: Color = Color::rgb(0x10, 0x0F, 0x0F);

// Accent colors — light-600 (for light mode)
const RED_600: Color = Color::rgb(0xAF, 0x30, 0x29);
const ORANGE_600: Color = Color::rgb(0xBC, 0x52, 0x15);
const YELLOW_600: Color = Color::rgb(0xAD, 0x83, 0x01);
const GREEN_600: Color = Color::rgb(0x66, 0x80, 0x0B);
const CYAN_600: Color = Color::rgb(0x24, 0x83, 0x7B);
const BLUE_600: Color = Color::rgb(0x20, 0x5E, 0xA6);
const PURPLE_600: Color = Color::rgb(0x5E, 0x40, 0x9D);

// Accent colors — dark-400 (for dark mode)
const RED_400: Color = Color::rgb(0xD1, 0x4D, 0x41);
const ORANGE_400: Color = Color::rgb(0xDA, 0x70, 0x2C);
const YELLOW_400: Color = Color::rgb(0xD0, 0xA2, 0x15);
const GREEN_400: Color = Color::rgb(0x87, 0x9A, 0x39);
const CYAN_400: Color = Color::rgb(0x3A, 0xA9, 0x9F);
const BLUE_400: Color = Color::rgb(0x43, 0x85, 0xBE);
const PURPLE_400: Color = Color::rgb(0x8B, 0x7E, 0xC8);
const MAGENTA_400: Color = Color::rgb(0xCE, 0x5D, 0x97);

// Muted fallback as PackedRgba (for Style::fg) — same RGB as BASE_500.
const MUTED_PACKED: PackedRgba = PackedRgba::rgb(0x87, 0x85, 0x80);
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// build_theme
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Build the lore TUI theme with Flexoki adaptive colors.
///
/// Each of the 19 semantic slots gets an `AdaptiveColor::adaptive(light, dark)`
/// pair. FrankenTUI detects the terminal background and resolves accordingly.
#[must_use]
pub fn build_theme() -> Theme {
    Theme::builder()
        // Brand / accent slots
        .primary(AdaptiveColor::adaptive(BLUE_600, BLUE_400))
        .secondary(AdaptiveColor::adaptive(CYAN_600, CYAN_400))
        .accent(AdaptiveColor::adaptive(PURPLE_600, PURPLE_400))
        // Surfaces: Paper/Black backgrounds with near-by base tones
        .background(AdaptiveColor::adaptive(PAPER, BLACK))
        .surface(AdaptiveColor::adaptive(BASE_50, BASE_900))
        .overlay(AdaptiveColor::adaptive(BASE_100, BASE_850))
        // Text hierarchy; BASE_500 is the mid-ramp tone usable in both modes
        .text(AdaptiveColor::adaptive(BASE_700, BASE_200))
        .text_muted(AdaptiveColor::adaptive(BASE_500, BASE_500))
        .text_subtle(AdaptiveColor::adaptive(BASE_400, BASE_600))
        // Status colors
        .success(AdaptiveColor::adaptive(GREEN_600, GREEN_400))
        .warning(AdaptiveColor::adaptive(YELLOW_600, YELLOW_400))
        .error(AdaptiveColor::adaptive(RED_600, RED_400))
        .info(AdaptiveColor::adaptive(BLUE_600, BLUE_400))
        // Chrome: borders, selection, scrollbar
        .border(AdaptiveColor::adaptive(BASE_300, BASE_700))
        .border_focused(AdaptiveColor::adaptive(BLUE_600, BLUE_400))
        .selection_bg(AdaptiveColor::adaptive(BASE_100, BASE_800))
        .selection_fg(AdaptiveColor::adaptive(BASE_700, BASE_100))
        .scrollbar_track(AdaptiveColor::adaptive(BASE_50, BASE_900))
        .scrollbar_thumb(AdaptiveColor::adaptive(BASE_300, BASE_700))
        .build()
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// State colors
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Map a GitLab entity state to a display color.
|
||||
///
|
||||
/// Returns fixed (non-adaptive) colors — state indicators should be
|
||||
/// consistent regardless of light/dark mode.
|
||||
#[must_use]
|
||||
pub fn state_color(state: &str) -> Color {
|
||||
match state {
|
||||
"opened" => GREEN_400,
|
||||
"closed" => RED_400,
|
||||
"merged" => PURPLE_400,
|
||||
"locked" => YELLOW_400,
|
||||
_ => BASE_500,
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Event type colors
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Map a timeline event type to a display color.
|
||||
#[must_use]
|
||||
pub fn event_color(event_type: &str) -> Color {
|
||||
match event_type {
|
||||
"created" => GREEN_400,
|
||||
"updated" => BLUE_400,
|
||||
"closed" => RED_400,
|
||||
"merged" => PURPLE_400,
|
||||
"commented" => CYAN_400,
|
||||
"labeled" => ORANGE_400,
|
||||
"milestoned" => YELLOW_400,
|
||||
_ => BASE_500,
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Label styling
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Convert a GitLab label hex color (e.g., "#FF0000" or "FF0000") to a Style.
|
||||
///
|
||||
/// Falls back to muted text color if the hex string is invalid.
|
||||
#[must_use]
|
||||
pub fn label_style(hex_color: &str) -> Style {
|
||||
let packed = parse_hex_to_packed(hex_color).unwrap_or(MUTED_PACKED);
|
||||
Style::default().fg(packed)
|
||||
}
|
||||
|
||||
/// Parse a hex color string like "#RRGGBB" or "RRGGBB" into a `PackedRgba`.
|
||||
fn parse_hex_to_packed(s: &str) -> Option<PackedRgba> {
|
||||
let hex = s.strip_prefix('#').unwrap_or(s);
|
||||
if hex.len() != 6 {
|
||||
return None;
|
||||
}
|
||||
let r = u8::from_str_radix(&hex[0..2], 16).ok()?;
|
||||
let g = u8::from_str_radix(&hex[2..4], 16).ok()?;
|
||||
let b = u8::from_str_radix(&hex[4..6], 16).ok()?;
|
||||
Some(PackedRgba::rgb(r, g, b))
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;

    #[test]
    fn test_build_theme_compiles() {
        let theme = build_theme();
        // Resolve for dark mode — primary should be Blue-400
        let resolved = theme.resolve(true);
        assert_eq!(resolved.primary, BLUE_400);
    }

    #[test]
    fn test_build_theme_light_mode() {
        let theme = build_theme();
        // Light mode resolves to the light-600 accent.
        let resolved = theme.resolve(false);
        assert_eq!(resolved.primary, BLUE_600);
    }

    #[test]
    fn test_build_theme_all_slots_differ_between_modes() {
        let theme = build_theme();
        let dark = theme.resolve(true);
        let light = theme.resolve(false);
        // Background should differ (Paper vs Black)
        assert_ne!(dark.background, light.background);
        // Text should differ
        assert_ne!(dark.text, light.text);
    }

    #[test]
    fn test_state_color_opened_is_green() {
        assert_eq!(state_color("opened"), GREEN_400);
    }

    #[test]
    fn test_state_color_closed_is_red() {
        assert_eq!(state_color("closed"), RED_400);
    }

    #[test]
    fn test_state_color_merged_is_purple() {
        assert_eq!(state_color("merged"), PURPLE_400);
    }

    #[test]
    fn test_state_color_unknown_returns_muted() {
        assert_eq!(state_color("unknown"), BASE_500);
    }

    #[test]
    fn test_event_color_created_is_green() {
        assert_eq!(event_color("created"), GREEN_400);
    }

    #[test]
    fn test_event_color_unknown_returns_muted() {
        assert_eq!(event_color("whatever"), BASE_500);
    }

    #[test]
    fn test_label_style_valid_hex_with_hash() {
        let style = label_style("#FF0000");
        assert_eq!(style.fg, Some(PackedRgba::rgb(0xFF, 0x00, 0x00)));
    }

    #[test]
    fn test_label_style_valid_hex_without_hash() {
        // The leading '#' is optional.
        let style = label_style("00FF00");
        assert_eq!(style.fg, Some(PackedRgba::rgb(0x00, 0xFF, 0x00)));
    }

    #[test]
    fn test_label_style_lowercase_hex() {
        let style = label_style("#ff0000");
        assert_eq!(style.fg, Some(PackedRgba::rgb(0xFF, 0x00, 0x00)));
    }

    #[test]
    fn test_label_style_invalid_hex_fallback() {
        // Invalid input falls back to the muted color, never panics.
        let style = label_style("invalid");
        assert_eq!(style.fg, Some(MUTED_PACKED));
    }

    #[test]
    fn test_label_style_empty_fallback() {
        let style = label_style("");
        assert_eq!(style.fg, Some(MUTED_PACKED));
    }

    #[test]
    fn test_parse_hex_short_string() {
        // Shorthand 3-digit colors are not supported.
        assert!(parse_hex_to_packed("#FFF").is_none());
    }

    #[test]
    fn test_parse_hex_non_hex_chars() {
        assert!(parse_hex_to_packed("#GGHHII").is_none());
    }
}
|
||||
208
crates/lore-tui/src/view/common/breadcrumb.rs
Normal file
208
crates/lore-tui/src/view/common/breadcrumb.rs
Normal file
@@ -0,0 +1,208 @@
|
||||
//! Navigation breadcrumb trail ("Dashboard > Issues > #42").
|
||||
|
||||
use ftui::core::geometry::Rect;
|
||||
use ftui::render::cell::{Cell, PackedRgba};
|
||||
use ftui::render::drawing::Draw;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
use crate::navigation::NavigationStack;
|
||||
|
||||
/// Render the navigation breadcrumb trail.
///
/// Shows "Dashboard > Issues > Issue" with " > " separators. When the
/// trail exceeds the available width, entries are truncated from the left
/// with a leading "...". Separators render in `muted_color`, crumb text
/// in `text_color`. Does nothing on a zero-height or sub-3-column area.
pub fn render_breadcrumb(
    frame: &mut Frame<'_>,
    area: Rect,
    nav: &NavigationStack,
    text_color: PackedRgba,
    muted_color: PackedRgba,
) {
    // Need at least one row and room for the "..." ellipsis.
    if area.height == 0 || area.width < 3 {
        return;
    }

    let crumbs = nav.breadcrumbs();
    let separator = " > ";

    // Build the full breadcrumb string and calculate width.
    // NOTE(review): width is compared in bytes (`len()`), not display
    // columns — non-ASCII crumb text may truncate early or late. Confirm
    // crumbs are effectively ASCII or switch to a display-width measure.
    let full: String = crumbs.join(separator);
    let max_width = area.width as usize;

    let display = if full.len() <= max_width {
        full
    } else {
        // Truncate from the left: show "... > last_crumbs"
        truncate_breadcrumb_left(&crumbs, separator, max_width)
    };

    // Cells carrying only a foreground color; background stays default.
    let base = Cell {
        fg: text_color,
        ..Cell::default()
    };
    let muted = Cell {
        fg: muted_color,
        ..Cell::default()
    };

    // Render each segment with separators in muted color.
    let mut x = area.x;
    let max_x = area.x.saturating_add(area.width);

    if let Some(rest) = display.strip_prefix("...") {
        // Render ellipsis in muted, then the rest
        x = frame.print_text_clipped(x, area.y, "...", muted, max_x);
        if !rest.is_empty() {
            render_crumb_segments(frame, x, area.y, rest, separator, base, muted, max_x);
        }
    } else {
        render_crumb_segments(frame, x, area.y, &display, separator, base, muted, max_x);
    }
}
|
||||
|
||||
/// Render breadcrumb text with separators in muted color.
///
/// Splits `text` on `separator`, printing each crumb with the `base`
/// cell style and each separator with the `muted` style, clipping at
/// `max_x`. Stops early once the cursor reaches `max_x`.
#[allow(clippy::too_many_arguments)]
fn render_crumb_segments(
    frame: &mut Frame<'_>,
    start_x: u16,
    y: u16,
    text: &str,
    separator: &str,
    base: Cell,
    muted: Cell,
    max_x: u16,
) {
    let mut x = start_x;
    let parts: Vec<&str> = text.split(separator).collect();

    for (i, part) in parts.iter().enumerate() {
        // Re-insert the separator between parts (split consumed it).
        if i > 0 {
            x = frame.print_text_clipped(x, y, separator, muted, max_x);
        }
        x = frame.print_text_clipped(x, y, part, base, max_x);
        // Out of horizontal room — remaining parts would be invisible.
        if x >= max_x {
            break;
        }
    }
}
|
||||
|
||||
/// Truncate breadcrumb from the left so it fits within `max_width`.
///
/// Prefers "...<sep><tail crumbs>", then "...<last crumb>", and finally a
/// bare "..." on truly tiny terminals.
fn truncate_breadcrumb_left(crumbs: &[&str], separator: &str, max_width: usize) -> String {
    const ELLIPSIS: &str = "...";

    // Drop leading crumbs one at a time until "...<sep><tail>" fits.
    let fitted = (1..crumbs.len()).find_map(|skip| {
        let tail = crumbs[skip..].join(separator);
        let candidate = format!("{ELLIPSIS}{separator}{tail}");
        (candidate.len() <= max_width).then_some(candidate)
    });
    if let Some(trail) = fitted {
        return trail;
    }

    // Last resort: just the current screen, prefixed with the ellipsis.
    let last = crumbs.last().copied().unwrap_or("");
    if ELLIPSIS.len() + last.len() <= max_width {
        format!("{ELLIPSIS}{last}")
    } else {
        // Truly tiny terminal: the ellipsis alone.
        ELLIPSIS.to_string()
    }
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::message::Screen;
    use crate::navigation::NavigationStack;
    use ftui::render::grapheme_pool::GraphemePool;

    // Builds a pool-backed Frame of the given size and runs the body with it.
    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    // Stand-in theme colors for the text / muted roles.
    fn white() -> PackedRgba {
        PackedRgba::rgb(0xFF, 0xFF, 0xFF)
    }

    fn gray() -> PackedRgba {
        PackedRgba::rgb(0x80, 0x80, 0x80)
    }

    // A fresh stack renders a single crumb starting with 'D' ("Dashboard",
    // per NavigationStack::breadcrumbs).
    #[test]
    fn test_breadcrumb_single_screen() {
        with_frame!(80, 1, |frame| {
            let nav = NavigationStack::new();
            render_breadcrumb(&mut frame, Rect::new(0, 0, 80, 1), &nav, white(), gray());

            let cell = frame.buffer.get(0, 0).unwrap();
            assert!(
                cell.content.as_char() == Some('D'),
                "Expected 'D' at (0,0), got {:?}",
                cell.content.as_char()
            );
        });
    }

    #[test]
    fn test_breadcrumb_multi_screen() {
        with_frame!(80, 1, |frame| {
            let mut nav = NavigationStack::new();
            nav.push(Screen::IssueList);
            render_breadcrumb(&mut frame, Rect::new(0, 0, 80, 1), &nav, white(), gray());

            let d = frame.buffer.get(0, 0).unwrap();
            assert_eq!(d.content.as_char(), Some('D'));

            // "Dashboard > Issues" = 'I' at 12
            let i_cell = frame.buffer.get(12, 0).unwrap();
            assert_eq!(i_cell.content.as_char(), Some('I'));
        });
    }

    // Truncation keeps the rightmost crumbs and stays within the budget.
    #[test]
    fn test_breadcrumb_truncation() {
        let crumbs = vec!["Dashboard", "Issues", "Issue"];
        let result = truncate_breadcrumb_left(&crumbs, " > ", 20);
        assert!(
            result.starts_with("..."),
            "Expected ellipsis prefix, got: {result}"
        );
        assert!(result.len() <= 20, "Result too long: {result}");
    }

    // Degenerate area: must be a no-op, not a panic.
    #[test]
    fn test_breadcrumb_zero_height_noop() {
        with_frame!(80, 1, |frame| {
            let nav = NavigationStack::new();
            render_breadcrumb(&mut frame, Rect::new(0, 0, 80, 0), &nav, white(), gray());
        });
    }

    #[test]
    fn test_truncate_breadcrumb_fits() {
        let crumbs = vec!["A", "B"];
        let result = truncate_breadcrumb_left(&crumbs, " > ", 100);
        assert!(result.contains("..."), "Should always add ellipsis");
    }

    #[test]
    fn test_truncate_breadcrumb_single_entry() {
        let crumbs = vec!["Dashboard"];
        let result = truncate_breadcrumb_left(&crumbs, " > ", 5);
        assert_eq!(result, "...");
    }

    #[test]
    fn test_truncate_breadcrumb_shows_last_entries() {
        let crumbs = vec!["Dashboard", "Issues", "Issue Detail"];
        let result = truncate_breadcrumb_left(&crumbs, " > ", 30);
        assert!(result.starts_with("..."));
        assert!(result.contains("Issue Detail"));
    }
}
|
||||
410
crates/lore-tui/src/view/common/cross_ref.rs
Normal file
410
crates/lore-tui/src/view/common/cross_ref.rs
Normal file
@@ -0,0 +1,410 @@
|
||||
#![allow(dead_code)] // Phase 2: consumed by Issue Detail + MR Detail screens
|
||||
|
||||
//! Cross-reference widget for entity detail screens.
|
||||
//!
|
||||
//! Renders a list of linked entities (closing MRs, related issues, mentions)
|
||||
//! as navigable items. Used in both Issue Detail and MR Detail views.
|
||||
|
||||
use std::fmt;
|
||||
|
||||
use ftui::core::geometry::Rect;
|
||||
use ftui::render::cell::{Cell, PackedRgba};
|
||||
use ftui::render::drawing::Draw;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
use crate::message::EntityKey;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// CrossRefKind
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// The relationship type between two entities.
#[derive(Debug, Clone, Copy, PartialEq, Eq)]
pub enum CrossRefKind {
    /// MR that closes this issue when merged.
    ClosingMr,
    /// Issue related via GitLab link.
    RelatedIssue,
    /// Entity mentioned in a note or description.
    MentionedIn,
}

impl CrossRefKind {
    /// Short icon/prefix for the badge column ("MR", "REL", "REF").
    #[must_use]
    pub const fn icon(&self) -> &str {
        match self {
            Self::ClosingMr => "MR",
            Self::RelatedIssue => "REL",
            Self::MentionedIn => "REF",
        }
    }
}

impl fmt::Display for CrossRefKind {
    /// Long human-readable form, e.g. "Closing MR".
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let label = match self {
            Self::ClosingMr => "Closing MR",
            Self::RelatedIssue => "Related Issue",
            Self::MentionedIn => "Mentioned In",
        };
        f.write_str(label)
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// CrossRef
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A single cross-reference to another entity.
///
/// One instance per linked entity; rendered as a row by `render_cross_refs`.
#[derive(Debug, Clone)]
pub struct CrossRef {
    /// Relationship type.
    pub kind: CrossRefKind,
    /// Target entity identity.
    pub entity_key: EntityKey,
    /// Human-readable label (e.g., "Fix authentication flow").
    pub label: String,
    /// Whether this ref points to an entity in the local DB (navigable).
    /// Non-navigable refs render in the muted color.
    pub navigable: bool,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// CrossRefState
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Rendering state for the cross-reference list.
#[derive(Debug, Clone, Default)]
pub struct CrossRefState {
    /// Index of the selected cross-reference.
    pub selected: usize,
    /// First visible item index.
    pub scroll_offset: usize,
}

impl CrossRefState {
    /// Move selection down, clamping at the last item (no-op when `total` is 0).
    pub fn select_next(&mut self, total: usize) {
        let Some(last) = total.checked_sub(1) else {
            return;
        };
        if self.selected < last {
            self.selected += 1;
        }
    }

    /// Move selection up, clamping at the first item.
    pub fn select_prev(&mut self) {
        if self.selected > 0 {
            self.selected -= 1;
        }
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Colors
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Color scheme for cross-reference rendering.
///
/// Supplied by the caller; presumably derived from the active theme —
/// TODO confirm against the detail screens that construct this.
pub struct CrossRefColors {
    /// Foreground for the kind icon/badge.
    pub kind_fg: PackedRgba,
    /// Foreground for the label text.
    pub label_fg: PackedRgba,
    /// Muted foreground for non-navigable refs.
    pub muted_fg: PackedRgba,
    /// Selected item foreground.
    pub selected_fg: PackedRgba,
    /// Selected item background.
    pub selected_bg: PackedRgba,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Render
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Render a list of cross-references within the given area.
|
||||
///
|
||||
/// Returns the number of rows consumed.
|
||||
///
|
||||
/// Layout per row:
|
||||
/// ```text
|
||||
/// [MR] !42 Fix authentication flow
|
||||
/// [REL] #15 Related auth issue
|
||||
/// [REF] !99 Mentioned in pipeline MR
|
||||
/// ```
|
||||
pub fn render_cross_refs(
|
||||
frame: &mut Frame<'_>,
|
||||
refs: &[CrossRef],
|
||||
state: &CrossRefState,
|
||||
area: Rect,
|
||||
colors: &CrossRefColors,
|
||||
) -> u16 {
|
||||
if refs.is_empty() || area.height == 0 || area.width < 10 {
|
||||
return 0;
|
||||
}
|
||||
|
||||
let max_x = area.x.saturating_add(area.width);
|
||||
let visible_count = (area.height as usize).min(refs.len().saturating_sub(state.scroll_offset));
|
||||
|
||||
for i in 0..visible_count {
|
||||
let idx = state.scroll_offset + i;
|
||||
let Some(cr) = refs.get(idx) else { break };
|
||||
|
||||
let y = area.y + i as u16;
|
||||
let is_selected = idx == state.selected;
|
||||
|
||||
// Background fill for selected row.
|
||||
if is_selected {
|
||||
frame.draw_rect_filled(
|
||||
Rect::new(area.x, y, area.width, 1),
|
||||
Cell {
|
||||
fg: colors.selected_fg,
|
||||
bg: colors.selected_bg,
|
||||
..Cell::default()
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
let mut x = area.x;
|
||||
|
||||
// Kind badge: [MR], [REL], [REF]
|
||||
let badge = format!("[{}]", cr.kind.icon());
|
||||
let badge_style = if is_selected {
|
||||
Cell {
|
||||
fg: colors.selected_fg,
|
||||
bg: colors.selected_bg,
|
||||
..Cell::default()
|
||||
}
|
||||
} else {
|
||||
Cell {
|
||||
fg: colors.kind_fg,
|
||||
..Cell::default()
|
||||
}
|
||||
};
|
||||
x = frame.print_text_clipped(x, y, &badge, badge_style, max_x);
|
||||
|
||||
// Spacing
|
||||
x = frame.print_text_clipped(x, y, " ", badge_style, max_x);
|
||||
|
||||
// Entity prefix + label
|
||||
let prefix = match cr.kind {
|
||||
CrossRefKind::ClosingMr | CrossRefKind::MentionedIn => {
|
||||
format!("!{} ", cr.entity_key.iid)
|
||||
}
|
||||
CrossRefKind::RelatedIssue => {
|
||||
format!("#{} ", cr.entity_key.iid)
|
||||
}
|
||||
};
|
||||
|
||||
let label_style = if is_selected {
|
||||
Cell {
|
||||
fg: colors.selected_fg,
|
||||
bg: colors.selected_bg,
|
||||
..Cell::default()
|
||||
}
|
||||
} else if cr.navigable {
|
||||
Cell {
|
||||
fg: colors.label_fg,
|
||||
..Cell::default()
|
||||
}
|
||||
} else {
|
||||
Cell {
|
||||
fg: colors.muted_fg,
|
||||
..Cell::default()
|
||||
}
|
||||
};
|
||||
|
||||
x = frame.print_text_clipped(x, y, &prefix, label_style, max_x);
|
||||
let _ = frame.print_text_clipped(x, y, &cr.label, label_style, max_x);
|
||||
}
|
||||
|
||||
visible_count as u16
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use ftui::render::grapheme_pool::GraphemePool;

    // Builds a pool-backed Frame of the given size and runs the body with it.
    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    // One ref of each kind; the last is non-navigable to exercise muted styling.
    fn sample_refs() -> Vec<CrossRef> {
        vec![
            CrossRef {
                kind: CrossRefKind::ClosingMr,
                entity_key: EntityKey::mr(1, 42),
                label: "Fix authentication flow".into(),
                navigable: true,
            },
            CrossRef {
                kind: CrossRefKind::RelatedIssue,
                entity_key: EntityKey::issue(1, 15),
                label: "Related auth issue".into(),
                navigable: true,
            },
            CrossRef {
                kind: CrossRefKind::MentionedIn,
                entity_key: EntityKey::mr(2, 99),
                label: "Pipeline improvements".into(),
                navigable: false,
            },
        ]
    }

    fn test_colors() -> CrossRefColors {
        CrossRefColors {
            kind_fg: PackedRgba::rgb(0xDA, 0x70, 0x2C),
            label_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
            muted_fg: PackedRgba::rgb(0x87, 0x87, 0x80),
            selected_fg: PackedRgba::rgb(0x10, 0x0F, 0x0F),
            selected_bg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
        }
    }

    // TDD anchor test from bead spec.
    #[test]
    fn test_cross_ref_entity_key() {
        let cr = CrossRef {
            kind: CrossRefKind::ClosingMr,
            entity_key: EntityKey::mr(1, 42),
            label: "Fix auth".into(),
            navigable: true,
        };
        assert_eq!(cr.kind, CrossRefKind::ClosingMr);
        assert_eq!(cr.entity_key, EntityKey::mr(1, 42));
    }

    #[test]
    fn test_cross_ref_kind_display() {
        assert_eq!(CrossRefKind::ClosingMr.to_string(), "Closing MR");
        assert_eq!(CrossRefKind::RelatedIssue.to_string(), "Related Issue");
        assert_eq!(CrossRefKind::MentionedIn.to_string(), "Mentioned In");
    }

    #[test]
    fn test_cross_ref_kind_icon() {
        assert_eq!(CrossRefKind::ClosingMr.icon(), "MR");
        assert_eq!(CrossRefKind::RelatedIssue.icon(), "REL");
        assert_eq!(CrossRefKind::MentionedIn.icon(), "REF");
    }

    // Selection clamps at both ends of the list.
    #[test]
    fn test_cross_ref_state_navigation() {
        let mut state = CrossRefState::default();
        assert_eq!(state.selected, 0);

        state.select_next(3);
        assert_eq!(state.selected, 1);

        state.select_next(3);
        assert_eq!(state.selected, 2);

        // Can't go past end.
        state.select_next(3);
        assert_eq!(state.selected, 2);

        state.select_prev();
        assert_eq!(state.selected, 1);

        state.select_prev();
        assert_eq!(state.selected, 0);

        // Can't go before start.
        state.select_prev();
        assert_eq!(state.selected, 0);
    }

    #[test]
    fn test_render_cross_refs_no_panic() {
        with_frame!(80, 10, |frame| {
            let refs = sample_refs();
            let state = CrossRefState::default();
            let rows = render_cross_refs(
                &mut frame,
                &refs,
                &state,
                Rect::new(0, 0, 80, 10),
                &test_colors(),
            );
            assert_eq!(rows, 3);
        });
    }

    #[test]
    fn test_render_cross_refs_empty() {
        with_frame!(80, 10, |frame| {
            let state = CrossRefState::default();
            let rows = render_cross_refs(
                &mut frame,
                &[],
                &state,
                Rect::new(0, 0, 80, 10),
                &test_colors(),
            );
            assert_eq!(rows, 0);
        });
    }

    #[test]
    fn test_render_cross_refs_tiny_area() {
        with_frame!(5, 1, |frame| {
            let refs = sample_refs();
            let state = CrossRefState::default();
            let rows = render_cross_refs(
                &mut frame,
                &refs,
                &state,
                Rect::new(0, 0, 5, 1),
                &test_colors(),
            );
            // Too narrow (< 10), should bail.
            assert_eq!(rows, 0);
        });
    }

    // Scroll offset of 1 with a 2-row viewport shows items 1 and 2.
    #[test]
    fn test_render_cross_refs_with_scroll() {
        with_frame!(80, 2, |frame| {
            let refs = sample_refs();
            let state = CrossRefState {
                selected: 2,
                scroll_offset: 1,
            };
            let rows = render_cross_refs(
                &mut frame,
                &refs,
                &state,
                Rect::new(0, 0, 80, 2),
                &test_colors(),
            );
            // 2 visible (indices 1 and 2).
            assert_eq!(rows, 2);
        });
    }

    #[test]
    fn test_render_cross_refs_non_navigable() {
        with_frame!(80, 5, |frame| {
            let refs = vec![CrossRef {
                kind: CrossRefKind::MentionedIn,
                entity_key: EntityKey::mr(2, 99),
                label: "Non-local entity".into(),
                navigable: false,
            }];
            let state = CrossRefState::default();
            let rows = render_cross_refs(
                &mut frame,
                &refs,
                &state,
                Rect::new(0, 0, 80, 5),
                &test_colors(),
            );
            assert_eq!(rows, 1);
        });
    }
}
|
||||
979
crates/lore-tui/src/view/common/discussion_tree.rs
Normal file
979
crates/lore-tui/src/view/common/discussion_tree.rs
Normal file
@@ -0,0 +1,979 @@
|
||||
#![allow(dead_code)] // Phase 2: consumed by Issue Detail + MR Detail screens
|
||||
|
||||
//! Discussion tree widget for entity detail screens.
|
||||
//!
|
||||
//! Renders threaded conversations from GitLab issues/MRs. Discussions are
|
||||
//! top-level expandable nodes, with notes as children. Supports expand/collapse
|
||||
//! persistence, system note styling, and diff note file path rendering.
|
||||
|
||||
use std::collections::HashSet;
|
||||
|
||||
use ftui::core::geometry::Rect;
|
||||
use ftui::render::cell::{Cell, PackedRgba};
|
||||
use ftui::render::drawing::Draw;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
use crate::clock::Clock;
|
||||
use crate::safety::{UrlPolicy, sanitize_for_terminal};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Data types
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// A single discussion thread (top-level node).
///
/// Rendered either collapsed (summary + one-line preview) or expanded
/// (every note, with author/timestamp headers).
#[derive(Debug, Clone)]
pub struct DiscussionNode {
    /// GitLab discussion ID (used as expand/collapse key).
    pub discussion_id: String,
    /// Notes within this discussion, ordered by position.
    pub notes: Vec<NoteNode>,
    /// Whether this discussion is resolvable (MR discussions only).
    pub resolvable: bool,
    /// Whether this discussion has been resolved.
    pub resolved: bool,
}
|
||||
|
||||
impl DiscussionNode {
|
||||
/// Summary line for collapsed display.
|
||||
fn summary(&self) -> String {
|
||||
let first = self.notes.first();
|
||||
let author = first.map_or("unknown", |n| n.author.as_str());
|
||||
let note_count = self.notes.len();
|
||||
let resolved_tag = if self.resolved { " [resolved]" } else { "" };
|
||||
|
||||
if note_count == 1 {
|
||||
format!("{author}{resolved_tag}")
|
||||
} else {
|
||||
format!("{author} ({note_count} notes){resolved_tag}")
|
||||
}
|
||||
}
|
||||
|
||||
/// First line of the first note body, sanitized and truncated.
|
||||
fn preview(&self, max_chars: usize) -> String {
|
||||
self.notes
|
||||
.first()
|
||||
.and_then(|n| n.body.lines().next())
|
||||
.map(|line| {
|
||||
let sanitized = sanitize_for_terminal(line, UrlPolicy::Strip);
|
||||
if sanitized.len() > max_chars {
|
||||
let trunc = max_chars.saturating_sub(3);
|
||||
// Find the last valid char boundary at or before `trunc`
|
||||
// to avoid panicking on multi-byte UTF-8 (emoji, CJK).
|
||||
let safe_end = sanitized
|
||||
.char_indices()
|
||||
.take_while(|&(i, _)| i <= trunc)
|
||||
.last()
|
||||
.map_or(0, |(i, c)| i + c.len_utf8());
|
||||
format!("{}...", &sanitized[..safe_end])
|
||||
} else {
|
||||
sanitized
|
||||
}
|
||||
})
|
||||
.unwrap_or_default()
|
||||
}
|
||||
}
|
||||
|
||||
/// A single note within a discussion.
#[derive(Debug, Clone)]
pub struct NoteNode {
    /// Author username.
    pub author: String,
    /// Note body (markdown text from GitLab).
    pub body: String,
    /// Creation timestamp in milliseconds since epoch.
    pub created_at: i64,
    /// Whether this is a system-generated note (rendered muted).
    pub is_system: bool,
    /// Whether this is a diff/code review note.
    pub is_diff_note: bool,
    /// File path for diff notes; only meaningful when `is_diff_note` is set.
    pub diff_file_path: Option<String>,
    /// New line number for diff notes; only meaningful when `is_diff_note` is set.
    pub diff_new_line: Option<i64>,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// State
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Rendering state for the discussion tree.
|
||||
#[derive(Debug, Clone, Default)]
|
||||
pub struct DiscussionTreeState {
|
||||
/// Index of the selected discussion (0-based).
|
||||
pub selected: usize,
|
||||
/// First visible row index for scrolling.
|
||||
pub scroll_offset: usize,
|
||||
/// Set of expanded discussion IDs.
|
||||
pub expanded: HashSet<String>,
|
||||
}
|
||||
|
||||
impl DiscussionTreeState {
|
||||
/// Move selection down.
|
||||
pub fn select_next(&mut self, total: usize) {
|
||||
if total > 0 && self.selected < total - 1 {
|
||||
self.selected += 1;
|
||||
}
|
||||
}
|
||||
|
||||
/// Move selection up.
|
||||
pub fn select_prev(&mut self) {
|
||||
self.selected = self.selected.saturating_sub(1);
|
||||
}
|
||||
|
||||
/// Toggle expand/collapse for the selected discussion.
|
||||
pub fn toggle_selected(&mut self, discussions: &[DiscussionNode]) {
|
||||
if let Some(d) = discussions.get(self.selected) {
|
||||
let id = &d.discussion_id;
|
||||
if self.expanded.contains(id) {
|
||||
self.expanded.remove(id);
|
||||
} else {
|
||||
self.expanded.insert(id.clone());
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
/// Whether a discussion is expanded.
|
||||
#[must_use]
|
||||
pub fn is_expanded(&self, discussion_id: &str) -> bool {
|
||||
self.expanded.contains(discussion_id)
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Colors
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Color scheme for discussion tree rendering.
pub struct DiscussionTreeColors {
    /// Author name foreground.
    pub author_fg: PackedRgba,
    /// Timestamp foreground.
    pub timestamp_fg: PackedRgba,
    /// Note body foreground.
    pub body_fg: PackedRgba,
    /// System note foreground (muted).
    pub system_fg: PackedRgba,
    /// Diff file path foreground.
    pub diff_path_fg: PackedRgba,
    /// Resolved indicator foreground.
    pub resolved_fg: PackedRgba,
    /// Tree guide characters.
    pub guide_fg: PackedRgba,
    /// Selected discussion foreground.
    pub selected_fg: PackedRgba,
    /// Selected discussion background.
    pub selected_bg: PackedRgba,
    /// Expand/collapse indicator.
    pub expand_fg: PackedRgba,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Relative time formatting
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Format a timestamp as a human-readable relative time string.
|
||||
///
|
||||
/// Uses the provided `Clock` for deterministic rendering in tests.
|
||||
#[must_use]
|
||||
pub fn format_relative_time(epoch_ms: i64, clock: &dyn Clock) -> String {
|
||||
let now_ms = clock.now_ms();
|
||||
let diff_ms = now_ms.saturating_sub(epoch_ms);
|
||||
|
||||
if diff_ms < 0 {
|
||||
return "just now".to_string();
|
||||
}
|
||||
|
||||
let seconds = diff_ms / 1_000;
|
||||
let minutes = seconds / 60;
|
||||
let hours = minutes / 60;
|
||||
let days = hours / 24;
|
||||
let weeks = days / 7;
|
||||
let months = days / 30;
|
||||
|
||||
if seconds < 60 {
|
||||
"just now".to_string()
|
||||
} else if minutes < 60 {
|
||||
format!("{minutes}m ago")
|
||||
} else if hours < 24 {
|
||||
format!("{hours}h ago")
|
||||
} else if days < 7 {
|
||||
format!("{days}d ago")
|
||||
} else if weeks < 4 {
|
||||
format!("{weeks}w ago")
|
||||
} else {
|
||||
format!("{months}mo ago")
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Render
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Maximum indent depth for nested content (notes within discussions).
const INDENT: u16 = 4;

/// Render a discussion tree within the given area.
///
/// Returns the number of rows consumed.
///
/// Layout:
/// ```text
/// > alice (3 notes) [resolved] <- collapsed discussion
/// First line of note body preview...
///
/// v bob (2 notes) <- expanded discussion
/// | bob · 3h ago
/// | This is the first note body...
/// |
/// | alice · 1h ago <- diff note
/// | diff src/auth.rs:42
/// | Code review comment about...
/// ```
pub fn render_discussion_tree(
    frame: &mut Frame<'_>,
    discussions: &[DiscussionNode],
    state: &DiscussionTreeState,
    area: Rect,
    colors: &DiscussionTreeColors,
    clock: &dyn Clock,
) -> u16 {
    // Nothing to draw, or not enough room for a meaningful tree.
    if discussions.is_empty() || area.height == 0 || area.width < 15 {
        return 0;
    }

    let max_x = area.x.saturating_add(area.width);
    let mut y = area.y;
    let y_max = area.y.saturating_add(area.height);

    // Pre-compute all visual rows to support scroll offset.
    let rows = compute_visual_rows_with_clock(
        discussions,
        state,
        max_x.saturating_sub(area.x) as usize,
        clock,
    );

    // Apply scroll offset.
    let visible_rows = rows
        .iter()
        .skip(state.scroll_offset)
        .take(area.height as usize);

    for row in visible_rows {
        if y >= y_max {
            break;
        }

        match row {
            VisualRow::DiscussionHeader {
                disc_idx,
                expanded,
                summary,
                preview,
            } => {
                let is_selected = *disc_idx == state.selected;

                // Background fill for selected.
                if is_selected {
                    frame.draw_rect_filled(
                        Rect::new(area.x, y, area.width, 1),
                        Cell {
                            fg: colors.selected_fg,
                            bg: colors.selected_bg,
                            ..Cell::default()
                        },
                    );
                }

                let style = if is_selected {
                    Cell {
                        fg: colors.selected_fg,
                        bg: colors.selected_bg,
                        ..Cell::default()
                    }
                } else {
                    Cell {
                        fg: colors.author_fg,
                        ..Cell::default()
                    }
                };

                // "v" marks an expanded discussion, ">" a collapsed one.
                let indicator = if *expanded { "v " } else { "> " };
                let mut x = frame.print_text_clipped(area.x, y, indicator, style, max_x);
                x = frame.print_text_clipped(x, y, summary, style, max_x);

                // Show preview on same line for collapsed.
                if !expanded && !preview.is_empty() {
                    let preview_style = if is_selected {
                        style
                    } else {
                        Cell {
                            fg: colors.timestamp_fg,
                            ..Cell::default()
                        }
                    };
                    x = frame.print_text_clipped(x, y, " - ", preview_style, max_x);
                    let _ = frame.print_text_clipped(x, y, preview, preview_style, max_x);
                }

                y += 1;
            }

            VisualRow::NoteHeader {
                author,
                relative_time,
                is_system,
                ..
            } => {
                // System-generated notes render with the muted system color.
                let style = if *is_system {
                    Cell {
                        fg: colors.system_fg,
                        ..Cell::default()
                    }
                } else {
                    Cell {
                        fg: colors.author_fg,
                        ..Cell::default()
                    }
                };

                let guide_style = Cell {
                    fg: colors.guide_fg,
                    ..Cell::default()
                };

                // Guide bar, then author at the indent column, then timestamp.
                let indent_x = area.x.saturating_add(INDENT);
                let mut x = frame.print_text_clipped(area.x, y, " | ", guide_style, max_x);
                x = frame.print_text_clipped(x.max(indent_x), y, author, style, max_x);

                let time_style = Cell {
                    fg: colors.timestamp_fg,
                    ..Cell::default()
                };
                x = frame.print_text_clipped(x, y, " · ", time_style, max_x);
                let _ = frame.print_text_clipped(x, y, relative_time, time_style, max_x);

                y += 1;
            }

            VisualRow::DiffPath { file_path, line } => {
                let guide_style = Cell {
                    fg: colors.guide_fg,
                    ..Cell::default()
                };
                let path_style = Cell {
                    fg: colors.diff_path_fg,
                    ..Cell::default()
                };

                let mut x = frame.print_text_clipped(area.x, y, " | ", guide_style, max_x);
                let indent_x = area.x.saturating_add(INDENT);
                x = x.max(indent_x);

                // "diff path:line" when the line number is known.
                let location = match line {
                    Some(l) => format!("diff {file_path}:{l}"),
                    None => format!("diff {file_path}"),
                };
                let _ = frame.print_text_clipped(x, y, &location, path_style, max_x);

                y += 1;
            }

            VisualRow::BodyLine { text, is_system } => {
                let guide_style = Cell {
                    fg: colors.guide_fg,
                    ..Cell::default()
                };
                let body_style = if *is_system {
                    Cell {
                        fg: colors.system_fg,
                        ..Cell::default()
                    }
                } else {
                    Cell {
                        fg: colors.body_fg,
                        ..Cell::default()
                    }
                };

                let mut x = frame.print_text_clipped(area.x, y, " | ", guide_style, max_x);
                let indent_x = area.x.saturating_add(INDENT);
                x = x.max(indent_x);
                let _ = frame.print_text_clipped(x, y, text, body_style, max_x);

                y += 1;
            }

            VisualRow::Separator => {
                // Blank guide-only row between notes of an expanded discussion.
                let guide_style = Cell {
                    fg: colors.guide_fg,
                    ..Cell::default()
                };
                let _ = frame.print_text_clipped(area.x, y, " |", guide_style, max_x);
                y += 1;
            }
        }
    }

    // Rows actually consumed within the viewport.
    y.saturating_sub(area.y)
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Visual row computation
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Pre-computed visual row for the discussion tree.
///
/// We flatten the tree into rows to support scroll offset correctly.
#[derive(Debug)]
enum VisualRow {
    /// Discussion header (collapsed or expanded).
    DiscussionHeader {
        /// Index into the `discussions` slice; used for selection highlight.
        disc_idx: usize,
        expanded: bool,
        summary: String,
        /// One-line body preview; empty when the discussion is expanded.
        preview: String,
    },
    /// Note author + timestamp line.
    NoteHeader {
        author: String,
        relative_time: String,
        is_system: bool,
    },
    /// Diff note file path line.
    DiffPath {
        file_path: String,
        line: Option<i64>,
    },
    /// Note body text line.
    BodyLine { text: String, is_system: bool },
    /// Blank separator between notes.
    Separator,
}
|
||||
|
||||
/// Maximum body lines shown per note to prevent one huge note from
/// consuming the entire viewport.
const MAX_BODY_LINES: usize = 10;

/// Compute visual rows with relative timestamps from the clock.
///
/// Flattens each discussion into a header row plus, when expanded, a
/// header / optional diff-path / body-line group per note with blank
/// separators between notes. Row order here defines scroll semantics.
fn compute_visual_rows_with_clock(
    discussions: &[DiscussionNode],
    state: &DiscussionTreeState,
    available_width: usize,
    clock: &dyn Clock,
) -> Vec<VisualRow> {
    let mut rows = Vec::new();
    // Preview budget: leave ~40 columns for summary/indicator, floor of 20.
    let preview_max = available_width.saturating_sub(40).max(20);

    for (idx, disc) in discussions.iter().enumerate() {
        let expanded = state.is_expanded(&disc.discussion_id);

        rows.push(VisualRow::DiscussionHeader {
            disc_idx: idx,
            expanded,
            summary: disc.summary(),
            // Expanded discussions show their notes, so no preview is needed.
            preview: if expanded {
                String::new()
            } else {
                disc.preview(preview_max)
            },
        });

        if expanded {
            for (note_idx, note) in disc.notes.iter().enumerate() {
                // Blank guide row between consecutive notes.
                if note_idx > 0 {
                    rows.push(VisualRow::Separator);
                }

                rows.push(VisualRow::NoteHeader {
                    author: note.author.clone(),
                    relative_time: format_relative_time(note.created_at, clock),
                    is_system: note.is_system,
                });

                // Diff notes get a "diff path[:line]" location row.
                if note.is_diff_note
                    && let Some(ref path) = note.diff_file_path
                {
                    rows.push(VisualRow::DiffPath {
                        file_path: path.clone(),
                        line: note.diff_new_line,
                    });
                }

                // Body lines, capped at MAX_BODY_LINES with a "..." marker.
                let sanitized = sanitize_for_terminal(&note.body, UrlPolicy::Strip);
                for (line_idx, line) in sanitized.lines().enumerate() {
                    if line_idx >= MAX_BODY_LINES {
                        rows.push(VisualRow::BodyLine {
                            text: "...".to_string(),
                            is_system: note.is_system,
                        });
                        break;
                    }
                    rows.push(VisualRow::BodyLine {
                        text: line.to_string(),
                        is_system: note.is_system,
                    });
                }
            }
        }
    }

    rows
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::clock::FakeClock;
|
||||
use ftui::render::grapheme_pool::GraphemePool;
|
||||
|
||||
macro_rules! with_frame {
|
||||
($width:expr, $height:expr, |$frame:ident| $body:block) => {{
|
||||
let mut pool = GraphemePool::new();
|
||||
let mut $frame = Frame::new($width, $height, &mut pool);
|
||||
$body
|
||||
}};
|
||||
}
|
||||
|
||||
fn sample_note(author: &str, body: &str, created_at: i64) -> NoteNode {
|
||||
NoteNode {
|
||||
author: author.into(),
|
||||
body: body.into(),
|
||||
created_at,
|
||||
is_system: false,
|
||||
is_diff_note: false,
|
||||
diff_file_path: None,
|
||||
diff_new_line: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn system_note(body: &str, created_at: i64) -> NoteNode {
|
||||
NoteNode {
|
||||
author: "system".into(),
|
||||
body: body.into(),
|
||||
created_at,
|
||||
is_system: true,
|
||||
is_diff_note: false,
|
||||
diff_file_path: None,
|
||||
diff_new_line: None,
|
||||
}
|
||||
}
|
||||
|
||||
fn diff_note(author: &str, body: &str, path: &str, line: i64, created_at: i64) -> NoteNode {
|
||||
NoteNode {
|
||||
author: author.into(),
|
||||
body: body.into(),
|
||||
created_at,
|
||||
is_system: false,
|
||||
is_diff_note: true,
|
||||
diff_file_path: Some(path.into()),
|
||||
diff_new_line: Some(line),
|
||||
}
|
||||
}
|
||||
|
||||
fn sample_discussions() -> Vec<DiscussionNode> {
|
||||
vec![
|
||||
DiscussionNode {
|
||||
discussion_id: "disc-1".into(),
|
||||
notes: vec![
|
||||
sample_note("alice", "This looks good overall", 1_700_000_000_000),
|
||||
sample_note("bob", "Agreed, but one concern", 1_700_000_060_000),
|
||||
],
|
||||
resolvable: false,
|
||||
resolved: false,
|
||||
},
|
||||
DiscussionNode {
|
||||
discussion_id: "disc-2".into(),
|
||||
notes: vec![diff_note(
|
||||
"charlie",
|
||||
"This function needs error handling",
|
||||
"src/auth.rs",
|
||||
42,
|
||||
1_700_000_120_000,
|
||||
)],
|
||||
resolvable: true,
|
||||
resolved: true,
|
||||
},
|
||||
DiscussionNode {
|
||||
discussion_id: "disc-3".into(),
|
||||
notes: vec![system_note("changed the description", 1_700_000_180_000)],
|
||||
resolvable: false,
|
||||
resolved: false,
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
fn test_colors() -> DiscussionTreeColors {
|
||||
DiscussionTreeColors {
|
||||
author_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
|
||||
timestamp_fg: PackedRgba::rgb(0x87, 0x87, 0x80),
|
||||
body_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
|
||||
system_fg: PackedRgba::rgb(0x6F, 0x6E, 0x69),
|
||||
diff_path_fg: PackedRgba::rgb(0x87, 0x96, 0x6B),
|
||||
resolved_fg: PackedRgba::rgb(0x87, 0x96, 0x6B),
|
||||
guide_fg: PackedRgba::rgb(0x40, 0x40, 0x3C),
|
||||
selected_fg: PackedRgba::rgb(0x10, 0x0F, 0x0F),
|
||||
selected_bg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
|
||||
expand_fg: PackedRgba::rgb(0xDA, 0x70, 0x2C),
|
||||
}
|
||||
}
|
||||
|
||||
// Clock set to 1h after the last sample note.
|
||||
fn test_clock() -> FakeClock {
|
||||
FakeClock::from_ms(1_700_000_180_000 + 3_600_000)
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_format_relative_time_just_now() {
|
||||
let clock = FakeClock::from_ms(1_000_000);
|
||||
assert_eq!(format_relative_time(1_000_000, &clock), "just now");
|
||||
assert_eq!(format_relative_time(999_990, &clock), "just now");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_format_relative_time_minutes() {
|
||||
let clock = FakeClock::from_ms(1_000_000 + 5 * 60 * 1_000);
|
||||
assert_eq!(format_relative_time(1_000_000, &clock), "5m ago");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_format_relative_time_hours() {
|
||||
let clock = FakeClock::from_ms(1_000_000 + 3 * 3_600 * 1_000);
|
||||
assert_eq!(format_relative_time(1_000_000, &clock), "3h ago");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_format_relative_time_days() {
|
||||
let clock = FakeClock::from_ms(1_000_000 + 2 * 86_400 * 1_000);
|
||||
assert_eq!(format_relative_time(1_000_000, &clock), "2d ago");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_format_relative_time_weeks() {
|
||||
let clock = FakeClock::from_ms(1_000_000 + 14 * 86_400 * 1_000);
|
||||
assert_eq!(format_relative_time(1_000_000, &clock), "2w ago");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_format_relative_time_months() {
|
||||
let clock = FakeClock::from_ms(1_000_000 + 60 * 86_400 * 1_000);
|
||||
assert_eq!(format_relative_time(1_000_000, &clock), "2mo ago");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_discussion_node_summary() {
|
||||
let disc = DiscussionNode {
|
||||
discussion_id: "d1".into(),
|
||||
notes: vec![
|
||||
sample_note("alice", "body", 0),
|
||||
sample_note("bob", "reply", 1000),
|
||||
],
|
||||
resolvable: false,
|
||||
resolved: false,
|
||||
};
|
||||
assert_eq!(disc.summary(), "alice (2 notes)");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_discussion_node_summary_single() {
|
||||
let disc = DiscussionNode {
|
||||
discussion_id: "d1".into(),
|
||||
notes: vec![sample_note("alice", "body", 0)],
|
||||
resolvable: false,
|
||||
resolved: false,
|
||||
};
|
||||
assert_eq!(disc.summary(), "alice");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_discussion_node_summary_resolved() {
|
||||
let disc = DiscussionNode {
|
||||
discussion_id: "d1".into(),
|
||||
notes: vec![sample_note("alice", "body", 0)],
|
||||
resolvable: true,
|
||||
resolved: true,
|
||||
};
|
||||
assert_eq!(disc.summary(), "alice [resolved]");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_discussion_node_preview() {
|
||||
let disc = DiscussionNode {
|
||||
discussion_id: "d1".into(),
|
||||
notes: vec![sample_note("alice", "First line\nSecond line", 0)],
|
||||
resolvable: false,
|
||||
resolved: false,
|
||||
};
|
||||
assert_eq!(disc.preview(50), "First line");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_discussion_tree_state_navigation() {
|
||||
let mut state = DiscussionTreeState::default();
|
||||
assert_eq!(state.selected, 0);
|
||||
|
||||
state.select_next(3);
|
||||
assert_eq!(state.selected, 1);
|
||||
|
||||
state.select_next(3);
|
||||
assert_eq!(state.selected, 2);
|
||||
|
||||
state.select_next(3);
|
||||
assert_eq!(state.selected, 2);
|
||||
|
||||
state.select_prev();
|
||||
assert_eq!(state.selected, 1);
|
||||
|
||||
state.select_prev();
|
||||
assert_eq!(state.selected, 0);
|
||||
|
||||
state.select_prev();
|
||||
assert_eq!(state.selected, 0);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_discussion_tree_state_toggle() {
|
||||
let discussions = sample_discussions();
|
||||
let mut state = DiscussionTreeState::default();
|
||||
|
||||
assert!(!state.is_expanded("disc-1"));
|
||||
|
||||
state.toggle_selected(&discussions);
|
||||
assert!(state.is_expanded("disc-1"));
|
||||
|
||||
state.toggle_selected(&discussions);
|
||||
assert!(!state.is_expanded("disc-1"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_render_discussion_tree_collapsed_no_panic() {
|
||||
with_frame!(80, 20, |frame| {
|
||||
let discussions = sample_discussions();
|
||||
let state = DiscussionTreeState::default();
|
||||
let clock = test_clock();
|
||||
let rows = render_discussion_tree(
|
||||
&mut frame,
|
||||
&discussions,
|
||||
&state,
|
||||
Rect::new(0, 0, 80, 20),
|
||||
&test_colors(),
|
||||
&clock,
|
||||
);
|
||||
// 3 discussions, all collapsed = 3 rows.
|
||||
assert_eq!(rows, 3);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_render_discussion_tree_expanded_no_panic() {
|
||||
with_frame!(80, 30, |frame| {
|
||||
let discussions = sample_discussions();
|
||||
let mut state = DiscussionTreeState::default();
|
||||
state.expanded.insert("disc-1".into());
|
||||
let clock = test_clock();
|
||||
let rows = render_discussion_tree(
|
||||
&mut frame,
|
||||
&discussions,
|
||||
&state,
|
||||
Rect::new(0, 0, 80, 30),
|
||||
&test_colors(),
|
||||
&clock,
|
||||
);
|
||||
// disc-1 expanded: header + 2 notes (each: header + body line) + separator between
|
||||
// = 1 + (1+1) + 1 + (1+1) = 6 rows from disc-1
|
||||
// disc-2 collapsed: 1 row
|
||||
// disc-3 collapsed: 1 row
|
||||
// Total: 8
|
||||
assert!(rows >= 6); // At least disc-1 content + 2 collapsed.
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_render_discussion_tree_empty() {
|
||||
with_frame!(80, 20, |frame| {
|
||||
let state = DiscussionTreeState::default();
|
||||
let clock = test_clock();
|
||||
let rows = render_discussion_tree(
|
||||
&mut frame,
|
||||
&[],
|
||||
&state,
|
||||
Rect::new(0, 0, 80, 20),
|
||||
&test_colors(),
|
||||
&clock,
|
||||
);
|
||||
assert_eq!(rows, 0);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_render_discussion_tree_tiny_area() {
|
||||
with_frame!(10, 2, |frame| {
|
||||
let discussions = sample_discussions();
|
||||
let state = DiscussionTreeState::default();
|
||||
let clock = test_clock();
|
||||
let rows = render_discussion_tree(
|
||||
&mut frame,
|
||||
&discussions,
|
||||
&state,
|
||||
Rect::new(0, 0, 10, 2),
|
||||
&test_colors(),
|
||||
&clock,
|
||||
);
|
||||
// Too narrow (< 15), should bail.
|
||||
assert_eq!(rows, 0);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_render_discussion_tree_with_diff_note() {
|
||||
with_frame!(80, 30, |frame| {
|
||||
let discussions = vec![DiscussionNode {
|
||||
discussion_id: "diff-disc".into(),
|
||||
notes: vec![diff_note(
|
||||
"reviewer",
|
||||
"Add error handling here",
|
||||
"src/main.rs",
|
||||
42,
|
||||
1_700_000_000_000,
|
||||
)],
|
||||
resolvable: true,
|
||||
resolved: false,
|
||||
}];
|
||||
let mut state = DiscussionTreeState::default();
|
||||
state.expanded.insert("diff-disc".into());
|
||||
let clock = test_clock();
|
||||
let rows = render_discussion_tree(
|
||||
&mut frame,
|
||||
&discussions,
|
||||
&state,
|
||||
Rect::new(0, 0, 80, 30),
|
||||
&test_colors(),
|
||||
&clock,
|
||||
);
|
||||
// header + note header + diff path + body line = 4
|
||||
assert!(rows >= 3);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_render_discussion_tree_system_note() {
|
||||
with_frame!(80, 20, |frame| {
|
||||
let discussions = vec![DiscussionNode {
|
||||
discussion_id: "sys-disc".into(),
|
||||
notes: vec![system_note("changed the description", 1_700_000_000_000)],
|
||||
resolvable: false,
|
||||
resolved: false,
|
||||
}];
|
||||
let mut state = DiscussionTreeState::default();
|
||||
state.expanded.insert("sys-disc".into());
|
||||
let clock = test_clock();
|
||||
let rows = render_discussion_tree(
|
||||
&mut frame,
|
||||
&discussions,
|
||||
&state,
|
||||
Rect::new(0, 0, 80, 20),
|
||||
&test_colors(),
|
||||
&clock,
|
||||
);
|
||||
assert!(rows >= 2);
|
||||
});
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compute_visual_rows_collapsed() {
|
||||
let discussions = sample_discussions();
|
||||
let state = DiscussionTreeState::default();
|
||||
let clock = test_clock();
|
||||
let rows = compute_visual_rows_with_clock(&discussions, &state, 80, &clock);
|
||||
|
||||
// 3 collapsed headers.
|
||||
assert_eq!(rows.len(), 3);
|
||||
assert!(matches!(
|
||||
rows[0],
|
||||
VisualRow::DiscussionHeader {
|
||||
expanded: false,
|
||||
..
|
||||
}
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_compute_visual_rows_expanded() {
|
||||
let discussions = sample_discussions();
|
||||
let mut state = DiscussionTreeState::default();
|
||||
state.expanded.insert("disc-1".into());
|
||||
let clock = test_clock();
|
||||
let rows = compute_visual_rows_with_clock(&discussions, &state, 80, &clock);
|
||||
|
||||
// disc-1: header + note1 (header + body) + separator + note2 (header + body) = 6
|
||||
// disc-2: 1 header
|
||||
// disc-3: 1 header
|
||||
// Total: 8
|
||||
assert!(rows.len() >= 6);
|
||||
assert!(matches!(
|
||||
rows[0],
|
||||
VisualRow::DiscussionHeader { expanded: true, .. }
|
||||
));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_long_body_truncation() {
|
||||
let long_body = (0..20)
|
||||
.map(|i| format!("Line {i} of a very long discussion note"))
|
||||
.collect::<Vec<_>>()
|
||||
.join("\n");
|
||||
|
||||
let discussions = vec![DiscussionNode {
|
||||
discussion_id: "long".into(),
|
||||
notes: vec![sample_note("alice", &long_body, 1_700_000_000_000)],
|
||||
resolvable: false,
|
||||
resolved: false,
|
||||
}];
|
||||
let mut state = DiscussionTreeState::default();
|
||||
state.expanded.insert("long".into());
|
||||
let clock = test_clock();
|
||||
let rows = compute_visual_rows_with_clock(&discussions, &state, 80, &clock);
|
||||
|
||||
// Header + note header + MAX_BODY_LINES + 1 ("...") = 1 + 1 + 10 + 1 = 13
|
||||
let body_lines: Vec<_> = rows
|
||||
.iter()
|
||||
.filter(|r| matches!(r, VisualRow::BodyLine { .. }))
|
||||
.collect();
|
||||
// Should cap at MAX_BODY_LINES + 1 (for the "..." truncation line).
|
||||
assert!(body_lines.len() <= MAX_BODY_LINES + 1);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_preview_multibyte_utf8_no_panic() {
|
||||
// Emoji are 4 bytes each. Truncating at a byte boundary that falls
|
||||
// inside a multi-byte char must not panic.
|
||||
let disc = DiscussionNode {
|
||||
discussion_id: "d-utf8".into(),
|
||||
notes: vec![sample_note(
|
||||
"alice",
|
||||
"Hello 🌍🌎🌏 world of emoji 🎉🎊🎈",
|
||||
0,
|
||||
)],
|
||||
resolvable: false,
|
||||
resolved: false,
|
||||
};
|
||||
// max_chars=10 would land inside the first emoji's bytes.
|
||||
let preview = disc.preview(10);
|
||||
assert!(preview.ends_with("..."));
|
||||
assert!(preview.len() <= 20); // char-bounded + "..."
|
||||
|
||||
// Edge: max_chars smaller than a single multi-byte char.
|
||||
let disc2 = DiscussionNode {
|
||||
discussion_id: "d-utf8-2".into(),
|
||||
notes: vec![sample_note("bob", "🌍🌎🌏", 0)],
|
||||
resolvable: false,
|
||||
resolved: false,
|
||||
};
|
||||
let preview2 = disc2.preview(3);
|
||||
assert!(preview2.ends_with("..."));
|
||||
}
|
||||
}
|
||||
676
crates/lore-tui/src/view/common/entity_table.rs
Normal file
676
crates/lore-tui/src/view/common/entity_table.rs
Normal file
@@ -0,0 +1,676 @@
|
||||
#![allow(dead_code)] // Phase 2: consumed by Issue List + MR List screens
|
||||
|
||||
//! Generic entity table widget for list screens.
|
||||
//!
|
||||
//! `EntityTable<R>` renders rows with sortable, responsive columns.
|
||||
//! Columns hide gracefully when the terminal is too narrow, using
|
||||
//! priority-based visibility.
|
||||
|
||||
use ftui::core::geometry::Rect;
|
||||
use ftui::render::cell::{Cell, PackedRgba};
|
||||
use ftui::render::drawing::Draw;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Column definition
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Describes a single table column.
///
/// Width negotiation: every visible column receives at least
/// `min_width` cells; leftover space is split proportionally by
/// `flex_weight` (see `visible_columns`).
#[derive(Debug, Clone)]
pub struct ColumnDef {
    /// Display name shown in the header.
    pub name: &'static str,
    /// Minimum width in characters. Column is hidden if it can't meet this.
    pub min_width: u16,
    /// Flex weight for distributing extra space (0 = fixed width).
    pub flex_weight: u16,
    /// Visibility priority (0 = always shown, higher = hidden first).
    pub priority: u8,
    /// Text alignment within the column.
    pub align: Align,
}
|
||||
|
||||
/// Text alignment within a column.
#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)]
pub enum Align {
    /// Text starts at the column's left edge (the default).
    #[default]
    Left,
    /// Text is padded on the left so it ends at the column's right edge.
    Right,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// TableRow trait
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Trait for types that can be rendered as a table row.
pub trait TableRow {
    /// Return the cell text for each column, in column order.
    ///
    /// `col_count` is the total number of defined columns; implementors
    /// should return one `String` per column. Missing entries render as
    /// empty cells.
    fn cells(&self, col_count: usize) -> Vec<String>;
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// EntityTable state
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Rendering state for the entity table.
///
/// Owned by the screen and mutated by key handling;
/// `render_entity_table` adjusts `scroll_offset` so the selected row
/// stays visible.
#[derive(Debug, Clone)]
pub struct EntityTableState {
    /// Index of the selected row (0-based, within the full data set).
    pub selected: usize,
    /// Scroll offset (first visible row index).
    pub scroll_offset: usize,
    /// Index of the column used for sorting.
    pub sort_column: usize,
    /// Sort direction (`true` = ascending).
    pub sort_ascending: bool,
}
|
||||
|
||||
impl Default for EntityTableState {
|
||||
fn default() -> Self {
|
||||
Self {
|
||||
selected: 0,
|
||||
scroll_offset: 0,
|
||||
sort_column: 0,
|
||||
sort_ascending: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
impl EntityTableState {
|
||||
/// Move selection down by 1.
|
||||
pub fn select_next(&mut self, total_rows: usize) {
|
||||
if total_rows == 0 {
|
||||
return;
|
||||
}
|
||||
self.selected = (self.selected + 1).min(total_rows - 1);
|
||||
}
|
||||
|
||||
/// Move selection up by 1.
|
||||
pub fn select_prev(&mut self) {
|
||||
self.selected = self.selected.saturating_sub(1);
|
||||
}
|
||||
|
||||
/// Page down (move by `page_size` rows).
|
||||
pub fn page_down(&mut self, total_rows: usize, page_size: usize) {
|
||||
if total_rows == 0 {
|
||||
return;
|
||||
}
|
||||
self.selected = (self.selected + page_size).min(total_rows - 1);
|
||||
}
|
||||
|
||||
/// Page up.
|
||||
pub fn page_up(&mut self, page_size: usize) {
|
||||
self.selected = self.selected.saturating_sub(page_size);
|
||||
}
|
||||
|
||||
/// Jump to top.
|
||||
pub fn select_first(&mut self) {
|
||||
self.selected = 0;
|
||||
}
|
||||
|
||||
/// Jump to bottom.
|
||||
pub fn select_last(&mut self, total_rows: usize) {
|
||||
if total_rows > 0 {
|
||||
self.selected = total_rows - 1;
|
||||
}
|
||||
}
|
||||
|
||||
/// Cycle sort column forward (wraps around).
|
||||
pub fn cycle_sort(&mut self, col_count: usize) {
|
||||
if col_count == 0 {
|
||||
return;
|
||||
}
|
||||
self.sort_column = (self.sort_column + 1) % col_count;
|
||||
}
|
||||
|
||||
/// Toggle sort direction on current column.
|
||||
pub fn toggle_sort_direction(&mut self) {
|
||||
self.sort_ascending = !self.sort_ascending;
|
||||
}
|
||||
|
||||
/// Ensure scroll offset keeps selection visible.
|
||||
fn adjust_scroll(&mut self, visible_rows: usize) {
|
||||
if visible_rows == 0 {
|
||||
return;
|
||||
}
|
||||
if self.selected < self.scroll_offset {
|
||||
self.scroll_offset = self.selected;
|
||||
}
|
||||
if self.selected >= self.scroll_offset + visible_rows {
|
||||
self.scroll_offset = self.selected - visible_rows + 1;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Colors
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Colors for the entity table. Will be replaced by Theme injection.
pub struct TableColors {
    /// Header text color.
    pub header_fg: PackedRgba,
    /// Header background fill.
    pub header_bg: PackedRgba,
    /// Normal row text color.
    pub row_fg: PackedRgba,
    /// Background for alternating (odd absolute index) rows.
    pub row_alt_bg: PackedRgba,
    /// Selected row text color.
    pub selected_fg: PackedRgba,
    /// Selected row background fill.
    pub selected_bg: PackedRgba,
    /// Color of the sort-direction arrow in the header.
    pub sort_indicator: PackedRgba,
    /// Separator rule and overflow-indicator color.
    pub border: PackedRgba,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Render
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Compute which columns are visible given the available width.
|
||||
///
|
||||
/// Returns indices of visible columns sorted by original order,
|
||||
/// along with their allocated widths.
|
||||
pub fn visible_columns(columns: &[ColumnDef], available_width: u16) -> Vec<(usize, u16)> {
|
||||
// Sort by priority (lowest = most important).
|
||||
let mut indexed: Vec<(usize, &ColumnDef)> = columns.iter().enumerate().collect();
|
||||
indexed.sort_by_key(|(_, col)| col.priority);
|
||||
|
||||
let mut result: Vec<(usize, u16)> = Vec::new();
|
||||
let mut used_width: u16 = 0;
|
||||
let gap = 1u16; // 1-char gap between columns.
|
||||
|
||||
for (idx, col) in &indexed {
|
||||
let needed = col.min_width + if result.is_empty() { 0 } else { gap };
|
||||
if used_width + needed <= available_width {
|
||||
result.push((*idx, col.min_width));
|
||||
used_width += needed;
|
||||
}
|
||||
}
|
||||
|
||||
// Distribute remaining space by flex weight.
|
||||
let remaining = available_width.saturating_sub(used_width);
|
||||
if remaining > 0 {
|
||||
let total_weight: u16 = result
|
||||
.iter()
|
||||
.map(|(idx, _)| columns[*idx].flex_weight)
|
||||
.sum();
|
||||
|
||||
if total_weight > 0 {
|
||||
for (idx, width) in &mut result {
|
||||
let weight = columns[*idx].flex_weight;
|
||||
let extra =
|
||||
(u32::from(remaining) * u32::from(weight) / u32::from(total_weight)) as u16;
|
||||
*width += extra;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// Sort by original column order for rendering.
|
||||
result.sort_by_key(|(idx, _)| *idx);
|
||||
result
|
||||
}
|
||||
|
||||
/// Render the entity table header row.
|
||||
pub fn render_header(
|
||||
frame: &mut Frame<'_>,
|
||||
columns: &[ColumnDef],
|
||||
visible: &[(usize, u16)],
|
||||
state: &EntityTableState,
|
||||
y: u16,
|
||||
area_x: u16,
|
||||
colors: &TableColors,
|
||||
) {
|
||||
let header_cell = Cell {
|
||||
fg: colors.header_fg,
|
||||
bg: colors.header_bg,
|
||||
..Cell::default()
|
||||
};
|
||||
let sort_cell = Cell {
|
||||
fg: colors.sort_indicator,
|
||||
bg: colors.header_bg,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
// Fill header background.
|
||||
let total_width: u16 = visible.iter().map(|(_, w)| w + 1).sum();
|
||||
let header_rect = Rect::new(area_x, y, total_width, 1);
|
||||
frame.draw_rect_filled(
|
||||
header_rect,
|
||||
Cell {
|
||||
bg: colors.header_bg,
|
||||
..Cell::default()
|
||||
},
|
||||
);
|
||||
|
||||
let mut x = area_x;
|
||||
for (col_idx, col_width) in visible {
|
||||
let col = &columns[*col_idx];
|
||||
let col_max = x.saturating_add(*col_width);
|
||||
|
||||
let after_name = frame.print_text_clipped(x, y, col.name, header_cell, col_max);
|
||||
|
||||
// Sort indicator.
|
||||
if *col_idx == state.sort_column {
|
||||
let arrow = if state.sort_ascending { " ^" } else { " v" };
|
||||
frame.print_text_clipped(after_name, y, arrow, sort_cell, col_max);
|
||||
}
|
||||
|
||||
x = col_max.saturating_add(1); // gap
|
||||
}
|
||||
}
|
||||
|
||||
/// Style context for rendering a single row.
///
/// Bundles the column layout and per-row styling flags so `render_row`
/// keeps a small signature.
pub struct RowContext<'a> {
    /// Full column definitions (indexed by the first element of `visible`).
    pub columns: &'a [ColumnDef],
    /// Visible columns as (column index, allocated width) pairs.
    pub visible: &'a [(usize, u16)],
    /// Row is the current selection (drawn with selected fg/bg).
    pub is_selected: bool,
    /// Row has an odd absolute index (drawn with the alternate background).
    pub is_alt: bool,
    /// Palette to draw with.
    pub colors: &'a TableColors,
}
|
||||
|
||||
/// Render a data row.
|
||||
pub fn render_row<R: TableRow>(
|
||||
frame: &mut Frame<'_>,
|
||||
row: &R,
|
||||
y: u16,
|
||||
area_x: u16,
|
||||
ctx: &RowContext<'_>,
|
||||
) {
|
||||
let (fg, bg) = if ctx.is_selected {
|
||||
(ctx.colors.selected_fg, ctx.colors.selected_bg)
|
||||
} else if ctx.is_alt {
|
||||
(ctx.colors.row_fg, ctx.colors.row_alt_bg)
|
||||
} else {
|
||||
(ctx.colors.row_fg, Cell::default().bg)
|
||||
};
|
||||
|
||||
let cell_style = Cell {
|
||||
fg,
|
||||
bg,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
// Fill row background if selected or alt.
|
||||
if ctx.is_selected || ctx.is_alt {
|
||||
let total_width: u16 = ctx.visible.iter().map(|(_, w)| w + 1).sum();
|
||||
frame.draw_rect_filled(
|
||||
Rect::new(area_x, y, total_width, 1),
|
||||
Cell {
|
||||
bg,
|
||||
..Cell::default()
|
||||
},
|
||||
);
|
||||
}
|
||||
|
||||
let cells = row.cells(ctx.columns.len());
|
||||
let mut x = area_x;
|
||||
|
||||
for (col_idx, col_width) in ctx.visible {
|
||||
let col_max = x.saturating_add(*col_width);
|
||||
let text = cells.get(*col_idx).map(String::as_str).unwrap_or("");
|
||||
|
||||
match ctx.columns[*col_idx].align {
|
||||
Align::Left => {
|
||||
frame.print_text_clipped(x, y, text, cell_style, col_max);
|
||||
}
|
||||
Align::Right => {
|
||||
let text_len = text.len() as u16;
|
||||
let start = if text_len < *col_width {
|
||||
x + col_width - text_len
|
||||
} else {
|
||||
x
|
||||
};
|
||||
frame.print_text_clipped(start, y, text, cell_style, col_max);
|
||||
}
|
||||
}
|
||||
|
||||
x = col_max.saturating_add(1); // gap
|
||||
}
|
||||
}
|
||||
|
||||
/// Render a complete entity table: header + scrollable rows.
|
||||
pub fn render_entity_table<R: TableRow>(
|
||||
frame: &mut Frame<'_>,
|
||||
rows: &[R],
|
||||
columns: &[ColumnDef],
|
||||
state: &mut EntityTableState,
|
||||
area: Rect,
|
||||
colors: &TableColors,
|
||||
) {
|
||||
if area.height < 2 || area.width < 5 {
|
||||
return;
|
||||
}
|
||||
|
||||
let visible = visible_columns(columns, area.width);
|
||||
if visible.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Header row.
|
||||
render_header(frame, columns, &visible, state, area.y, area.x, colors);
|
||||
|
||||
// Separator.
|
||||
let sep_y = area.y.saturating_add(1);
|
||||
let sep_cell = Cell {
|
||||
fg: colors.border,
|
||||
..Cell::default()
|
||||
};
|
||||
let rule = "─".repeat(area.width as usize);
|
||||
frame.print_text_clipped(
|
||||
area.x,
|
||||
sep_y,
|
||||
&rule,
|
||||
sep_cell,
|
||||
area.x.saturating_add(area.width),
|
||||
);
|
||||
|
||||
// Data rows.
|
||||
let data_start_y = area.y.saturating_add(2);
|
||||
let visible_rows = area.height.saturating_sub(2) as usize; // minus header + separator
|
||||
|
||||
state.adjust_scroll(visible_rows);
|
||||
|
||||
let start = state.scroll_offset;
|
||||
let end = (start + visible_rows).min(rows.len());
|
||||
|
||||
for (i, row) in rows[start..end].iter().enumerate() {
|
||||
let row_y = data_start_y.saturating_add(i as u16);
|
||||
let absolute_idx = start + i;
|
||||
let ctx = RowContext {
|
||||
columns,
|
||||
visible: &visible,
|
||||
is_selected: absolute_idx == state.selected,
|
||||
is_alt: absolute_idx % 2 == 1,
|
||||
colors,
|
||||
};
|
||||
|
||||
render_row(frame, row, row_y, area.x, &ctx);
|
||||
}
|
||||
|
||||
// Scroll indicator if more rows below.
|
||||
if end < rows.len() {
|
||||
let indicator_y = data_start_y.saturating_add(visible_rows as u16);
|
||||
if indicator_y < area.y.saturating_add(area.height) {
|
||||
let muted = Cell {
|
||||
fg: colors.border,
|
||||
..Cell::default()
|
||||
};
|
||||
let remaining = rows.len() - end;
|
||||
frame.print_text_clipped(
|
||||
area.x,
|
||||
indicator_y,
|
||||
&format!("... {remaining} more"),
|
||||
muted,
|
||||
area.x.saturating_add(area.width),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use ftui::render::grapheme_pool::GraphemePool;
|
||||
|
||||
macro_rules! with_frame {
|
||||
($width:expr, $height:expr, |$frame:ident| $body:block) => {{
|
||||
let mut pool = GraphemePool::new();
|
||||
let mut $frame = Frame::new($width, $height, &mut pool);
|
||||
$body
|
||||
}};
|
||||
}
|
||||
|
||||
fn test_columns() -> Vec<ColumnDef> {
|
||||
vec![
|
||||
ColumnDef {
|
||||
name: "IID",
|
||||
min_width: 5,
|
||||
flex_weight: 0,
|
||||
priority: 0,
|
||||
align: Align::Right,
|
||||
},
|
||||
ColumnDef {
|
||||
name: "Title",
|
||||
min_width: 10,
|
||||
flex_weight: 3,
|
||||
priority: 0,
|
||||
align: Align::Left,
|
||||
},
|
||||
ColumnDef {
|
||||
name: "State",
|
||||
min_width: 8,
|
||||
flex_weight: 1,
|
||||
priority: 1,
|
||||
align: Align::Left,
|
||||
},
|
||||
ColumnDef {
|
||||
name: "Author",
|
||||
min_width: 8,
|
||||
flex_weight: 1,
|
||||
priority: 2,
|
||||
align: Align::Left,
|
||||
},
|
||||
ColumnDef {
|
||||
name: "Updated",
|
||||
min_width: 10,
|
||||
flex_weight: 0,
|
||||
priority: 3,
|
||||
align: Align::Right,
|
||||
},
|
||||
]
|
||||
}
|
||||
|
||||
struct TestRow {
|
||||
cells: Vec<String>,
|
||||
}
|
||||
|
||||
impl TableRow for TestRow {
|
||||
fn cells(&self, _col_count: usize) -> Vec<String> {
|
||||
self.cells.clone()
|
||||
}
|
||||
}
|
||||
|
||||
fn test_colors() -> TableColors {
|
||||
TableColors {
|
||||
header_fg: PackedRgba::rgb(0xFF, 0xFF, 0xFF),
|
||||
header_bg: PackedRgba::rgb(0x30, 0x30, 0x30),
|
||||
row_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
|
||||
row_alt_bg: PackedRgba::rgb(0x28, 0x28, 0x24),
|
||||
selected_fg: PackedRgba::rgb(0xFF, 0xFF, 0xFF),
|
||||
selected_bg: PackedRgba::rgb(0xDA, 0x70, 0x2C),
|
||||
sort_indicator: PackedRgba::rgb(0xDA, 0x70, 0x2C),
|
||||
border: PackedRgba::rgb(0x87, 0x87, 0x80),
|
||||
}
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_visible_columns_all_fit() {
|
||||
let cols = test_columns();
|
||||
let vis = visible_columns(&cols, 100);
|
||||
assert_eq!(vis.len(), 5, "All 5 columns should fit at 100 cols");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_visible_columns_hides_low_priority() {
|
||||
let cols = test_columns();
|
||||
// min widths: 5 + 10 + 8 + 8 + 10 + 4 gaps = 45.
|
||||
// At 25 cols, only priority 0 columns (IID + Title) should fit.
|
||||
let vis = visible_columns(&cols, 25);
|
||||
let visible_indices: Vec<usize> = vis.iter().map(|(idx, _)| *idx).collect();
|
||||
assert!(visible_indices.contains(&0), "IID should always be visible");
|
||||
assert!(
|
||||
visible_indices.contains(&1),
|
||||
"Title should always be visible"
|
||||
);
|
||||
assert!(
|
||||
!visible_indices.contains(&4),
|
||||
"Updated (priority 3) should be hidden"
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn test_column_hiding_at_60_cols() {
|
||||
let cols = test_columns();
|
||||
let vis = visible_columns(&cols, 60);
|
||||
// min widths for priority 0,1,2: 5+10+8+8 + 3 gaps = 34.
|
||||
// Priority 3 (Updated, min 10 + gap) = 45 total, should still fit.
|
||||
assert!(vis.len() >= 3, "At least 3 columns at 60 cols");
|
||||
}
|
||||
|
||||
#[test]
fn test_state_select_next_prev() {
    let mut st = EntityTableState::default();
    st.select_next(5);
    assert_eq!(st.selected, 1);
    st.select_next(5);
    assert_eq!(st.selected, 2);
    st.select_prev();
    assert_eq!(st.selected, 1);
}

#[test]
fn test_state_select_bounds() {
    let mut st = EntityTableState::default();
    // Already at the first row; selection must not underflow.
    st.select_prev();
    assert_eq!(st.selected, 0);

    // Three advances against 3 rows clamp at the last index (2).
    st.select_next(3);
    st.select_next(3);
    st.select_next(3);
    assert_eq!(st.selected, 2);
}

#[test]
fn test_state_page_up_down() {
    let mut st = EntityTableState::default();
    st.page_down(20, 5);
    assert_eq!(st.selected, 5);
    st.page_up(3);
    assert_eq!(st.selected, 2);
}
|
||||
|
||||
#[test]
fn test_state_first_last() {
    let mut st = EntityTableState {
        selected: 5,
        ..Default::default()
    };
    st.select_first();
    assert_eq!(st.selected, 0);
    st.select_last(10);
    assert_eq!(st.selected, 9);
}

#[test]
fn test_state_cycle_sort() {
    let mut st = EntityTableState::default();
    assert_eq!(st.sort_column, 0);
    st.cycle_sort(5);
    assert_eq!(st.sort_column, 1);
    // From the final column, cycling wraps back to 0.
    st.sort_column = 4;
    st.cycle_sort(5);
    assert_eq!(st.sort_column, 0);
}

#[test]
fn test_state_toggle_sort_direction() {
    let mut st = EntityTableState::default();
    assert!(st.sort_ascending);
    st.toggle_sort_direction();
    assert!(!st.sort_ascending);
}
|
||||
|
||||
#[test]
fn test_state_adjust_scroll() {
    let mut st = EntityTableState {
        selected: 15,
        scroll_offset: 0,
        ..Default::default()
    };
    st.adjust_scroll(10);
    // Row 15 should land on the bottom line of a 10-row viewport.
    assert_eq!(st.scroll_offset, 6);
}
|
||||
|
||||
#[test]
fn test_render_entity_table_no_panic() {
    with_frame!(80, 20, |frame| {
        let columns = test_columns();
        let rows = vec![
            TestRow {
                cells: vec![
                    "#42".into(),
                    "Fix auth bug".into(),
                    "opened".into(),
                    "taylor".into(),
                    "2h ago".into(),
                ],
            },
            TestRow {
                cells: vec![
                    "#43".into(),
                    "Add tests".into(),
                    "merged".into(),
                    "alice".into(),
                    "1d ago".into(),
                ],
            },
        ];
        let mut table_state = EntityTableState::default();
        let palette = test_colors();

        // A full render over an 80x20 area must complete without panicking.
        render_entity_table(
            &mut frame,
            &rows,
            &columns,
            &mut table_state,
            Rect::new(0, 0, 80, 20),
            &palette,
        );
    });
}
|
||||
|
||||
#[test]
fn test_render_entity_table_tiny_noop() {
    with_frame!(3, 1, |frame| {
        // A 3x1 area is below the usable minimum; the call must no-op.
        let columns = test_columns();
        let rows: Vec<TestRow> = vec![];
        let mut table_state = EntityTableState::default();
        let palette = test_colors();

        render_entity_table(
            &mut frame,
            &rows,
            &columns,
            &mut table_state,
            Rect::new(0, 0, 3, 1),
            &palette,
        );
    });
}

#[test]
fn test_render_entity_table_empty_rows() {
    with_frame!(80, 10, |frame| {
        // Rendering with zero rows must not panic.
        let columns = test_columns();
        let rows: Vec<TestRow> = vec![];
        let mut table_state = EntityTableState::default();
        let palette = test_colors();

        render_entity_table(
            &mut frame,
            &rows,
            &columns,
            &mut table_state,
            Rect::new(0, 0, 80, 10),
            &palette,
        );
    });
}
|
||||
|
||||
#[test]
fn test_state_select_next_empty() {
    // With zero rows, advancing the selection is a no-op.
    let mut st = EntityTableState::default();
    st.select_next(0);
    assert_eq!(st.selected, 0);
}
|
||||
}
|
||||
132
crates/lore-tui/src/view/common/error_toast.rs
Normal file
132
crates/lore-tui/src/view/common/error_toast.rs
Normal file
@@ -0,0 +1,132 @@
|
||||
//! Floating error toast at bottom-right.
|
||||
|
||||
use ftui::core::geometry::Rect;
|
||||
use ftui::render::cell::{Cell, PackedRgba};
|
||||
use ftui::render::drawing::Draw;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
/// Render a floating error toast at the bottom-right of the area.
|
||||
///
|
||||
/// The toast has a colored background and truncates long messages.
|
||||
pub fn render_error_toast(
|
||||
frame: &mut Frame<'_>,
|
||||
area: Rect,
|
||||
msg: &str,
|
||||
error_bg: PackedRgba,
|
||||
error_fg: PackedRgba,
|
||||
) {
|
||||
if area.height < 3 || area.width < 10 || msg.is_empty() {
|
||||
return;
|
||||
}
|
||||
|
||||
// Toast dimensions: message + padding, max 60 chars or half screen.
|
||||
let max_toast_width = (area.width / 2).clamp(20, 60);
|
||||
let toast_text = if msg.len() as u16 > max_toast_width.saturating_sub(4) {
|
||||
let trunc_len = max_toast_width.saturating_sub(7) as usize;
|
||||
// Find a char boundary at or before trunc_len to avoid panicking
|
||||
// on multi-byte UTF-8 (e.g., emoji or CJK in error messages).
|
||||
let safe_end = msg
|
||||
.char_indices()
|
||||
.take_while(|&(i, _)| i <= trunc_len)
|
||||
.last()
|
||||
.map_or(0, |(i, c)| i + c.len_utf8())
|
||||
.min(msg.len());
|
||||
format!(" {}... ", &msg[..safe_end])
|
||||
} else {
|
||||
format!(" {msg} ")
|
||||
};
|
||||
let toast_width = toast_text.len() as u16;
|
||||
let toast_height: u16 = 1;
|
||||
|
||||
// Position: bottom-right with 1-cell margin.
|
||||
let x = area.right().saturating_sub(toast_width + 1);
|
||||
let y = area.bottom().saturating_sub(toast_height + 1);
|
||||
|
||||
let toast_rect = Rect::new(x, y, toast_width, toast_height);
|
||||
|
||||
// Fill background.
|
||||
let bg_cell = Cell {
|
||||
bg: error_bg,
|
||||
..Cell::default()
|
||||
};
|
||||
frame.draw_rect_filled(toast_rect, bg_cell);
|
||||
|
||||
// Render text.
|
||||
let text_cell = Cell {
|
||||
fg: error_fg,
|
||||
bg: error_bg,
|
||||
..Cell::default()
|
||||
};
|
||||
frame.print_text_clipped(x, y, &toast_text, text_cell, area.right());
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use ftui::render::grapheme_pool::GraphemePool;

    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    fn white() -> PackedRgba {
        PackedRgba::rgb(0xFF, 0xFF, 0xFF)
    }

    fn red_bg() -> PackedRgba {
        PackedRgba::rgb(0xFF, 0x00, 0x00)
    }

    #[test]
    fn test_error_toast_renders() {
        with_frame!(80, 24, |frame| {
            render_error_toast(
                &mut frame,
                Rect::new(0, 0, 80, 24),
                "Database is busy",
                red_bg(),
                white(),
            );

            // The toast sits on row 22 (bottom minus margin); scan the
            // right half of that row for any painted cell.
            let row = 22u16;
            let painted =
                (40..80u16).any(|col| !frame.buffer.get(col, row).unwrap().is_empty());
            assert!(painted, "Expected error toast at bottom-right");
        });
    }

    #[test]
    fn test_error_toast_empty_message_noop() {
        with_frame!(80, 24, |frame| {
            render_error_toast(&mut frame, Rect::new(0, 0, 80, 24), "", red_bg(), white());

            let painted = (0..80u16).any(|col| {
                (0..24u16).any(|row| !frame.buffer.get(col, row).unwrap().is_empty())
            });
            assert!(!painted, "Empty message should render nothing");
        });
    }

    #[test]
    fn test_error_toast_truncates_long_message() {
        with_frame!(80, 24, |frame| {
            // Must not panic while truncating a 200-char message.
            let long_msg = "A".repeat(200);
            render_error_toast(
                &mut frame,
                Rect::new(0, 0, 80, 24),
                &long_msg,
                red_bg(),
                white(),
            );
        });
    }
}
|
||||
469
crates/lore-tui/src/view/common/filter_bar.rs
Normal file
469
crates/lore-tui/src/view/common/filter_bar.rs
Normal file
@@ -0,0 +1,469 @@
|
||||
#![allow(dead_code)] // Phase 2: consumed by Issue List + MR List screens
|
||||
|
||||
//! Filter bar widget for list screens.
|
||||
//!
|
||||
//! Wraps a text input with DSL parsing, inline diagnostics for unknown
|
||||
//! fields, and rendered filter chips below the input.
|
||||
|
||||
use ftui::core::geometry::Rect;
|
||||
use ftui::render::cell::{Cell, PackedRgba};
|
||||
use ftui::render::drawing::Draw;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
use crate::filter_dsl::{self, FilterToken};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Filter bar state
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// State for the filter bar widget.
///
/// Owns the raw input text plus the parse results derived from it.
/// Callers must invoke `reparse` after mutating `input` to keep
/// `tokens` and `unknown_fields` in sync.
#[derive(Debug, Clone, Default)]
pub struct FilterBarState {
    /// Current filter input text.
    pub input: String,
    /// Cursor position within the input string (byte offset, kept on a
    /// char boundary by the editing methods).
    pub cursor: usize,
    /// Whether the filter bar has focus.
    pub focused: bool,
    /// Parsed tokens from the current input.
    pub tokens: Vec<FilterToken>,
    /// Fields that are unknown for the current entity type.
    pub unknown_fields: Vec<String>,
}
|
||||
|
||||
impl FilterBarState {
|
||||
/// Update parsed tokens from the current input text.
|
||||
pub fn reparse(&mut self, known_fields: &[&str]) {
|
||||
self.tokens = filter_dsl::parse_filter_tokens(&self.input);
|
||||
self.unknown_fields = filter_dsl::unknown_fields(&self.tokens, known_fields)
|
||||
.into_iter()
|
||||
.map(String::from)
|
||||
.collect();
|
||||
}
|
||||
|
||||
/// Insert a character at the cursor position.
|
||||
pub fn insert_char(&mut self, ch: char) {
|
||||
if self.cursor > self.input.len() {
|
||||
self.cursor = self.input.len();
|
||||
}
|
||||
self.input.insert(self.cursor, ch);
|
||||
self.cursor += ch.len_utf8();
|
||||
}
|
||||
|
||||
/// Delete the character before the cursor (backspace).
|
||||
pub fn delete_back(&mut self) {
|
||||
if self.cursor > 0 && !self.input.is_empty() {
|
||||
// Find the previous character boundary.
|
||||
let prev = self.input[..self.cursor]
|
||||
.char_indices()
|
||||
.next_back()
|
||||
.map(|(i, _)| i)
|
||||
.unwrap_or(0);
|
||||
self.input.remove(prev);
|
||||
self.cursor = prev;
|
||||
}
|
||||
}
|
||||
|
||||
/// Delete the character at the cursor (delete key).
|
||||
pub fn delete_forward(&mut self) {
|
||||
if self.cursor < self.input.len() {
|
||||
self.input.remove(self.cursor);
|
||||
}
|
||||
}
|
||||
|
||||
/// Move cursor left by one character.
|
||||
pub fn move_left(&mut self) {
|
||||
if self.cursor > 0 {
|
||||
self.cursor = self.input[..self.cursor]
|
||||
.char_indices()
|
||||
.next_back()
|
||||
.map(|(i, _)| i)
|
||||
.unwrap_or(0);
|
||||
}
|
||||
}
|
||||
|
||||
/// Move cursor right by one character.
|
||||
pub fn move_right(&mut self) {
|
||||
if self.cursor < self.input.len() {
|
||||
self.cursor = self.input[self.cursor..]
|
||||
.chars()
|
||||
.next()
|
||||
.map(|ch| self.cursor + ch.len_utf8())
|
||||
.unwrap_or(self.input.len());
|
||||
}
|
||||
}
|
||||
|
||||
/// Move cursor to start.
|
||||
pub fn move_home(&mut self) {
|
||||
self.cursor = 0;
|
||||
}
|
||||
|
||||
/// Move cursor to end.
|
||||
pub fn move_end(&mut self) {
|
||||
self.cursor = self.input.len();
|
||||
}
|
||||
|
||||
/// Clear the input.
|
||||
pub fn clear(&mut self) {
|
||||
self.input.clear();
|
||||
self.cursor = 0;
|
||||
self.tokens.clear();
|
||||
self.unknown_fields.clear();
|
||||
}
|
||||
|
||||
/// Whether the filter has any active tokens.
|
||||
pub fn is_active(&self) -> bool {
|
||||
!self.tokens.is_empty()
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Colors
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Colors for the filter bar.
///
/// Callers supply concrete theme colors; the widget never picks its own.
pub struct FilterBarColors {
    /// Foreground for the typed input text.
    pub input_fg: PackedRgba,
    /// Background behind the input while focused.
    pub input_bg: PackedRgba,
    /// Foreground of the cell under the cursor.
    pub cursor_fg: PackedRgba,
    /// Background of the cell under the cursor.
    pub cursor_bg: PackedRgba,
    /// Foreground for rendered filter chips.
    pub chip_fg: PackedRgba,
    /// Background for rendered filter chips.
    pub chip_bg: PackedRgba,
    /// Color for the unknown-field diagnostics text.
    pub error_fg: PackedRgba,
    /// Color for the prompt label and placeholder hint.
    pub label_fg: PackedRgba,
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Render
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Render the filter bar.
|
||||
///
|
||||
/// Layout:
|
||||
/// ```text
|
||||
/// Row 0: [Filter: ][input text with cursor___________]
|
||||
/// Row 1: [chip1] [chip2] [chip3] (if tokens present)
|
||||
/// ```
|
||||
///
|
||||
/// Returns the number of rows consumed (1 or 2).
|
||||
pub fn render_filter_bar(
|
||||
frame: &mut Frame<'_>,
|
||||
state: &FilterBarState,
|
||||
area: Rect,
|
||||
colors: &FilterBarColors,
|
||||
) -> u16 {
|
||||
if area.height == 0 || area.width < 10 {
|
||||
return 0;
|
||||
}
|
||||
|
||||
let max_x = area.x.saturating_add(area.width);
|
||||
let y = area.y;
|
||||
|
||||
// Label.
|
||||
let label = if state.focused { "Filter: " } else { "/ " };
|
||||
let label_cell = Cell {
|
||||
fg: colors.label_fg,
|
||||
..Cell::default()
|
||||
};
|
||||
let after_label = frame.print_text_clipped(area.x, y, label, label_cell, max_x);
|
||||
|
||||
// Input text.
|
||||
let input_cell = Cell {
|
||||
fg: colors.input_fg,
|
||||
bg: if state.focused {
|
||||
colors.input_bg
|
||||
} else {
|
||||
Cell::default().bg
|
||||
},
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
if state.input.is_empty() && !state.focused {
|
||||
let muted = Cell {
|
||||
fg: colors.label_fg,
|
||||
..Cell::default()
|
||||
};
|
||||
frame.print_text_clipped(after_label, y, "type / to filter", muted, max_x);
|
||||
} else {
|
||||
// Render input text with cursor highlight.
|
||||
render_input_with_cursor(frame, state, after_label, y, max_x, input_cell, colors);
|
||||
}
|
||||
|
||||
// Error indicators for unknown fields.
|
||||
if !state.unknown_fields.is_empty() {
|
||||
let err_cell = Cell {
|
||||
fg: colors.error_fg,
|
||||
..Cell::default()
|
||||
};
|
||||
let err_msg = format!("Unknown: {}", state.unknown_fields.join(", "));
|
||||
// Right-align the error.
|
||||
let err_x = max_x.saturating_sub(err_msg.len() as u16 + 1);
|
||||
frame.print_text_clipped(err_x, y, &err_msg, err_cell, max_x);
|
||||
}
|
||||
|
||||
// Chip row (if tokens present and space available).
|
||||
if !state.tokens.is_empty() && area.height >= 2 {
|
||||
let chip_y = y.saturating_add(1);
|
||||
render_chips(frame, &state.tokens, area.x, chip_y, max_x, colors);
|
||||
return 2;
|
||||
}
|
||||
|
||||
1
|
||||
}
|
||||
|
||||
/// Render input text with cursor highlight at the correct position.
|
||||
fn render_input_with_cursor(
|
||||
frame: &mut Frame<'_>,
|
||||
state: &FilterBarState,
|
||||
start_x: u16,
|
||||
y: u16,
|
||||
max_x: u16,
|
||||
base_cell: Cell,
|
||||
colors: &FilterBarColors,
|
||||
) {
|
||||
if !state.focused {
|
||||
frame.print_text_clipped(start_x, y, &state.input, base_cell, max_x);
|
||||
return;
|
||||
}
|
||||
|
||||
// Split at cursor position.
|
||||
let cursor = state.cursor;
|
||||
let input = &state.input;
|
||||
let (before, after) = if cursor <= input.len() {
|
||||
(&input[..cursor], &input[cursor..])
|
||||
} else {
|
||||
(input.as_str(), "")
|
||||
};
|
||||
|
||||
let mut x = frame.print_text_clipped(start_x, y, before, base_cell, max_x);
|
||||
|
||||
// Cursor character (or space if at end).
|
||||
let cursor_cell = Cell {
|
||||
fg: colors.cursor_fg,
|
||||
bg: colors.cursor_bg,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
if let Some(ch) = after.chars().next() {
|
||||
let s = String::from(ch);
|
||||
x = frame.print_text_clipped(x, y, &s, cursor_cell, max_x);
|
||||
let remaining = &after[ch.len_utf8()..];
|
||||
frame.print_text_clipped(x, y, remaining, base_cell, max_x);
|
||||
} else {
|
||||
// Cursor at end — render a visible block.
|
||||
frame.print_text_clipped(x, y, " ", cursor_cell, max_x);
|
||||
}
|
||||
}
|
||||
|
||||
/// Render filter chips as compact tags.
|
||||
fn render_chips(
|
||||
frame: &mut Frame<'_>,
|
||||
tokens: &[FilterToken],
|
||||
start_x: u16,
|
||||
y: u16,
|
||||
max_x: u16,
|
||||
colors: &FilterBarColors,
|
||||
) {
|
||||
let chip_cell = Cell {
|
||||
fg: colors.chip_fg,
|
||||
bg: colors.chip_bg,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
let mut x = start_x;
|
||||
|
||||
for token in tokens {
|
||||
if x >= max_x {
|
||||
break;
|
||||
}
|
||||
|
||||
let label = match token {
|
||||
FilterToken::FieldValue { field, value } => format!("{field}:{value}"),
|
||||
FilterToken::Negation { field, value } => format!("-{field}:{value}"),
|
||||
FilterToken::FreeText(text) => text.clone(),
|
||||
FilterToken::QuotedValue(text) => format!("\"{text}\""),
|
||||
};
|
||||
|
||||
let chip_text = format!("[{label}]");
|
||||
x = frame.print_text_clipped(x, y, &chip_text, chip_cell, max_x);
|
||||
x = x.saturating_add(1); // gap between chips
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::filter_dsl::ISSUE_FIELDS;
    use ftui::render::grapheme_pool::GraphemePool;

    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    fn test_colors() -> FilterBarColors {
        FilterBarColors {
            input_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
            input_bg: PackedRgba::rgb(0x28, 0x28, 0x24),
            cursor_fg: PackedRgba::rgb(0x00, 0x00, 0x00),
            cursor_bg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
            chip_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
            chip_bg: PackedRgba::rgb(0x40, 0x40, 0x3C),
            error_fg: PackedRgba::rgb(0xAF, 0x3A, 0x29),
            label_fg: PackedRgba::rgb(0x87, 0x87, 0x80),
        }
    }

    #[test]
    fn test_filter_bar_state_insert_char() {
        let mut st = FilterBarState::default();
        st.insert_char('a');
        st.insert_char('b');
        assert_eq!(st.input, "ab");
        assert_eq!(st.cursor, 2);
    }

    #[test]
    fn test_filter_bar_state_delete_back() {
        let mut st = FilterBarState {
            input: "abc".into(),
            cursor: 3,
            ..Default::default()
        };
        st.delete_back();
        assert_eq!(st.input, "ab");
        assert_eq!(st.cursor, 2);
    }

    #[test]
    fn test_filter_bar_state_delete_back_at_start() {
        // Backspace at offset 0 must be a no-op.
        let mut st = FilterBarState {
            input: "abc".into(),
            cursor: 0,
            ..Default::default()
        };
        st.delete_back();
        assert_eq!(st.input, "abc");
        assert_eq!(st.cursor, 0);
    }

    #[test]
    fn test_filter_bar_state_move_left_right() {
        let mut st = FilterBarState {
            input: "abc".into(),
            cursor: 2,
            ..Default::default()
        };
        st.move_left();
        assert_eq!(st.cursor, 1);
        st.move_right();
        assert_eq!(st.cursor, 2);
    }

    #[test]
    fn test_filter_bar_state_home_end() {
        let mut st = FilterBarState {
            input: "hello".into(),
            cursor: 3,
            ..Default::default()
        };
        st.move_home();
        assert_eq!(st.cursor, 0);
        st.move_end();
        assert_eq!(st.cursor, 5);
    }

    #[test]
    fn test_filter_bar_state_clear() {
        let mut st = FilterBarState {
            input: "state:opened".into(),
            cursor: 12,
            tokens: vec![FilterToken::FieldValue {
                field: "state".into(),
                value: "opened".into(),
            }],
            ..Default::default()
        };
        st.clear();
        assert!(st.input.is_empty());
        assert_eq!(st.cursor, 0);
        assert!(st.tokens.is_empty());
    }

    #[test]
    fn test_filter_bar_state_reparse() {
        let mut st = FilterBarState {
            input: "state:opened bogus:val".into(),
            ..Default::default()
        };
        st.reparse(ISSUE_FIELDS);
        assert_eq!(st.tokens.len(), 2);
        assert_eq!(st.unknown_fields, vec!["bogus"]);
    }

    #[test]
    fn test_filter_bar_state_is_active() {
        let mut st = FilterBarState::default();
        assert!(!st.is_active());

        st.input = "state:opened".into();
        st.reparse(ISSUE_FIELDS);
        assert!(st.is_active());
    }

    #[test]
    fn test_render_filter_bar_unfocused_no_panic() {
        with_frame!(80, 2, |frame| {
            let st = FilterBarState::default();
            let colors = test_colors();
            let rows = render_filter_bar(&mut frame, &st, Rect::new(0, 0, 80, 2), &colors);
            assert_eq!(rows, 1);
        });
    }

    #[test]
    fn test_render_filter_bar_focused_no_panic() {
        with_frame!(80, 2, |frame| {
            let mut st = FilterBarState {
                input: "state:opened".into(),
                cursor: 12,
                focused: true,
                ..Default::default()
            };
            st.reparse(ISSUE_FIELDS);
            let colors = test_colors();
            let rows = render_filter_bar(&mut frame, &st, Rect::new(0, 0, 80, 2), &colors);
            assert_eq!(rows, 2); // chip row rendered
        });
    }

    #[test]
    fn test_render_filter_bar_tiny_noop() {
        with_frame!(5, 1, |frame| {
            let st = FilterBarState::default();
            let colors = test_colors();
            let rows = render_filter_bar(&mut frame, &st, Rect::new(0, 0, 5, 1), &colors);
            assert_eq!(rows, 0);
        });
    }

    #[test]
    fn test_filter_bar_unicode_cursor() {
        // "author:" is 7 bytes; each CJK char that follows is 3 bytes.
        let mut st = FilterBarState {
            input: "author:田中".into(),
            cursor: 7, // points at start of 田
            ..Default::default()
        };
        st.move_right();
        assert_eq!(st.cursor, 10); // past 田 (3 bytes)
        st.move_left();
        assert_eq!(st.cursor, 7); // back to 田
    }
}
|
||||
173
crates/lore-tui/src/view/common/help_overlay.rs
Normal file
173
crates/lore-tui/src/view/common/help_overlay.rs
Normal file
@@ -0,0 +1,173 @@
|
||||
//! Centered modal listing keybindings for the current screen.
|
||||
|
||||
use ftui::core::geometry::Rect;
|
||||
use ftui::render::cell::{Cell, PackedRgba};
|
||||
use ftui::render::drawing::Draw;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
use crate::commands::CommandRegistry;
|
||||
use crate::message::Screen;
|
||||
|
||||
/// Render a centered help overlay listing keybindings for the current screen.
|
||||
///
|
||||
/// The overlay is a bordered modal that lists all commands from the
|
||||
/// registry that are available on the current screen.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn render_help_overlay(
|
||||
frame: &mut Frame<'_>,
|
||||
area: Rect,
|
||||
registry: &CommandRegistry,
|
||||
screen: &Screen,
|
||||
border_color: PackedRgba,
|
||||
text_color: PackedRgba,
|
||||
muted_color: PackedRgba,
|
||||
scroll_offset: usize,
|
||||
) {
|
||||
if area.height < 5 || area.width < 20 {
|
||||
return;
|
||||
}
|
||||
|
||||
// Overlay dimensions: 60% of screen, capped.
|
||||
let overlay_width = (area.width * 3 / 5).clamp(30, 70);
|
||||
let overlay_height = (area.height * 3 / 5).clamp(8, 30);
|
||||
|
||||
let overlay_x = area.x + (area.width.saturating_sub(overlay_width)) / 2;
|
||||
let overlay_y = area.y + (area.height.saturating_sub(overlay_height)) / 2;
|
||||
let overlay_rect = Rect::new(overlay_x, overlay_y, overlay_width, overlay_height);
|
||||
|
||||
// Draw border.
|
||||
let border_cell = Cell {
|
||||
fg: border_color,
|
||||
..Cell::default()
|
||||
};
|
||||
frame.draw_border(
|
||||
overlay_rect,
|
||||
ftui::render::drawing::BorderChars::ROUNDED,
|
||||
border_cell,
|
||||
);
|
||||
|
||||
// Title.
|
||||
let title = " Help (? to close) ";
|
||||
let title_x = overlay_x + (overlay_width.saturating_sub(title.len() as u16)) / 2;
|
||||
let title_cell = Cell {
|
||||
fg: border_color,
|
||||
..Cell::default()
|
||||
};
|
||||
frame.print_text_clipped(title_x, overlay_y, title, title_cell, overlay_rect.right());
|
||||
|
||||
// Inner content area (inside border).
|
||||
let inner = Rect::new(
|
||||
overlay_x + 2,
|
||||
overlay_y + 1,
|
||||
overlay_width.saturating_sub(4),
|
||||
overlay_height.saturating_sub(2),
|
||||
);
|
||||
|
||||
// Get commands for this screen.
|
||||
let commands = registry.help_entries(screen);
|
||||
let visible_lines = inner.height as usize;
|
||||
|
||||
let key_cell = Cell {
|
||||
fg: text_color,
|
||||
..Cell::default()
|
||||
};
|
||||
let desc_cell = Cell {
|
||||
fg: muted_color,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
for (i, cmd) in commands.iter().skip(scroll_offset).enumerate() {
|
||||
if i >= visible_lines {
|
||||
break;
|
||||
}
|
||||
let y = inner.y + i as u16;
|
||||
|
||||
// Key binding label (left).
|
||||
let key_label = cmd
|
||||
.keybinding
|
||||
.as_ref()
|
||||
.map_or_else(String::new, |kb| kb.display());
|
||||
let label_end = frame.print_text_clipped(inner.x, y, &key_label, key_cell, inner.right());
|
||||
|
||||
// Spacer + description (right).
|
||||
let desc_x = label_end.saturating_add(2);
|
||||
if desc_x < inner.right() {
|
||||
frame.print_text_clipped(desc_x, y, cmd.help_text, desc_cell, inner.right());
|
||||
}
|
||||
}
|
||||
|
||||
// Scroll indicator if needed.
|
||||
if commands.len() > visible_lines + scroll_offset {
|
||||
let indicator = format!("({}/{})", scroll_offset + visible_lines, commands.len());
|
||||
let ind_x = inner.right().saturating_sub(indicator.len() as u16);
|
||||
let ind_y = overlay_rect.bottom().saturating_sub(1);
|
||||
frame.print_text_clipped(ind_x, ind_y, &indicator, desc_cell, overlay_rect.right());
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::commands::build_registry;
    use crate::message::Screen;
    use ftui::render::grapheme_pool::GraphemePool;

    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    fn white() -> PackedRgba {
        PackedRgba::rgb(0xFF, 0xFF, 0xFF)
    }

    fn gray() -> PackedRgba {
        PackedRgba::rgb(0x80, 0x80, 0x80)
    }

    #[test]
    fn test_help_overlay_renders_border() {
        with_frame!(80, 24, |frame| {
            let registry = build_registry();
            render_help_overlay(
                &mut frame,
                Rect::new(0, 0, 80, 24),
                &registry,
                &Screen::Dashboard,
                gray(),
                white(),
                gray(),
                0,
            );

            // The modal occupies the center; some cell there must be painted.
            let painted = (20..60u16)
                .any(|x| (8..16u16).any(|y| !frame.buffer.get(x, y).unwrap().is_empty()));
            assert!(painted, "Expected help overlay in center area");
        });
    }

    #[test]
    fn test_help_overlay_tiny_terminal_noop() {
        with_frame!(15, 4, |frame| {
            // Below the 20x5 minimum — must return without drawing or panicking.
            let registry = build_registry();
            render_help_overlay(
                &mut frame,
                Rect::new(0, 0, 15, 4),
                &registry,
                &Screen::Dashboard,
                gray(),
                white(),
                gray(),
                0,
            );
        });
    }
}
|
||||
179
crates/lore-tui/src/view/common/loading.rs
Normal file
179
crates/lore-tui/src/view/common/loading.rs
Normal file
@@ -0,0 +1,179 @@
|
||||
//! Loading spinner indicators (full-screen and corner).
|
||||
|
||||
use ftui::core::geometry::Rect;
|
||||
use ftui::render::cell::{Cell, PackedRgba};
|
||||
use ftui::render::drawing::Draw;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
use crate::state::LoadState;
|
||||
|
||||
/// Braille spinner frames for loading animation.
const SPINNER_FRAMES: &[char] = &['⠋', '⠙', '⠹', '⠸', '⠼', '⠴', '⠦', '⠧', '⠇', '⠏'];

/// Select spinner frame from tick count.
///
/// Reduces the tick modulo the frame count in `u64` *before* narrowing,
/// so the cycle is correct even where `usize` is 32 bits (the previous
/// `tick as usize` cast truncated large ticks first, skewing the result).
#[must_use]
pub(crate) fn spinner_char(tick: u64) -> char {
    SPINNER_FRAMES[(tick % SPINNER_FRAMES.len() as u64) as usize]
}
|
||||
|
||||
/// Render a loading indicator.
|
||||
///
|
||||
/// - `LoadingInitial`: centered full-screen spinner with "Loading..."
|
||||
/// - `Refreshing`: subtle spinner in top-right corner
|
||||
/// - Other states: no-op
|
||||
pub fn render_loading(
|
||||
frame: &mut Frame<'_>,
|
||||
area: Rect,
|
||||
load_state: &LoadState,
|
||||
text_color: PackedRgba,
|
||||
muted_color: PackedRgba,
|
||||
tick: u64,
|
||||
) {
|
||||
match load_state {
|
||||
LoadState::LoadingInitial => {
|
||||
render_centered_spinner(frame, area, "Loading...", text_color, tick);
|
||||
}
|
||||
LoadState::Refreshing => {
|
||||
render_corner_spinner(frame, area, muted_color, tick);
|
||||
}
|
||||
_ => {}
|
||||
}
|
||||
}
|
||||
|
||||
/// Render a centered spinner with message.
|
||||
fn render_centered_spinner(
|
||||
frame: &mut Frame<'_>,
|
||||
area: Rect,
|
||||
message: &str,
|
||||
color: PackedRgba,
|
||||
tick: u64,
|
||||
) {
|
||||
if area.height == 0 || area.width < 5 {
|
||||
return;
|
||||
}
|
||||
|
||||
let spinner = spinner_char(tick);
|
||||
let text = format!("{spinner} {message}");
|
||||
let text_len = text.len() as u16;
|
||||
|
||||
// Center horizontally and vertically.
|
||||
let x = area
|
||||
.x
|
||||
.saturating_add(area.width.saturating_sub(text_len) / 2);
|
||||
let y = area.y.saturating_add(area.height / 2);
|
||||
|
||||
let cell = Cell {
|
||||
fg: color,
|
||||
..Cell::default()
|
||||
};
|
||||
frame.print_text_clipped(x, y, &text, cell, area.right());
|
||||
}
|
||||
|
||||
/// Render a subtle spinner in the top-right corner.
|
||||
fn render_corner_spinner(frame: &mut Frame<'_>, area: Rect, color: PackedRgba, tick: u64) {
|
||||
if area.width < 2 || area.height == 0 {
|
||||
return;
|
||||
}
|
||||
|
||||
let spinner = spinner_char(tick);
|
||||
let x = area.right().saturating_sub(2);
|
||||
let y = area.y;
|
||||
|
||||
let cell = Cell {
|
||||
fg: color,
|
||||
..Cell::default()
|
||||
};
|
||||
frame.print_text_clipped(x, y, &spinner.to_string(), cell, area.right());
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use ftui::render::grapheme_pool::GraphemePool;

    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    fn white() -> PackedRgba {
        PackedRgba::rgb(0xFF, 0xFF, 0xFF)
    }

    fn gray() -> PackedRgba {
        PackedRgba::rgb(0x80, 0x80, 0x80)
    }

    #[test]
    fn test_loading_initial_renders_spinner() {
        with_frame!(80, 24, |frame| {
            render_loading(
                &mut frame,
                Rect::new(0, 0, 80, 24),
                &LoadState::LoadingInitial,
                white(),
                gray(),
                0,
            );

            // The spinner line is vertically centered (row 12 of 24).
            let painted =
                (0..80u16).any(|x| !frame.buffer.get(x, 12u16).unwrap().is_empty());
            assert!(painted, "Expected loading spinner at center row");
        });
    }

    #[test]
    fn test_loading_refreshing_renders_corner() {
        with_frame!(80, 24, |frame| {
            render_loading(
                &mut frame,
                Rect::new(0, 0, 80, 24),
                &LoadState::Refreshing,
                white(),
                gray(),
                0,
            );

            // Corner spinner lands two cells in from the right edge, row 0.
            assert!(
                !frame.buffer.get(78, 0).unwrap().is_empty(),
                "Expected corner spinner"
            );
        });
    }

    #[test]
    fn test_loading_idle_noop() {
        with_frame!(80, 24, |frame| {
            render_loading(
                &mut frame,
                Rect::new(0, 0, 80, 24),
                &LoadState::Idle,
                white(),
                gray(),
                0,
            );

            let painted = (0..80u16).any(|x| {
                (0..24u16).any(|y| !frame.buffer.get(x, y).unwrap().is_empty())
            });
            assert!(!painted, "Idle state should render nothing");
        });
    }

    #[test]
    fn test_spinner_animation_cycles() {
        let first = spinner_char(0);
        let second = spinner_char(1);
        let wrapped = spinner_char(SPINNER_FRAMES.len() as u64);

        assert_ne!(first, second, "Adjacent frames should differ");
        assert_eq!(first, wrapped, "Should wrap around");
    }
}
|
||||
28
crates/lore-tui/src/view/common/mod.rs
Normal file
28
crates/lore-tui/src/view/common/mod.rs
Normal file
@@ -0,0 +1,28 @@
|
||||
//! Common widgets shared across all TUI screens.
|
||||
//!
|
||||
//! Each widget is a pure rendering function — writes directly into the
|
||||
//! [`Frame`] buffer using ftui's `Draw` trait. No state mutation,
|
||||
//! no side effects.
|
||||
|
||||
mod breadcrumb;
|
||||
pub mod cross_ref;
|
||||
pub mod discussion_tree;
|
||||
pub mod entity_table;
|
||||
mod error_toast;
|
||||
pub mod filter_bar;
|
||||
mod help_overlay;
|
||||
mod loading;
|
||||
mod status_bar;
|
||||
|
||||
pub use breadcrumb::render_breadcrumb;
|
||||
pub use cross_ref::{CrossRef, CrossRefColors, CrossRefKind, CrossRefState, render_cross_refs};
|
||||
pub use discussion_tree::{
|
||||
DiscussionNode, DiscussionTreeColors, DiscussionTreeState, NoteNode, format_relative_time,
|
||||
render_discussion_tree,
|
||||
};
|
||||
pub use entity_table::{ColumnDef, EntityTableState, TableColors, TableRow, render_entity_table};
|
||||
pub use error_toast::render_error_toast;
|
||||
pub use filter_bar::{FilterBarColors, FilterBarState, render_filter_bar};
|
||||
pub use help_overlay::render_help_overlay;
|
||||
pub use loading::render_loading;
|
||||
pub use status_bar::render_status_bar;
|
||||
173
crates/lore-tui/src/view/common/status_bar.rs
Normal file
173
crates/lore-tui/src/view/common/status_bar.rs
Normal file
@@ -0,0 +1,173 @@
|
||||
//! Bottom status bar with key hints and mode indicator.
|
||||
|
||||
use ftui::core::geometry::Rect;
|
||||
use ftui::render::cell::{Cell, PackedRgba};
|
||||
use ftui::render::drawing::Draw;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
use crate::commands::CommandRegistry;
|
||||
use crate::message::{InputMode, Screen};
|
||||
|
||||
/// Render the bottom status bar with key hints and mode indicator.
|
||||
///
|
||||
/// Layout: `[mode] ─── [key hints]`
|
||||
///
|
||||
/// Key hints are sourced from the [`CommandRegistry`] filtered to the
|
||||
/// current screen, showing only the most important bindings.
|
||||
#[allow(clippy::too_many_arguments)]
|
||||
pub fn render_status_bar(
|
||||
frame: &mut Frame<'_>,
|
||||
area: Rect,
|
||||
registry: &CommandRegistry,
|
||||
screen: &Screen,
|
||||
mode: &InputMode,
|
||||
bar_bg: PackedRgba,
|
||||
text_color: PackedRgba,
|
||||
accent_color: PackedRgba,
|
||||
) {
|
||||
if area.height == 0 || area.width < 5 {
|
||||
return;
|
||||
}
|
||||
|
||||
// Fill the bar background.
|
||||
let bg_cell = Cell {
|
||||
bg: bar_bg,
|
||||
..Cell::default()
|
||||
};
|
||||
frame.draw_rect_filled(area, bg_cell);
|
||||
|
||||
let mode_label = match mode {
|
||||
InputMode::Normal => "NORMAL",
|
||||
InputMode::Text => "INPUT",
|
||||
InputMode::Palette => "PALETTE",
|
||||
InputMode::GoPrefix { .. } => "g...",
|
||||
};
|
||||
|
||||
// Left side: mode indicator.
|
||||
let mode_cell = Cell {
|
||||
fg: accent_color,
|
||||
bg: bar_bg,
|
||||
..Cell::default()
|
||||
};
|
||||
let mut x = frame.print_text_clipped(
|
||||
area.x.saturating_add(1),
|
||||
area.y,
|
||||
mode_label,
|
||||
mode_cell,
|
||||
area.right(),
|
||||
);
|
||||
|
||||
// Spacer.
|
||||
x = x.saturating_add(2);
|
||||
|
||||
// Right side: key hints from registry (formatted as "key:action").
|
||||
let hints = registry.status_hints(screen);
|
||||
let hint_cell = Cell {
|
||||
fg: text_color,
|
||||
bg: bar_bg,
|
||||
..Cell::default()
|
||||
};
|
||||
let key_cell = Cell {
|
||||
fg: accent_color,
|
||||
bg: bar_bg,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
for hint in &hints {
|
||||
if x >= area.right().saturating_sub(1) {
|
||||
break;
|
||||
}
|
||||
// Split "q:quit" into key part and description part.
|
||||
if let Some((key_part, desc_part)) = hint.split_once(':') {
|
||||
x = frame.print_text_clipped(x, area.y, key_part, key_cell, area.right());
|
||||
x = frame.print_text_clipped(x, area.y, ":", hint_cell, area.right());
|
||||
x = frame.print_text_clipped(x, area.y, desc_part, hint_cell, area.right());
|
||||
} else {
|
||||
x = frame.print_text_clipped(x, area.y, hint, hint_cell, area.right());
|
||||
}
|
||||
x = x.saturating_add(2);
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::commands::build_registry;
    use crate::message::Screen;
    use ftui::render::grapheme_pool::GraphemePool;

    /// Build a scratch frame of the given dimensions and run `$body` with it.
    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    fn white() -> PackedRgba {
        PackedRgba::rgb(0xFF, 0xFF, 0xFF)
    }

    fn gray() -> PackedRgba {
        PackedRgba::rgb(0x80, 0x80, 0x80)
    }

    /// Render the bar into an 80x1 frame for the given screen/mode and
    /// return the character at (1, 0), where the mode label begins.
    fn first_mode_char(screen: Screen, mode: InputMode) -> Option<char> {
        with_frame!(80, 1, |frame| {
            let registry = build_registry();
            render_status_bar(
                &mut frame,
                Rect::new(0, 0, 80, 1),
                &registry,
                &screen,
                &mode,
                gray(),
                white(),
                white(),
            );
            frame.buffer.get(1, 0).unwrap().content.as_char()
        })
    }

    #[test]
    fn test_status_bar_renders_mode() {
        // "NORMAL" starts with 'N'.
        assert_eq!(
            first_mode_char(Screen::Dashboard, InputMode::Normal),
            Some('N')
        );
    }

    #[test]
    fn test_status_bar_text_mode() {
        // "INPUT" starts with 'I'.
        assert_eq!(first_mode_char(Screen::Search, InputMode::Text), Some('I'));
    }

    #[test]
    fn test_status_bar_narrow_terminal() {
        with_frame!(4, 1, |frame| {
            let registry = build_registry();
            render_status_bar(
                &mut frame,
                Rect::new(0, 0, 4, 1),
                &registry,
                &Screen::Dashboard,
                &InputMode::Normal,
                gray(),
                white(),
                white(),
            );
            // Below the minimum width the bar draws nothing at all.
            assert!(frame.buffer.get(0, 0).unwrap().is_empty());
        });
    }
}
|
||||
554
crates/lore-tui/src/view/dashboard.rs
Normal file
554
crates/lore-tui/src/view/dashboard.rs
Normal file
@@ -0,0 +1,554 @@
|
||||
#![allow(dead_code)] // Phase 2: wired into render_screen dispatch
|
||||
|
||||
//! Dashboard screen view — entity counts, project sync status, recent activity.
|
||||
//!
|
||||
//! Responsive layout using [`crate::layout::classify_width`]:
|
||||
//! - Wide (Lg/Xl, >=120 cols): 3-column `[Stats | Projects | Recent]`
|
||||
//! - Medium (Md, 90–119): 2-column `[Stats+Projects | Recent]`
|
||||
//! - Narrow (Xs/Sm, <90): single column stacked
|
||||
|
||||
use ftui::core::geometry::Rect;
|
||||
use ftui::layout::{Breakpoint, Constraint, Flex};
|
||||
use ftui::render::cell::{Cell, PackedRgba};
|
||||
use ftui::render::drawing::Draw;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
use crate::layout::classify_width;
|
||||
use crate::state::dashboard::{DashboardState, EntityCounts, LastSyncInfo, RecentActivityItem};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Colors (Flexoki palette — will use injected Theme in a later phase)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Flexoki dark-theme palette. Hard-coded for now; a later phase will
// source these from an injected `Theme` (see module docs above).
const TEXT: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3); // tx — primary text
const TEXT_MUTED: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2 — secondary text
const ACCENT: PackedRgba = PackedRgba::rgb(0xDA, 0x70, 0x2C); // orange — panel titles
const GREEN: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39); // green — fresh sync / succeeded
const YELLOW: PackedRgba = PackedRgba::rgb(0xD0, 0xA2, 0x15); // yellow — aging sync
const RED: PackedRgba = PackedRgba::rgb(0xAF, 0x3A, 0x29); // red — stale / failed
const CYAN: PackedRgba = PackedRgba::rgb(0x3A, 0xA9, 0x9F); // cyan — issue references
const BORDER: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2 — separator rules
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Public entry point
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Render the full dashboard screen into `area`.
|
||||
pub fn render_dashboard(frame: &mut Frame<'_>, state: &DashboardState, area: Rect) {
|
||||
if area.height < 2 || area.width < 10 {
|
||||
return; // Too small to render.
|
||||
}
|
||||
|
||||
let bp = classify_width(area.width);
|
||||
|
||||
match bp {
|
||||
Breakpoint::Lg | Breakpoint::Xl => render_wide(frame, state, area),
|
||||
Breakpoint::Md => render_medium(frame, state, area),
|
||||
Breakpoint::Xs | Breakpoint::Sm => render_narrow(frame, state, area),
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Layout variants
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Wide: 3-column [Stats | Projects | Recent Activity].
|
||||
fn render_wide(frame: &mut Frame<'_>, state: &DashboardState, area: Rect) {
|
||||
let cols = Flex::horizontal()
|
||||
.constraints([
|
||||
Constraint::Ratio(1, 3),
|
||||
Constraint::Ratio(1, 3),
|
||||
Constraint::Ratio(1, 3),
|
||||
])
|
||||
.split(area);
|
||||
|
||||
render_stat_panel(frame, &state.counts, cols[0]);
|
||||
render_project_list(frame, state, cols[1]);
|
||||
render_recent_activity(frame, state, cols[2]);
|
||||
}
|
||||
|
||||
/// Medium: 2-column [Stats+Projects stacked | Recent Activity].
|
||||
fn render_medium(frame: &mut Frame<'_>, state: &DashboardState, area: Rect) {
|
||||
let cols = Flex::horizontal()
|
||||
.constraints([Constraint::Ratio(2, 5), Constraint::Ratio(3, 5)])
|
||||
.split(area);
|
||||
|
||||
// Left column: stats on top, projects below.
|
||||
let left_rows = Flex::vertical()
|
||||
.constraints([Constraint::Ratio(1, 2), Constraint::Ratio(1, 2)])
|
||||
.split(cols[0]);
|
||||
|
||||
render_stat_panel(frame, &state.counts, left_rows[0]);
|
||||
render_project_list(frame, state, left_rows[1]);
|
||||
|
||||
render_recent_activity(frame, state, cols[1]);
|
||||
}
|
||||
|
||||
/// Narrow: single column stacked.
|
||||
fn render_narrow(frame: &mut Frame<'_>, state: &DashboardState, area: Rect) {
|
||||
let rows = Flex::vertical()
|
||||
.constraints([
|
||||
Constraint::Fixed(8), // stats
|
||||
Constraint::Fixed(4), // projects (compact)
|
||||
Constraint::Fill, // recent
|
||||
])
|
||||
.split(area);
|
||||
|
||||
render_stat_panel(frame, &state.counts, rows[0]);
|
||||
render_project_list(frame, state, rows[1]);
|
||||
render_recent_activity(frame, state, rows[2]);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Panels
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Entity counts panel.
|
||||
fn render_stat_panel(frame: &mut Frame<'_>, counts: &EntityCounts, area: Rect) {
|
||||
if area.height == 0 || area.width < 5 {
|
||||
return;
|
||||
}
|
||||
|
||||
let title_cell = Cell {
|
||||
fg: ACCENT,
|
||||
..Cell::default()
|
||||
};
|
||||
let label_cell = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
let value_cell = Cell {
|
||||
fg: TEXT,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
let max_x = area.x.saturating_add(area.width);
|
||||
let mut y = area.y;
|
||||
let x = area.x.saturating_add(1); // 1-char left padding
|
||||
|
||||
// Title
|
||||
frame.print_text_clipped(x, y, "Entity Counts", title_cell, max_x);
|
||||
y = y.saturating_add(1);
|
||||
|
||||
// Separator
|
||||
render_horizontal_rule(frame, area.x, y, area.width, BORDER);
|
||||
y = y.saturating_add(1);
|
||||
|
||||
// Stats rows
|
||||
let stats: &[(&str, String)] = &[
|
||||
(
|
||||
"Issues",
|
||||
format!("{} open / {}", counts.issues_open, counts.issues_total),
|
||||
),
|
||||
(
|
||||
"MRs",
|
||||
format!("{} open / {}", counts.mrs_open, counts.mrs_total),
|
||||
),
|
||||
("Discussions", counts.discussions.to_string()),
|
||||
(
|
||||
"Notes",
|
||||
format!(
|
||||
"{} ({}% system)",
|
||||
counts.notes_total, counts.notes_system_pct
|
||||
),
|
||||
),
|
||||
("Documents", counts.documents.to_string()),
|
||||
("Embeddings", counts.embeddings.to_string()),
|
||||
];
|
||||
|
||||
for (label, value) in stats {
|
||||
if y >= area.y.saturating_add(area.height) {
|
||||
break;
|
||||
}
|
||||
let after_label = frame.print_text_clipped(x, y, label, label_cell, max_x);
|
||||
let after_colon = frame.print_text_clipped(after_label, y, ": ", label_cell, max_x);
|
||||
frame.print_text_clipped(after_colon, y, value, value_cell, max_x);
|
||||
y = y.saturating_add(1);
|
||||
}
|
||||
}
|
||||
|
||||
/// Per-project sync freshness list.
|
||||
fn render_project_list(frame: &mut Frame<'_>, state: &DashboardState, area: Rect) {
|
||||
if area.height == 0 || area.width < 5 {
|
||||
return;
|
||||
}
|
||||
|
||||
let title_cell = Cell {
|
||||
fg: ACCENT,
|
||||
..Cell::default()
|
||||
};
|
||||
let label_cell = Cell {
|
||||
fg: TEXT,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
let max_x = area.x.saturating_add(area.width);
|
||||
let mut y = area.y;
|
||||
let x = area.x.saturating_add(1);
|
||||
|
||||
frame.print_text_clipped(x, y, "Projects", title_cell, max_x);
|
||||
y = y.saturating_add(1);
|
||||
render_horizontal_rule(frame, area.x, y, area.width, BORDER);
|
||||
y = y.saturating_add(1);
|
||||
|
||||
if state.projects.is_empty() {
|
||||
let muted = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
frame.print_text_clipped(x, y, "No projects synced", muted, max_x);
|
||||
return;
|
||||
}
|
||||
|
||||
for proj in &state.projects {
|
||||
if y >= area.y.saturating_add(area.height) {
|
||||
break;
|
||||
}
|
||||
|
||||
let freshness_color = staleness_color(proj.minutes_since_sync);
|
||||
let freshness_cell = Cell {
|
||||
fg: freshness_color,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
let indicator = staleness_indicator(proj.minutes_since_sync);
|
||||
let after_dot = frame.print_text_clipped(x, y, &indicator, freshness_cell, max_x);
|
||||
let after_space = frame.print_text_clipped(after_dot, y, " ", label_cell, max_x);
|
||||
frame.print_text_clipped(after_space, y, &proj.path, label_cell, max_x);
|
||||
y = y.saturating_add(1);
|
||||
}
|
||||
|
||||
// Last sync summary if available.
|
||||
if let Some(ref sync) = state.last_sync
|
||||
&& y < area.y.saturating_add(area.height)
|
||||
{
|
||||
y = y.saturating_add(1); // blank line
|
||||
render_sync_summary(frame, sync, x, y, max_x);
|
||||
}
|
||||
}
|
||||
|
||||
/// Scrollable recent activity list.
|
||||
fn render_recent_activity(frame: &mut Frame<'_>, state: &DashboardState, area: Rect) {
|
||||
if area.height == 0 || area.width < 5 {
|
||||
return;
|
||||
}
|
||||
|
||||
let title_cell = Cell {
|
||||
fg: ACCENT,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
let max_x = area.x.saturating_add(area.width);
|
||||
let mut y = area.y;
|
||||
let x = area.x.saturating_add(1);
|
||||
|
||||
frame.print_text_clipped(x, y, "Recent Activity", title_cell, max_x);
|
||||
y = y.saturating_add(1);
|
||||
render_horizontal_rule(frame, area.x, y, area.width, BORDER);
|
||||
y = y.saturating_add(1);
|
||||
|
||||
if state.recent.is_empty() {
|
||||
let muted = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
frame.print_text_clipped(x, y, "No recent activity", muted, max_x);
|
||||
return;
|
||||
}
|
||||
|
||||
let visible_rows = (area.y.saturating_add(area.height)).saturating_sub(y) as usize;
|
||||
let items = &state.recent;
|
||||
let start = state.scroll_offset.min(items.len().saturating_sub(1));
|
||||
let end = (start + visible_rows).min(items.len());
|
||||
|
||||
for item in &items[start..end] {
|
||||
if y >= area.y.saturating_add(area.height) {
|
||||
break;
|
||||
}
|
||||
render_activity_row(frame, item, x, y, max_x);
|
||||
y = y.saturating_add(1);
|
||||
}
|
||||
|
||||
// Scroll indicator if there's more content.
|
||||
if end < items.len() && y < area.y.saturating_add(area.height) {
|
||||
let muted = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
let remaining = items.len() - end;
|
||||
frame.print_text_clipped(x, y, &format!("... {remaining} more"), muted, max_x);
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Helpers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Render a single recent activity row.
|
||||
fn render_activity_row(
|
||||
frame: &mut Frame<'_>,
|
||||
item: &RecentActivityItem,
|
||||
x: u16,
|
||||
y: u16,
|
||||
max_x: u16,
|
||||
) {
|
||||
let type_color = if item.entity_type == "issue" {
|
||||
CYAN
|
||||
} else {
|
||||
ACCENT
|
||||
};
|
||||
let type_cell = Cell {
|
||||
fg: type_color,
|
||||
..Cell::default()
|
||||
};
|
||||
let text_cell = Cell {
|
||||
fg: TEXT,
|
||||
..Cell::default()
|
||||
};
|
||||
let muted_cell = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
let type_label = if item.entity_type == "issue" {
|
||||
format!("#{}", item.iid)
|
||||
} else {
|
||||
format!("!{}", item.iid)
|
||||
};
|
||||
|
||||
let after_type = frame.print_text_clipped(x, y, &type_label, type_cell, max_x);
|
||||
let after_space = frame.print_text_clipped(after_type, y, " ", text_cell, max_x);
|
||||
|
||||
// Truncate title to leave room for time.
|
||||
let time_str = format_relative_time(item.minutes_ago);
|
||||
let time_width = time_str.len() as u16 + 2; // " " + time
|
||||
let title_max = max_x.saturating_sub(time_width);
|
||||
|
||||
let after_title = frame.print_text_clipped(after_space, y, &item.title, text_cell, title_max);
|
||||
|
||||
// Right-align time string.
|
||||
let time_x = max_x.saturating_sub(time_str.len() as u16 + 1);
|
||||
if time_x > after_title {
|
||||
frame.print_text_clipped(time_x, y, &time_str, muted_cell, max_x);
|
||||
}
|
||||
}
|
||||
|
||||
/// Render a last-sync summary line.
|
||||
fn render_sync_summary(frame: &mut Frame<'_>, sync: &LastSyncInfo, x: u16, y: u16, max_x: u16) {
|
||||
let status_color = if sync.status == "succeeded" {
|
||||
GREEN
|
||||
} else {
|
||||
RED
|
||||
};
|
||||
let cell = Cell {
|
||||
fg: status_color,
|
||||
..Cell::default()
|
||||
};
|
||||
let muted = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
let label_end = frame.print_text_clipped(x, y, "Last sync: ", muted, max_x);
|
||||
let status_end = frame.print_text_clipped(label_end, y, &sync.status, cell, max_x);
|
||||
|
||||
if let Some(ref err) = sync.error {
|
||||
let err_cell = Cell {
|
||||
fg: RED,
|
||||
..Cell::default()
|
||||
};
|
||||
let after_space = frame.print_text_clipped(status_end, y, " — ", muted, max_x);
|
||||
frame.print_text_clipped(after_space, y, err, err_cell, max_x);
|
||||
}
|
||||
}
|
||||
|
||||
/// Draw a horizontal rule across a row.
|
||||
fn render_horizontal_rule(frame: &mut Frame<'_>, x: u16, y: u16, width: u16, color: PackedRgba) {
|
||||
let cell = Cell {
|
||||
fg: color,
|
||||
..Cell::default()
|
||||
};
|
||||
let rule = "─".repeat(width as usize);
|
||||
frame.print_text_clipped(x, y, &rule, cell, x.saturating_add(width));
|
||||
}
|
||||
|
||||
/// Staleness color: green <60min, yellow <360min, red >360min.
|
||||
const fn staleness_color(minutes: u64) -> PackedRgba {
|
||||
if minutes == u64::MAX {
|
||||
RED // Never synced.
|
||||
} else if minutes < 60 {
|
||||
GREEN
|
||||
} else if minutes < 360 {
|
||||
YELLOW
|
||||
} else {
|
||||
RED
|
||||
}
|
||||
}
|
||||
|
||||
/// Staleness dot indicator, e.g. "● 15m ago", "● 2h ago", "● never".
///
/// `u64::MAX` is the sentinel for a project that has never synced.
fn staleness_indicator(minutes: u64) -> String {
    match minutes {
        u64::MAX => "● never".to_string(),
        m if m < 60 => format!("● {m}m ago"),
        m if m < 1440 => format!("● {}h ago", m / 60),
        m => format!("● {}d ago", m / 1440),
    }
}
|
||||
|
||||
/// Format relative time for the activity feed: "just now", "Nm ago",
/// "Nh ago", or "Nd ago".
fn format_relative_time(minutes: u64) -> String {
    match minutes {
        0 => "just now".to_string(),
        m if m < 60 => format!("{m}m ago"),
        m if m < 1440 => format!("{}h ago", m / 60),
        m => format!("{}d ago", m / 1440),
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::state::dashboard::{DashboardData, EntityCounts, ProjectSyncInfo};
    use ftui::render::grapheme_pool::GraphemePool;

    /// Build a scratch frame of the given dimensions and run `$body` with it.
    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    /// A populated dashboard covering every panel: full counts, two
    /// projects with different staleness, and one recent-activity entry.
    fn sample_state() -> DashboardState {
        let mut state = DashboardState::default();
        state.update(DashboardData {
            counts: EntityCounts {
                issues_open: 42,
                issues_total: 100,
                mrs_open: 10,
                mrs_total: 50,
                discussions: 200,
                notes_total: 500,
                notes_system_pct: 30,
                documents: 80,
                embeddings: 75,
            },
            projects: vec![
                ProjectSyncInfo {
                    path: "group/alpha".into(),
                    minutes_since_sync: 15,
                },
                ProjectSyncInfo {
                    path: "group/beta".into(),
                    minutes_since_sync: 120,
                },
            ],
            recent: vec![RecentActivityItem {
                entity_type: "issue".into(),
                iid: 42,
                title: "Fix authentication bug".into(),
                state: "opened".into(),
                minutes_ago: 5,
            }],
            last_sync: None,
        });
        state
    }

    /// Smoke-test one layout variant: rendering the sample state at the
    /// given terminal size must not panic.
    fn assert_renders(width: u16, height: u16) {
        with_frame!(width, height, |frame| {
            let state = sample_state();
            render_dashboard(&mut frame, &state, Rect::new(0, 0, width, height));
        });
    }

    #[test]
    fn test_render_dashboard_wide_no_panic() {
        assert_renders(140, 30);
    }

    #[test]
    fn test_render_dashboard_medium_no_panic() {
        assert_renders(100, 24);
    }

    #[test]
    fn test_render_dashboard_narrow_no_panic() {
        assert_renders(60, 20);
    }

    #[test]
    fn test_render_dashboard_tiny_noop() {
        // Below the minimum size the renderer must bail out quietly.
        with_frame!(5, 1, |frame| {
            let state = DashboardState::default();
            render_dashboard(&mut frame, &state, Rect::new(0, 0, 5, 1));
        });
    }

    #[test]
    fn test_render_dashboard_empty_state_no_panic() {
        with_frame!(120, 24, |frame| {
            let state = DashboardState::default();
            render_dashboard(&mut frame, &state, Rect::new(0, 0, 120, 24));
        });
    }

    #[test]
    fn test_staleness_color_thresholds() {
        assert_eq!(staleness_color(0), GREEN);
        assert_eq!(staleness_color(59), GREEN);
        assert_eq!(staleness_color(60), YELLOW);
        assert_eq!(staleness_color(359), YELLOW);
        assert_eq!(staleness_color(360), RED);
        assert_eq!(staleness_color(u64::MAX), RED);
    }

    #[test]
    fn test_staleness_indicator() {
        assert_eq!(staleness_indicator(15), "● 15m ago");
        assert_eq!(staleness_indicator(120), "● 2h ago");
        assert_eq!(staleness_indicator(2880), "● 2d ago");
        assert_eq!(staleness_indicator(u64::MAX), "● never");
    }

    #[test]
    fn test_format_relative_time() {
        assert_eq!(format_relative_time(0), "just now");
        assert_eq!(format_relative_time(5), "5m ago");
        assert_eq!(format_relative_time(90), "1h ago");
        assert_eq!(format_relative_time(1500), "1d ago");
    }

    #[test]
    fn test_stat_panel_renders_title() {
        with_frame!(40, 10, |frame| {
            let counts = EntityCounts {
                issues_open: 3,
                issues_total: 10,
                ..Default::default()
            };
            render_stat_panel(&mut frame, &counts, Rect::new(0, 0, 40, 10));

            // "Entity Counts" starts one cell in on the title row.
            let cell = frame.buffer.get(1, 0).unwrap();
            assert_eq!(cell.content.as_char(), Some('E'), "Expected 'E' at (1,0)");
        });
    }
}
|
||||
626
crates/lore-tui/src/view/issue_detail.rs
Normal file
626
crates/lore-tui/src/view/issue_detail.rs
Normal file
@@ -0,0 +1,626 @@
|
||||
#![allow(dead_code)] // Phase 2: consumed by view/mod.rs screen dispatch
|
||||
|
||||
//! Issue detail screen view.
|
||||
//!
|
||||
//! Composes metadata header, description, discussion tree, and
|
||||
//! cross-references into a scrollable detail layout. Supports
|
||||
//! progressive hydration: metadata renders immediately while
|
||||
//! discussions load async in Phase 2.
|
||||
|
||||
use ftui::core::geometry::Rect;
|
||||
use ftui::render::cell::{Cell, PackedRgba};
|
||||
use ftui::render::drawing::Draw;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
use crate::clock::Clock;
|
||||
use crate::safety::{UrlPolicy, sanitize_for_terminal};
|
||||
use crate::state::issue_detail::{DetailSection, IssueDetailState, IssueMetadata};
|
||||
use crate::view::common::cross_ref::{CrossRefColors, render_cross_refs};
|
||||
use crate::view::common::discussion_tree::{DiscussionTreeColors, render_discussion_tree};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Colors (Flexoki palette — will use injected Theme in a later phase)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Flexoki dark-theme palette. Hard-coded for now; a later phase will
// source these from an injected `Theme` (see module docs above).
const TEXT: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3); // tx — primary text
const TEXT_MUTED: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2 — secondary text
const ACCENT: PackedRgba = PackedRgba::rgb(0xDA, 0x70, 0x2C); // orange — highlights / expand marker
const GREEN: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39); // green — diff paths
const RED: PackedRgba = PackedRgba::rgb(0xAF, 0x3A, 0x29); // red — errors / closed
const CYAN: PackedRgba = PackedRgba::rgb(0x3A, 0xA9, 0x9F); // cyan — note authors
const BG_SURFACE: PackedRgba = PackedRgba::rgb(0x28, 0x28, 0x24); // bg-2 — raised surfaces
const BORDER: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2 — rules and tree guides
const SELECTED_FG: PackedRgba = PackedRgba::rgb(0x10, 0x0F, 0x0F); // bg — selection text (inverted)
const SELECTED_BG: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3); // tx — selection fill (inverted)
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Color constructors
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Build the [`DiscussionTreeColors`] set from the module's palette
/// constants. Selection swaps fg/bg (dark text on a light fill).
fn discussion_colors() -> DiscussionTreeColors {
    DiscussionTreeColors {
        author_fg: CYAN,
        timestamp_fg: TEXT_MUTED,
        body_fg: TEXT,
        system_fg: TEXT_MUTED,
        diff_path_fg: GREEN,
        resolved_fg: TEXT_MUTED,
        guide_fg: BORDER,
        selected_fg: SELECTED_FG,
        selected_bg: SELECTED_BG,
        expand_fg: ACCENT,
    }
}
|
||||
|
||||
/// Build the [`CrossRefColors`] set from the module's palette constants.
/// Selection uses the same inverted fg/bg pair as the discussion tree.
fn cross_ref_colors() -> CrossRefColors {
    CrossRefColors {
        kind_fg: ACCENT,
        label_fg: TEXT,
        muted_fg: TEXT_MUTED,
        selected_fg: SELECTED_FG,
        selected_bg: SELECTED_BG,
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Render
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Render the full issue detail screen.
|
||||
///
|
||||
/// Layout:
|
||||
/// ```text
|
||||
/// Row 0: #42 Fix authentication flow (title bar)
|
||||
/// Row 1: opened | alice | backend, security (metadata row)
|
||||
/// Row 2: Milestone: v1.0 | Due: 2026-03-01 (optional)
|
||||
/// Row 3: ─────────────────────────────────── (separator)
|
||||
/// Row 4..N: Description text... (scrollable)
|
||||
/// ─────────────────────────────────── (separator)
|
||||
/// Discussions (3) (section header)
|
||||
/// ▶ alice: Fixed the login flow... (collapsed)
|
||||
/// ▼ bob: I think we should also... (expanded)
|
||||
/// bob: body line 1...
|
||||
/// ─────────────────────────────────── (separator)
|
||||
/// Cross References (section header)
|
||||
/// [MR] !10 Fix authentication MR
|
||||
/// ```
|
||||
pub fn render_issue_detail(
|
||||
frame: &mut Frame<'_>,
|
||||
state: &IssueDetailState,
|
||||
area: Rect,
|
||||
clock: &dyn Clock,
|
||||
) {
|
||||
if area.height < 3 || area.width < 10 {
|
||||
return;
|
||||
}
|
||||
|
||||
let Some(ref meta) = state.metadata else {
|
||||
// No metadata yet — the loading spinner handles this.
|
||||
return;
|
||||
};
|
||||
|
||||
let max_x = area.x.saturating_add(area.width);
|
||||
let mut y = area.y;
|
||||
|
||||
// --- Title bar ---
|
||||
y = render_title_bar(frame, meta, area.x, y, max_x);
|
||||
|
||||
// --- Metadata row ---
|
||||
y = render_metadata_row(frame, meta, area.x, y, max_x);
|
||||
|
||||
// --- Optional milestone / due date row ---
|
||||
if meta.milestone.is_some() || meta.due_date.is_some() {
|
||||
y = render_milestone_row(frame, meta, area.x, y, max_x);
|
||||
}
|
||||
|
||||
// --- Separator ---
|
||||
y = render_separator(frame, area.x, y, area.width);
|
||||
|
||||
let bottom = area.y.saturating_add(area.height);
|
||||
if y >= bottom {
|
||||
return;
|
||||
}
|
||||
|
||||
// Remaining space is split between description, discussions, and cross-refs.
|
||||
let remaining = bottom.saturating_sub(y);
|
||||
|
||||
// Compute section heights based on content.
|
||||
let desc_lines = count_description_lines(meta, area.width);
|
||||
let disc_count = state.discussions.len();
|
||||
let xref_count = state.cross_refs.len();
|
||||
|
||||
let (desc_h, disc_h, xref_h) = allocate_sections(remaining, desc_lines, disc_count, xref_count);
|
||||
|
||||
// --- Description section ---
|
||||
if desc_h > 0 {
|
||||
let desc_area = Rect::new(area.x, y, area.width, desc_h);
|
||||
let is_focused = state.active_section == DetailSection::Description;
|
||||
render_description(frame, meta, state.description_scroll, desc_area, is_focused);
|
||||
y += desc_h;
|
||||
}
|
||||
|
||||
// --- Separator before discussions ---
|
||||
if (disc_h > 0 || xref_h > 0) && y < bottom {
|
||||
y = render_separator(frame, area.x, y, area.width);
|
||||
}
|
||||
|
||||
// --- Discussions section ---
|
||||
if disc_h > 0 && y < bottom {
|
||||
let header_h = 1;
|
||||
let is_focused = state.active_section == DetailSection::Discussions;
|
||||
|
||||
// Section header.
|
||||
render_section_header(
|
||||
frame,
|
||||
&format!("Discussions ({})", state.discussions.len()),
|
||||
area.x,
|
||||
y,
|
||||
max_x,
|
||||
is_focused,
|
||||
);
|
||||
y += header_h;
|
||||
|
||||
if !state.discussions_loaded {
|
||||
// Still loading.
|
||||
let style = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
let _ = frame.print_text_clipped(area.x + 1, y, "Loading discussions...", style, max_x);
|
||||
y += 1;
|
||||
} else if state.discussions.is_empty() {
|
||||
let style = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
let _ = frame.print_text_clipped(area.x + 1, y, "No discussions", style, max_x);
|
||||
y += 1;
|
||||
} else {
|
||||
let tree_height = disc_h.saturating_sub(header_h);
|
||||
if tree_height > 0 {
|
||||
let tree_area = Rect::new(area.x, y, area.width, tree_height);
|
||||
let rendered = render_discussion_tree(
|
||||
frame,
|
||||
&state.discussions,
|
||||
&state.tree_state,
|
||||
tree_area,
|
||||
&discussion_colors(),
|
||||
clock,
|
||||
);
|
||||
y += rendered;
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// --- Separator before cross-refs ---
|
||||
if xref_h > 0 && y < bottom {
|
||||
y = render_separator(frame, area.x, y, area.width);
|
||||
}
|
||||
|
||||
// --- Cross-references section ---
|
||||
if xref_h > 0 && y < bottom {
|
||||
let is_focused = state.active_section == DetailSection::CrossRefs;
|
||||
|
||||
render_section_header(
|
||||
frame,
|
||||
&format!("Cross References ({})", state.cross_refs.len()),
|
||||
area.x,
|
||||
y,
|
||||
max_x,
|
||||
is_focused,
|
||||
);
|
||||
y += 1;
|
||||
|
||||
if state.cross_refs.is_empty() {
|
||||
let style = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
let _ = frame.print_text_clipped(area.x + 1, y, "No cross-references", style, max_x);
|
||||
} else {
|
||||
let refs_height = xref_h.saturating_sub(1); // minus header
|
||||
if refs_height > 0 {
|
||||
let refs_area = Rect::new(area.x, y, area.width, refs_height);
|
||||
let _ = render_cross_refs(
|
||||
frame,
|
||||
&state.cross_refs,
|
||||
&state.cross_ref_state,
|
||||
refs_area,
|
||||
&cross_ref_colors(),
|
||||
);
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Sub-renderers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Render the issue title bar: `#42 Fix authentication flow`
|
||||
fn render_title_bar(
|
||||
frame: &mut Frame<'_>,
|
||||
meta: &IssueMetadata,
|
||||
x: u16,
|
||||
y: u16,
|
||||
max_x: u16,
|
||||
) -> u16 {
|
||||
let iid_text = format!("#{} ", meta.iid);
|
||||
let iid_style = Cell {
|
||||
fg: ACCENT,
|
||||
..Cell::default()
|
||||
};
|
||||
let title_style = Cell {
|
||||
fg: TEXT,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
let cx = frame.print_text_clipped(x, y, &iid_text, iid_style, max_x);
|
||||
let safe_title = sanitize_for_terminal(&meta.title, UrlPolicy::Strip);
|
||||
let _ = frame.print_text_clipped(cx, y, &safe_title, title_style, max_x);
|
||||
|
||||
y + 1
|
||||
}
|
||||
|
||||
/// Render the metadata row: `opened | alice | backend, security`
|
||||
fn render_metadata_row(
|
||||
frame: &mut Frame<'_>,
|
||||
meta: &IssueMetadata,
|
||||
x: u16,
|
||||
y: u16,
|
||||
max_x: u16,
|
||||
) -> u16 {
|
||||
let state_fg = match meta.state.as_str() {
|
||||
"opened" => GREEN,
|
||||
"closed" => RED,
|
||||
_ => TEXT_MUTED,
|
||||
};
|
||||
let state_style = Cell {
|
||||
fg: state_fg,
|
||||
..Cell::default()
|
||||
};
|
||||
let muted_style = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
let author_style = Cell {
|
||||
fg: CYAN,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
let mut cx = frame.print_text_clipped(x, y, &meta.state, state_style, max_x);
|
||||
cx = frame.print_text_clipped(cx, y, " | ", muted_style, max_x);
|
||||
cx = frame.print_text_clipped(cx, y, &meta.author, author_style, max_x);
|
||||
|
||||
if !meta.labels.is_empty() {
|
||||
cx = frame.print_text_clipped(cx, y, " | ", muted_style, max_x);
|
||||
let labels_text = meta.labels.join(", ");
|
||||
let _ = frame.print_text_clipped(cx, y, &labels_text, muted_style, max_x);
|
||||
}
|
||||
|
||||
if !meta.assignees.is_empty() {
|
||||
cx = frame.print_text_clipped(cx, y, " | ", muted_style, max_x);
|
||||
let assignees_text = format!("-> {}", meta.assignees.join(", "));
|
||||
let _ = frame.print_text_clipped(cx, y, &assignees_text, muted_style, max_x);
|
||||
}
|
||||
|
||||
y + 1
|
||||
}
|
||||
|
||||
/// Render optional milestone / due date row.
|
||||
fn render_milestone_row(
|
||||
frame: &mut Frame<'_>,
|
||||
meta: &IssueMetadata,
|
||||
x: u16,
|
||||
y: u16,
|
||||
max_x: u16,
|
||||
) -> u16 {
|
||||
let muted = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
let mut cx = x;
|
||||
|
||||
if let Some(ref ms) = meta.milestone {
|
||||
cx = frame.print_text_clipped(cx, y, "Milestone: ", muted, max_x);
|
||||
let val_style = Cell {
|
||||
fg: TEXT,
|
||||
..Cell::default()
|
||||
};
|
||||
cx = frame.print_text_clipped(cx, y, ms, val_style, max_x);
|
||||
}
|
||||
|
||||
if let Some(ref due) = meta.due_date {
|
||||
if cx > x {
|
||||
cx = frame.print_text_clipped(cx, y, " | ", muted, max_x);
|
||||
}
|
||||
cx = frame.print_text_clipped(cx, y, "Due: ", muted, max_x);
|
||||
let val_style = Cell {
|
||||
fg: TEXT,
|
||||
..Cell::default()
|
||||
};
|
||||
let _ = frame.print_text_clipped(cx, y, due, val_style, max_x);
|
||||
}
|
||||
|
||||
y + 1
|
||||
}
|
||||
|
||||
/// Render a horizontal separator line.
|
||||
fn render_separator(frame: &mut Frame<'_>, x: u16, y: u16, width: u16) -> u16 {
|
||||
let sep_style = Cell {
|
||||
fg: BORDER,
|
||||
..Cell::default()
|
||||
};
|
||||
let line: String = "\u{2500}".repeat(width as usize);
|
||||
let _ = frame.print_text_clipped(x, y, &line, sep_style, x.saturating_add(width));
|
||||
y + 1
|
||||
}
|
||||
|
||||
/// Render a section header with focus indicator.
|
||||
fn render_section_header(
|
||||
frame: &mut Frame<'_>,
|
||||
label: &str,
|
||||
x: u16,
|
||||
y: u16,
|
||||
max_x: u16,
|
||||
is_focused: bool,
|
||||
) {
|
||||
if is_focused {
|
||||
let style = Cell {
|
||||
fg: SELECTED_FG,
|
||||
bg: SELECTED_BG,
|
||||
..Cell::default()
|
||||
};
|
||||
// Fill the row with selected background.
|
||||
frame.draw_rect_filled(Rect::new(x, y, max_x.saturating_sub(x), 1), style);
|
||||
let _ = frame.print_text_clipped(x, y, label, style, max_x);
|
||||
} else {
|
||||
let style = Cell {
|
||||
fg: ACCENT,
|
||||
..Cell::default()
|
||||
};
|
||||
let _ = frame.print_text_clipped(x, y, label, style, max_x);
|
||||
}
|
||||
}
|
||||
|
||||
/// Render the description section.
|
||||
fn render_description(
|
||||
frame: &mut Frame<'_>,
|
||||
meta: &IssueMetadata,
|
||||
scroll: usize,
|
||||
area: Rect,
|
||||
_is_focused: bool,
|
||||
) {
|
||||
let safe_desc = sanitize_for_terminal(&meta.description, UrlPolicy::Strip);
|
||||
let lines: Vec<&str> = safe_desc.lines().collect();
|
||||
|
||||
let text_style = Cell {
|
||||
fg: TEXT,
|
||||
..Cell::default()
|
||||
};
|
||||
let max_x = area.x.saturating_add(area.width);
|
||||
|
||||
for (i, line) in lines
|
||||
.iter()
|
||||
.skip(scroll)
|
||||
.take(area.height as usize)
|
||||
.enumerate()
|
||||
{
|
||||
let y = area.y + i as u16;
|
||||
let _ = frame.print_text_clipped(area.x, y, line, text_style, max_x);
|
||||
}
|
||||
}
|
||||
|
||||
/// Count the number of visible description lines for layout allocation.
|
||||
fn count_description_lines(meta: &IssueMetadata, _width: u16) -> usize {
|
||||
if meta.description.is_empty() {
|
||||
return 0;
|
||||
}
|
||||
// Rough estimate: count newlines. Proper word-wrap would need unicode width.
|
||||
meta.description.lines().count().max(1)
|
||||
}
|
||||
|
||||
/// Allocate vertical space between description, discussions, and cross-refs.
///
/// Cross-refs get header + rows capped at 25% of the space (zero when there
/// are none); the description then gets up to 40% of what is left, capped at
/// its content line count; discussions take everything remaining.
fn allocate_sections(
    available: u16,
    desc_lines: usize,
    _disc_count: usize,
    xref_count: usize,
) -> (u16, u16, u16) {
    let budget = usize::from(available);
    if budget == 0 {
        return (0, 0, 0);
    }

    // Cross-refs: 1 header row + one row per ref, at most 25% of the budget.
    let xref_rows = match xref_count {
        0 => 0,
        n => (n + 1).min(budget / 4),
    };

    // xref_rows <= budget / 4 <= budget, so plain subtraction is safe.
    let body = budget - xref_rows;

    // Description: capped at 40% of the remaining space and its own content.
    let desc_rows = desc_lines.min(body * 2 / 5).min(body);

    // Discussions absorb the rest (desc_rows <= body by construction).
    let disc_rows = body - desc_rows;

    (desc_rows as u16, disc_rows as u16, xref_rows as u16)
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::clock::FakeClock;
    use crate::message::EntityKey;
    use crate::state::issue_detail::{IssueDetailData, IssueMetadata};
    use crate::view::common::cross_ref::{CrossRef, CrossRefKind};
    use crate::view::common::discussion_tree::{DiscussionNode, NoteNode};
    use ftui::render::grapheme_pool::GraphemePool;

    // Builds a Frame backed by a fresh grapheme pool for the duration of the
    // test body.
    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    // Fully-populated metadata fixture: labels, assignee, milestone, due date,
    // and a 5-line description (test_count_description_lines relies on that).
    fn sample_metadata() -> IssueMetadata {
        IssueMetadata {
            iid: 42,
            project_path: "group/project".into(),
            title: "Fix authentication flow".into(),
            description: "The login page has a bug.\nSteps to reproduce:\n1. Go to /login\n2. Enter credentials\n3. Click submit".into(),
            state: "opened".into(),
            author: "alice".into(),
            assignees: vec!["bob".into()],
            labels: vec!["backend".into(), "security".into()],
            milestone: Some("v1.0".into()),
            due_date: Some("2026-03-01".into()),
            created_at: 1_700_000_000_000,
            updated_at: 1_700_000_060_000,
            web_url: "https://gitlab.com/group/project/-/issues/42".into(),
            discussion_count: 2,
        }
    }

    // Detail state with metadata applied plus one closing-MR cross reference.
    fn sample_state_with_metadata() -> IssueDetailState {
        let mut state = IssueDetailState::default();
        state.load_new(EntityKey::issue(1, 42));
        state.apply_metadata(IssueDetailData {
            metadata: sample_metadata(),
            cross_refs: vec![CrossRef {
                kind: CrossRefKind::ClosingMr,
                entity_key: EntityKey::mr(1, 10),
                label: "Fix auth MR".into(),
                navigable: true,
            }],
        });
        state
    }

    // The render tests below are smoke tests: rendering must never panic,
    // whatever the area size or loading state.

    #[test]
    fn test_render_issue_detail_no_metadata_no_panic() {
        with_frame!(80, 24, |frame| {
            let state = IssueDetailState::default();
            let clock = FakeClock::from_ms(1_700_000_000_000);
            render_issue_detail(&mut frame, &state, Rect::new(0, 0, 80, 24), &clock);
        });
    }

    #[test]
    fn test_render_issue_detail_with_metadata_no_panic() {
        with_frame!(80, 24, |frame| {
            let state = sample_state_with_metadata();
            let clock = FakeClock::from_ms(1_700_000_060_000);
            render_issue_detail(&mut frame, &state, Rect::new(0, 0, 80, 24), &clock);
        });
    }

    #[test]
    fn test_render_issue_detail_tiny_area() {
        with_frame!(5, 2, |frame| {
            let state = sample_state_with_metadata();
            let clock = FakeClock::from_ms(1_700_000_060_000);
            render_issue_detail(&mut frame, &state, Rect::new(0, 0, 5, 2), &clock);
            // Should bail early, no panic.
        });
    }

    #[test]
    fn test_render_issue_detail_with_discussions() {
        with_frame!(80, 40, |frame| {
            let mut state = sample_state_with_metadata();
            state.apply_discussions(vec![DiscussionNode {
                discussion_id: "d1".into(),
                notes: vec![NoteNode {
                    author: "alice".into(),
                    body: "I found the bug".into(),
                    created_at: 1_700_000_000_000,
                    is_system: false,
                    is_diff_note: false,
                    diff_file_path: None,
                    diff_new_line: None,
                }],
                resolvable: false,
                resolved: false,
            }]);
            let clock = FakeClock::from_ms(1_700_000_060_000);
            render_issue_detail(&mut frame, &state, Rect::new(0, 0, 80, 40), &clock);
        });
    }

    #[test]
    fn test_render_issue_detail_discussions_loading() {
        with_frame!(80, 24, |frame| {
            let state = sample_state_with_metadata();
            // discussions_loaded is false by default after load_new.
            assert!(!state.discussions_loaded);
            let clock = FakeClock::from_ms(1_700_000_060_000);
            render_issue_detail(&mut frame, &state, Rect::new(0, 0, 80, 24), &clock);
        });
    }

    #[test]
    fn test_render_issue_detail_narrow_terminal() {
        with_frame!(30, 10, |frame| {
            let state = sample_state_with_metadata();
            let clock = FakeClock::from_ms(1_700_000_060_000);
            render_issue_detail(&mut frame, &state, Rect::new(0, 0, 30, 10), &clock);
        });
    }

    // allocate_sections invariants: zero space yields zero everywhere, and the
    // three allocations always sum to the available space.

    #[test]
    fn test_allocate_sections_empty() {
        assert_eq!(allocate_sections(0, 5, 3, 2), (0, 0, 0));
    }

    #[test]
    fn test_allocate_sections_balanced() {
        let (d, disc, x) = allocate_sections(20, 5, 3, 2);
        assert!(d > 0);
        assert!(disc > 0);
        assert!(x > 0);
        assert_eq!(d + disc + x, 20);
    }

    #[test]
    fn test_allocate_sections_no_xrefs() {
        let (d, disc, x) = allocate_sections(20, 5, 3, 0);
        assert_eq!(x, 0);
        assert_eq!(d + disc, 20);
    }

    #[test]
    fn test_allocate_sections_no_discussions() {
        let (d, disc, x) = allocate_sections(20, 5, 0, 2);
        assert!(d > 0);
        assert_eq!(d + disc + x, 20);
    }

    #[test]
    fn test_count_description_lines() {
        let meta = sample_metadata();
        let lines = count_description_lines(&meta, 80);
        assert_eq!(lines, 5); // 5 lines in the sample description
    }

    #[test]
    fn test_count_description_lines_empty() {
        let mut meta = sample_metadata();
        meta.description = String::new();
        assert_eq!(count_description_lines(&meta, 80), 0);
    }
}
|
||||
353
crates/lore-tui/src/view/issue_list.rs
Normal file
353
crates/lore-tui/src/view/issue_list.rs
Normal file
@@ -0,0 +1,353 @@
|
||||
#![allow(dead_code)] // Phase 2: consumed by view/mod.rs screen dispatch
|
||||
|
||||
//! Issue list screen view.
|
||||
//!
|
||||
//! Composes the reusable [`EntityTable`] and [`FilterBar`] widgets
|
||||
//! with issue-specific column definitions and [`TableRow`] implementation.
|
||||
|
||||
use ftui::core::geometry::Rect;
|
||||
use ftui::render::cell::{Cell, PackedRgba};
|
||||
use ftui::render::drawing::Draw;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
use crate::state::issue_list::{IssueListRow, IssueListState, SortField, SortOrder};
|
||||
use crate::view::common::entity_table::{
|
||||
Align, ColumnDef, EntityTableState, TableColors, TableRow, render_entity_table,
|
||||
};
|
||||
use crate::view::common::filter_bar::{FilterBarColors, FilterBarState, render_filter_bar};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// TableRow implementation for IssueListRow
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
impl TableRow for IssueListRow {
    /// Produce display strings in `ISSUE_COLUMNS` order, clipped to
    /// `col_count` entries when fewer columns are requested.
    fn cells(&self, col_count: usize) -> Vec<String> {
        let mut all = vec![
            format!("#{}", self.iid),  // 0: IID
            self.title.clone(),        // 1: Title
            self.state.clone(),        // 2: State
            self.author.clone(),       // 3: Author
            self.labels.join(", "),    // 4: Labels
            self.project_path.clone(), // 5: Project
        ];
        all.truncate(col_count);
        all
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Column definitions
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Column definitions for the issue list table.
///
/// The column order here must match `IssueListRow::cells()`. A `flex_weight`
/// of 0 pins the column at `min_width`; larger weights share the spare width.
/// NOTE(review): `priority` presumably controls which columns are dropped
/// first on narrow terminals — confirm against EntityTable's layout logic.
const ISSUE_COLUMNS: &[ColumnDef] = &[
    // 0: issue number, right-aligned, fixed width.
    ColumnDef {
        name: "IID",
        min_width: 5,
        flex_weight: 0,
        priority: 0,
        align: Align::Right,
    },
    // 1: title takes the largest share of flexible space.
    ColumnDef {
        name: "Title",
        min_width: 15,
        flex_weight: 4,
        priority: 0,
        align: Align::Left,
    },
    // 2: opened/closed state, fixed width.
    ColumnDef {
        name: "State",
        min_width: 7,
        flex_weight: 0,
        priority: 0,
        align: Align::Left,
    },
    ColumnDef {
        name: "Author",
        min_width: 8,
        flex_weight: 1,
        priority: 1,
        align: Align::Left,
    },
    ColumnDef {
        name: "Labels",
        min_width: 10,
        flex_weight: 2,
        priority: 2,
        align: Align::Left,
    },
    ColumnDef {
        name: "Project",
        min_width: 12,
        flex_weight: 1,
        priority: 3,
        align: Align::Left,
    },
];
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Colors
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Hardcoded dark palette for the issue table.
/// NOTE(review): values appear to match the Flexoki palette used in
/// view/mod.rs ("will use Theme in Phase 2") — confirm before changing.
fn table_colors() -> TableColors {
    TableColors {
        header_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
        header_bg: PackedRgba::rgb(0x34, 0x34, 0x31),
        row_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
        row_alt_bg: PackedRgba::rgb(0x1C, 0x1B, 0x1A),
        selected_fg: PackedRgba::rgb(0x10, 0x0F, 0x0F),
        selected_bg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
        sort_indicator: PackedRgba::rgb(0x87, 0x96, 0x6B),
        border: PackedRgba::rgb(0x40, 0x40, 0x3C),
    }
}
|
||||
|
||||
/// Hardcoded dark palette for the filter bar.
/// NOTE(review): same palette family as table_colors — consolidate when the
/// Theme abstraction lands.
fn filter_colors() -> FilterBarColors {
    FilterBarColors {
        input_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
        input_bg: PackedRgba::rgb(0x28, 0x28, 0x24),
        cursor_fg: PackedRgba::rgb(0x00, 0x00, 0x00),
        cursor_bg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
        chip_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
        chip_bg: PackedRgba::rgb(0x40, 0x40, 0x3C),
        error_fg: PackedRgba::rgb(0xAF, 0x3A, 0x29),
        label_fg: PackedRgba::rgb(0x87, 0x87, 0x80),
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Render
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Render the full issue list screen.
///
/// Layout:
/// ```text
/// Row 0: [Filter bar: / filter input_________]
/// Row 1: [chip1] [chip2] (if filter active)
/// Row 2: ─────────────────────────────────────
/// Row 3..N: IID Title State Author ...
/// ───────────────────────────────────────
/// #42 Fix login bug open alice ...
/// #41 Add tests open bob ...
/// Bottom: Showing 42 of 128 issues
/// ```
pub fn render_issue_list(frame: &mut Frame<'_>, state: &IssueListState, area: Rect) {
    // Too small for a filter bar plus at least one table row — render nothing.
    if area.height < 3 || area.width < 10 {
        return;
    }

    let mut y = area.y;
    let max_x = area.x.saturating_add(area.width);

    // -- Filter bar ---------------------------------------------------------
    // Filter bar gets up to 2 rows (input + chips); render_filter_bar reports
    // how many it actually used.
    let filter_area = Rect::new(area.x, y, area.width, 2.min(area.height));
    let fb_state = FilterBarState {
        input: state.filter_input.clone(),
        // NOTE(review): cursor is set to the byte length of the input —
        // confirm FilterBarState expects a byte offset, not a char index.
        cursor: state.filter_input.len(),
        focused: state.filter_focused,
        tokens: crate::filter_dsl::parse_filter_tokens(&state.filter_input),
        unknown_fields: Vec::new(),
    };
    let filter_rows = render_filter_bar(frame, &fb_state, filter_area, &filter_colors());
    y = y.saturating_add(filter_rows);

    // -- Status line (total count) ------------------------------------------
    let remaining_height = area.height.saturating_sub(y - area.y);
    // Need at least one table row plus the status row.
    if remaining_height < 2 {
        return;
    }

    // Reserve bottom row for status.
    let table_height = remaining_height.saturating_sub(1);
    let status_y = y.saturating_add(table_height);

    // -- Entity table -------------------------------------------------------
    // Translate the logical sort field into a visible column index.
    let sort_col = match state.sort_field {
        SortField::UpdatedAt => 0, // Map to IID column (closest visual proxy)
        SortField::Iid => 0,
        SortField::Title => 1,
        SortField::State => 2,
        SortField::Author => 3,
    };

    let mut table_state = EntityTableState {
        selected: state.selected_index,
        scroll_offset: state.scroll_offset,
        sort_column: sort_col,
        sort_ascending: matches!(state.sort_order, SortOrder::Asc),
    };

    let table_area = Rect::new(area.x, y, area.width, table_height);
    render_entity_table(
        frame,
        &state.rows,
        ISSUE_COLUMNS,
        &mut table_state,
        table_area,
        &table_colors(),
    );

    // -- Bottom status ------------------------------------------------------
    if status_y < area.y.saturating_add(area.height) {
        render_status_line(frame, state, area.x, status_y, max_x);
    }
}
|
||||
|
||||
/// Render the bottom status line showing row count and pagination info.
|
||||
fn render_status_line(frame: &mut Frame<'_>, state: &IssueListState, x: u16, y: u16, max_x: u16) {
|
||||
let muted = Cell {
|
||||
fg: PackedRgba::rgb(0x87, 0x87, 0x80),
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
let status = if state.rows.is_empty() {
|
||||
"No issues found".to_string()
|
||||
} else {
|
||||
let showing = state.rows.len();
|
||||
let total = state.total_count;
|
||||
if state.next_cursor.is_some() {
|
||||
format!("Showing {showing} of {total} issues (more available)")
|
||||
} else {
|
||||
format!("Showing {showing} of {total} issues")
|
||||
}
|
||||
};
|
||||
|
||||
frame.print_text_clipped(x, y, &status, muted, max_x);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use ftui::render::grapheme_pool::GraphemePool;

    // Builds a Frame backed by a fresh grapheme pool for the test body.
    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    // Fixture: `row_count` rows with even indices "opened" and odd "closed";
    // only the first row carries labels; timestamps step back one minute each.
    fn sample_state(row_count: usize) -> IssueListState {
        let rows: Vec<IssueListRow> = (0..row_count)
            .map(|i| IssueListRow {
                project_path: "group/project".into(),
                iid: (i + 1) as i64,
                title: format!("Issue {}", i + 1),
                state: if i % 2 == 0 { "opened" } else { "closed" }.into(),
                author: "taylor".into(),
                labels: if i == 0 {
                    vec!["bug".into(), "critical".into()]
                } else {
                    vec![]
                },
                updated_at: 1_700_000_000_000 - (i as i64 * 60_000),
            })
            .collect();

        IssueListState {
            total_count: row_count as u64,
            rows,
            ..Default::default()
        }
    }

    // Render tests are smoke tests: no panic for any area size or state.

    #[test]
    fn test_render_issue_list_no_panic() {
        with_frame!(120, 30, |frame| {
            let state = sample_state(10);
            render_issue_list(&mut frame, &state, Rect::new(0, 0, 120, 30));
        });
    }

    #[test]
    fn test_render_issue_list_empty_no_panic() {
        with_frame!(80, 20, |frame| {
            let state = IssueListState::default();
            render_issue_list(&mut frame, &state, Rect::new(0, 0, 80, 20));
        });
    }

    #[test]
    fn test_render_issue_list_tiny_noop() {
        with_frame!(5, 2, |frame| {
            let state = sample_state(5);
            render_issue_list(&mut frame, &state, Rect::new(0, 0, 5, 2));
            // Should not panic with too-small area.
        });
    }

    #[test]
    fn test_render_issue_list_narrow_no_panic() {
        with_frame!(40, 15, |frame| {
            let state = sample_state(5);
            render_issue_list(&mut frame, &state, Rect::new(0, 0, 40, 15));
        });
    }

    #[test]
    fn test_render_issue_list_with_filter_no_panic() {
        with_frame!(100, 25, |frame| {
            let mut state = sample_state(5);
            state.filter_input = "state:opened".into();
            state.filter_focused = true;
            render_issue_list(&mut frame, &state, Rect::new(0, 0, 100, 25));
        });
    }

    // cells() must produce values in ISSUE_COLUMNS order.
    #[test]
    fn test_issue_list_row_cells() {
        let row = IssueListRow {
            project_path: "group/proj".into(),
            iid: 42,
            title: "Fix bug".into(),
            state: "opened".into(),
            author: "alice".into(),
            labels: vec!["bug".into(), "urgent".into()],
            updated_at: 1_700_000_000_000,
        };

        let cells = row.cells(6);
        assert_eq!(cells[0], "#42");
        assert_eq!(cells[1], "Fix bug");
        assert_eq!(cells[2], "opened");
        assert_eq!(cells[3], "alice");
        assert_eq!(cells[4], "bug, urgent");
        assert_eq!(cells[5], "group/proj");
    }

    #[test]
    fn test_issue_list_row_cells_truncated() {
        let row = IssueListRow {
            project_path: "g/p".into(),
            iid: 1,
            title: "t".into(),
            state: "opened".into(),
            author: "a".into(),
            labels: vec![],
            updated_at: 0,
        };

        // Request fewer columns than available.
        let cells = row.cells(3);
        assert_eq!(cells.len(), 3);
    }

    #[test]
    fn test_column_count() {
        assert_eq!(ISSUE_COLUMNS.len(), 6);
    }
}
|
||||
202
crates/lore-tui/src/view/mod.rs
Normal file
202
crates/lore-tui/src/view/mod.rs
Normal file
@@ -0,0 +1,202 @@
|
||||
#![allow(dead_code)] // Phase 1: screen content renders added in Phase 2+
|
||||
|
||||
//! Top-level view dispatch for the lore TUI.
|
||||
//!
|
||||
//! [`render_screen`] is the entry point called from `LoreApp::view()`.
|
||||
//! It composes the layout: breadcrumb bar, screen content area, status
|
||||
//! bar, and optional overlays (help, error toast).
|
||||
|
||||
pub mod common;
|
||||
pub mod dashboard;
|
||||
pub mod issue_detail;
|
||||
pub mod issue_list;
|
||||
pub mod mr_detail;
|
||||
pub mod mr_list;
|
||||
|
||||
use ftui::layout::{Constraint, Flex};
|
||||
use ftui::render::cell::PackedRgba;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
use crate::app::LoreApp;
|
||||
use crate::message::Screen;
|
||||
|
||||
use common::{
|
||||
render_breadcrumb, render_error_toast, render_help_overlay, render_loading, render_status_bar,
|
||||
};
|
||||
use dashboard::render_dashboard;
|
||||
use issue_detail::render_issue_detail;
|
||||
use issue_list::render_issue_list;
|
||||
use mr_detail::render_mr_detail;
|
||||
use mr_list::render_mr_list;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Colors (hardcoded Flexoki palette — will use Theme in Phase 2)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
// Flexoki palette values (per the banner above, hardcoded until the Theme
// abstraction lands in Phase 2); trailing comments give the palette names.
const TEXT: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3); // tx
const TEXT_MUTED: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2
const BG_SURFACE: PackedRgba = PackedRgba::rgb(0x28, 0x28, 0x24); // bg-2
const ACCENT: PackedRgba = PackedRgba::rgb(0xDA, 0x70, 0x2C); // orange
const ERROR_BG: PackedRgba = PackedRgba::rgb(0xAF, 0x3A, 0x29); // red
const ERROR_FG: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3); // tx
const BORDER: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// render_screen
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Top-level view dispatch: composes breadcrumb + content + status bar + overlays.
|
||||
///
|
||||
/// Called from `LoreApp::view()`. The layout is:
|
||||
/// ```text
|
||||
/// +-----------------------------------+
|
||||
/// | Breadcrumb (1 row) |
|
||||
/// +-----------------------------------+
|
||||
/// | |
|
||||
/// | Screen content (fill) |
|
||||
/// | |
|
||||
/// +-----------------------------------+
|
||||
/// | Status bar (1 row) |
|
||||
/// +-----------------------------------+
|
||||
/// ```
|
||||
///
|
||||
/// Overlays (help, error toast) render on top of existing content.
|
||||
pub fn render_screen(frame: &mut Frame<'_>, app: &LoreApp) {
|
||||
let bounds = frame.bounds();
|
||||
if bounds.width < 3 || bounds.height < 3 {
|
||||
return; // Terminal too small to render anything useful.
|
||||
}
|
||||
|
||||
// Split vertically: breadcrumb (1) | content (fill) | status bar (1).
|
||||
let regions = Flex::vertical()
|
||||
.constraints([
|
||||
Constraint::Fixed(1), // breadcrumb
|
||||
Constraint::Fill, // content
|
||||
Constraint::Fixed(1), // status bar
|
||||
])
|
||||
.split(bounds);
|
||||
|
||||
let breadcrumb_area = regions[0];
|
||||
let content_area = regions[1];
|
||||
let status_area = regions[2];
|
||||
|
||||
let screen = app.navigation.current();
|
||||
|
||||
// --- Breadcrumb ---
|
||||
render_breadcrumb(frame, breadcrumb_area, &app.navigation, TEXT, TEXT_MUTED);
|
||||
|
||||
// --- Screen content ---
|
||||
let load_state = app.state.load_state.get(screen);
|
||||
// tick=0 placeholder — animation wired up when Msg::Tick increments a counter.
|
||||
render_loading(frame, content_area, load_state, TEXT, TEXT_MUTED, 0);
|
||||
|
||||
// Per-screen content dispatch (other screens wired in later phases).
|
||||
if screen == &Screen::Dashboard {
|
||||
render_dashboard(frame, &app.state.dashboard, content_area);
|
||||
} else if screen == &Screen::IssueList {
|
||||
render_issue_list(frame, &app.state.issue_list, content_area);
|
||||
} else if screen == &Screen::MrList {
|
||||
render_mr_list(frame, &app.state.mr_list, content_area);
|
||||
} else if matches!(screen, Screen::IssueDetail(_)) {
|
||||
render_issue_detail(frame, &app.state.issue_detail, content_area, &*app.clock);
|
||||
} else if matches!(screen, Screen::MrDetail(_)) {
|
||||
render_mr_detail(frame, &app.state.mr_detail, content_area, &*app.clock);
|
||||
}
|
||||
|
||||
// --- Status bar ---
|
||||
render_status_bar(
|
||||
frame,
|
||||
status_area,
|
||||
&app.command_registry,
|
||||
screen,
|
||||
&app.input_mode,
|
||||
BG_SURFACE,
|
||||
TEXT,
|
||||
ACCENT,
|
||||
);
|
||||
|
||||
// --- Overlays (render last, on top of everything) ---
|
||||
|
||||
// Error toast.
|
||||
if let Some(ref error_msg) = app.state.error_toast {
|
||||
render_error_toast(frame, bounds, error_msg, ERROR_BG, ERROR_FG);
|
||||
}
|
||||
|
||||
// Help overlay.
|
||||
if app.state.show_help {
|
||||
render_help_overlay(
|
||||
frame,
|
||||
bounds,
|
||||
&app.command_registry,
|
||||
screen,
|
||||
BORDER,
|
||||
TEXT,
|
||||
TEXT_MUTED,
|
||||
0, // scroll_offset — tracked in future phase
|
||||
);
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::app::LoreApp;
    use ftui::render::grapheme_pool::GraphemePool;

    // Builds a `Frame` backed by a locally owned `GraphemePool` and runs
    // `$body` with `$frame` in scope. A macro (not a helper fn) because the
    // pool must outlive the frame that borrows it.
    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    // Smoke test: a freshly constructed app renders on a normal terminal.
    #[test]
    fn test_render_screen_does_not_panic() {
        with_frame!(80, 24, |frame| {
            let app = LoreApp::new();
            render_screen(&mut frame, &app);
        });
    }

    // A 2x2 terminal must hit the early-return path, not panic.
    #[test]
    fn test_render_screen_tiny_terminal_noop() {
        with_frame!(2, 2, |frame| {
            let app = LoreApp::new();
            render_screen(&mut frame, &app);
            // Should not panic — early return for tiny terminals.
        });
    }

    // Exercises the error-toast overlay branch.
    #[test]
    fn test_render_screen_with_error_toast() {
        with_frame!(80, 24, |frame| {
            let mut app = LoreApp::new();
            app.state.set_error("test error".into());
            render_screen(&mut frame, &app);
            // Should render without panicking.
        });
    }

    // Exercises the help-overlay branch.
    #[test]
    fn test_render_screen_with_help_overlay() {
        with_frame!(80, 24, |frame| {
            let mut app = LoreApp::new();
            app.state.show_help = true;
            render_screen(&mut frame, &app);
            // Should render without panicking.
        });
    }

    // Narrow-but-usable terminal: all regions still split and render.
    #[test]
    fn test_render_screen_narrow_terminal() {
        with_frame!(20, 5, |frame| {
            let app = LoreApp::new();
            render_screen(&mut frame, &app);
        });
    }
}
|
||||
635
crates/lore-tui/src/view/mr_detail.rs
Normal file
635
crates/lore-tui/src/view/mr_detail.rs
Normal file
@@ -0,0 +1,635 @@
|
||||
#![allow(dead_code)] // Phase 2: consumed by view/mod.rs screen dispatch
|
||||
|
||||
//! Merge request detail screen view.
|
||||
//!
|
||||
//! Composes metadata header, tab bar (Overview / Files / Discussions),
|
||||
//! and tab content. Supports progressive hydration: metadata + file
|
||||
//! changes render immediately while discussions load async.
|
||||
|
||||
use ftui::core::geometry::Rect;
|
||||
use ftui::render::cell::{Cell, PackedRgba};
|
||||
use ftui::render::drawing::Draw;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
use crate::clock::Clock;
|
||||
use crate::safety::{UrlPolicy, sanitize_for_terminal};
|
||||
use crate::state::mr_detail::{FileChangeType, MrDetailState, MrMetadata, MrTab};
|
||||
use crate::view::common::cross_ref::{CrossRefColors, render_cross_refs};
|
||||
use crate::view::common::discussion_tree::{DiscussionTreeColors, render_discussion_tree};
|
||||
|
||||
// ---------------------------------------------------------------------------
// Colors (Flexoki palette)
// ---------------------------------------------------------------------------

// Foreground text: primary and de-emphasized.
const TEXT: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3);
const TEXT_MUTED: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80);
// Semantic colors: ACCENT for IIDs/headers, GREEN/RED/CYAN/YELLOW for
// MR state, file-change icons, and merge-status indicators.
const ACCENT: PackedRgba = PackedRgba::rgb(0xDA, 0x70, 0x2C);
const GREEN: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39);
const RED: PackedRgba = PackedRgba::rgb(0xAF, 0x3A, 0x29);
const CYAN: PackedRgba = PackedRgba::rgb(0x3A, 0xA9, 0x9F);
const YELLOW: PackedRgba = PackedRgba::rgb(0xD0, 0xA2, 0x15);
// Separator/guide lines (same value as TEXT_MUTED).
const BORDER: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80);
// Inverted pair used for the selected row and the active tab.
const SELECTED_FG: PackedRgba = PackedRgba::rgb(0x10, 0x0F, 0x0F);
const SELECTED_BG: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3);
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Color constructors
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
fn discussion_colors() -> DiscussionTreeColors {
|
||||
DiscussionTreeColors {
|
||||
author_fg: CYAN,
|
||||
timestamp_fg: TEXT_MUTED,
|
||||
body_fg: TEXT,
|
||||
system_fg: TEXT_MUTED,
|
||||
diff_path_fg: GREEN,
|
||||
resolved_fg: TEXT_MUTED,
|
||||
guide_fg: BORDER,
|
||||
selected_fg: SELECTED_FG,
|
||||
selected_bg: SELECTED_BG,
|
||||
expand_fg: ACCENT,
|
||||
}
|
||||
}
|
||||
|
||||
fn cross_ref_colors() -> CrossRefColors {
|
||||
CrossRefColors {
|
||||
kind_fg: ACCENT,
|
||||
label_fg: TEXT,
|
||||
muted_fg: TEXT_MUTED,
|
||||
selected_fg: SELECTED_FG,
|
||||
selected_bg: SELECTED_BG,
|
||||
}
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Render
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Render the full MR detail screen.
///
/// Layout:
/// ```text
/// Row 0: !10 Fix auth flow (title bar)
/// Row 1: opened | alice | fix-auth -> main (metadata row)
/// Row 2: [Overview] [Files (3)] [Discussions] (tab bar)
/// Row 3: ──────────────────────────────────── (separator)
/// Row 4..N: Tab-specific content
/// ```
///
/// Renders nothing when the area is too small for the four fixed header
/// rows, or when metadata has not been hydrated yet.
pub fn render_mr_detail(
    frame: &mut Frame<'_>,
    state: &MrDetailState,
    area: Rect,
    clock: &dyn Clock,
) {
    // Need at least the 4 header rows and a minimally useful width.
    if area.height < 4 || area.width < 10 {
        return;
    }

    // Metadata arrives first in the progressive-hydration flow; until then
    // there is nothing meaningful to draw.
    let Some(ref meta) = state.metadata else {
        return;
    };

    let max_x = area.x.saturating_add(area.width);
    let mut y = area.y;

    // Each header sub-renderer returns the y of the next free row.
    // --- Title bar ---
    y = render_title_bar(frame, meta, area.x, y, max_x);

    // --- Metadata row ---
    y = render_metadata_row(frame, meta, area.x, y, max_x);

    // --- Tab bar ---
    y = render_tab_bar(frame, state, area.x, y, max_x);

    // --- Separator ---
    y = render_separator(frame, area.x, y, area.width);

    // The header can consume the whole area on very short terminals.
    let bottom = area.y.saturating_add(area.height);
    if y >= bottom {
        return;
    }

    // Remaining rows belong to the active tab's content.
    let content_area = Rect::new(area.x, y, area.width, bottom.saturating_sub(y));

    match state.active_tab {
        MrTab::Overview => render_overview_tab(frame, state, meta, content_area, clock),
        MrTab::Files => render_files_tab(frame, state, content_area),
        MrTab::Discussions => render_discussions_tab(frame, state, content_area, clock),
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Sub-renderers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Render `!10 Fix auth flow` (or `!10 [Draft] Fix auth flow`).
|
||||
fn render_title_bar(frame: &mut Frame<'_>, meta: &MrMetadata, x: u16, y: u16, max_x: u16) -> u16 {
|
||||
let iid_text = format!("!{} ", meta.iid);
|
||||
let iid_style = Cell {
|
||||
fg: ACCENT,
|
||||
..Cell::default()
|
||||
};
|
||||
let mut cx = frame.print_text_clipped(x, y, &iid_text, iid_style, max_x);
|
||||
|
||||
if meta.draft {
|
||||
let draft_style = Cell {
|
||||
fg: YELLOW,
|
||||
..Cell::default()
|
||||
};
|
||||
cx = frame.print_text_clipped(cx, y, "[Draft] ", draft_style, max_x);
|
||||
}
|
||||
|
||||
let title_style = Cell {
|
||||
fg: TEXT,
|
||||
..Cell::default()
|
||||
};
|
||||
let safe_title = sanitize_for_terminal(&meta.title, UrlPolicy::Strip);
|
||||
let _ = frame.print_text_clipped(cx, y, &safe_title, title_style, max_x);
|
||||
|
||||
y + 1
|
||||
}
|
||||
|
||||
/// Render `opened | alice | fix-auth -> main | mergeable`.
|
||||
fn render_metadata_row(
|
||||
frame: &mut Frame<'_>,
|
||||
meta: &MrMetadata,
|
||||
x: u16,
|
||||
y: u16,
|
||||
max_x: u16,
|
||||
) -> u16 {
|
||||
let state_fg = match meta.state.as_str() {
|
||||
"opened" => GREEN,
|
||||
"merged" => CYAN,
|
||||
"closed" => RED,
|
||||
_ => TEXT_MUTED,
|
||||
};
|
||||
let state_style = Cell {
|
||||
fg: state_fg,
|
||||
..Cell::default()
|
||||
};
|
||||
let muted = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
let author_style = Cell {
|
||||
fg: CYAN,
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
let mut cx = frame.print_text_clipped(x, y, &meta.state, state_style, max_x);
|
||||
cx = frame.print_text_clipped(cx, y, " | ", muted, max_x);
|
||||
cx = frame.print_text_clipped(cx, y, &meta.author, author_style, max_x);
|
||||
cx = frame.print_text_clipped(cx, y, " | ", muted, max_x);
|
||||
|
||||
let branch_text = format!("{} -> {}", meta.source_branch, meta.target_branch);
|
||||
cx = frame.print_text_clipped(cx, y, &branch_text, muted, max_x);
|
||||
|
||||
if !meta.merge_status.is_empty() {
|
||||
cx = frame.print_text_clipped(cx, y, " | ", muted, max_x);
|
||||
let status_fg = if meta.merge_status == "mergeable" {
|
||||
GREEN
|
||||
} else {
|
||||
YELLOW
|
||||
};
|
||||
let status_style = Cell {
|
||||
fg: status_fg,
|
||||
..Cell::default()
|
||||
};
|
||||
let _ = frame.print_text_clipped(cx, y, &meta.merge_status, status_style, max_x);
|
||||
}
|
||||
|
||||
y + 1
|
||||
}
|
||||
|
||||
/// Render tab bar: `[Overview] [Files (3)] [Discussions (2)]`.
|
||||
fn render_tab_bar(frame: &mut Frame<'_>, state: &MrDetailState, x: u16, y: u16, max_x: u16) -> u16 {
|
||||
let tabs = [
|
||||
(MrTab::Overview, "Overview".to_string()),
|
||||
(
|
||||
MrTab::Files,
|
||||
format!("Files ({})", state.file_changes.len()),
|
||||
),
|
||||
(
|
||||
MrTab::Discussions,
|
||||
format!("Discussions ({})", state.discussions.len()),
|
||||
),
|
||||
];
|
||||
|
||||
let mut cx = x;
|
||||
for (tab, label) in &tabs {
|
||||
if *tab == state.active_tab {
|
||||
let style = Cell {
|
||||
fg: SELECTED_FG,
|
||||
bg: SELECTED_BG,
|
||||
..Cell::default()
|
||||
};
|
||||
let text = format!(" {label} ");
|
||||
cx = frame.print_text_clipped(cx, y, &text, style, max_x);
|
||||
} else {
|
||||
let style = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
let text = format!(" {label} ");
|
||||
cx = frame.print_text_clipped(cx, y, &text, style, max_x);
|
||||
}
|
||||
// Tab separator.
|
||||
let sep = Cell {
|
||||
fg: BORDER,
|
||||
..Cell::default()
|
||||
};
|
||||
cx = frame.print_text_clipped(cx, y, " ", sep, max_x);
|
||||
}
|
||||
|
||||
y + 1
|
||||
}
|
||||
|
||||
/// Render horizontal separator.
|
||||
fn render_separator(frame: &mut Frame<'_>, x: u16, y: u16, width: u16) -> u16 {
|
||||
let sep_style = Cell {
|
||||
fg: BORDER,
|
||||
..Cell::default()
|
||||
};
|
||||
let line: String = "\u{2500}".repeat(width as usize);
|
||||
let _ = frame.print_text_clipped(x, y, &line, sep_style, x.saturating_add(width));
|
||||
y + 1
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tab content renderers
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Overview tab: description + cross-references.
///
/// The description is scrolled by whole lines (`state.description_scroll`),
/// then a separator and the cross-reference list follow if space remains.
fn render_overview_tab(
    frame: &mut Frame<'_>,
    state: &MrDetailState,
    meta: &MrMetadata,
    area: Rect,
    _clock: &dyn Clock,
) {
    let max_x = area.x.saturating_add(area.width);
    let mut y = area.y;
    let bottom = area.y.saturating_add(area.height);

    // --- Description ---
    // Sanitize before display: the description is remote, untrusted text.
    let safe_desc = sanitize_for_terminal(&meta.description, UrlPolicy::Strip);
    let lines: Vec<&str> = safe_desc.lines().collect();
    let text_style = Cell {
        fg: TEXT,
        ..Cell::default()
    };

    // `take` clamps to the rows remaining below `y`, so the loop can never
    // draw past `bottom`.
    for line in lines
        .iter()
        .skip(state.description_scroll)
        .take((bottom.saturating_sub(y)) as usize)
    {
        let _ = frame.print_text_clipped(area.x, y, line, text_style, max_x);
        y += 1;
    }

    if y >= bottom {
        return;
    }

    // --- Separator ---
    y = render_separator(frame, area.x, y, area.width);
    if y >= bottom {
        return;
    }

    // --- Cross-references ---
    // Header row, then the widget fills whatever rows remain.
    if !state.cross_refs.is_empty() {
        let header_style = Cell {
            fg: ACCENT,
            ..Cell::default()
        };
        let header = format!("Cross References ({})", state.cross_refs.len());
        let _ = frame.print_text_clipped(area.x, y, &header, header_style, max_x);
        y += 1;

        if y < bottom {
            let refs_area = Rect::new(area.x, y, area.width, bottom.saturating_sub(y));
            let _ = render_cross_refs(
                frame,
                &state.cross_refs,
                &state.cross_ref_state,
                refs_area,
                &cross_ref_colors(),
            );
        }
    }
}
|
||||
|
||||
/// Files tab: list of changed files with change type indicators.
///
/// Supports a scrolled window (`state.file_scroll`) with one highlighted
/// row (`state.file_selected`, an absolute index into `file_changes`).
fn render_files_tab(frame: &mut Frame<'_>, state: &MrDetailState, area: Rect) {
    let max_x = area.x.saturating_add(area.width);
    let mut y = area.y;
    let bottom = area.y.saturating_add(area.height);

    if state.file_changes.is_empty() {
        let style = Cell {
            fg: TEXT_MUTED,
            ..Cell::default()
        };
        let _ = frame.print_text_clipped(area.x + 1, y, "No file changes", style, max_x);
        return;
    }

    // `i` counts visible rows (enumerate comes after skip), so the absolute
    // index is `i + state.file_scroll`.
    for (i, fc) in state
        .file_changes
        .iter()
        .skip(state.file_scroll)
        .take((bottom.saturating_sub(y)) as usize)
        .enumerate()
    {
        let is_selected = i + state.file_scroll == state.file_selected;

        let (fg, bg) = if is_selected {
            (SELECTED_FG, SELECTED_BG)
        } else {
            (TEXT, PackedRgba::TRANSPARENT)
        };

        // Paint the selection background across the full row first; the
        // icon/path prints below then overwrite the leading cells.
        if is_selected {
            let sel_cell = Cell {
                fg,
                bg,
                ..Cell::default()
            };
            frame.draw_rect_filled(Rect::new(area.x, y, area.width, 1), sel_cell);
        }

        // Change type icon. On the selected row the icon adopts the
        // selection foreground instead of its semantic color.
        let icon_fg = match fc.change_type {
            FileChangeType::Added => GREEN,
            FileChangeType::Deleted => RED,
            FileChangeType::Modified => YELLOW,
            FileChangeType::Renamed => CYAN,
        };
        let icon_style = Cell {
            fg: if is_selected { fg } else { icon_fg },
            bg,
            ..Cell::default()
        };
        let mut cx = frame.print_text_clipped(area.x, y, fc.change_type.icon(), icon_style, max_x);
        cx = frame.print_text_clipped(cx, y, " ", icon_style, max_x);

        // File path. Renames show "old -> new" when the old path is known.
        // NOTE(review): paths are rendered without sanitize_for_terminal,
        // unlike title/description — confirm paths cannot carry control chars.
        let path_style = Cell {
            fg,
            bg,
            ..Cell::default()
        };
        let display_path = if fc.change_type == FileChangeType::Renamed {
            if let Some(ref old) = fc.old_path {
                format!("{old} -> {}", fc.new_path)
            } else {
                fc.new_path.clone()
            }
        } else {
            fc.new_path.clone()
        };
        let _ = frame.print_text_clipped(cx, y, &display_path, path_style, max_x);

        y += 1;
    }
}
|
||||
|
||||
/// Discussions tab: all discussions rendered via the tree widget.
|
||||
fn render_discussions_tab(
|
||||
frame: &mut Frame<'_>,
|
||||
state: &MrDetailState,
|
||||
area: Rect,
|
||||
clock: &dyn Clock,
|
||||
) {
|
||||
let max_x = area.x.saturating_add(area.width);
|
||||
|
||||
if !state.discussions_loaded {
|
||||
let style = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
let _ =
|
||||
frame.print_text_clipped(area.x + 1, area.y, "Loading discussions...", style, max_x);
|
||||
return;
|
||||
}
|
||||
|
||||
if state.discussions.is_empty() {
|
||||
let style = Cell {
|
||||
fg: TEXT_MUTED,
|
||||
..Cell::default()
|
||||
};
|
||||
let _ = frame.print_text_clipped(area.x + 1, area.y, "No discussions", style, max_x);
|
||||
return;
|
||||
}
|
||||
|
||||
let _ = render_discussion_tree(
|
||||
frame,
|
||||
&state.discussions,
|
||||
&state.tree_state,
|
||||
area,
|
||||
&discussion_colors(),
|
||||
clock,
|
||||
);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use crate::clock::FakeClock;
    use crate::message::EntityKey;
    use crate::state::mr_detail::{FileChange, FileChangeType, MrDetailData, MrMetadata, MrTab};
    use crate::view::common::cross_ref::{CrossRef, CrossRefKind};
    use crate::view::common::discussion_tree::{DiscussionNode, NoteNode};
    use ftui::render::grapheme_pool::GraphemePool;

    // Builds a `Frame` over a locally owned `GraphemePool`; a macro so the
    // pool outlives the frame that borrows it.
    macro_rules! with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    // Fully-populated metadata fixture shared by most tests below.
    fn sample_mr_metadata() -> MrMetadata {
        MrMetadata {
            iid: 10,
            project_path: "group/project".into(),
            title: "Fix authentication flow".into(),
            description: "This MR fixes the login bug.\nSee issue #42.".into(),
            state: "opened".into(),
            draft: false,
            author: "alice".into(),
            assignees: vec!["bob".into()],
            reviewers: vec!["carol".into()],
            labels: vec!["backend".into()],
            source_branch: "fix-auth".into(),
            target_branch: "main".into(),
            merge_status: "mergeable".into(),
            created_at: 1_700_000_000_000,
            updated_at: 1_700_000_060_000,
            merged_at: None,
            web_url: "https://gitlab.com/group/project/-/merge_requests/10".into(),
            discussion_count: 1,
            file_change_count: 2,
        }
    }

    // State fixture with metadata, one cross-ref, and two file changes
    // applied — i.e. post-metadata-hydration, pre-discussion-hydration.
    fn sample_mr_state() -> MrDetailState {
        let mut state = MrDetailState::default();
        state.load_new(EntityKey::mr(1, 10));
        state.apply_metadata(MrDetailData {
            metadata: sample_mr_metadata(),
            cross_refs: vec![CrossRef {
                kind: CrossRefKind::ClosingMr,
                entity_key: EntityKey::issue(1, 42),
                label: "Auth bug".into(),
                navigable: true,
            }],
            file_changes: vec![
                FileChange {
                    old_path: None,
                    new_path: "src/auth.rs".into(),
                    change_type: FileChangeType::Modified,
                },
                FileChange {
                    old_path: None,
                    new_path: "src/lib.rs".into(),
                    change_type: FileChangeType::Added,
                },
            ],
        });
        state
    }

    // No metadata yet: render must early-return, not panic.
    #[test]
    fn test_render_mr_detail_no_metadata() {
        with_frame!(80, 24, |frame| {
            let state = MrDetailState::default();
            let clock = FakeClock::from_ms(1_700_000_000_000);
            render_mr_detail(&mut frame, &state, Rect::new(0, 0, 80, 24), &clock);
        });
    }

    #[test]
    fn test_render_mr_detail_overview_tab() {
        with_frame!(80, 24, |frame| {
            let state = sample_mr_state();
            let clock = FakeClock::from_ms(1_700_000_060_000);
            render_mr_detail(&mut frame, &state, Rect::new(0, 0, 80, 24), &clock);
        });
    }

    #[test]
    fn test_render_mr_detail_files_tab() {
        with_frame!(80, 24, |frame| {
            let mut state = sample_mr_state();
            state.active_tab = MrTab::Files;
            let clock = FakeClock::from_ms(1_700_000_060_000);
            render_mr_detail(&mut frame, &state, Rect::new(0, 0, 80, 24), &clock);
        });
    }

    // Discussions tab before apply_discussions: exercises the loading text.
    #[test]
    fn test_render_mr_detail_discussions_tab_loading() {
        with_frame!(80, 24, |frame| {
            let mut state = sample_mr_state();
            state.active_tab = MrTab::Discussions;
            let clock = FakeClock::from_ms(1_700_000_060_000);
            render_mr_detail(&mut frame, &state, Rect::new(0, 0, 80, 24), &clock);
        });
    }

    // Discussions tab with one hydrated diff-note discussion.
    #[test]
    fn test_render_mr_detail_discussions_tab_with_data() {
        with_frame!(80, 30, |frame| {
            let mut state = sample_mr_state();
            state.active_tab = MrTab::Discussions;
            state.apply_discussions(vec![DiscussionNode {
                discussion_id: "d1".into(),
                notes: vec![NoteNode {
                    author: "alice".into(),
                    body: "Looks good".into(),
                    created_at: 1_700_000_020_000,
                    is_system: false,
                    is_diff_note: true,
                    diff_file_path: Some("src/auth.rs".into()),
                    diff_new_line: Some(42),
                }],
                resolvable: true,
                resolved: false,
            }]);
            let clock = FakeClock::from_ms(1_700_000_060_000);
            render_mr_detail(&mut frame, &state, Rect::new(0, 0, 80, 30), &clock);
        });
    }

    // Draft flag exercises the [Draft] branch of the title bar.
    #[test]
    fn test_render_mr_detail_draft() {
        with_frame!(80, 24, |frame| {
            let mut state = sample_mr_state();
            state.metadata.as_mut().unwrap().draft = true;
            let clock = FakeClock::from_ms(1_700_000_060_000);
            render_mr_detail(&mut frame, &state, Rect::new(0, 0, 80, 24), &clock);
        });
    }

    // Below the 4-row / 10-col minimum: early return, no panic.
    #[test]
    fn test_render_mr_detail_tiny_area() {
        with_frame!(5, 3, |frame| {
            let state = sample_mr_state();
            let clock = FakeClock::from_ms(1_700_000_060_000);
            render_mr_detail(&mut frame, &state, Rect::new(0, 0, 5, 3), &clock);
        });
    }

    #[test]
    fn test_render_mr_detail_narrow_terminal() {
        with_frame!(30, 10, |frame| {
            let state = sample_mr_state();
            let clock = FakeClock::from_ms(1_700_000_060_000);
            render_mr_detail(&mut frame, &state, Rect::new(0, 0, 30, 10), &clock);
        });
    }

    // Empty file list exercises the "No file changes" placeholder.
    #[test]
    fn test_render_files_empty() {
        with_frame!(80, 24, |frame| {
            let mut state = MrDetailState::default();
            state.load_new(EntityKey::mr(1, 10));
            state.apply_metadata(MrDetailData {
                metadata: sample_mr_metadata(),
                cross_refs: vec![],
                file_changes: vec![],
            });
            state.active_tab = MrTab::Files;
            let clock = FakeClock::from_ms(1_700_000_060_000);
            render_mr_detail(&mut frame, &state, Rect::new(0, 0, 80, 24), &clock);
        });
    }

    // Renamed file exercises the "old -> new" display path branch.
    #[test]
    fn test_render_files_with_rename() {
        with_frame!(80, 24, |frame| {
            let mut state = MrDetailState::default();
            state.load_new(EntityKey::mr(1, 10));
            state.apply_metadata(MrDetailData {
                metadata: sample_mr_metadata(),
                cross_refs: vec![],
                file_changes: vec![FileChange {
                    old_path: Some("src/old.rs".into()),
                    new_path: "src/new.rs".into(),
                    change_type: FileChangeType::Renamed,
                }],
            });
            state.active_tab = MrTab::Files;
            let clock = FakeClock::from_ms(1_700_000_060_000);
            render_mr_detail(&mut frame, &state, Rect::new(0, 0, 80, 24), &clock);
        });
    }
}
|
||||
390
crates/lore-tui/src/view/mr_list.rs
Normal file
390
crates/lore-tui/src/view/mr_list.rs
Normal file
@@ -0,0 +1,390 @@
|
||||
#![allow(dead_code)] // Phase 2: consumed by view/mod.rs screen dispatch
|
||||
|
||||
//! MR list screen view.
|
||||
//!
|
||||
//! Composes the reusable [`EntityTable`] and [`FilterBar`] widgets
|
||||
//! with MR-specific column definitions and [`TableRow`] implementation.
|
||||
|
||||
use ftui::core::geometry::Rect;
|
||||
use ftui::render::cell::{Cell, PackedRgba};
|
||||
use ftui::render::drawing::Draw;
|
||||
use ftui::render::frame::Frame;
|
||||
|
||||
use crate::state::mr_list::{MrListRow, MrListState, MrSortField, MrSortOrder};
|
||||
use crate::view::common::entity_table::{
|
||||
Align, ColumnDef, EntityTableState, TableColors, TableRow, render_entity_table,
|
||||
};
|
||||
use crate::view::common::filter_bar::{FilterBarColors, FilterBarState, render_filter_bar};
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// TableRow implementation for MrListRow
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
impl TableRow for MrListRow {
    /// Produce display cells in `MR_COLUMNS` order, truncated to
    /// `col_count` when fewer columns fit the terminal.
    fn cells(&self, col_count: usize) -> Vec<String> {
        // Draft MRs carry a [WIP] marker alongside the IID.
        let iid_text = if self.draft {
            format!("!{} [WIP]", self.iid)
        } else {
            format!("!{}", self.iid)
        };

        // Order must match the MR_COLUMNS definition.
        let mut cells = vec![
            iid_text,                   // 0: IID (with draft indicator)
            self.title.clone(),         // 1: Title
            self.state.clone(),         // 2: State
            self.author.clone(),        // 3: Author
            self.target_branch.clone(), // 4: Target branch
            self.labels.join(", "),     // 5: Labels
            self.project_path.clone(),  // 6: Project
        ];
        cells.truncate(col_count);
        cells
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Column definitions
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Column definitions for the MR list table.
///
/// `flex_weight: 0` columns stay at `min_width`; others share leftover
/// width proportionally. `priority` presumably controls drop order on
/// narrow terminals (higher values dropped first) — confirm against the
/// entity_table layout code.
const MR_COLUMNS: &[ColumnDef] = &[
    ColumnDef {
        name: "IID",
        min_width: 6,
        flex_weight: 0,
        priority: 0,
        align: Align::Right,
    },
    ColumnDef {
        name: "Title",
        min_width: 15,
        flex_weight: 4,
        priority: 0,
        align: Align::Left,
    },
    ColumnDef {
        name: "State",
        min_width: 7,
        flex_weight: 0,
        priority: 0,
        align: Align::Left,
    },
    ColumnDef {
        name: "Author",
        min_width: 8,
        flex_weight: 1,
        priority: 1,
        align: Align::Left,
    },
    ColumnDef {
        name: "Target",
        min_width: 8,
        flex_weight: 1,
        priority: 1,
        align: Align::Left,
    },
    ColumnDef {
        name: "Labels",
        min_width: 10,
        flex_weight: 2,
        priority: 2,
        align: Align::Left,
    },
    ColumnDef {
        name: "Project",
        min_width: 12,
        flex_weight: 1,
        priority: 3,
        align: Align::Left,
    },
];
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Colors
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Flexoki-derived palette for the MR entity table (header, zebra rows,
/// inverted selection, sort indicator).
fn table_colors() -> TableColors {
    TableColors {
        header_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
        header_bg: PackedRgba::rgb(0x34, 0x34, 0x31),
        row_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
        row_alt_bg: PackedRgba::rgb(0x1C, 0x1B, 0x1A),
        selected_fg: PackedRgba::rgb(0x10, 0x0F, 0x0F),
        selected_bg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
        sort_indicator: PackedRgba::rgb(0x87, 0x96, 0x6B),
        border: PackedRgba::rgb(0x40, 0x40, 0x3C),
    }
}
|
||||
|
||||
/// Flexoki-derived palette for the filter bar (input field, inverted
/// cursor block, token chips, error text).
fn filter_colors() -> FilterBarColors {
    FilterBarColors {
        input_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
        input_bg: PackedRgba::rgb(0x28, 0x28, 0x24),
        cursor_fg: PackedRgba::rgb(0x00, 0x00, 0x00),
        cursor_bg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
        chip_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
        chip_bg: PackedRgba::rgb(0x40, 0x40, 0x3C),
        error_fg: PackedRgba::rgb(0xAF, 0x3A, 0x29),
        label_fg: PackedRgba::rgb(0x87, 0x87, 0x80),
    }
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Render
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Render the full MR list screen.
///
/// Layout:
/// ```text
/// Row 0: [Filter bar: / filter input_________]
/// Row 1: [chip1] [chip2] (if filter active)
/// Row 2: -----------------------------------------
/// Row 3..N: IID Title State Author ...
/// -----------------------------------------
/// !42 Fix pipeline opened alice ...
/// !41 Add CI config merged bob ...
/// Bottom: Showing 42 of 128 merge requests
/// ```
pub fn render_mr_list(frame: &mut Frame<'_>, state: &MrListState, area: Rect) {
    // Need filter bar + at least one table row to be useful.
    if area.height < 3 || area.width < 10 {
        return;
    }

    let mut y = area.y;
    let max_x = area.x.saturating_add(area.width);

    // -- Filter bar ---------------------------------------------------------
    // Up to two rows: input line plus an optional chip row.
    let filter_area = Rect::new(area.x, y, area.width, 2.min(area.height));
    let fb_state = FilterBarState {
        input: state.filter_input.clone(),
        // Cursor pinned to end of input. NOTE(review): `len()` is a byte
        // count — confirm FilterBarState::cursor expects bytes, not chars,
        // for multibyte filter text.
        cursor: state.filter_input.len(),
        focused: state.filter_focused,
        tokens: crate::filter_dsl::parse_filter_tokens(&state.filter_input),
        unknown_fields: Vec::new(),
    };
    // The filter bar reports how many rows it actually used.
    let filter_rows = render_filter_bar(frame, &fb_state, filter_area, &filter_colors());
    y = y.saturating_add(filter_rows);

    // -- Status line (total count) ------------------------------------------
    let remaining_height = area.height.saturating_sub(y - area.y);
    if remaining_height < 2 {
        return;
    }

    // Reserve bottom row for status.
    let table_height = remaining_height.saturating_sub(1);
    let status_y = y.saturating_add(table_height);

    // -- Entity table -------------------------------------------------------
    // Map the domain sort field onto a column index; UpdatedAt has no
    // dedicated column, so it shares the IID column's indicator slot.
    let sort_col = match state.sort_field {
        MrSortField::UpdatedAt | MrSortField::Iid => 0,
        MrSortField::Title => 1,
        MrSortField::State => 2,
        MrSortField::Author => 3,
        MrSortField::TargetBranch => 4,
    };

    let mut table_state = EntityTableState {
        selected: state.selected_index,
        scroll_offset: state.scroll_offset,
        sort_column: sort_col,
        sort_ascending: matches!(state.sort_order, MrSortOrder::Asc),
    };

    let table_area = Rect::new(area.x, y, area.width, table_height);
    render_entity_table(
        frame,
        &state.rows,
        MR_COLUMNS,
        &mut table_state,
        table_area,
        &table_colors(),
    );

    // -- Bottom status ------------------------------------------------------
    if status_y < area.y.saturating_add(area.height) {
        render_status_line(frame, state, area.x, status_y, max_x);
    }
}
|
||||
|
||||
/// Render the bottom status line showing row count and pagination info.
|
||||
fn render_status_line(frame: &mut Frame<'_>, state: &MrListState, x: u16, y: u16, max_x: u16) {
|
||||
let muted = Cell {
|
||||
fg: PackedRgba::rgb(0x87, 0x87, 0x80),
|
||||
..Cell::default()
|
||||
};
|
||||
|
||||
let status = if state.rows.is_empty() {
|
||||
"No merge requests found".to_string()
|
||||
} else {
|
||||
let showing = state.rows.len();
|
||||
let total = state.total_count;
|
||||
if state.next_cursor.is_some() {
|
||||
format!("Showing {showing} of {total} merge requests (more available)")
|
||||
} else {
|
||||
format!("Showing {showing} of {total} merge requests")
|
||||
}
|
||||
};
|
||||
|
||||
frame.print_text_clipped(x, y, &status, muted, max_x);
|
||||
}
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Tests
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
#[cfg(test)]
mod tests {
    use super::*;
    use ftui::render::grapheme_pool::GraphemePool;

    /// Allocate a grapheme pool plus a frame of the given size and hand the
    /// frame to `body`. Replaces ad-hoc pool/frame setup in each test.
    fn with_frame(width: u16, height: u16, body: impl FnOnce(&mut Frame<'_>)) {
        let mut pool = GraphemePool::new();
        let mut frame = Frame::new(width, height, &mut pool);
        body(&mut frame);
    }

    /// Build a list state populated with `row_count` deterministic rows:
    /// alternating opened/merged state, labels only on the first row, every
    /// third row a draft, timestamps stepping back one minute per row.
    fn sample_state(row_count: usize) -> MrListState {
        let mut rows = Vec::with_capacity(row_count);
        for idx in 0..row_count {
            let labels = if idx == 0 {
                vec!["backend".into(), "urgent".into()]
            } else {
                Vec::new()
            };
            let state = if idx % 2 == 0 { "opened" } else { "merged" };
            rows.push(MrListRow {
                project_path: "group/project".into(),
                iid: (idx + 1) as i64,
                title: format!("MR {}", idx + 1),
                state: state.into(),
                author: "taylor".into(),
                target_branch: "main".into(),
                labels,
                updated_at: 1_700_000_000_000 - (idx as i64 * 60_000),
                draft: idx % 3 == 0,
            });
        }

        MrListState {
            total_count: row_count as u64,
            rows,
            ..Default::default()
        }
    }

    #[test]
    fn test_render_mr_list_no_panic() {
        with_frame(120, 30, |frame| {
            let state = sample_state(10);
            render_mr_list(frame, &state, Rect::new(0, 0, 120, 30));
        });
    }

    #[test]
    fn test_render_mr_list_empty_no_panic() {
        with_frame(80, 20, |frame| {
            let state = MrListState::default();
            render_mr_list(frame, &state, Rect::new(0, 0, 80, 20));
        });
    }

    #[test]
    fn test_render_mr_list_tiny_noop() {
        with_frame(5, 2, |frame| {
            let state = sample_state(5);
            render_mr_list(frame, &state, Rect::new(0, 0, 5, 2));
        });
    }

    #[test]
    fn test_render_mr_list_narrow_no_panic() {
        with_frame(40, 15, |frame| {
            let state = sample_state(5);
            render_mr_list(frame, &state, Rect::new(0, 0, 40, 15));
        });
    }

    #[test]
    fn test_render_mr_list_with_filter_no_panic() {
        with_frame(100, 25, |frame| {
            let mut state = sample_state(5);
            state.filter_input = "state:opened".into();
            state.filter_focused = true;
            render_mr_list(frame, &state, Rect::new(0, 0, 100, 25));
        });
    }

    #[test]
    fn test_mr_list_row_cells() {
        let row = MrListRow {
            project_path: "group/proj".into(),
            iid: 42,
            title: "Fix pipeline".into(),
            state: "opened".into(),
            author: "alice".into(),
            target_branch: "main".into(),
            labels: vec!["backend".into(), "urgent".into()],
            updated_at: 1_700_000_000_000,
            draft: false,
        };

        let cells = row.cells(7);
        // Column order: iid, title, state, author, target, labels, project.
        assert_eq!(cells[0], "!42");
        assert_eq!(cells[1], "Fix pipeline");
        assert_eq!(cells[2], "opened");
        assert_eq!(cells[3], "alice");
        assert_eq!(cells[4], "main");
        assert_eq!(cells[5], "backend, urgent");
        assert_eq!(cells[6], "group/proj");
    }

    #[test]
    fn test_mr_list_row_cells_draft() {
        let row = MrListRow {
            project_path: "g/p".into(),
            iid: 7,
            title: "WIP MR".into(),
            state: "opened".into(),
            author: "bob".into(),
            target_branch: "develop".into(),
            labels: vec![],
            updated_at: 0,
            draft: true,
        };

        // Draft rows get a [WIP] marker appended to the iid cell.
        assert_eq!(row.cells(7)[0], "!7 [WIP]");
    }

    #[test]
    fn test_mr_list_row_cells_truncated() {
        let row = MrListRow {
            project_path: "g/p".into(),
            iid: 1,
            title: "t".into(),
            state: "opened".into(),
            author: "a".into(),
            target_branch: "main".into(),
            labels: vec![],
            updated_at: 0,
            draft: false,
        };

        // Asking for fewer columns than exist truncates rather than panics.
        assert_eq!(row.cells(3).len(), 3);
    }

    #[test]
    fn test_column_count() {
        assert_eq!(MR_COLUMNS.len(), 7);
    }
}
|
||||
202
docs/plan-expose-discussion-ids.feedback-1.md
Normal file
202
docs/plan-expose-discussion-ids.feedback-1.md
Normal file
@@ -0,0 +1,202 @@
|
||||
No `## Rejected Recommendations` section appears in the plan you pasted, so the revisions below are all net-new.
|
||||
|
||||
1. **Add an explicit “Bridge Contract” and fix scope inconsistency**
|
||||
Analysis: The plan says “Three changes” but defines four. More importantly, identifier requirements are scattered. A single contract section prevents drift and makes every new read surface prove it can drive a write call.
|
||||
|
||||
```diff
|
||||
@@
|
||||
-**Scope**: Three changes, delivered in order:
|
||||
+**Scope**: Four workstreams, delivered in order:
|
||||
1. Add `gitlab_discussion_id` to notes output
|
||||
2. Add `gitlab_discussion_id` to show command discussion groups
|
||||
3. Add a standalone `discussions` list command
|
||||
4. Fix robot-docs to list actual field names instead of opaque type references
|
||||
+
|
||||
+## Bridge Contract (Cross-Cutting)
|
||||
+Every read payload that surfaces notes/discussions MUST include:
|
||||
+- `project_path`
|
||||
+- `noteable_type`
|
||||
+- `parent_iid`
|
||||
+- `gitlab_discussion_id`
|
||||
+- `gitlab_note_id` (when note-level data is returned)
|
||||
+This contract is required so agents can deterministically construct `glab api` write calls.
|
||||
```
|
||||
|
||||
2. **Normalize identifier naming now (break ambiguous names)**
|
||||
Analysis: Current `id`/`gitlab_id` naming is ambiguous in mixed payloads. Rename to explicit `note_id` and `gitlab_note_id` now (you explicitly don’t care about backward compatibility). This reduces automation mistakes.
|
||||
|
||||
```diff
|
||||
@@ 1b. Add field to `NoteListRow`
|
||||
-pub struct NoteListRow {
|
||||
- pub id: i64,
|
||||
- pub gitlab_id: i64,
|
||||
+pub struct NoteListRow {
|
||||
+ pub note_id: i64, // local DB id
|
||||
+ pub gitlab_note_id: i64, // GitLab note id
|
||||
@@
|
||||
@@ 1c. Add field to `NoteListRowJson`
|
||||
-pub struct NoteListRowJson {
|
||||
- pub id: i64,
|
||||
- pub gitlab_id: i64,
|
||||
+pub struct NoteListRowJson {
|
||||
+ pub note_id: i64,
|
||||
+ pub gitlab_note_id: i64,
|
||||
@@
|
||||
-#### 2f. Add `gitlab_note_id` to note detail structs in show
|
||||
-While we're here, add `gitlab_id` to `NoteDetail`, `MrNoteDetail`, and their JSON
|
||||
+#### 2f. Add `gitlab_note_id` to note detail structs in show
|
||||
+While we're here, add `gitlab_note_id` to `NoteDetail`, `MrNoteDetail`, and their JSON
|
||||
counterparts.
|
||||
```
|
||||
|
||||
3. **Stop positional column indexing for these changes**
|
||||
Analysis: In `list.rs`, row extraction is positional (`row.get(18)`, etc.). Adding fields is fragile and easy to break silently. Use named aliases and named lookup for robustness.
|
||||
|
||||
```diff
|
||||
@@ 1a/1b SQL + query_map
|
||||
- p.path_with_namespace AS project_path
|
||||
+ p.path_with_namespace AS project_path,
|
||||
+ d.gitlab_discussion_id AS gitlab_discussion_id
|
||||
@@
|
||||
- project_path: row.get(18)?,
|
||||
- gitlab_discussion_id: row.get(19)?,
|
||||
+ project_path: row.get("project_path")?,
|
||||
+ gitlab_discussion_id: row.get("gitlab_discussion_id")?,
|
||||
```
|
||||
|
||||
4. **Redesign `discussions` query to avoid correlated subquery fanout**
|
||||
Analysis: Proposed query uses many correlated subqueries per row. That’s acceptable for tiny MR-scoped sets, but degrades for project-wide scans. Use a base CTE + one rollup pass over notes.
|
||||
|
||||
```diff
|
||||
@@ 3c. SQL Query
|
||||
-SELECT
|
||||
- d.id,
|
||||
- ...
|
||||
- (SELECT COUNT(*) FROM notes n2 WHERE n2.discussion_id = d.id AND n2.is_system = 0) AS note_count,
|
||||
- (SELECT n3.author_username FROM notes n3 WHERE n3.discussion_id = d.id ORDER BY n3.position LIMIT 1) AS first_author,
|
||||
- ...
|
||||
-FROM discussions d
|
||||
+WITH base AS (
|
||||
+ SELECT d.id, d.gitlab_discussion_id, d.noteable_type, d.project_id, d.issue_id, d.merge_request_id,
|
||||
+ d.individual_note, d.first_note_at, d.last_note_at, d.resolvable, d.resolved
|
||||
+ FROM discussions d
|
||||
+ {where_sql}
|
||||
+),
|
||||
+note_rollup AS (
|
||||
+ SELECT n.discussion_id,
|
||||
+ COUNT(*) FILTER (WHERE n.is_system = 0) AS user_note_count,
|
||||
+ COUNT(*) AS total_note_count,
|
||||
+ MIN(CASE WHEN n.is_system = 0 THEN n.position END) AS first_user_pos
|
||||
+ FROM notes n
|
||||
+ JOIN base b ON b.id = n.discussion_id
|
||||
+ GROUP BY n.discussion_id
|
||||
+)
|
||||
+SELECT ...
|
||||
+FROM base b
|
||||
+LEFT JOIN note_rollup r ON r.discussion_id = b.id
|
||||
```
|
||||
|
||||
5. **Add explicit index work for new access patterns**
|
||||
Analysis: Existing indexes are good but not ideal for new list patterns (`project + last_note`, note position ordering inside discussion). Add migration entries to keep latency stable.
|
||||
|
||||
```diff
|
||||
@@ ## 3. Add Standalone `discussions` List Command
|
||||
+#### 3h. Add migration for discussion-list performance
|
||||
+**File**: `migrations/027_discussions_list_indexes.sql`
|
||||
+```sql
|
||||
+CREATE INDEX IF NOT EXISTS idx_discussions_project_last_note
|
||||
+ ON discussions(project_id, last_note_at DESC, id DESC);
|
||||
+CREATE INDEX IF NOT EXISTS idx_discussions_project_first_note
|
||||
+ ON discussions(project_id, first_note_at DESC, id DESC);
|
||||
+CREATE INDEX IF NOT EXISTS idx_notes_discussion_position
|
||||
+ ON notes(discussion_id, position);
|
||||
+```
|
||||
```
|
||||
|
||||
6. **Add keyset pagination (critical for agent workflows)**
|
||||
Analysis: `--limit` alone is not enough for automation over large datasets. Add cursor-based pagination with deterministic sort keys and `next_cursor` in JSON.
|
||||
|
||||
```diff
|
||||
@@ 3a. CLI Args
|
||||
+ /// Keyset cursor from previous response
|
||||
+ #[arg(long, help_heading = "Output")]
|
||||
+ pub cursor: Option<String>,
|
||||
@@
|
||||
@@ Response Schema
|
||||
- "total_count": 15,
|
||||
- "showing": 15
|
||||
+ "total_count": 15,
|
||||
+ "showing": 15,
|
||||
+ "next_cursor": "eyJsYXN0X25vdGVfYXQiOjE3MDAwMDAwMDAwMDAsImlkIjoxMjN9"
|
||||
@@
|
||||
@@ Validation Criteria
|
||||
+7. `lore -J discussions ... --cursor <token>` returns the next stable page without duplicates/skips
|
||||
```
|
||||
|
||||
7. **Fix semantic ambiguities in discussion summary fields**
|
||||
Analysis: `note_count` is ambiguous, and `first_author` can accidentally be a system note author. Make fields explicit and consistent with non-system default behavior.
|
||||
|
||||
```diff
|
||||
@@ Response Schema
|
||||
- "note_count": 3,
|
||||
- "first_author": "elovegrove",
|
||||
+ "user_note_count": 3,
|
||||
+ "total_note_count": 4,
|
||||
+ "first_user_author": "elovegrove",
|
||||
@@
|
||||
@@ 3d. Filters struct / path behavior
|
||||
-- `path` → `EXISTS (SELECT 1 FROM notes n WHERE n.discussion_id = d.id AND n.position_new_path LIKE ?)`
|
||||
+- `path` → match on BOTH `position_new_path` and `position_old_path` (exact/prefix)
|
||||
```
|
||||
|
||||
8. **Enrich show outputs with actionable thread metadata**
|
||||
Analysis: Adding only discussion id helps, but agents still need thread state and note ids to pick targets correctly. Add `resolvable`, `resolved`, `last_note_at_iso`, and `gitlab_note_id` in show discussion payloads.
|
||||
|
||||
```diff
|
||||
@@ 2a/2b show discussion structs
|
||||
pub struct DiscussionDetailJson {
|
||||
pub gitlab_discussion_id: String,
|
||||
+ pub resolvable: bool,
|
||||
+ pub resolved: bool,
|
||||
+ pub last_note_at_iso: String,
|
||||
pub notes: Vec<NoteDetailJson>,
|
||||
@@
|
||||
pub struct NoteDetailJson {
|
||||
+ pub gitlab_note_id: i64,
|
||||
pub author_username: String,
|
||||
```
|
||||
|
||||
9. **Harden robot-docs against schema drift with tests**
|
||||
Analysis: Static JSON in `main.rs` will drift again. Add a lightweight contract test that asserts docs include required fields for `notes`, `discussions`, and show payloads.
|
||||
|
||||
```diff
|
||||
@@ 4. Fix Robot-Docs Response Schemas
|
||||
+#### 4f. Add robot-docs contract tests
|
||||
+**File**: `src/main.rs` (or dedicated test module)
|
||||
+- Assert `robot-docs` contains `gitlab_discussion_id` and `gitlab_note_id` in:
|
||||
+ - `notes.response_schema`
|
||||
+ - `issues.response_schema.show`
|
||||
+ - `mrs.response_schema.show`
|
||||
+ - `discussions.response_schema`
|
||||
```
|
||||
|
||||
10. **Adjust delivery order to reduce rework and include missing CSV path**
|
||||
Analysis: In your sample `handle_discussions`, `csv` is declared in args but not handled. Also, robot-docs should land after all payload changes. Sequence should minimize churn.
|
||||
|
||||
```diff
|
||||
@@ Delivery Order
|
||||
-3. **Change 4** (robot-docs) — depends on 1 and 2 being done so schemas are accurate.
|
||||
-4. **Change 3** (discussions command) — largest change, depends on 1 for design consistency.
|
||||
+3. **Change 3** (discussions command + indexes + pagination) — largest change.
|
||||
+4. **Change 4** (robot-docs + contract tests) — last, after payloads are final.
|
||||
@@ 3e. Handler wiring
|
||||
- match format {
|
||||
+ match format {
|
||||
"json" => ...
|
||||
"jsonl" => ...
|
||||
+ "csv" => print_list_discussions_csv(&result),
|
||||
_ => ...
|
||||
}
|
||||
```
|
||||
|
||||
If you want, I can produce a single consolidated revised plan markdown with these edits applied so you can drop it in directly.
|
||||
162
docs/plan-expose-discussion-ids.feedback-2.md
Normal file
162
docs/plan-expose-discussion-ids.feedback-2.md
Normal file
@@ -0,0 +1,162 @@
|
||||
Best non-rejected upgrades I’d make to this plan are below. They focus on reducing schema drift, making robot output safer to consume, and improving performance behavior at scale.
|
||||
|
||||
1. Add a shared contract model and field constants first (before workstreams 1-4)
|
||||
Rationale: Right now each command has its own structs and ad-hoc mapping. That is exactly how drift happens. A single contract definition reused by `notes`, `show`, `discussions`, and robot-docs gives compile-time coupling between output payloads and docs. It also makes future fields cheaper and safer to add.
|
||||
|
||||
```diff
|
||||
@@ Scope: Four workstreams, delivered in order:
|
||||
-1. Add `gitlab_discussion_id` to notes output
|
||||
-2. Add `gitlab_discussion_id` to show command discussion groups
|
||||
-3. Add a standalone `discussions` list command
|
||||
-4. Fix robot-docs to list actual field names instead of opaque type references
|
||||
+0. Introduce shared Bridge Contract model/constants used by notes/show/discussions/robot-docs
|
||||
+1. Add `gitlab_discussion_id` to notes output
|
||||
+2. Add `gitlab_discussion_id` to show command discussion groups
|
||||
+3. Add a standalone `discussions` list command
|
||||
+4. Fix robot-docs to list actual field names instead of opaque type references
|
||||
|
||||
+## 0. Shared Contract Model (Cross-Cutting)
|
||||
+Define canonical required-field constants and shared mapping helpers, then consume them in:
|
||||
+- `src/cli/commands/list.rs`
|
||||
+- `src/cli/commands/show.rs`
|
||||
+- `src/cli/robot.rs`
|
||||
+- `src/main.rs` robot-docs builder
|
||||
+This removes duplicated field-name strings and prevents docs/output mismatch.
|
||||
```
|
||||
|
||||
2. Make bridge fields “non-droppable” in robot mode
|
||||
Rationale: The current plan adds fields, but `--fields` can still remove them. That breaks the core read/write bridge contract in exactly the workflows this change is trying to fix. In robot mode, contract fields should always be force-included.
|
||||
|
||||
```diff
|
||||
@@ ## Bridge Contract (Cross-Cutting)
|
||||
Every read payload that surfaces notes or discussions **MUST** include:
|
||||
- `project_path`
|
||||
- `noteable_type`
|
||||
- `parent_iid`
|
||||
- `gitlab_discussion_id`
|
||||
- `gitlab_note_id` (when note-level data is returned — i.e., in notes list and show detail)
|
||||
|
||||
+### Field Filtering Guardrail
|
||||
+In robot mode, `filter_fields` must force-include Bridge Contract fields even when users pass a narrower `--fields` list.
|
||||
+Human/table mode keeps existing behavior.
|
||||
```
|
||||
|
||||
3. Replace correlated subqueries in `discussions` rollup with a single-pass window/aggregate pattern
|
||||
Rationale: Your CTE is better than naive fanout, but it still uses multiple correlated sub-selects per discussion for first author/body/path. At 200K+ discussions this can regress badly depending on cache/index state. A window-ranked `notes` CTE with grouped aggregates is usually faster and more predictable in SQLite.
|
||||
|
||||
```diff
|
||||
@@ #### 3c. SQL Query
|
||||
-Core query uses a CTE + rollup to avoid correlated subquery fanout on larger result sets:
|
||||
+Core query uses a CTE + ranked-notes rollup (window function) to avoid per-row correlated subqueries:
|
||||
|
||||
-WITH filtered_discussions AS (...),
|
||||
-note_rollup AS (
|
||||
- SELECT
|
||||
- n.discussion_id,
|
||||
- SUM(...) AS note_count,
|
||||
- (SELECT ... LIMIT 1) AS first_author,
|
||||
- (SELECT ... LIMIT 1) AS first_note_body,
|
||||
- (SELECT ... LIMIT 1) AS position_new_path,
|
||||
- (SELECT ... LIMIT 1) AS position_new_line
|
||||
- FROM notes n
|
||||
- ...
|
||||
-)
|
||||
+WITH filtered_discussions AS (...),
|
||||
+ranked_notes AS (
|
||||
+ SELECT
|
||||
+ n.*,
|
||||
+ ROW_NUMBER() OVER (PARTITION BY n.discussion_id ORDER BY n.position, n.id) AS rn
|
||||
+ FROM notes n
|
||||
+ WHERE n.discussion_id IN (SELECT id FROM filtered_discussions)
|
||||
+),
|
||||
+note_rollup AS (
|
||||
+ SELECT
|
||||
+ discussion_id,
|
||||
+ SUM(CASE WHEN is_system = 0 THEN 1 ELSE 0 END) AS note_count,
|
||||
+ MAX(CASE WHEN rn = 1 AND is_system = 0 THEN author_username END) AS first_author,
|
||||
+ MAX(CASE WHEN rn = 1 AND is_system = 0 THEN body END) AS first_note_body,
|
||||
+ MAX(CASE WHEN position_new_path IS NOT NULL THEN position_new_path END) AS position_new_path,
|
||||
+ MAX(CASE WHEN position_new_line IS NOT NULL THEN position_new_line END) AS position_new_line
|
||||
+ FROM ranked_notes
|
||||
+ GROUP BY discussion_id
|
||||
+)
|
||||
```
|
||||
|
||||
4. Add direct GitLab ID filters for deterministic bridging
|
||||
Rationale: Bridge workflows often start from one known ID. You already have `gitlab_note_id` in notes filters, but discussion filtering still looks internal-ID-centric. Add explicit GitLab-ID filters so agents do not need extra translation calls.
|
||||
|
||||
```diff
|
||||
@@ #### 3a. CLI Args
|
||||
pub struct DiscussionsArgs {
|
||||
+ /// Filter by GitLab discussion ID
|
||||
+ #[arg(long, help_heading = "Filters")]
|
||||
+ pub gitlab_discussion_id: Option<String>,
|
||||
@@
|
||||
|
||||
@@ #### 3d. Filters struct
|
||||
pub struct DiscussionListFilters {
|
||||
+ pub gitlab_discussion_id: Option<String>,
|
||||
@@
|
||||
}
|
||||
```
|
||||
|
||||
```diff
|
||||
@@ ## 1. Add `gitlab_discussion_id` to Notes Output
|
||||
+#### 1g. Add `--gitlab-discussion-id` filter to notes
|
||||
+Allow filtering notes directly by GitLab thread ID (not only internal discussion ID).
|
||||
+This enables one-hop note retrieval from external references.
|
||||
```
|
||||
|
||||
5. Add optional note expansion to `discussions` for fewer round-trips
|
||||
Rationale: Today the agent flow is often `discussions -> show`. Optional embedded notes (`--include-notes N`) gives a fast path for “list unresolved threads with latest context” without forcing full show payloads.
|
||||
|
||||
```diff
|
||||
@@ ### Design
|
||||
lore -J discussions --for-mr 99 --resolution unresolved
|
||||
+lore -J discussions --for-mr 99 --resolution unresolved --include-notes 2
|
||||
|
||||
@@ #### 3a. CLI Args
|
||||
+ /// Include up to N latest notes per discussion (0 = none)
|
||||
+ #[arg(long, default_value = "0", help_heading = "Output")]
|
||||
+ pub include_notes: usize,
|
||||
```
|
||||
|
||||
6. Upgrade robot-docs from string blobs to structured schema + explicit contract block
|
||||
Rationale: `contains("gitlab_discussion_id")` tests on schema strings are brittle. A structured schema object gives machine-checked docs and reliable test assertions. Add a contract section for agent consumers.
|
||||
|
||||
```diff
|
||||
@@ ## 4. Fix Robot-Docs Response Schemas
|
||||
-#### 4a. Notes response_schema
|
||||
-Replace stringly-typed schema snippets...
|
||||
+#### 4a. Notes response_schema (structured)
|
||||
+Represent response fields as JSON objects (field -> type/nullable), not freeform strings.
|
||||
|
||||
+#### 4g. Add `bridge_contract` section in robot-docs
|
||||
+Publish canonical required fields per entity:
|
||||
+- notes
|
||||
+- discussions
|
||||
+- show.discussions
|
||||
+- show.notes
|
||||
```
|
||||
|
||||
7. Strengthen validation: add CLI-level contract tests and perf guardrails
|
||||
Rationale: Most current tests are unit-level struct/query checks. Add end-to-end JSON contract tests via command handlers, plus a benchmark-style regression test (ignored by default) so performance work stays intentional.
|
||||
|
||||
```diff
|
||||
@@ ## Validation Criteria
|
||||
8. Bridge Contract fields (...) are present in every applicable read payload
|
||||
+9. Contract fields remain present even with `--fields` in robot mode
|
||||
+10. `discussions` query meets performance guardrail on representative fixture (documented threshold)
|
||||
|
||||
@@ ### Tests
|
||||
+#### Test: robot-mode fields cannot drop bridge contract keys
|
||||
+Run notes/discussions JSON output through `filter_fields` path and assert required keys remain.
|
||||
+
|
||||
+#### Test: CLI contract integration
|
||||
+Invoke command handlers for `notes`, `discussions`, `mrs <iid>`, parse JSON, assert required keys and types.
|
||||
+
|
||||
+#### Test (ignored): large-fixture performance regression
|
||||
+Generate representative fixture and assert `query_discussions` stays under target elapsed time.
|
||||
```
|
||||
|
||||
If you want, I can now produce a full “v2 plan” document that applies these diffs end-to-end (including revised delivery order and complete updated sections).
|
||||
147
docs/plan-expose-discussion-ids.feedback-3.md
Normal file
147
docs/plan-expose-discussion-ids.feedback-3.md
Normal file
@@ -0,0 +1,147 @@
|
||||
1. **Make `gitlab_note_id` explicit in all note-level payloads without breaking existing consumers**
|
||||
Rationale: Your Bridge Contract already requires `gitlab_note_id`, but current plan keeps `gitlab_id` only in `notes` list while adding `gitlab_note_id` only in `show`. That forces agents to special-case commands. Add `gitlab_note_id` as an alias field everywhere note-level data appears, while keeping `gitlab_id` for compatibility.
|
||||
|
||||
```diff
|
||||
@@ Bridge Contract (Cross-Cutting)
|
||||
-Every read payload that surfaces notes or discussions MUST include:
|
||||
+Every read payload that surfaces notes or discussions MUST include:
|
||||
- project_path
|
||||
- noteable_type
|
||||
- parent_iid
|
||||
- gitlab_discussion_id
|
||||
- gitlab_note_id (when note-level data is returned — i.e., in notes list and show detail)
|
||||
+ - Back-compat rule: note payloads may continue exposing `gitlab_id`, but MUST also expose `gitlab_note_id` with the same value.
|
||||
|
||||
@@ 1. Add `gitlab_discussion_id` to Notes Output
|
||||
-#### 1c. Add field to `NoteListRowJson`
|
||||
+#### 1c. Add fields to `NoteListRowJson`
|
||||
+Add `gitlab_note_id` alias in addition to existing `gitlab_id` (no rename, no breakage).
|
||||
|
||||
@@ 1f. Update `--fields minimal` preset
|
||||
-"notes" => ["id", "author_username", "body", "created_at_iso", "gitlab_discussion_id"]
|
||||
+"notes" => ["id", "gitlab_note_id", "author_username", "body", "created_at_iso", "gitlab_discussion_id"]
|
||||
```
|
||||
|
||||
2. **Avoid duplicate flag semantics for discussion filtering**
|
||||
Rationale: `notes` already has `--discussion-id` and it already maps to `d.gitlab_discussion_id`. Adding a second independent flag/field (`--gitlab-discussion-id`) increases complexity and precedence bugs. Keep one backing filter field and make the new flag an alias.
|
||||
|
||||
```diff
|
||||
@@ 1g. Add `--gitlab-discussion-id` filter to notes
|
||||
-Allow filtering notes directly by GitLab discussion thread ID...
|
||||
+Normalize discussion ID flags:
|
||||
+- Keep one backing filter field (`discussion_id`)
|
||||
+- Support both `--discussion-id` (existing) and `--gitlab-discussion-id` (alias)
|
||||
+- If both are provided, clap should reject as duplicate/alias conflict
|
||||
```
|
||||
|
||||
3. **Add ambiguity guardrails for cross-project discussion IDs**
|
||||
Rationale: `gitlab_discussion_id` is unique per project, not globally. Filtering by discussion ID without project can return multiple rows across repos, which breaks deterministic write bridging. Fail fast with an `Ambiguous` error and actionable fix (`--project`).
|
||||
|
||||
```diff
|
||||
@@ Bridge Contract (Cross-Cutting)
|
||||
+### Ambiguity Guardrail
|
||||
+When filtering by `gitlab_discussion_id` without `--project`, if multiple projects match:
|
||||
+- return `Ambiguous` error
|
||||
+- include matching project paths in message
|
||||
+- suggest retry with `--project <path>`
|
||||
```
|
||||
|
||||
4. **Replace `--include-notes` N+1 retrieval with one batched top-N query**
|
||||
Rationale: The current plan’s per-discussion follow-up query scales poorly and creates latency spikes. Use a single window-function query over selected discussion IDs and group rows in Rust. This is both faster and more predictable.
|
||||
|
||||
```diff
|
||||
@@ 3c-ii. Note expansion query (--include-notes)
|
||||
-When `include_notes > 0`, after the main discussion query, run a follow-up query per discussion...
|
||||
+When `include_notes > 0`, run one batched query:
|
||||
+WITH ranked_notes AS (
|
||||
+ SELECT
|
||||
+ n.*,
|
||||
+ d.gitlab_discussion_id,
|
||||
+ ROW_NUMBER() OVER (
|
||||
+ PARTITION BY n.discussion_id
|
||||
+ ORDER BY n.created_at DESC, n.id DESC
|
||||
+ ) AS rn
|
||||
+ FROM notes n
|
||||
+ JOIN discussions d ON d.id = n.discussion_id
|
||||
+ WHERE n.discussion_id IN ( ...selected discussion ids... )
|
||||
+)
|
||||
+SELECT ... FROM ranked_notes WHERE rn <= ?
|
||||
+ORDER BY discussion_id, rn;
|
||||
+
|
||||
+Group by `discussion_id` in Rust and attach notes arrays without per-thread round-trips.
|
||||
```
|
||||
|
||||
5. **Add hard output guardrails and explicit truncation metadata**
|
||||
Rationale: `--limit` and `--include-notes` are unbounded today. For robot workflows this can accidentally generate huge payloads. Cap values and surface effective limits plus truncation state in `meta`.
|
||||
|
||||
```diff
|
||||
@@ 3a. CLI Args
|
||||
- pub limit: usize,
|
||||
+ pub limit: usize, // clamp to max (e.g., 500)
|
||||
|
||||
- pub include_notes: usize,
|
||||
+ pub include_notes: usize, // clamp to max (e.g., 20)
|
||||
|
||||
@@ Response Schema
|
||||
- "meta": { "elapsed_ms": 12 }
|
||||
+ "meta": {
|
||||
+ "elapsed_ms": 12,
|
||||
+ "effective_limit": 50,
|
||||
+ "effective_include_notes": 2,
|
||||
+ "has_more": true
|
||||
+ }
|
||||
```
|
||||
|
||||
6. **Strengthen deterministic ordering and null handling**
|
||||
Rationale: `first_note_at`, `last_note_at`, and note `position` can be null/incomplete during partial sync states. Add null-safe ordering to avoid unstable output and flaky automation.
|
||||
|
||||
```diff
|
||||
@@ 2c. Update queries to SELECT new fields
|
||||
-... ORDER BY first_note_at
|
||||
+... ORDER BY COALESCE(first_note_at, last_note_at, 0), id
|
||||
|
||||
@@ show note query
|
||||
-ORDER BY position
|
||||
+ORDER BY COALESCE(position, 9223372036854775807), created_at, id
|
||||
|
||||
@@ 3c. SQL Query
|
||||
-ORDER BY {sort_column} {order}
|
||||
+ORDER BY COALESCE({sort_column}, 0) {order}, fd.id {order}
|
||||
```
|
||||
|
||||
7. **Make write-bridging more useful with optional command hints**
|
||||
Rationale: Exposing IDs is necessary but not sufficient; agents still need to assemble endpoints repeatedly. Add optional `--with-write-hints` that injects compact endpoint templates (`reply`, `resolve`) derived from row context. This improves usability without bloating default output.
|
||||
|
||||
```diff
|
||||
@@ 3a. CLI Args
|
||||
+ /// Include machine-actionable glab write hints per row
|
||||
+ #[arg(long, help_heading = "Output")]
|
||||
+ pub with_write_hints: bool,
|
||||
|
||||
@@ Response Schema (notes/discussions/show)
|
||||
+ "write_hints?": {
|
||||
+ "reply_endpoint": "string",
|
||||
+ "resolve_endpoint?": "string"
|
||||
+ }
|
||||
```
|
||||
|
||||
8. **Upgrade robot-docs/contract validation from string-contains to parity checks**
|
||||
Rationale: `contains("gitlab_discussion_id")` catches very little and allows schema drift. Build field-set parity tests that compare actual serialized JSON keys to robot-docs declared fields for `notes`, `discussions`, and `show` discussion nodes.
|
||||
|
||||
```diff
|
||||
@@ 4f. Add robot-docs contract tests
|
||||
-assert!(notes_schema.contains("gitlab_discussion_id"));
|
||||
+let declared = parse_schema_field_list(notes_schema);
|
||||
+let sample = sample_notes_row_json_keys();
|
||||
+assert_required_subset(&declared, &["project_path","noteable_type","parent_iid","gitlab_discussion_id","gitlab_note_id"]);
|
||||
+assert_schema_matches_payload(&declared, &sample);
|
||||
|
||||
@@ 4g. Add CLI-level contract integration tests
|
||||
+Add parity tests for:
|
||||
+- notes list JSON
|
||||
+- discussions list JSON
|
||||
+- issues show discussions[*]
|
||||
+- mrs show discussions[*]
|
||||
```
|
||||
|
||||
If you want, I can produce a full revised v3 plan text with these edits merged end-to-end so it’s ready to execute directly.
|
||||
207
docs/plan-expose-discussion-ids.feedback-4.md
Normal file
207
docs/plan-expose-discussion-ids.feedback-4.md
Normal file
@@ -0,0 +1,207 @@
|
||||
Below are the highest-impact revisions I’d make to this plan. I excluded everything listed in your `## Rejected Recommendations` section.
|
||||
|
||||
**1. Fix a correctness bug in the ambiguity guardrail (must run before `LIMIT`)**
|
||||
|
||||
The current post-query ambiguity check can silently fail when `--limit` truncates results to one project even though multiple projects match the same `gitlab_discussion_id`. That creates non-deterministic write targeting risk.
|
||||
|
||||
```diff
|
||||
@@ ## Ambiguity Guardrail
|
||||
-**Implementation**: After the main query, if `gitlab_discussion_id` is set and no `--project`
|
||||
-was provided, check if the result set spans multiple `project_path` values.
|
||||
+**Implementation**: Run a preflight distinct-project check when `gitlab_discussion_id` is set
|
||||
+and `--project` was not provided, before the main list query applies `LIMIT`.
|
||||
+Use:
|
||||
+```sql
|
||||
+SELECT DISTINCT p.path_with_namespace
|
||||
+FROM discussions d
|
||||
+JOIN projects p ON p.id = d.project_id
|
||||
+WHERE d.gitlab_discussion_id = ?
|
||||
+LIMIT 3
|
||||
+```
|
||||
+If more than one project is found, return `LoreError::Ambiguous` (exit code 18) with project
|
||||
+paths and suggestion to retry with `--project <path>`.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
**2. Add `gitlab_project_id` to the Bridge Contract**
|
||||
|
||||
`project_path` is human-friendly but mutable (renames/transfers). `gitlab_project_id` gives a stable write target and avoids path re-resolution failures.
|
||||
|
||||
```diff
|
||||
@@ ## Bridge Contract (Cross-Cutting)
|
||||
Every read payload that surfaces notes or discussions **MUST** include:
|
||||
- `project_path`
|
||||
+- `gitlab_project_id`
|
||||
- `noteable_type`
|
||||
- `parent_iid`
|
||||
- `gitlab_discussion_id`
|
||||
- `gitlab_note_id`
|
||||
@@
|
||||
const BRIDGE_FIELDS_NOTES: &[&str] = &[
|
||||
- "project_path", "noteable_type", "parent_iid",
|
||||
+ "project_path", "gitlab_project_id", "noteable_type", "parent_iid",
|
||||
"gitlab_discussion_id", "gitlab_note_id",
|
||||
];
|
||||
const BRIDGE_FIELDS_DISCUSSIONS: &[&str] = &[
|
||||
- "project_path", "noteable_type", "parent_iid",
|
||||
+ "project_path", "gitlab_project_id", "noteable_type", "parent_iid",
|
||||
"gitlab_discussion_id",
|
||||
];
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
**3. Replace stringly-typed filter/sort fields with enums end-to-end**
|
||||
|
||||
Right now `sort`, `order`, `resolution`, `noteable_type` are mostly `String`. This is fragile and risks unsafe SQL interpolation drift over time. Typed enums make invalid states unrepresentable.
|
||||
|
||||
```diff
|
||||
@@ ## 3a. CLI Args
|
||||
- pub resolution: Option<String>,
|
||||
+ pub resolution: Option<ResolutionFilter>,
|
||||
@@
|
||||
- pub noteable_type: Option<String>,
|
||||
+ pub noteable_type: Option<NoteableTypeFilter>,
|
||||
@@
|
||||
- pub sort: String,
|
||||
+ pub sort: DiscussionSortField,
|
||||
@@
|
||||
- pub asc: bool,
|
||||
+ pub order: SortDirection,
|
||||
@@ ## 3d. Filters struct
|
||||
- pub resolution: Option<String>,
|
||||
- pub noteable_type: Option<String>,
|
||||
- pub sort: String,
|
||||
- pub order: String,
|
||||
+ pub resolution: Option<ResolutionFilter>,
|
||||
+ pub noteable_type: Option<NoteableTypeFilter>,
|
||||
+ pub sort: DiscussionSortField,
|
||||
+ pub order: SortDirection,
|
||||
@@
|
||||
+Map enum -> SQL fragment via `match` in query builder; never interpolate raw strings.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
**4. Enforce snapshot consistency for multi-query commands**
|
||||
|
||||
`discussions` with `--include-notes` does multiple reads. Without a single read transaction, concurrent ingest can produce mismatched `total_count`, row set, and expanded notes.
|
||||
|
||||
```diff
|
||||
@@ ## 3c. SQL Query
|
||||
-pub fn query_discussions(...)
|
||||
+pub fn query_discussions(...)
|
||||
{
|
||||
+ // Run count query + page query + note expansion under one deferred read transaction
|
||||
+ // so output is a single consistent snapshot.
|
||||
+ let tx = conn.transaction_with_behavior(rusqlite::TransactionBehavior::Deferred)?;
|
||||
...
|
||||
+ tx.commit()?;
|
||||
}
|
||||
@@ ## 1. Add `gitlab_discussion_id` to Notes Output
|
||||
+Apply the same snapshot rule to `query_notes` when returning `total_count` + paged rows.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
**5. Correct first-note rollup semantics (current CTE can return null/incorrect `first_author`)**
|
||||
|
||||
In the proposed SQL, `rn=1` is computed over all notes but then filtered with `is_system=0`, so threads with a leading system note may incorrectly lose `first_author`/snippet. Also path rollup uses non-deterministic `MAX(...)`.
|
||||
|
||||
```diff
|
||||
@@ ## 3c. SQL Query
|
||||
-ranked_notes AS (
|
||||
+ranked_notes AS (
|
||||
SELECT
|
||||
n.discussion_id,
|
||||
n.author_username,
|
||||
n.body,
|
||||
n.is_system,
|
||||
n.position_new_path,
|
||||
n.position_new_line,
|
||||
- ROW_NUMBER() OVER (
|
||||
- PARTITION BY n.discussion_id
|
||||
- ORDER BY n.position, n.id
|
||||
- ) AS rn
|
||||
+ ROW_NUMBER() OVER (
|
||||
+ PARTITION BY n.discussion_id
|
||||
+ ORDER BY CASE WHEN n.is_system = 0 THEN 0 ELSE 1 END, n.created_at, n.id
|
||||
+ ) AS rn_first_note,
|
||||
+ ROW_NUMBER() OVER (
|
||||
+ PARTITION BY n.discussion_id
|
||||
+ ORDER BY CASE WHEN n.position_new_path IS NULL THEN 1 ELSE 0 END, n.created_at, n.id
|
||||
+ ) AS rn_first_position
|
||||
@@
|
||||
- MAX(CASE WHEN rn = 1 AND is_system = 0 THEN author_username END) AS first_author,
|
||||
- MAX(CASE WHEN rn = 1 AND is_system = 0 THEN body END) AS first_note_body,
|
||||
- MAX(CASE WHEN position_new_path IS NOT NULL THEN position_new_path END) AS position_new_path,
|
||||
- MAX(CASE WHEN position_new_line IS NOT NULL THEN position_new_line END) AS position_new_line
|
||||
+ MAX(CASE WHEN rn_first_note = 1 AND is_system = 0 THEN author_username END) AS first_author,
|
||||
+ MAX(CASE WHEN rn_first_note = 1 AND is_system = 0 THEN body END) AS first_note_body,
|
||||
+ MAX(CASE WHEN rn_first_position = 1 THEN position_new_path END) AS position_new_path,
|
||||
+ MAX(CASE WHEN rn_first_position = 1 THEN position_new_line END) AS position_new_line
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
**6. Add per-discussion truncation signals for `--include-notes`**
|
||||
|
||||
Top-level `has_more` is useful, but agents also need to know if an individual thread’s notes were truncated. Otherwise they can’t tell if a thread is complete.
|
||||
|
||||
```diff
|
||||
@@ ## Response Schema
|
||||
{
|
||||
"gitlab_discussion_id": "...",
|
||||
...
|
||||
- "notes": []
|
||||
+ "included_note_count": 0,
|
||||
+ "has_more_notes": false,
|
||||
+ "notes": []
|
||||
}
|
||||
@@ ## 3b. Domain Structs
|
||||
pub struct DiscussionListRowJson {
|
||||
@@
|
||||
+ pub included_note_count: usize,
|
||||
+ pub has_more_notes: bool,
|
||||
#[serde(skip_serializing_if = "Vec::is_empty")]
|
||||
pub notes: Vec<NoteListRowJson>,
|
||||
}
|
||||
@@ ## 3c-ii. Note expansion query (--include-notes)
|
||||
-Group by `discussion_id` in Rust and attach notes arrays...
|
||||
+Group by `discussion_id` in Rust, attach notes arrays, and set:
|
||||
+`included_note_count = notes.len()`,
|
||||
+`has_more_notes = note_count > included_note_count`.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
**7. Add explicit query-plan gate and targeted index workstream (measured, not speculative)**
|
||||
|
||||
This plan introduces heavy discussion-centric reads. You should bake in deterministic performance validation with `EXPLAIN QUERY PLAN` and only then add indexes if missing.
|
||||
|
||||
```diff
|
||||
@@ ## Scope: Four workstreams, delivered in order:
|
||||
-4. Fix robot-docs to list actual field names instead of opaque type references
|
||||
+4. Add query-plan validation + targeted index updates for new discussion queries
|
||||
+5. Fix robot-docs to list actual field names instead of opaque type references
|
||||
@@
|
||||
+## 4. Query-Plan Validation and Targeted Indexes
|
||||
+
|
||||
+Before and after implementing `query_discussions`, capture `EXPLAIN QUERY PLAN` for:
|
||||
+- `--for-mr <iid> --resolution unresolved`
|
||||
+- `--project <path> --since 7d --sort last_note`
|
||||
+- `--gitlab-discussion-id <id>`
|
||||
+
|
||||
+If plans show table scans on `notes`/`discussions`, add indexes in `MIGRATIONS` array:
|
||||
+- `discussions(project_id, gitlab_discussion_id)`
|
||||
+- `discussions(merge_request_id, last_note_at, id)`
|
||||
+- `notes(discussion_id, created_at DESC, id DESC)`
|
||||
+- `notes(discussion_id, position, id)`
|
||||
+
|
||||
+Tests: assert the new query paths return expected rows under indexed schema and no regressions.
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
If you want, I can produce a single consolidated “iteration 4” version of the plan text with all seven revisions merged in place.
|
||||
160
docs/plan-expose-discussion-ids.feedback-4.md.bak
Normal file
160
docs/plan-expose-discussion-ids.feedback-4.md.bak
Normal file
@@ -0,0 +1,160 @@
|
||||
I reviewed the plan end-to-end and focused only on new improvements (none of the items in `## Rejected Recommendations` are re-proposed).
|
||||
|
||||
1. Add direct `--discussion-id` retrieval paths
|
||||
Rationale: This removes a full discovery hop for the exact workflow that failed (replying to a known thread). It also reduces ambiguity and query cost when an agent already has the thread ID.
|
||||
|
||||
```diff
|
||||
@@ Core Changes
|
||||
| 7 | Fix robot-docs to list actual field names | Docs | Small |
|
||||
+| 8 | Add direct `--discussion-id` filter to notes/discussions/show | Core | Small |
|
||||
|
||||
@@ Change 3: Add Standalone `discussions` List Command
|
||||
lore -J discussions --for-mr 99 --cursor <token> # keyset pagination
|
||||
+lore -J discussions --discussion-id 6a9c1750b37d... # direct lookup
|
||||
|
||||
@@ 3a. CLI Args
|
||||
+ #[arg(long, conflicts_with_all = ["for_issue", "for_mr"], help_heading = "Filters")]
|
||||
+ pub discussion_id: Option<String>,
|
||||
|
||||
@@ Change 1: Add `gitlab_discussion_id` to Notes Output
|
||||
+Add `--discussion-id <hex>` filter to `notes` for direct note retrieval within one thread.
|
||||
```
|
||||
|
||||
2. Add a shared filter compiler to eliminate count/query drift
|
||||
Rationale: The plan currently repeats filters across data query, `total_count`, and `incomplete_rows` count queries. That is a classic reliability bug source. A single compiled filter object makes count semantics provably consistent.
|
||||
|
||||
```diff
|
||||
@@ Count Semantics (Cross-Cutting Convention)
|
||||
+## Filter Compiler (NEW, Cross-Cutting Convention)
|
||||
+All list commands must build predicates via a shared `CompiledFilters` object that emits:
|
||||
+- SQL predicate fragment
|
||||
+- bind parameters
|
||||
+- canonical filter string (for cursor hash)
|
||||
+The same compiled object is reused by:
|
||||
+- page data query
|
||||
+- `total_count` query
|
||||
+- `incomplete_rows` query
|
||||
```
|
||||
|
||||
3. Harden keyset pagination semantics for `DESC`, limits, and client ergonomics
|
||||
Rationale: `(sort_value, id) > (?, ?)` is only correct for ascending order. Descending sort needs `<`. Also add explicit `has_more` so clients don’t infer from cursor nullability.
|
||||
|
||||
```diff
|
||||
@@ Keyset Pagination (Cross-Cutting, Change B)
|
||||
-```sql
|
||||
-WHERE (sort_value, id) > (?, ?)
|
||||
-```
|
||||
+Use comparator by order:
|
||||
+- ASC: `(sort_value, id) > (?, ?)`
|
||||
+- DESC: `(sort_value, id) < (?, ?)`
|
||||
|
||||
@@ 3a. CLI Args
|
||||
+ #[arg(short = 'n', long = "limit", default_value = "50", value_parser = clap::value_parser!(usize).range(1..=500), help_heading = "Output")]
|
||||
+ pub limit: usize,
|
||||
|
||||
@@ Response Schema
|
||||
- "next_cursor": "aW...xyz=="
|
||||
+ "next_cursor": "aW...xyz==",
|
||||
+ "has_more": true
|
||||
```
|
||||
|
||||
4. Add DB-level entity integrity invariants (not just response invariants)
|
||||
Rationale: Response-side filtering is good, but DB correctness should also be guarded. This prevents silent corruption and bad joins from ingestion or future migrations.
|
||||
|
||||
```diff
|
||||
@@ Contract Invariants (NEW)
|
||||
+### Entity Integrity Invariants (DB + Ingest)
|
||||
+1. `discussions` must belong to exactly one parent (`issue_id XOR merge_request_id`).
|
||||
+2. `discussions.noteable_type` must match the populated parent column.
|
||||
+3. Natural-key uniqueness is enforced where valid:
|
||||
+ - `(project_id, gitlab_discussion_id)` unique for discussions.
|
||||
+4. Ingestion must reject/quarantine rows violating invariants and report counts.
|
||||
|
||||
@@ Supporting Indexes (Cross-Cutting, Change D)
|
||||
+CREATE UNIQUE INDEX IF NOT EXISTS idx_discussions_project_gitlab_discussion_id
|
||||
+ ON discussions(project_id, gitlab_discussion_id);
|
||||
```
|
||||
|
||||
5. Switch bulk note loading to streaming grouping (avoid large intermediate vecs)
|
||||
Rationale: Current bulk strategy still materializes all notes before grouping. Streaming into the map cuts peak memory and improves large-MR stability.
|
||||
|
||||
```diff
|
||||
@@ Change 2e. Constructor — use bulk notes map
|
||||
-let all_note_rows: Vec<MrNoteDetail> = ... // From bulk query above
|
||||
-let notes_by_discussion: HashMap<i64, Vec<MrNoteDetail>> =
|
||||
- all_note_rows.into_iter().fold(HashMap::new(), |mut map, note| {
|
||||
- map.entry(note.discussion_id).or_insert_with(Vec::new).push(note);
|
||||
- map
|
||||
- });
|
||||
+let mut notes_by_discussion: HashMap<i64, Vec<MrNoteDetail>> = HashMap::new();
|
||||
+for row in bulk_note_stmt.query_map(params, map_note_row)? {
|
||||
+ let note = row?;
|
||||
+ notes_by_discussion.entry(note.discussion_id).or_default().push(note);
|
||||
+}
|
||||
```
|
||||
|
||||
6. Make freshness tri-state (`fresh|stale|unknown`) and fail closed on unknown with `--require-fresh`
|
||||
Rationale: `stale: bool` alone cannot represent “never synced / unknown project freshness.” For write safety, unknown freshness should be explicit and reject under freshness constraints.
|
||||
|
||||
```diff
|
||||
@@ Freshness Metadata & Staleness Guards
|
||||
pub struct ResponseMeta {
|
||||
pub elapsed_ms: i64,
|
||||
pub data_as_of_iso: String,
|
||||
pub sync_lag_seconds: i64,
|
||||
pub stale: bool,
|
||||
+ pub freshness_state: String, // "fresh" | "stale" | "unknown"
|
||||
+ #[serde(skip_serializing_if = "Option::is_none")]
|
||||
+ pub freshness_reason: Option<String>,
|
||||
pub incomplete_rows: i64,
|
||||
@@
|
||||
-if sync_lag_seconds > max_age_secs {
|
||||
+if freshness_state == "unknown" || sync_lag_seconds > max_age_secs {
|
||||
```
|
||||
|
||||
7. Tune indexes to match actual ORDER BY paths in window queries
|
||||
Rationale: `idx_notes_discussion_position` is likely insufficient for the two window orderings. A covering-style index aligned with partition/order keys reduces random table lookups.
|
||||
|
||||
```diff
|
||||
@@ Supporting Indexes (Cross-Cutting, Change D)
|
||||
--- Notes: window function ORDER BY (discussion_id, position) for ROW_NUMBER()
|
||||
-CREATE INDEX IF NOT EXISTS idx_notes_discussion_position
|
||||
- ON notes(discussion_id, position);
|
||||
+-- Notes: support dual ROW_NUMBER() orderings and reduce table lookups
|
||||
+CREATE INDEX IF NOT EXISTS idx_notes_discussion_window
|
||||
+ ON notes(discussion_id, is_system, position, created_at, gitlab_id);
|
||||
```
|
||||
|
||||
8. Add a phased rollout gate before strict exclusion becomes default
|
||||
Rationale: Enforcing `gitlab_* IS NOT NULL` immediately can hide data if existing rows are incomplete. A short observation gate prevents sudden regressions while preserving the end-state contract.
|
||||
|
||||
```diff
|
||||
@@ Delivery Order
|
||||
+Batch 0: Observability gate (NEW)
|
||||
+- Ship `incomplete_rows` and freshness meta first
|
||||
+- Measure incomplete rate across real datasets
|
||||
+- If incomplete ratio <= threshold, enable strict exclusion defaults
|
||||
+- If above threshold, block rollout and fix ingestion quality first
|
||||
+
|
||||
Change 1 (notes output) ──┐
|
||||
```
|
||||
|
||||
9. Add property-based invariants for pagination/count correctness
|
||||
Rationale: Your current tests are scenario-based and good, but randomized property tests are much better at catching edge-case cursor/count bugs.
|
||||
|
||||
```diff
|
||||
@@ Tests (Change 3 / Change B)
|
||||
+**Test 12**: Property-based pagination invariants (`proptest`)
|
||||
+```rust
|
||||
+#[test]
|
||||
+fn prop_discussion_cursor_no_overlap_no_gap_under_random_data() { /* ... */ }
|
||||
+```
|
||||
+
|
||||
+**Test 13**: Property-based count invariants
|
||||
+```rust
|
||||
+#[test]
|
||||
+fn prop_total_count_and_incomplete_rows_match_filter_partition() { /* ... */ }
|
||||
+```
|
||||
```
|
||||
|
||||
If you want, I can now produce a fully consolidated “Plan v4” that applies these diffs cleanly into your original document so it reads as a single coherent spec.
|
||||
140
docs/plan-expose-discussion-ids.feedback-5.md
Normal file
140
docs/plan-expose-discussion-ids.feedback-5.md
Normal file
@@ -0,0 +1,140 @@
|
||||
Your iteration 4 plan is already strong. The highest-impact revisions are around query shape, transaction boundaries, and contract stability for agents.
|
||||
|
||||
1. **Switch discussions query to a two-phase page-first architecture**
|
||||
Analysis: Current `ranked_notes` runs over every filtered discussion before `LIMIT`, which can explode on project-wide queries. A page-first plan keeps complexity proportional to `limit`, improves tail latency, and reduces memory churn.
|
||||
```diff
|
||||
@@ ## 3c. SQL Query
|
||||
-Core query uses a CTE + ranked-notes rollup (window function) to avoid per-row correlated
|
||||
-subqueries.
|
||||
+Core query is split into two phases for scalability:
|
||||
+1) `paged_discussions` applies filters/sort/LIMIT and returns only page IDs.
|
||||
+2) Note rollups and optional `--include-notes` expansion run only for those page IDs.
|
||||
+This bounds note scanning to visible results and stabilizes latency on large projects.
|
||||
|
||||
-WITH filtered_discussions AS (
|
||||
+WITH filtered_discussions AS (
|
||||
...
|
||||
),
|
||||
-ranked_notes AS (
|
||||
+paged_discussions AS (
|
||||
+ SELECT id
|
||||
+ FROM filtered_discussions
|
||||
+ ORDER BY COALESCE({sort_column}, 0) {order}, id {order}
|
||||
+ LIMIT ?
|
||||
+),
|
||||
+ranked_notes AS (
|
||||
...
|
||||
- WHERE n.discussion_id IN (SELECT id FROM filtered_discussions)
|
||||
+ WHERE n.discussion_id IN (SELECT id FROM paged_discussions)
|
||||
)
|
||||
```
|
||||
|
||||
2. **Move snapshot transaction ownership to handlers (not query helpers)**
|
||||
Analysis: This avoids nested transaction edge cases, keeps function signatures clean, and guarantees one snapshot across count + page + include-notes + serialization metadata.
|
||||
```diff
|
||||
@@ ## Cross-cutting: snapshot consistency
|
||||
-Wrap `query_notes` and `query_discussions` in a deferred read transaction.
|
||||
+Open one deferred read transaction in each handler (`handle_notes`, `handle_discussions`)
|
||||
+and pass `&Transaction` into query helpers. Query helpers do not open/commit transactions.
|
||||
+This guarantees a single snapshot across all subqueries and avoids nested tx pitfalls.
|
||||
|
||||
-pub fn query_discussions(conn: &Connection, ...)
|
||||
+pub fn query_discussions(tx: &rusqlite::Transaction<'_>, ...)
|
||||
```
|
||||
|
||||
3. **Add immutable input filter `--project-id` across notes/discussions/show**
|
||||
Analysis: You already expose `gitlab_project_id` because paths are mutable; input should support the same immutable selector. This removes failure modes after project renames/transfers.
|
||||
```diff
|
||||
@@ ## 3a. CLI Args
|
||||
+ /// Filter by immutable GitLab project ID
|
||||
+ #[arg(long, help_heading = "Filters", conflicts_with = "project")]
|
||||
+ pub project_id: Option<i64>,
|
||||
@@ ## Bridge Contract
|
||||
+Input symmetry rule: commands that accept `--project` should also accept `--project-id`.
|
||||
+If both are present, return usage error (exit code 2).
|
||||
```
|
||||
|
||||
4. **Enforce bridge fields for nested notes in `discussions --include-notes`**
|
||||
Analysis: Current guardrail is entity-level; nested notes can still lose required IDs under aggressive filtering. This is a contract hole for write-bridging.
|
||||
```diff
|
||||
@@ ### Field Filtering Guardrail
|
||||
-In robot mode, `filter_fields` MUST force-include Bridge Contract fields...
|
||||
+In robot mode, `filter_fields` MUST force-include Bridge Contract fields at all returned levels:
|
||||
+- discussion row fields
|
||||
+- nested note fields when `discussions --include-notes` is used
|
||||
|
||||
+const BRIDGE_FIELDS_DISCUSSION_NOTES: &[&str] = &[
|
||||
+ "project_path", "gitlab_project_id", "noteable_type", "parent_iid",
|
||||
+ "gitlab_discussion_id", "gitlab_note_id",
|
||||
+];
|
||||
```
|
||||
|
||||
5. **Make ambiguity preflight scope-aware and machine-actionable**
|
||||
Analysis: Current preflight checks only `gitlab_discussion_id`, which can produce false ambiguity when additional filters already narrow to one project. Also, agents need structured candidates, not only free-text.
|
||||
```diff
|
||||
@@ ### Ambiguity Guardrail
|
||||
-SELECT DISTINCT p.path_with_namespace
|
||||
+SELECT DISTINCT p.path_with_namespace, p.gitlab_project_id
|
||||
FROM discussions d
|
||||
JOIN projects p ON p.id = d.project_id
|
||||
-WHERE d.gitlab_discussion_id = ?
|
||||
+WHERE d.gitlab_discussion_id = ?
|
||||
+ /* plus active scope filters: noteable_type, for_issue/for_mr, since/path when present */
|
||||
LIMIT 3
|
||||
|
||||
-Return LoreError::Ambiguous with message
|
||||
+Return LoreError::Ambiguous with structured details:
|
||||
+`{ code, message, candidates:[{project_path, gitlab_project_id}], suggestion }`
|
||||
```
|
||||
|
||||
6. **Add `--contains` filter to `discussions`**
|
||||
Analysis: This is a high-utility agent workflow gap. Agents frequently need “find thread by text then reply”; forcing a separate `notes` search round-trip is unnecessary.
|
||||
```diff
|
||||
@@ ## 3a. CLI Args
|
||||
+ /// Filter discussions whose notes contain text
|
||||
+ #[arg(long, help_heading = "Filters")]
|
||||
+ pub contains: Option<String>,
|
||||
@@ ## 3d. Filters struct
|
||||
+ pub contains: Option<String>,
|
||||
@@ ## 3d. Where-clause construction
|
||||
+- `path` -> EXISTS (...)
|
||||
+- `contains` -> EXISTS (
|
||||
+ SELECT 1 FROM notes n
|
||||
+ WHERE n.discussion_id = d.id
|
||||
+ AND n.body LIKE ?
|
||||
+ )
|
||||
```
|
||||
|
||||
7. **Promote two baseline indexes from “candidate” to “required”**
|
||||
Analysis: These are directly hit by new primary paths; waiting for post-merge profiling risks immediate perf cliffs in real usage.
|
||||
```diff
|
||||
@@ ## 3h. Query-plan validation
|
||||
-Candidate indexes (add only if EXPLAIN QUERY PLAN shows they're needed):
|
||||
-- discussions(project_id, gitlab_discussion_id)
|
||||
-- notes(discussion_id, created_at DESC, id DESC)
|
||||
+Required baseline indexes for this feature:
|
||||
+- discussions(project_id, gitlab_discussion_id)
|
||||
+- notes(discussion_id, created_at DESC, id DESC)
|
||||
+Keep other indexes conditional on EXPLAIN QUERY PLAN.
|
||||
```
|
||||
|
||||
8. **Add schema versioning and remove contradictory rejected items**
|
||||
Analysis: `robot-docs` contract drift is a long-term agent risk; explicit schema versions let clients fail safely. Also, rejected items currently contradict active sections, which creates implementation ambiguity.
|
||||
```diff
|
||||
@@ ## 4. Fix Robot-Docs Response Schemas
|
||||
"meta": {"elapsed_ms": "int", ...}
|
||||
+"meta": {"elapsed_ms":"int", ..., "schema_version":"string"}
|
||||
+
|
||||
+Schema version policy:
|
||||
+- bump minor on additive fields
|
||||
+- bump major on removals/renames
|
||||
+- expose per-command versions in `robot-docs`
|
||||
@@ ## Rejected Recommendations
|
||||
-- Add `gitlab_note_id` to show-command note detail structs ... rejected ...
|
||||
-- Add `gitlab_discussion_id` to show-command discussion detail structs ... rejected ...
|
||||
-- Add `gitlab_project_id` to show-command discussion detail structs ... rejected ...
|
||||
+Remove stale rejected entries that conflict with accepted workstreams in this plan iteration.
|
||||
```
|
||||
|
||||
If you want, I can produce a fully rewritten iteration 5 plan document that applies all of the above edits cleanly end-to-end.
|
||||
158
docs/plan-expose-discussion-ids.feedback-5.md.bak
Normal file
158
docs/plan-expose-discussion-ids.feedback-5.md.bak
Normal file
@@ -0,0 +1,158 @@
|
||||
I reviewed the whole plan and only proposed changes that are not in your `## Rejected Recommendations`.
|
||||
|
||||
1. **Fix plan-internal inconsistencies first**
|
||||
Analysis: The plan currently has a few self-contradictions (`8` vs `9` cross-cutting improvements, `stale` still referenced after moving to tri-state freshness). Cleaning this prevents implementation drift and bad AC validation.
|
||||
|
||||
```diff
|
||||
--- a/plan.md
|
||||
+++ b/plan.md
|
||||
@@
|
||||
-**Scope**: 8 core changes + 8 cross-cutting architectural improvements across 3 tiers:
|
||||
+**Scope**: 8 core changes + 9 cross-cutting architectural improvements across 3 tiers:
|
||||
@@ AC-7: Freshness Metadata Present & Staleness Guards Work
|
||||
-lore -J notes -n 1 | jq '.meta | {data_as_of_iso, sync_lag_seconds, stale}'
|
||||
-# All fields present, stale=false if recently synced
|
||||
+lore -J notes -n 1 | jq '.meta | {data_as_of_iso, sync_lag_seconds, freshness_state}'
|
||||
+# All fields present, freshness_state is one of fresh|stale|unknown
|
||||
@@ Change 6 Response Schema example
|
||||
- "stale": false,
|
||||
+ "freshness_state": "fresh",
|
||||
```
|
||||
|
||||
2. **Require snapshot-consistent list responses (page + counts)**
|
||||
Analysis: `total_count`, `incomplete_rows`, and page rows can drift if sync writes between queries. Enforcing a single read snapshot for all list commands makes pagination and counts deterministic.
|
||||
|
||||
```diff
|
||||
--- a/plan.md
|
||||
+++ b/plan.md
|
||||
@@ Count Semantics (Cross-Cutting Convention)
|
||||
All list commands use consistent count fields:
|
||||
+All three queries (`page`, `total_count`, `incomplete_rows`) MUST execute inside one read transaction/snapshot.
|
||||
+This guarantees count/page consistency under concurrent sync writes.
|
||||
```
|
||||
|
||||
3. **Use RAII transactions instead of manual `BEGIN/COMMIT`**
|
||||
Analysis: Manual `execute_batch("BEGIN...")` is fragile on early returns. `rusqlite::Transaction` guarantees rollback on error and removes transaction-leak risk.
|
||||
|
||||
```diff
|
||||
--- a/plan.md
|
||||
+++ b/plan.md
|
||||
@@ Change 2: Consistency guarantee
|
||||
-conn.execute_batch("BEGIN DEFERRED")?;
|
||||
-// ... discussion query ...
|
||||
-// ... bulk note query ...
|
||||
-conn.execute_batch("COMMIT")?;
|
||||
+let tx = conn.transaction_with_behavior(rusqlite::TransactionBehavior::Deferred)?;
|
||||
+// ... discussion query ...
|
||||
+// ... bulk note query ...
|
||||
+tx.commit()?;
|
||||
```
|
||||
|
||||
4. **Allow small focused new modules for query infrastructure**
|
||||
Analysis: Keeping everything in `list.rs`/`show.rs` will become a maintenance hotspot as filters/cursors/freshness expand. A small module split reduces coupling and regression risk.
|
||||
|
||||
```diff
|
||||
--- a/plan.md
|
||||
+++ b/plan.md
|
||||
@@ Change 3: File Architecture
|
||||
-**No new files.** Follow existing patterns:
|
||||
+Allow focused infra modules for shared logic:
|
||||
+- `src/cli/query/filters.rs` (CompiledFilters + builders)
|
||||
+- `src/cli/query/cursor.rs` (encode/decode/validate v2 cursors)
|
||||
+- `src/cli/query/freshness.rs` (freshness computation + guards)
|
||||
+Command handlers remain in existing files.
|
||||
```
|
||||
|
||||
5. **Add ingest-time `discussion_rollups` to avoid repeated heavy window scans**
|
||||
Analysis: Window functions are good, but doing them on every read over large note volumes is still expensive. Precomputing rollups during ingest gives lower and more predictable p95 latency while keeping read paths simpler.
|
||||
|
||||
```diff
|
||||
--- a/plan.md
|
||||
+++ b/plan.md
|
||||
@@ Architectural Improvements (Cross-Cutting)
|
||||
+| J | Ingest-time discussion rollups (`discussion_rollups`) | Performance | Medium |
|
||||
@@ Change 3 SQL strategy
|
||||
-Use `ROW_NUMBER()` window function instead of correlated subqueries...
|
||||
+Primary path: join precomputed `discussion_rollups` for `note_count`, `first_author`,
|
||||
+`first_note_body`, `position_new_path`, `position_new_line`.
|
||||
+Fallback path: window-function recompute if rollup row is missing (defensive correctness).
|
||||
```
|
||||
|
||||
6. **Add deterministic numeric project selector `--project-id`**
|
||||
Analysis: `-p group/repo` is human-friendly, but numeric project IDs are safer for robots and avoid fuzzy/project-path ambiguity. This reduces false ambiguity failures and lookup overhead.
|
||||
|
||||
```diff
|
||||
--- a/plan.md
|
||||
+++ b/plan.md
|
||||
@@ DiscussionsArgs
|
||||
#[arg(short = 'p', long, help_heading = "Filters")]
|
||||
pub project: Option<String>,
|
||||
+ #[arg(long, conflicts_with = "project", help_heading = "Filters")]
|
||||
+ pub project_id: Option<i64>,
|
||||
@@ Ambiguity handling
|
||||
+If `--project-id` is provided, IID resolution is scoped directly to that project.
|
||||
+`--project-id` takes precedence over path-based project matching.
|
||||
```
|
||||
|
||||
7. **Make path filtering rename-aware (`old` + `new`)**
|
||||
Analysis: Current `--path` strategy only using `position_new_path` misses deleted/renamed-file discussions. Supporting side selection makes the feature materially more useful for review workflows.
|
||||
|
||||
```diff
|
||||
--- a/plan.md
|
||||
+++ b/plan.md
|
||||
@@ DiscussionsArgs
|
||||
#[arg(long, help_heading = "Filters")]
|
||||
pub path: Option<String>,
|
||||
+ #[arg(long, value_parser = ["either", "new", "old"], default_value = "either", help_heading = "Filters")]
|
||||
+ pub path_side: String,
|
||||
@@ Change 3 filtering
|
||||
-Path filter matches `position_new_path`.
|
||||
+Path filter semantics:
|
||||
+- `either` (default): match `position_new_path` OR `position_old_path`
|
||||
+- `new`: match only `position_new_path`
|
||||
+- `old`: match only `position_old_path`
|
||||
```
|
||||
|
||||
8. **Add explicit freshness behavior for empty-result queries + bootstrap backfill**
|
||||
Analysis: Freshness based only on “participating rows” is undefined when results are empty. Define deterministic behavior and backfill `project_sync_state` on migration so `unknown` doesn’t spike unexpectedly after deploy.
|
||||
|
||||
```diff
|
||||
--- a/plan.md
|
||||
+++ b/plan.md
|
||||
@@ Freshness state logic
|
||||
+Empty-result rules:
|
||||
+- If query is project-scoped (`-p` or `--project-id`), freshness is computed from that project even when no rows match.
|
||||
+- If query is unscoped and returns zero rows, freshness is computed from all tracked projects.
|
||||
@@ A1. Track per-project sync timestamp
|
||||
+Migration step: seed `project_sync_state` from latest known sync metadata where available
|
||||
+to avoid mass `unknown` freshness immediately after rollout.
|
||||
```
|
||||
|
||||
9. **Upgrade `--discussion-id` from filter-only to first-class thread retrieval**
|
||||
Analysis: Filtering list output by discussion ID still returns list-shaped data and partial note context. A direct thread retrieval mode is faster for agent workflows and avoids extra commands.
|
||||
|
||||
```diff
|
||||
--- a/plan.md
|
||||
+++ b/plan.md
|
||||
@@ Core Changes
|
||||
-| 8 | Add direct `--discussion-id` filter to notes/discussions/show | Core | Small |
|
||||
+| 8 | Add direct `--discussion-id` filter + single-thread retrieval mode | Core | Medium |
|
||||
@@ Change 8
|
||||
+lore -J discussions --discussion-id <id> --full-thread
|
||||
+# Returns one discussion with full notes payload (same note schema as show command).
|
||||
```
|
||||
|
||||
10. **Replace ad-hoc AC performance timing with repeatable perf harness**
|
||||
Analysis: `time lore ...` is noisy and machine-dependent. A reproducible seeded benchmark test gives stable guardrails and catches regressions earlier.
|
||||
|
||||
```diff
|
||||
--- a/plan.md
|
||||
+++ b/plan.md
|
||||
@@ AC-10: Performance Budget
|
||||
-time lore -J discussions --for-mr <iid> -n 100
|
||||
-# real 0m0.100s (p95 < 150ms)
|
||||
+cargo test --test perf_discussions -- --ignored --nocapture
|
||||
+# Uses seeded fixture DB and N repeated runs; asserts p95 < 150ms for target query shape.
|
||||
```
|
||||
|
||||
If you want, I can also produce a fully merged “iteration 5” rewritten plan document with these edits applied end-to-end so it’s directly executable by an implementation agent.
|
||||
143
docs/plan-expose-discussion-ids.feedback-6.md.bak
Normal file
143
docs/plan-expose-discussion-ids.feedback-6.md.bak
Normal file
@@ -0,0 +1,143 @@
|
||||
Strong plan overall. The biggest gaps I’d fix are around sync-health correctness, idempotency/integrity under repeated ingests, deleted-entity lifecycle, and reducing schema drift risk without heavy reflection machinery.
|
||||
|
||||
I avoided everything in your `## Rejected Recommendations` section.
|
||||
|
||||
**1. Add Sync Health Semantics (not just age)**
|
||||
Time freshness alone can mislead after partial/failed syncs. Agents need to know whether data is both recent and complete.
|
||||
|
||||
```diff
|
||||
@@ ## Freshness Metadata & Staleness Guards (Cross-Cutting, Change A/F/G)
|
||||
- pub freshness_state: String, // "fresh" | "stale" | "unknown"
|
||||
+ pub freshness_state: String, // "fresh" | "stale" | "unknown"
|
||||
+ pub sync_status: String, // "ok" | "partial" | "failed" | "never"
|
||||
+ pub last_successful_sync_run_id: Option<i64>,
|
||||
+ pub last_attempted_sync_run_id: Option<i64>,
|
||||
@@
|
||||
-#[arg(long, help_heading = "Freshness")]
|
||||
-pub require_fresh: Option<String>,
|
||||
+#[arg(long, help_heading = "Freshness")]
|
||||
+pub require_fresh: Option<String>,
|
||||
+#[arg(long, help_heading = "Freshness")]
|
||||
+pub require_sync_ok: bool,
|
||||
```
|
||||
|
||||
Rationale: this prevents false confidence when one project is fresh-by-time but latest sync actually failed or was partial.
|
||||
|
||||
---
|
||||
|
||||
**2. Add `--require-complete` Guard for Missing Required IDs**
|
||||
You already expose `meta.incomplete_rows`; add a hard gate for automation.
|
||||
|
||||
```diff
|
||||
@@ ## Count Semantics (Cross-Cutting Convention)
|
||||
`incomplete_rows` is computed via a dedicated COUNT query...
|
||||
+Add CLI guard:
|
||||
+`--require-complete` fails with exit code 19 when `meta.incomplete_rows > 0`.
|
||||
+Suggested action: `lore sync --full`.
|
||||
```
|
||||
|
||||
Rationale: agents can fail fast instead of silently acting on partial datasets.
|
||||
|
||||
---
|
||||
|
||||
**3. Strengthen Ingestion Idempotency + Referential Integrity for Notes**
|
||||
You added natural-key uniqueness for discussions; do the same for notes and enforce parent integrity at DB level.
|
||||
|
||||
```diff
|
||||
@@ ## Supporting Indexes (Cross-Cutting, Change D)
|
||||
CREATE UNIQUE INDEX IF NOT EXISTS idx_discussions_project_gitlab_discussion_id
|
||||
ON discussions(project_id, gitlab_discussion_id);
|
||||
+CREATE UNIQUE INDEX IF NOT EXISTS idx_notes_project_gitlab_id
|
||||
+ ON notes(project_id, gitlab_id);
|
||||
+
|
||||
+-- Referential integrity
|
||||
+-- notes.discussion_id REFERENCES discussions(id)
|
||||
+-- notes.project_id REFERENCES projects(id)
|
||||
```
|
||||
|
||||
Rationale: repeated syncs and retries won’t duplicate notes, and orphaned rows can’t accumulate.
|
||||
|
||||
---
|
||||
|
||||
**4. Add Deleted/Tombstoned Entity Lifecycle**
|
||||
Current plan excludes null IDs but doesn’t define behavior when GitLab entities are deleted after sync.
|
||||
|
||||
```diff
|
||||
@@ ## Contract Invariants (NEW)
|
||||
+### Deletion Lifecycle Invariant
|
||||
+1. Notes/discussions deleted upstream are tombstoned locally (`deleted_at`), not hard-deleted.
|
||||
+2. All list/show commands exclude tombstoned rows by default.
|
||||
+3. Optional flag `--include-deleted` exposes tombstoned rows for audit/debug.
|
||||
```
|
||||
|
||||
Rationale: preserves auditability, prevents ghost actions on deleted objects, and avoids destructive resync behavior.
|
||||
|
||||
---
|
||||
|
||||
**5. Expand Discussions Payload for Rename Accuracy + Better Triage**
|
||||
`--path-side old` is great, but output currently only returns `position_new_*`.
|
||||
|
||||
```diff
|
||||
@@ ## Change 3: Add Standalone `discussions` List Command
|
||||
pub position_new_path: Option<String>,
|
||||
pub position_new_line: Option<i64>,
|
||||
+ pub position_old_path: Option<String>,
|
||||
+ pub position_old_line: Option<i64>,
|
||||
+ pub last_author: Option<String>,
|
||||
+ pub participant_usernames: Vec<String>,
|
||||
```
|
||||
|
||||
Rationale: for renamed/deleted files, agents need old and new coordinates to act confidently; participants/last_author improve thread routing and prioritization.
|
||||
|
||||
---
|
||||
|
||||
**6. Add SQLite Busy Handling + Retry Policy**
|
||||
Read transactions + concurrent sync writes can still produce `SQLITE_BUSY` under load.
|
||||
|
||||
```diff
|
||||
@@ ## Count Semantics (Cross-Cutting Convention)
|
||||
**Snapshot consistency**: All three queries ... inside a single read transaction ...
|
||||
+**Busy handling**: set `PRAGMA busy_timeout` (e.g. 5000ms) and retry transient
|
||||
+`SQLITE_BUSY` errors up to 3 times with jittered backoff for read commands.
|
||||
```
|
||||
|
||||
Rationale: improves reliability in real multi-agent usage without changing semantics.
|
||||
|
||||
---
|
||||
|
||||
**7. Make Field Definitions Single-Source (Lightweight Drift Prevention)**
|
||||
You rejected full schema generation from code; a lower-cost middle ground is shared field manifests used by both docs and `--fields` validation.
|
||||
|
||||
```diff
|
||||
@@ ## Change 7: Fix Robot-Docs Response Schemas
|
||||
+#### 7h. Single-source field manifests (no reflection)
|
||||
+Define per-command field constants (e.g. `NOTES_FIELDS`, `DISCUSSIONS_FIELDS`)
|
||||
+used by:
|
||||
+1) `--fields` validation/filtering
|
||||
+2) `--fields minimal` expansion
|
||||
+3) `robot-docs` schema rendering
|
||||
```
|
||||
|
||||
Rationale: cuts drift risk materially while staying much simpler than reflection/snapshot infra.
|
||||
|
||||
---
|
||||
|
||||
**8. De-duplicate and Upgrade Test Strategy Around Concurrency**
|
||||
There are duplicated tests across Change 2 and Change 3; add explicit race tests where sync writes happen between list subqueries to prove tx consistency.
|
||||
|
||||
```diff
|
||||
@@ ## Tests
|
||||
-**Test 6**: `--project-id` scopes IID resolution directly
|
||||
-**Test 7**: `--path-side old` matches renamed file discussions
|
||||
-**Test 8**: `--path-side either` matches both old and new paths
|
||||
+Move shared discussion-filter tests to a single section under Change 3.
|
||||
+Add concurrency tests:
|
||||
+1) count/page/incomplete consistency under concurrent sync writes
|
||||
+2) show discussion+notes snapshot consistency under concurrent writes
|
||||
```
|
||||
|
||||
Rationale: less maintenance noise, better coverage of your highest-risk correctness path.
|
||||
|
||||
---
|
||||
|
||||
If you want, I can also produce a single consolidated patch block that rewrites your plan text end-to-end with these edits applied in-place.
|
||||
2128
docs/plan-expose-discussion-ids.md
Normal file
2128
docs/plan-expose-discussion-ids.md
Normal file
File diff suppressed because it is too large
Load Diff
169
docs/plan-surgical-sync.feedback-3.md
Normal file
169
docs/plan-surgical-sync.feedback-3.md
Normal file
@@ -0,0 +1,169 @@
|
||||
Below are the strongest **new** revisions I’d make (excluding everything in your rejected list), with rationale and plan-level diffs.
|
||||
|
||||
### 1. Add a durable run ledger (`sync_runs`) with phase state
|
||||
This makes surgical sync crash-resumable, auditable, and safer under Ctrl+C. Right now `run_id` is mostly ephemeral; persisting phase state removes ambiguity about what completed.
|
||||
|
||||
```diff
|
||||
@@ Design Constraints
|
||||
+9. **Durable run state**: Surgical sync MUST persist a `sync_runs` row keyed by `run_id`
|
||||
+ with phase transitions (`preflight`, `ingest`, `dependents`, `docs`, `embed`, `done`, `failed`).
|
||||
+ This is required for crash recovery, observability, and deterministic retries.
|
||||
|
||||
@@ Step 9: Create `run_sync_surgical`
|
||||
+Before Stage 0, insert `sync_runs(run_id, project_id, mode='surgical', requested_counts, started_at)`.
|
||||
+After each stage, update `sync_runs.phase`, counters, and `last_error` if present.
|
||||
+On success/failure, set terminal state (`done`/`failed`) and `finished_at`.
|
||||
```
|
||||
|
||||
### 2. Add `--preflight-only` (network validation without writes)
|
||||
`--dry-run` is intentionally zero-network, so it cannot validate IIDs. `--preflight-only` is high-value for agents: it verifies existence and permissions quickly with no DB mutation.
|
||||
|
||||
```diff
|
||||
@@ CLI Interface
|
||||
lore sync --dry-run --issue 123 -p myproject
|
||||
+lore sync --preflight-only --issue 123 -p myproject
|
||||
|
||||
@@ Step 2: Add `--issue`, `--mr`, `-p` to `SyncArgs`
|
||||
+ /// Validate remote entities and auth without any DB writes
|
||||
+ #[arg(long, default_value_t = false)]
|
||||
+ pub preflight_only: bool,
|
||||
|
||||
@@ Step 10: Add branch in `run_sync`
|
||||
+if options.preflight_only && options.is_surgical() {
|
||||
+ return run_sync_surgical_preflight_only(config, &options, run_id, signal).await;
|
||||
+}
|
||||
```
|
||||
|
||||
### 3. Preflight should aggregate all missing/failed IIDs, not fail-fast
|
||||
Fail-fast causes repeated reruns. Aggregating errors gives one-shot correction and better robot automation.
|
||||
|
||||
```diff
|
||||
@@ Step 7: Create `src/ingestion/surgical.rs`
|
||||
-/// Returns the fetched payloads. If ANY fetch fails, the entire operation should abort.
|
||||
+/// Returns fetched payloads plus per-IID failures; caller aborts writes if failures exist.
|
||||
pub async fn preflight_fetch(...) -> Result<PreflightResult> {
|
||||
|
||||
@@
|
||||
#[derive(Debug, Default)]
|
||||
pub struct PreflightResult {
|
||||
pub issues: Vec<GitLabIssue>,
|
||||
pub merge_requests: Vec<GitLabMergeRequest>,
|
||||
+ pub failures: Vec<EntityFailure>, // stage="fetch"
|
||||
}
|
||||
|
||||
@@ Step 9: Create `run_sync_surgical`
|
||||
-let preflight = preflight_fetch(...).await?;
|
||||
+let preflight = preflight_fetch(...).await?;
|
||||
+if !preflight.failures.is_empty() {
|
||||
+ result.entity_failures = preflight.failures;
|
||||
+ return Err(LoreError::Other("Surgical preflight failed for one or more IIDs".into()).into());
|
||||
+}
|
||||
```
|
||||
|
||||
### 4. Stop filtering scoped queue drains with raw `json_extract` scans
|
||||
`json_extract(payload_json, '$.scope_run_id')` in hot drain queries will degrade as queue grows. Use indexed scope metadata.
|
||||
|
||||
```diff
|
||||
@@ Step 9b: Implement scoped drain helpers
|
||||
-// claim query adds:
|
||||
-// AND json_extract(payload_json, '$.scope_run_id') = ?
|
||||
+// Add migration:
|
||||
+// 1) Add `scope_run_id` generated/stored column derived from payload_json (or explicit column)
|
||||
+// 2) Create index on (project_id, job_type, scope_run_id, status, id)
|
||||
+// Scoped drains filter by indexed `scope_run_id`, not full-table JSON extraction.
|
||||
```
|
||||
|
||||
### 5. Replace `dirty_source_ids` collection-by-query with explicit run scoping
|
||||
The current approach can accidentally include prior dirty rows for the same source and can duplicate work. Tag dirty rows with `origin_run_id` and consume them by run.
|
||||
|
||||
```diff
|
||||
@@ Design Constraints
|
||||
-2. **Dirty queue scoping**: ... MUST call ... `run_generate_docs_for_dirty_ids`
|
||||
+2. **Dirty queue scoping**: Surgical sync MUST scope docs by `origin_run_id` on `dirty_sources`
|
||||
+ (or equivalent exact run marker) and MUST NOT drain unrelated dirty rows.
|
||||
|
||||
@@ Step 7: `SurgicalIngestResult`
|
||||
- pub dirty_source_ids: Vec<i64>,
|
||||
+ pub origin_run_id: String,
|
||||
|
||||
@@ Step 9a: Implement `run_generate_docs_for_dirty_ids`
|
||||
-pub fn run_generate_docs_for_dirty_ids(config: &Config, dirty_source_ids: &[i64]) -> Result<...>
|
||||
+pub fn run_generate_docs_for_run_id(config: &Config, run_id: &str) -> Result<...>
|
||||
```
|
||||
|
||||
### 6. Enforce transaction safety at the type boundary
|
||||
The combination of `unchecked_transaction()` and `&Connection` signatures is fragile. Accept `&Transaction` for ingest internals and use `TransactionBehavior::Immediate` for deterministic lock behavior.
|
||||
|
||||
```diff
|
||||
@@ Step 7: Create `src/ingestion/surgical.rs`
|
||||
-pub fn ingest_issue_by_iid_from_payload(conn: &Connection, ...)
|
||||
+pub fn ingest_issue_by_iid_from_payload(tx: &rusqlite::Transaction<'_>, ...)
|
||||
|
||||
-pub fn ingest_mr_by_iid_from_payload(conn: &Connection, ...)
|
||||
+pub fn ingest_mr_by_iid_from_payload(tx: &rusqlite::Transaction<'_>, ...)
|
||||
|
||||
-let tx = conn.unchecked_transaction()?;
|
||||
+let tx = conn.transaction_with_behavior(rusqlite::TransactionBehavior::Immediate)?;
|
||||
```
|
||||
|
||||
### 7. Acquire sync lock only for mutation phases, not remote preflight
|
||||
This materially reduces lock contention and keeps normal sync throughput higher, while still guaranteeing mutation serialization.
|
||||
|
||||
```diff
|
||||
@@ Design Constraints
|
||||
+10. **Lock window minimization**: Preflight fetch runs without sync lock; lock is acquired immediately
|
||||
+ before first DB mutation and held through all mutation stages.
|
||||
|
||||
@@ Step 9: Create `run_sync_surgical`
|
||||
-// ── Acquire sync lock ──
|
||||
-...
|
||||
-// ── Stage 0: Preflight fetch ──
|
||||
+// ── Stage 0: Preflight fetch (no lock, no writes) ──
|
||||
...
|
||||
+// ── Acquire sync lock just before Stage 1 mutation ──
|
||||
```
|
||||
|
||||
### 8. Add explicit transient retry policy beyond 429
|
||||
Client already handles rate limits; surgical reliability improves a lot if 5xx/timeouts are retried with bounded backoff.
|
||||
|
||||
```diff
|
||||
@@ Design Constraints
|
||||
+11. **Transient retry policy**: Preflight and dependent remote fetches MUST retry boundedly on
|
||||
+ timeout/5xx with jittered backoff; permanent errors (404/401/403) fail immediately.
|
||||
|
||||
@@ Step 5: Add `get_issue_by_iid` / `get_mr_by_iid`
|
||||
+Document retry behavior for transient transport/server failures.
|
||||
```
|
||||
|
||||
### 9. Tighten automated tests around scoping invariants
|
||||
You already list manual checks; these should be enforced in unit/integration tests to prevent regressions.
|
||||
|
||||
```diff
|
||||
@@ Step 1: TDD — Write Failing Tests First
|
||||
+### 1d. New invariants tests
|
||||
+- `surgical_docs_scope_ignores_preexisting_dirty_rows`
|
||||
+- `scoped_queue_drain_ignores_orphaned_jobs`
|
||||
+- `preflight_aggregates_multiple_missing_iids`
|
||||
+- `preflight_only_performs_zero_writes`
|
||||
+- `dry_run_performs_zero_network_calls`
|
||||
+- `lock_window_does_not_block_during_preflight`
|
||||
|
||||
@@ Acceptance Criteria
|
||||
+32. Scoped queue/docs invariants are covered by automated tests (not manual-only verification).
|
||||
```
|
||||
|
||||
### 10. Make robot-mode surgical output first-class
|
||||
For agent workflows, include full stage telemetry and actionable recovery commands.
|
||||
|
||||
```diff
|
||||
@@ Step 15: Update `SyncResult` for robot mode structured output
|
||||
+ /// Per-stage elapsed ms for deterministic performance tracking
|
||||
+ pub stage_timings_ms: std::collections::BTreeMap<String, u64>,
|
||||
+ /// Suggested recovery commands (robot ergonomics)
|
||||
+ pub recovery_actions: Vec<String>,
|
||||
|
||||
@@ Step 14: Update `robot-docs` manifest
|
||||
+Document surgical-specific error codes and `actions` schema for automated recovery.
|
||||
```
|
||||
|
||||
If you want, I can now produce a fully rewritten **iteration 3** plan that merges these into your current structure end-to-end.
|
||||
212
docs/plan-surgical-sync.feedback-4.md
Normal file
212
docs/plan-surgical-sync.feedback-4.md
Normal file
@@ -0,0 +1,212 @@
|
||||
1. **Resolve the current contract contradictions (`preflight-only`, `dry-run`, `sync_runs`)**
|
||||
|
||||
Why this improves the plan:
|
||||
- Right now the constraints conflict: “zero DB writes before commit” versus inserting `sync_runs` during preflight.
|
||||
- This ambiguity will cause implementation drift and flaky acceptance tests.
|
||||
- Splitting control-plane writes from content-plane writes keeps safety guarantees strict while preserving observability.
|
||||
|
||||
```diff
|
||||
@@ ## Design Constraints
|
||||
-6. **Preflight-then-commit**: All remote fetches happen BEFORE any DB writes. If any IID fetch fails (404, network error), the entire operation aborts with zero DB mutations.
|
||||
+6. **Preflight-then-commit (content-plane)**: All remote fetches happen BEFORE any writes to content tables (`issues`, `merge_requests`, `discussions`, `resource_events`, `documents`, `embeddings`).
|
||||
+7. **Control-plane exception**: `sync_runs` / `sync_run_entities` writes are allowed during preflight for observability and crash diagnostics.
|
||||
@@
|
||||
-11. **Preflight-only mode**: `--preflight-only` validates remote entity existence and permissions with zero DB writes.
|
||||
+11. **Preflight-only mode**: `--preflight-only` performs zero content writes; control-plane run-ledger writes are allowed.
|
||||
@@ ### For me to evaluate (functional):
|
||||
-24. **Preflight-only mode** ... no DB mutations beyond the sync_runs ledger entry
|
||||
+24. **Preflight-only mode** ... no content DB mutations; only run-ledger rows may be written
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
2. **Add stale-write protection to avoid TOCTOU regressions during unlocked preflight**
|
||||
|
||||
Why this improves the plan:
|
||||
- You intentionally preflight without lock; that’s good for throughput but introduces race risk.
|
||||
- Without a guard, a slower surgical run can overwrite newer data ingested by a concurrent normal sync.
|
||||
- This is a correctness bug under contention, not a nice-to-have.
|
||||
|
||||
```diff
|
||||
@@ ## Design Constraints
|
||||
+12. **Stale-write protection**: Surgical ingest MUST NOT overwrite fresher local rows. If local `updated_at` is newer than the preflight payload’s `updated_at`, skip that entity and record `skipped_stale`.
|
||||
@@ ## Step 7: Create `src/ingestion/surgical.rs`
|
||||
- let labels_created = process_single_issue(conn, config, project_id, issue)?;
|
||||
+ // Skip stale payloads to avoid TOCTOU overwrite after unlocked preflight.
|
||||
+ if is_local_newer_issue(conn, project_id, issue.iid, issue.updated_at)? {
|
||||
+ result.skipped_stale += 1;
|
||||
+ return Ok(result);
|
||||
+ }
|
||||
+ let labels_created = process_single_issue(conn, config, project_id, issue)?;
|
||||
@@
|
||||
+// same guard for MR path
|
||||
@@ ## Step 15: Update `SyncResult`
|
||||
+ /// Entities skipped because local row was newer than preflight payload
|
||||
+ pub skipped_stale: usize,
|
||||
@@ ### Edge cases to verify:
|
||||
+38. **TOCTOU safety**: if a normal sync updates entity after preflight but before ingest, surgical run skips stale payload (no overwrite)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
3. **Make dirty-source scoping exact (do not capture pre-existing rows for same entity)**
|
||||
|
||||
Why this improves the plan:
|
||||
- Current “query dirty rows by `source_id` after ingest” can accidentally include older dirty rows for the same entity.
|
||||
- That silently violates strict run scoping and can delete unrelated backlog rows.
|
||||
- You can fix this without adding `origin_run_id` to `dirty_sources` (which you already rejected).
|
||||
|
||||
```diff
|
||||
@@ ## Step 7: Create `src/ingestion/surgical.rs`
|
||||
- // Collect dirty_source rows for this entity
|
||||
- let mut stmt = conn.prepare(
|
||||
- "SELECT id FROM dirty_sources WHERE source_type = 'issue' AND source_id = ?1"
|
||||
- )?;
|
||||
+ // Capture only rows inserted by THIS call using high-water mark.
|
||||
+ let before_dirty_id: i64 = conn.query_row(
|
||||
+ "SELECT COALESCE(MAX(id), 0) FROM dirty_sources",
|
||||
+ [], |r| r.get(0),
|
||||
+ )?;
|
||||
+ // ... call process_single_issue ...
|
||||
+ let mut stmt = conn.prepare(
|
||||
+ "SELECT id FROM dirty_sources
|
||||
+ WHERE id > ?1 AND source_type = 'issue' AND source_id = ?2"
|
||||
+ )?;
|
||||
@@
|
||||
+ // same pattern for MR
|
||||
@@ ### 1d. Scoping invariant tests
|
||||
+#[test]
|
||||
+fn surgical_docs_scope_ignores_preexisting_dirty_rows_for_same_entity() {
|
||||
+ // pre-insert dirty row for iid=7, then surgical ingest iid=7
|
||||
+ // assert result.dirty_source_ids only contains newly inserted rows
|
||||
+}
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
4. **Fix embed-stage leakage when `--no-docs` is used in surgical mode**
|
||||
|
||||
Why this improves the plan:
|
||||
- The current design can run a global embed even when the docs stage is skipped, which may embed unrelated backlog docs.
|
||||
- That breaks the surgical “scope only this run” promise.
|
||||
- This is both correctness and operator-trust critical.
|
||||
|
||||
```diff
|
||||
@@ ## Step 9: Create `run_sync_surgical`
|
||||
- if !options.no_embed {
|
||||
+ // Surgical embed only runs when surgical docs actually regenerated docs in this run.
|
||||
+ if !options.no_embed && !options.no_docs && result.documents_regenerated > 0 {
|
||||
@@ ## Step 4: Wire new fields in `handle_sync_cmd`
|
||||
+ if options.is_surgical() && options.no_docs && !options.no_embed {
|
||||
+ return Err(Box::new(LoreError::Other(
|
||||
+ "In surgical mode, --no-docs requires --no-embed (to preserve scoping guarantees)".to_string()
|
||||
+ )));
|
||||
+ }
|
||||
@@ ### For me to evaluate
|
||||
+39. **No embed leakage**: `sync --issue X --no-docs` never embeds unrelated unembedded docs
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
5. **Add queue-failure hygiene so scoped jobs do not leak forever**
|
||||
|
||||
Why this improves the plan:
|
||||
- Scoped drains prevent accidental processing, but failed runs can strand pending jobs permanently.
|
||||
- You need explicit terminalization (`aborted`) and optional replay mechanics.
|
||||
- Otherwise queue bloat and confusing diagnostics accumulate.
|
||||
|
||||
```diff
|
||||
@@ ## Step 8a: Add `sync_runs` table migration
|
||||
+ALTER TABLE dependent_queue ADD COLUMN aborted_reason TEXT;
|
||||
+-- status domain now includes: pending, claimed, done, failed, aborted
|
||||
@@ ## Step 9: run_sync_surgical failure paths
|
||||
+// On run failure/cancel:
|
||||
+conn.execute(
|
||||
+ "UPDATE dependent_queue
|
||||
+ SET status='aborted', aborted_reason=?1
|
||||
+ WHERE project_id=?2 AND scope_run_id=?3 AND status='pending'",
|
||||
+ rusqlite::params![failure_summary, project_id, run_id],
|
||||
+)?;
|
||||
@@ ## Acceptance Criteria
|
||||
+40. **No stranded scoped jobs**: failed surgical runs leave no `pending` rows for their `scope_run_id`
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
6. **Persist per-entity lifecycle (`sync_run_entities`) for real observability and deterministic retry**
|
||||
|
||||
Why this improves the plan:
|
||||
- `sync_runs` alone gives aggregate counters but not which IID failed at which stage.
|
||||
- Per-entity records make retries deterministic and robot output far more useful.
|
||||
- This is the missing piece for your stated “deterministic retry decisions.”
|
||||
|
||||
```diff
|
||||
@@ ## Step 8a: Add `sync_runs` table migration
|
||||
+CREATE TABLE IF NOT EXISTS sync_run_entities (
|
||||
+ id INTEGER PRIMARY KEY,
|
||||
+ run_id TEXT NOT NULL REFERENCES sync_runs(run_id),
|
||||
+ entity_type TEXT NOT NULL CHECK(entity_type IN ('issue','merge_request')),
|
||||
+ iid INTEGER NOT NULL,
|
||||
+ stage TEXT NOT NULL,
|
||||
+ status TEXT NOT NULL CHECK(status IN ('ok','failed','skipped_stale')),
|
||||
+ error_code TEXT,
|
||||
+ error_message TEXT,
|
||||
+ updated_at INTEGER NOT NULL
|
||||
+);
|
||||
+CREATE INDEX IF NOT EXISTS idx_sync_run_entities_run ON sync_run_entities(run_id, entity_type, iid);
|
||||
@@ ## Step 15: Update `SyncResult`
|
||||
+ pub failed_iids: Vec<(String, u64)>,
|
||||
+ pub skipped_stale_iids: Vec<(String, u64)>,
|
||||
@@ ## CLI Interface
|
||||
+lore --robot sync-runs --run-id <id>
|
||||
+lore --robot sync-runs --run-id <id> --retry-failed
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
7. **Use explicit error type for surgical preflight failures (not `LoreError::Other`)**
|
||||
|
||||
Why this improves the plan:
|
||||
- `Other(String)` loses machine semantics, weakens robot mode, and leads to bad exit-code behavior.
|
||||
- A typed error preserves structured failures and enables actionable recovery commands.
|
||||
|
||||
```diff
|
||||
@@ ## Step 9: run_sync_surgical
|
||||
- return Err(LoreError::Other(
|
||||
- format!("Surgical preflight failed for {} of {} IIDs: {}", ...)
|
||||
- ).into());
|
||||
+ return Err(LoreError::SurgicalPreflightFailed {
|
||||
+ run_id: run_id.to_string(),
|
||||
+ total: total_items,
|
||||
+ failures: preflight.failures.clone(),
|
||||
+ }.into());
|
||||
@@ ## Step 15: Update `SyncResult`
|
||||
+ /// Machine-actionable error summary for robot mode
|
||||
+ pub error_code: Option<String>,
|
||||
@@ ## Acceptance Criteria
|
||||
+41. **Typed failure**: preflight failures serialize structured errors (not generic `Other`) with machine-usable codes/actions
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
8. **Strengthen tests for rollback, contention, and stale-skip guarantees**
|
||||
|
||||
Why this improves the plan:
|
||||
- Current tests cover many happy-paths and scoping invariants, but key race/rollback behaviors are still under-tested.
|
||||
- These are exactly where regressions will appear first in production.
|
||||
|
||||
```diff
|
||||
@@ ## Step 1: TDD — Write Failing Tests First
|
||||
+### 1f. Transactional rollback + TOCTOU tests
|
||||
+1. `preflight_success_then_ingest_failure_rolls_back_all_content_writes`
|
||||
+2. `stale_payload_is_skipped_when_local_updated_at_is_newer`
|
||||
+3. `failed_run_aborts_pending_scoped_jobs`
|
||||
+4. `surgical_no_docs_requires_no_embed`
|
||||
@@ ### Automated scoping invariants
|
||||
-38. **Scoped queue/docs invariants are enforced by automated tests**
|
||||
+42. **Rollback and race invariants are enforced by automated tests** (no partial writes on ingest failure, no stale overwrite)
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
These eight revisions keep your core approach intact, avoid your explicitly rejected ideas, and close the biggest correctness/operability gaps before implementation.
|
||||
130
docs/plan-surgical-sync.feedback-5.md
Normal file
130
docs/plan-surgical-sync.feedback-5.md
Normal file
@@ -0,0 +1,130 @@
|
||||
**Critical Gaps In Current Plan**
|
||||
1. `dirty_sources` scoping is based on `id`, but `dirty_sources` has no `id` column and uses `(source_type, source_id)` UPSERT semantics.
|
||||
2. Plan assumes a new `dependent_queue` with `status`, but current code uses `pending_dependent_fetches` (delete-on-complete), so queue-scoping design conflicts with existing invariants.
|
||||
3. Constraint 6 says all remote fetches happen before any content writes, but the proposed surgical flow fetches discussions/events/diffs after ingest writes.
|
||||
4. `sync_runs` is already an existing table and already used by `SyncRunRecorder`; the plan currently treats it like a new table.
|
||||
|
||||
**Best Revisions**
|
||||
|
||||
1. **Fix dirty-source scoping to match real schema (queued-at watermark, not `id` high-water).**
|
||||
Why this is better: This removes a correctness bug and makes same-entity re-ingest deterministic under UPSERT behavior.
|
||||
|
||||
```diff
|
||||
@@ Design Constraints
|
||||
-2. Dirty queue scoping: ... capture MAX(id) FROM dirty_sources ... run_generate_docs_for_dirty_ids ...
|
||||
+2. Dirty queue scoping: `dirty_sources` is keyed by `(source_type, source_id)` and updated via UPSERT.
|
||||
+ Surgical scoping MUST use:
|
||||
+ 1) a run-level `run_dirty_floor_ms` captured before surgical ingest, and
|
||||
+ 2) explicit touched source keys from ingest (`(source_type, source_id)`).
|
||||
+ Surgical docs MUST call a scoped API (e.g. `run_generate_docs_for_sources`) and MUST NOT drain global dirty queue.
|
||||
@@ Step 9a
|
||||
-pub fn run_generate_docs_for_dirty_ids(config: &Config, dirty_source_ids: &[i64]) -> Result<GenerateDocsResult>
|
||||
+pub fn run_generate_docs_for_sources(config: &Config, sources: &[(SourceType, i64)]) -> Result<GenerateDocsResult>
|
||||
```
|
||||
|
||||
2. **Bypass shared dependent queue in surgical mode; run dependents inline per target.**
|
||||
Why this is better: Avoids queue migration churn, avoids run-scope conflicts with existing unique constraints, and removes orphan-job hygiene complexity entirely.
|
||||
|
||||
```diff
|
||||
@@ Design Constraints
|
||||
-4. Dependent queue scoping: ... scope_run_id indexed column on dependent_queue ...
|
||||
+4. Surgical dependent execution: surgical mode MUST bypass `pending_dependent_fetches`.
|
||||
+ Dependents (resource_events, mr_closes_issues, mr_diffs) run inline for targeted entities only.
|
||||
+ Global queue remains for normal sync only.
|
||||
@@ Design Constraints
|
||||
-14. Queue failure hygiene: ... pending scoped jobs ... terminalized to aborted ...
|
||||
+14. Surgical failure hygiene: surgical mode MUST leave no queue artifacts because it does not enqueue dependent jobs.
|
||||
@@ Step 9b / 9c / Step 13
|
||||
-Implement scoped drain helpers and enqueue_job scope_run_id plumbing
|
||||
+Replace with direct per-entity helpers in ingestion layer:
|
||||
+ - sync_issue_resource_events_direct(...)
|
||||
+ - sync_mr_resource_events_direct(...)
|
||||
+ - sync_mr_closes_issues_direct(...)
|
||||
+ - sync_mr_diffs_direct(...)
|
||||
```
|
||||
|
||||
3. **Clarify atomicity contract to “primary-entity atomicity” (remove contradiction).**
|
||||
Why this is better: Keeps strong zero-write guarantees for missing IIDs while matching practical staged pipeline behavior.
|
||||
|
||||
```diff
|
||||
@@ Design Constraints
|
||||
-6. Preflight-then-commit (content-plane): All remote fetches happen BEFORE any writes to content tables ...
|
||||
+6. Primary-entity atomicity: all requested issue/MR payload fetches complete before first content write.
|
||||
+ If any primary IID fetch fails, primary ingest does zero content writes.
|
||||
+ Dependent stages (discussions/events/diffs/closes) are post-ingest and best-effort, with structured per-stage failure reporting.
|
||||
```
|
||||
|
||||
4. **Extend existing `sync_runs` schema instead of redefining it.**
|
||||
Why this is better: Preserves compatibility with current `SyncRunRecorder`, `sync_status`, and existing historical data.
|
||||
|
||||
```diff
|
||||
@@ Step 8a
|
||||
-Add `sync_runs` table migration (CREATE TABLE sync_runs ...)
|
||||
+Add migration 027 to extend existing `sync_runs` table:
|
||||
+ - ADD COLUMN mode TEXT NULL -- 'standard' | 'surgical'
|
||||
+ - ADD COLUMN phase TEXT NULL -- preflight|ingest|dependents|docs|embed|done|failed
|
||||
+ - ADD COLUMN surgical_summary_json TEXT NULL
|
||||
+Reuse `SyncRunRecorder` row lifecycle; do not introduce a parallel run-ledger model.
|
||||
```
|
||||
|
||||
5. **Strengthen TOCTOU stale protection for equal timestamps.**
|
||||
Why this is better: Prevents regressions when `updated_at` is equal but a fresher local fetch already happened.
|
||||
|
||||
```diff
|
||||
@@ Design Constraints
|
||||
-13. ... If local `updated_at` is newer than preflight payload `updated_at`, skip ...
|
||||
+13. ... Skip stale when:
|
||||
+ a) local.updated_at > payload.updated_at, OR
|
||||
+ b) local.updated_at == payload.updated_at AND local.last_seen_at > preflight_started_at_ms.
|
||||
+ This prevents equal-timestamp regressions under concurrent sync.
|
||||
@@ Step 1f tests
|
||||
+Add test: `equal_updated_at_but_newer_last_seen_is_skipped`.
|
||||
```
|
||||
|
||||
6. **Shrink lock window further: release `sync` lock before embed; use dedicated embed lock.**
|
||||
Why this is better: Prevents long embedding from blocking unrelated syncs and avoids concurrent embed writers.
|
||||
|
||||
```diff
|
||||
@@ Design Constraints
|
||||
-11. Lock ... held through all mutation stages.
|
||||
+11. Lock ... held through ingest/dependents/docs only.
|
||||
+ Release `AppLock("sync")` before embed.
|
||||
+ Embed stage uses `AppLock("embed")` for single-flight embedding writes.
|
||||
@@ Step 9
|
||||
-Embed runs inside the same sync lock window
|
||||
+Embed runs after sync lock release, under dedicated embed lock
|
||||
```
|
||||
|
||||
7. **Add the missing `sync-runs` robot read path (the plan references it but doesn’t define it).**
|
||||
Why this is better: Makes durable run-state actually useful for recovery automation and observability.
|
||||
|
||||
```diff
|
||||
@@ Step 14 (new)
|
||||
+## Step 14a: Add `sync-runs` read command
|
||||
+
|
||||
+CLI:
|
||||
+ lore --robot sync-runs --limit 20
|
||||
+ lore --robot sync-runs --run-id <id>
|
||||
+ lore --robot sync-runs --state failed
|
||||
+
|
||||
+Robot response fields:
|
||||
+ run_id, mode, phase, status, started_at, finished_at, counters, failures, suggested_retry_command
|
||||
```
|
||||
|
||||
8. **Add URL-native surgical targets (`--issue-url`, `--mr-url`) with project inference.**
|
||||
Why this is better: Much more agent-friendly and reduces project-resolution errors from copy/paste workflows.
|
||||
|
||||
```diff
|
||||
@@ CLI Interface
|
||||
lore sync --issue 123 --issue 456 -p myproject
|
||||
+lore sync --issue-url https://gitlab.example.com/group/proj/-/issues/123
|
||||
+lore sync --mr-url https://gitlab.example.com/group/proj/-/merge_requests/789
|
||||
@@ Step 2
|
||||
+Add repeatable flags:
|
||||
+ --issue-url <url>
|
||||
+ --mr-url <url>
|
||||
+Parse URL into (project_path, iid). If all targets are URL-derived and same project, `-p` is optional.
|
||||
+If mixed projects are provided in one command, reject with clear error.
|
||||
```
|
||||
|
||||
If you want, I can produce a single consolidated patched version of your plan (iteration 5 draft) with these revisions already merged.
|
||||
152
docs/plan-surgical-sync.feedback-6.md
Normal file
152
docs/plan-surgical-sync.feedback-6.md
Normal file
@@ -0,0 +1,152 @@
|
||||
Highest-impact revisions after reviewing your v5 plan:
|
||||
|
||||
1. **Fix a real scoping hole: embed can still process unrelated docs**
|
||||
Rationale: The current plan assumes that scoped docs imply a scoped embed, but that only holds while no other run creates unembedded docs. You explicitly release the sync lock before embed, so another sync can enqueue/regenerate docs in between, and `run_embed` may embed an unrelated backlog. This breaks surgical isolation and can hide backlog debt.
|
||||
```diff
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@ Design Constraints
|
||||
-3. Embed scoping: Embedding runs only for documents regenerated by this surgical run. Because `run_embed` processes only unembedded docs, scoping is automatic IF docs are scoped correctly...
|
||||
+3. Embed scoping: Embedding MUST be explicitly scoped to documents regenerated by this surgical run.
|
||||
+ `run_generate_docs_for_sources` returns regenerated `document_ids`; surgical mode calls
|
||||
+ `run_embed_for_document_ids(document_ids)` and never global `run_embed`.
|
||||
+ This remains true even after lock release and under concurrent normal sync activity.
|
||||
@@ Step 9a: Implement `run_generate_docs_for_sources`
|
||||
-pub fn run_generate_docs_for_sources(...) -> Result<GenerateDocsResult> {
|
||||
+pub fn run_generate_docs_for_sources(...) -> Result<GenerateDocsResult> {
|
||||
+ // Return regenerated document IDs for scoped embedding.
|
||||
+ // GenerateDocsResult { regenerated, errored, regenerated_document_ids: Vec<i64> }
|
||||
@@ Step 9: Embed stage
|
||||
- match run_embed(config, false, false, None, signal).await {
|
||||
+ match run_embed_for_document_ids(config, &result.regenerated_document_ids, signal).await {
|
||||
```
|
||||
|
||||
2. **Make run-ledger lifecycle actually durable (and consistent with your own constraint 10)**
|
||||
Rationale: The plan text says "reuse `SyncRunRecorder`", but Step 9 writes raw SQL directly. That creates lifecycle drift, missing heartbeats, and inconsistent failure handling as the code evolves.
|
||||
```diff
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@ Design Constraints
|
||||
-10. Durable run state: ... Reuses `SyncRunRecorder` row lifecycle ...
|
||||
+10. Durable run state: surgical sync MUST use `SyncRunRecorder` end-to-end (no ad-hoc SQL updates).
|
||||
+ Add recorder APIs for `set_mode`, `set_phase`, `set_counters`, `finish_succeeded`,
|
||||
+ `finish_failed`, `finish_cancelled`, and periodic `heartbeat`.
|
||||
@@ Step 9: Create `run_sync_surgical`
|
||||
- conn.execute("INSERT INTO sync_runs ...")
|
||||
- conn.execute("UPDATE sync_runs SET phase = ...")
|
||||
+ let mut recorder = SyncRunRecorder::start_surgical(...)?;
|
||||
+ recorder.set_phase("preflight")?;
|
||||
+ recorder.heartbeat_if_due()?;
|
||||
+ recorder.set_phase("ingest")?;
|
||||
+ ...
|
||||
+ recorder.finish_succeeded_with_warnings(...)?;
|
||||
```
|
||||
|
||||
3. **Add explicit `cancelled` terminal state**
|
||||
Rationale: Current early cancellation branches return `Ok(result)` without guaranteed run-row finalization. That leaves misleading `running` rows and weak crash diagnostics.
|
||||
```diff
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@ Design Constraints
|
||||
+15. Cancellation semantics: If shutdown is observed after run start, phase is set to `cancelled`,
|
||||
+ status is `cancelled`, `finished_at` is written, and lock is released before return.
|
||||
@@ Step 8a migration
|
||||
+ALTER TABLE sync_runs ADD COLUMN warnings_count INTEGER NOT NULL DEFAULT 0;
|
||||
+ALTER TABLE sync_runs ADD COLUMN cancelled_at INTEGER;
|
||||
@@ Acceptance Criteria
|
||||
+47. Cancellation durability: Ctrl+C during surgical sync records `status='cancelled'`,
|
||||
+ `phase='cancelled'`, and `finished_at` in `sync_runs`.
|
||||
```
|
||||
|
||||
4. **Reduce lock contention further by separating dependent fetch and dependent write**
|
||||
Rationale: You currently hold lock through network-heavy dependent stages. That maximizes contention and increases lock timeout risk. Better: fetch dependents unlocked, write in short locked transactions with per-entity freshness guards.
|
||||
```diff
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@ Design Constraints
|
||||
-11. Lock window minimization: ... held through ingest, dependents, and docs stages.
|
||||
+11. Lock window minimization: lock is held only for DB mutation windows.
|
||||
+ Dependents run in two phases:
|
||||
+ (a) fetch from GitLab without lock,
|
||||
+ (b) write results under lock in short transactions.
|
||||
+ Apply per-entity freshness checks before dependent writes.
|
||||
@@ Step 9: Dependent stages
|
||||
- // All dependents run INLINE per-entity ... while lock is held
|
||||
+ // Dependents fetch outside lock, then write under lock with CAS-style watermark guards.
|
||||
```
|
||||
|
||||
5. **Introduce stage timeout budgets to prevent hung surgical runs**
|
||||
Rationale: A single slow GitLab endpoint can stall the whole run and hold resources too long. Timeout budgets plus per-entity failure recording keep the run bounded and predictable.
|
||||
```diff
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@ Design Constraints
|
||||
+16. Stage timeout budgets: each dependent fetch has a per-entity timeout and a global stage budget.
|
||||
+ Timed-out entities are recorded in `entity_failures` with code `TIMEOUT` and run continues best-effort.
|
||||
@@ Step 9 notes
|
||||
+ - Wrap dependent network calls with `tokio::time::timeout`.
|
||||
+ - Add config knobs:
|
||||
+ `sync.surgical_entity_timeout_seconds` (default 20),
|
||||
+ `sync.surgical_dependents_budget_seconds` (default 120).
|
||||
```
|
||||
|
||||
6. **Add payload integrity checks (project mismatch hard-fail)**
|
||||
Rationale: Surgical mode is precision tooling. If an API/proxy misconfiguration returns payloads from the wrong project, preflight should fail loudly rather than trust downstream assumptions.
|
||||
```diff
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@ Step 7: preflight_fetch
|
||||
+ // Integrity check: payload.project_id must equal requested gitlab_project_id.
|
||||
+ // On mismatch, record EntityFailure { code: "PROJECT_MISMATCH", stage: "fetch" }.
|
||||
@@ Step 9d: error codes
|
||||
+PROJECT_MISMATCH -> usage/config data integrity failure (typed, machine-readable)
|
||||
@@ Acceptance Criteria
|
||||
+48. Project integrity: payloads with unexpected `project_id` are rejected in preflight
|
||||
+ and produce zero content writes.
|
||||
```
|
||||
|
||||
7. **Upgrade robot output from aggregate-only to per-entity lifecycle**
|
||||
Rationale: `entity_failures` alone is not enough for robust automation. Agents need a complete entity outcome map (fetched, ingested, stale-skipped, dependent failures) to retry deterministically.
|
||||
```diff
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@ Step 15: Update `SyncResult`
|
||||
+pub struct EntityOutcome {
|
||||
+ pub entity_type: String,
|
||||
+ pub iid: u64,
|
||||
+ pub fetched: bool,
|
||||
+ pub ingested: bool,
|
||||
+ pub stale_skipped: bool,
|
||||
+ pub dependent_failures: Vec<EntityFailure>,
|
||||
+}
|
||||
@@
|
||||
+pub entity_outcomes: Vec<EntityOutcome>,
|
||||
+pub completion_status: String, // succeeded | succeeded_with_warnings | failed | cancelled
|
||||
@@ Robot mode
|
||||
- enables agents to detect partial failures via `entity_failures`
|
||||
+ enables deterministic, per-IID retry and richer UI messaging.
|
||||
```
|
||||
|
||||
8. **Index `sync_runs` for real observability at scale**
|
||||
Rationale: You’re adding mode/phase/counters and then querying recent surgical runs. Without indexes, this degrades as run history grows.
|
||||
```diff
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@ Step 8a migration
|
||||
+CREATE INDEX IF NOT EXISTS idx_sync_runs_mode_started
|
||||
+ ON sync_runs(mode, started_at DESC);
|
||||
+CREATE INDEX IF NOT EXISTS idx_sync_runs_status_phase_started
|
||||
+ ON sync_runs(status, phase, started_at DESC);
|
||||
```
|
||||
|
||||
9. **Add tests specifically for the new failure-prone paths**
|
||||
Rationale: Current tests are strong on ingest and scoping, but still miss new high-risk runtime behavior (cancel state, timeout handling, scoped embed under concurrency).
|
||||
```diff
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@ Step 1f tests
|
||||
+#[tokio::test]
|
||||
+async fn cancellation_marks_sync_run_cancelled() { ... }
|
||||
+
|
||||
+#[tokio::test]
|
||||
+async fn dependent_timeout_records_entity_failure_and_continues() { ... }
|
||||
+
|
||||
+#[tokio::test]
|
||||
+async fn scoped_embed_does_not_embed_unrelated_docs_created_after_docs_stage() { ... }
|
||||
@@ Acceptance Criteria
|
||||
+49. Scoped embed isolation under concurrency is verified by automated test.
|
||||
+50. Timeout path is verified (TIMEOUT code + continued processing).
|
||||
```
|
||||
|
||||
These revisions keep your core direction intact, avoid every rejected recommendation, and materially improve correctness under concurrency, operational observability, and agent automation quality.
|
||||
2240
docs/plan-surgical-sync.md
Normal file
2240
docs/plan-surgical-sync.md
Normal file
File diff suppressed because it is too large
Load Diff
@@ -1,174 +0,0 @@
|
||||
Highest-impact gaps I see in the current plan:
|
||||
|
||||
1. `for-issue` / `for-mr` filtering is ambiguous across projects and can return incorrect rows.
|
||||
2. `lore notes` has no pagination contract, so large exports and deterministic resumption are weak.
|
||||
3. Migration `022` is high-risk (table rebuild + FTS + junction tables) without explicit integrity gates.
|
||||
4. Note-doc freshness is incomplete for upstream note deletions and parent metadata changes (labels/title).
|
||||
|
||||
Below are my best revisions, each with rationale and a git-diff-style plan edit.
|
||||
|
||||
---
|
||||
|
||||
1. **Add gated rollout + rollback controls**
|
||||
Rationale: You can still “ship together” while reducing blast radius. This makes recovery fast if note-doc generation causes DB/embedding pressure.
|
||||
|
||||
```diff
|
||||
@@ ## Design
|
||||
-Two phases, shipped together as one feature:
|
||||
+Two phases, shipped together as one feature, but with runtime gates:
|
||||
+
|
||||
+- `feature.notes_cli` (Phase 1 surface)
|
||||
+- `feature.note_documents` (Phase 2 indexing/extraction path)
|
||||
+
|
||||
+Rollout order:
|
||||
+1) Enable `notes_cli`
|
||||
+2) Run note-doc backfill in bounded batches
|
||||
+3) Enable `note_documents` for continuous updates
|
||||
+
|
||||
+Rollback:
|
||||
+- Disabling `feature.note_documents` stops new note-doc generation without affecting issue/MR/discussion docs.
|
||||
```
|
||||
|
||||
2. **Add keyset pagination + deterministic ordering**
|
||||
Rationale: Needed for year-long reviewer analysis and reliable “continue where I left off” behavior under concurrent updates.
|
||||
|
||||
```diff
|
||||
@@ pub struct NoteListFilters<'a> {
|
||||
pub limit: usize,
|
||||
+ pub cursor: Option<&'a str>, // keyset token "<sort_ms>:<id>"
|
||||
+ pub include_total_count: bool, // avoid COUNT(*) in hot paths
|
||||
@@
|
||||
- pub sort: &'a str, // "created" (default) | "updated"
|
||||
+ pub sort: &'a str, // "created" | "updated"
|
||||
@@ query_notes SQL
|
||||
-ORDER BY {sort_column} {order}
|
||||
+ORDER BY {sort_column} {order}, n.id {order}
|
||||
LIMIT ?
|
||||
```
|
||||
|
||||
3. **Make `for-issue` / `for-mr` project-scoped**
|
||||
Rationale: IIDs are not globally unique. Requiring project avoids false positives and hard-to-debug cross-project leakage.
|
||||
|
||||
```diff
|
||||
@@ pub struct NotesArgs {
|
||||
- #[arg(long = "for-issue", help_heading = "Filters", conflicts_with = "for_mr")]
|
||||
+ #[arg(long = "for-issue", help_heading = "Filters", conflicts_with = "for_mr", requires = "project")]
|
||||
pub for_issue: Option<i64>,
|
||||
@@
|
||||
- #[arg(long = "for-mr", help_heading = "Filters", conflicts_with = "for_issue")]
|
||||
+ #[arg(long = "for-mr", help_heading = "Filters", conflicts_with = "for_issue", requires = "project")]
|
||||
pub for_mr: Option<i64>,
|
||||
```
|
||||
|
||||
4. **Upgrade path filtering semantics**
|
||||
Rationale: Review comments often reference renames/moves. Restricting to `position_new_path` misses relevant notes.
|
||||
|
||||
```diff
|
||||
@@ pub struct NotesArgs {
|
||||
- /// Filter by file path (trailing / for prefix match)
|
||||
+ /// Filter by file path
|
||||
#[arg(long, help_heading = "Filters")]
|
||||
pub path: Option<String>,
|
||||
+ /// Path mode: exact|prefix|glob
|
||||
+ #[arg(long = "path-mode", value_parser = ["exact","prefix","glob"], default_value = "exact", help_heading = "Filters")]
|
||||
+ pub path_mode: String,
|
||||
+ /// Match against old path as well as new path
|
||||
+ #[arg(long = "match-old-path", help_heading = "Filters")]
|
||||
+ pub match_old_path: bool,
|
||||
@@ query_notes filter mappings
|
||||
-- `path` ... n.position_new_path ...
|
||||
+- `path` applies to `n.position_new_path` and optionally `n.position_old_path`.
|
||||
+- `glob` mode translates `*`/`?` to SQL LIKE with escaping.
|
||||
```
|
||||
|
||||
5. **Add explicit performance indexes (new migration)**
|
||||
Rationale: `notes` becomes a first-class query surface; without indexes, filters degrade quickly at 10k+ note scale.
|
||||
|
||||
```diff
|
||||
@@ ## Phase 1: `lore notes` Command
|
||||
+### Work Chunk 1E: Query Performance Indexes
|
||||
+**Files:** `migrations/023_notes_query_indexes.sql`, `src/core/db.rs`
|
||||
+
|
||||
+Add indexes:
|
||||
+- `notes(project_id, created_at DESC, id DESC)`
|
||||
+- `notes(author_username, created_at DESC, id DESC) WHERE is_system = 0`
|
||||
+- `notes(discussion_id)`
|
||||
+- `notes(position_new_path)`
|
||||
+- `notes(position_old_path)`
|
||||
+- `discussions(issue_id)`
|
||||
+- `discussions(merge_request_id)`
|
||||
```
|
||||
|
||||
6. **Harden migration 022 with transactional integrity checks**
|
||||
Rationale: This is the riskiest part of the plan. Add hard fail-fast checks so corruption cannot silently pass.
|
||||
|
||||
```diff
|
||||
@@ ### Work Chunk 2A: Schema Migration (022)
|
||||
+Migration safety requirements:
|
||||
+- Execute in a single `BEGIN IMMEDIATE ... COMMIT` transaction.
|
||||
+- Capture and compare pre/post row counts for `documents`, `document_labels`, `document_paths`, `dirty_sources`.
|
||||
+- Run `PRAGMA foreign_key_check` and abort on any violation.
|
||||
+- Run `PRAGMA integrity_check` and abort on non-`ok`.
|
||||
+- Rebuild FTS and assert `documents_fts` rowcount equals `documents` rowcount.
|
||||
```
|
||||
|
||||
7. **Add note deletion + parent-change propagation**
|
||||
Rationale: The current plan handles create/update ingestion but not all staleness paths. Without this change, note documents drift out of sync with upstream state.
|
||||
|
||||
```diff
|
||||
@@ ## Phase 2: Per-Note Documents
|
||||
+### Work Chunk 2G: Freshness Propagation
|
||||
+**Files:** `src/ingestion/discussions.rs`, `src/ingestion/mr_discussions.rs`, `src/documents/regenerator.rs`
|
||||
+
|
||||
+Rules:
|
||||
+- If a previously stored note is missing from upstream payload, delete local note row and enqueue `(note, id)` for document deletion.
|
||||
+- When parent issue/MR title or labels change, enqueue descendant note docs dirty (notes inherit parent metadata).
|
||||
+- Keep idempotent behavior for repeated syncs.
|
||||
```
|
||||
|
||||
8. **Separate FTS coverage from embedding coverage**
|
||||
Rationale: The biggest cost/performance risk is embeddings. Index all notes in FTS, but embed selectively, controlled by policy knobs.
|
||||
|
||||
```diff
|
||||
@@ ## Estimated Document Volume Impact
|
||||
-FTS5 handles this comfortably. Embedding generation time scales linearly (~4x increase).
|
||||
+FTS5 handles this comfortably. Embedding generation is policy-controlled:
|
||||
+- FTS: index all non-system note docs
|
||||
+- Embeddings default: only notes with body length >= 40 chars (configurable)
|
||||
+- Add config: `documents.note_embeddings.min_chars`, `documents.note_embeddings.enabled`
|
||||
+- Prioritize unresolved DiffNotes before other notes during embedding backfill
|
||||
```
|
||||
|
||||
9. **Bring structured reviewer profiling into scope (not narrative reporting)**
|
||||
Rationale: This directly serves the stated use case and makes the feature compelling immediately.
|
||||
|
||||
```diff
|
||||
@@ ## Non-Goals
|
||||
-- Adding a "reviewer profile" report command (that's a downstream use case built on this infrastructure)
|
||||
+- Generating free-form narrative reviewer reports.
|
||||
+ A structured profiling command is in scope.
|
||||
+
|
||||
+## Phase 3: Structured Reviewer Profiling
|
||||
+Add `lore notes profile --author <user> --since <window>` returning:
|
||||
+- top commented paths
|
||||
+- top parent labels
|
||||
+- unresolved-comment ratio
|
||||
+- note-type distribution
|
||||
+- median comment length
|
||||
```
|
||||
|
||||
10. **Add operational SLOs + robot-mode status for note pipeline**
|
||||
Rationale: Reliability improves when regressions are observable, not inferred from failures.
|
||||
|
||||
```diff
|
||||
@@ ## Verification Checklist
|
||||
+Operational checks:
|
||||
+- `lore -J stats` includes per-`source_type` document counts (including `note`)
|
||||
+- Add queue lag metrics: oldest dirty note age, retry backlog size
|
||||
+- Add extraction error breakdown by `source_type`
|
||||
+- Add smoke assertion: disabling `feature.note_documents` leaves other source regeneration unaffected
|
||||
```
|
||||
|
||||
---
|
||||
|
||||
If you want, I can produce a single consolidated revised PRD draft (fully merged text, not just diffs) as the next step.
|
||||
@@ -1,200 +0,0 @@
|
||||
Below are the strongest revisions I’d make, excluding everything in your `## Rejected Recommendations` list.
|
||||
|
||||
1. **Add a Phase 0 for stable note identity before any note-doc generation**
|
||||
Rationale: your current plan still allows note document churn because Issue discussion ingestion is delete/reinsert-based. That makes local `notes.id` unstable, causing unnecessary dirtying/regeneration and potential stale-doc edge cases. Stabilizing identity first (upsert-by-GitLab-ID + sweep stale) improves correctness and cuts repeated work.
|
||||
|
||||
```diff
|
||||
@@ ## Design
|
||||
-Two phases, shipped together as one feature:
|
||||
+Three phases, shipped together as one feature:
|
||||
+- **Phase 0 (Foundation):** Stable note identity in local DB (upsert + sweep, no delete/reinsert churn)
|
||||
- **Phase 1 (Option A):** `lore notes` command — direct SQL query over the `notes` table with rich filtering
|
||||
- **Phase 2 (Option B):** Per-note documents — each non-system note becomes its own searchable document in the FTS/embedding pipeline
|
||||
@@
|
||||
+## Phase 0: Stable Note Identity
|
||||
+
|
||||
+### Work Chunk 0A: Upsert/Sweep for Issue Discussion Notes
|
||||
+**Files:** `src/ingestion/discussions.rs`, `migrations/022_notes_identity_index.sql`, `src/core/db.rs`
|
||||
+**Implementation:**
|
||||
+- Add unique index: `UNIQUE(project_id, gitlab_id)` on `notes`
|
||||
+- Replace delete/reinsert issue-note flow with upsert + `last_seen_at` sweep (same durability model as MR note sweep)
|
||||
+- Ensure `insert_note/upsert_note` returns the stable local row id for both insert and update paths
|
||||
```
|
||||
|
||||
2. **Replace `source_type` CHECK constraints with a registry table + FK in migration**
|
||||
Rationale: table CHECKs force full table rebuild for every new source type forever. A `source_types` table with FK keeps DB-level integrity and future extensibility without rebuilding `documents`/`dirty_sources` every time. This is a major architecture hardening win.
|
||||
|
||||
```diff
|
||||
@@ ### Work Chunk 2A: Schema Migration (023)
|
||||
-Current migration ... CHECK constraints limiting `source_type` ...
|
||||
+Current migration ... CHECK constraints limiting `source_type` ...
|
||||
+Revision: migrate to `source_types` registry table + FK constraints.
|
||||
@@
|
||||
-1. `dirty_sources` — add `'note'` to source_type CHECK
|
||||
-2. `documents` — add `'note'` to source_type CHECK
|
||||
+1. Create `source_types(name TEXT PRIMARY KEY)` and seed: `issue, merge_request, discussion, note`
|
||||
+2. Rebuild `dirty_sources` and `documents` to replace CHECK with `REFERENCES source_types(name)`
|
||||
+3. Future source-type additions become `INSERT INTO source_types(name) VALUES (?)` (no table rebuild)
|
||||
@@
|
||||
+#### Additional integrity tests
|
||||
+#[test]
|
||||
+fn test_source_types_registry_contains_note() { ... }
|
||||
+#[test]
|
||||
+fn test_documents_source_type_fk_enforced() { ... }
|
||||
+#[test]
|
||||
+fn test_dirty_sources_source_type_fk_enforced() { ... }
|
||||
```
|
||||
|
||||
3. **Mark note documents dirty only when note semantics actually changed**
|
||||
Rationale: The current loops mark every non-system note dirty on every sync. With 8k+ notes this creates avoidable queue pressure and regeneration cost. Change-aware dirtying (newly inserted or semantically changed notes only) yields major performance and stability improvements.
|
||||
|
||||
```diff
|
||||
@@ ### Work Chunk 2D: Regenerator & Dirty Tracking Integration
|
||||
-for note in notes {
|
||||
- let local_note_id = insert_note(&tx, local_discussion_id, ¬e, None)?;
|
||||
- if !note.is_system {
|
||||
- dirty_tracker::mark_dirty_tx(&tx, SourceType::Note, local_note_id)?;
|
||||
- }
|
||||
-}
|
||||
+for note in notes {
|
||||
+ let outcome = upsert_note(&tx, local_discussion_id, ¬e, None)?;
|
||||
+ if !note.is_system && outcome.changed_semantics {
|
||||
+ dirty_tracker::mark_dirty_tx(&tx, SourceType::Note, outcome.local_note_id)?;
|
||||
+ }
|
||||
+}
|
||||
@@
|
||||
+// changed_semantics should include: body, note_type, path/line positions, resolvable/resolved/resolved_by, updated_at
|
||||
```
|
||||
|
||||
4. **Expand filters to support real analysis windows and resolution state**
|
||||
Rationale: reviewer profiling usually needs bounded windows and both resolved/unresolved views. Current `unresolved: bool` is too narrow and one-sided. Add `--until` and tri-state resolution filtering for better analytical power.
|
||||
|
||||
```diff
|
||||
@@ pub struct NoteListFilters<'a> {
|
||||
- pub since: Option<&'a str>,
|
||||
+ pub since: Option<&'a str>,
|
||||
+ pub until: Option<&'a str>,
|
||||
@@
|
||||
- pub unresolved: bool,
|
||||
+ pub resolution: &'a str, // "any" (default) | "unresolved" | "resolved"
|
||||
@@
|
||||
- pub author: Option<&'a str>,
|
||||
+ pub author: Option<&'a str>, // case-insensitive match
|
||||
@@
|
||||
- // Filter by time (7d, 2w, 1m, or YYYY-MM-DD)
|
||||
+ // Filter by start time (7d, 2w, 1m, or YYYY-MM-DD)
|
||||
pub since: Option<String>,
|
||||
+ /// Filter by end time (7d, 2w, 1m, or YYYY-MM-DD)
|
||||
+ #[arg(long, help_heading = "Filters")]
|
||||
+ pub until: Option<String>,
|
||||
@@
|
||||
- /// Only show unresolved review comments
|
||||
- pub unresolved: bool,
|
||||
+ /// Resolution filter: any, unresolved, resolved
|
||||
+ #[arg(long, value_parser = ["any", "unresolved", "resolved"], default_value = "any", help_heading = "Filters")]
|
||||
+ pub resolution: String,
|
||||
```
|
||||
|
||||
5. **Broaden index strategy to match actual query shapes, not just author queries**
|
||||
Rationale: `idx_notes_user_created` helps one path, but common usage also includes project+time scans and unresolved filters. Add two more partial composites for high-selectivity paths.
|
||||
|
||||
```diff
|
||||
@@ ### Work Chunk 1E: Composite Query Index
|
||||
CREATE INDEX IF NOT EXISTS idx_notes_user_created
|
||||
ON notes(project_id, author_username, created_at DESC, id DESC)
|
||||
WHERE is_system = 0;
|
||||
+
|
||||
+CREATE INDEX IF NOT EXISTS idx_notes_project_created
|
||||
+ON notes(project_id, created_at DESC, id DESC)
|
||||
+WHERE is_system = 0;
|
||||
+
|
||||
+CREATE INDEX IF NOT EXISTS idx_notes_unresolved_project_created
|
||||
+ON notes(project_id, created_at DESC, id DESC)
|
||||
+WHERE is_system = 0 AND resolvable = 1 AND resolved = 0;
|
||||
@@
|
||||
+#[test]
|
||||
+fn test_notes_query_plan_uses_project_created_index_for_default_listing() { ... }
|
||||
+#[test]
|
||||
+fn test_notes_query_plan_uses_unresolved_index_when_resolution_unresolved() { ... }
|
||||
```
|
||||
|
||||
6. **Improve per-note document payload with structured metadata header + minimal thread context**
|
||||
Rationale: isolated single-note docs can lose meaning. A small structured header plus lightweight context (parent + one preceding note excerpt) improves semantic retrieval quality substantially without re-bundling full threads.
|
||||
|
||||
```diff
|
||||
@@ ### Work Chunk 2C: Note Document Extractor
|
||||
-// 6. Format content:
|
||||
-// [[Note]] {note_type or "Comment"} on {parent_type_prefix}: {parent_title}
|
||||
-// Project: {path_with_namespace}
|
||||
-// URL: {url}
|
||||
-// Author: @{author}
|
||||
-// Date: {format_date(created_at)}
|
||||
-// Labels: {labels_json}
|
||||
-// File: {position_new_path}:{position_new_line} (if DiffNote)
|
||||
-//
|
||||
-// --- Body ---
|
||||
-//
|
||||
-// {body}
|
||||
+// 6. Format content with machine-readable header:
|
||||
+// [[Note]]
|
||||
+// source_type: note
|
||||
+// note_gitlab_id: {gitlab_id}
|
||||
+// project: {path_with_namespace}
|
||||
+// parent_type: {Issue|MergeRequest}
|
||||
+// parent_iid: {iid}
|
||||
+// note_type: {DiffNote|DiscussionNote|Comment}
|
||||
+// author: @{author}
|
||||
+// created_at: {iso8601}
|
||||
+// resolved: {true|false}
|
||||
+// path: {position_new_path}:{position_new_line}
|
||||
+// url: {url}
|
||||
+//
|
||||
+// --- Context ---
|
||||
+// parent_title: {title}
|
||||
+// previous_note_excerpt: {optional, max 200 chars}
|
||||
+//
|
||||
+// --- Body ---
|
||||
+// {body}
|
||||
```
|
||||
|
||||
7. **Add first-class export modes for downstream profiling pipelines**
|
||||
Rationale: this makes the feature much more useful immediately (LLM prompts, notebook analysis, external scripts) without adding a profiling command. It stays within your non-goals and increases adoption.
|
||||
|
||||
```diff
|
||||
@@ pub struct NotesArgs {
|
||||
+ /// Output format
|
||||
+ #[arg(long, value_parser = ["table", "json", "jsonl", "csv"], default_value = "table", help_heading = "Output")]
|
||||
+ pub format: String,
|
||||
@@
|
||||
- if robot_mode {
|
||||
+ if robot_mode || args.format == "json" || args.format == "jsonl" || args.format == "csv" {
|
||||
print_list_notes_json(...)
|
||||
} else {
|
||||
print_list_notes(&result);
|
||||
}
|
||||
@@ ### Work Chunk 1C: Human & Robot Output Formatting
|
||||
+Add `print_list_notes_csv()` and `print_list_notes_jsonl()`:
|
||||
+- CSV columns mirror `NoteListRowJson` field names
|
||||
+- JSONL emits one note object per line for streaming pipelines
|
||||
```
|
||||
|
||||
8. **Strengthen verification with idempotence + migration data-preservation checks**
|
||||
Rationale: this feature touches ingestion, migrations, indexing, and regeneration. Add explicit idempotence/perf checks so regressions surface early.
|
||||
|
||||
```diff
|
||||
@@ ## Verification Checklist
|
||||
cargo test
|
||||
cargo clippy --all-targets -- -D warnings
|
||||
cargo fmt --check
|
||||
+cargo test test_note_ingestion_idempotent_across_two_syncs
|
||||
+cargo test test_note_document_count_stable_after_second_generate_docs_full
|
||||
@@
|
||||
+lore sync
|
||||
+lore generate-docs --full
|
||||
+lore -J stats > /tmp/stats1.json
|
||||
+lore generate-docs --full
|
||||
+lore -J stats > /tmp/stats2.json
|
||||
+# assert note doc count unchanged and dirty queue drains to zero
|
||||
```
|
||||
|
||||
If you want, I can turn this into a fully rewritten PRD v2 draft with these changes merged in-place and renumbered work chunks end-to-end.
|
||||
@@ -1,162 +0,0 @@
|
||||
These are the highest-impact revisions I’d make. They avoid everything in your `## Rejected Recommendations` list.
|
||||
|
||||
1. Add immediate note-document deletion propagation (don’t wait for `generate-docs --full`)
|
||||
Why: right now, deleted notes can leave stale `source_type='note'` documents until a full rebuild. That creates incorrect search/reporting results and weakens trust in the dataset.
|
||||
```diff
|
||||
@@ Phase 0: Stable Note Identity
|
||||
+### Work Chunk 0B: Immediate Deletion Propagation
|
||||
+
|
||||
+When sweep deletes stale notes, propagate deletion to documents in the same transaction.
|
||||
+Do not rely on eventual cleanup via `generate-docs --full`.
|
||||
+
|
||||
+#### Tests to Write First
|
||||
+#[test]
|
||||
+fn test_issue_note_sweep_deletes_note_documents_immediately() { ... }
|
||||
+#[test]
|
||||
+fn test_mr_note_sweep_deletes_note_documents_immediately() { ... }
|
||||
+
|
||||
+#### Implementation
|
||||
+Use `DELETE ... RETURNING id, is_system` in note sweep functions.
|
||||
+For returned non-system note ids:
|
||||
+1) `DELETE FROM documents WHERE source_type='note' AND source_id=?`
|
||||
+2) `DELETE FROM dirty_sources WHERE source_type='note' AND source_id=?`
|
||||
```
|
||||
|
||||
2. Add one-time upgrade backfill for existing notes (migration 024)
|
||||
Why: existing DBs will otherwise only get note-documents for changed/new notes. Historical notes remain invisible unless users manually run full rebuild.
|
||||
```diff
|
||||
@@ Phase 2: Per-Note Documents
|
||||
+### Work Chunk 2H: Backfill Existing Notes After Upgrade (Migration 024)
|
||||
+
|
||||
+Create migration `024_note_dirty_backfill.sql`:
|
||||
+INSERT INTO dirty_sources (source_type, source_id, queued_at)
|
||||
+SELECT 'note', n.id, unixepoch('now') * 1000
|
||||
+FROM notes n
|
||||
+LEFT JOIN documents d
|
||||
+ ON d.source_type='note' AND d.source_id=n.id
|
||||
+WHERE n.is_system=0 AND d.id IS NULL
|
||||
+ON CONFLICT(source_type, source_id) DO NOTHING;
|
||||
+
|
||||
+Add migration test asserting idempotence and expected queue size.
|
||||
```
|
||||
|
||||
3. Fix `--since/--until` semantics and validation
|
||||
Why: reusing `parse_since` for `until` creates ambiguous windows and off-by-boundary behavior; your own example `--since 90d --until 180d` is chronologically reversed.
|
||||
```diff
|
||||
@@ Work Chunk 1A: Data Types & Query Layer
|
||||
- since: parse_since(since_str) then n.created_at >= ?
|
||||
- until: parse_since(until_str) then n.created_at <= ?
|
||||
+ since: parse_since_start_bound(since_str) then n.created_at >= ?
|
||||
+ until: parse_until_end_bound(until_str) then n.created_at <= ?
|
||||
+ Validate since <= until; otherwise return a clear user error.
|
||||
+
|
||||
+#### Tests to Write First
|
||||
+#[test] fn test_query_notes_invalid_time_window_rejected() { ... }
|
||||
+#[test] fn test_query_notes_until_date_is_end_of_day_inclusive() { ... }
|
||||
```
|
||||
|
||||
4. Separate semantic-change detection from housekeeping updates
|
||||
Why: current proposed `WHERE` includes `updated_at`, which will cause unnecessary dirty churn. You want `last_seen_at` to always refresh, but regeneration only when searchable semantics changed.
|
||||
```diff
|
||||
@@ Work Chunk 0A: Upsert/Sweep for Issue Discussion Notes
|
||||
- OR notes.updated_at IS NOT excluded.updated_at
|
||||
+ -- updated_at-only changes should not mark semantic dirty
|
||||
+
|
||||
+Perform two-step logic:
|
||||
+1) Upsert always updates persistence/housekeeping fields (`updated_at`, `last_seen_at`).
|
||||
+2) `changed_semantics` is computed only from fields used by note documents/search filters
|
||||
+ (body, note_type, resolved flags, paths, author, parent linkage).
|
||||
+
|
||||
+#### Tests to Write First
|
||||
+#[test]
|
||||
+fn test_issue_note_upsert_updated_at_only_does_not_mark_semantic_change() { ... }
|
||||
```
|
||||
|
||||
5. Make indexes align with actual query collation and join strategy
|
||||
Why: `author` uses `COLLATE NOCASE`; without collation-aware index, SQLite can skip index use. Also, IID filters via scalar subqueries are harder for planner than direct join predicates.
|
||||
```diff
|
||||
@@ Work Chunk 1E: Composite Query Index
|
||||
-CREATE INDEX ... ON notes(project_id, author_username, created_at DESC, id DESC) WHERE is_system = 0;
|
||||
+CREATE INDEX ... ON notes(project_id, author_username COLLATE NOCASE, created_at DESC, id DESC) WHERE is_system = 0;
|
||||
+
|
||||
+CREATE INDEX IF NOT EXISTS idx_discussions_issue_id ON discussions(issue_id);
|
||||
+CREATE INDEX IF NOT EXISTS idx_discussions_mr_id ON discussions(merge_request_id);
|
||||
```
|
||||
|
||||
```diff
|
||||
@@ Work Chunk 1A: query_notes()
|
||||
- d.issue_id = (SELECT id FROM issues WHERE iid = ? AND project_id = ?)
|
||||
+ i.iid = ? AND i.project_id = ?
|
||||
- d.merge_request_id = (SELECT id FROM merge_requests WHERE iid = ? AND project_id = ?)
|
||||
+ m.iid = ? AND m.project_id = ?
|
||||
```
|
||||
|
||||
6. Replace manual CSV escaping with `csv` crate
|
||||
Why: manual RFC4180 escaping is fragile (quotes/newlines/multi-byte edge cases). This is exactly where a mature library reduces long-term bug risk.
|
||||
```diff
|
||||
@@ Work Chunk 1C: Human & Robot Output Formatting
|
||||
- Uses a minimal CSV writer (no external dependency — the format is simple enough for manual escaping).
|
||||
+ Uses `csv::Writer` for RFC4180-compliant escaping and stable output across edge cases.
|
||||
+
|
||||
+#### Tests to Write First
|
||||
+#[test] fn test_csv_output_multiline_and_quotes_roundtrip() { ... }
|
||||
```
|
||||
|
||||
7. Add `--contains` lexical body filter to `lore notes`
|
||||
Why: useful middle ground between metadata filtering and semantic search; great for reviewer-pattern mining without requiring FTS query syntax.
|
||||
```diff
|
||||
@@ Work Chunk 1B: CLI Arguments & Command Wiring
|
||||
+/// Filter by case-insensitive substring in note body
|
||||
+#[arg(long, help_heading = "Filters")]
|
||||
+pub contains: Option<String>;
|
||||
```
|
||||
|
||||
```diff
|
||||
@@ Work Chunk 1A: NoteListFilters
|
||||
+ pub contains: Option<&'a str>,
|
||||
@@ query_notes dynamic filters
|
||||
+ if let Some(c) = contains {
|
||||
+ where_clauses.push("n.body LIKE ? ESCAPE '\\' COLLATE NOCASE");
|
||||
+ params.push(format!("%{}%", escape_like(c)));
|
||||
+ }
|
||||
```
|
||||
|
||||
8. Reduce note-document embedding noise by slimming metadata header
|
||||
Why: current verbose key-value header repeats low-signal tokens and consumes embedding budget. Keep context, but bias tokens toward actual review text.
|
||||
```diff
|
||||
@@ Work Chunk 2C: Note Document Extractor
|
||||
- Build content with structured metadata header:
|
||||
- [[Note]]
|
||||
- source_type: note
|
||||
- note_gitlab_id: ...
|
||||
- project: ...
|
||||
- ...
|
||||
- --- Body ---
|
||||
- {body}
|
||||
+ Build content with compact, high-signal layout:
|
||||
+ [[Note]]
|
||||
+ @{author} on {Issue#|MR!}{iid} in {project_path}
|
||||
+ path: {path:line} (only when available)
|
||||
+ state: {resolved|unresolved} (only when resolvable)
|
||||
+
|
||||
+ {body}
|
||||
+
|
||||
+Keep detailed metadata in structured document columns/labels/paths/url,
|
||||
+not repeated in verbose text.
|
||||
```
|
||||
|
||||
9. Add explicit performance regression checks for the new hot paths
|
||||
Why: this feature increases document volume ~4x; you should pin acceptable query behavior now so future changes don’t silently degrade.
|
||||
```diff
|
||||
@@ Verification Checklist
|
||||
+Performance/plan checks:
|
||||
+1) `EXPLAIN QUERY PLAN` for:
|
||||
+ - author+since query
|
||||
+ - project+date query
|
||||
+ - for-mr / for-issue query
|
||||
+2) Seed 50k-note synthetic fixture and assert:
|
||||
+ - `lore notes --author ... --limit 100` stays under agreed local threshold
|
||||
+ - `lore search --type note ...` remains deterministic and completes successfully
|
||||
```
|
||||
|
||||
If you want, I can also provide a fully merged “iteration 3” PRD text with these edits applied end-to-end so you can drop it in directly.
|
||||
@@ -1,187 +0,0 @@
|
||||
1. **Canonical note identity for documents: use `notes.gitlab_id` as `source_id`**
|
||||
Why this is better: the current plan still couples document identity to local row IDs. Even with upsert+sweep, local IDs are a storage artifact and can be reused in edge cases. Using GitLab note IDs as canonical document IDs makes regeneration, backfill, and deletion propagation more stable and portable.
|
||||
|
||||
```diff
|
||||
--- a/PRD.md
|
||||
+++ b/PRD.md
|
||||
@@ Phase 0: Stable Note Identity
|
||||
-Phase 2 depends on `notes.id` as the `source_id` for note documents.
|
||||
+Phase 2 uses `notes.gitlab_id` as the `source_id` for note documents.
|
||||
+`notes.id` remains an internal relational key only.
|
||||
|
||||
@@ Work Chunk 0A
|
||||
pub struct NoteUpsertOutcome {
|
||||
pub local_note_id: i64,
|
||||
+ pub document_source_id: i64, // notes.gitlab_id
|
||||
pub changed_semantics: bool,
|
||||
}
|
||||
|
||||
@@ Work Chunk 2D
|
||||
-if !note.is_system && outcome.changed_semantics {
|
||||
- dirty_tracker::mark_dirty_tx(&tx, SourceType::Note, outcome.local_note_id)?;
|
||||
+if !note.is_system && outcome.changed_semantics {
|
||||
+ dirty_tracker::mark_dirty_tx(&tx, SourceType::Note, outcome.document_source_id)?;
|
||||
}
|
||||
|
||||
@@ Work Chunk 2E
|
||||
-SELECT 'note', n.id, ?1
|
||||
+SELECT 'note', n.gitlab_id, ?1
|
||||
|
||||
@@ Work Chunk 2H
|
||||
-ON d.source_type = 'note' AND d.source_id = n.id
|
||||
+ON d.source_type = 'note' AND d.source_id = n.gitlab_id
|
||||
```
|
||||
|
||||
2. **Prevent false deletions on partial/incomplete syncs**
|
||||
Why this is better: sweep-based deletion is correct only when a discussion’s notes were fully fetched. If a page fails mid-fetch, current logic can incorrectly delete valid notes. Add an explicit “fetch complete” guard before sweep.
|
||||
|
||||
```diff
|
||||
--- a/PRD.md
|
||||
+++ b/PRD.md
|
||||
@@ Phase 0
|
||||
+### Work Chunk 0C: Sweep Safety Guard (Partial Fetch Protection)
|
||||
+
|
||||
+Only run stale-note sweep when note pagination completed successfully for that discussion.
|
||||
+If fetch is partial/interrupted, skip sweep and keep prior notes intact.
|
||||
|
||||
+#### Tests to Write First
|
||||
+#[test]
|
||||
+fn test_partial_fetch_does_not_sweep_notes() { /* ... */ }
|
||||
+
|
||||
+#[test]
|
||||
+fn test_complete_fetch_runs_sweep_notes() { /* ... */ }
|
||||
|
||||
+#### Implementation
|
||||
+if discussion_fetch_complete {
|
||||
+ sweep_stale_issue_notes(...)?;
|
||||
+} else {
|
||||
+ tracing::warn!("Skipping stale sweep for discussion {} due to partial fetch", discussion_gitlab_id);
|
||||
+}
|
||||
```
|
||||
|
||||
3. **Make deletion propagation set-based (not per-note loop)**
|
||||
Why this is better: the current per-note DELETE loop is O(N) statements and gets slow on large threads. A temp-table/CTE set-based delete is faster, simpler to reason about, and remains atomic.
|
||||
|
||||
```diff
|
||||
--- a/PRD.md
|
||||
+++ b/PRD.md
|
||||
@@ Work Chunk 0B Implementation
|
||||
- for note_id in stale_note_ids {
|
||||
- conn.execute("DELETE FROM documents WHERE source_type = 'note' AND source_id = ?", [note_id])?;
|
||||
- conn.execute("DELETE FROM dirty_sources WHERE source_type = 'note' AND source_id = ?", [note_id])?;
|
||||
- }
|
||||
+ CREATE TEMP TABLE _stale_note_source_ids(source_id INTEGER PRIMARY KEY) WITHOUT ROWID;
|
||||
+ INSERT INTO _stale_note_source_ids
|
||||
+ SELECT gitlab_id
|
||||
+ FROM notes
|
||||
+ WHERE discussion_id = ? AND last_seen_at < ? AND is_system = 0;
|
||||
+
|
||||
+ DELETE FROM notes
|
||||
+ WHERE discussion_id = ? AND last_seen_at < ?;
|
||||
+
|
||||
+ DELETE FROM documents
|
||||
+ WHERE source_type = 'note'
|
||||
+ AND source_id IN (SELECT source_id FROM _stale_note_source_ids);
|
||||
+
|
||||
+ DELETE FROM dirty_sources
|
||||
+ WHERE source_type = 'note'
|
||||
+ AND source_id IN (SELECT source_id FROM _stale_note_source_ids);
|
||||
+
|
||||
+ DROP TABLE _stale_note_source_ids;
|
||||
```
|
||||
|
||||
4. **Fix project-scoping and time-window semantics in `lore notes`**
|
||||
Why this is better: the plan currently has a contradiction: clap `requires = "project"` blocks use of `defaultProject`, while query layer says default fallback is allowed. Also, `since/until` parsing should use one shared “now” to avoid subtle drift and inverted windows.
|
||||
|
||||
```diff
|
||||
--- a/PRD.md
|
||||
+++ b/PRD.md
|
||||
@@ Work Chunk 1B NotesArgs
|
||||
-#[arg(long = "for-issue", ..., requires = "project")]
|
||||
+#[arg(long = "for-issue", ...)]
|
||||
pub for_issue: Option<i64>;
|
||||
|
||||
-#[arg(long = "for-mr", ..., requires = "project")]
|
||||
+#[arg(long = "for-mr", ...)]
|
||||
pub for_mr: Option<i64>;
|
||||
|
||||
@@ Work Chunk 1A Query Notes
|
||||
-- `since`: `parse_since(since_str)` then `n.created_at >= ?`
|
||||
-- `until`: `parse_since(until_str)` then `n.created_at <= ?`
|
||||
+- Parse `since` and `until` with a single anchored `now_ms` captured once per command.
|
||||
+- If user supplies `YYYY-MM-DD` for `--until`, interpret as end-of-day (23:59:59.999 UTC).
|
||||
+- Validate `since <= until` after both parse with same anchor.
|
||||
```
|
||||
|
||||
5. **Add an analytics mode (not a profile command): `lore notes --aggregate`**
|
||||
Why this is better: this directly supports the stated use case (review patterns) without introducing the rejected “profile report” command. It keeps scope narrow and reuses existing filters.
|
||||
|
||||
```diff
|
||||
--- a/PRD.md
|
||||
+++ b/PRD.md
|
||||
@@ Phase 1
|
||||
+### Work Chunk 1F: Aggregation Mode for Notes Listing
|
||||
+
|
||||
+Add optional aggregation on top of `lore notes`:
|
||||
+- `--aggregate author|note_type|path|resolution`
|
||||
+- `--top N` (default 20)
|
||||
+
|
||||
+Behavior:
|
||||
+- Reuses all existing filters (`--since`, `--project`, `--for-mr`, etc.)
|
||||
+- Returns grouped counts (+ percentage of filtered corpus)
|
||||
+- Works in table/json/jsonl/csv
|
||||
+
|
||||
+Non-goal alignment:
|
||||
+- This is not a narrative “reviewer profile” command.
|
||||
+- It is a query primitive for downstream analysis.
|
||||
```
|
||||
|
||||
6. **Prevent note backfill from starving other document regeneration**
|
||||
Why this is better: after migration/backfill, note dirty entries can dominate the queue and delay issue/MR/discussion updates. Add source-type fairness in regenerator scheduling.
|
||||
|
||||
```diff
|
||||
--- a/PRD.md
|
||||
+++ b/PRD.md
|
||||
@@ Work Chunk 2D
|
||||
+#### Scheduling Revision
|
||||
+Process dirty sources with weighted fairness instead of strict FIFO:
|
||||
+- issue: 3
|
||||
+- merge_request: 3
|
||||
+- discussion: 2
|
||||
+- note: 1
|
||||
+
|
||||
+Implementation sketch:
|
||||
+- fetch next batch by source_type buckets
|
||||
+- interleave according to weights
|
||||
+- preserve retry semantics per source
|
||||
|
||||
+#### Tests to Write First
|
||||
+#[test]
|
||||
+fn test_note_backfill_does_not_starve_issue_and_mr_regeneration() { /* ... */ }
|
||||
```
|
||||
|
||||
7. **Harden migration 023: remove invalid SQL assertions and move integrity checks to tests**
|
||||
Why this is better: `RAISE(ABORT, ...)` in standalone `SELECT` is not valid SQLite usage outside triggers/check expressions. Keep migration SQL minimal/portable and enforce invariants in migration tests.
|
||||
|
||||
```diff
|
||||
--- a/PRD.md
|
||||
+++ b/PRD.md
|
||||
@@ Work Chunk 2A Migration SQL
|
||||
--- Step 10: Integrity verification
|
||||
-SELECT CASE
|
||||
- WHEN ... THEN RAISE(ABORT, '...')
|
||||
-END;
|
||||
+-- Step 10 removed from SQL migration.
|
||||
+-- Integrity verification is enforced in migration tests:
|
||||
+-- 1) pre/post row-count equality
|
||||
+-- 2) `PRAGMA foreign_key_check` is empty
|
||||
+-- 3) documents_fts row count matches documents row count after rebuild
|
||||
|
||||
@@ Work Chunk 2A Tests
|
||||
+#[test]
|
||||
+fn test_migration_023_integrity_checks_pass() {
|
||||
+ // pre/post counts, foreign_key_check empty, fts parity
|
||||
+}
|
||||
```
|
||||
|
||||
These 7 revisions improve correctness under failure, reduce churn risk, improve large-sync performance, and make the feature materially more useful for reviewer-analysis workflows without reintroducing any rejected recommendations.
|
||||
@@ -1,190 +0,0 @@
|
||||
Here are the highest-impact revisions I’d make. None of these repeat anything in your `## Rejected Recommendations`.
|
||||
|
||||
1. **Add immutable reviewer identity (`author_id`) as a first-class key**
|
||||
Why this improves the plan: the PRD’s core use case is year-scale reviewer profiling. Usernames are mutable in GitLab, so username-only filtering will fragment one reviewer into multiple identities over time. Adding `author_id` closes that correctness hole and makes historical analysis reliable.
|
||||
|
||||
```diff
|
||||
@@ Problem Statement
|
||||
-1. **Query individual notes by author** — the `--author` filter on `lore search` only matches the first note's author per discussion thread
|
||||
+1. **Query individual notes by reviewer identity** — support both mutable username and immutable GitLab `author_id` for stable longitudinal analysis
|
||||
|
||||
@@ Phase 0: Stable Note Identity
|
||||
+### Work Chunk 0D: Immutable Author Identity Capture
|
||||
+**Files:** `migrations/025_notes_author_id.sql`, `src/ingestion/discussions.rs`, `src/ingestion/mr_discussions.rs`, `src/cli/commands/list.rs`
|
||||
+
|
||||
+#### Implementation
|
||||
+- Add nullable `notes.author_id INTEGER` and backfill from future syncs.
|
||||
+- Populate `author_id` from GitLab note payload (`note.author.id`) on both issue and MR note ingestion paths.
|
||||
+- Add `--author-id <int>` filter to `lore notes`.
|
||||
+- Keep `--author` for ergonomics; when both provided, require both to match.
|
||||
+
|
||||
+#### Indexing
|
||||
+- Add `idx_notes_author_id_created ON notes(project_id, author_id, created_at DESC, id DESC) WHERE is_system = 0;`
|
||||
+
|
||||
+#### Tests
|
||||
+- `test_query_notes_filter_author_id_survives_username_change`
|
||||
+- `test_query_notes_author_and_author_id_intersection`
|
||||
```
|
||||
|
||||
2. **Strengthen partial-fetch safety from a boolean to an explicit fetch state contract**
|
||||
Why this improves the plan: `fetch_complete: bool` is easy to misuse and fragile under retries/crashes. A run-scoped state model makes sweep correctness auditable and prevents accidental deletions when ingestion aborts midway.
|
||||
|
||||
```diff
|
||||
@@ Phase 0: Stable Note Identity
|
||||
-### Work Chunk 0C: Sweep Safety Guard (Partial Fetch Protection)
|
||||
+### Work Chunk 0C: Sweep Safety Guard with Run-Scoped Fetch State
|
||||
|
||||
@@ Implementation
|
||||
-Add a `fetch_complete` parameter to the discussion ingestion functions. Only run the stale-note sweep when the fetch completed successfully:
|
||||
+Add a run-scoped fetch state:
|
||||
+- `FetchState::Complete`
|
||||
+- `FetchState::Partial`
|
||||
+- `FetchState::Failed`
|
||||
+
|
||||
+Only run sweep on `FetchState::Complete`.
|
||||
+Persist `run_seen_at` once per sync run and pass unchanged through all discussion/note upserts.
|
||||
+Require `run_seen_at` monotonicity per discussion before sweep (skip and warn otherwise).
|
||||
|
||||
@@ Tests to Write First
|
||||
+#[test]
|
||||
+fn test_failed_fetch_never_sweeps_even_after_partial_upserts() { ... }
|
||||
+#[test]
|
||||
+fn test_non_monotonic_run_seen_at_skips_sweep() { ... }
|
||||
+#[test]
|
||||
+fn test_retry_after_failed_fetch_then_complete_sweeps_correctly() { ... }
|
||||
```
|
||||
|
||||
3. **Add DB-level cleanup triggers for note-document referential integrity**
|
||||
Why this improves the plan: Work Chunk 0B handles the sweep path, but not every possible delete path. DB triggers give defense-in-depth so stale note docs cannot survive even if a future code path deletes notes differently.
|
||||
|
||||
```diff
|
||||
@@ Work Chunk 0B: Immediate Deletion Propagation
|
||||
-Update both sweep functions to propagate deletion to documents and dirty_sources using set-based SQL
|
||||
+Keep set-based SQL in sweep functions, and add DB-level cleanup triggers as a safety net.
|
||||
|
||||
@@ Work Chunk 2A: Schema Migration (023)
|
||||
+-- Cleanup trigger: deleting a non-system note must delete note document + dirty queue row
|
||||
+CREATE TRIGGER notes_ad_cleanup AFTER DELETE ON notes
|
||||
+WHEN old.is_system = 0
|
||||
+BEGIN
|
||||
+ DELETE FROM documents
|
||||
+ WHERE source_type = 'note' AND source_id = old.gitlab_id;
|
||||
+ DELETE FROM dirty_sources
|
||||
+ WHERE source_type = 'note' AND source_id = old.gitlab_id;
|
||||
+END;
|
||||
+
|
||||
+-- Cleanup trigger: if note flips to system, remove its document artifacts
|
||||
+CREATE TRIGGER notes_au_system_cleanup AFTER UPDATE OF is_system ON notes
|
||||
+WHEN old.is_system = 0 AND new.is_system = 1
|
||||
+BEGIN
|
||||
+ DELETE FROM documents
|
||||
+ WHERE source_type = 'note' AND source_id = new.gitlab_id;
|
||||
+ DELETE FROM dirty_sources
|
||||
+ WHERE source_type = 'note' AND source_id = new.gitlab_id;
|
||||
+END;
|
||||
```
|
||||
|
||||
4. **Eliminate N+1 extraction cost with parent metadata caching in regeneration**
|
||||
Why this improves the plan: backfilling ~8k notes with per-note parent/label lookups creates avoidable query amplification. Batch caching turns repeated joins into one-time lookups per parent entity and materially reduces rebuild time.
|
||||
|
||||
```diff
|
||||
@@ Phase 2: Per-Note Documents
|
||||
+### Work Chunk 2I: Batch Parent Metadata Cache for Note Regeneration
|
||||
+**Files:** `src/documents/regenerator.rs`, `src/documents/extractor.rs`
|
||||
+
|
||||
+#### Implementation
|
||||
+- Add `NoteExtractionContext` cache keyed by `(noteable_type, parent_id)` containing:
|
||||
+ - parent iid/title/url
|
||||
+ - parent labels
|
||||
+ - project path
|
||||
+- In batch regeneration, prefetch parent metadata for note IDs in the current chunk.
|
||||
+- Use cached metadata in `extract_note_document()` to avoid repeated parent/label queries.
|
||||
+
|
||||
+#### Tests
|
||||
+- `test_note_regeneration_uses_parent_cache_consistently`
|
||||
+- `test_note_regeneration_cache_hit_preserves_hash_determinism`
|
||||
```
|
||||
|
||||
5. **Add embedding dedup cache keyed by semantic text hash**
|
||||
Why this improves the plan: note docs will contain repeated short comments (“LGTM”, “nit: …”). Current doc-level hashing includes metadata, so identical semantic comments still re-embed many times. A semantic embedding hash cache cuts cost and speeds full rebuild/backfill without changing search behavior.
|
||||
|
||||
```diff
|
||||
@@ Phase 2: Per-Note Documents
|
||||
+### Work Chunk 2J: Semantic Embedding Dedup for Notes
|
||||
+**Files:** `migrations/026_embedding_cache.sql`, embedding pipeline module(s), `src/documents/extractor.rs`
|
||||
+
|
||||
+#### Implementation
|
||||
+- Compute `embedding_text` for notes as: normalized note body + compact stable context (`parent_type`, `path`, `resolution`), excluding volatile fields.
|
||||
+- Compute `embedding_hash = sha256(embedding_text)`.
|
||||
+- Before embedding generation, lookup existing vector by `(model, embedding_hash)`.
|
||||
+- Reuse cached vector when present; only call embedding model on misses.
|
||||
+
|
||||
+#### Tests
|
||||
+- `test_identical_note_bodies_reuse_embedding_vector`
|
||||
+- `test_embedding_hash_changes_when_semantic_context_changes`
|
||||
```
|
||||
|
||||
6. **Add deterministic review-signal tags as derived labels**
|
||||
Why this improves the plan: this makes output immediately more useful for reviewer-pattern analysis without adding a profile command (which is explicitly out of scope). It increases practical value of both `lore notes` and `lore search --type note` with low complexity.
|
||||
|
||||
```diff
|
||||
@@ Non-Goals
|
||||
-- Adding a "reviewer profile" report command (that's a downstream use case built on this infrastructure)
|
||||
+- Adding a "reviewer profile" report command (downstream), while allowing low-level derived signal tags as indexing primitives
|
||||
|
||||
@@ Phase 2: Per-Note Documents
|
||||
+### Work Chunk 2K: Derived Review Signal Labels
|
||||
+**Files:** `src/documents/extractor.rs`
|
||||
+
|
||||
+#### Implementation
|
||||
+- Derive deterministic labels from note text + metadata:
|
||||
+ - `signal:nit`
|
||||
+ - `signal:blocking`
|
||||
+ - `signal:security`
|
||||
+ - `signal:performance`
|
||||
+ - `signal:testing`
|
||||
+- Attach via existing `document_labels` flow for note documents.
|
||||
+- No new CLI mode required; existing label filters can consume these labels.
|
||||
+
|
||||
+#### Tests
|
||||
+- `test_note_document_derives_signal_labels_nit`
|
||||
+- `test_note_document_derives_signal_labels_security`
|
||||
+- `test_signal_label_derivation_is_deterministic`
|
||||
```
|
||||
|
||||
7. **Add high-precision note targeting filters (`--note-id`, `--gitlab-note-id`, `--discussion-id`)**
|
||||
Why this improves the plan: debugging, incident response, and reproducibility all benefit from exact addressing. This is especially useful when validating sync correctness and cross-checking a specific note/document lifecycle.
|
||||
|
||||
```diff
|
||||
@@ Work Chunk 1B: CLI Arguments & Command Wiring
|
||||
pub struct NotesArgs {
|
||||
+ /// Filter by local note row id
|
||||
+ #[arg(long = "note-id", help_heading = "Filters")]
|
||||
+ pub note_id: Option<i64>,
|
||||
+
|
||||
+ /// Filter by GitLab note id
|
||||
+ #[arg(long = "gitlab-note-id", help_heading = "Filters")]
|
||||
+ pub gitlab_note_id: Option<i64>,
|
||||
+
|
||||
+ /// Filter by local discussion id
|
||||
+ #[arg(long = "discussion-id", help_heading = "Filters")]
|
||||
+ pub discussion_id: Option<i64>,
|
||||
}
|
||||
|
||||
@@ Work Chunk 1A: Filter struct
|
||||
pub struct NoteListFilters<'a> {
|
||||
+ pub note_id: Option<i64>,
|
||||
+ pub gitlab_note_id: Option<i64>,
|
||||
+ pub discussion_id: Option<i64>,
|
||||
}
|
||||
|
||||
@@ Tests to Write First
|
||||
+#[test]
|
||||
+fn test_query_notes_filter_note_id_exact() { ... }
|
||||
+#[test]
|
||||
+fn test_query_notes_filter_gitlab_note_id_exact() { ... }
|
||||
+#[test]
|
||||
+fn test_query_notes_filter_discussion_id_exact() { ... }
|
||||
```
|
||||
|
||||
If you want, I can produce a single consolidated “iteration 5” PRD diff that merges these into your exact section ordering and updates the dependency graph/migration numbering end-to-end.
|
||||
@@ -1,434 +0,0 @@
|
||||
Below are the highest-leverage revisions I’d make to this plan. I’m focusing on correctness pitfalls, SQLite gotchas, query performance on 280K notes, and reducing “dynamic SQL + param juggling” complexity—without turning this into a new ingestion project.
|
||||
|
||||
Change 1 — Fix a hard SQLite bug in --active (GROUP_CONCAT DISTINCT + separator)
|
||||
Why
|
||||
|
||||
SQLite does not allow GROUP_CONCAT(DISTINCT x, sep). With DISTINCT, SQLite only permits a single argument (GROUP_CONCAT(DISTINCT x)). Your current query will error at runtime in many SQLite versions.
|
||||
|
||||
Revision
|
||||
|
||||
Use a subquery that selects distinct participants, then GROUP_CONCAT with your separator. Note: SQLite does not guarantee that GROUP_CONCAT preserves the subquery's ORDER BY; treat the ordering as best-effort, or sort the participant list in Rust after splitting on the separator if deterministic order matters.
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/Plan.md b/Plan.md
|
||||
@@ fn query_active(...)
|
||||
- (SELECT GROUP_CONCAT(DISTINCT n.author_username, X'1F')
|
||||
- FROM notes n
|
||||
- WHERE n.discussion_id = d.id
|
||||
- AND n.is_system = 0
|
||||
- AND n.author_username IS NOT NULL) AS participants
|
||||
+ (SELECT GROUP_CONCAT(username, X'1F') FROM (
|
||||
+ SELECT DISTINCT n.author_username AS username
|
||||
+ FROM notes n
|
||||
+ WHERE n.discussion_id = d.id
|
||||
+ AND n.is_system = 0
|
||||
+ AND n.author_username IS NOT NULL
|
||||
+ ORDER BY username
|
||||
+ )) AS participants
|
||||
|
||||
Change 2 — Replace “contains('.') => exact file match” with segment-aware path classification
|
||||
Why
|
||||
|
||||
path.contains('.') misclassifies directories like:
|
||||
|
||||
.github/workflows/
|
||||
|
||||
src/v1.2/auth/
|
||||
|
||||
It also fails the “root file” case (README.md) because your mode discriminator only treats paths as paths if they contain /.
|
||||
|
||||
Revision
|
||||
|
||||
Add explicit --path to force Expert mode (covers root files cleanly).
|
||||
|
||||
Classify file-vs-dir by checking last path segment for a dot, and whether the input ends with /.
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/Plan.md b/Plan.md
|
||||
@@ pub struct WhoArgs {
|
||||
- /// Username or file path (path if contains /)
|
||||
- pub target: Option<String>,
|
||||
+ /// Username or file path shorthand (ambiguous for root files like README.md)
|
||||
+ pub target: Option<String>,
|
||||
+
|
||||
+ /// Force expert mode for a file/directory path (supports root files like README.md)
|
||||
+ #[arg(long, help_heading = "Mode", conflicts_with_all = ["active", "overlap", "reviews"])]
|
||||
+ pub path: Option<String>,
|
||||
@@ fn resolve_mode<'a>(args: &'a WhoArgs) -> Result<WhoMode<'a>> {
|
||||
- if let Some(target) = &args.target {
|
||||
+ if let Some(p) = &args.path {
|
||||
+ return Ok(WhoMode::Expert { path: p });
|
||||
+ }
|
||||
+ if let Some(target) = &args.target {
|
||||
let clean = target.strip_prefix('@').unwrap_or(target);
|
||||
if args.reviews {
|
||||
return Ok(WhoMode::Reviews { username: clean });
|
||||
}
|
||||
- // Disambiguation: if target contains '/', it's a file path.
|
||||
- // GitLab usernames never contain '/'.
|
||||
- if target.contains('/') {
|
||||
+ // Disambiguation:
|
||||
+ // - treat as path if it contains '/'
|
||||
+ // - otherwise treat as username (root files require --path)
|
||||
+ if target.contains('/') {
|
||||
return Ok(WhoMode::Expert { path: target });
|
||||
}
|
||||
return Ok(WhoMode::Workload { username: clean });
|
||||
}
|
||||
|
||||
|
||||
And update the path pattern logic used by Expert/Overlap:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/Plan.md b/Plan.md
|
||||
@@ fn query_expert(...)
|
||||
- // Normalize path for LIKE matching: add trailing % if no extension
|
||||
- let path_pattern = if path.contains('.') {
|
||||
- path.to_string() // Exact file match
|
||||
- } else {
|
||||
- let trimmed = path.trim_end_matches('/');
|
||||
- format!("{trimmed}/%")
|
||||
- };
|
||||
+ // Normalize:
|
||||
+ // - if ends_with('/') => directory prefix
|
||||
+ // - else if last segment contains '.' => file exact match
|
||||
+ // - else => directory prefix
|
||||
+ let trimmed = path.trim_end_matches('/');
|
||||
+ let last = trimmed.rsplit('/').next().unwrap_or(trimmed);
|
||||
+ let is_file = !path.ends_with('/') && last.contains('.');
|
||||
+ let path_pattern = if is_file { trimmed.to_string() } else { format!("{trimmed}/%") };
|
||||
|
||||
Change 3 — Stop building dynamic SQL strings for optional filters; always bind params
|
||||
Why
|
||||
|
||||
Right now you’re mixing:
|
||||
|
||||
dynamic project_clause string fragments
|
||||
|
||||
ad-hoc param vectors
|
||||
|
||||
placeholder renumbering by branch
|
||||
|
||||
That’s brittle and easy to regress (especially when you add more conditions later). SQLite/rusqlite can bind Option<T> to NULL, which enables a simple pattern:
|
||||
|
||||
sql
|
||||
Copy code
|
||||
AND (?3 IS NULL OR n.project_id = ?3)
|
||||
|
||||
Revision (representative; apply to all queries)
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/Plan.md b/Plan.md
|
||||
@@ fn query_expert(...)
|
||||
- let project_clause = if project_id.is_some() {
|
||||
- "AND n.project_id = ?3"
|
||||
- } else {
|
||||
- ""
|
||||
- };
|
||||
-
|
||||
- let sql = format!(
|
||||
+ let sql = format!(
|
||||
"SELECT username, role, activity_count, last_active_at FROM (
|
||||
@@
|
||||
FROM notes n
|
||||
WHERE n.position_new_path LIKE ?1
|
||||
AND n.is_system = 0
|
||||
AND n.author_username IS NOT NULL
|
||||
AND n.created_at >= ?2
|
||||
- {project_clause}
|
||||
+ AND (?3 IS NULL OR n.project_id = ?3)
|
||||
@@
|
||||
WHERE n.position_new_path LIKE ?1
|
||||
AND m.author_username IS NOT NULL
|
||||
AND m.updated_at >= ?2
|
||||
- {project_clause}
|
||||
+ AND (?3 IS NULL OR n.project_id = ?3)
|
||||
GROUP BY m.author_username
|
||||
- )"
|
||||
+ ) t"
|
||||
);
|
||||
-
|
||||
- let mut params: Vec<Box<dyn rusqlite::ToSql>> = Vec::new();
|
||||
- params.push(Box::new(path_pattern.clone()));
|
||||
- params.push(Box::new(since_ms));
|
||||
- if let Some(pid) = project_id {
|
||||
- params.push(Box::new(pid));
|
||||
- }
|
||||
- let param_refs: Vec<&dyn rusqlite::ToSql> = params.iter().map(|p| p.as_ref()).collect();
|
||||
+ let param_refs = rusqlite::params![path_pattern, since_ms, project_id];
|
||||
|
||||
|
||||
Notes:
|
||||
|
||||
Adds required derived-table alias t (some SQLite configurations are stricter).
|
||||
|
||||
Eliminates the dynamic param vector and placeholder gymnastics.
|
||||
|
||||
Change 4 — Filter “path touch” queries to DiffNotes and escape LIKE properly
|
||||
Why
|
||||
|
||||
Only DiffNotes reliably have position_new_path; including other note types can skew counts and harm performance.
|
||||
|
||||
LIKE treats % and _ as wildcards—rare in file paths, but not impossible (generated files, templates). Escaping is a low-cost robustness win.
|
||||
|
||||
Revision
|
||||
|
||||
Add note_type='DiffNote' and LIKE ... ESCAPE '\' plus a tiny escape helper.
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/Plan.md b/Plan.md
|
||||
@@ fn query_expert(...)
|
||||
- FROM notes n
|
||||
- WHERE n.position_new_path LIKE ?1
|
||||
+ FROM notes n
|
||||
+ WHERE n.note_type = 'DiffNote'
|
||||
+ AND n.position_new_path LIKE ?1 ESCAPE '\'
|
||||
AND n.is_system = 0
|
||||
@@
|
||||
diff --git a/Plan.md b/Plan.md
|
||||
@@ Helper Functions
|
||||
+fn escape_like(input: &str) -> String {
|
||||
+ input.replace('\\', "\\\\").replace('%', "\\%").replace('_', "\\_")
|
||||
+}
|
||||
|
||||
|
||||
And when building patterns:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
- let path_pattern = if is_file { trimmed.to_string() } else { format!("{trimmed}/%") };
|
||||
+ let base = escape_like(trimmed);
|
||||
+ let path_pattern = if is_file { base } else { format!("{base}/%") };
|
||||
|
||||
|
||||
Apply the same changes to query_overlap and any other position_new_path LIKE ....
|
||||
|
||||
Change 5 — Use note timestamps for “touch since” semantics (Expert/Overlap author branch)
|
||||
Why
|
||||
|
||||
In Expert/Overlap “author” branches you filter by m.updated_at >= since. That answers “MR updated recently” rather than “MR touched at this path recently”, which can surface stale ownership.
|
||||
|
||||
Revision
|
||||
|
||||
Filter by the note creation time (and use it for “last touch” where relevant). You can still compute author activity, but anchor it to note activity.
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/Plan.md b/Plan.md
|
||||
@@ fn query_overlap(...)
|
||||
- WHERE n.position_new_path LIKE ?1
|
||||
+ WHERE n.note_type = 'DiffNote'
|
||||
+ AND n.position_new_path LIKE ?1 ESCAPE '\'
|
||||
AND m.state IN ('opened', 'merged')
|
||||
AND m.author_username IS NOT NULL
|
||||
- AND m.updated_at >= ?2
|
||||
+ AND n.created_at >= ?2
|
||||
AND (?3 IS NULL OR m.project_id = ?3)
|
||||
|
||||
|
||||
Same idea in Expert mode’s “MR authors” branch.
|
||||
|
||||
Change 6 — Workload mode: apply --since consistently to unresolved discussions
|
||||
Why
|
||||
|
||||
Workload’s unresolved discussions ignore since_ms. That makes --since partially misleading and can dump very old threads.
|
||||
|
||||
Revision
|
||||
|
||||
Filter on d.last_note_at when since_ms is set.
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/Plan.md b/Plan.md
|
||||
@@ fn query_workload(...)
|
||||
- let disc_sql = format!(
|
||||
+ let disc_since = if since_ms.is_some() {
|
||||
+ "AND d.last_note_at >= ?2"
|
||||
+ } else { "" };
|
||||
+ let disc_sql = format!(
|
||||
"SELECT d.noteable_type,
|
||||
@@
|
||||
WHERE d.resolvable = 1 AND d.resolved = 0
|
||||
AND EXISTS (
|
||||
@@
|
||||
)
|
||||
{disc_project_filter}
|
||||
+ {disc_since}
|
||||
ORDER BY d.last_note_at DESC
|
||||
LIMIT {limit}"
|
||||
);
|
||||
@@
|
||||
- // Rebuild params for discussion query (only username + optional project_id)
|
||||
- let mut disc_params: Vec<Box<dyn rusqlite::ToSql>> = Vec::new();
|
||||
- disc_params.push(Box::new(username.to_string()));
|
||||
- if let Some(pid) = project_id {
|
||||
- disc_params.push(Box::new(pid));
|
||||
- }
|
||||
+ // Params: username, since_ms, project_id (NULLs ok)
|
||||
+ let disc_param_refs = rusqlite::params![username, since_ms, project_id];
|
||||
|
||||
|
||||
(If you adopt Change 3 fully, this becomes very clean.)
|
||||
|
||||
Change 7 — Make Overlap results represent “both roles” instead of collapsing to one
|
||||
Why
|
||||
|
||||
Collapsing to a single role loses valuable info (“they authored and reviewed”). Also your current “prefer author” rule is arbitrary for the “who else is touching this” question.
|
||||
|
||||
Revision
|
||||
|
||||
Track role counts separately and render as A, R, or A+R.
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/Plan.md b/Plan.md
|
||||
@@ pub struct OverlapUser {
|
||||
pub username: String,
|
||||
- pub role: String,
|
||||
- pub touch_count: u32,
|
||||
+ pub author_touch_count: u32,
|
||||
+ pub review_touch_count: u32,
|
||||
+ pub touch_count: u32,
|
||||
pub last_touch_at: i64,
|
||||
pub mr_iids: Vec<i64>,
|
||||
}
|
||||
@@ fn query_overlap(...)
|
||||
- let entry = user_map.entry(username.clone()).or_insert_with(|| OverlapUser {
|
||||
+ let entry = user_map.entry(username.clone()).or_insert_with(|| OverlapUser {
|
||||
username: username.clone(),
|
||||
- role: role.clone(),
|
||||
+ author_touch_count: 0,
|
||||
+ review_touch_count: 0,
|
||||
touch_count: 0,
|
||||
last_touch_at: 0,
|
||||
mr_iids: Vec::new(),
|
||||
});
|
||||
entry.touch_count += count;
|
||||
+ if role == "author" { entry.author_touch_count += count; }
|
||||
+ if role == "reviewer" { entry.review_touch_count += count; }
|
||||
@@ human output
|
||||
- println!(
|
||||
- " {:<16} {:<8} {:>7} {:<12} {}",
|
||||
+ println!(
|
||||
+ " {:<16} {:<6} {:>7} {:<12} {}",
|
||||
...
|
||||
);
|
||||
@@
|
||||
- user.role,
|
||||
+ format_roles(user.author_touch_count, user.review_touch_count),
|
||||
|
||||
Change 8 — Add an “Index Audit + optional migration” step (big perf win, low blast radius)
|
||||
Why
|
||||
|
||||
With 280K notes, the path/timestamp queries will degrade quickly without indexes. This isn’t “scope creep”; it’s making the feature usable.
|
||||
|
||||
Revision (plan-level)
|
||||
|
||||
Add a non-breaking migration that only creates indexes if missing.
|
||||
|
||||
Optionally add a runtime check: if EXPLAIN QUERY PLAN indicates full table scan on notes, print a dim warning in human mode.
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/Plan.md b/Plan.md
|
||||
@@ Implementation Order
|
||||
-| Step | What | Files |
|
||||
+| Step | What | Files |
|
||||
| 1 | CLI skeleton: `WhoArgs` + `Commands::Who` + dispatch + stub | `cli/mod.rs`, `commands/mod.rs`, `main.rs` |
|
||||
+| 1.5 | Index audit + add `CREATE INDEX IF NOT EXISTS` migration for who hot paths | `migrations/0xx_who_indexes.sql` |
|
||||
@@
|
||||
|
||||
|
||||
Suggested indexes (tune names to your conventions):
|
||||
|
||||
notes(note_type, position_new_path, created_at)
|
||||
|
||||
notes(discussion_id, is_system, author_username)
|
||||
|
||||
discussions(resolvable, resolved, last_note_at, project_id)
|
||||
|
||||
merge_requests(project_id, state, updated_at, author_username)
|
||||
|
||||
issue_assignees(username, issue_id)
|
||||
|
||||
Even if SQLite can’t perfectly index LIKE, these still help with join and timestamp filters.
|
||||
|
||||
Change 9 — Make robot JSON reproducible by echoing the effective query inputs
|
||||
Why
|
||||
|
||||
Agent workflows benefit from a stable “query record”: what mode ran, what path/user, resolved project, effective since, limit.
|
||||
|
||||
Revision
|
||||
|
||||
Include an input object in JSON output.
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/Plan.md b/Plan.md
|
||||
@@ struct WhoJsonData {
|
||||
mode: String,
|
||||
+ input: serde_json::Value,
|
||||
#[serde(flatten)]
|
||||
result: serde_json::Value,
|
||||
}
|
||||
@@ pub fn print_who_json(...)
|
||||
- let output = WhoJsonEnvelope {
|
||||
+ let input = serde_json::json!({
|
||||
+ "project": /* resolved or raw args.project */,
|
||||
+ "since": /* resolved since ISO */,
|
||||
+ "limit": /* args.limit */,
|
||||
+ });
|
||||
+ let output = WhoJsonEnvelope {
|
||||
ok: true,
|
||||
data: WhoJsonData {
|
||||
mode: mode.to_string(),
|
||||
+ input,
|
||||
result: data,
|
||||
},
|
||||
meta: RobotMeta { elapsed_ms },
|
||||
};
|
||||
|
||||
Change 10 — Tighten clap constraints so invalid combinations never reach resolve_mode
|
||||
Why
|
||||
|
||||
Right now conflicts are enforced manually (or not at all). Clamp the invalid combos at the CLI layer:
|
||||
|
||||
--active should conflict with target, --overlap, --reviews, --path
|
||||
|
||||
--reviews should require a username (and should conflict with Expert path modes)
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/Plan.md b/Plan.md
|
||||
@@ pub struct WhoArgs {
|
||||
- pub active: bool,
|
||||
+ #[arg(long, help_heading = "Mode", conflicts_with_all = ["target", "overlap", "reviews", "path"])]
|
||||
+ pub active: bool,
|
||||
@@
|
||||
- pub overlap: Option<String>,
|
||||
+ #[arg(long, help_heading = "Mode", conflicts_with_all = ["target", "active", "reviews", "path"])]
|
||||
+ pub overlap: Option<String>,
|
||||
@@
|
||||
- pub reviews: bool,
|
||||
+ #[arg(long, help_heading = "Mode", requires = "target", conflicts_with_all = ["active", "overlap", "path"])]
|
||||
+ pub reviews: bool,
|
||||
|
||||
Summary of what I’d definitely change
|
||||
|
||||
If you do nothing else, do these first:
|
||||
|
||||
Fix GROUP_CONCAT(DISTINCT ..., sep) in Active mode (runtime error).
|
||||
|
||||
Path classification: add --path, and stop using contains('.') globally.
|
||||
|
||||
Remove dynamic SQL + param vectors: always bind project_id as nullable and use (? IS NULL OR ...).
|
||||
|
||||
Filter to DiffNotes + LIKE escaping for correctness and fewer rows scanned.
|
||||
|
||||
Optional index migration: otherwise this will feel slow/non-deterministically slow depending on local DB state.
|
||||
|
||||
If you want, I can also provide a consolidated “v2 plan” as a single unified patch (one diff) rather than per-change snippets.
|
||||
@@ -1,303 +0,0 @@
|
||||
Below are the highest-leverage revisions I’d make to iteration 1 to tighten correctness, performance, and “agent usefulness” without blowing up scope. For each change: (1) rationale, (2) a focused unified diff against the plan you pasted.
|
||||
|
||||
Change 1 — Make robot “input echo” actually resolved (project_id, project_path, since_ms/iso, mode)
|
||||
Why
|
||||
|
||||
Your Design Principle #5 says the robot envelope should echo resolved inputs (“effective since, resolved project”), but the current input object echoes only raw CLI strings. Agents can’t reliably reproduce or compare runs (e.g., fuzzy project resolution may map differently over time).
|
||||
|
||||
This is also a reliability improvement: “what ran” should be computed once and propagated, not recomputed in output.
|
||||
|
||||
Plan diff
|
||||
diff
|
||||
Copy code
|
||||
--- a/who-command-design.md
|
||||
+++ b/who-command-design.md
|
||||
@@
|
||||
-5. **Robot-first reproducibility.** Robot JSON output includes an `input` object echoing the resolved query parameters (effective since, resolved project, limit) so agents can trace exactly what ran.
|
||||
+5. **Robot-first reproducibility.** Robot JSON output includes a `resolved_input` object (mode, since_ms + since_iso, resolved project_id + project_path, limit, db_path) so agents can trace exactly what ran.
|
||||
|
||||
@@
|
||||
-/// Main entry point. Resolves mode from args and dispatches.
|
||||
-pub fn run_who(config: &Config, args: &WhoArgs) -> Result<WhoResult> {
|
||||
+/// Main entry point. Resolves mode + resolved inputs once, then dispatches.
|
||||
+pub fn run_who(config: &Config, args: &WhoArgs) -> Result<WhoRun> {
|
||||
let db_path = get_db_path(config.storage.db_path.as_deref());
|
||||
let conn = create_connection(&db_path)?;
|
||||
|
||||
- let project_id = args
|
||||
+ let project_id = args
|
||||
.project
|
||||
.as_deref()
|
||||
.map(|p| resolve_project(&conn, p))
|
||||
.transpose()?;
|
||||
+ let project_path = project_id
|
||||
+ .map(|id| lookup_project_path(&conn, id))
|
||||
+ .transpose()?;
|
||||
|
||||
let mode = resolve_mode(args)?;
|
||||
|
||||
match mode {
|
||||
WhoMode::Expert { path } => {
|
||||
let since_ms = resolve_since(args.since.as_deref(), "6m")?;
|
||||
let result = query_expert(&conn, path, project_id, since_ms, args.limit)?;
|
||||
- Ok(WhoResult::Expert(result))
|
||||
+ Ok(WhoRun::new("expert", &db_path, project_id, project_path, since_ms, args.limit, WhoResult::Expert(result)))
|
||||
}
|
||||
@@
|
||||
}
|
||||
}
|
||||
+
|
||||
+/// Wrapper that carries resolved inputs for reproducible output.
|
||||
+pub struct WhoRun {
|
||||
+ pub mode: String,
|
||||
+ pub resolved_input: WhoResolvedInput,
|
||||
+ pub result: WhoResult,
|
||||
+}
|
||||
+
|
||||
+pub struct WhoResolvedInput {
|
||||
+ pub db_path: String,
|
||||
+ pub project_id: Option<i64>,
|
||||
+ pub project_path: Option<String>,
|
||||
+ pub since_ms: i64,
|
||||
+ pub since_iso: String,
|
||||
+ pub limit: usize,
|
||||
+}
|
||||
@@
|
||||
-pub fn print_who_json(result: &WhoResult, args: &WhoArgs, elapsed_ms: u64) {
|
||||
- let (mode, data) = match result {
|
||||
+pub fn print_who_json(run: &WhoRun, args: &WhoArgs, elapsed_ms: u64) {
|
||||
+ let (mode, data) = match &run.result {
|
||||
WhoResult::Expert(r) => ("expert", expert_to_json(r)),
|
||||
@@
|
||||
- let input = serde_json::json!({
|
||||
+ let input = serde_json::json!({
|
||||
"target": args.target,
|
||||
"path": args.path,
|
||||
"project": args.project,
|
||||
"since": args.since,
|
||||
"limit": args.limit,
|
||||
});
|
||||
+
|
||||
+ let resolved_input = serde_json::json!({
|
||||
+ "mode": run.mode,
|
||||
+ "db_path": run.resolved_input.db_path,
|
||||
+ "project_id": run.resolved_input.project_id,
|
||||
+ "project_path": run.resolved_input.project_path,
|
||||
+ "since_ms": run.resolved_input.since_ms,
|
||||
+ "since_iso": run.resolved_input.since_iso,
|
||||
+ "limit": run.resolved_input.limit,
|
||||
+ });
|
||||
@@
|
||||
- data: WhoJsonData {
|
||||
- mode: mode.to_string(),
|
||||
- input,
|
||||
- result: data,
|
||||
- },
|
||||
+ data: WhoJsonData { mode: mode.to_string(), input, resolved_input, result: data },
|
||||
meta: RobotMeta { elapsed_ms },
|
||||
};
|
||||
@@
|
||||
struct WhoJsonData {
|
||||
mode: String,
|
||||
input: serde_json::Value,
|
||||
+ resolved_input: serde_json::Value,
|
||||
#[serde(flatten)]
|
||||
result: serde_json::Value,
|
||||
}
|
||||
|
||||
Change 2 — Remove dynamic SQL format!(..LIMIT {limit}) and parameterize LIMIT everywhere
|
||||
Why
|
||||
|
||||
You explicitly prefer static SQL ((?N IS NULL OR ...)) to avoid subtle bugs; but Workload/Active use format! for LIMIT. Even though limit is typed, it’s an inconsistency that complicates statement caching and encourages future string assembly creep.
|
||||
|
||||
SQLite supports LIMIT ? with bound parameters; rusqlite can bind an i64.
|
||||
|
||||
Plan diff
|
||||
diff
|
||||
Copy code
|
||||
--- a/who-command-design.md
|
||||
+++ b/who-command-design.md
|
||||
@@
|
||||
- let issues_sql = format!(
|
||||
- "SELECT ...
|
||||
- ORDER BY i.updated_at DESC
|
||||
- LIMIT {limit}"
|
||||
- );
|
||||
- let mut stmt = conn.prepare(&issues_sql)?;
|
||||
+ let issues_sql =
|
||||
+ "SELECT ...
|
||||
+ ORDER BY i.updated_at DESC
|
||||
+ LIMIT ?4";
|
||||
+ let mut stmt = conn.prepare(issues_sql)?;
|
||||
let assigned_issues: Vec<WorkloadIssue> = stmt
|
||||
- .query_map(rusqlite::params![username, project_id, since_ms], |row| {
|
||||
+ .query_map(rusqlite::params![username, project_id, since_ms, limit as i64], |row| {
|
||||
@@
|
||||
- let authored_sql = format!(
|
||||
- "SELECT ...
|
||||
- ORDER BY m.updated_at DESC
|
||||
- LIMIT {limit}"
|
||||
- );
|
||||
- let mut stmt = conn.prepare(&authored_sql)?;
|
||||
+ let authored_sql =
|
||||
+ "SELECT ...
|
||||
+ ORDER BY m.updated_at DESC
|
||||
+ LIMIT ?4";
|
||||
+ let mut stmt = conn.prepare(authored_sql)?;
|
||||
@@
|
||||
- .query_map(rusqlite::params![username, project_id, since_ms], |row| {
|
||||
+ .query_map(rusqlite::params![username, project_id, since_ms, limit as i64], |row| {
|
||||
@@
|
||||
- let reviewing_sql = format!(
|
||||
- "SELECT ...
|
||||
- ORDER BY m.updated_at DESC
|
||||
- LIMIT {limit}"
|
||||
- );
|
||||
- let mut stmt = conn.prepare(&reviewing_sql)?;
|
||||
+ let reviewing_sql =
|
||||
+ "SELECT ...
|
||||
+ ORDER BY m.updated_at DESC
|
||||
+ LIMIT ?4";
|
||||
+ let mut stmt = conn.prepare(reviewing_sql)?;
|
||||
@@
|
||||
- .query_map(rusqlite::params![username, project_id, since_ms], |row| {
|
||||
+ .query_map(rusqlite::params![username, project_id, since_ms, limit as i64], |row| {
|
||||
@@
|
||||
- let disc_sql = format!(
|
||||
- "SELECT ...
|
||||
- ORDER BY d.last_note_at DESC
|
||||
- LIMIT {limit}"
|
||||
- );
|
||||
- let mut stmt = conn.prepare(&disc_sql)?;
|
||||
+ let disc_sql =
|
||||
+ "SELECT ...
|
||||
+ ORDER BY d.last_note_at DESC
|
||||
+ LIMIT ?4";
|
||||
+ let mut stmt = conn.prepare(disc_sql)?;
|
||||
@@
|
||||
- .query_map(rusqlite::params![username, project_id, since_ms], |row| {
|
||||
+ .query_map(rusqlite::params![username, project_id, since_ms, limit as i64], |row| {
|
||||
@@
|
||||
- let sql = format!(
|
||||
- "SELECT ...
|
||||
- ORDER BY d.last_note_at DESC
|
||||
- LIMIT {limit}"
|
||||
- );
|
||||
- let mut stmt = conn.prepare(&sql)?;
|
||||
+ let sql =
|
||||
+ "SELECT ...
|
||||
+ ORDER BY d.last_note_at DESC
|
||||
+ LIMIT ?3";
|
||||
+ let mut stmt = conn.prepare(sql)?;
|
||||
@@
|
||||
- .query_map(rusqlite::params![since_ms, project_id], |row| {
|
||||
+ .query_map(rusqlite::params![since_ms, project_id, limit as i64], |row| {
|
||||
|
||||
Change 3 — Fix path matching for dotless files (LICENSE/Makefile) via “exact OR prefix” (no new flags)
|
||||
Why
|
||||
|
||||
Your improved “dot only in last segment” heuristic still fails on dotless files (LICENSE, Makefile, Dockerfile) which are common, especially at repo root. Right now they’ll be treated as directories (LICENSE/%) and silently return nothing.
|
||||
|
||||
Best minimal UX: if user provides a path that’s ambiguous (no trailing slash), match either exact file OR directory prefix.
|
||||
|
||||
Plan diff
|
||||
diff
|
||||
Copy code
|
||||
--- a/who-command-design.md
|
||||
+++ b/who-command-design.md
|
||||
@@
|
||||
-/// Build a LIKE pattern from a user-supplied path, with proper LIKE escaping.
|
||||
-///
|
||||
-/// Rules:
|
||||
-/// - If the path ends with `/`, it's a directory prefix → `escaped_path%`
|
||||
-/// - If the last path segment contains `.`, it's a file → exact match
|
||||
-/// - Otherwise, it's a directory prefix → `escaped_path/%`
|
||||
+/// Build an exact + prefix match from a user-supplied path, with proper LIKE escaping.
|
||||
+///
|
||||
+/// Rules:
|
||||
+/// - If the path ends with `/`, treat as directory-only (prefix match)
|
||||
+/// - Otherwise, treat as ambiguous: exact match OR directory prefix
|
||||
+/// (fixes dotless files like LICENSE/Makefile without requiring new flags)
|
||||
@@
|
||||
-fn build_path_pattern(path: &str) -> String {
|
||||
+struct PathMatch {
|
||||
+ exact: String,
|
||||
+ prefix: String,
|
||||
+ dir_only: bool,
|
||||
+}
|
||||
+
|
||||
+fn build_path_match(path: &str) -> PathMatch {
|
||||
let trimmed = path.trim_end_matches('/');
|
||||
- let last_segment = trimmed.rsplit('/').next().unwrap_or(trimmed);
|
||||
- let is_file = !path.ends_with('/') && last_segment.contains('.');
|
||||
let escaped = escape_like(trimmed);
|
||||
-
|
||||
- if is_file {
|
||||
- escaped
|
||||
- } else {
|
||||
- format!("{escaped}/%")
|
||||
- }
|
||||
+ PathMatch {
|
||||
+ exact: escaped.clone(),
|
||||
+ prefix: format!("{escaped}/%"),
|
||||
+ dir_only: path.ends_with('/'),
|
||||
+ }
|
||||
}
|
||||
@@
|
||||
- let path_pattern = build_path_pattern(path);
|
||||
+ let pm = build_path_match(path);
|
||||
@@
|
||||
- AND n.position_new_path LIKE ?1 ESCAPE '\\'
|
||||
+ AND (
|
||||
+ (?4 = 1 AND n.position_new_path LIKE ?2 ESCAPE '\\')
|
||||
+ OR (?4 = 0 AND (n.position_new_path = ?1 OR n.position_new_path LIKE ?2 ESCAPE '\\'))
|
||||
+ )
|
||||
@@
|
||||
- let rows: Vec<(String, String, u32, i64)> = stmt
|
||||
- .query_map(rusqlite::params![path_pattern, since_ms, project_id], |row| {
|
||||
+ let rows: Vec<(String, String, u32, i64)> = stmt
|
||||
+        .query_map(rusqlite::params![pm.exact, pm.prefix, since_ms, i32::from(pm.dir_only), project_id], |row| {

NOTE(review): with this binding order, the predicates not shown in the hunk must be renumbered as well — `n.created_at >= ?3` and `(?5 IS NULL OR n.project_id = ?5)`. The hunk only rewrites the path condition; if the old `?2`/`?3` placeholders are left in place, `since_ms` and `project_id` will bind to the wrong positions and the query will silently filter on garbage.
|
||||
Ok((row.get(0)?, row.get(1)?, row.get(2)?, row.get(3)?))
|
||||
})?
|
||||
|
||||
|
||||
(Apply the same pattern to Overlap mode.)
|
||||
|
||||
Change 4 — Consistently exclude system notes in all DiffNote-based branches (Expert/Overlap author branches currently don’t)
|
||||
Why
|
||||
|
||||
You filter n.is_system = 0 for reviewer branches, but not in the author branches of Expert/Overlap. That can skew “author touch” via system-generated diff notes or bot activity.
|
||||
|
||||
Consistency here improves correctness and also enables more aggressive partial indexing.
|
||||
|
||||
Plan diff
|
||||
diff
|
||||
Copy code
|
||||
--- a/who-command-design.md
|
||||
+++ b/who-command-design.md
|
||||
@@
|
||||
- WHERE n.note_type = 'DiffNote'
|
||||
+ WHERE n.note_type = 'DiffNote'
|
||||
AND n.position_new_path LIKE ?1 ESCAPE '\\'
|
||||
+ AND n.is_system = 0
|
||||
AND m.author_username IS NOT NULL
|
||||
AND n.created_at >= ?2
|
||||
AND (?3 IS NULL OR m.project_id = ?3)
|
||||
@@
|
||||
- WHERE n.note_type = 'DiffNote'
|
||||
+ WHERE n.note_type = 'DiffNote'
|
||||
AND n.position_new_path LIKE ?1 ESCAPE '\\'
|
||||
+ AND n.is_system = 0
|
||||
AND m.state IN ('opened', 'merged')
|
||||
AND m.author_username IS NOT NULL
|
||||
AND n.created_at >= ?2
|
||||
AND (?3 IS NULL OR m.project_id = ?3)
|
||||
|
||||
Change 5 — Rework Migration 017 indexes to match real predicates + add one critical notes index for discussion participation
|
||||
Why
|
||||
|
||||
(a) idx_notes_diffnote_path_created currently leads with note_type even though it’s constant via partial index. You want the leading columns to match your most selective predicates: position_new_path prefix + created_at range, with optional project_id.
|
||||
|
||||
(b) Active + Workload discussion participation repeatedly hits notes by (discussion_id, author_username); you only guarantee notes(discussion_id) is indexed. Adding a narrow partial composite index pays off immediately for both “participants” and “EXISTS user participated” checks.
|
||||
|
||||
(c) The discussions index should focus on (project_id, last_note_at) with a partial predicate; resolvable/resolved are fixed constants in the hot query, so they belong in the partial index's WHERE clause rather than as leading key columns. [NOTE(review): the original sentence was truncated mid-word here — wording reconstructed from context; confirm intent.]
|
||||
@@ -1,471 +0,0 @@
|
||||
Below are the revisions I’d make to iteration 2 to improve correctness, determinism, query-plan quality, and multi-project usability without turning this into a bigger product.
|
||||
|
||||
I’m treating your plan as the “source of truth” and showing git-diff style patches against the plan text/code blocks you included.
|
||||
|
||||
Change 1 — Fix project scoping to hit the right index (DiffNote branches)
|
||||
Why
|
||||
|
||||
Your hot-path index is:
|
||||
|
||||
idx_notes_diffnote_path_created ON notes(position_new_path, created_at, project_id) WHERE note_type='DiffNote' AND is_system=0
|
||||
|
||||
But in Expert/Overlap you sometimes scope by m.project_id = ?3 (MR table), not n.project_id = ?3 (notes table). That weakens the optimizer’s ability to use the composite notes index (and can force broader joins before filtering).
|
||||
|
||||
Diff
|
||||
diff
|
||||
Copy code
|
||||
--- a/who-command-design.md
|
||||
+++ b/who-command-design.md
|
||||
@@ Query: Expert Mode @@
|
||||
- AND (?3 IS NULL OR m.project_id = ?3)
|
||||
+ -- IMPORTANT: scope on notes.project_id to maximize use of
|
||||
+ -- idx_notes_diffnote_path_created (notes is the selective table)
|
||||
+ AND (?3 IS NULL OR n.project_id = ?3)
|
||||
|
||||
@@ Query: Overlap Mode @@
|
||||
- AND (?3 IS NULL OR m.project_id = ?3)
|
||||
+ AND (?3 IS NULL OR n.project_id = ?3)
|
||||
|
||||
@@ Query: Overlap Mode (author branch) @@
|
||||
- AND (?3 IS NULL OR m.project_id = ?3)
|
||||
+ AND (?3 IS NULL OR n.project_id = ?3)
|
||||
|
||||
Change 2 — Introduce a “prefix vs exact” path query to avoid LIKE when you don’t need it
|
||||
Why
|
||||
|
||||
For exact file paths (e.g. src/auth/login.rs), you currently do:
|
||||
|
||||
position_new_path LIKE ?1 ESCAPE '\' where ?1 has no wildcard
|
||||
|
||||
That’s logically fine, but it’s a worse signal to the planner than = and can degrade performance depending on collation/case settings.
|
||||
|
||||
This doesn’t violate “static SQL” — you can pick between two static query strings.
|
||||
|
||||
Diff
|
||||
diff
|
||||
Copy code
|
||||
--- a/who-command-design.md
|
||||
+++ b/who-command-design.md
|
||||
@@ Helper: Path Pattern Construction @@
|
||||
-fn build_path_pattern(path: &str) -> String {
|
||||
+struct PathQuery {
|
||||
+ /// The parameter value to bind.
|
||||
+ value: String,
|
||||
+ /// If true: use LIKE value || '%'. If false: use '='.
|
||||
+ is_prefix: bool,
|
||||
+}
|
||||
+
|
||||
+fn build_path_query(path: &str) -> PathQuery {
|
||||
let trimmed = path.trim_end_matches('/');
|
||||
let last_segment = trimmed.rsplit('/').next().unwrap_or(trimmed);
|
||||
let is_file = !path.ends_with('/') && last_segment.contains('.');
|
||||
let escaped = escape_like(trimmed);
|
||||
|
||||
if is_file {
|
||||
- escaped
|
||||
+ PathQuery { value: escaped, is_prefix: false }
|
||||
} else {
|
||||
- format!("{escaped}/%")
|
||||
+ PathQuery { value: format!("{escaped}/%"), is_prefix: true }
|
||||
}
|
||||
}
|
||||
|
||||
|
||||
And then (example for DiffNote predicates):
|
||||
|
||||
diff
|
||||
Copy code
|
||||
@@ Query: Expert Mode @@
|
||||
- let path_pattern = build_path_pattern(path);
|
||||
+ let pq = build_path_query(path);
|
||||
|
||||
- let sql = " ... n.position_new_path LIKE ?1 ESCAPE '\\' ... ";
|
||||
+ let sql_prefix = " ... n.position_new_path LIKE ?1 ESCAPE '\\' ... ";
|
||||
+ let sql_exact = " ... n.position_new_path = ?1 ... ";
|
||||
|
||||
- let mut stmt = conn.prepare(sql)?;
|
||||
+ let mut stmt = if pq.is_prefix { conn.prepare_cached(sql_prefix)? }
|
||||
+ else { conn.prepare_cached(sql_exact)? };
|
||||
let rows = stmt.query_map(params![... pq.value ...], ...);
|
||||
|
||||
Change 3 — Push Expert aggregation into SQL (less Rust, fewer rows, SQL-level LIMIT)
|
||||
Why
|
||||
|
||||
Right now Expert does:
|
||||
|
||||
UNION ALL
|
||||
|
||||
return per-role rows
|
||||
|
||||
HashMap merge
|
||||
|
||||
score compute
|
||||
|
||||
sort/truncate
|
||||
|
||||
You can do all of that in SQL deterministically, then LIMIT ?N actually works.
|
||||
|
||||
Diff
|
||||
diff
|
||||
Copy code
|
||||
--- a/who-command-design.md
|
||||
+++ b/who-command-design.md
|
||||
@@ Query: Expert Mode @@
|
||||
- let sql = "SELECT username, role, activity_count, last_active_at FROM (
|
||||
- ...
|
||||
- )";
|
||||
+ let sql = "
|
||||
+ WITH activity AS (
|
||||
+ SELECT
|
||||
+ n.author_username AS username,
|
||||
+ 'reviewer' AS role,
|
||||
+ COUNT(*) AS cnt,
|
||||
+ MAX(n.created_at) AS last_active_at
|
||||
+ FROM notes n
|
||||
+ WHERE n.note_type = 'DiffNote'
|
||||
+ AND n.is_system = 0
|
||||
+ AND n.author_username IS NOT NULL
|
||||
+ AND n.created_at >= ?2
|
||||
+ AND (?3 IS NULL OR n.project_id = ?3)
|
||||
+ AND (
|
||||
+ (?4 = 1 AND n.position_new_path LIKE ?1 ESCAPE '\\') OR
|
||||
+ (?4 = 0 AND n.position_new_path = ?1)
|
||||
+ )
|
||||
+ GROUP BY n.author_username
|
||||
+
|
||||
+ UNION ALL
|
||||
+
|
||||
+ SELECT
|
||||
+ m.author_username AS username,
|
||||
+ 'author' AS role,
|
||||
+ COUNT(DISTINCT m.id) AS cnt,
|
||||
+ MAX(n.created_at) AS last_active_at
|
||||
+ FROM merge_requests m
|
||||
+ JOIN discussions d ON d.merge_request_id = m.id
|
||||
+ JOIN notes n ON n.discussion_id = d.id
|
||||
+ WHERE n.note_type = 'DiffNote'
|
||||
+ AND n.is_system = 0
|
||||
+ AND m.author_username IS NOT NULL
|
||||
+ AND n.created_at >= ?2
|
||||
+ AND (?3 IS NULL OR n.project_id = ?3)
|
||||
+ AND (
|
||||
+ (?4 = 1 AND n.position_new_path LIKE ?1 ESCAPE '\\') OR
|
||||
+ (?4 = 0 AND n.position_new_path = ?1)
|
||||
+ )
|
||||
+ GROUP BY m.author_username
|
||||
+ )
|
||||
+ SELECT
|
||||
+ username,
|
||||
+ SUM(CASE WHEN role='reviewer' THEN cnt ELSE 0 END) AS review_count,
|
||||
+ SUM(CASE WHEN role='author' THEN cnt ELSE 0 END) AS author_count,
|
||||
+ MAX(last_active_at) AS last_active_at,
|
||||
+ (SUM(CASE WHEN role='reviewer' THEN cnt ELSE 0 END) * 3.0) +
|
||||
+ (SUM(CASE WHEN role='author' THEN cnt ELSE 0 END) * 2.0) AS score
|
||||
+ FROM activity
|
||||
+ GROUP BY username
|
||||
+ ORDER BY score DESC, last_active_at DESC, username ASC
|
||||
+ LIMIT ?5
|
||||
+ ";
|
||||
|
||||
- // Aggregate by username: combine reviewer + author counts
|
||||
- let mut user_map: HashMap<...> = HashMap::new();
|
||||
- ...
|
||||
- experts.sort_by(...); experts.truncate(limit);
|
||||
+ // No Rust-side merge/sort needed; SQL already returns final rows.
|
||||
|
||||
Change 4 — Overlap output is ambiguous across projects: include stable MR refs (project_path!iid)
|
||||
Why
|
||||
|
||||
mr_iids: Vec<i64> is ambiguous in a multi-project DB. !123 only means something with a project.
|
||||
|
||||
Also: your MR IID dedup is currently Vec.contains() inside a loop (O(n²)). Use a HashSet.
|
||||
|
||||
Diff
|
||||
diff
|
||||
Copy code
|
||||
--- a/who-command-design.md
|
||||
+++ b/who-command-design.md
|
||||
@@ OverlapResult @@
|
||||
pub struct OverlapUser {
|
||||
pub username: String,
|
||||
@@
|
||||
- pub mr_iids: Vec<i64>,
|
||||
+ /// Stable MR references like "group/project!123"
|
||||
+ pub mr_refs: Vec<String>,
|
||||
}
|
||||
|
||||
@@ Query: Overlap Mode (SQL) @@
|
||||
- GROUP_CONCAT(DISTINCT m.iid) AS mr_iids
|
||||
+ GROUP_CONCAT(DISTINCT (p.path_with_namespace || '!' || m.iid)) AS mr_refs
|
||||
FROM notes n
|
||||
JOIN discussions d ON n.discussion_id = d.id
|
||||
JOIN merge_requests m ON d.merge_request_id = m.id
|
||||
+ JOIN projects p ON m.project_id = p.id
|
||||
@@
|
||||
- GROUP_CONCAT(DISTINCT m.iid) AS mr_iids
|
||||
+ GROUP_CONCAT(DISTINCT (p.path_with_namespace || '!' || m.iid)) AS mr_refs
|
||||
FROM merge_requests m
|
||||
JOIN discussions d ON d.merge_request_id = m.id
|
||||
JOIN notes n ON n.discussion_id = d.id
|
||||
+ JOIN projects p ON m.project_id = p.id
|
||||
|
||||
@@ Query: Overlap Mode (Rust merge) @@
|
||||
- let mr_iids: Vec<i64> = mr_iids_csv ...
|
||||
+ let mr_refs: Vec<String> = mr_refs_csv
|
||||
+ .as_deref()
|
||||
+ .map(|csv| csv.split(',').map(|s| s.trim().to_string()).collect())
|
||||
+ .unwrap_or_default();
|
||||
@@
|
||||
- // Merge MR IIDs, deduplicate
|
||||
- for iid in &mr_iids {
|
||||
- if !entry.mr_iids.contains(iid) {
|
||||
- entry.mr_iids.push(*iid);
|
||||
- }
|
||||
- }
|
||||
+ // Merge MR refs, deduplicate
|
||||
+ use std::collections::HashSet;
|
||||
+ let mut set: HashSet<String> = entry.mr_refs.drain(..).collect();
|
||||
+ for r in mr_refs { set.insert(r); }
|
||||
+ entry.mr_refs = set.into_iter().collect();
|
||||
|
||||
Change 5 — Active mode: avoid correlated subqueries by preselecting discussions, then aggregating notes once
|
||||
Why
|
||||
|
||||
Your Active query does two correlated subqueries per discussion row:
|
||||
|
||||
note_count
|
||||
|
||||
participants
|
||||
|
||||
With LIMIT 20 it’s not catastrophic, but it is still unnecessary work and creates “spiky” behavior if the planner chooses poorly.
|
||||
|
||||
Pattern to use:
|
||||
|
||||
CTE selects the limited set of discussions
|
||||
|
||||
Join notes once, aggregate with GROUP BY
|
||||
|
||||
Diff
|
||||
diff
|
||||
Copy code
|
||||
--- a/who-command-design.md
|
||||
+++ b/who-command-design.md
|
||||
@@ Query: Active Mode @@
|
||||
- let sql =
|
||||
- "SELECT
|
||||
- d.noteable_type,
|
||||
- ...
|
||||
- (SELECT COUNT(*) FROM notes n
|
||||
- WHERE n.discussion_id = d.id AND n.is_system = 0) AS note_count,
|
||||
- (SELECT GROUP_CONCAT(username, X'1F') FROM (
|
||||
- SELECT DISTINCT n.author_username AS username
|
||||
- FROM notes n
|
||||
- WHERE n.discussion_id = d.id
|
||||
- AND n.is_system = 0
|
||||
- AND n.author_username IS NOT NULL
|
||||
- ORDER BY username
|
||||
- )) AS participants
|
||||
- FROM discussions d
|
||||
- ...
|
||||
- LIMIT ?3";
|
||||
+ let sql = "
|
||||
+ WITH picked AS (
|
||||
+ SELECT d.id, d.noteable_type, d.issue_id, d.merge_request_id, d.project_id, d.last_note_at
|
||||
+ FROM discussions d
|
||||
+ WHERE d.resolvable = 1 AND d.resolved = 0
|
||||
+ AND d.last_note_at >= ?1
|
||||
+ AND (?2 IS NULL OR d.project_id = ?2)
|
||||
+ ORDER BY d.last_note_at DESC
|
||||
+ LIMIT ?3
|
||||
+ ),
|
||||
+ note_agg AS (
|
||||
+ SELECT
|
||||
+ n.discussion_id,
|
||||
+ COUNT(*) AS note_count,
|
||||
+ GROUP_CONCAT(n.author_username, X'1F') AS participants
|
||||
+ FROM (
|
||||
+ SELECT DISTINCT discussion_id, author_username
|
||||
+ FROM notes
|
||||
+ WHERE is_system = 0 AND author_username IS NOT NULL
|
||||
+ ) n
|
||||
+ JOIN picked p ON p.id = n.discussion_id
|
||||
+      GROUP BY n.discussion_id

NOTE(review): as written, `note_agg` has a semantic regression — the inner `SELECT DISTINCT discussion_id, author_username` dedups note rows down to one per participant, so `COUNT(*)` yields the PARTICIPANT count, not the per-discussion note count that the original correlated subquery (`SELECT COUNT(*) FROM notes WHERE discussion_id = d.id AND is_system = 0`) returned. If `note_count` must keep meaning "non-system notes in the discussion", compute it in a separate aggregate over raw notes (`GROUP BY discussion_id`, no DISTINCT) and join that CTE alongside the participants aggregate.
|
||||
+ )
|
||||
+ SELECT
|
||||
+ p.noteable_type,
|
||||
+ COALESCE(i.iid, m.iid) AS entity_iid,
|
||||
+ COALESCE(i.title, m.title) AS entity_title,
|
||||
+ proj.path_with_namespace,
|
||||
+ p.last_note_at,
|
||||
+ COALESCE(na.note_count, 0) AS note_count,
|
||||
+ COALESCE(na.participants, '') AS participants
|
||||
+ FROM picked p
|
||||
+ JOIN projects proj ON p.project_id = proj.id
|
||||
+ LEFT JOIN issues i ON p.issue_id = i.id
|
||||
+ LEFT JOIN merge_requests m ON p.merge_request_id = m.id
|
||||
+ LEFT JOIN note_agg na ON na.discussion_id = p.id
|
||||
+ ORDER BY p.last_note_at DESC
|
||||
+ ";
|
||||
|
||||
Change 6 — Use prepare_cached() everywhere (cheap perf win, no scope creep)
|
||||
Why
|
||||
|
||||
You already worked hard to keep SQL static. Taking advantage of sqlite statement caching completes the loop.
|
||||
|
||||
Diff
|
||||
diff
|
||||
Copy code
|
||||
--- a/who-command-design.md
|
||||
+++ b/who-command-design.md
|
||||
@@ Query functions @@
|
||||
- let mut stmt = conn.prepare(sql)?;
|
||||
+ let mut stmt = conn.prepare_cached(sql)?;
|
||||
|
||||
|
||||
Apply in all query fns (query_workload, query_reviews, query_active, query_expert, query_overlap, lookup_project_path).
|
||||
|
||||
Change 7 — Human output: show project_path where ambiguity exists (Workload + Overlap)
|
||||
Why
|
||||
|
||||
When not project-scoped, #42 and !100 aren’t unique. You already have project paths in the query results — you’re just not printing them.
|
||||
|
||||
Diff
|
||||
diff
|
||||
Copy code
|
||||
--- a/who-command-design.md
|
||||
+++ b/who-command-design.md
|
||||
@@ print_workload_human @@
|
||||
- println!(
|
||||
- " {} {} {}",
|
||||
+ println!(
|
||||
+ " {} {} {} {}",
|
||||
style(format!("#{:<5}", item.iid)).cyan(),
|
||||
truncate_str(&item.title, 45),
|
||||
style(format_relative_time(item.updated_at)).dim(),
|
||||
+ style(&item.project_path).dim(),
|
||||
);
|
||||
|
||||
@@ print_workload_human (MRs) @@
|
||||
- println!(
|
||||
- " {} {}{} {}",
|
||||
+ println!(
|
||||
+ " {} {}{} {} {}",
|
||||
style(format!("!{:<5}", mr.iid)).cyan(),
|
||||
truncate_str(&mr.title, 40),
|
||||
style(draft).dim(),
|
||||
style(format_relative_time(mr.updated_at)).dim(),
|
||||
+ style(&mr.project_path).dim(),
|
||||
);
|
||||
|
||||
@@ print_overlap_human @@
|
||||
- let mr_str = user.mr_iids.iter().take(5).map(|iid| format!("!{iid}")).collect::<Vec<_>>().join(", ");
|
||||
+ let mr_str = user.mr_refs.iter().take(5).cloned().collect::<Vec<_>>().join(", ");
|
||||
|
||||
Change 8 — Robot JSON: add stable IDs + “defaulted” flags for reproducibility
|
||||
Why
|
||||
|
||||
You already added resolved_input — good. Two more reproducibility gaps remain:
|
||||
|
||||
Agents can’t reliably “open” an entity without IDs (discussion_id, mr_id, issue_id).
|
||||
|
||||
Agents can’t tell whether since was user-provided vs defaulted (important when replaying intent).
|
||||
|
||||
Diff
|
||||
diff
|
||||
Copy code
|
||||
--- a/who-command-design.md
|
||||
+++ b/who-command-design.md
|
||||
@@ WhoResolvedInput @@
|
||||
pub struct WhoResolvedInput {
|
||||
@@
|
||||
pub since_ms: Option<i64>,
|
||||
pub since_iso: Option<String>,
|
||||
+ pub since_was_default: bool,
|
||||
pub limit: usize,
|
||||
}
|
||||
|
||||
@@ run_who @@
|
||||
- let since_ms = resolve_since(args.since.as_deref(), "6m")?;
|
||||
+ let since_was_default = args.since.is_none();
|
||||
+ let since_ms = resolve_since(args.since.as_deref(), "6m")?;
|
||||
Ok(WhoRun {
|
||||
resolved_input: WhoResolvedInput {
|
||||
@@
|
||||
since_ms: Some(since_ms),
|
||||
since_iso: Some(ms_to_iso(since_ms)),
|
||||
+ since_was_default,
|
||||
limit: args.limit,
|
||||
},
|
||||
|
||||
@@ print_who_json resolved_input @@
|
||||
let resolved_input = serde_json::json!({
|
||||
@@
|
||||
"since_ms": run.resolved_input.since_ms,
|
||||
"since_iso": run.resolved_input.since_iso,
|
||||
+ "since_was_default": run.resolved_input.since_was_default,
|
||||
"limit": run.resolved_input.limit,
|
||||
});
|
||||
|
||||
|
||||
And for Active/Workload discussion items, add IDs in SQL and JSON:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
@@ ActiveDiscussion @@
|
||||
pub struct ActiveDiscussion {
|
||||
+ pub discussion_id: i64,
|
||||
@@
|
||||
}
|
||||
|
||||
@@ query_active SELECT @@
|
||||
- SELECT
|
||||
- p.noteable_type,
|
||||
+ SELECT
|
||||
+ p.id AS discussion_id,
|
||||
+ p.noteable_type,
|
||||
|
||||
@@ active_to_json @@
|
||||
- "discussions": r.discussions.iter().map(|d| json!({
|
||||
+ "discussions": r.discussions.iter().map(|d| json!({
|
||||
+ "discussion_id": d.discussion_id,
|
||||
...
|
||||
}))
|
||||
|
||||
Change 9 — Make performance verification explicit: require EXPLAIN QUERY PLAN checks for each mode
|
||||
Why
|
||||
|
||||
You’re adding indexes specifically for these queries. The only way to ensure the planner is doing what you think is to lock in a short perf checklist (especially after schema drift or SQLite version differences).
|
||||
|
||||
Diff
|
||||
diff
|
||||
Copy code
|
||||
--- a/who-command-design.md
|
||||
+++ b/who-command-design.md
|
||||
@@ Verification @@
|
||||
# Manual verification against real data
|
||||
cargo run --release -- who src/features/global-search/
|
||||
@@
|
||||
cargo run --release -- who src/features/global-search/ -p typescript # project scoped
|
||||
+
|
||||
+# Perf verification (required before merge):
|
||||
+# Confirm idx_notes_diffnote_path_created is used for Expert/Overlap and
|
||||
+# idx_discussions_unresolved_recent is used for Active.
|
||||
+sqlite3 path/to/db.sqlite "
|
||||
+ EXPLAIN QUERY PLAN
|
||||
+ SELECT ... -- paste final Expert SQL with representative bindings
|
||||
+";
|
||||
|
||||
|
||||
(Keep it lightweight: one representative query per mode is enough.)
|
||||
|
||||
Net effect
|
||||
|
||||
Correctness: project scoping hits the notes index; IDs added for agent workflows.
|
||||
|
||||
Performance: fewer rows/materialization in Expert; statement caching everywhere; Active avoids correlated subqueries.
|
||||
|
||||
UX: human output no longer ambiguous across projects; Overlap MR references become actionable.
|
||||
|
||||
Reproducibility: agents can distinguish defaults vs explicit inputs; can dereference entities reliably.
|
||||
|
||||
If you want one “highest ROI” subset to implement first: Change 1 + Change 4 + Change 6 + Change 7. That’s where the real operational value lands.
|
||||
@@ -1,3 +0,0 @@
|
||||
ChatGPT said:
|
||||
Proposing code revisions for performance and determinism
|
||||
Answer now
|
||||
@@ -1,356 +0,0 @@
|
||||
Below are the highest-leverage revisions I’d make. They’re tightly scoped (no new tables/APIs), but fix a few real correctness issues and make the outputs more actionable.
|
||||
|
||||
1) Fix a correctness bug in PathQuery: don’t escape for =, and make --path Makefile actually work
|
||||
Why
|
||||
|
||||
Bug: build_path_query() currently runs escape_like() even when is_prefix = false (exact match). That will break exact matches for paths containing _, %, or \ because = does not treat those as metacharacters (so the escaped string won’t equal the stored path).
|
||||
|
||||
UX mismatch: The plan says --path handles dotless root files (Makefile/LICENSE), but the current logic still treats them as directory prefixes (Makefile/%) → zero results.
|
||||
|
||||
Change
|
||||
|
||||
Only escape for LIKE.
|
||||
|
||||
Treat root paths (no /) passed via --path as exact matches by default (unless they end with /).
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@
|
||||
-/// Build a path query from a user-supplied path.
|
||||
-///
|
||||
-/// Rules:
|
||||
-/// - If the path ends with `/`, it's a directory prefix -> `escaped_path%` (LIKE)
|
||||
-/// - If the last path segment contains `.`, it's a file -> exact match (=)
|
||||
-/// - Otherwise, it's a directory prefix -> `escaped_path/%` (LIKE)
|
||||
+/// Build a path query from a user-supplied path.
|
||||
+///
|
||||
+/// Rules:
|
||||
+/// - If the path ends with `/`, it's a directory prefix -> `escaped_path/%` (LIKE)
|
||||
+/// - If the path is a root path (no `/`) and does NOT end with `/`, treat as exact (=)
|
||||
+/// (this makes `--path Makefile` and `--path LICENSE` work as intended)
|
||||
+/// - Else if the last path segment contains `.`, treat as exact (=)
|
||||
+/// - Otherwise, treat as directory prefix -> `escaped_path/%` (LIKE)
|
||||
@@
|
||||
-fn build_path_query(path: &str) -> PathQuery {
|
||||
+fn build_path_query(path: &str) -> PathQuery {
|
||||
let trimmed = path.trim_end_matches('/');
|
||||
let last_segment = trimmed.rsplit('/').next().unwrap_or(trimmed);
|
||||
- let is_file = !path.ends_with('/') && last_segment.contains('.');
|
||||
- let escaped = escape_like(trimmed);
|
||||
+ let is_root = !trimmed.contains('/');
|
||||
+ let is_file = !path.ends_with('/') && (is_root || last_segment.contains('.'));
|
||||
|
||||
if is_file {
|
||||
PathQuery {
|
||||
- value: escaped,
|
||||
+ // IMPORTANT: do NOT escape for exact match (=)
|
||||
+ value: trimmed.to_string(),
|
||||
is_prefix: false,
|
||||
}
|
||||
} else {
|
||||
+ let escaped = escape_like(trimmed);
|
||||
PathQuery {
|
||||
value: format!("{escaped}/%"),
|
||||
is_prefix: true,
|
||||
}
|
||||
}
|
||||
}
|
||||
@@
|
||||
-/// **Known limitation:** Dotless root files (LICENSE, Makefile, Dockerfile)
|
||||
-/// without a trailing `/` will be treated as directory prefixes. Use `--path`
|
||||
-/// for these — the `--path` flag passes through to Expert mode directly,
|
||||
-/// and the `build_path_query` output for "LICENSE" is a prefix `LICENSE/%`
|
||||
-/// which will simply return zero results (a safe, obvious failure mode that the
|
||||
-/// help text addresses).
|
||||
+/// Note: Root file paths passed via `--path` (including dotless files like Makefile/LICENSE)
|
||||
+/// are treated as exact matches unless they end with `/`.
|
||||
|
||||
|
||||
Also update the --path help text to be explicit:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@
|
||||
- /// Force expert mode for a file/directory path (handles root files like
|
||||
- /// README.md, LICENSE, Makefile that lack a / and can't be auto-detected)
|
||||
+ /// Force expert mode for a file/directory path.
|
||||
+ /// Root files (README.md, LICENSE, Makefile) are treated as exact matches.
|
||||
+ /// Use a trailing `/` to force directory-prefix matching.
|
||||
|
||||
2) Fix Active mode: your note_count is currently counting participants, and the CTE scans too broadly
|
||||
Why
|
||||
|
||||
In note_agg, you do SELECT DISTINCT discussion_id, author_username and then COUNT(*) AS note_count. That’s participant count, not note count.
|
||||
|
||||
The current note_agg also builds the DISTINCT set from all notes then joins to picked. It’s avoidable work.
|
||||
|
||||
Change
|
||||
|
||||
Split into two aggregations scoped to picked:
|
||||
|
||||
note_counts: counts non-system notes per picked discussion.
|
||||
|
||||
participants: distinct usernames per picked discussion, then GROUP_CONCAT.
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@
|
||||
- note_agg AS (
|
||||
- SELECT
|
||||
- n.discussion_id,
|
||||
- COUNT(*) AS note_count,
|
||||
- GROUP_CONCAT(n.author_username, X'1F') AS participants
|
||||
- FROM (
|
||||
- SELECT DISTINCT discussion_id, author_username
|
||||
- FROM notes
|
||||
- WHERE is_system = 0 AND author_username IS NOT NULL
|
||||
- ) n
|
||||
- JOIN picked p ON p.id = n.discussion_id
|
||||
- GROUP BY n.discussion_id
|
||||
- )
|
||||
+ note_counts AS (
|
||||
+ SELECT
|
||||
+ n.discussion_id,
|
||||
+ COUNT(*) AS note_count
|
||||
+ FROM notes n
|
||||
+ JOIN picked p ON p.id = n.discussion_id
|
||||
+ WHERE n.is_system = 0
|
||||
+ GROUP BY n.discussion_id
|
||||
+ ),
|
||||
+ participants AS (
|
||||
+ SELECT
|
||||
+ x.discussion_id,
|
||||
+ GROUP_CONCAT(x.author_username, X'1F') AS participants
|
||||
+ FROM (
|
||||
+ SELECT DISTINCT n.discussion_id, n.author_username
|
||||
+ FROM notes n
|
||||
+ JOIN picked p ON p.id = n.discussion_id
|
||||
+ WHERE n.is_system = 0 AND n.author_username IS NOT NULL
|
||||
+ ) x
|
||||
+ GROUP BY x.discussion_id
|
||||
+ )
|
||||
@@
|
||||
- LEFT JOIN note_agg na ON na.discussion_id = p.id
|
||||
+ LEFT JOIN note_counts nc ON nc.discussion_id = p.id
|
||||
+ LEFT JOIN participants pa ON pa.discussion_id = p.id
|
||||
@@
|
||||
- COALESCE(na.note_count, 0) AS note_count,
|
||||
- COALESCE(na.participants, '') AS participants
|
||||
+ COALESCE(nc.note_count, 0) AS note_count,
|
||||
+ COALESCE(pa.participants, '') AS participants
|
||||
|
||||
|
||||
Net effect: correctness fix + more predictable perf.
|
||||
|
||||
Add a test that would have failed before:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@
|
||||
#[test]
|
||||
fn test_active_query() {
|
||||
@@
|
||||
- insert_diffnote(&conn, 1, 1, 1, "reviewer_b", "src/foo.rs", "needs work");
|
||||
+ insert_diffnote(&conn, 1, 1, 1, "reviewer_b", "src/foo.rs", "needs work");
|
||||
+ insert_diffnote(&conn, 2, 1, 1, "reviewer_b", "src/foo.rs", "follow-up");
|
||||
@@
|
||||
- assert_eq!(result.discussions[0].participants, vec!["reviewer_b"]);
|
||||
+ assert_eq!(result.discussions[0].participants, vec!["reviewer_b"]);
|
||||
+ assert_eq!(result.discussions[0].note_count, 2);
|
||||
|
||||
3) Index fix: idx_discussions_unresolved_recent won’t help global --active ordering
|
||||
Why
|
||||
|
||||
Your index is (project_id, last_note_at) with WHERE resolvable=1 AND resolved=0.
|
||||
|
||||
When --active is not project-scoped (common default), SQLite can’t use (project_id, last_note_at) to satisfy ORDER BY last_note_at DESC efficiently because project_id isn’t constrained.
|
||||
|
||||
This can turn into a scan+sort over potentially large unresolved sets.
|
||||
|
||||
Change
|
||||
|
||||
Keep the project-scoped index, but add a global ordering index (partial, still small):
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@
|
||||
CREATE INDEX IF NOT EXISTS idx_discussions_unresolved_recent
|
||||
ON discussions(project_id, last_note_at)
|
||||
WHERE resolvable = 1 AND resolved = 0;
|
||||
+
|
||||
+-- Active (global): unresolved discussions by recency (no project scope).
|
||||
+-- Supports ORDER BY last_note_at DESC LIMIT N when project_id is unconstrained.
|
||||
+CREATE INDEX IF NOT EXISTS idx_discussions_unresolved_recent_global
|
||||
+ ON discussions(last_note_at)
|
||||
+ WHERE resolvable = 1 AND resolved = 0;
|
||||
|
||||
4) Make Overlap “touches” coherent: count MRs for reviewers, not DiffNotes
|
||||
Why
|
||||
|
||||
Overlap’s question is “Who else has MRs touching my files?” but:
|
||||
|
||||
reviewer branch uses COUNT(*) (DiffNotes)
|
||||
|
||||
author branch uses COUNT(DISTINCT m.id) (MRs)
|
||||
|
||||
Those are different units; summing them into touch_count is misleading.
|
||||
|
||||
Change
|
||||
|
||||
Count distinct MRs on the reviewer branch too:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@
|
||||
- COUNT(*) AS touch_count,
|
||||
+ COUNT(DISTINCT m.id) AS touch_count,
|
||||
MAX(n.created_at) AS last_touch_at,
|
||||
GROUP_CONCAT(DISTINCT (p.path_with_namespace || '!' || m.iid)) AS mr_refs
|
||||
|
||||
|
||||
Also update human output labeling:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@
|
||||
- style("Touches").bold(),
|
||||
+ style("MRs").bold(),
|
||||
|
||||
|
||||
(You still preserve “strength” via mr_refs and last_touch_at.)
|
||||
|
||||
5) Make outputs more actionable: add a canonical ref field (group/project!iid, group/project#iid)
|
||||
Why
|
||||
|
||||
You already do this for Overlap (mr_refs). Doing the same for Workload and Active reduces friction for both humans and agents:
|
||||
|
||||
humans can copy/paste a single token
|
||||
|
||||
robots don’t need to stitch project_path + iid + prefix
|
||||
|
||||
Change (Workload structs + SQL)
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@
|
||||
pub struct WorkloadIssue {
|
||||
pub iid: i64,
|
||||
+ pub ref_: String,
|
||||
pub title: String,
|
||||
pub project_path: String,
|
||||
pub updated_at: i64,
|
||||
}
|
||||
@@
|
||||
pub struct WorkloadMr {
|
||||
pub iid: i64,
|
||||
+ pub ref_: String,
|
||||
pub title: String,
|
||||
pub draft: bool,
|
||||
pub project_path: String,
|
||||
@@
|
||||
- let issues_sql =
|
||||
- "SELECT i.iid, i.title, p.path_with_namespace, i.updated_at
|
||||
+ let issues_sql =
|
||||
+ "SELECT i.iid,
|
||||
+ (p.path_with_namespace || '#' || i.iid) AS ref,
|
||||
+ i.title, p.path_with_namespace, i.updated_at
|
||||
@@
|
||||
- iid: row.get(0)?,
|
||||
- title: row.get(1)?,
|
||||
- project_path: row.get(2)?,
|
||||
- updated_at: row.get(3)?,
|
||||
+ iid: row.get(0)?,
|
||||
+ ref_: row.get(1)?,
|
||||
+ title: row.get(2)?,
|
||||
+ project_path: row.get(3)?,
|
||||
+ updated_at: row.get(4)?,
|
||||
})
|
||||
@@
|
||||
- let authored_sql =
|
||||
- "SELECT m.iid, m.title, m.draft, p.path_with_namespace, m.updated_at
|
||||
+ let authored_sql =
|
||||
+ "SELECT m.iid,
|
||||
+ (p.path_with_namespace || '!' || m.iid) AS ref,
|
||||
+ m.title, m.draft, p.path_with_namespace, m.updated_at
|
||||
@@
|
||||
- iid: row.get(0)?,
|
||||
- title: row.get(1)?,
|
||||
- draft: row.get::<_, i32>(2)? != 0,
|
||||
- project_path: row.get(3)?,
|
||||
+ iid: row.get(0)?,
|
||||
+ ref_: row.get(1)?,
|
||||
+ title: row.get(2)?,
|
||||
+ draft: row.get::<_, i32>(3)? != 0,
|
||||
+ project_path: row.get(4)?,
|
||||
author_username: None,
|
||||
- updated_at: row.get(4)?,
|
||||
+ updated_at: row.get(5)?,
|
||||
})
|
||||
|
||||
|
||||
Then use ref_ in human output + robot JSON.
|
||||
|
||||
6) Reviews mode: tolerate leading whitespace before **prefix**
|
||||
Why
|
||||
|
||||
Many people write " **suggestion**: ...". Current LIKE '**%**%' misses that.
|
||||
|
||||
Change
|
||||
|
||||
Use ltrim(n.body) consistently:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@
|
||||
- AND n.body LIKE '**%**%'
|
||||
+ AND ltrim(n.body) LIKE '**%**%'
|
||||
@@
|
||||
- SUBSTR(n.body, 3, INSTR(SUBSTR(n.body, 3), '**') - 1) AS raw_prefix,
|
||||
+ SUBSTR(ltrim(n.body), 3, INSTR(SUBSTR(ltrim(n.body), 3), '**') - 1) AS raw_prefix,
|
||||
|
||||
7) Add two small tests that catch the above regressions
|
||||
Why
|
||||
|
||||
These are exactly the kind of issues that slip through without targeted tests.
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/plan.md b/plan.md
|
||||
@@
|
||||
#[test]
|
||||
fn test_escape_like() {
|
||||
@@
|
||||
}
|
||||
+
|
||||
+ #[test]
|
||||
+ fn test_build_path_query_exact_does_not_escape() {
|
||||
+ // '_' must not be escaped for '='
|
||||
+ let pq = build_path_query("README_with_underscore.md");
|
||||
+ assert_eq!(pq.value, "README_with_underscore.md");
|
||||
+ assert!(!pq.is_prefix);
|
||||
+ }
|
||||
+
|
||||
+ #[test]
|
||||
+ fn test_path_flag_dotless_root_file_is_exact() {
|
||||
+ let pq = build_path_query("Makefile");
|
||||
+ assert_eq!(pq.value, "Makefile");
|
||||
+ assert!(!pq.is_prefix);
|
||||
+ }
|
||||
|
||||
Summary of net effect
|
||||
|
||||
Correctness fixes: exact-path escaping bug; Active.note_count bug.
|
||||
|
||||
Perf fixes: global --active index; avoid broad note scans in Active.
|
||||
|
||||
Usefulness upgrades: coherent overlap “touch” metric; canonical refs everywhere; reviews prefix more robust.
|
||||
|
||||
If you want one extra “stretch” that still isn’t scope creep: add an unscoped warning line in human output when project_id == None (e.g., “Aggregated across projects; use -p to scope”) for Expert/Overlap/Active. That’s pure presentation, but prevents misinterpretation in multi-project DBs.
|
||||
@@ -1,471 +0,0 @@
|
||||
Proposed revisions (Iteration 6)
|
||||
|
||||
Below are the highest-leverage changes I’d make on top of your current Iteration 5 plan, with rationale and git-diff style edits to the plan text/snippets.
|
||||
|
||||
1) Fix a real edge case: dotless non-root files (src/Dockerfile, infra/Makefile, etc.)
|
||||
Why
|
||||
|
||||
Your current build_path_query() treats dotless last segments as directories (prefix match) unless the path is root. That misclassifies legitimate dotless files inside directories and silently produces path/% (zero hits or wrong hits).
|
||||
|
||||
Best minimal fix: keep your static SQL approach, but add a DB existence probe (static SQL) for path queries:
|
||||
|
||||
If user didn’t force directory (/), and exact path exists in DiffNotes, treat as exact =.
|
||||
|
||||
Otherwise use prefix LIKE 'dir/%'.
|
||||
|
||||
This avoids new CLI flags, avoids heuristics lists, and uses your existing partial index (idx_notes_diffnote_path_created) efficiently.
|
||||
|
||||
Diff
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/Plan.md b/Plan.md
|
||||
@@
|
||||
-struct PathQuery {
|
||||
+struct PathQuery {
|
||||
/// The parameter value to bind.
|
||||
value: String,
|
||||
/// If true: use `LIKE value ESCAPE '\'`. If false: use `= value`.
|
||||
is_prefix: bool,
|
||||
}
|
||||
|
||||
-/// Build a path query from a user-supplied path.
|
||||
+/// Build a path query from a user-supplied path, with a DB probe for dotless files.
|
||||
@@
|
||||
-fn build_path_query(path: &str) -> PathQuery {
|
||||
+fn build_path_query(conn: &Connection, path: &str) -> Result<PathQuery> {
|
||||
let trimmed = path.trim_end_matches('/');
|
||||
let last_segment = trimmed.rsplit('/').next().unwrap_or(trimmed);
|
||||
let is_root = !trimmed.contains('/');
|
||||
- let is_file = !path.ends_with('/') && (is_root || last_segment.contains('.'));
|
||||
+ let forced_dir = path.ends_with('/');
|
||||
+ let looks_like_file = !forced_dir && (is_root || last_segment.contains('.'));
|
||||
+
|
||||
+ // If it doesn't "look like a file" but the exact path exists in DiffNotes,
|
||||
+ // treat as exact (handles src/Dockerfile, infra/Makefile, etc.).
|
||||
+ let exact_exists = if !looks_like_file && !forced_dir {
|
||||
+ conn.query_row(
|
||||
+ "SELECT 1
|
||||
+ FROM notes
|
||||
+ WHERE note_type = 'DiffNote'
|
||||
+ AND is_system = 0
|
||||
+ AND position_new_path = ?1
|
||||
+ LIMIT 1",
|
||||
+ rusqlite::params![trimmed],
|
||||
+ |_| Ok(()),
|
||||
+ ).is_ok()
|
||||
+ } else {
|
||||
+ false
|
||||
+ };
|
||||
+
|
||||
+ let is_file = looks_like_file || exact_exists;
|
||||
|
||||
     if is_file {
-        PathQuery {
-            value: trimmed.to_string(),
-            is_prefix: false,
-        }
+        Ok(PathQuery {
+            value: trimmed.to_string(),
+            is_prefix: false,
+        })
     } else {
         let escaped = escape_like(trimmed);
-        PathQuery {
-            value: format!("{escaped}/%"),
-            is_prefix: true,
-        }
+        Ok(PathQuery {
+            value: format!("{escaped}/%"),
+            is_prefix: true,
+        })
     }
 }
|
||||
|
||||
|
||||
Also update callers:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
- let pq = build_path_query(path);
|
||||
+ let pq = build_path_query(conn, path)?;
|
||||
@@
|
||||
- let pq = build_path_query(path);
|
||||
+ let pq = build_path_query(conn, path)?;
|
||||
|
||||
|
||||
And tests:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
- fn test_build_path_query() {
|
||||
+ fn test_build_path_query() {
|
||||
@@
|
||||
- // Dotless root file -> exact match (root path without '/')
|
||||
+ // Dotless root file -> exact match (root path without '/')
|
||||
let pq = build_path_query(&conn, "Makefile").unwrap();
|
||||
assert_eq!(pq.value, "Makefile");
|
||||
assert!(!pq.is_prefix);
|
||||
+
|
||||
+ // Dotless file in subdir should become exact if DB contains it (probe)
|
||||
+ // (set up: insert one DiffNote with position_new_path = "src/Dockerfile")
|
||||
|
||||
2) Make “reviewer” semantics correct: exclude MR authors commenting on their own diffs
|
||||
Why
|
||||
|
||||
Right now, Overlap (and Expert reviewer branch) will count MR authors as “reviewers” if they leave DiffNotes in their own MR (clarifications / replies), inflating A+R and contaminating “who reviewed here” signals.
|
||||
|
||||
You already enforce this in --reviews mode (m.author_username != ?1). Apply the same principle consistently:
|
||||
|
||||
Reviewer branch: only count notes where n.author_username != m.author_username (when both non-NULL).
|
||||
|
||||
Diff (Overlap reviewer branch)
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
- WHERE n.note_type = 'DiffNote'
|
||||
+ WHERE n.note_type = 'DiffNote'
|
||||
AND n.position_new_path LIKE ?1 ESCAPE '\\'
|
||||
AND n.is_system = 0
|
||||
AND n.author_username IS NOT NULL
|
||||
+ AND (m.author_username IS NULL OR n.author_username != m.author_username)
|
||||
AND n.created_at >= ?2
|
||||
AND (?3 IS NULL OR n.project_id = ?3)
|
||||
|
||||
|
||||
Same change for sql_exact.
|
||||
|
||||
3) Expert mode scoring: align units + reduce single-MR “comment storms”
|
||||
Why
|
||||
|
||||
Expert currently mixes units:
|
||||
|
||||
reviewer side: DiffNote count
|
||||
|
||||
author side: distinct MR count
|
||||
|
||||
That makes score noisy and can crown “someone who wrote 30 comments on one MR” as top expert.
|
||||
|
||||
Fix: make both sides primarily MR-breadth:
|
||||
|
||||
reviewer: COUNT(DISTINCT m.id) as review_mr_count
|
||||
|
||||
author: COUNT(DISTINCT m.id) as author_mr_count
|
||||
Optionally keep review_note_count as a secondary intensity signal (but not the main driver).
|
||||
|
||||
Diff (types + SQL)
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
pub struct Expert {
|
||||
pub username: String,
|
||||
- pub score: f64,
|
||||
- pub review_count: u32,
|
||||
- pub author_count: u32,
|
||||
+ pub score: i64,
|
||||
+ pub review_mr_count: u32,
|
||||
+ pub review_note_count: u32,
|
||||
+ pub author_mr_count: u32,
|
||||
pub last_active_ms: i64,
|
||||
}
|
||||
|
||||
|
||||
Reviewer branch now joins to MR so it can count distinct MRs and exclude self-comments:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
- SELECT
|
||||
- n.author_username AS username,
|
||||
- 'reviewer' AS role,
|
||||
- COUNT(*) AS cnt,
|
||||
- MAX(n.created_at) AS last_active_at
|
||||
- FROM notes n
|
||||
+ SELECT
|
||||
+ n.author_username AS username,
|
||||
+ 'reviewer' AS role,
|
||||
+ COUNT(DISTINCT m.id) AS mr_cnt,
|
||||
+ COUNT(*) AS note_cnt,
|
||||
+ MAX(n.created_at) AS last_active_at
|
||||
+ FROM notes n
|
||||
+ JOIN discussions d ON n.discussion_id = d.id
|
||||
+ JOIN merge_requests m ON d.merge_request_id = m.id
|
||||
WHERE n.note_type = 'DiffNote'
|
||||
AND n.is_system = 0
|
||||
AND n.author_username IS NOT NULL
|
||||
+ AND (m.author_username IS NULL OR n.author_username != m.author_username)
|
||||
AND n.position_new_path LIKE ?1 ESCAPE '\\'
|
||||
AND n.created_at >= ?2
|
||||
AND (?3 IS NULL OR n.project_id = ?3)
|
||||
GROUP BY n.author_username
|
||||
|
||||
|
||||
Update author branch payload to match shape:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
SELECT
|
||||
m.author_username AS username,
|
||||
'author' AS role,
|
||||
- COUNT(DISTINCT m.id) AS cnt,
|
||||
+ COUNT(DISTINCT m.id) AS mr_cnt,
|
||||
+ 0 AS note_cnt,
|
||||
MAX(n.created_at) AS last_active_at
|
||||
|
||||
|
||||
Aggregate:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
SELECT
|
||||
username,
|
||||
- SUM(CASE WHEN role = 'reviewer' THEN cnt ELSE 0 END) AS review_count,
|
||||
- SUM(CASE WHEN role = 'author' THEN cnt ELSE 0 END) AS author_count,
|
||||
+ SUM(CASE WHEN role = 'reviewer' THEN mr_cnt ELSE 0 END) AS review_mr_count,
|
||||
+ SUM(CASE WHEN role = 'reviewer' THEN note_cnt ELSE 0 END) AS review_note_count,
|
||||
+ SUM(CASE WHEN role = 'author' THEN mr_cnt ELSE 0 END) AS author_mr_count,
|
||||
MAX(last_active_at) AS last_active_at,
|
||||
- (SUM(CASE WHEN role = 'reviewer' THEN cnt ELSE 0 END) * 3.0) +
|
||||
- (SUM(CASE WHEN role = 'author' THEN cnt ELSE 0 END) * 2.0) AS score
|
||||
+ (
|
||||
+ (SUM(CASE WHEN role = 'reviewer' THEN mr_cnt ELSE 0 END) * 20) +
|
||||
+ (SUM(CASE WHEN role = 'author' THEN mr_cnt ELSE 0 END) * 12) +
|
||||
+ (SUM(CASE WHEN role = 'reviewer' THEN note_cnt ELSE 0 END) * 1)
|
||||
+ ) AS score
|
||||
|
||||
|
||||
Human header:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
- style("Reviews").bold(),
|
||||
- style("Authored").bold(),
|
||||
+ style("Reviewed(MRs)").bold(),
|
||||
+ style("Notes").bold(),
|
||||
+ style("Authored(MRs)").bold(),
|
||||
|
||||
4) Deterministic output: participants + MR refs + tie-breakers
|
||||
Why
|
||||
|
||||
You’ve correctly focused on reproducibility (resolved_input), but you still have nondeterministic lists:
|
||||
|
||||
participants: GROUP_CONCAT order is undefined → vector order changes run-to-run.
|
||||
|
||||
mr_refs: you dedup via HashSet then iterate → undefined order.
|
||||
|
||||
user sorting in overlap is missing stable tie-breakers.
|
||||
|
||||
This is a real “robot mode flake” source.
|
||||
|
||||
Diff (Active participants sort)
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
- let participants: Vec<String> = participants_csv
|
||||
+ let mut participants: Vec<String> = participants_csv
|
||||
.as_deref()
|
||||
.filter(|s| !s.is_empty())
|
||||
.map(|csv| csv.split('\x1F').map(String::from).collect())
|
||||
.unwrap_or_default();
|
||||
+ participants.sort(); // stable, deterministic
|
||||
|
||||
Diff (Overlap MR refs sort + stable user sort)
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
- users.sort_by(|a, b| b.touch_count.cmp(&a.touch_count));
|
||||
+ users.sort_by(|a, b| {
|
||||
+ b.touch_count.cmp(&a.touch_count)
|
||||
+ .then_with(|| b.last_touch_at.cmp(&a.last_touch_at))
|
||||
+ .then_with(|| a.username.cmp(&b.username))
|
||||
+ });
|
||||
@@
|
||||
- entry.mr_refs = set.into_iter().collect();
|
||||
+ let mut v: Vec<String> = set.into_iter().collect();
|
||||
+ v.sort();
|
||||
+ entry.mr_refs = v;
|
||||
|
||||
5) Make --limit actionable: surface truncation explicitly (human + robot)
|
||||
Why
|
||||
|
||||
Agents (and humans) need to know if results were cut off so they can rerun with a bigger -n.
|
||||
Right now there’s no signal.
|
||||
|
||||
Minimal pattern: query limit + 1, set truncated = true if you got > limit, then truncate.
|
||||
|
||||
Diff (result types)
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
pub struct ExpertResult {
|
||||
pub path_query: String,
|
||||
pub experts: Vec<Expert>,
|
||||
+ pub truncated: bool,
|
||||
}
|
||||
@@
|
||||
pub struct ActiveResult {
|
||||
pub discussions: Vec<ActiveDiscussion>,
|
||||
pub total_unresolved: u32,
|
||||
+ pub truncated: bool,
|
||||
}
|
||||
@@
|
||||
pub struct OverlapResult {
|
||||
pub path_query: String,
|
||||
pub users: Vec<OverlapUser>,
|
||||
+ pub truncated: bool,
|
||||
}
|
||||
|
||||
Diff (query pattern example)
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
- let limit_i64 = limit as i64;
|
||||
+ let limit_plus_one = (limit + 1) as i64;
|
||||
@@
|
||||
- LIMIT ?4
|
||||
+ LIMIT ?4
|
||||
@@
|
||||
- rusqlite::params![pq.value, since_ms, project_id, limit_i64],
|
||||
+ rusqlite::params![pq.value, since_ms, project_id, limit_plus_one],
|
||||
@@
|
||||
- Ok(ExpertResult {
|
||||
+ let truncated = experts.len() > limit;
|
||||
+ let experts = experts.into_iter().take(limit).collect();
|
||||
+ Ok(ExpertResult {
|
||||
path_query: path.to_string(),
|
||||
experts,
|
||||
+ truncated,
|
||||
})
|
||||
|
||||
|
||||
Human output hint:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
if r.experts.is_empty() { ... }
|
||||
+ if r.truncated {
|
||||
+ println!(" {}", style("(results truncated at --limit; rerun with a higher -n)").dim());
|
||||
+ }
|
||||
|
||||
|
||||
Robot output field:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
fn expert_to_json(r: &ExpertResult) -> serde_json::Value {
|
||||
serde_json::json!({
|
||||
"path_query": r.path_query,
|
||||
+ "truncated": r.truncated,
|
||||
"experts": ...
|
||||
})
|
||||
}
|
||||
|
||||
6) Overlap merge hot loop: avoid repeated HashSet rebuild per row
|
||||
Why
|
||||
|
||||
This line is expensive in a UNION result with many rows:
|
||||
|
||||
rust
|
||||
Copy code
|
||||
let mut set: HashSet<String> = entry.mr_refs.drain(..).collect();
|
||||
|
||||
|
||||
It reallocates and rehashes every time.
|
||||
|
||||
Fix: store an accumulator with HashSet during merge, convert once at end.
|
||||
|
||||
Diff (internal accumulator)
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
- let mut user_map: HashMap<String, OverlapUser> = HashMap::new();
|
||||
+ struct OverlapAcc {
|
||||
+ username: String,
|
||||
+ author_touch_count: u32,
|
||||
+ review_touch_count: u32,
|
||||
+ touch_count: u32,
|
||||
+ last_touch_at: i64,
|
||||
+ mr_refs: HashSet<String>,
|
||||
+ }
|
||||
+ let mut user_map: HashMap<String, OverlapAcc> = HashMap::new();
|
||||
@@
|
||||
- let entry = user_map.entry(username.clone()).or_insert_with(|| OverlapUser {
|
||||
+ let entry = user_map.entry(username.clone()).or_insert_with(|| OverlapAcc {
|
||||
username: username.clone(),
|
||||
author_touch_count: 0,
|
||||
review_touch_count: 0,
|
||||
touch_count: 0,
|
||||
last_touch_at: 0,
|
||||
- mr_refs: Vec::new(),
|
||||
+ mr_refs: HashSet::new(),
|
||||
});
|
||||
@@
|
||||
- let mut set: HashSet<String> = entry.mr_refs.drain(..).collect();
|
||||
- for r in mr_refs { set.insert(r); }
|
||||
- entry.mr_refs = set.into_iter().collect();
|
||||
+ for r in mr_refs { entry.mr_refs.insert(r); }
|
||||
@@
|
||||
- let mut users: Vec<OverlapUser> = user_map.into_values().collect();
|
||||
+ let mut users: Vec<OverlapUser> = user_map.into_values().map(|a| {
|
||||
+ let mut mr_refs: Vec<String> = a.mr_refs.into_iter().collect();
|
||||
+ mr_refs.sort();
|
||||
+ OverlapUser {
|
||||
+ username: a.username,
|
||||
+ author_touch_count: a.author_touch_count,
|
||||
+ review_touch_count: a.review_touch_count,
|
||||
+ touch_count: a.touch_count,
|
||||
+ last_touch_at: a.last_touch_at,
|
||||
+ mr_refs,
|
||||
+ }
|
||||
+ }).collect();
|
||||
|
||||
7) Tests to lock these behaviors
|
||||
Add tests (high value)
|
||||
|
||||
dotless subdir file uses DB probe → exact match
|
||||
|
||||
self-review exclusion prevents MR author showing up as reviewer
|
||||
|
||||
deterministic ordering for participants and mr_refs (sort)
|
||||
|
||||
Diff (test additions outline)
|
||||
diff
|
||||
Copy code
|
||||
@@
|
||||
#[test]
|
||||
+ fn test_build_path_query_dotless_subdir_file_uses_probe() {
|
||||
+ let conn = setup_test_db();
|
||||
+ insert_project(&conn, 1, "team/backend");
|
||||
+ insert_mr(&conn, 1, 1, 100, "author_a", "opened");
|
||||
+ insert_discussion(&conn, 1, 1, Some(1), None, true, false);
|
||||
+ insert_diffnote(&conn, 1, 1, 1, "reviewer_b", "src/Dockerfile", "note");
|
||||
+
|
||||
+ let pq = build_path_query(&conn, "src/Dockerfile").unwrap();
|
||||
+ assert_eq!(pq.value, "src/Dockerfile");
|
||||
+ assert!(!pq.is_prefix);
|
||||
+ }
|
||||
+
|
||||
+ #[test]
|
||||
+ fn test_overlap_excludes_self_review_notes() {
|
||||
+ let conn = setup_test_db();
|
||||
+ insert_project(&conn, 1, "team/backend");
|
||||
+ insert_mr(&conn, 1, 1, 100, "author_a", "opened");
|
||||
+ insert_discussion(&conn, 1, 1, Some(1), None, true, false);
|
||||
+ // author_a comments on their own MR diff
|
||||
+ insert_diffnote(&conn, 1, 1, 1, "author_a", "src/auth/login.rs", "clarification");
|
||||
+
|
||||
+ let result = query_overlap(&conn, "src/auth/", None, 0, 20).unwrap();
|
||||
+ let u = result.users.iter().find(|u| u.username == "author_a");
|
||||
+ // should not be credited as reviewer touch
|
||||
+ assert!(u.map(|x| x.review_touch_count).unwrap_or(0) == 0);
|
||||
+ }
|
||||
|
||||
Net effect
|
||||
|
||||
Correctness: fixes dotless subdir files + self-review pollution.
|
||||
|
||||
Signal quality: Expert ranking becomes harder to game by comment volume.
|
||||
|
||||
Robot reproducibility: deterministic ordering + explicit truncation.
|
||||
|
||||
Performance: avoids rehash loops in overlap merges; path probe uses indexed equality.
|
||||
|
||||
If you want one “single best” change: #1 (DB probe exact-match) is the most likely to prevent confusing “why is this empty?” behavior without adding any user-facing complexity.
|
||||
@@ -1,353 +0,0 @@
|
||||
Below are the highest-leverage revisions I’d make to iteration 6 to improve correctness (multi-project edge cases), robot-mode reliability (bounded payloads + truncation), and signal quality—without changing the fundamental scope (still pure SQL over existing tables).
|
||||
|
||||
1) Make build_path_query project-aware and two-way probe (exact and prefix)
|
||||
Why
|
||||
|
||||
Your DB probe currently answers: “does this exact file exist anywhere in DiffNotes?” That can misclassify in a project-scoped run:
|
||||
|
||||
Path exists as a dotless file in Project A → probe returns true
|
||||
|
||||
User runs -p Project B where the path is a directory (or different shape) → you switch to exact, return empty, and miss valid prefix hits.
|
||||
|
||||
Also, you still have a minor heuristic fragility for dot directories when the user omits trailing / (e.g., .github/workflows): last segment has a dot → you treat as file unless forced dir.
|
||||
|
||||
Revision
|
||||
|
||||
Thread project_id into build_path_query(conn, path, project_id)
|
||||
|
||||
Probe exact first (scoped), then probe prefix (scoped)
|
||||
|
||||
Only fall back to heuristics if both probes fail
|
||||
|
||||
This keeps “static SQL, no dynamic assembly,” and costs at most 2 indexed existence queries per invocation.
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/who-command-design.md b/who-command-design.md
|
||||
@@
|
||||
- fn build_path_query(conn: &Connection, path: &str) -> Result<PathQuery> {
|
||||
+ fn build_path_query(conn: &Connection, path: &str, project_id: Option<i64>) -> Result<PathQuery> {
|
||||
let trimmed = path.trim_end_matches('/');
|
||||
let last_segment = trimmed.rsplit('/').next().unwrap_or(trimmed);
|
||||
let is_root = !trimmed.contains('/');
|
||||
let forced_dir = path.ends_with('/');
|
||||
- let looks_like_file = !forced_dir && (is_root || last_segment.contains('.'));
|
||||
+ // Heuristic is now only a fallback; probes decide first.
|
||||
+ let looks_like_file = !forced_dir && (is_root || last_segment.contains('.'));
|
||||
|
||||
- let exact_exists = if !looks_like_file && !forced_dir {
|
||||
- conn.query_row(
|
||||
- "SELECT 1 FROM notes
|
||||
- WHERE note_type = 'DiffNote'
|
||||
- AND is_system = 0
|
||||
- AND position_new_path = ?1
|
||||
- LIMIT 1",
|
||||
- rusqlite::params![trimmed],
|
||||
- |_| Ok(()),
|
||||
- )
|
||||
- .is_ok()
|
||||
- } else {
|
||||
- false
|
||||
- };
|
||||
+ // Probe 1: exact file exists (scoped)
|
||||
+ let exact_exists = conn.query_row(
|
||||
+ "SELECT 1 FROM notes
|
||||
+ WHERE note_type = 'DiffNote'
|
||||
+ AND is_system = 0
|
||||
+ AND position_new_path = ?1
|
||||
+ AND (?2 IS NULL OR project_id = ?2)
|
||||
+ LIMIT 1",
|
||||
+ rusqlite::params![trimmed, project_id],
|
||||
+ |_| Ok(()),
|
||||
+ ).is_ok();
|
||||
+
|
||||
+ // Probe 2: directory prefix exists (scoped)
|
||||
+ let prefix_exists = if !forced_dir {
|
||||
+ let escaped = escape_like(trimmed);
|
||||
+ let pat = format!("{escaped}/%");
|
||||
+ conn.query_row(
|
||||
+ "SELECT 1 FROM notes
|
||||
+ WHERE note_type = 'DiffNote'
|
||||
+ AND is_system = 0
|
||||
+ AND position_new_path LIKE ?1 ESCAPE '\\'
|
||||
+ AND (?2 IS NULL OR project_id = ?2)
|
||||
+ LIMIT 1",
|
||||
+ rusqlite::params![pat, project_id],
|
||||
+ |_| Ok(()),
|
||||
+ ).is_ok()
|
||||
+ } else { false };
|
||||
|
||||
- let is_file = looks_like_file || exact_exists;
|
||||
+ // Forced directory always wins; otherwise: exact > prefix > heuristic
|
||||
+ let is_file = if forced_dir { false }
|
||||
+ else if exact_exists { true }
|
||||
+ else if prefix_exists { false }
|
||||
+ else { looks_like_file };
|
||||
|
||||
if is_file {
|
||||
Ok(PathQuery { value: trimmed.to_string(), is_prefix: false })
|
||||
} else {
|
||||
let escaped = escape_like(trimmed);
|
||||
Ok(PathQuery { value: format!("{escaped}/%"), is_prefix: true })
|
||||
}
|
||||
}
|
||||
@@
|
||||
- let pq = build_path_query(conn, path)?;
|
||||
+ let pq = build_path_query(conn, path, project_id)?;
|
||||
|
||||
|
||||
Add test coverage for the multi-project misclassification case:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/who-command-design.md b/who-command-design.md
|
||||
@@
|
||||
#[test]
|
||||
fn test_build_path_query_dotless_subdir_file_uses_db_probe() {
|
||||
@@
|
||||
- let pq = build_path_query(&conn, "src/Dockerfile").unwrap();
|
||||
+ let pq = build_path_query(&conn, "src/Dockerfile", None).unwrap();
|
||||
@@
|
||||
- let pq2 = build_path_query(&conn2, "src/Dockerfile").unwrap();
|
||||
+ let pq2 = build_path_query(&conn2, "src/Dockerfile", None).unwrap();
|
||||
}
|
||||
+
|
||||
+ #[test]
|
||||
+ fn test_build_path_query_probe_is_project_scoped() {
|
||||
+ // Path exists as a dotless file in project 1; project 2 should not
|
||||
+ // treat it as an exact file unless it exists there too.
|
||||
+ let conn = setup_test_db();
|
||||
+ insert_project(&conn, 1, "team/a");
|
||||
+ insert_project(&conn, 2, "team/b");
|
||||
+ insert_mr(&conn, 1, 1, 10, "author_a", "opened");
|
||||
+ insert_discussion(&conn, 1, 1, Some(1), None, true, false);
|
||||
+ insert_diffnote(&conn, 1, 1, 1, "rev", "infra/Makefile", "note");
|
||||
+
|
||||
+ let pq_scoped = build_path_query(&conn, "infra/Makefile", Some(2)).unwrap();
|
||||
+ assert!(pq_scoped.is_prefix); // should fall back to prefix in project 2
|
||||
+ }
|
||||
|
||||
2) Bound robot payload sizes for participants and mr_refs (with totals + truncation)
|
||||
Why
|
||||
|
||||
mr_refs and participants can become unbounded arrays in robot mode, which is a real operational hazard:
|
||||
|
||||
huge JSON → slow, noisy diffs, brittle downstream pipelines
|
||||
|
||||
potential SQLite group_concat truncation becomes invisible (and you can’t distinguish “no refs” vs “refs truncated”)
|
||||
|
||||
Revision
|
||||
|
||||
Introduce hard caps and explicit metadata:
|
||||
|
||||
participants_total, participants_truncated
|
||||
|
||||
mr_refs_total, mr_refs_truncated
|
||||
|
||||
This is not scope creep—it’s defensive output hygiene.
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/who-command-design.md b/who-command-design.md
|
||||
@@
|
||||
pub struct ActiveDiscussion {
|
||||
@@
|
||||
pub participants: Vec<String>,
|
||||
+ pub participants_total: u32,
|
||||
+ pub participants_truncated: bool,
|
||||
}
|
||||
@@
|
||||
pub struct OverlapUser {
|
||||
@@
|
||||
pub mr_refs: Vec<String>,
|
||||
+ pub mr_refs_total: u32,
|
||||
+ pub mr_refs_truncated: bool,
|
||||
}
|
||||
|
||||
|
||||
Implementation sketch (Rust-side, deterministic):
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/who-command-design.md b/who-command-design.md
|
||||
@@
|
||||
fn query_active(...) -> Result<ActiveResult> {
|
||||
+ const MAX_PARTICIPANTS: usize = 50;
|
||||
@@
|
||||
- participants.sort();
|
||||
+ participants.sort();
|
||||
+ let participants_total = participants.len() as u32;
|
||||
+ let participants_truncated = participants.len() > MAX_PARTICIPANTS;
|
||||
+ if participants_truncated {
|
||||
+ participants.truncate(MAX_PARTICIPANTS);
|
||||
+ }
|
||||
@@
|
||||
Ok(ActiveDiscussion {
|
||||
@@
|
||||
participants,
|
||||
+ participants_total,
|
||||
+ participants_truncated,
|
||||
})
|
||||
@@
|
||||
fn query_overlap(...) -> Result<OverlapResult> {
|
||||
+ const MAX_MR_REFS_PER_USER: usize = 50;
|
||||
@@
|
||||
.map(|a| {
|
||||
let mut mr_refs: Vec<String> = a.mr_refs.into_iter().collect();
|
||||
mr_refs.sort();
|
||||
+ let mr_refs_total = mr_refs.len() as u32;
|
||||
+ let mr_refs_truncated = mr_refs.len() > MAX_MR_REFS_PER_USER;
|
||||
+ if mr_refs_truncated {
|
||||
+ mr_refs.truncate(MAX_MR_REFS_PER_USER);
|
||||
+ }
|
||||
OverlapUser {
|
||||
@@
|
||||
mr_refs,
|
||||
+ mr_refs_total,
|
||||
+ mr_refs_truncated,
|
||||
}
|
||||
})
|
||||
|
||||
|
||||
Update robot JSON accordingly:
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/who-command-design.md b/who-command-design.md
|
||||
@@
|
||||
fn active_to_json(r: &ActiveResult) -> serde_json::Value {
|
||||
@@
|
||||
"participants": d.participants,
|
||||
+ "participants_total": d.participants_total,
|
||||
+ "participants_truncated": d.participants_truncated,
|
||||
}))
|
||||
@@
|
||||
fn overlap_to_json(r: &OverlapResult) -> serde_json::Value {
|
||||
@@
|
||||
"mr_refs": u.mr_refs,
|
||||
+ "mr_refs_total": u.mr_refs_total,
|
||||
+ "mr_refs_truncated": u.mr_refs_truncated,
|
||||
}))
|
||||
|
||||
|
||||
Also update robot-docs manifest schema snippet for who.active.discussions[] and who.overlap.users[].
|
||||
|
||||
3) Add truncation metadata to Workload sections (same LIMIT+1 pattern)
|
||||
Why
|
||||
|
||||
Workload is the mode most likely to be consumed by agents, and right now it has silent truncation (each section is LIMIT N with no signal). Your plan already treats truncation as a first-class contract elsewhere; Workload should match.
|
||||
|
||||
Revision
|
||||
|
||||
For each workload query:
|
||||
|
||||
request LIMIT + 1
|
||||
|
||||
set *_truncated booleans
|
||||
|
||||
trim to requested limit
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/who-command-design.md b/who-command-design.md
|
||||
@@
|
||||
pub struct WorkloadResult {
|
||||
pub username: String,
|
||||
pub assigned_issues: Vec<WorkloadIssue>,
|
||||
pub authored_mrs: Vec<WorkloadMr>,
|
||||
pub reviewing_mrs: Vec<WorkloadMr>,
|
||||
pub unresolved_discussions: Vec<WorkloadDiscussion>,
|
||||
+ pub assigned_issues_truncated: bool,
|
||||
+ pub authored_mrs_truncated: bool,
|
||||
+ pub reviewing_mrs_truncated: bool,
|
||||
+ pub unresolved_discussions_truncated: bool,
|
||||
}
|
||||
|
||||
|
||||
And in JSON include the booleans (plus you already have summary.counts).
|
||||
|
||||
This is mechanically repetitive but extremely valuable for automation.
|
||||
|
||||
4) Rename “Last Active” → “Last Seen” for Expert/Overlap
|
||||
Why
|
||||
|
||||
For “author” rows, the timestamp is derived from review activity on their MR (via MAX(n.created_at)), not necessarily that person’s direct action. Calling that “active” is semantically misleading. “Last seen” is accurate across both reviewer+author branches.
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/who-command-design.md b/who-command-design.md
|
||||
@@
|
||||
pub struct Expert {
|
||||
@@
|
||||
- pub last_active_ms: i64,
|
||||
+ pub last_seen_ms: i64,
|
||||
}
|
||||
@@
|
||||
pub struct OverlapUser {
|
||||
@@
|
||||
- pub last_touch_at: i64,
|
||||
+ pub last_seen_at: i64,
|
||||
@@
|
||||
fn print_expert_human(...) {
|
||||
@@
|
||||
- style("Last Active").bold(),
|
||||
+ style("Last Seen").bold(),
|
||||
@@
|
||||
- style(format_relative_time(expert.last_active_ms)).dim(),
|
||||
+ style(format_relative_time(expert.last_seen_ms)).dim(),
|
||||
|
||||
|
||||
(Keep internal SQL aliases consistent: last_seen_at everywhere.)
|
||||
|
||||
5) Make MR state filtering consistent in Expert/Overlap reviewer branches
|
||||
Why
|
||||
|
||||
You already restrict Overlap author branch to opened|merged, but reviewer branches can include closed/unmerged noise. Consistency improves signal quality and can reduce scan churn.
|
||||
|
||||
Low-risk revision: apply the same state filter to reviewer branches (Expert + Overlap). You can keep “closed” excluded by default without adding new flags.
|
||||
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/who-command-design.md b/who-command-design.md
|
||||
@@
|
||||
WHERE n.note_type = 'DiffNote'
|
||||
AND n.is_system = 0
|
||||
@@
|
||||
- AND n.created_at >= ?2
|
||||
+ AND m.state IN ('opened','merged')
|
||||
+ AND n.created_at >= ?2
|
||||
|
||||
|
||||
This is a semantic choice; if you later want archaeology across closed/unmerged, that belongs in a separate mode/flag, but I would not add it now.
|
||||
|
||||
6) Add a design principle for bounded outputs (aligns with robot-first reproducibility)
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/who-command-design.md b/who-command-design.md
|
||||
@@
|
||||
10. **Truncation transparency.** Result types carry a `truncated: bool` flag...
|
||||
+11. **Bounded payloads.** Robot JSON must never emit unbounded arrays (participants, refs).
|
||||
+ Large list fields are capped with `*_total` + `*_truncated` so agents can page/retry.
|
||||
|
||||
Consolidated plan metadata bump (Iteration 7)
|
||||
diff
|
||||
Copy code
|
||||
diff --git a/who-command-design.md b/who-command-design.md
|
||||
@@
|
||||
-iteration: 6
|
||||
+iteration: 7
|
||||
updated: 2026-02-07
|
||||
|
||||
Net effect (what you get)
|
||||
|
||||
Correct path classification under -p scoping (no cross-project probe leakage)
|
||||
|
||||
Deterministic + bounded robot payloads (no giant JSON surprises)
|
||||
|
||||
Uniform truncation contract across all modes (Workload no longer silently truncates)
|
||||
|
||||
Clearer semantics (“Last Seen” avoids misinterpretation)
|
||||
|
||||
Cleaner signals (reviewer branches ignore closed/unmerged by default)
|
||||
|
||||
If you want, I can also produce a second diff that updates the robot-docs schema block and the Verification EXPLAIN expectations to reflect the new probe queries and the state filter.
|
||||
21
migrations/022_notes_query_index.sql
Normal file
21
migrations/022_notes_query_index.sql
Normal file
@@ -0,0 +1,21 @@
|
||||
-- Migration 022: Composite query indexes for notes + author_id column
-- Optimizes author-scoped and project-scoped date-range queries on notes.
-- Adds discussion JOIN indexes and immutable author identity column.
--
-- NOTE(review): unlike migration 027, this file does not insert into
-- schema_version — confirm the migration runner records applied versions
-- itself, otherwise re-running would re-attempt the ALTER TABLE below
-- (which fails if the column already exists).

-- Composite index for author-scoped queries (who command, notes --author).
-- Partial on is_system = 0 so system notes never inflate the index; the
-- trailing (created_at DESC, id DESC) matches keyset-pagination ordering.
CREATE INDEX IF NOT EXISTS idx_notes_user_created
ON notes(project_id, author_username COLLATE NOCASE, created_at DESC, id DESC)
WHERE is_system = 0;

-- Composite index for project-scoped date-range queries (no author filter).
CREATE INDEX IF NOT EXISTS idx_notes_project_created
ON notes(project_id, created_at DESC, id DESC)
WHERE is_system = 0;

-- Discussion JOIN indexes: discussions are looked up by their parent
-- issue or merge request when joining notes back to entities.
CREATE INDEX IF NOT EXISTS idx_discussions_issue_id ON discussions(issue_id);
CREATE INDEX IF NOT EXISTS idx_discussions_mr_id ON discussions(merge_request_id);

-- Immutable author identity column (GitLab numeric user ID).
-- Usernames can be renamed; the numeric ID is stable. Existing rows get
-- NULL, hence the partial index below only covers populated rows.
ALTER TABLE notes ADD COLUMN author_id INTEGER;
CREATE INDEX IF NOT EXISTS idx_notes_author_id ON notes(author_id) WHERE author_id IS NOT NULL;
|
||||
153
migrations/024_note_documents.sql
Normal file
153
migrations/024_note_documents.sql
Normal file
@@ -0,0 +1,153 @@
|
||||
-- Migration 024: Add 'note' source_type to documents and dirty_sources
-- SQLite does not support ALTER CONSTRAINT, so we use the table-rebuild pattern.
--
-- NOTE(review): the rebuild pattern assumes this whole file runs inside one
-- transaction (ideally with PRAGMA foreign_keys=OFF for the swap) — confirm
-- the migration runner provides that, or a failure mid-file leaves the
-- junction tables dropped.

-- ============================================================
-- 1. Rebuild dirty_sources with updated CHECK constraint
--    (identical schema except 'note' added to the allowed source_types)
-- ============================================================

CREATE TABLE dirty_sources_new (
    source_type TEXT NOT NULL CHECK (source_type IN ('issue','merge_request','discussion','note')),
    source_id INTEGER NOT NULL,
    queued_at INTEGER NOT NULL,
    attempt_count INTEGER NOT NULL DEFAULT 0,
    last_attempt_at INTEGER,
    last_error TEXT,
    next_attempt_at INTEGER,
    PRIMARY KEY(source_type, source_id)
);

-- Positional copy: relies on dirty_sources_new declaring columns in the same
-- order as the old dirty_sources table.
INSERT INTO dirty_sources_new SELECT * FROM dirty_sources;
DROP TABLE dirty_sources;
ALTER TABLE dirty_sources_new RENAME TO dirty_sources;
CREATE INDEX idx_dirty_sources_next_attempt ON dirty_sources(next_attempt_at);

-- ============================================================
-- 2. Rebuild documents with updated CHECK constraint
-- ============================================================

-- 2a. Backup junction table data (restored in 2i after the swap).
CREATE TEMP TABLE _doc_labels_backup AS SELECT * FROM document_labels;
CREATE TEMP TABLE _doc_paths_backup AS SELECT * FROM document_paths;

-- 2b. Drop all triggers that reference documents so they don't fire (or
--     dangle) during the swap; recreated verbatim in 2j/2k.
DROP TRIGGER IF EXISTS documents_ai;
DROP TRIGGER IF EXISTS documents_ad;
DROP TRIGGER IF EXISTS documents_au;
DROP TRIGGER IF EXISTS documents_embeddings_ad;

-- 2c. Drop junction tables (they have FK references to documents)
DROP TABLE IF EXISTS document_labels;
DROP TABLE IF EXISTS document_paths;

-- 2d. Create new documents table with 'note' in CHECK constraint.
--     Schema is otherwise unchanged from the previous definition.
CREATE TABLE documents_new (
    id INTEGER PRIMARY KEY,
    source_type TEXT NOT NULL CHECK (source_type IN ('issue','merge_request','discussion','note')),
    source_id INTEGER NOT NULL,
    project_id INTEGER NOT NULL REFERENCES projects(id),
    author_username TEXT,
    label_names TEXT,
    created_at INTEGER,
    updated_at INTEGER,
    url TEXT,
    title TEXT,
    content_text TEXT NOT NULL,
    content_hash TEXT NOT NULL,
    labels_hash TEXT NOT NULL DEFAULT '',
    paths_hash TEXT NOT NULL DEFAULT '',
    is_truncated INTEGER NOT NULL DEFAULT 0,
    -- Reason codes for truncated document content; NULL when not truncated.
    truncated_reason TEXT CHECK (
        truncated_reason IN (
            'token_limit_middle_drop','single_note_oversized','first_last_oversized',
            'hard_cap_oversized'
        )
        OR truncated_reason IS NULL
    ),
    UNIQUE(source_type, source_id)
);

-- 2e. Copy all existing data. Positional copy: depends on documents_new
--     listing columns in exactly the old table's order.
INSERT INTO documents_new SELECT * FROM documents;

-- 2f. Swap tables
DROP TABLE documents;
ALTER TABLE documents_new RENAME TO documents;

-- 2g. Recreate all indexes on documents (DROP TABLE discarded the originals).
CREATE INDEX idx_documents_project_updated ON documents(project_id, updated_at);
CREATE INDEX idx_documents_author ON documents(author_username);
CREATE INDEX idx_documents_source ON documents(source_type, source_id);
CREATE INDEX idx_documents_hash ON documents(content_hash);

-- 2h. Recreate junction tables. WITHOUT ROWID: the composite PK is the
--     whole row, so clustering on it avoids a separate rowid B-tree.
CREATE TABLE document_labels (
    document_id INTEGER NOT NULL REFERENCES documents(id) ON DELETE CASCADE,
    label_name TEXT NOT NULL,
    PRIMARY KEY(document_id, label_name)
) WITHOUT ROWID;
CREATE INDEX idx_document_labels_label ON document_labels(label_name);

CREATE TABLE document_paths (
    document_id INTEGER NOT NULL REFERENCES documents(id) ON DELETE CASCADE,
    path TEXT NOT NULL,
    PRIMARY KEY(document_id, path)
) WITHOUT ROWID;
CREATE INDEX idx_document_paths_path ON document_paths(path);

-- 2i. Restore junction table data from backups. Document ids were preserved
--     by the positional copy in 2e, so the FKs remain valid.
INSERT INTO document_labels SELECT * FROM _doc_labels_backup;
INSERT INTO document_paths SELECT * FROM _doc_paths_backup;

-- 2j. Recreate FTS triggers (from migration 008). The
--     ('delete', rowid, ...) insert form is the FTS5 external-content
--     delete protocol.
CREATE TRIGGER documents_ai AFTER INSERT ON documents BEGIN
    INSERT INTO documents_fts(rowid, title, content_text)
    VALUES (new.id, COALESCE(new.title, ''), new.content_text);
END;

CREATE TRIGGER documents_ad AFTER DELETE ON documents BEGIN
    INSERT INTO documents_fts(documents_fts, rowid, title, content_text)
    VALUES('delete', old.id, COALESCE(old.title, ''), old.content_text);
END;

-- Only re-index when the indexed columns actually changed.
CREATE TRIGGER documents_au AFTER UPDATE ON documents
WHEN old.title IS NOT new.title OR old.content_text != new.content_text
BEGIN
    INSERT INTO documents_fts(documents_fts, rowid, title, content_text)
    VALUES('delete', old.id, COALESCE(old.title, ''), old.content_text);
    INSERT INTO documents_fts(rowid, title, content_text)
    VALUES (new.id, COALESCE(new.title, ''), new.content_text);
END;

-- 2k. Recreate embeddings cleanup trigger (from migration 009).
--     Presumably chunk embeddings live at rowid = document_id * 1000 + chunk
--     index (scheme from migration 009) — TODO confirm that mapping still holds.
CREATE TRIGGER documents_embeddings_ad AFTER DELETE ON documents BEGIN
    DELETE FROM embeddings
    WHERE rowid >= old.id * 1000
      AND rowid < (old.id + 1) * 1000;
END;

-- 2l. Rebuild FTS index to ensure consistency after table swap.
--     NOTE(review): the 'rebuild' command presumes documents_fts is an FTS5
--     external-content table (content='documents') — verify against migration 008.
INSERT INTO documents_fts(documents_fts) VALUES('rebuild');

-- ============================================================
-- 3. Defense triggers: clean up documents when notes are
--    deleted or flipped to system notes
-- ============================================================

CREATE TRIGGER notes_ad_cleanup AFTER DELETE ON notes
WHEN old.is_system = 0
BEGIN
    DELETE FROM documents WHERE source_type = 'note' AND source_id = old.id;
END;

CREATE TRIGGER notes_au_system_cleanup AFTER UPDATE OF is_system ON notes
WHEN NEW.is_system = 1 AND OLD.is_system = 0
BEGIN
    DELETE FROM documents WHERE source_type = 'note' AND source_id = OLD.id;
END;

-- ============================================================
-- 4. Drop temp backup tables
-- ============================================================

DROP TABLE IF EXISTS _doc_labels_backup;
DROP TABLE IF EXISTS _doc_paths_backup;
|
||||
8
migrations/025_note_dirty_backfill.sql
Normal file
8
migrations/025_note_dirty_backfill.sql
Normal file
@@ -0,0 +1,8 @@
|
||||
-- Backfill existing non-system notes into dirty queue for document generation.
-- Only seeds notes that don't already have documents and aren't already queued.
--
-- queued_at is epoch milliseconds (strftime '%s' gives seconds; * 1000).
-- The LEFT JOIN + d.id IS NULL is an anti-join: keep only notes with no
-- existing 'note' document. ON CONFLICT DO NOTHING makes the backfill
-- idempotent against rows already sitting in dirty_sources.
INSERT INTO dirty_sources (source_type, source_id, queued_at)
SELECT 'note', n.id, CAST(strftime('%s', 'now') AS INTEGER) * 1000
FROM notes n
LEFT JOIN documents d ON d.source_type = 'note' AND d.source_id = n.id
WHERE n.is_system = 0 AND d.id IS NULL
ON CONFLICT(source_type, source_id) DO NOTHING;
|
||||
20
migrations/026_scoring_indexes.sql
Normal file
20
migrations/026_scoring_indexes.sql
Normal file
@@ -0,0 +1,20 @@
|
||||
-- Indexes for time-decay expert scoring: dual-path matching and reviewer participation.
-- All notes indexes are partial on (note_type = 'DiffNote' AND is_system = 0)
-- so only human diff comments are indexed; old_path variants additionally
-- exclude NULL old paths (renames/new files).

-- DiffNotes looked up by the pre-rename path, per author over time.
CREATE INDEX IF NOT EXISTS idx_notes_old_path_author
ON notes(position_old_path, author_username, created_at)
WHERE note_type = 'DiffNote' AND is_system = 0 AND position_old_path IS NOT NULL;

-- MR file changes looked up by old path (rename-following joins).
CREATE INDEX IF NOT EXISTS idx_mfc_old_path_project_mr
ON mr_file_changes(old_path, project_id, merge_request_id)
WHERE old_path IS NOT NULL;

-- MR file changes looked up by new (current) path.
CREATE INDEX IF NOT EXISTS idx_mfc_new_path_project_mr
ON mr_file_changes(new_path, project_id, merge_request_id);

-- Reviewer participation: DiffNotes grouped by discussion and author.
CREATE INDEX IF NOT EXISTS idx_notes_diffnote_discussion_author
ON notes(discussion_id, author_username, created_at)
WHERE note_type = 'DiffNote' AND is_system = 0;

-- Project-scoped old-path lookups over a time range.
CREATE INDEX IF NOT EXISTS idx_notes_old_path_project_created
ON notes(position_old_path, project_id, created_at)
WHERE note_type = 'DiffNote' AND is_system = 0 AND position_old_path IS NOT NULL;
|
||||
41
migrations/027_tui_list_indexes.sql
Normal file
41
migrations/027_tui_list_indexes.sql
Normal file
@@ -0,0 +1,41 @@
|
||||
-- Covering indexes for TUI list screen keyset pagination.
-- These supplement existing indexes from earlier migrations to
-- enable efficient ORDER BY ... LIMIT queries without temp B-tree sorts.

-- Issue list: default sort (updated_at DESC, iid DESC) with state filter.
-- Covers: WHERE project_id = ? AND state = ? ORDER BY updated_at DESC, iid DESC
CREATE INDEX IF NOT EXISTS idx_issues_tui_list
ON issues(project_id, state, updated_at DESC, iid DESC);

-- MR list: default sort (updated_at DESC, iid DESC) with state filter.
CREATE INDEX IF NOT EXISTS idx_mrs_tui_list
ON merge_requests(project_id, state, updated_at DESC, iid DESC);

-- Discussion list for entity detail screens: ordered by first note timestamp.
-- Partial indexes: a discussion belongs to either an issue or an MR, so each
-- index only covers the rows its parent-id column is set on.
CREATE INDEX IF NOT EXISTS idx_discussions_issue_ordered
ON discussions(issue_id, first_note_at DESC)
WHERE issue_id IS NOT NULL;

CREATE INDEX IF NOT EXISTS idx_discussions_mr_ordered
ON discussions(merge_request_id, first_note_at DESC)
WHERE merge_request_id IS NOT NULL;

-- Notes within a discussion: chronological order for detail views.
CREATE INDEX IF NOT EXISTS idx_notes_discussion_ordered
ON notes(discussion_id, created_at ASC);

-- Filter-path indexes for TUI filter bar queries.
-- Issues: author filter with state (covers WHERE author_username = ? AND state = ?).
CREATE INDEX IF NOT EXISTS idx_issues_author_state
ON issues(author_username, state);

-- MRs: author filter with state.
CREATE INDEX IF NOT EXISTS idx_mrs_author_state
ON merge_requests(author_username, state);

-- MRs: target branch filter with state.
CREATE INDEX IF NOT EXISTS idx_mrs_target_branch_state
ON merge_requests(target_branch, state);

-- Record this migration; applied_at is epoch milliseconds.
INSERT INTO schema_version (version, applied_at, description)
VALUES (27, strftime('%s', 'now') * 1000, 'TUI list screen covering indexes');
|
||||
@@ -4,7 +4,7 @@ title: ""
|
||||
status: iterating
|
||||
iteration: 6
|
||||
target_iterations: 8
|
||||
beads_revision: 1
|
||||
beads_revision: 2
|
||||
related_plans: []
|
||||
created: 2026-02-08
|
||||
updated: 2026-02-12
|
||||
|
||||
@@ -21,6 +21,10 @@ pub enum CorrectionRule {
|
||||
SingleDashLongFlag,
|
||||
CaseNormalization,
|
||||
FuzzyFlag,
|
||||
SubcommandAlias,
|
||||
ValueNormalization,
|
||||
ValueFuzzy,
|
||||
FlagPrefix,
|
||||
}
|
||||
|
||||
/// Result of the correction pass over raw args.
|
||||
@@ -40,6 +44,7 @@ const GLOBAL_FLAGS: &[&str] = &[
|
||||
"--robot",
|
||||
"--json",
|
||||
"--color",
|
||||
"--icons",
|
||||
"--quiet",
|
||||
"--no-quiet",
|
||||
"--verbose",
|
||||
@@ -119,8 +124,10 @@ const COMMAND_FLAGS: &[(&str, &[&str])] = &[
|
||||
"--no-docs",
|
||||
"--no-events",
|
||||
"--no-file-changes",
|
||||
"--no-status",
|
||||
"--dry-run",
|
||||
"--no-dry-run",
|
||||
"--timings",
|
||||
],
|
||||
),
|
||||
(
|
||||
@@ -162,7 +169,7 @@ const COMMAND_FLAGS: &[(&str, &[&str])] = &[
|
||||
"--project",
|
||||
"--since",
|
||||
"--depth",
|
||||
"--expand-mentions",
|
||||
"--no-mentions",
|
||||
"--limit",
|
||||
"--fields",
|
||||
"--max-seeds",
|
||||
@@ -183,9 +190,39 @@ const COMMAND_FLAGS: &[(&str, &[&str])] = &[
|
||||
"--fields",
|
||||
"--detail",
|
||||
"--no-detail",
|
||||
"--as-of",
|
||||
"--explain-score",
|
||||
"--include-bots",
|
||||
"--include-closed",
|
||||
"--all-history",
|
||||
],
|
||||
),
|
||||
("drift", &["--threshold", "--project"]),
|
||||
(
|
||||
"notes",
|
||||
&[
|
||||
"--limit",
|
||||
"--fields",
|
||||
"--format",
|
||||
"--author",
|
||||
"--note-type",
|
||||
"--contains",
|
||||
"--note-id",
|
||||
"--gitlab-note-id",
|
||||
"--discussion-id",
|
||||
"--include-system",
|
||||
"--for-issue",
|
||||
"--for-mr",
|
||||
"--project",
|
||||
"--since",
|
||||
"--until",
|
||||
"--path",
|
||||
"--resolution",
|
||||
"--sort",
|
||||
"--asc",
|
||||
"--open",
|
||||
],
|
||||
),
|
||||
(
|
||||
"init",
|
||||
&[
|
||||
@@ -197,6 +234,25 @@ const COMMAND_FLAGS: &[(&str, &[&str])] = &[
|
||||
"--default-project",
|
||||
],
|
||||
),
|
||||
(
|
||||
"file-history",
|
||||
&[
|
||||
"--project",
|
||||
"--discussions",
|
||||
"--no-follow-renames",
|
||||
"--merged",
|
||||
"--limit",
|
||||
],
|
||||
),
|
||||
(
|
||||
"trace",
|
||||
&[
|
||||
"--project",
|
||||
"--discussions",
|
||||
"--no-follow-renames",
|
||||
"--limit",
|
||||
],
|
||||
),
|
||||
("generate-docs", &["--full", "--project"]),
|
||||
("completions", &[]),
|
||||
("robot-docs", &["--brief"]),
|
||||
@@ -232,18 +288,47 @@ pub const ENUM_VALUES: &[(&str, &[&str])] = &[
|
||||
("--state", &["opened", "closed", "merged", "locked", "all"]),
|
||||
("--mode", &["lexical", "hybrid", "semantic"]),
|
||||
("--sort", &["updated", "created", "iid"]),
|
||||
("--type", &["issue", "mr", "discussion"]),
|
||||
("--type", &["issue", "mr", "discussion", "note"]),
|
||||
("--fts-mode", &["safe", "raw"]),
|
||||
("--color", &["auto", "always", "never"]),
|
||||
("--log-format", &["text", "json"]),
|
||||
("--for", &["issue", "mr"]),
|
||||
];
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Subcommand alias map (for forms clap aliases can't express)
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
/// Subcommand aliases for non-standard forms (underscores, no separators).
|
||||
/// Clap `visible_alias`/`alias` handles hyphenated forms (`merge-requests`);
|
||||
/// this map catches the rest.
|
||||
const SUBCOMMAND_ALIASES: &[(&str, &str)] = &[
|
||||
("merge_requests", "mrs"),
|
||||
("merge_request", "mrs"),
|
||||
("mergerequests", "mrs"),
|
||||
("mergerequest", "mrs"),
|
||||
("generate_docs", "generate-docs"),
|
||||
("generatedocs", "generate-docs"),
|
||||
("gendocs", "generate-docs"),
|
||||
("gen-docs", "generate-docs"),
|
||||
("robot_docs", "robot-docs"),
|
||||
("robotdocs", "robot-docs"),
|
||||
("sync_status", "status"),
|
||||
("syncstatus", "status"),
|
||||
("auth_test", "auth"),
|
||||
("authtest", "auth"),
|
||||
("file_history", "file-history"),
|
||||
("filehistory", "file-history"),
|
||||
];
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Correction thresholds
|
||||
// ---------------------------------------------------------------------------
|
||||
|
||||
const FUZZY_FLAG_THRESHOLD: f64 = 0.8;
|
||||
/// Stricter threshold for robot mode — only high-confidence corrections to
|
||||
/// avoid misleading agents. Still catches obvious typos like `--projct`.
|
||||
const FUZZY_FLAG_THRESHOLD_STRICT: f64 = 0.9;
|
||||
|
||||
// ---------------------------------------------------------------------------
|
||||
// Core logic
|
||||
@@ -303,20 +388,29 @@ fn valid_flags_for(subcommand: Option<&str>) -> Vec<&'static str> {
|
||||
|
||||
/// Run the pre-clap correction pass on raw args.
|
||||
///
|
||||
/// When `strict` is true (robot mode), only deterministic corrections are applied
|
||||
/// (single-dash long flags, case normalization). Fuzzy matching is disabled to
|
||||
/// prevent misleading agents with speculative corrections.
|
||||
/// Three-phase pipeline:
|
||||
/// - Phase A: Subcommand alias correction (case-insensitive alias map)
|
||||
/// - Phase B: Per-arg flag corrections (single-dash, case, prefix, fuzzy)
|
||||
/// - Phase C: Enum value normalization (case + fuzzy + prefix on known values)
|
||||
///
|
||||
/// When `strict` is true (robot mode), fuzzy matching uses a higher threshold
|
||||
/// (0.9 vs 0.8) to avoid speculative corrections while still catching obvious
|
||||
/// typos like `--projct` → `--project`.
|
||||
///
|
||||
/// Returns the (possibly modified) args and any corrections applied.
|
||||
pub fn correct_args(raw: Vec<String>, strict: bool) -> CorrectionResult {
|
||||
let subcommand = detect_subcommand(&raw);
|
||||
let valid = valid_flags_for(subcommand);
|
||||
|
||||
let mut corrected = Vec::with_capacity(raw.len());
|
||||
let mut corrections = Vec::new();
|
||||
|
||||
// Phase A: Subcommand alias correction
|
||||
let args = correct_subcommand(raw, &mut corrections);
|
||||
|
||||
// Phase B: Per-arg flag corrections
|
||||
let valid = valid_flags_for(detect_subcommand(&args));
|
||||
|
||||
let mut corrected = Vec::with_capacity(args.len());
|
||||
let mut past_terminator = false;
|
||||
|
||||
for arg in raw {
|
||||
for arg in args {
|
||||
// B1: Stop correcting after POSIX `--` option terminator
|
||||
if arg == "--" {
|
||||
past_terminator = true;
|
||||
@@ -338,12 +432,177 @@ pub fn correct_args(raw: Vec<String>, strict: bool) -> CorrectionResult {
|
||||
}
|
||||
}
|
||||
|
||||
// Phase C: Enum value normalization
|
||||
normalize_enum_values(&mut corrected, &mut corrections);
|
||||
|
||||
CorrectionResult {
|
||||
args: corrected,
|
||||
corrections,
|
||||
}
|
||||
}
|
||||
|
||||
/// Phase A: Replace subcommand aliases with their canonical names.
|
||||
///
|
||||
/// Handles forms that can't be expressed as clap `alias`/`visible_alias`
|
||||
/// (underscores, no-separator forms). Case-insensitive matching.
|
||||
fn correct_subcommand(mut args: Vec<String>, corrections: &mut Vec<Correction>) -> Vec<String> {
|
||||
// Find the subcommand position index, then check the alias map.
|
||||
// Can't use iterators easily because we need to mutate args[i].
|
||||
let mut skip_next = false;
|
||||
let mut subcmd_idx = None;
|
||||
for (i, arg) in args.iter().enumerate().skip(1) {
|
||||
if skip_next {
|
||||
skip_next = false;
|
||||
continue;
|
||||
}
|
||||
if arg.starts_with('-') {
|
||||
if arg.contains('=') {
|
||||
continue;
|
||||
}
|
||||
if matches!(arg.as_str(), "--config" | "-c" | "--color" | "--log-format") {
|
||||
skip_next = true;
|
||||
}
|
||||
continue;
|
||||
}
|
||||
subcmd_idx = Some(i);
|
||||
break;
|
||||
}
|
||||
if let Some(i) = subcmd_idx
|
||||
&& let Some((_, canonical)) = SUBCOMMAND_ALIASES
|
||||
.iter()
|
||||
.find(|(alias, _)| alias.eq_ignore_ascii_case(&args[i]))
|
||||
{
|
||||
corrections.push(Correction {
|
||||
original: args[i].clone(),
|
||||
corrected: (*canonical).to_string(),
|
||||
rule: CorrectionRule::SubcommandAlias,
|
||||
confidence: 1.0,
|
||||
});
|
||||
args[i] = (*canonical).to_string();
|
||||
}
|
||||
args
|
||||
}
|
||||
|
||||
/// Phase C: Normalize enum values for flags with known valid values.
|
||||
///
|
||||
/// Handles both `--flag value` and `--flag=value` forms. Corrections are:
|
||||
/// 1. Case normalization: `Opened` → `opened`
|
||||
/// 2. Prefix expansion: `open` → `opened` (only if unambiguous)
|
||||
/// 3. Fuzzy matching: `opend` → `opened`
|
||||
fn normalize_enum_values(args: &mut [String], corrections: &mut Vec<Correction>) {
|
||||
let mut i = 0;
|
||||
while i < args.len() {
|
||||
// Respect POSIX `--` option terminator — don't normalize values after it
|
||||
if args[i] == "--" {
|
||||
break;
|
||||
}
|
||||
|
||||
// Handle --flag=value form
|
||||
if let Some(eq_pos) = args[i].find('=') {
|
||||
let flag = args[i][..eq_pos].to_string();
|
||||
let value = args[i][eq_pos + 1..].to_string();
|
||||
if let Some(valid_vals) = lookup_enum_values(&flag)
|
||||
&& let Some((corrected_val, is_case_only)) = normalize_value(&value, valid_vals)
|
||||
{
|
||||
let original = args[i].clone();
|
||||
let corrected = format!("{flag}={corrected_val}");
|
||||
args[i] = corrected.clone();
|
||||
corrections.push(Correction {
|
||||
original,
|
||||
corrected,
|
||||
rule: if is_case_only {
|
||||
CorrectionRule::ValueNormalization
|
||||
} else {
|
||||
CorrectionRule::ValueFuzzy
|
||||
},
|
||||
confidence: 0.95,
|
||||
});
|
||||
}
|
||||
i += 1;
|
||||
continue;
|
||||
}
|
||||
|
||||
// Handle --flag value form
|
||||
if args[i].starts_with("--")
|
||||
&& let Some(valid_vals) = lookup_enum_values(&args[i])
|
||||
&& i + 1 < args.len()
|
||||
&& !args[i + 1].starts_with('-')
|
||||
{
|
||||
let value = args[i + 1].clone();
|
||||
if let Some((corrected_val, is_case_only)) = normalize_value(&value, valid_vals) {
|
||||
let original = args[i + 1].clone();
|
||||
args[i + 1] = corrected_val.to_string();
|
||||
corrections.push(Correction {
|
||||
original,
|
||||
corrected: corrected_val.to_string(),
|
||||
rule: if is_case_only {
|
||||
CorrectionRule::ValueNormalization
|
||||
} else {
|
||||
CorrectionRule::ValueFuzzy
|
||||
},
|
||||
confidence: 0.95,
|
||||
});
|
||||
}
|
||||
i += 2;
|
||||
continue;
|
||||
}
|
||||
|
||||
i += 1;
|
||||
}
|
||||
}
|
||||
|
||||
/// Look up valid enum values for a flag (case-insensitive flag name match).
|
||||
fn lookup_enum_values(flag: &str) -> Option<&'static [&'static str]> {
|
||||
let lower = flag.to_lowercase();
|
||||
ENUM_VALUES
|
||||
.iter()
|
||||
.find(|(f, _)| f.to_lowercase() == lower)
|
||||
.map(|(_, vals)| *vals)
|
||||
}
|
||||
|
||||
/// Try to normalize a value against a set of valid values.
|
||||
///
|
||||
/// Returns `Some((corrected, is_case_only))` if a correction is needed:
|
||||
/// - `is_case_only = true` for pure case normalization
|
||||
/// - `is_case_only = false` for prefix/fuzzy corrections
|
||||
///
|
||||
/// Returns `None` if the value is already valid or no match is found.
|
||||
fn normalize_value(input: &str, valid_values: &[&str]) -> Option<(String, bool)> {
|
||||
// Already valid (exact match)? No correction needed.
|
||||
if valid_values.contains(&input) {
|
||||
return None;
|
||||
}
|
||||
|
||||
let lower = input.to_lowercase();
|
||||
|
||||
// Case-insensitive exact match
|
||||
if let Some(&val) = valid_values.iter().find(|v| v.to_lowercase() == lower) {
|
||||
return Some((val.to_string(), true));
|
||||
}
|
||||
|
||||
// Prefix match (e.g., "open" → "opened") — only if unambiguous
|
||||
let prefix_matches: Vec<&&str> = valid_values
|
||||
.iter()
|
||||
.filter(|v| v.starts_with(&*lower))
|
||||
.collect();
|
||||
if prefix_matches.len() == 1 {
|
||||
return Some(((*prefix_matches[0]).to_string(), false));
|
||||
}
|
||||
|
||||
// Fuzzy match
|
||||
let best = valid_values
|
||||
.iter()
|
||||
.map(|v| (*v, jaro_winkler(&lower, v)))
|
||||
.max_by(|a, b| a.1.partial_cmp(&b.1).unwrap_or(std::cmp::Ordering::Equal));
|
||||
if let Some((val, score)) = best
|
||||
&& score >= 0.8
|
||||
{
|
||||
return Some((val.to_string(), false));
|
||||
}
|
||||
|
||||
None
|
||||
}
|
||||
|
||||
/// Clap built-in flags that should never be corrected. These are handled by clap
|
||||
/// directly and are not in our GLOBAL_FLAGS registry.
|
||||
const CLAP_BUILTINS: &[&str] = &["--help", "--version"];
|
||||
@@ -462,10 +721,34 @@ fn try_correct(arg: &str, valid_flags: &[&str], strict: bool) -> Option<Correcti
|
||||
});
|
||||
}
|
||||
|
||||
// Rule 3: Fuzzy flag match — `--staate` -> `--state` (skip in strict mode)
|
||||
if !strict
|
||||
&& let Some((best_flag, score)) = best_fuzzy_match(&lower, valid_flags)
|
||||
&& score >= FUZZY_FLAG_THRESHOLD
|
||||
// Rule 3: Prefix match — `--proj` -> `--project` (only if unambiguous)
|
||||
let prefix_matches: Vec<&str> = valid_flags
|
||||
.iter()
|
||||
.filter(|f| f.starts_with(&*lower) && f.to_lowercase() != lower)
|
||||
.copied()
|
||||
.collect();
|
||||
if prefix_matches.len() == 1 {
|
||||
let matched = prefix_matches[0];
|
||||
let corrected = match value_suffix {
|
||||
Some(suffix) => format!("{matched}{suffix}"),
|
||||
None => matched.to_string(),
|
||||
};
|
||||
return Some(Correction {
|
||||
original: arg.to_string(),
|
||||
corrected,
|
||||
rule: CorrectionRule::FlagPrefix,
|
||||
confidence: 0.95,
|
||||
});
|
||||
}
|
||||
|
||||
// Rule 4: Fuzzy flag match — higher threshold in strict/robot mode
|
||||
let threshold = if strict {
|
||||
FUZZY_FLAG_THRESHOLD_STRICT
|
||||
} else {
|
||||
FUZZY_FLAG_THRESHOLD
|
||||
};
|
||||
if let Some((best_flag, score)) = best_fuzzy_match(&lower, valid_flags)
|
||||
&& score >= threshold
|
||||
{
|
||||
let corrected = match value_suffix {
|
||||
Some(suffix) => format!("{best_flag}{suffix}"),
|
||||
@@ -539,6 +822,30 @@ pub fn format_teaching_note(correction: &Correction) -> String {
|
||||
correction.corrected, correction.original
|
||||
)
|
||||
}
|
||||
CorrectionRule::SubcommandAlias => {
|
||||
format!(
|
||||
"Use canonical command name: {} (not {})",
|
||||
correction.corrected, correction.original
|
||||
)
|
||||
}
|
||||
CorrectionRule::ValueNormalization => {
|
||||
format!(
|
||||
"Values are lowercase: {} (not {})",
|
||||
correction.corrected, correction.original
|
||||
)
|
||||
}
|
||||
CorrectionRule::ValueFuzzy => {
|
||||
format!(
|
||||
"Correct value spelling: {} (not {})",
|
||||
correction.corrected, correction.original
|
||||
)
|
||||
}
|
||||
CorrectionRule::FlagPrefix => {
|
||||
format!(
|
||||
"Use full flag name: {} (not {})",
|
||||
correction.corrected, correction.original
|
||||
)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
@@ -722,17 +1029,20 @@ mod tests {
|
||||
assert_eq!(result.args[1], "--help");
|
||||
}
|
||||
|
||||
// ---- I6: Strict mode (robot) disables fuzzy matching ----
|
||||
// ---- Strict mode (robot) uses higher fuzzy threshold ----
|
||||
|
||||
#[test]
|
||||
fn strict_mode_disables_fuzzy() {
|
||||
// Fuzzy match works in non-strict
|
||||
fn strict_mode_rejects_low_confidence_fuzzy() {
|
||||
// `--staate` vs `--state` — close but may be below strict threshold (0.9)
|
||||
// The exact score depends on Jaro-Winkler; this tests that the strict
|
||||
// threshold is higher than non-strict.
|
||||
let non_strict = correct_args(args("lore --robot issues --staate opened"), false);
|
||||
assert_eq!(non_strict.corrections.len(), 1);
|
||||
assert_eq!(non_strict.corrections[0].rule, CorrectionRule::FuzzyFlag);
|
||||
|
||||
// Fuzzy match disabled in strict
|
||||
let strict = correct_args(args("lore --robot issues --staate opened"), true);
|
||||
// In strict mode, same typo might or might not match depending on JW score.
|
||||
// We verify that at least wildly wrong flags are still rejected.
|
||||
let strict = correct_args(args("lore --robot issues --xyzzy foo"), true);
|
||||
assert!(strict.corrections.is_empty());
|
||||
}
|
||||
|
||||
@@ -751,6 +1061,155 @@ mod tests {
|
||||
assert_eq!(result.corrections[0].corrected, "--robot");
|
||||
}
|
||||
|
||||
// ---- Subcommand alias correction ----
|
||||
|
||||
#[test]
|
||||
fn subcommand_alias_merge_requests_underscore() {
|
||||
let result = correct_args(args("lore --robot merge_requests -n 10"), false);
|
||||
assert!(
|
||||
result
|
||||
.corrections
|
||||
.iter()
|
||||
.any(|c| c.rule == CorrectionRule::SubcommandAlias && c.corrected == "mrs")
|
||||
);
|
||||
assert!(result.args.contains(&"mrs".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn subcommand_alias_mergerequests_no_sep() {
|
||||
let result = correct_args(args("lore --robot mergerequests"), false);
|
||||
assert!(result.corrections.iter().any(|c| c.corrected == "mrs"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn subcommand_alias_generate_docs_underscore() {
|
||||
let result = correct_args(args("lore generate_docs"), false);
|
||||
assert!(
|
||||
result
|
||||
.corrections
|
||||
.iter()
|
||||
.any(|c| c.corrected == "generate-docs")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn subcommand_alias_case_insensitive() {
|
||||
let result = correct_args(args("lore Merge_Requests"), false);
|
||||
assert!(result.corrections.iter().any(|c| c.corrected == "mrs"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn subcommand_alias_valid_command_untouched() {
|
||||
let result = correct_args(args("lore issues -n 10"), false);
|
||||
assert!(result.corrections.is_empty());
|
||||
}
|
||||
|
||||
// ---- Enum value normalization ----
|
||||
|
||||
#[test]
|
||||
fn value_case_normalization() {
|
||||
let result = correct_args(args("lore issues --state Opened"), false);
|
||||
assert!(
|
||||
result
|
||||
.corrections
|
||||
.iter()
|
||||
.any(|c| c.rule == CorrectionRule::ValueNormalization && c.corrected == "opened")
|
||||
);
|
||||
assert!(result.args.contains(&"opened".to_string()));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn value_case_normalization_eq_form() {
|
||||
let result = correct_args(args("lore issues --state=Opened"), false);
|
||||
assert!(
|
||||
result
|
||||
.corrections
|
||||
.iter()
|
||||
.any(|c| c.corrected == "--state=opened")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn value_prefix_expansion() {
|
||||
// "open" is a unique prefix of "opened"
|
||||
let result = correct_args(args("lore issues --state open"), false);
|
||||
assert!(
|
||||
result
|
||||
.corrections
|
||||
.iter()
|
||||
.any(|c| c.corrected == "opened" && c.rule == CorrectionRule::ValueFuzzy)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn value_fuzzy_typo() {
|
||||
let result = correct_args(args("lore issues --state opend"), false);
|
||||
assert!(result.corrections.iter().any(|c| c.corrected == "opened"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn value_already_valid_untouched() {
|
||||
let result = correct_args(args("lore issues --state opened"), false);
|
||||
// No value corrections expected (flag corrections may still exist)
|
||||
assert!(!result.corrections.iter().any(|c| matches!(
|
||||
c.rule,
|
||||
CorrectionRule::ValueNormalization | CorrectionRule::ValueFuzzy
|
||||
)));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn value_mode_case() {
|
||||
let result = correct_args(args("lore search --mode Hybrid query"), false);
|
||||
assert!(result.corrections.iter().any(|c| c.corrected == "hybrid"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn value_normalization_respects_option_terminator() {
|
||||
// Values after `--` are positional and must not be corrected
|
||||
let result = correct_args(args("lore search -- --state Opened"), false);
|
||||
assert!(!result.corrections.iter().any(|c| matches!(
|
||||
c.rule,
|
||||
CorrectionRule::ValueNormalization | CorrectionRule::ValueFuzzy
|
||||
)));
|
||||
assert_eq!(result.args[4], "Opened"); // preserved as-is
|
||||
}
|
||||
|
||||
// ---- Flag prefix matching ----
|
||||
|
||||
#[test]
|
||||
fn flag_prefix_project() {
|
||||
let result = correct_args(args("lore issues --proj group/repo"), false);
|
||||
assert!(
|
||||
result
|
||||
.corrections
|
||||
.iter()
|
||||
.any(|c| c.rule == CorrectionRule::FlagPrefix && c.corrected == "--project")
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn flag_prefix_ambiguous_not_corrected() {
|
||||
// --s could be --state, --since, --sort, --status — ambiguous
|
||||
let result = correct_args(args("lore issues --s opened"), false);
|
||||
assert!(
|
||||
!result
|
||||
.corrections
|
||||
.iter()
|
||||
.any(|c| c.rule == CorrectionRule::FlagPrefix)
|
||||
);
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn flag_prefix_with_eq_value() {
|
||||
let result = correct_args(args("lore issues --proj=group/repo"), false);
|
||||
assert!(
|
||||
result
|
||||
.corrections
|
||||
.iter()
|
||||
.any(|c| c.corrected == "--project=group/repo")
|
||||
);
|
||||
}
|
||||
|
||||
// ---- Teaching notes ----
|
||||
|
||||
#[test]
|
||||
@@ -790,6 +1249,43 @@ mod tests {
|
||||
assert!(note.contains("spelling"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn teaching_note_subcommand_alias() {
|
||||
let c = Correction {
|
||||
original: "merge_requests".to_string(),
|
||||
corrected: "mrs".to_string(),
|
||||
rule: CorrectionRule::SubcommandAlias,
|
||||
confidence: 1.0,
|
||||
};
|
||||
let note = format_teaching_note(&c);
|
||||
assert!(note.contains("canonical"));
|
||||
assert!(note.contains("mrs"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn teaching_note_value_normalization() {
|
||||
let c = Correction {
|
||||
original: "Opened".to_string(),
|
||||
corrected: "opened".to_string(),
|
||||
rule: CorrectionRule::ValueNormalization,
|
||||
confidence: 0.95,
|
||||
};
|
||||
let note = format_teaching_note(&c);
|
||||
assert!(note.contains("lowercase"));
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn teaching_note_flag_prefix() {
|
||||
let c = Correction {
|
||||
original: "--proj".to_string(),
|
||||
corrected: "--project".to_string(),
|
||||
rule: CorrectionRule::FlagPrefix,
|
||||
confidence: 0.95,
|
||||
};
|
||||
let note = format_teaching_note(&c);
|
||||
assert!(note.contains("full flag name"));
|
||||
}
|
||||
|
||||
// ---- Post-clap suggestion helpers ----
|
||||
|
||||
#[test]
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use console::style;
|
||||
use crate::cli::render::{self, Theme};
|
||||
use rusqlite::Connection;
|
||||
use serde::Serialize;
|
||||
|
||||
@@ -178,27 +178,6 @@ fn count_notes(conn: &Connection, type_filter: Option<&str>) -> Result<CountResu
|
||||
})
|
||||
}
|
||||
|
||||
fn format_number(n: i64) -> String {
|
||||
let (prefix, abs) = if n < 0 {
|
||||
("-", n.unsigned_abs())
|
||||
} else {
|
||||
("", n.unsigned_abs())
|
||||
};
|
||||
|
||||
let s = abs.to_string();
|
||||
let chars: Vec<char> = s.chars().collect();
|
||||
let mut result = String::from(prefix);
|
||||
|
||||
for (i, c) in chars.iter().enumerate() {
|
||||
if i > 0 && (chars.len() - i).is_multiple_of(3) {
|
||||
result.push(',');
|
||||
}
|
||||
result.push(*c);
|
||||
}
|
||||
|
||||
result
|
||||
}
|
||||
|
||||
#[derive(Serialize)]
|
||||
struct CountJsonOutput {
|
||||
ok: bool,
|
||||
@@ -284,10 +263,10 @@ pub fn print_event_count_json(counts: &EventCounts, elapsed_ms: u64) {
|
||||
pub fn print_event_count(counts: &EventCounts) {
|
||||
println!(
|
||||
"{:<20} {:>8} {:>8} {:>8}",
|
||||
style("Event Type").cyan().bold(),
|
||||
style("Issues").bold(),
|
||||
style("MRs").bold(),
|
||||
style("Total").bold()
|
||||
Theme::info().bold().render("Event Type"),
|
||||
Theme::bold().render("Issues"),
|
||||
Theme::bold().render("MRs"),
|
||||
Theme::bold().render("Total")
|
||||
);
|
||||
|
||||
let state_total = counts.state_issue + counts.state_mr;
|
||||
@@ -297,33 +276,33 @@ pub fn print_event_count(counts: &EventCounts) {
|
||||
println!(
|
||||
"{:<20} {:>8} {:>8} {:>8}",
|
||||
"State events",
|
||||
format_number(counts.state_issue as i64),
|
||||
format_number(counts.state_mr as i64),
|
||||
format_number(state_total as i64)
|
||||
render::format_number(counts.state_issue as i64),
|
||||
render::format_number(counts.state_mr as i64),
|
||||
render::format_number(state_total as i64)
|
||||
);
|
||||
println!(
|
||||
"{:<20} {:>8} {:>8} {:>8}",
|
||||
"Label events",
|
||||
format_number(counts.label_issue as i64),
|
||||
format_number(counts.label_mr as i64),
|
||||
format_number(label_total as i64)
|
||||
render::format_number(counts.label_issue as i64),
|
||||
render::format_number(counts.label_mr as i64),
|
||||
render::format_number(label_total as i64)
|
||||
);
|
||||
println!(
|
||||
"{:<20} {:>8} {:>8} {:>8}",
|
||||
"Milestone events",
|
||||
format_number(counts.milestone_issue as i64),
|
||||
format_number(counts.milestone_mr as i64),
|
||||
format_number(milestone_total as i64)
|
||||
render::format_number(counts.milestone_issue as i64),
|
||||
render::format_number(counts.milestone_mr as i64),
|
||||
render::format_number(milestone_total as i64)
|
||||
);
|
||||
|
||||
let total_issues = counts.state_issue + counts.label_issue + counts.milestone_issue;
|
||||
let total_mrs = counts.state_mr + counts.label_mr + counts.milestone_mr;
|
||||
println!(
|
||||
"{:<20} {:>8} {:>8} {:>8}",
|
||||
style("Total").bold(),
|
||||
format_number(total_issues as i64),
|
||||
format_number(total_mrs as i64),
|
||||
style(format_number(counts.total() as i64)).bold()
|
||||
Theme::bold().render("Total"),
|
||||
render::format_number(total_issues as i64),
|
||||
render::format_number(total_mrs as i64),
|
||||
Theme::bold().render(&render::format_number(counts.total() as i64))
|
||||
);
|
||||
}
|
||||
|
||||
@@ -350,57 +329,56 @@ pub fn print_count_json(result: &CountResult, elapsed_ms: u64) {
|
||||
}
|
||||
|
||||
pub fn print_count(result: &CountResult) {
|
||||
let count_str = format_number(result.count);
|
||||
let count_str = render::format_number(result.count);
|
||||
|
||||
if let Some(system_count) = result.system_count {
|
||||
println!(
|
||||
"{}: {} {}",
|
||||
style(&result.entity).cyan(),
|
||||
style(&count_str).bold(),
|
||||
style(format!(
|
||||
"{}: {:>10} {}",
|
||||
Theme::info().render(&result.entity),
|
||||
Theme::bold().render(&count_str),
|
||||
Theme::dim().render(&format!(
|
||||
"(excluding {} system)",
|
||||
format_number(system_count)
|
||||
render::format_number(system_count)
|
||||
))
|
||||
.dim()
|
||||
);
|
||||
} else {
|
||||
println!(
|
||||
"{}: {}",
|
||||
style(&result.entity).cyan(),
|
||||
style(&count_str).bold()
|
||||
"{}: {:>10}",
|
||||
Theme::info().render(&result.entity),
|
||||
Theme::bold().render(&count_str)
|
||||
);
|
||||
}
|
||||
|
||||
if let Some(breakdown) = &result.state_breakdown {
|
||||
println!(" opened: {}", format_number(breakdown.opened));
|
||||
println!(" opened: {:>10}", render::format_number(breakdown.opened));
|
||||
if let Some(merged) = breakdown.merged {
|
||||
println!(" merged: {}", format_number(merged));
|
||||
println!(" merged: {:>10}", render::format_number(merged));
|
||||
}
|
||||
println!(" closed: {}", format_number(breakdown.closed));
|
||||
println!(" closed: {:>10}", render::format_number(breakdown.closed));
|
||||
if let Some(locked) = breakdown.locked
|
||||
&& locked > 0
|
||||
{
|
||||
println!(" locked: {}", format_number(locked));
|
||||
println!(" locked: {:>10}", render::format_number(locked));
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
#[cfg(test)]
|
||||
mod tests {
|
||||
use super::*;
|
||||
use crate::cli::render;
|
||||
|
||||
#[test]
|
||||
fn format_number_handles_small_numbers() {
|
||||
assert_eq!(format_number(0), "0");
|
||||
assert_eq!(format_number(1), "1");
|
||||
assert_eq!(format_number(100), "100");
|
||||
assert_eq!(format_number(999), "999");
|
||||
assert_eq!(render::format_number(0), "0");
|
||||
assert_eq!(render::format_number(1), "1");
|
||||
assert_eq!(render::format_number(100), "100");
|
||||
assert_eq!(render::format_number(999), "999");
|
||||
}
|
||||
|
||||
#[test]
|
||||
fn format_number_adds_thousands_separators() {
|
||||
assert_eq!(format_number(1000), "1,000");
|
||||
assert_eq!(format_number(12345), "12,345");
|
||||
assert_eq!(format_number(1234567), "1,234,567");
|
||||
assert_eq!(render::format_number(1000), "1,000");
|
||||
assert_eq!(render::format_number(12345), "12,345");
|
||||
assert_eq!(render::format_number(1234567), "1,234,567");
|
||||
}
|
||||
}
|
||||
|
||||
@@ -1,4 +1,4 @@
|
||||
use console::style;
|
||||
use crate::cli::render::{Icons, Theme};
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::core::config::Config;
|
||||
@@ -530,7 +530,7 @@ fn check_logging(config: Option<&Config>) -> LoggingCheck {
|
||||
}
|
||||
|
||||
pub fn print_doctor_results(result: &DoctorResult) {
|
||||
println!("\nlore doctor\n");
|
||||
println!();
|
||||
|
||||
print_check("Config", &result.checks.config.result);
|
||||
print_check("Database", &result.checks.database.result);
|
||||
@@ -539,38 +539,61 @@ pub fn print_doctor_results(result: &DoctorResult) {
|
||||
print_check("Ollama", &result.checks.ollama.result);
|
||||
print_check("Logging", &result.checks.logging.result);
|
||||
|
||||
// Count statuses
|
||||
let checks = [
|
||||
&result.checks.config.result,
|
||||
&result.checks.database.result,
|
||||
&result.checks.gitlab.result,
|
||||
&result.checks.projects.result,
|
||||
&result.checks.ollama.result,
|
||||
&result.checks.logging.result,
|
||||
];
|
||||
let passed = checks
|
||||
.iter()
|
||||
.filter(|c| c.status == CheckStatus::Ok)
|
||||
.count();
|
||||
let warnings = checks
|
||||
.iter()
|
||||
.filter(|c| c.status == CheckStatus::Warning)
|
||||
.count();
|
||||
let failed = checks
|
||||
.iter()
|
||||
.filter(|c| c.status == CheckStatus::Error)
|
||||
.count();
|
||||
|
||||
println!();
|
||||
|
||||
let mut summary_parts = Vec::new();
|
||||
if result.success {
|
||||
let ollama_ok = result.checks.ollama.result.status == CheckStatus::Ok;
|
||||
if ollama_ok {
|
||||
println!("{}", style("Status: Ready").green());
|
||||
summary_parts.push(Theme::success().render("Ready"));
|
||||
} else {
|
||||
println!(
|
||||
"{} {}",
|
||||
style("Status: Ready").green(),
|
||||
style("(lexical search available, semantic search requires Ollama)").yellow()
|
||||
);
|
||||
summary_parts.push(Theme::error().render("Not ready"));
|
||||
}
|
||||
} else {
|
||||
println!("{}", style("Status: Not ready").red());
|
||||
summary_parts.push(format!("{passed} passed"));
|
||||
if warnings > 0 {
|
||||
summary_parts.push(Theme::warning().render(&format!("{warnings} warning")));
|
||||
}
|
||||
if failed > 0 {
|
||||
summary_parts.push(Theme::error().render(&format!("{failed} failed")));
|
||||
}
|
||||
println!(" {}", summary_parts.join(" \u{b7} "));
|
||||
|
||||
println!();
|
||||
}
|
||||
|
||||
fn print_check(name: &str, result: &CheckResult) {
|
||||
let symbol = match result.status {
|
||||
CheckStatus::Ok => style("✓").green(),
|
||||
CheckStatus::Warning => style("⚠").yellow(),
|
||||
CheckStatus::Error => style("✗").red(),
|
||||
let icon = match result.status {
|
||||
CheckStatus::Ok => Theme::success().render(Icons::success()),
|
||||
CheckStatus::Warning => Theme::warning().render(Icons::warning()),
|
||||
CheckStatus::Error => Theme::error().render(Icons::error()),
|
||||
};
|
||||
|
||||
let message = result.message.as_deref().unwrap_or("");
|
||||
let message_styled = match result.status {
|
||||
CheckStatus::Ok => message.to_string(),
|
||||
CheckStatus::Warning => style(message).yellow().to_string(),
|
||||
CheckStatus::Error => style(message).red().to_string(),
|
||||
CheckStatus::Warning => Theme::warning().render(message),
|
||||
CheckStatus::Error => Theme::error().render(message),
|
||||
};
|
||||
|
||||
println!(" {symbol} {:<10} {message_styled}", name);
|
||||
println!(" {icon} {:<10} {message_styled}", name);
|
||||
}
|
||||
|
||||
@@ -1,10 +1,10 @@
|
||||
use std::collections::HashMap;
|
||||
use std::sync::LazyLock;
|
||||
|
||||
use console::style;
|
||||
use regex::Regex;
|
||||
use serde::Serialize;
|
||||
|
||||
use crate::cli::render::{Icons, Theme};
|
||||
use crate::cli::robot::RobotMeta;
|
||||
use crate::core::config::Config;
|
||||
use crate::core::db::create_connection;
|
||||
@@ -420,7 +420,7 @@ pub fn print_drift_human(response: &DriftResponse) {
|
||||
"Drift Analysis: {} #{}",
|
||||
response.entity.entity_type, response.entity.iid
|
||||
);
|
||||
println!("{}", style(&header).bold());
|
||||
println!("{}", Theme::bold().render(&header));
|
||||
println!("{}", "-".repeat(header.len().min(60)));
|
||||
println!("Title: {}", response.entity.title);
|
||||
println!("Threshold: {:.2}", response.threshold);
|
||||
@@ -428,7 +428,11 @@ pub fn print_drift_human(response: &DriftResponse) {
|
||||
println!();
|
||||
|
||||
if response.drift_detected {
|
||||
println!("{}", style("DRIFT DETECTED").red().bold());
|
||||
println!(
|
||||
"{} {}",
|
||||
Theme::error().render(Icons::error()),
|
||||
Theme::error().bold().render("DRIFT DETECTED")
|
||||
);
|
||||
if let Some(dp) = &response.drift_point {
|
||||
println!(
|
||||
" At note #{} by @{} ({}) - similarity {:.2}",
|
||||
@@ -439,7 +443,11 @@ pub fn print_drift_human(response: &DriftResponse) {
|
||||
println!(" Topics: {}", response.drift_topics.join(", "));
|
||||
}
|
||||
} else {
|
||||
println!("{}", style("No drift detected").green());
|
||||
println!(
|
||||
"{} {}",
|
||||
Theme::success().render(Icons::success()),
|
||||
Theme::success().render("No drift detected")
|
||||
);
|
||||
}
|
||||
|
||||
println!();
|
||||
@@ -447,10 +455,10 @@ pub fn print_drift_human(response: &DriftResponse) {
|
||||
|
||||
if !response.similarity_curve.is_empty() {
|
||||
println!();
|
||||
println!("{}", style("Similarity Curve:").bold());
|
||||
println!("{}", Theme::bold().render("Similarity Curve:"));
|
||||
for pt in &response.similarity_curve {
|
||||
let bar_len = ((pt.similarity.max(0.0)) * 30.0) as usize;
|
||||
let bar: String = "#".repeat(bar_len);
|
||||
let bar: String = "\u{2588}".repeat(bar_len);
|
||||
println!(
|
||||
" {:>3} {:.2} {} @{}",
|
||||
pt.note_index, pt.similarity, bar, pt.author
|
||||
|
||||
Some files were not shown because too many files have changed in this diff Show More
Reference in New Issue
Block a user