From fb40fdc6778cfb7cf02aa745c2d5d6174b318883 Mon Sep 17 00:00:00 2001 From: teernisse Date: Wed, 18 Feb 2026 22:56:24 -0500 Subject: [PATCH] =?UTF-8?q?feat(tui):=20Phase=203=20power=20features=20?= =?UTF-8?q?=E2=80=94=20Who,=20Search,=20Timeline,=20Trace,=20File=20Histor?= =?UTF-8?q?y=20screens?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Complete TUI Phase 3 implementation with all 5 power feature screens: - Who screen: 5 modes (expert/workload/reviews/active/overlap) with mode tabs, input bar, result rendering, and hint bar - Search screen: full-text search with result list and scoring display - Timeline screen: chronological event feed with time-relative display - Trace screen: file provenance chains with expand/collapse, rename tracking, and linked issues/discussions - File History screen: per-file MR timeline with rename chain display and discussion snippets Also includes: - Command palette overlay (fuzzy search) - Bootstrap screen (initial sync flow) - Action layer split from monolithic action.rs to per-screen modules - Entity and render cache infrastructure - Shared who_types module in core crate - All screens wired into view/mod.rs dispatch - 597 tests passing, clippy clean (pedantic + nursery), fmt clean --- .beads/issues.jsonl | 28 +- .beads/last-touched | 2 +- .liquid-mail.toml | 13 + AGENTS.md | 154 + CLAUDE.md | 943 ++++++ crates/lore-tui/src/action.rs | 2835 ------------------ crates/lore-tui/src/action/bootstrap.rs | 298 ++ crates/lore-tui/src/action/dashboard.rs | 485 +++ crates/lore-tui/src/action/file_history.rs | 383 +++ crates/lore-tui/src/action/issue_detail.rs | 611 ++++ crates/lore-tui/src/action/issue_list.rs | 532 ++++ crates/lore-tui/src/action/mod.rs | 29 + crates/lore-tui/src/action/mr_detail.rs | 694 +++++ crates/lore-tui/src/action/mr_list.rs | 629 ++++ crates/lore-tui/src/action/search.rs | 361 +++ crates/lore-tui/src/action/timeline.rs | 845 ++++++ crates/lore-tui/src/action/trace.rs | 
234 ++ crates/lore-tui/src/action/who.rs | 285 ++ crates/lore-tui/src/app/tests.rs | 49 + crates/lore-tui/src/app/update.rs | 169 +- crates/lore-tui/src/entity_cache.rs | 232 ++ crates/lore-tui/src/lib.rs | 36 + crates/lore-tui/src/message.rs | 257 +- crates/lore-tui/src/render_cache.rs | 252 ++ crates/lore-tui/src/state/bootstrap.rs | 160 + crates/lore-tui/src/state/command_palette.rs | 297 +- crates/lore-tui/src/state/file_history.rs | 364 +++ crates/lore-tui/src/state/mod.rs | 15 + crates/lore-tui/src/state/search.rs | 559 +++- crates/lore-tui/src/state/timeline.rs | 265 +- crates/lore-tui/src/state/trace.rs | 556 ++++ crates/lore-tui/src/state/who.rs | 512 +++- crates/lore-tui/src/view/bootstrap.rs | 134 + crates/lore-tui/src/view/command_palette.rs | 389 +++ crates/lore-tui/src/view/common/cross_ref.rs | 15 +- crates/lore-tui/src/view/file_history.rs | 578 ++++ crates/lore-tui/src/view/issue_detail.rs | 2 +- crates/lore-tui/src/view/mod.rs | 88 +- crates/lore-tui/src/view/mr_detail.rs | 12 +- crates/lore-tui/src/view/search.rs | 492 +++ crates/lore-tui/src/view/timeline.rs | 449 +++ crates/lore-tui/src/view/trace.rs | 627 ++++ crates/lore-tui/src/view/who.rs | 1049 +++++++ crates/lore-tui/tests/vertical_slice.rs | 636 ++++ 44 files changed, 14650 insertions(+), 2905 deletions(-) create mode 100644 .liquid-mail.toml create mode 100644 CLAUDE.md delete mode 100644 crates/lore-tui/src/action.rs create mode 100644 crates/lore-tui/src/action/bootstrap.rs create mode 100644 crates/lore-tui/src/action/dashboard.rs create mode 100644 crates/lore-tui/src/action/file_history.rs create mode 100644 crates/lore-tui/src/action/issue_detail.rs create mode 100644 crates/lore-tui/src/action/issue_list.rs create mode 100644 crates/lore-tui/src/action/mod.rs create mode 100644 crates/lore-tui/src/action/mr_detail.rs create mode 100644 crates/lore-tui/src/action/mr_list.rs create mode 100644 crates/lore-tui/src/action/search.rs create mode 100644 
crates/lore-tui/src/action/timeline.rs create mode 100644 crates/lore-tui/src/action/trace.rs create mode 100644 crates/lore-tui/src/action/who.rs create mode 100644 crates/lore-tui/src/entity_cache.rs create mode 100644 crates/lore-tui/src/render_cache.rs create mode 100644 crates/lore-tui/src/state/bootstrap.rs create mode 100644 crates/lore-tui/src/state/file_history.rs create mode 100644 crates/lore-tui/src/state/trace.rs create mode 100644 crates/lore-tui/src/view/bootstrap.rs create mode 100644 crates/lore-tui/src/view/command_palette.rs create mode 100644 crates/lore-tui/src/view/file_history.rs create mode 100644 crates/lore-tui/src/view/search.rs create mode 100644 crates/lore-tui/src/view/timeline.rs create mode 100644 crates/lore-tui/src/view/trace.rs create mode 100644 crates/lore-tui/src/view/who.rs create mode 100644 crates/lore-tui/tests/vertical_slice.rs diff --git a/.beads/issues.jsonl b/.beads/issues.jsonl index da33ad6..402d8c5 100644 --- a/.beads/issues.jsonl +++ b/.beads/issues.jsonl @@ -34,7 +34,7 @@ {"id":"bd-1df9","title":"Epic: TUI Phase 4 — Operations","description":"## Background\nPhase 4 adds operational screens: Sync (real-time progress + post-sync summary), Doctor/Stats (health checks), and CLI integration (lore tui command for binary delegation). 
The Sync screen is the most complex — it needs real-time streaming progress with backpressure handling.\n\n## Acceptance Criteria\n- [ ] Sync screen shows real-time progress during sync with per-lane indicators\n- [ ] Sync summary shows exact changed entities after completion\n- [ ] Doctor screen shows environment health checks\n- [ ] Stats screen shows database statistics\n- [ ] CLI integration: lore tui launches lore-tui binary via runtime delegation","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:01:44.603447Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.361318Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1df9","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1elx","title":"Implement run_embed_for_document_ids scoped embedding","description":"## Background\n\nCurrently `embed_documents()` in `src/embedding/pipeline.rs` uses `find_pending_documents()` to discover ALL documents that need embedding (no existing embedding, changed content_hash, or model mismatch). The surgical sync pipeline needs a scoped variant that only embeds specific document IDs — the ones returned by the scoped doc regeneration step (bd-hs6j).\n\nThe existing `embed_page()` private function handles the actual embedding work for a batch of `PendingDocument` structs. It calls `split_into_chunks`, sends batches to the OllamaClient, and writes embeddings + metadata to the DB. 
The scoped function can reuse this by constructing `PendingDocument` structs from the provided document IDs.\n\nKey types:\n- `PendingDocument { document_id: i64, content_text: String, content_hash: String }` (from `change_detector.rs`)\n- `EmbedResult { chunks_embedded, docs_embedded, failed, skipped }` (pipeline.rs:21)\n- `OllamaClient` for the actual embedding API calls\n- `ShutdownSignal` for cancellation support\n\n## Approach\n\nAdd `embed_documents_by_ids()` to `src/embedding/pipeline.rs`:\n\n```rust\npub struct EmbedForIdsResult {\n pub chunks_embedded: usize,\n pub docs_embedded: usize,\n pub failed: usize,\n pub skipped: usize,\n}\n\npub async fn embed_documents_by_ids(\n conn: &Connection,\n client: &OllamaClient,\n model_name: &str,\n concurrency: usize,\n document_ids: &[i64],\n signal: &ShutdownSignal,\n) -> Result\n```\n\nImplementation:\n1. If `document_ids` is empty, return immediately with zero counts.\n2. Load `PendingDocument` structs for the specified IDs. Query: `SELECT id, content_text, content_hash FROM documents WHERE id IN (...)`. Filter out documents that already have current embeddings (same content_hash, model, dims, chunk_max_bytes) — reuse the LEFT JOIN logic from `find_pending_documents` but with `WHERE d.id IN (?)` instead of `WHERE d.id > ?`.\n3. If no documents need embedding after filtering, return with skipped=len.\n4. Chunk into pages of `DB_PAGE_SIZE` (500).\n5. For each page, call `embed_page()` (reuse existing private function) within a SAVEPOINT.\n6. Handle cancellation via `signal.is_cancelled()` between pages.\n\nAlternative simpler approach: load all specified doc IDs into a temp table or use a parameterized IN clause, then let `embed_page` process them. 
Since the list is typically small (1-5 documents for surgical sync), a single page call suffices.\n\nExport from `src/embedding/mod.rs` if not already pub.\n\n## Acceptance Criteria\n\n- [ ] `embed_documents_by_ids` only embeds the specified document IDs, not all pending documents\n- [ ] Documents already embedded with current content_hash + model are skipped (not re-embedded)\n- [ ] Empty document_ids input returns immediately with zero counts\n- [ ] Cancellation via ShutdownSignal is respected between pages\n- [ ] SAVEPOINT/ROLLBACK semantics match existing `embed_documents` for data integrity\n- [ ] Ollama errors for individual documents are counted as failed, not fatal\n- [ ] Function is pub for use by orchestration (bd-1i4i)\n\n## Files\n\n- `src/embedding/pipeline.rs` (add new function + result struct)\n- `src/embedding/mod.rs` (export if needed)\n\n## TDD Anchor\n\nTests in `src/embedding/pipeline_tests.rs` (or new `src/embedding/scoped_embed_tests.rs`):\n\n```rust\n#[tokio::test]\nasync fn test_embed_by_ids_only_embeds_specified_docs() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n setup_ollama_mock(&mock).await;\n let client = OllamaClient::new(&mock.uri());\n\n // Insert 2 documents: A (id=1) and B (id=2)\n insert_test_document(&conn, 1, \"Content A\", \"hash_a\");\n insert_test_document(&conn, 2, \"Content B\", \"hash_b\");\n\n let signal = ShutdownSignal::new();\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1,\n &[1], // Only embed doc 1\n &signal,\n ).await.unwrap();\n\n assert_eq!(result.docs_embedded, 1);\n // Verify doc 1 has embeddings\n let count: i64 = conn.query_row(\n \"SELECT COUNT(*) FROM embedding_metadata WHERE document_id = 1\",\n [], |r| r.get(0),\n ).unwrap();\n assert!(count > 0);\n // Verify doc 2 has NO embeddings\n let count_b: i64 = conn.query_row(\n \"SELECT COUNT(*) FROM embedding_metadata WHERE document_id = 2\",\n [], |r| r.get(0),\n ).unwrap();\n 
assert_eq!(count_b, 0);\n}\n\n#[tokio::test]\nasync fn test_embed_by_ids_skips_already_embedded() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n setup_ollama_mock(&mock).await;\n let client = OllamaClient::new(&mock.uri());\n\n insert_test_document(&conn, 1, \"Content A\", \"hash_a\");\n let signal = ShutdownSignal::new();\n\n // Embed once\n embed_documents_by_ids(&conn, &client, \"nomic-embed-text\", 1, &[1], &signal).await.unwrap();\n // Embed again with same hash — should skip\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1, &[1], &signal,\n ).await.unwrap();\n assert_eq!(result.docs_embedded, 0);\n assert_eq!(result.skipped, 1);\n}\n\n#[tokio::test]\nasync fn test_embed_by_ids_empty_input() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n let client = OllamaClient::new(&mock.uri());\n let signal = ShutdownSignal::new();\n\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1, &[], &signal,\n ).await.unwrap();\n assert_eq!(result.docs_embedded, 0);\n assert_eq!(result.chunks_embedded, 0);\n}\n\n#[tokio::test]\nasync fn test_embed_by_ids_respects_cancellation() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n // Use delayed response to allow cancellation\n setup_slow_ollama_mock(&mock).await;\n let client = OllamaClient::new(&mock.uri());\n\n insert_test_document(&conn, 1, \"Content A\", \"hash_a\");\n let signal = ShutdownSignal::new();\n signal.cancel(); // Pre-cancel\n\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1, &[1], &signal,\n ).await.unwrap();\n assert_eq!(result.docs_embedded, 0);\n}\n```\n\n## Edge Cases\n\n- Document ID that does not exist in the documents table: query returns no rows, skipped silently.\n- Document with empty `content_text`: `split_into_chunks` may return 0 chunks, counted as skipped.\n- Ollama server unreachable: 
returns `OllamaUnavailable` error. Must not leave partial embeddings (SAVEPOINT rollback).\n- Very long document (>1500 bytes): gets chunked into multiple chunks by `split_into_chunks`. All chunks for one document must be embedded atomically.\n- Document already has embeddings but with different model: content_hash check passes but model mismatch detected — should re-embed.\n- Concurrent calls with overlapping document_ids: SAVEPOINT isolation prevents conflicts, last writer wins on embedding_metadata upsert.\n\n## Dependency Context\n\n- **Blocked by bd-hs6j**: Gets `document_ids` from scoped doc regeneration output\n- **Blocks bd-1i4i**: Orchestration function calls this as the final step of surgical sync\n- **Blocks bd-3jqx**: Integration tests verify embed isolation (only surgical docs get embedded)\n- **Uses existing internals**: `embed_page`, `PendingDocument`, `split_into_chunks`, `OllamaClient`, `ShutdownSignal`","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:16:43.680009Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:05:18.735382Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-1elx","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1elx","depends_on_id":"bd-3jqx","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-1ep","title":"Wire resource event fetching into sync pipeline","description":"## Background\nAfter issue/MR primary ingestion and discussion fetch, changed entities need resource_events jobs enqueued and drained. 
This is the integration point that connects the queue (bd-tir), API client (bd-sqw), DB upserts (bd-1uc), and config flag (bd-2e8).\n\n## Approach\nModify the sync pipeline to add two new phases after discussion sync:\n\n**Phase 1 — Enqueue during ingestion:**\nIn src/ingestion/orchestrator.rs, after each entity upsert (issue or MR), call:\n```rust\nif config.sync.fetch_resource_events {\n enqueue_job(conn, project_id, \"issue\", iid, local_id, \"resource_events\", None)?;\n}\n// For MRs, also enqueue mr_closes_issues (always) and mr_diffs (when fetchMrFileChanges)\n```\n\nThe \"changed entity\" detection uses the existing dirty tracker: if an entity was inserted or updated during this sync run, it gets enqueued. On --full sync, all entities are enqueued.\n\n**Phase 2 — Drain dependent queue:**\nAdd a new drain step in src/cli/commands/sync.rs (or new src/core/drain.rs), called after discussion sync:\n```rust\npub async fn drain_dependent_queue(\n conn: &Connection,\n client: &GitLabClient,\n config: &Config,\n progress: Option,\n) -> Result\n```\n\nFlow:\n1. reclaim_stale_locks(conn, config.sync.stale_lock_minutes)\n2. Loop: claim_jobs(conn, \"resource_events\", batch_size=10)\n3. For each job:\n a. Fetch 3 event types via client (fetch_issue_state_events etc.)\n b. Store via upsert functions (upsert_state_events etc.)\n c. complete_job(conn, job.id) on success\n d. fail_job(conn, job.id, error_msg) on failure\n4. Report progress: \"Fetching resource events... [N/M]\"\n5. 
Repeat until no more claimable jobs\n\n**Progress reporting:**\nAdd new ProgressEvent variants:\n```rust\nResourceEventsFetchStart { total: usize },\nResourceEventsFetchProgress { completed: usize, total: usize },\nResourceEventsFetchComplete { fetched: usize, failed: usize },\n```\n\n## Acceptance Criteria\n- [ ] Full sync enqueues resource_events jobs for all issues and MRs\n- [ ] Incremental sync only enqueues for entities changed since last sync\n- [ ] --no-events prevents enqueueing resource_events jobs\n- [ ] Drain step fetches all 3 event types per entity\n- [ ] Successful fetches stored and job completed\n- [ ] Failed fetches recorded with error, job retried on next sync\n- [ ] Stale locks reclaimed at drain start\n- [ ] Progress displayed: \"Fetching resource events... [N/M]\"\n- [ ] Robot mode progress suppressed (quiet mode)\n\n## Files\n- src/ingestion/orchestrator.rs (add enqueue calls during upsert)\n- src/cli/commands/sync.rs (add drain step after discussions)\n- src/core/drain.rs (new, optional — or inline in sync.rs)\n\n## TDD Loop\nRED: tests/sync_pipeline_tests.rs (or extend existing):\n- `test_sync_enqueues_resource_events_for_changed_entities` - mock sync, verify jobs enqueued\n- `test_sync_no_events_flag_skips_enqueue` - verify no jobs when flag false\n- `test_drain_completes_jobs_on_success` - mock API responses, verify jobs deleted\n- `test_drain_fails_jobs_on_error` - mock API failure, verify job attempts incremented\n\nNote: Full pipeline integration tests may need mock HTTP server. 
Start with unit tests on enqueue/drain logic using the real DB with mock API responses.\n\nGREEN: Implement enqueue hooks + drain step\n\nVERIFY: `cargo test sync -- --nocapture && cargo build`\n\n## Edge Cases\n- Entity deleted between enqueue and drain: API returns 404, fail_job with \"entity not found\" (retry won't help but backoff caps it)\n- Rate limiting during drain: GitLabRateLimited error should fail_job with retry (transient)\n- Network error during drain: GitLabNetworkError should fail_job with retry\n- Multiple sync runs competing: locked_at prevents double-processing; stale lock reclaim handles crashes\n- Drain should have a max iterations guard to prevent infinite loop if jobs keep failing and being retried within the same run","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:31:57.334527Z","created_by":"tayloreernisse","updated_at":"2026-02-03T17:46:51.336138Z","closed_at":"2026-02-03T17:46:51.336077Z","close_reason":"Implemented: enqueue + drain resource events in orchestrator, wired counts through ingest→sync pipeline, added progress events, 4 new tests, all 209 tests pass","compaction_level":0,"original_size":0,"labels":["gate-1","phase-b","pipeline"],"dependencies":[{"issue_id":"bd-1ep","depends_on_id":"bd-1uc","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ep","depends_on_id":"bd-2e8","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ep","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ep","depends_on_id":"bd-sqw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ep","depends_on_id":"bd-tir","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-1f5b","title":"Extract query functions from CLI to shared pub API","description":"## Background\nThe TUI's action.rs bridges to existing CLI 
query functions. To avoid code duplication, query functions need to be made accessible to the TUI crate. The who module was refactored on master into src/cli/commands/who/ with types.rs, expert.rs, workload.rs, reviews.rs, active.rs, overlap.rs. Query functions are currently pub(super) — visible within the who module but not from external crates.\n\n## Approach\n\n### Phase A: Move shared types to core (who)\nMove src/cli/commands/who/types.rs content to src/core/who_types.rs (or src/core/who/types.rs). These are pure data structs with zero logic — WhoRun, WhoResolvedInput, WhoResult enum, ExpertResult, WorkloadResult, ReviewsResult, ActiveResult, OverlapResult, and all nested structs. CLI re-exports from core. TUI imports from core.\n\n### Phase B: Promote query function visibility (who)\nChange pub(super) to pub on the 5 query functions:\n- src/cli/commands/who/expert.rs: query_expert(conn, path, project_id, since_ms, as_of_ms, limit, scoring, detail, explain_score, include_bots)\n- src/cli/commands/who/workload.rs: query_workload(conn, username, project_id, since_ms, limit, include_closed)\n- src/cli/commands/who/reviews.rs: query_reviews(conn, username, project_id, since_ms)\n- src/cli/commands/who/active.rs: query_active(conn, project_id, since_ms, limit, include_closed)\n- src/cli/commands/who/overlap.rs: query_overlap(conn, path, project_id, since_ms, limit)\n\nAlso promote helper: half_life_decay in expert.rs (pub(super) -> pub).\n\n### Phase C: Other command extractions\n- src/cli/commands/list.rs: make query_issues(), query_mrs() pub\n- src/cli/commands/show.rs: make query_issue_detail(), query_mr_detail() pub\n- src/cli/commands/search.rs: make run_search_query() pub\n- src/cli/commands/file_history.rs: extract run_file_history() query logic to pub fn (currently takes Config for DB path; split into query-only fn taking Connection)\n- src/cli/commands/trace.rs: make parse_trace_path() pub\n\n### Phase D: Re-export from who module\nUpdate 
src/cli/commands/who/mod.rs to re-export query functions as pub (not just pub(super)):\n```rust\npub use expert::query_expert;\npub use workload::query_workload;\npub use reviews::query_reviews;\npub use active::query_active;\npub use overlap::query_overlap;\n```\n\n## Acceptance Criteria\n- [ ] WhoResult, ExpertResult, WorkloadResult, ReviewsResult, ActiveResult, OverlapResult, and all nested structs live in src/core/ (not CLI)\n- [ ] CLI who module imports types from core (no duplication)\n- [ ] query_expert, query_workload, query_reviews, query_active, query_overlap are pub and callable from TUI crate\n- [ ] query_issues(), query_mrs() are pub\n- [ ] query_issue_detail(), query_mr_detail() are pub\n- [ ] run_search_query() is pub\n- [ ] run_file_history() query logic available as pub fn taking Connection (not Config)\n- [ ] parse_trace_path() is pub\n- [ ] Existing CLI behavior unchanged (no functional changes)\n- [ ] cargo test passes (no regressions)\n- [ ] cargo check --all-targets passes\n\n## Files\n- CREATE: src/core/who_types.rs (move types from who/types.rs)\n- MODIFY: src/core/mod.rs (add pub mod who_types)\n- MODIFY: src/cli/commands/who/types.rs (re-export from core)\n- MODIFY: src/cli/commands/who/mod.rs (pub use query functions)\n- MODIFY: src/cli/commands/who/expert.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/workload.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/reviews.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/active.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/overlap.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/list.rs (make query functions pub)\n- MODIFY: src/cli/commands/show.rs (make query functions pub)\n- MODIFY: src/cli/commands/search.rs (make search query pub)\n- MODIFY: src/cli/commands/file_history.rs (extract query logic)\n- MODIFY: src/cli/commands/trace.rs (make parse_trace_path pub)\n\n## TDD Anchor\nRED: In lore-tui action.rs, write test that imports 
lore::core::who_types::ExpertResult and lore::cli::commands::who::query_expert — assert it compiles.\nGREEN: Move types to core, promote visibility.\nVERIFY: cargo test --all-targets && cargo check --all-targets\n\n## Edge Cases\n- ScoringConfig dependency: query_expert takes &ScoringConfig from src/core/config.rs — TUI has access via Config\n- include_closed: only affects query_workload and query_active — other modes ignore it\n- file_history.rs run_file_history takes Config for DB path resolution — split into query_file_history(conn, ...) + run_file_history(config, ...) wrapper\n- Visibility changes are additive (non-breaking) — existing callers unaffected\n\n## Dependency Context\nThis modifies the main lore crate (stable Rust). The who module was refactored on master from a single who.rs file into src/cli/commands/who/ with types.rs + 5 mode files. Types are already cleanly separated in types.rs, making the move to core mechanical.\nRequired by: Who screen (bd-u7se), Trace screen (bd-2uzm), File History screen (bd-1up1), and all other TUI action.rs query bridges.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:06:25.285403Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:31:43.615250Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1f5b","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-1f5b","title":"Extract query functions from CLI to shared pub API","description":"## Background\nThe TUI's action.rs bridges to existing CLI query functions. To avoid code duplication, query functions need to be made accessible to the TUI crate. The who module was refactored on master into src/cli/commands/who/ with types.rs, expert.rs, workload.rs, reviews.rs, active.rs, overlap.rs. 
Query functions are currently pub(super) — visible within the who module but not from external crates.\n\n## Approach\n\n### Phase A: Move shared types to core (who)\nMove src/cli/commands/who/types.rs content to src/core/who_types.rs (or src/core/who/types.rs). These are pure data structs with zero logic — WhoRun, WhoResolvedInput, WhoResult enum, ExpertResult, WorkloadResult, ReviewsResult, ActiveResult, OverlapResult, and all nested structs. CLI re-exports from core. TUI imports from core.\n\n### Phase B: Promote query function visibility (who)\nChange pub(super) to pub on the 5 query functions:\n- src/cli/commands/who/expert.rs: query_expert(conn, path, project_id, since_ms, as_of_ms, limit, scoring, detail, explain_score, include_bots)\n- src/cli/commands/who/workload.rs: query_workload(conn, username, project_id, since_ms, limit, include_closed)\n- src/cli/commands/who/reviews.rs: query_reviews(conn, username, project_id, since_ms)\n- src/cli/commands/who/active.rs: query_active(conn, project_id, since_ms, limit, include_closed)\n- src/cli/commands/who/overlap.rs: query_overlap(conn, path, project_id, since_ms, limit)\n\nAlso promote helper: half_life_decay in expert.rs (pub(super) -> pub).\n\n### Phase C: Other command extractions\n- src/cli/commands/list.rs: make query_issues(), query_mrs() pub\n- src/cli/commands/show.rs: make query_issue_detail(), query_mr_detail() pub\n- src/cli/commands/search.rs: make run_search_query() pub\n- src/cli/commands/file_history.rs: extract run_file_history() query logic to pub fn (currently takes Config for DB path; split into query-only fn taking Connection)\n- src/cli/commands/trace.rs: make parse_trace_path() pub\n\n### Phase D: Re-export from who module\nUpdate src/cli/commands/who/mod.rs to re-export query functions as pub (not just pub(super)):\n```rust\npub use expert::query_expert;\npub use workload::query_workload;\npub use reviews::query_reviews;\npub use active::query_active;\npub use 
overlap::query_overlap;\n```\n\n## Acceptance Criteria\n- [ ] WhoResult, ExpertResult, WorkloadResult, ReviewsResult, ActiveResult, OverlapResult, and all nested structs live in src/core/ (not CLI)\n- [ ] CLI who module imports types from core (no duplication)\n- [ ] query_expert, query_workload, query_reviews, query_active, query_overlap are pub and callable from TUI crate\n- [ ] query_issues(), query_mrs() are pub\n- [ ] query_issue_detail(), query_mr_detail() are pub\n- [ ] run_search_query() is pub\n- [ ] run_file_history() query logic available as pub fn taking Connection (not Config)\n- [ ] parse_trace_path() is pub\n- [ ] Existing CLI behavior unchanged (no functional changes)\n- [ ] cargo test passes (no regressions)\n- [ ] cargo check --all-targets passes\n\n## Files\n- CREATE: src/core/who_types.rs (move types from who/types.rs)\n- MODIFY: src/core/mod.rs (add pub mod who_types)\n- MODIFY: src/cli/commands/who/types.rs (re-export from core)\n- MODIFY: src/cli/commands/who/mod.rs (pub use query functions)\n- MODIFY: src/cli/commands/who/expert.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/workload.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/reviews.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/active.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/overlap.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/list.rs (make query functions pub)\n- MODIFY: src/cli/commands/show.rs (make query functions pub)\n- MODIFY: src/cli/commands/search.rs (make search query pub)\n- MODIFY: src/cli/commands/file_history.rs (extract query logic)\n- MODIFY: src/cli/commands/trace.rs (make parse_trace_path pub)\n\n## TDD Anchor\nRED: In lore-tui action.rs, write test that imports lore::core::who_types::ExpertResult and lore::cli::commands::who::query_expert — assert it compiles.\nGREEN: Move types to core, promote visibility.\nVERIFY: cargo test --all-targets && cargo check --all-targets\n\n## Edge Cases\n- ScoringConfig dependency: 
query_expert takes &ScoringConfig from src/core/config.rs — TUI has access via Config\n- include_closed: only affects query_workload and query_active — other modes ignore it\n- file_history.rs run_file_history takes Config for DB path resolution — split into query_file_history(conn, ...) + run_file_history(config, ...) wrapper\n- Visibility changes are additive (non-breaking) — existing callers unaffected\n\n## Dependency Context\nThis modifies the main lore crate (stable Rust). The who module was refactored on master from a single who.rs file into src/cli/commands/who/ with types.rs + 5 mode files. Types are already cleanly separated in types.rs, making the move to core mechanical.\nRequired by: Who screen (bd-u7se), Trace screen (bd-2uzm), File History screen (bd-1up1), and all other TUI action.rs query bridges.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:06:25.285403Z","created_by":"tayloreernisse","updated_at":"2026-02-19T03:20:31.218124Z","closed_at":"2026-02-19T03:20:31.218072Z","close_reason":"Phases A+B+D complete: who_types.rs in core, 5 query fns pub, query_issues/query_mrs pub. All tests pass.","compaction_level":0,"original_size":0,"labels":["TUI"]} {"id":"bd-1fn","title":"[CP1] Integration tests for discussion watermark","description":"Integration tests verifying discussion sync watermark behavior.\n\n## Tests (tests/discussion_watermark_tests.rs)\n\n- skips_discussion_fetch_when_updated_at_unchanged\n- fetches_discussions_when_updated_at_advanced\n- updates_watermark_after_successful_discussion_sync\n- does_not_update_watermark_on_discussion_sync_failure\n\n## Test Scenario\n1. Ingest issue with updated_at = T1\n2. Verify discussions_synced_for_updated_at = T1\n3. Re-run ingest with same issue (updated_at = T1)\n4. Verify NO discussion API calls made (watermark prevents)\n5. Simulate issue update (updated_at = T2)\n6. Re-run ingest\n7. Verify discussion API calls made for T2\n8. 
Verify watermark updated to T2\n\n## Why This Matters\nDiscussion API is expensive (1 call per issue). Watermark ensures\nwe only refetch when issue actually changed, even with cursor rewind.\n\nFiles: tests/discussion_watermark_tests.rs\nDone when: Watermark correctly prevents redundant discussion refetch","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:59:11.362495Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:02.086158Z","closed_at":"2026-01-25T17:02:02.086158Z","deleted_at":"2026-01-25T17:02:02.086154Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-1gu","title":"[CP0] gi auth-test command","description":"## Background\n\nauth-test is a quick diagnostic command to verify GitLab connectivity. Used for troubleshooting and CI pipelines. Simpler than doctor because it only checks auth, not full system health.\n\nReference: docs/prd/checkpoint-0.md section \"gi auth-test\"\n\n## Approach\n\n**src/cli/commands/auth-test.ts:**\n```typescript\nimport { Command } from 'commander';\nimport { loadConfig } from '../../core/config';\nimport { GitLabClient } from '../../gitlab/client';\nimport { TokenNotSetError } from '../../core/errors';\n\nexport const authTestCommand = new Command('auth-test')\n .description('Verify GitLab authentication')\n .action(async (options, command) => {\n const globalOpts = command.optsWithGlobals();\n \n // 1. Load config\n const config = loadConfig(globalOpts.config);\n \n // 2. Get token from environment\n const token = process.env[config.gitlab.tokenEnvVar];\n if (!token) {\n throw new TokenNotSetError(config.gitlab.tokenEnvVar);\n }\n \n // 3. Create client and test auth\n const client = new GitLabClient({\n baseUrl: config.gitlab.baseUrl,\n token,\n });\n \n // 4. Get current user\n const user = await client.getCurrentUser();\n \n // 5. 
Output success\n console.log(`Authenticated as @${user.username} (${user.name})`);\n console.log(`GitLab: ${config.gitlab.baseUrl}`);\n });\n```\n\n**Output format:**\n```\nAuthenticated as @johndoe (John Doe)\nGitLab: https://gitlab.example.com\n```\n\n## Acceptance Criteria\n\n- [ ] Loads config from default or --config path\n- [ ] Gets token from configured env var (default GITLAB_TOKEN)\n- [ ] Throws TokenNotSetError if env var not set\n- [ ] Calls GET /api/v4/user to verify auth\n- [ ] Prints username and display name on success\n- [ ] Exit 0 on success\n- [ ] Exit 1 on auth failure (GitLabAuthError)\n- [ ] Exit 1 if config not found (ConfigNotFoundError)\n\n## Files\n\nCREATE:\n- src/cli/commands/auth-test.ts\n\n## TDD Loop\n\nN/A - simple command, verify manually and with integration test in init.test.ts\n\n```bash\n# Manual verification\nexport GITLAB_TOKEN=\"valid-token\"\ngi auth-test\n\n# With invalid token\nexport GITLAB_TOKEN=\"invalid\"\ngi auth-test # should exit 1\n```\n\n## Edge Cases\n\n- Config exists but token env var not set - clear error message\n- Token exists but wrong scopes - GitLabAuthError (401)\n- Network unreachable - GitLabNetworkError\n- Token with extra whitespace - should trim","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:51.135580Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:28:16.369542Z","closed_at":"2026-01-25T03:28:16.369481Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1gu","depends_on_id":"bd-13b","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1gu","depends_on_id":"bd-1l1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1gvg","title":"Implement status fetcher with adaptive paging and pagination guard","description":"## Background\nWith the GraphQL client in place, we need a status-specific fetcher that paginates through all issues in a project, 
extracts status widgets via __typename matching, and handles edge cases like complexity errors and cursor stalls.\n\n## Approach\nAll code goes in src/gitlab/graphql.rs alongside GraphqlClient. The fetcher uses the workItems(types:[ISSUE]) resolver (NOT project.issues which returns the old Issue type without status widgets). Widget matching uses __typename == \"WorkItemWidgetStatus\" for deterministic identification.\n\n## Files\n- src/gitlab/graphql.rs (add to existing file created by bd-2dlt)\n\n## Implementation\n\nConstants:\n ISSUE_STATUS_QUERY: GraphQL query string with $projectPath, $after, $first variables\n PAGE_SIZES: &[u32] = &[100, 50, 25, 10]\n\nPrivate deserialization types:\n WorkItemsResponse { project: Option }\n ProjectNode { work_items: Option } (serde rename workItems)\n WorkItemConnection { nodes: Vec, page_info: PageInfo } (serde rename pageInfo)\n WorkItemNode { iid: String, widgets: Vec }\n PageInfo { end_cursor: Option, has_next_page: bool } (serde renames)\n StatusWidget { status: Option }\n\nPublic types:\n UnsupportedReason enum: GraphqlEndpointMissing, AuthForbidden (Debug, Clone)\n FetchStatusResult struct:\n statuses: HashMap\n all_fetched_iids: HashSet\n unsupported_reason: Option\n partial_error_count: usize\n first_partial_error: Option\n\nis_complexity_or_timeout_error(msg) -> bool: lowercase contains \"complexity\" or \"timeout\"\n\nfetch_issue_statuses(client, project_path) -> Result:\n Pagination loop:\n 1. Build variables with current page_size from PAGE_SIZES[page_size_idx]\n 2. Call client.query() — match errors:\n - GitLabNotFound -> Ok(empty + GraphqlEndpointMissing) + warn\n - GitLabAuthFailed -> Ok(empty + AuthForbidden) + warn \n - Other with complexity/timeout msg -> reduce page_size_idx, continue (retry same cursor)\n - Other with smallest page size exhausted -> return Err\n - Other -> return Err\n 3. Track partial errors from GraphqlQueryResult\n 4. Parse response into WorkItemsResponse\n 5. 
For each node: parse iid to i64, add to all_fetched_iids, check widgets for __typename == \"WorkItemWidgetStatus\", insert status into map\n 6. Reset page_size_idx to 0 after successful page\n 7. Pagination guard: if has_next_page but new cursor == old cursor or is None, warn + break\n 8. Update cursor, continue loop\n\n## Acceptance Criteria\n- [ ] Paginates: 2-page mock returns all statuses + all IIDs\n- [ ] No status widget: IID in all_fetched_iids but not in statuses\n- [ ] Status widget with null status: IID in all_fetched_iids but not in statuses\n- [ ] 404 -> Ok(empty, unsupported_reason: GraphqlEndpointMissing)\n- [ ] 403 -> Ok(empty, unsupported_reason: AuthForbidden)\n- [ ] Success -> unsupported_reason: None\n- [ ] __typename != \"WorkItemWidgetStatus\" -> ignored, no error\n- [ ] Cursor stall (same endCursor twice) -> aborts, returns partial result\n- [ ] Complexity error at first=100 -> retries at 50, succeeds\n- [ ] Timeout error -> reduces page size\n- [ ] All page sizes fail -> returns Err\n- [ ] After successful page, next page starts at first=100 again\n- [ ] Partial-data pages -> partial_error_count incremented, first_partial_error captured\n\n## TDD Loop\nRED: test_fetch_statuses_pagination, test_fetch_statuses_no_status_widget, test_fetch_statuses_404_graceful, test_fetch_statuses_403_graceful, test_typename_matching_ignores_non_status_widgets, test_fetch_statuses_cursor_stall_aborts, test_fetch_statuses_complexity_error_reduces_page_size, test_fetch_statuses_timeout_error_reduces_page_size, test_fetch_statuses_smallest_page_still_fails, test_fetch_statuses_page_size_resets_after_success, test_fetch_statuses_unsupported_reason_none_on_success, test_fetch_statuses_partial_errors_tracked\n Adaptive tests: mock must inspect $first variable in request body to return different responses per page size\nGREEN: Implement all types + fetch_issue_statuses function\nVERIFY: cargo test fetch_statuses && cargo test typename\n\n## Edge Cases\n- GraphQL 
returns iid as String — parse to i64\n- widgets is Vec — match __typename field, then deserialize matching widgets\n- let-chain syntax: if is_status_widget && let Ok(sw) = serde_json::from_value::(...)\n- Pagination guard: new_cursor.is_none() || new_cursor == cursor\n- Page size resets to 0 (index into PAGE_SIZES) after each successful page\n- FetchStatusResult is NOT Clone — test fields individually","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:42:00.388137Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.418490Z","closed_at":"2026-02-11T07:21:33.418451Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1gvg","depends_on_id":"bd-2dlt","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1gvg","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -56,7 +56,7 @@ {"id":"bd-1lja","title":"Add --issue, --mr, -p, --preflight-only CLI flags and SyncOptions extensions with validation","description":"## Background\nSurgical sync is invoked via `lore sync --issue 123 --mr 456 -p myproject`. This bead adds the CLI flags to `SyncArgs` (clap struct), extends `SyncOptions` with surgical fields, and wires them together in `handle_sync_cmd` with full validation. This is the user-facing entry point for the entire surgical sync feature.\n\nThe existing `SyncArgs` struct at lines 760-805 of `src/cli/mod.rs` defines all CLI flags for `lore sync`. `SyncOptions` at lines 20-29 of `src/cli/commands/sync.rs` is the runtime options struct passed to `run_sync`. 
`handle_sync_cmd` at lines 2070-2096 of `src/main.rs` bridges CLI args to SyncOptions and calls `run_sync`.\n\n## Approach\n\n### Step 1: Add flags to SyncArgs (src/cli/mod.rs, struct SyncArgs at line ~760)\n\nAdd after the existing `timings` field:\n\n```rust\n/// Surgically sync specific issues by IID (repeatable, must be positive)\n#[arg(long, value_parser = clap::value_parser!(u64).range(1..), action = clap::ArgAction::Append)]\npub issue: Vec,\n\n/// Surgically sync specific merge requests by IID (repeatable, must be positive)\n#[arg(long, value_parser = clap::value_parser!(u64).range(1..), action = clap::ArgAction::Append)]\npub mr: Vec,\n\n/// Scope to a single project (required when --issue or --mr is used, falls back to config.defaultProject)\n#[arg(short = 'p', long)]\npub project: Option,\n\n/// Validate remote entities exist without any DB content writes. Runs preflight network fetch only.\n#[arg(long, default_value_t = false)]\npub preflight_only: bool,\n```\n\n**Why u64 with range(1..)**: IIDs are always positive. Parse-time validation gives immediate, clear error messages from clap.\n\n### Step 2: Extend SyncOptions (src/cli/commands/sync.rs, struct SyncOptions at line ~20)\n\nAdd fields:\n\n```rust\npub issue_iids: Vec,\npub mr_iids: Vec,\npub project: Option,\npub preflight_only: bool,\n```\n\nAdd helper:\n\n```rust\nimpl SyncOptions {\n pub const MAX_SURGICAL_TARGETS: usize = 100;\n\n pub fn is_surgical(&self) -> bool {\n !self.issue_iids.is_empty() || !self.mr_iids.is_empty()\n }\n}\n```\n\n### Step 3: Wire in handle_sync_cmd (src/main.rs, function handle_sync_cmd at line ~2070)\n\nAfter existing SyncOptions construction (~line 2088):\n\n1. **Dedup IIDs** before constructing options:\n```rust\nlet mut issue_iids = args.issue;\nlet mut mr_iids = args.mr;\nissue_iids.sort_unstable();\nissue_iids.dedup();\nmr_iids.sort_unstable();\nmr_iids.dedup();\n```\n\n2. **Add new fields** to the SyncOptions construction.\n\n3. 
**Validation** (after options creation, before calling run_sync):\n- Hard cap: `issue_iids.len() + mr_iids.len() > MAX_SURGICAL_TARGETS` → error with count\n- Project required: if `is_surgical()`, use `config.effective_project(options.project.as_deref())`. If None → error saying `-p` or `defaultProject` is required\n- Incompatible flags: `--full` + surgical → error\n- Embed leakage guard: `--no-docs` without `--no-embed` in surgical mode → error (stale embeddings for regenerated docs)\n- `--preflight-only` requires surgical mode → error if not `is_surgical()`\n\n## Acceptance Criteria\n- [ ] `lore sync --issue 123` parses correctly (issue_iids = [123])\n- [ ] `lore sync --issue 123 --issue 456` produces deduplicated sorted vec\n- [ ] `lore sync --mr 789` parses correctly\n- [ ] `lore sync --issue 0` rejected at parse time by clap (range 1..)\n- [ ] `lore sync --issue -1` rejected at parse time by clap (u64 parse failure)\n- [ ] `lore sync -p myproject --issue 1` sets project = Some(\"myproject\")\n- [ ] `lore sync --preflight-only --issue 1 -p proj` sets preflight_only = true\n- [ ] `SyncOptions::is_surgical()` returns true when issue_iids or mr_iids is non-empty\n- [ ] `SyncOptions::is_surgical()` returns false when both vecs are empty\n- [ ] `SyncOptions::MAX_SURGICAL_TARGETS` is 100\n- [ ] Validation: `--issue 1` without `-p` and no defaultProject → error mentioning `-p`\n- [ ] Validation: `--issue 1` without `-p` but with defaultProject in config → uses defaultProject (no error)\n- [ ] Validation: `--full --issue 1 -p proj` → incompatibility error\n- [ ] Validation: `--no-docs --issue 1 -p proj` (without --no-embed) → embed leakage error\n- [ ] Validation: `--no-docs --no-embed --issue 1 -p proj` → accepted\n- [ ] Validation: `--preflight-only` without --issue/--mr → error\n- [ ] Validation: >100 combined targets → hard cap error\n- [ ] Normal `lore sync` (without --issue/--mr) still works identically\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo 
clippy --all-targets -- -D warnings` passes\n\n## Files\n- MODIFY: src/cli/mod.rs (add fields to SyncArgs, ~line 805)\n- MODIFY: src/cli/commands/sync.rs (extend SyncOptions + is_surgical + MAX_SURGICAL_TARGETS)\n- MODIFY: src/main.rs (wire fields + validation in handle_sync_cmd)\n\n## TDD Anchor\nRED: Write tests in `src/cli/commands/sync.rs` (in a `#[cfg(test)] mod tests` block):\n\n```rust\n#[cfg(test)]\nmod tests {\n use super::*;\n\n fn default_options() -> SyncOptions {\n SyncOptions {\n full: false,\n no_status: false,\n no_docs: false,\n no_embed: false,\n timings: false,\n issue_iids: vec![],\n mr_iids: vec![],\n project: None,\n preflight_only: false,\n }\n }\n\n #[test]\n fn is_surgical_with_issues() {\n let opts = SyncOptions { issue_iids: vec![1], ..default_options() };\n assert!(opts.is_surgical());\n }\n\n #[test]\n fn is_surgical_with_mrs() {\n let opts = SyncOptions { mr_iids: vec![10], ..default_options() };\n assert!(opts.is_surgical());\n }\n\n #[test]\n fn is_surgical_empty() {\n let opts = default_options();\n assert!(!opts.is_surgical());\n }\n\n #[test]\n fn max_surgical_targets_is_100() {\n assert_eq!(SyncOptions::MAX_SURGICAL_TARGETS, 100);\n }\n}\n```\n\nGREEN: Add the fields and `is_surgical()` method.\nVERIFY: `cargo test is_surgical && cargo test max_surgical_targets`\n\nAdditional validation tests (in integration or as unit tests on a `validate_surgical_options` helper if extracted):\n- `preflight_only_requires_surgical` — SyncOptions with preflight_only=true, empty iids → error\n- `surgical_no_docs_requires_no_embed` — SyncOptions with no_docs=true, no_embed=false, is_surgical=true → error\n- `surgical_incompatible_with_full` — SyncOptions with full=true, is_surgical=true → error\n\n## Edge Cases\n- Clap `ArgAction::Append` allows `--issue 1 --issue 2` but NOT `--issue 1,2` (no value_delimiter). 
This is intentional — comma-separated values are ambiguous and error-prone.\n- Duplicate IIDs like `--issue 123 --issue 123` are handled by dedup in handle_sync_cmd, not rejected.\n- The `effective_project` method on Config (line 309 of config.rs) already handles the `-p` / defaultProject fallback: `cli_project.or(self.default_project.as_deref())`.\n- The `-p` short flag does not conflict with any existing SyncArgs flags.\n\n## Dependency Context\nThis is a leaf dependency with no upstream blockers. Can be done in parallel with bd-1sc6, bd-159p, bd-tiux. Downstream bead bd-1i4i (orchestrator) reads these fields to dispatch surgical vs standard sync.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:12:43.921399Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:02:47.520632Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-1lja","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-1m8","title":"Extend 'lore stats --check' for event table integrity and queue health","description":"## Background\nThe existing stats --check command validates data integrity. Need to extend it for event tables (referential integrity) and dependent job queue health (stuck locks, retryable jobs). This provides operators and agents a way to detect data quality issues after sync.\n\n## Approach\nExtend src/cli/commands/stats.rs check mode:\n\n**New checks:**\n\n1. Event FK integrity:\n```sql\n-- Orphaned state events (issue_id points to non-existent issue)\nSELECT COUNT(*) FROM resource_state_events rse\nWHERE rse.issue_id IS NOT NULL\n AND NOT EXISTS (SELECT 1 FROM issues i WHERE i.id = rse.issue_id);\n-- (repeat for merge_request_id, and for label + milestone event tables)\n```\n\n2. 
Queue health:\n```sql\n-- Pending jobs by type\nSELECT job_type, COUNT(*) FROM pending_dependent_fetches GROUP BY job_type;\n-- Stuck locks (locked_at older than 5 minutes)\nSELECT COUNT(*) FROM pending_dependent_fetches WHERE locked_at IS NOT NULL AND locked_at < ?;\n-- Retryable jobs (attempts > 0, not locked)\nSELECT COUNT(*) FROM pending_dependent_fetches WHERE attempts > 0 AND locked_at IS NULL;\n-- Max attempts (jobs that may be permanently failing)\nSELECT job_type, MAX(attempts) FROM pending_dependent_fetches GROUP BY job_type;\n```\n\n3. Human output per check: PASS / WARN / FAIL with counts\n```\nEvent FK integrity: PASS (0 orphaned events)\nQueue health: WARN (3 stuck locks, 12 retryable jobs)\n```\n\n4. Robot JSON: structured health report\n```json\n{\n \"event_integrity\": {\n \"status\": \"pass\",\n \"orphaned_state_events\": 0,\n \"orphaned_label_events\": 0,\n \"orphaned_milestone_events\": 0\n },\n \"queue_health\": {\n \"status\": \"warn\",\n \"pending_by_type\": {\"resource_events\": 5, \"mr_closes_issues\": 2},\n \"stuck_locks\": 3,\n \"retryable_jobs\": 12,\n \"max_attempts_by_type\": {\"resource_events\": 5}\n }\n}\n```\n\n## Acceptance Criteria\n- [ ] Detects orphaned events (FK target missing)\n- [ ] Detects stuck locks (locked_at older than threshold)\n- [ ] Reports retryable job count and max attempts\n- [ ] Human output shows PASS/WARN/FAIL per check\n- [ ] Robot JSON matches structured schema\n- [ ] Graceful when event/queue tables don't exist\n\n## Files\n- src/cli/commands/stats.rs (extend check mode)\n\n## TDD Loop\nRED: tests/stats_check_tests.rs:\n- `test_stats_check_events_pass` - clean data, verify PASS\n- `test_stats_check_events_orphaned` - delete an issue with events remaining, verify FAIL count\n- `test_stats_check_queue_stuck_locks` - set old locked_at, verify WARN\n- `test_stats_check_queue_retryable` - fail some jobs, verify retryable count\n\nGREEN: Add the check queries and formatting\n\nVERIFY: `cargo test stats_check -- 
--nocapture`\n\n## Edge Cases\n- FK with CASCADE should prevent orphaned events in normal operation — but manual DB edits or bugs could cause them\n- Tables may not exist if migration 011 not applied — check table existence before querying\n- Empty queue is PASS (not WARN for \"no jobs found\")\n- Distinguish between \"0 stuck locks\" (good) and \"queue table doesn't exist\" (skip check)","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-02T21:31:57.422916Z","created_by":"tayloreernisse","updated_at":"2026-02-03T16:23:13.409909Z","closed_at":"2026-02-03T16:23:13.409717Z","close_reason":"Extended IntegrityResult with orphan_state/label/milestone_events and queue_stuck_locks/queue_max_attempts. Added FK integrity queries for all 3 event tables and queue health checks. Updated human output with PASS/WARN/FAIL indicators and robot JSON.","compaction_level":0,"original_size":0,"labels":["cli","gate-1","phase-b"],"dependencies":[{"issue_id":"bd-1m8","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1m8","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1m8","depends_on_id":"bd-tir","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1mf","title":"[CP1] gi sync-status enhancement","description":"Enhance sync-status from CP0 stub to show issue cursors.\n\nOutput:\n- Last run timestamp and duration\n- Cursor positions per project (issues resource_type)\n- Entity counts (issues, discussions, notes)\n\nFiles: src/cli/commands/sync-status.ts (update existing)\nDone when: Shows cursor positions and counts after 
ingestion","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T15:20:36.449088Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.157235Z","closed_at":"2026-01-25T15:21:35.157235Z","deleted_at":"2026-01-25T15:21:35.157232Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} -{"id":"bd-1mju","title":"Vertical slice integration test + SLO verification","description":"## Background\nThe vertical slice gate validates that core screens work together end-to-end with real data flows and meet performance SLOs. This is a manual + automated verification pass.\n\n## Approach\nCreate integration tests in crates/lore-tui/tests/:\n- test_full_nav_flow: Dashboard -> press i -> IssueList loads -> press Enter -> IssueDetail loads -> press Esc -> back to IssueList with cursor preserved -> press H -> Dashboard\n- test_filter_requery: IssueList -> type filter -> verify re-query triggers and results update\n- test_stale_result_guard: rapidly navigate between screens, verify no stale data displayed\n- Performance benchmarks: run M-tier fixture, measure p95 nav latency, assert < 75ms\n- Stuck-input check: fuzz InputMode transitions, assert always recoverable via Esc or Ctrl+C\n- Cancel latency: start sync, cancel, measure time to acknowledgment, assert < 2s\n\n## Acceptance Criteria\n- [ ] Full nav flow test passes without panic\n- [ ] Filter re-query test shows updated results\n- [ ] No stale data displayed during rapid navigation\n- [ ] p95 nav latency < 75ms on M-tier fixtures\n- [ ] Zero stuck-input states across 1000 random key sequences\n- [ ] Sync cancel acknowledged p95 < 2s\n- [ ] All state preserved correctly on back-navigation\n\n## Files\n- CREATE: crates/lore-tui/tests/vertical_slice.rs\n\n## TDD Anchor\nRED: Write test_dashboard_to_issue_detail_roundtrip that navigates Dashboard -> IssueList -> IssueDetail -> Esc -> IssueList, asserts cursor position 
preserved.\nGREEN: Ensure all navigation and state preservation is wired up.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml vertical_slice\n\n## Edge Cases\n- Tests need FakeClock and synthetic DB fixtures (not real GitLab)\n- ftui test harness required for rendering tests without TTY\n- Performance benchmarks may vary by machine — use relative thresholds\n\n## Dependency Context\nRequires all Phase 2 screens: Dashboard, Issue List, Issue Detail, MR List, MR Detail.\nRequires NavigationStack, TaskSupervisor, DbManager from Phase 1.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:00:18.310264Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:33.796953Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1mju","depends_on_id":"bd-3pxe","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1mju","depends_on_id":"bd-3t1b","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1mju","depends_on_id":"bd-3ty8","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1mju","depends_on_id":"bd-8ab7","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-1mju","title":"Vertical slice integration test + SLO verification","description":"## Background\nThe vertical slice gate validates that core screens work together end-to-end with real data flows and meet performance SLOs. 
This is a manual + automated verification pass.\n\n## Approach\nCreate integration tests in crates/lore-tui/tests/:\n- test_full_nav_flow: Dashboard -> press i -> IssueList loads -> press Enter -> IssueDetail loads -> press Esc -> back to IssueList with cursor preserved -> press H -> Dashboard\n- test_filter_requery: IssueList -> type filter -> verify re-query triggers and results update\n- test_stale_result_guard: rapidly navigate between screens, verify no stale data displayed\n- Performance benchmarks: run M-tier fixture, measure p95 nav latency, assert < 75ms\n- Stuck-input check: fuzz InputMode transitions, assert always recoverable via Esc or Ctrl+C\n- Cancel latency: start sync, cancel, measure time to acknowledgment, assert < 2s\n\n## Acceptance Criteria\n- [ ] Full nav flow test passes without panic\n- [ ] Filter re-query test shows updated results\n- [ ] No stale data displayed during rapid navigation\n- [ ] p95 nav latency < 75ms on M-tier fixtures\n- [ ] Zero stuck-input states across 1000 random key sequences\n- [ ] Sync cancel acknowledged p95 < 2s\n- [ ] All state preserved correctly on back-navigation\n\n## Files\n- CREATE: crates/lore-tui/tests/vertical_slice.rs\n\n## TDD Anchor\nRED: Write test_dashboard_to_issue_detail_roundtrip that navigates Dashboard -> IssueList -> IssueDetail -> Esc -> IssueList, asserts cursor position preserved.\nGREEN: Ensure all navigation and state preservation is wired up.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml vertical_slice\n\n## Edge Cases\n- Tests need FakeClock and synthetic DB fixtures (not real GitLab)\n- ftui test harness required for rendering tests without TTY\n- Performance benchmarks may vary by machine — use relative thresholds\n\n## Dependency Context\nRequires all Phase 2 screens: Dashboard, Issue List, Issue Detail, MR List, MR Detail.\nRequires NavigationStack, TaskSupervisor, DbManager from Phase 
1.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:00:18.310264Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:06:21.021705Z","closed_at":"2026-02-18T21:06:21.021656Z","close_reason":"Vertical slice integration test complete: 11 tests covering nav flows, stale guards, input fuzz, bootstrap transition, render all screens, performance smoke","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1mju","depends_on_id":"bd-3t1b","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1mju","depends_on_id":"bd-3ty8","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1mju","depends_on_id":"bd-8ab7","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1n5","title":"[CP1] gi ingest --type=issues command","description":"CLI command to orchestrate issue ingestion.\n\nImplementation:\n1. Acquire app lock with heartbeat\n2. Create sync_run record (status='running')\n3. For each configured project:\n - Call ingestIssues()\n - For each ingested issue, call ingestIssueDiscussions()\n - Show progress (spinner or progress bar)\n4. Update sync_run (status='succeeded', metrics_json)\n5. 
Release lock\n\nFlags:\n- --type=issues (required)\n- --project=PATH (optional, filter to single project)\n- --force (override stale lock)\n\nOutput: Progress bar, then summary with counts\n\nFiles: src/cli/commands/ingest.ts\nTests: tests/integration/sync-runs.test.ts\nDone when: Full issue + discussion ingestion works end-to-end","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:20:05.114751Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.153598Z","closed_at":"2026-01-25T15:21:35.153598Z","deleted_at":"2026-01-25T15:21:35.153595Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-1n5q","title":"lore brief: situational awareness for topic/module/person","description":"## Background\nComposable capstone command. An agent says \"I am about to work on auth\" and gets everything in one call: open issues, active MRs, experts, recent activity, unresolved threads, related context. Replaces 5 separate lore calls with 1.\n\n## Input Modes\n1. Topic: `lore brief 'authentication'` — FTS search to find relevant entities, aggregate\n2. Path: `lore brief --path src/auth/` — who expert internals for path expertise\n3. Person: `lore brief --person teernisse` — who workload internals\n4. Entity: `lore brief issues 3864` — single entity focus with cross-references\n\n## Section Assembly Architecture\n\n### Reuse existing run_* functions (ship faster, recommended for v1)\nEach section calls existing CLI command functions and converts their output.\n\nIMPORTANT: All existing run_* functions take `&Config`, NOT `&Connection`. 
The Config contains the db_path and each function opens its own connection internally.\n\n```rust\n// In src/cli/commands/brief.rs\n\nuse crate::cli::commands::list::{run_list_issues, run_list_mrs, ListFilters, MrListFilters};\nuse crate::cli::commands::who::{run_who, WhoArgs, WhoMode};\nuse crate::core::config::Config;\n\npub async fn run_brief(config: &Config, args: BriefArgs) -> Result {\n let mut sections_computed = Vec::new();\n\n // 1. open_issues: reuse list.rs\n // Signature: pub fn run_list_issues(config: &Config, filters: ListFilters) -> Result\n // Located at src/cli/commands/list.rs:268\n let open_issues = run_list_issues(config, ListFilters {\n state: Some(\"opened\".into()),\n limit: Some(5),\n project: args.project.clone(),\n // ... scope by topic/path/person based on mode\n ..Default::default()\n })?;\n sections_computed.push(\"open_issues\");\n\n // 2. active_mrs: reuse list.rs\n // Signature: pub fn run_list_mrs(config: &Config, filters: MrListFilters) -> Result\n // Located at src/cli/commands/list.rs:476\n let active_mrs = run_list_mrs(config, MrListFilters {\n state: Some(\"opened\".into()),\n limit: Some(5),\n project: args.project.clone(),\n ..Default::default()\n })?;\n sections_computed.push(\"active_mrs\");\n\n // 3. experts: reuse who.rs\n // Signature: pub fn run_who(config: &Config, args: &WhoArgs) -> Result\n // Located at src/cli/commands/who.rs:276\n let experts = run_who(config, &WhoArgs {\n mode: WhoMode::Expert,\n path: args.path.clone(),\n limit: Some(3),\n ..Default::default()\n })?;\n sections_computed.push(\"experts\");\n\n // 4. recent_activity: reuse timeline internals\n // The timeline pipeline is 5-stage (SEED->HYDRATE->EXPAND->COLLECT->RENDER)\n // Types in src/core/timeline.rs, seed in src/core/timeline_seed.rs\n // ...etc\n}\n```\n\nNOTE: ListFilters and MrListFilters may not implement Default. Check before using `..Default::default()`. 
If they don't, derive it or construct all fields explicitly.\n\n### Concrete Function References (src/cli/commands/)\n| Module | Function | Signature | Line |\n|--------|----------|-----------|------|\n| list.rs | run_list_issues | `(config: &Config, filters: ListFilters) -> Result` | 268 |\n| list.rs | run_list_mrs | `(config: &Config, filters: MrListFilters) -> Result` | 476 |\n| who.rs | run_who | `(config: &Config, args: &WhoArgs) -> Result` | 276 |\n| search.rs | run_search | `(config: &Config, query: &str, cli_filters: SearchCliFilters, fts_mode: FtsQueryMode, requested_mode: &str, explain: bool) -> Result` | 61 |\n\nNOTE: run_search is currently synchronous (pub fn, not pub async fn). If bd-1ksf ships first, it becomes async. Brief should handle both cases — call `.await` if async, direct call if sync.\n\n### Section Details\n| Section | Source | Limit | Fallback |\n|---------|--------|-------|----------|\n| open_issues | list.rs with state=opened | 5 | empty array |\n| active_mrs | list.rs with state=opened | 5 | empty array |\n| experts | who.rs Expert mode | 3 | empty array (no path data) |\n| recent_activity | timeline pipeline | 10 events | empty array |\n| unresolved_threads | SQL: discussions WHERE resolved=false | 5 | empty array |\n| related | search_vector() via bd-8con | 5 | omit section (no embeddings) |\n| warnings | computed from dates/state | all | empty array |\n\n### Warning Generation\n```rust\nfn compute_warnings(issues: &[IssueRow]) -> Vec {\n let now = chrono::Utc::now();\n issues.iter().filter_map(|i| {\n let updated = parse_timestamp(i.updated_at)?;\n let days_stale = (now - updated).num_days();\n if days_stale > 30 {\n Some(format!(\"Issue #{} has no activity for {} days\", i.iid, days_stale))\n } else { None }\n }).chain(\n issues.iter().filter(|i| i.assignees.is_empty())\n .map(|i| format!(\"Issue #{} is unassigned\", i.iid))\n ).collect()\n}\n```\n\n## Robot Mode Output Schema\n```json\n{\n \"ok\": true,\n \"data\": {\n \"mode\": 
\"topic\",\n \"query\": \"authentication\",\n \"summary\": \"3 open issues, 2 active MRs, top expert: teernisse\",\n \"open_issues\": [{ \"iid\": 123, \"title\": \"...\", \"state\": \"opened\", \"assignees\": [...], \"updated_at\": \"...\", \"labels\": [...] }],\n \"active_mrs\": [{ \"iid\": 456, \"title\": \"...\", \"state\": \"opened\", \"author\": \"...\", \"draft\": false, \"updated_at\": \"...\" }],\n \"experts\": [{ \"username\": \"teernisse\", \"score\": 42, \"last_activity\": \"...\" }],\n \"recent_activity\": [{ \"timestamp\": \"...\", \"event_type\": \"state_change\", \"entity_ref\": \"issues#123\", \"summary\": \"...\", \"actor\": \"...\" }],\n \"unresolved_threads\": [{ \"discussion_id\": \"abc\", \"entity_ref\": \"issues#123\", \"started_by\": \"...\", \"note_count\": 5, \"last_note_at\": \"...\" }],\n \"related\": [{ \"iid\": 789, \"title\": \"...\", \"similarity_score\": 0.85 }],\n \"warnings\": [\"Issue #3800 has no activity for 45 days\"]\n },\n \"meta\": { \"elapsed_ms\": 1200, \"sections_computed\": [\"open_issues\", \"active_mrs\", \"experts\", \"recent_activity\"] }\n}\n```\n\n## Clap Registration\n```rust\n// In src/main.rs Commands enum, add:\nBrief {\n /// Free-text topic, entity type, or omit for project-wide brief\n query: Option,\n /// Focus on a file path (who expert mode)\n #[arg(long)]\n path: Option,\n /// Focus on a person (who workload mode)\n #[arg(long)]\n person: Option,\n /// Scope to project (fuzzy match)\n #[arg(short, long)]\n project: Option,\n /// Maximum items per section\n #[arg(long, default_value = \"5\")]\n section_limit: usize,\n},\n```\n\n## TDD Loop\nRED: Tests in src/cli/commands/brief.rs:\n- test_brief_topic_returns_all_sections: insert test data, search 'auth', assert all section keys present in response\n- test_brief_path_uses_who_expert: brief --path src/auth/, assert experts section populated\n- test_brief_person_uses_who_workload: brief --person user, assert open_issues filtered to user's assignments\n- 
test_brief_warnings_stale_issue: insert issue with updated_at > 30 days ago, assert warning generated\n- test_brief_token_budget: robot mode output for topic query is under 12000 bytes (~3000 tokens)\n- test_brief_no_embeddings_graceful: related section omitted (not errored) when no embeddings exist\n- test_brief_empty_topic: zero matches returns valid JSON with empty arrays + \"No data found\" summary\n\nGREEN: Implement brief with section assembly, calling existing run_* functions\n\nVERIFY:\n```bash\ncargo test brief:: && cargo clippy --all-targets -- -D warnings\ncargo run --release -- -J brief 'throw time' | jq '.data | keys'\ncargo run --release -- -J brief 'throw time' | wc -c # target <12000\n```\n\n## Acceptance Criteria\n- [ ] lore brief TOPIC returns all sections for free-text topic\n- [ ] lore brief --path PATH returns path-focused briefing with experts\n- [ ] lore brief --person USERNAME returns person-focused briefing\n- [ ] lore brief issues N returns entity-focused briefing\n- [ ] Robot mode output under 12000 bytes (~3000 tokens)\n- [ ] Each section degrades gracefully if its data source is unavailable\n- [ ] summary field is auto-generated one-liner from section counts\n- [ ] warnings detect: stale issues (>30d), unassigned, no due date\n- [ ] Performance: <2s total (acceptable since composing multiple queries)\n- [ ] Command registered in main.rs and robot-docs\n\n## Edge Cases\n- Topic with zero matches: return empty sections + \"No data found for this topic\" summary\n- Path that nobody has touched: experts empty, related may still have results\n- Person not found in DB: exit code 17 with suggestion\n- All sections empty: still return valid JSON with empty arrays\n- Very broad topic (\"the\"): may return too many results — each section respects its limit cap\n- ListFilters/MrListFilters may not derive Default — construct all fields explicitly if needed\n\n## Dependencies\n- Hybrid search (bd-1ksf) for topic relevance ranking\n- lore who 
(already shipped) for expertise\n- lore related (bd-8con) for semantic connections (BLOCKER — related section is core to the feature)\n- Timeline pipeline (already shipped) for recent activity\n\n## Dependency Context\n- **bd-1ksf (hybrid search)**: Provides `search_hybrid()` which brief uses for topic mode to find relevant entities. Without it, topic mode falls back to FTS-only via `search_fts()`.\n- **bd-8con (related)**: Provides `run_related()` which brief calls to populate the `related` section with semantically similar entities. This is a blocking dependency — the related section is a core differentiator.\n\n## Files to Create/Modify\n- NEW: src/cli/commands/brief.rs\n- src/cli/commands/mod.rs (add pub mod brief; re-export)\n- src/main.rs (register Brief subcommand in Commands enum, add handle_brief fn)\n- Reuse: list.rs, who.rs, timeline.rs, search.rs, show.rs internals","status":"open","priority":2,"issue_type":"feature","created_at":"2026-02-12T15:47:22.893231Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:31:33.752020Z","compaction_level":0,"original_size":0,"labels":["cli-imp","intelligence"],"dependencies":[{"issue_id":"bd-1n5q","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1n5q","depends_on_id":"bd-1ksf","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1n5q","depends_on_id":"bd-8con","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1nf","title":"Register 'lore timeline' command with all flags","description":"## Background\n\nThis bead wires the `lore timeline` command into the CLI — adding the subcommand to the Commands enum, defining all flags, registering in VALID_COMMANDS, and dispatching to the timeline handler. 
The actual query logic and rendering are in separate beads.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 3.1 (Command Design).\n\n## Codebase Context\n\n- Commands enum in `src/cli/mod.rs` (line ~86): uses #[derive(Subcommand)] with nested Args structs\n- VALID_COMMANDS in `src/main.rs` (line ~448): &[&str] array for fuzzy command matching\n- Handler dispatch in `src/main.rs` match on Commands:: variants\n- robot-docs manifest in `src/main.rs`: registers commands for `lore robot-docs` output\n- Existing pattern: `Sync(SyncArgs)`, `Search(SearchArgs)`, etc.\n- No timeline module exists yet — this bead creates the CLI entry point only\n\n## Approach\n\n### 1. TimelineArgs struct (`src/cli/mod.rs`):\n\n```rust\n/// Show a chronological timeline of events matching a query\n#[derive(Parser, Debug)]\npub struct TimelineArgs {\n /// Search query (keywords to find in issues, MRs, and discussions)\n pub query: String,\n\n /// Scope to a specific project (fuzzy match)\n #[arg(short = 'p', long)]\n pub project: Option,\n\n /// Only show events after this date (e.g. \"6m\", \"2w\", \"2024-01-01\")\n #[arg(long)]\n pub since: Option,\n\n /// Cross-reference expansion depth (0 = no expansion)\n #[arg(long, default_value = \"1\")]\n pub depth: usize,\n\n /// Also follow 'mentioned' edges during expansion (high fan-out)\n #[arg(long = \"expand-mentions\")]\n pub expand_mentions: bool,\n\n /// Maximum number of events to display\n #[arg(short = 'n', long = \"limit\", default_value = \"100\")]\n pub limit: usize,\n}\n```\n\n### 2. Commands enum variant:\n\n```rust\n/// Show a chronological timeline of events matching a query\n#[command(name = \"timeline\")]\nTimeline(TimelineArgs),\n```\n\n### 3. Handler in `src/main.rs`:\n\n```rust\nCommands::Timeline(args) => {\n // Placeholder: will be filled by bd-2f2 (human) and bd-dty (robot)\n // For now: resolve project, call timeline query, dispatch to renderer\n}\n```\n\n### 4. 
VALID_COMMANDS: add `\"timeline\"` to the array\n\n### 5. robot-docs: add timeline command description to manifest\n\n## Acceptance Criteria\n\n- [ ] `TimelineArgs` struct with all 6 flags: query, project, since, depth, expand-mentions, limit\n- [ ] Commands::Timeline variant registered in Commands enum\n- [ ] Handler stub in src/main.rs dispatches to timeline logic\n- [ ] `\"timeline\"` added to VALID_COMMANDS array\n- [ ] robot-docs manifest includes timeline command description\n- [ ] `lore timeline --help` shows correct help text\n- [ ] `lore timeline` without query shows error (query is required positional)\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/cli/mod.rs` (TimelineArgs struct + Commands::Timeline variant)\n- `src/main.rs` (handler dispatch + VALID_COMMANDS + robot-docs entry)\n\n## TDD Loop\n\nNo unit tests for CLI wiring. Verify with:\n\n```bash\ncargo check --all-targets\ncargo run -- timeline --help\n```\n\n## Edge Cases\n\n- --since parsing: reuse existing date parsing from ListFilters (src/cli/mod.rs handles \"7d\", \"2w\", \"YYYY-MM-DD\")\n- --depth 0: valid, means no cross-reference expansion\n- --expand-mentions: off by default because mentioned edges have high fan-out\n","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:33:28.422082Z","created_by":"tayloreernisse","updated_at":"2026-02-06T13:49:15.313047Z","closed_at":"2026-02-06T13:49:15.312993Z","close_reason":"Wired lore timeline command: TimelineArgs with 9 flags, Commands::Timeline variant, handle_timeline handler, VALID_COMMANDS entry, robot-docs manifest with temporal_intelligence 
workflow","compaction_level":0,"original_size":0,"labels":["cli","gate-3","phase-b"],"dependencies":[{"issue_id":"bd-1nf","depends_on_id":"bd-2f2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1nf","depends_on_id":"bd-dty","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1nf","depends_on_id":"bd-ike","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -83,7 +83,7 @@ {"id":"bd-1ta","title":"[CP1] Integration tests for pagination","description":"Integration tests for GitLab pagination with wiremock.\n\n## Tests (tests/pagination_tests.rs)\n\n### Page Navigation\n- fetches_all_pages_when_multiple_exist\n- respects_per_page_parameter\n- follows_x_next_page_header_until_empty\n- falls_back_to_empty_page_stop_if_headers_missing\n\n### Cursor Behavior\n- applies_cursor_rewind_for_tuple_semantics\n- clamps_negative_rewind_to_zero\n\n## Test Setup\n- Use wiremock::MockServer\n- Set up handlers for /api/v4/projects/:id/issues\n- Return x-next-page headers\n- Verify request params (updated_after, per_page)\n\nFiles: tests/pagination_tests.rs\nDone when: All pagination tests pass with mocked server","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:59:07.806593Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:02.038945Z","closed_at":"2026-01-25T17:02:02.038945Z","deleted_at":"2026-01-25T17:02:02.038939Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-1u1","title":"Implement document regenerator","description":"## Background\nThe document regenerator drains the dirty_sources queue, regenerating documents for each entry. It uses per-item transactions for crash safety, a triple-hash fast path to skip unchanged documents entirely (no writes at all), and a bounded batch loop that drains completely. 
Error recording includes backoff computation.\n\n## Approach\nCreate `src/documents/regenerator.rs` per PRD Section 6.3.\n\n**Core function:**\n```rust\npub fn regenerate_dirty_documents(conn: &Connection) -> Result\n```\n\n**RegenerateResult:** { regenerated, unchanged, errored }\n\n**Algorithm (per PRD):**\n1. Loop: get_dirty_sources(conn) -> Vec<(SourceType, i64)>\n2. If empty, break (queue fully drained)\n3. For each (source_type, source_id):\n a. Begin transaction\n b. Call regenerate_one_tx(&tx, source_type, source_id) -> Result\n c. If Ok(changed): clear_dirty_tx, commit, count regenerated or unchanged\n d. If Err: record_dirty_error_tx (with backoff), commit, count errored\n\n**regenerate_one_tx (per PRD):**\n1. Extract document via extract_{type}_document(conn, source_id)\n2. If None (deleted): delete_document, return Ok(true)\n3. If Some(doc): call get_existing_hash() to check current state\n4. **If ALL THREE hashes match: return Ok(false) — skip ALL writes** (fast path)\n5. Otherwise: upsert_document with conditional label/path relinking\n6. Return Ok(content changed)\n\n**Helper functions (PRD-exact):**\n\n`get_existing_hash` — uses `optional()` to distinguish missing rows from DB errors:\n```rust\nfn get_existing_hash(\n conn: &Connection,\n source_type: SourceType,\n source_id: i64,\n) -> Result> {\n use rusqlite::OptionalExtension;\n let hash: Option = stmt\n .query_row(params, |row| row.get(0))\n .optional()?; // IMPORTANT: Not .ok() — .ok() would hide real DB errors\n Ok(hash)\n}\n```\n\n`get_document_id` — resolve document ID after upsert:\n```rust\nfn get_document_id(conn: &Connection, source_type: SourceType, source_id: i64) -> Result\n```\n\n`upsert_document` — checks existing triple hash before writing:\n```rust\nfn upsert_document(conn: &Connection, doc: &DocumentData) -> Result<()> {\n // 1. Query existing (id, content_hash, labels_hash, paths_hash) via OptionalExtension\n // 2. Triple-hash fast path: all match -> return Ok(())\n // 3. 
Upsert document row (ON CONFLICT DO UPDATE)\n // 4. Get doc_id (from existing or query after insert)\n // 5. Only delete+reinsert labels if labels_hash changed\n // 6. Only delete+reinsert paths if paths_hash changed\n}\n```\n\n**Key PRD detail — triple-hash fast path:**\n```rust\nif old_content_hash == &doc.content_hash\n && old_labels_hash == &doc.labels_hash\n && old_paths_hash == &doc.paths_hash\n{ return Ok(()); } // Skip ALL writes — prevents WAL churn\n```\n\n**Error recording with backoff:**\nrecord_dirty_error_tx reads current attempt_count from DB, computes next_attempt_at via shared backoff utility:\n```rust\nlet next_attempt_at = crate::core::backoff::compute_next_attempt_at(now, attempt_count + 1);\n```\n\n**All internal functions use _tx suffix** (take &Transaction) for atomicity.\n\n## Acceptance Criteria\n- [ ] Queue fully drained (bounded batch loop until empty)\n- [ ] Per-item transactions (crash loses at most 1 doc)\n- [ ] Triple-hash fast path: ALL THREE hashes match -> skip ALL writes (return Ok(false))\n- [ ] Content change: upsert document, update labels/paths\n- [ ] Labels-only change: relabels but skips path writes (paths_hash unchanged)\n- [ ] Deleted entity: delete document (cascade handles FTS/labels/paths/embeddings)\n- [ ] get_existing_hash uses `.optional()` (not `.ok()`) to preserve DB errors\n- [ ] get_document_id resolves document ID after upsert\n- [ ] Error recording: increment attempt_count, compute next_attempt_at via backoff\n- [ ] FTS triggers fire on insert/update/delete (verified by trigger, not regenerator)\n- [ ] RegenerateResult counts accurate (regenerated, unchanged, errored)\n- [ ] Errors do not abort batch (log, increment, continue)\n- [ ] `cargo test regenerator` passes\n\n## Files\n- `src/documents/regenerator.rs` — new file\n- `src/documents/mod.rs` — add `pub use regenerator::regenerate_dirty_documents;`\n\n## TDD Loop\nRED: Tests requiring DB:\n- `test_creates_new_document` — dirty source -> document created\n- 
`test_skips_unchanged_triple_hash` — all 3 hashes match -> unchanged count incremented, no DB writes\n- `test_updates_changed_content` — content_hash mismatch -> updated\n- `test_updates_changed_labels_only` — content same but labels_hash different -> updated\n- `test_updates_changed_paths_only` — content same but paths_hash different -> updated\n- `test_deletes_missing_source` — source deleted -> document deleted\n- `test_drains_queue` — queue empty after regeneration\n- `test_error_records_backoff` — error -> attempt_count incremented, next_attempt_at set\n- `test_get_existing_hash_not_found` — returns Ok(None) for missing document\nGREEN: Implement regenerate_dirty_documents + all helpers\nVERIFY: `cargo test regenerator`\n\n## Edge Cases\n- Empty queue: return immediately with all-zero counts\n- Extractor error for one item: record_dirty_error_tx, commit, continue\n- Triple-hash prevents WAL churn on incremental syncs (most entities unchanged)\n- Labels change but content does not: labels_hash mismatch triggers upsert with label relinking\n- get_existing_hash on missing document: returns Ok(None) via .optional() (not DB error)\n- get_existing_hash on corrupt DB: propagates real DB error (not masked by .ok())","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:25:55.178825Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:41:29.942386Z","closed_at":"2026-01-30T17:41:29.942324Z","close_reason":"Implemented document regenerator with triple-hash fast path, queue draining, fail-soft error handling + 5 tests","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1u1","depends_on_id":"bd-1yz","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1u1","depends_on_id":"bd-247","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1u1","depends_on_id":"bd-2fp","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} 
{"id":"bd-1uc","title":"Implement DB upsert functions for resource events","description":"## Background\nNeed to store fetched resource events into the three event tables created by migration 011. The existing DB pattern uses rusqlite prepared statements with named parameters. Timestamps from GitLab are ISO 8601 strings that need conversion to ms epoch UTC (matching the existing time.rs parse_datetime_to_ms function).\n\n## Approach\nCreate src/core/events_db.rs (new module) with three upsert functions:\n\n```rust\nuse rusqlite::Connection;\nuse super::error::Result;\n\n/// Upsert state events for an entity.\n/// Uses INSERT OR REPLACE keyed on UNIQUE(gitlab_id, project_id).\npub fn upsert_state_events(\n conn: &Connection,\n project_id: i64, // local DB project id\n entity_type: &str, // \"issue\" | \"merge_request\"\n entity_local_id: i64, // local DB id of the issue/MR\n events: &[GitLabStateEvent],\n) -> Result\n\n/// Upsert label events for an entity.\npub fn upsert_label_events(\n conn: &Connection,\n project_id: i64,\n entity_type: &str,\n entity_local_id: i64,\n events: &[GitLabLabelEvent],\n) -> Result\n\n/// Upsert milestone events for an entity.\npub fn upsert_milestone_events(\n conn: &Connection,\n project_id: i64,\n entity_type: &str,\n entity_local_id: i64,\n events: &[GitLabMilestoneEvent],\n) -> Result\n```\n\nEach function:\n1. Prepares INSERT OR REPLACE statement\n2. For each event, maps GitLab types to DB columns:\n - `actor_gitlab_id` = event.user.map(|u| u.id)\n - `actor_username` = event.user.map(|u| u.username.clone())\n - `created_at` = parse_datetime_to_ms(&event.created_at)?\n - Set issue_id or merge_request_id based on entity_type\n3. Returns count of upserted rows\n4. 
Wraps in a savepoint for atomicity per entity\n\nRegister module in src/core/mod.rs:\n```rust\npub mod events_db;\n```\n\n## Acceptance Criteria\n- [ ] All three upsert functions compile and handle all event fields\n- [ ] Upserts are idempotent (re-inserting same event doesn't duplicate)\n- [ ] Timestamps converted to ms epoch UTC via parse_datetime_to_ms\n- [ ] actor_gitlab_id and actor_username populated from event.user (handles None)\n- [ ] entity_type correctly maps to issue_id/merge_request_id (other is NULL)\n- [ ] source_merge_request_id populated for state events (iid from source_merge_request)\n- [ ] source_commit populated for state events\n- [ ] label_name populated for label events\n- [ ] milestone_title and milestone_id populated for milestone events\n- [ ] Returns upserted count\n\n## Files\n- src/core/events_db.rs (new)\n- src/core/mod.rs (add `pub mod events_db;`)\n\n## TDD Loop\nRED: tests/events_db_tests.rs (new):\n- `test_upsert_state_events_basic` - insert 3 events, verify count and data\n- `test_upsert_state_events_idempotent` - insert same events twice, verify no duplicates\n- `test_upsert_label_events_with_actor` - verify actor fields populated\n- `test_upsert_milestone_events_null_user` - verify user: null doesn't crash\n- `test_upsert_state_events_entity_exclusivity` - verify only one of issue_id/merge_request_id set\n\nSetup: create_test_db() helper that applies migrations 001-011, inserts a test project + issue + MR.\n\nGREEN: Implement the three functions\n\nVERIFY: `cargo test events_db -- --nocapture`\n\n## Edge Cases\n- parse_datetime_to_ms must handle GitLab's format: \"2024-03-15T10:30:00.000Z\" and \"2024-03-15T10:30:00.000+00:00\"\n- INSERT OR REPLACE will fire CASCADE deletes if there are FK references to these rows — currently no other table references event rows, so this is safe\n- entity_type must be validated (\"issue\" or \"merge_request\") — panic or error on invalid\n- source_merge_request field contains an MR ref object, 
not an ID — extract .iid for DB column","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:31:57.242549Z","created_by":"tayloreernisse","updated_at":"2026-02-03T16:19:14.169437Z","closed_at":"2026-02-03T16:19:14.169233Z","close_reason":"Implemented upsert_state_events, upsert_label_events, upsert_milestone_events, count_events in src/core/events_db.rs. Uses savepoints for atomicity, LoreError::Database via ? operator for clean error handling.","compaction_level":0,"original_size":0,"labels":["db","gate-1","phase-b"],"dependencies":[{"issue_id":"bd-1uc","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1uc","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-1up1","title":"Implement File History screen (per-file MR timeline with rename tracking)","description":"## Background\nThe File History screen shows which MRs touched a file over time, with rename-aware tracking and optional DiffNote discussion snippets. It wraps run_file_history() from src/cli/commands/file_history.rs (added in v0.8.1) in a TUI view. 
While Trace answers \"why was this code introduced?\", File History answers \"what happened to this file?\" — a chronological MR timeline.\n\nThe core query resolves rename chains via BFS (resolve_rename_chain from src/core/file_history.rs), finds all MRs with mr_file_changes entries matching any renamed path, and optionally fetches DiffNote discussions on those paths.\n\n## Data Shapes (from src/cli/commands/file_history.rs)\n\n```rust\npub struct FileHistoryResult {\n pub path: String,\n pub rename_chain: Vec, // resolved paths via BFS\n pub renames_followed: bool,\n pub merge_requests: Vec,\n pub discussions: Vec,\n pub total_mrs: usize,\n pub paths_searched: usize,\n}\n\npub struct FileHistoryMr {\n pub iid: i64,\n pub title: String,\n pub state: String, // merged/opened/closed\n pub author_username: String,\n pub change_type: String, // added/modified/deleted/renamed\n pub merged_at_iso: Option,\n pub updated_at_iso: String,\n pub merge_commit_sha: Option,\n pub web_url: Option,\n}\n\npub struct FileDiscussion {\n pub discussion_id: String,\n pub author_username: String,\n pub body_snippet: String,\n pub path: String,\n pub created_at_iso: String,\n}\n```\n\nrun_file_history() signature (src/cli/commands/file_history.rs):\n```rust\npub fn run_file_history(\n config: &Config, // used only for DB path — bd-1f5b will extract query-only version\n path: &str,\n project: Option<&str>,\n no_follow_renames: bool,\n merged_only: bool,\n include_discussions: bool,\n limit: usize,\n) -> Result\n```\n\nAfter bd-1f5b extracts the query logic, the TUI will call a Connection-based variant:\n```rust\npub fn query_file_history(\n conn: &Connection,\n project_id: Option,\n path: &str,\n follow_renames: bool,\n merged_only: bool,\n include_discussions: bool,\n limit: usize,\n) -> Result\n```\n\n## Approach\n\n**Screen enum** (message.rs):\nAdd Screen::FileHistory variant (no parameters). Label: \"File History\". 
Breadcrumb: \"File History\".\n\n**Path autocomplete**: Same mechanism as Trace screen — query DISTINCT new_path from mr_file_changes. Share the known_paths cache with Trace if both are loaded, or each screen maintains its own (simpler).\n\n**State** (state/file_history.rs):\n```rust\n#[derive(Debug, Default)]\npub struct FileHistoryState {\n pub path_input: String,\n pub path_focused: bool,\n pub result: Option,\n pub selected_mr_index: usize,\n pub follow_renames: bool, // default true\n pub merged_only: bool, // default false\n pub show_discussions: bool, // default false\n pub scroll_offset: u16,\n pub known_paths: Vec, // autocomplete cache\n pub autocomplete_matches: Vec,\n pub autocomplete_index: usize,\n}\n```\n\n**Action** (action.rs):\n- fetch_file_history(conn, project_id, path, follow_renames, merged_only, show_discussions, limit) -> Result: calls query_file_history from file_history module (after bd-1f5b extraction)\n- fetch_known_paths(conn, project_id): shared with Trace screen (same query)\n\n**View** (view/file_history.rs):\n- Top: path input with autocomplete dropdown + toggle indicators [renames: on] [merged: off] [discussions: off]\n- If renames followed: rename chain breadcrumb (path_a -> path_b -> path_c) in dimmed text\n- Summary line: \"N merge requests across M paths\"\n- Main area: chronological MR list (sorted by updated_at descending):\n - Each row: MR state icon + !iid + title + @author + change_type tag + date\n - If show_discussions: inline discussion snippets beneath relevant MRs (indented, dimmed, author + date + body_snippet)\n- Footer: \"showing N of M\" when total_mrs > limit\n- Keyboard:\n - j/k: scroll MR list\n - Enter: navigate to MrDetail(EntityKey::mr(project_id, iid))\n - /: focus path input\n - Tab: cycle autocomplete suggestions when path focused\n - r: toggle follow_renames (re-fetches)\n - m: toggle merged_only (re-fetches)\n - d: toggle show_discussions (re-fetches)\n - q: back\n\n**Contextual entry points** (wired 
from other screens):\n- MR Detail: h on a file path opens File History pre-filled with that path\n- Expert mode (Who screen): when viewing a file path's experts, h opens File History for that path\n- Requires other screens to expose selected_file_path() -> Option\n\n## Acceptance Criteria\n- [ ] Screen::FileHistory added to message.rs Screen enum with label and breadcrumb\n- [ ] FileHistoryState struct with all fields, Default impl\n- [ ] Path input with autocomplete dropdown from mr_file_changes (same mechanism as Trace)\n- [ ] Rename chain displayed as breadcrumb when renames_followed is true\n- [ ] Chronological MR list with state icons (merged/opened/closed) and change_type tags\n- [ ] Enter on MR navigates to MrDetail(EntityKey::mr(project_id, iid))\n- [ ] r toggles follow_renames, m toggles merged_only, d toggles show_discussions — all re-fetch\n- [ ] Discussion snippets shown inline beneath MRs when toggled on\n- [ ] Summary line showing \"N merge requests across M paths\"\n- [ ] Footer truncation indicator when total_mrs > display limit\n- [ ] Empty state: \"No MRs found for this file\" with hint \"Run 'lore sync --fetch-mr-file-changes' to populate\"\n- [ ] Contextual navigation: h on file path in MR Detail opens File History pre-filled\n- [ ] Registered in command palette (label \"File History\", keywords [\"history\", \"file\", \"changes\"])\n- [ ] AppState.has_text_focus() updated to include file_history.path_focused\n- [ ] AppState.blur_text_focus() updated to include file_history.path_focused = false\n\n## Files\n- MODIFY: crates/lore-tui/src/message.rs (add Screen::FileHistory variant + label)\n- CREATE: crates/lore-tui/src/state/file_history.rs (FileHistoryState struct + Default)\n- MODIFY: crates/lore-tui/src/state/mod.rs (pub mod file_history, pub use FileHistoryState, add to AppState, update has_text_focus/blur_text_focus)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_file_history, share fetch_known_paths with Trace)\n- CREATE: 
crates/lore-tui/src/view/file_history.rs (render_file_history fn)\n- MODIFY: crates/lore-tui/src/view/mod.rs (add Screen::FileHistory dispatch arm)\n\n## TDD Anchor\nRED: Write test_fetch_file_history_returns_mrs in action tests. Setup: in-memory DB, insert project, MR (state=\"merged\", merged_at set), mr_file_changes row (new_path=\"src/lib.rs\", change_type=\"modified\"). Call fetch_file_history(conn, Some(project_id), \"src/lib.rs\", true, false, false, 50). Assert: result.merge_requests.len() == 1, result.merge_requests[0].iid matches.\nGREEN: Implement fetch_file_history calling query_file_history.\nVERIFY: cargo test -p lore-tui file_history -- --nocapture\n\nAdditional tests:\n- test_file_history_empty: path \"nonexistent.rs\" returns empty merge_requests\n- test_file_history_rename_chain: insert rename A->B, query A, assert rename_chain=[\"A\",\"B\"] and MRs touching B are included\n- test_file_history_merged_only: merged_only=true excludes opened/closed MRs\n- test_file_history_discussions: show_discussions=true populates discussions vec with DiffNote snippets\n- test_file_history_limit: insert 10 MRs, limit=5, assert merge_requests.len()==5 and total_mrs==10\n- test_autocomplete: shared with Trace tests\n\n## Edge Cases\n- File never modified by any MR: empty state with helpful message and sync hint\n- Rename chain with cycles: BFS visited set in resolve_rename_chain prevents infinite loop\n- Very long file paths: truncate from left in list view (...path/to/file.rs)\n- Hundreds of MRs for a single file: default limit 50, footer shows total count\n- Discussion body_snippet may contain markdown/code — render as plain text, no parsing\n- No mr_file_changes data at all: hint that sync needs --fetch-mr-file-changes (config.sync.fetch_mr_file_changes)\n- Project scope: if global_scope.project_id is set, pass it to query and autocomplete\n\n## Dependency Context\n- bd-1f5b (blocks): Extracts query_file_history(conn, ...) from run_file_history(config, ...) 
in src/cli/commands/file_history.rs. The current function opens its own DB connection from Config — TUI needs a Connection-based variant since it manages its own connection.\n- src/core/file_history.rs: resolve_rename_chain() used by query_file_history internally. TUI does not call it directly.\n- FileHistoryResult, FileHistoryMr, FileDiscussion: currently defined in src/cli/commands/file_history.rs — bd-1f5b should move these to core or make them importable.\n- Navigation: uses NavigationStack.push(Screen::MrDetail(key)) from crates/lore-tui/src/navigation.rs.\n- AppState composition: FileHistoryState added as field in AppState (state/mod.rs ~line 154-174). has_text_focus/blur_text_focus at lines 194-207 must include file_history.path_focused.\n- Autocomplete: fetch_known_paths query identical to Trace screen — consider extracting to shared helper in action.rs.\n- Contextual entry: requires MrDetailState to expose selected file path. Deferred if MR Detail not yet built.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-18T18:14:13.179338Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:34:24.563746Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1up1","depends_on_id":"bd-1f5b","type":"blocks","created_at":"2026-02-18T18:14:33.412864Z","created_by":"tayloreernisse"},{"issue_id":"bd-1up1","depends_on_id":"bd-nwux","type":"parent-child","created_at":"2026-02-18T18:14:13.180816Z","created_by":"tayloreernisse"}]} +{"id":"bd-1up1","title":"Implement File History screen (per-file MR timeline with rename tracking)","description":"## Background\nThe File History screen shows which MRs touched a file over time, with rename-aware tracking and optional DiffNote discussion snippets. It wraps run_file_history() from src/cli/commands/file_history.rs (added in v0.8.1) in a TUI view. 
While Trace answers \"why was this code introduced?\", File History answers \"what happened to this file?\" — a chronological MR timeline.\n\nThe core query resolves rename chains via BFS (resolve_rename_chain from src/core/file_history.rs), finds all MRs with mr_file_changes entries matching any renamed path, and optionally fetches DiffNote discussions on those paths.\n\n## Data Shapes (from src/cli/commands/file_history.rs)\n\n```rust\npub struct FileHistoryResult {\n pub path: String,\n pub rename_chain: Vec, // resolved paths via BFS\n pub renames_followed: bool,\n pub merge_requests: Vec,\n pub discussions: Vec,\n pub total_mrs: usize,\n pub paths_searched: usize,\n}\n\npub struct FileHistoryMr {\n pub iid: i64,\n pub title: String,\n pub state: String, // merged/opened/closed\n pub author_username: String,\n pub change_type: String, // added/modified/deleted/renamed\n pub merged_at_iso: Option,\n pub updated_at_iso: String,\n pub merge_commit_sha: Option,\n pub web_url: Option,\n}\n\npub struct FileDiscussion {\n pub discussion_id: String,\n pub author_username: String,\n pub body_snippet: String,\n pub path: String,\n pub created_at_iso: String,\n}\n```\n\nrun_file_history() signature (src/cli/commands/file_history.rs):\n```rust\npub fn run_file_history(\n config: &Config, // used only for DB path — bd-1f5b will extract query-only version\n path: &str,\n project: Option<&str>,\n no_follow_renames: bool,\n merged_only: bool,\n include_discussions: bool,\n limit: usize,\n) -> Result\n```\n\nAfter bd-1f5b extracts the query logic, the TUI will call a Connection-based variant:\n```rust\npub fn query_file_history(\n conn: &Connection,\n project_id: Option,\n path: &str,\n follow_renames: bool,\n merged_only: bool,\n include_discussions: bool,\n limit: usize,\n) -> Result\n```\n\n## Approach\n\n**Screen enum** (message.rs):\nAdd Screen::FileHistory variant (no parameters). Label: \"File History\". 
Breadcrumb: \"File History\".\n\n**Path autocomplete**: Same mechanism as Trace screen — query DISTINCT new_path from mr_file_changes. Share the known_paths cache with Trace if both are loaded, or each screen maintains its own (simpler).\n\n**State** (state/file_history.rs):\n```rust\n#[derive(Debug, Default)]\npub struct FileHistoryState {\n pub path_input: String,\n pub path_focused: bool,\n pub result: Option,\n pub selected_mr_index: usize,\n pub follow_renames: bool, // default true\n pub merged_only: bool, // default false\n pub show_discussions: bool, // default false\n pub scroll_offset: u16,\n pub known_paths: Vec, // autocomplete cache\n pub autocomplete_matches: Vec,\n pub autocomplete_index: usize,\n}\n```\n\n**Action** (action.rs):\n- fetch_file_history(conn, project_id, path, follow_renames, merged_only, show_discussions, limit) -> Result: calls query_file_history from file_history module (after bd-1f5b extraction)\n- fetch_known_paths(conn, project_id): shared with Trace screen (same query)\n\n**View** (view/file_history.rs):\n- Top: path input with autocomplete dropdown + toggle indicators [renames: on] [merged: off] [discussions: off]\n- If renames followed: rename chain breadcrumb (path_a -> path_b -> path_c) in dimmed text\n- Summary line: \"N merge requests across M paths\"\n- Main area: chronological MR list (sorted by updated_at descending):\n - Each row: MR state icon + !iid + title + @author + change_type tag + date\n - If show_discussions: inline discussion snippets beneath relevant MRs (indented, dimmed, author + date + body_snippet)\n- Footer: \"showing N of M\" when total_mrs > limit\n- Keyboard:\n - j/k: scroll MR list\n - Enter: navigate to MrDetail(EntityKey::mr(project_id, iid))\n - /: focus path input\n - Tab: cycle autocomplete suggestions when path focused\n - r: toggle follow_renames (re-fetches)\n - m: toggle merged_only (re-fetches)\n - d: toggle show_discussions (re-fetches)\n - q: back\n\n**Contextual entry points** (wired 
from other screens):\n- MR Detail: h on a file path opens File History pre-filled with that path\n- Expert mode (Who screen): when viewing a file path's experts, h opens File History for that path\n- Requires other screens to expose selected_file_path() -> Option\n\n## Acceptance Criteria\n- [ ] Screen::FileHistory added to message.rs Screen enum with label and breadcrumb\n- [ ] FileHistoryState struct with all fields, Default impl\n- [ ] Path input with autocomplete dropdown from mr_file_changes (same mechanism as Trace)\n- [ ] Rename chain displayed as breadcrumb when renames_followed is true\n- [ ] Chronological MR list with state icons (merged/opened/closed) and change_type tags\n- [ ] Enter on MR navigates to MrDetail(EntityKey::mr(project_id, iid))\n- [ ] r toggles follow_renames, m toggles merged_only, d toggles show_discussions — all re-fetch\n- [ ] Discussion snippets shown inline beneath MRs when toggled on\n- [ ] Summary line showing \"N merge requests across M paths\"\n- [ ] Footer truncation indicator when total_mrs > display limit\n- [ ] Empty state: \"No MRs found for this file\" with hint \"Run 'lore sync --fetch-mr-file-changes' to populate\"\n- [ ] Contextual navigation: h on file path in MR Detail opens File History pre-filled\n- [ ] Registered in command palette (label \"File History\", keywords [\"history\", \"file\", \"changes\"])\n- [ ] AppState.has_text_focus() updated to include file_history.path_focused\n- [ ] AppState.blur_text_focus() updated to include file_history.path_focused = false\n\n## Files\n- MODIFY: crates/lore-tui/src/message.rs (add Screen::FileHistory variant + label)\n- CREATE: crates/lore-tui/src/state/file_history.rs (FileHistoryState struct + Default)\n- MODIFY: crates/lore-tui/src/state/mod.rs (pub mod file_history, pub use FileHistoryState, add to AppState, update has_text_focus/blur_text_focus)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_file_history, share fetch_known_paths with Trace)\n- CREATE: 
crates/lore-tui/src/view/file_history.rs (render_file_history fn)\n- MODIFY: crates/lore-tui/src/view/mod.rs (add Screen::FileHistory dispatch arm)\n\n## TDD Anchor\nRED: Write test_fetch_file_history_returns_mrs in action tests. Setup: in-memory DB, insert project, MR (state=\"merged\", merged_at set), mr_file_changes row (new_path=\"src/lib.rs\", change_type=\"modified\"). Call fetch_file_history(conn, Some(project_id), \"src/lib.rs\", true, false, false, 50). Assert: result.merge_requests.len() == 1, result.merge_requests[0].iid matches.\nGREEN: Implement fetch_file_history calling query_file_history.\nVERIFY: cargo test -p lore-tui file_history -- --nocapture\n\nAdditional tests:\n- test_file_history_empty: path \"nonexistent.rs\" returns empty merge_requests\n- test_file_history_rename_chain: insert rename A->B, query A, assert rename_chain=[\"A\",\"B\"] and MRs touching B are included\n- test_file_history_merged_only: merged_only=true excludes opened/closed MRs\n- test_file_history_discussions: show_discussions=true populates discussions vec with DiffNote snippets\n- test_file_history_limit: insert 10 MRs, limit=5, assert merge_requests.len()==5 and total_mrs==10\n- test_autocomplete: shared with Trace tests\n\n## Edge Cases\n- File never modified by any MR: empty state with helpful message and sync hint\n- Rename chain with cycles: BFS visited set in resolve_rename_chain prevents infinite loop\n- Very long file paths: truncate from left in list view (...path/to/file.rs)\n- Hundreds of MRs for a single file: default limit 50, footer shows total count\n- Discussion body_snippet may contain markdown/code — render as plain text, no parsing\n- No mr_file_changes data at all: hint that sync needs --fetch-mr-file-changes (config.sync.fetch_mr_file_changes)\n- Project scope: if global_scope.project_id is set, pass it to query and autocomplete\n\n## Dependency Context\n- bd-1f5b (blocks): Extracts query_file_history(conn, ...) from run_file_history(config, ...) 
in src/cli/commands/file_history.rs. The current function opens its own DB connection from Config — TUI needs a Connection-based variant since it manages its own connection.\n- src/core/file_history.rs: resolve_rename_chain() used by query_file_history internally. TUI does not call it directly.\n- FileHistoryResult, FileHistoryMr, FileDiscussion: currently defined in src/cli/commands/file_history.rs — bd-1f5b should move these to core or make them importable.\n- Navigation: uses NavigationStack.push(Screen::MrDetail(key)) from crates/lore-tui/src/navigation.rs.\n- AppState composition: FileHistoryState added as field in AppState (state/mod.rs ~line 154-174). has_text_focus/blur_text_focus at lines 194-207 must include file_history.path_focused.\n- Autocomplete: fetch_known_paths query identical to Trace screen — consider extracting to shared helper in action.rs.\n- Contextual entry: requires MrDetailState to expose selected file path. Deferred if MR Detail not yet built.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-18T18:14:13.179338Z","created_by":"tayloreernisse","updated_at":"2026-02-19T03:47:22.812185Z","closed_at":"2026-02-19T03:47:22.811968Z","close_reason":"File History screen complete: state, action, view, full wiring. 579 TUI tests pass.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1up1","depends_on_id":"bd-1f5b","type":"blocks","created_at":"2026-02-18T18:14:33.412864Z","created_by":"tayloreernisse"},{"issue_id":"bd-1up1","depends_on_id":"bd-nwux","type":"parent-child","created_at":"2026-02-18T18:14:13.180816Z","created_by":"tayloreernisse"}]} {"id":"bd-1ut","title":"[CP0] Final validation - tests, lint, typecheck","description":"## Background\n\nFinal validation ensures everything works together before marking CP0 complete. This is the integration gate - all unit tests, integration tests, lint, and type checking must pass. 
Manual smoke tests verify the full user experience.\n\nReference: docs/prd/checkpoint-0.md sections \"Definition of Done\", \"Manual Smoke Tests\"\n\n## Approach\n\n**Automated checks:**\n```bash\n# All tests pass\nnpm run test\n\n# TypeScript strict mode\nnpm run build # or: npx tsc --noEmit\n\n# ESLint with no errors\nnpm run lint\n```\n\n**Manual smoke tests (from PRD table):**\n\n| Command | Expected | Pass Criteria |\n|---------|----------|---------------|\n| `gi --help` | Command list | Shows all commands |\n| `gi version` | Version number | Shows installed version |\n| `gi init` | Interactive prompts | Creates valid config |\n| `gi init` (config exists) | Confirmation prompt | Warns before overwriting |\n| `gi init --force` | No prompt | Overwrites without asking |\n| `gi auth-test` | `Authenticated as @username` | Shows GitLab username |\n| `GITLAB_TOKEN=invalid gi auth-test` | Error message | Non-zero exit, clear error |\n| `gi doctor` | Status table | All required checks pass |\n| `gi doctor --json` | JSON object | Valid JSON, `success: true` |\n| `gi backup` | Backup path | Creates timestamped backup |\n| `gi sync-status` | No runs message | Stub output works |\n\n**Definition of Done gate items:**\n- [ ] `gi init` writes config to XDG path and validates projects against GitLab\n- [ ] `gi auth-test` succeeds with real PAT\n- [ ] `gi doctor` reports DB ok + GitLab ok\n- [ ] DB migrations apply; WAL + FK enabled; busy_timeout + synchronous set\n- [ ] App lock mechanism works (concurrent runs blocked)\n- [ ] All unit tests pass\n- [ ] All integration tests pass (mocked)\n- [ ] ESLint passes with no errors\n- [ ] TypeScript compiles with strict mode\n\n## Acceptance Criteria\n\n- [ ] `npm run test` exits 0 (all tests pass)\n- [ ] `npm run build` exits 0 (TypeScript compiles)\n- [ ] `npm run lint` exits 0 (no ESLint errors)\n- [ ] All 11 manual smoke tests pass\n- [ ] All 9 Definition of Done gate items verified\n\n## Files\n\nNo new files created. 
This bead verifies existing work.\n\n## TDD Loop\n\nThis IS the final verification step:\n\n```bash\n# Automated\nnpm run test\nnpm run build\nnpm run lint\n\n# Manual (requires GITLAB_TOKEN set with valid token)\ngi --help\ngi version\ngi init # go through setup\ngi auth-test\ngi doctor\ngi doctor --json | jq .success # should output true\ngi backup\ngi sync-status\ngi reset --confirm\ngi init # re-setup\n```\n\n## Edge Cases\n\n- Test coverage should be reasonable (aim for 80%+ on core modules)\n- Integration tests may flake on CI - check MSW setup\n- Manual tests require real GitLab token - document in README\n- ESLint may warn vs error - only errors block\n- TypeScript noImplicitAny catches missed types","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:52.078907Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:37:51.858558Z","closed_at":"2026-01-25T03:37:51.858474Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1ut","depends_on_id":"bd-1cb","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ut","depends_on_id":"bd-1gu","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ut","depends_on_id":"bd-1kh","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ut","depends_on_id":"bd-38e","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ut","depends_on_id":"bd-3kj","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1v8","title":"Update robot-docs manifest with Phase B commands","description":"## Background\n\nThe robot-docs manifest is the agent self-discovery mechanism. 
It must include all Phase B commands so agents can discover temporal intelligence features.\n\n## Codebase Context\n\n- handle_robot_docs() in src/main.rs (line ~1646) returns JSON with commands, exit_codes, workflows, aliases, clap_error_codes\n- Currently 18 commands documented in the manifest\n- VALID_COMMANDS array in src/main.rs (line ~448): [\"issues\", \"mrs\", \"search\", \"sync\", \"ingest\", \"count\", \"status\", \"auth\", \"doctor\", \"version\", \"init\", \"stats\", \"generate-docs\", \"embed\", \"migrate\", \"health\", \"robot-docs\", \"completions\"]\n- Phase B adds 3 new commands: timeline, file-history, trace\n- count gains new entity: \"references\" (bd-2ez)\n- Existing workflows: first_setup, daily_sync, search, pre_flight\n\n## Approach\n\n### 1. Add commands to handle_robot_docs() JSON:\n\n```json\n\"timeline\": {\n \"description\": \"Chronological timeline of events matching a keyword query\",\n \"flags\": [\"\", \"-p \", \"--since \", \"--depth \", \"--expand-mentions\", \"-n \"],\n \"example\": \"lore --robot timeline 'authentication' --since 30d\"\n},\n\"file-history\": {\n \"description\": \"Which MRs touched a file, with rename chain resolution\",\n \"flags\": [\"\", \"-p \", \"--discussions\", \"--no-follow-renames\", \"--merged\", \"-n \"],\n \"example\": \"lore --robot file-history src/auth/oauth.rs\"\n},\n\"trace\": {\n \"description\": \"Trace file -> MR -> issue -> discussions decision chain\",\n \"flags\": [\"\", \"-p \", \"--discussions\", \"--no-follow-renames\", \"-n \"],\n \"example\": \"lore --robot trace src/auth/oauth.rs\"\n}\n```\n\n### 2. Update count command to mention \"references\" entity\n\n### 3. 
Add temporal_intelligence workflow:\n```json\n\"temporal_intelligence\": {\n \"description\": \"Query temporal data about project history\",\n \"steps\": [\n \"lore sync (ensure events fetched with fetchResourceEvents=true)\",\n \"lore timeline '' for chronological event history\",\n \"lore file-history for file-level MR history\",\n \"lore trace for file -> MR -> issue -> discussion chain\"\n ]\n}\n```\n\n### 4. Add timeline, file-history, trace to VALID_COMMANDS array\n\n## Acceptance Criteria\n\n- [ ] robot-docs includes timeline, file-history, trace commands\n- [ ] count references documented\n- [ ] temporal_intelligence workflow present\n- [ ] VALID_COMMANDS includes all 3 new commands\n- [ ] Examples are valid, runnable commands\n- [ ] cargo check --all-targets passes\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n\n- src/main.rs (update handle_robot_docs + VALID_COMMANDS array)\n\n## TDD Loop\n\nVERIFY: lore robot-docs | jq '.data.commands.timeline'\nVERIFY: lore robot-docs | jq '.data.workflows.temporal_intelligence'","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-02T22:43:07.859092Z","created_by":"tayloreernisse","updated_at":"2026-02-05T20:17:38.827205Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1v8","depends_on_id":"bd-1ht","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1v8","depends_on_id":"bd-2ez","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1v8","depends_on_id":"bd-2n4","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1v8t","title":"Add WorkItemStatus type and SyncConfig toggle","description":"## Background\nThe GraphQL status response returns name, category, color, and iconName fields. We need a Rust struct that deserializes this directly. 
Category is stored as raw Option (not an enum) because GitLab 18.5+ supports custom statuses with arbitrary category values. We also need a config toggle so users can disable status enrichment.\n\n## Approach\nAdd WorkItemStatus to the existing types module. Add fetch_work_item_status to the existing SyncConfig with default_true() helper. Also add WorkItemStatus to pub use re-exports in src/gitlab/mod.rs.\n\n## Files\n- src/gitlab/types.rs (add struct after GitLabMergeRequest, before #[cfg(test)])\n- src/core/config.rs (add field to SyncConfig struct + Default impl)\n- src/gitlab/mod.rs (add WorkItemStatus to pub use)\n\n## Implementation\n\nIn src/gitlab/types.rs (needs Serialize, Deserialize derives already in scope):\n #[derive(Debug, Clone, Serialize, Deserialize)]\n pub struct WorkItemStatus {\n pub name: String,\n pub category: Option,\n pub color: Option,\n #[serde(rename = \"iconName\")]\n pub icon_name: Option,\n }\n\nIn src/core/config.rs SyncConfig struct (after fetch_mr_file_changes):\n #[serde(rename = \"fetchWorkItemStatus\", default = \"default_true\")]\n pub fetch_work_item_status: bool,\n\nIn impl Default for SyncConfig (after fetch_mr_file_changes: true):\n fetch_work_item_status: true,\n\n## Acceptance Criteria\n- [ ] WorkItemStatus deserializes: {\"name\":\"In progress\",\"category\":\"IN_PROGRESS\",\"color\":\"#1f75cb\",\"iconName\":\"status-in-progress\"}\n- [ ] Optional fields: {\"name\":\"To do\"} -> category/color/icon_name are None\n- [ ] Unknown category: {\"name\":\"Custom\",\"category\":\"SOME_FUTURE_VALUE\"} -> Ok\n- [ ] Null category: {\"name\":\"In progress\",\"category\":null} -> None\n- [ ] SyncConfig::default().fetch_work_item_status == true\n- [ ] JSON without fetchWorkItemStatus key -> defaults true\n- [ ] cargo check --all-targets passes\n\n## TDD Loop\nRED: test_work_item_status_deserialize, test_work_item_status_optional_fields, test_work_item_status_unknown_category, test_work_item_status_null_category, 
test_config_fetch_work_item_status_default_true, test_config_deserialize_without_key\nGREEN: Add struct + config field\nVERIFY: cargo test test_work_item_status && cargo test test_config\n\n## Edge Cases\n- serde rename \"iconName\" -> icon_name (camelCase in GraphQL)\n- Category is Option, NOT an enum\n- Config key is camelCase \"fetchWorkItemStatus\" matching existing convention","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:41:42.790001Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.416990Z","closed_at":"2026-02-11T07:21:33.416950Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1v8t","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -97,7 +97,7 @@ {"id":"bd-1yx","title":"Implement rename chain resolution for file-history","description":"## Background\n\nRename chain resolution is the core algorithm for Gate 4. When querying history of src/auth.rs, it finds MRs that touched the file when it was previously named src/authentication.rs. 
This is reused by Gate 5 (trace) as well.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 4.6 (Rename Handling).\n\n## Codebase Context\n\n- mr_file_changes table (migration 016, bd-1oo): merge_request_id, project_id, old_path, new_path, change_type\n- change_type='renamed' rows have both old_path and new_path populated\n- Partial index `idx_mfc_renamed` on (project_id, change_type) WHERE change_type='renamed' optimizes BFS queries\n- Also `idx_mfc_project_path` on (project_id, new_path) and `idx_mfc_project_old_path` partial index\n- No timeline/trace/file_history modules exist yet in src/core/\n\n## Approach\n\nCreate `src/core/file_history.rs`:\n\n```rust\nuse std::collections::HashSet;\nuse std::collections::VecDeque;\nuse rusqlite::Connection;\nuse crate::core::error::Result;\n\n/// Resolves a file path through its rename history.\n/// Returns all equivalent paths (original + renames) for use in queries.\n/// BFS in both directions: forward (old_path -> new_path) and backward (new_path -> old_path).\npub fn resolve_rename_chain(\n conn: &Connection,\n project_id: i64,\n path: &str,\n max_hops: usize, // default 10 from CLI\n) -> Result> {\n let mut visited: HashSet = HashSet::new();\n let mut queue: VecDeque = VecDeque::new();\n\n visited.insert(path.to_string());\n queue.push_back(path.to_string());\n\n let forward_sql = \"SELECT mfc.new_path FROM mr_file_changes mfc \\\n WHERE mfc.project_id = ?1 AND mfc.old_path = ?2 AND mfc.change_type = 'renamed'\";\n let backward_sql = \"SELECT mfc.old_path FROM mr_file_changes mfc \\\n WHERE mfc.project_id = ?1 AND mfc.new_path = ?2 AND mfc.change_type = 'renamed'\";\n\n while let Some(current) = queue.pop_front() {\n if visited.len() > max_hops + 1 { break; }\n\n // Forward: current was the old name -> discover new names\n let mut stmt = conn.prepare(forward_sql)?;\n let forward: Vec = stmt.query_map(\n rusqlite::params\\![project_id, current],\n |row| row.get(0),\n )?.filter_map(|r| 
r.ok()).collect();\n\n // Backward: current was the new name -> discover old names\n let mut stmt = conn.prepare(backward_sql)?;\n let backward: Vec = stmt.query_map(\n rusqlite::params\\![project_id, current],\n |row| row.get(0),\n )?.filter_map(|r| r.ok()).collect();\n\n for discovered in forward.into_iter().chain(backward) {\n if visited.insert(discovered.clone()) {\n queue.push_back(discovered);\n }\n }\n }\n\n Ok(visited.into_iter().collect())\n}\n```\n\nRegister in `src/core/mod.rs`: add `pub mod file_history;`\n\n## Acceptance Criteria\n\n- [ ] `resolve_rename_chain()` follows renames in both directions (forward + backward)\n- [ ] Cycles detected via HashSet (same path never visited twice)\n- [ ] Bounded at max_hops (default 10)\n- [ ] No renames found: returns vec with just the original path\n- [ ] max_hops=0: returns just original path without querying DB\n- [ ] Module registered in src/core/mod.rs as `pub mod file_history;`\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/core/file_history.rs` (NEW)\n- `src/core/mod.rs` (add `pub mod file_history;`)\n\n## TDD Loop\n\nRED:\n- `test_rename_chain_no_renames` — returns just original path\n- `test_rename_chain_forward` — a.rs -> b.rs -> c.rs: starting from a.rs finds all three\n- `test_rename_chain_backward` — starting from c.rs finds a.rs and b.rs\n- `test_rename_chain_cycle_detection` — a->b->a terminates without infinite loop\n- `test_rename_chain_max_hops_zero` — returns just original path\n- `test_rename_chain_max_hops_bounded` — chain longer than max is truncated\n\nTests need in-memory DB with migrations applied through 016 + mr_file_changes test data with change_type='renamed'.\n\nGREEN: Implement BFS with visited set.\n\nVERIFY: `cargo test --lib -- file_history`\n\n## Edge Cases\n\n- File never renamed: single-element vec\n- Circular rename (a->b->a): visited set prevents infinite loop\n- max_hops=0: return just original path, 
no queries executed\n- Case sensitivity: paths are case-sensitive (Linux default, matches GitLab behavior)\n- Multiple renames from same old_path: BFS discovers all branches\n","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:08.985345Z","created_by":"tayloreernisse","updated_at":"2026-02-13T14:00:46.354253Z","closed_at":"2026-02-13T14:00:46.354201Z","close_reason":"Implemented resolve_rename_chain() BFS in src/core/file_history.rs with 8 tests covering: no renames, forward chain, backward chain, cycle detection, max_hops=0, max_hops bounded, branching renames, project isolation. All 765 tests pass, clippy+fmt clean.","compaction_level":0,"original_size":0,"labels":["gate-4","phase-b","query"],"dependencies":[{"issue_id":"bd-1yx","depends_on_id":"bd-14q","type":"parent-child","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1yx","depends_on_id":"bd-1oo","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-1yz","title":"Implement MR document extraction","description":"## Background\nMR documents are similar to issue documents but include source/target branch information in the header. The extractor queries merge_requests and mr_labels tables. 
Like issue extraction, it produces a DocumentData struct for the regeneration pipeline.\n\n## Approach\nImplement `extract_mr_document()` in `src/documents/extractor.rs`:\n\n```rust\n/// Extract a searchable document from a merge request.\n/// Returns None if the MR has been deleted from the DB.\npub fn extract_mr_document(conn: &Connection, mr_id: i64) -> Result>\n```\n\n**SQL queries (from PRD Section 2.2):**\n```sql\n-- Main entity\nSELECT m.id, m.iid, m.title, m.description, m.state, m.author_username,\n m.source_branch, m.target_branch,\n m.created_at, m.updated_at, m.web_url,\n p.path_with_namespace, p.id AS project_id\nFROM merge_requests m\nJOIN projects p ON p.id = m.project_id\nWHERE m.id = ?\n\n-- Labels\nSELECT l.name FROM mr_labels ml\nJOIN labels l ON l.id = ml.label_id\nWHERE ml.merge_request_id = ?\nORDER BY l.name\n```\n\n**Document format:**\n```\n[[MergeRequest]] !456: Implement JWT authentication\nProject: group/project-one\nURL: https://gitlab.example.com/group/project-one/-/merge_requests/456\nLabels: [\"feature\", \"auth\"]\nState: opened\nAuthor: @johndoe\nSource: feature/jwt-auth -> main\n\n--- Description ---\n\nThis MR implements JWT-based authentication...\n```\n\n**Key difference from issues:** The `Source:` line with `source_branch -> target_branch`.\n\n## Acceptance Criteria\n- [ ] Deleted MR returns Ok(None)\n- [ ] MR document has `[[MergeRequest]]` prefix with `!` before iid (not `#`)\n- [ ] Source line shows `source_branch -> target_branch`\n- [ ] Labels sorted alphabetically in JSON array\n- [ ] content_hash computed from full content_text\n- [ ] labels_hash computed from sorted labels\n- [ ] paths is empty (MR-level docs don't have DiffNote paths; those are on discussion docs)\n- [ ] `cargo test extract_mr` passes\n\n## Files\n- `src/documents/extractor.rs` — implement `extract_mr_document()`\n\n## TDD Loop\nRED: Tests in `#[cfg(test)] mod tests`:\n- `test_mr_document_format` — verify header matches PRD template with Source 
line\n- `test_mr_not_found` — returns Ok(None)\n- `test_mr_no_description` — header only\n- `test_mr_branch_info` — Source line correct\nGREEN: Implement extract_mr_document with SQL queries\nVERIFY: `cargo test extract_mr`\n\n## Edge Cases\n- MR with NULL description: skip \"--- Description ---\" section\n- MR with NULL source_branch or target_branch: omit Source line (shouldn't happen in practice)\n- Draft MRs: state field captures this, no special handling needed","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-30T15:25:45.521703Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:30:04.308781Z","closed_at":"2026-01-30T17:30:04.308598Z","close_reason":"Implemented extract_mr_document() with Source line, PRD format, and 5 tests","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1yz","depends_on_id":"bd-36p","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1yz","depends_on_id":"bd-hrs","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1zj6","title":"OBSERV: Enrich robot JSON meta with run_id and stages","description":"## Background\nRobot JSON currently has a flat meta.elapsed_ms. This enriches it with run_id and a stages array, making every lore --robot sync output a complete performance profile.\n\n## Approach\nThe robot JSON output is built in src/cli/commands/sync.rs. The current SyncResult (line 15-25) is serialized into the data field. The meta field is built alongside it.\n\n1. Find or create the SyncMeta struct (likely near SyncResult). Add fields:\n```rust\n#[derive(Debug, Serialize)]\nstruct SyncMeta {\n run_id: String,\n elapsed_ms: u64,\n stages: Vec,\n}\n```\n\n2. After run_sync() completes, extract timings from MetricsLayer:\n```rust\nlet stages = metrics_handle.extract_timings();\nlet meta = SyncMeta {\n run_id: run_id.to_string(),\n elapsed_ms: start.elapsed().as_millis() as u64,\n stages,\n};\n```\n\n3. 
Build the JSON envelope:\n```rust\nlet output = serde_json::json!({\n \"ok\": true,\n \"data\": result,\n \"meta\": meta,\n});\n```\n\nThe metrics_handle (Arc) must be passed from main.rs to the command handler. This requires adding a parameter to handle_sync_cmd() and run_sync(), or using a global. Prefer parameter passing.\n\nSame pattern for standalone ingest: add stages to IngestMeta.\n\n## Acceptance Criteria\n- [ ] lore --robot sync output includes meta.run_id (string, 8 hex chars)\n- [ ] lore --robot sync output includes meta.stages (array of StageTiming)\n- [ ] meta.elapsed_ms still present (total wall clock time)\n- [ ] Each stage has name, elapsed_ms, items_processed at minimum\n- [ ] Top-level stages have sub_stages when applicable\n- [ ] lore --robot ingest also includes run_id and stages\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/cli/commands/sync.rs (add SyncMeta struct, wire extract_timings)\n- src/cli/commands/ingest.rs (same for standalone ingest)\n- src/main.rs (pass metrics_handle to command handlers)\n\n## TDD Loop\nRED: test_sync_meta_includes_stages (run robot-mode sync, parse JSON, assert meta.stages is array)\nGREEN: Add SyncMeta, extract timings, include in JSON output\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- Empty stages: if sync runs with --no-docs --no-embed, some stages won't exist. stages array is shorter, not padded.\n- extract_timings() called before root span closes: returns incomplete tree. 
Must call AFTER run_sync returns (span is dropped on function exit).\n- metrics_handle clone: MetricsLayer uses Arc internally, clone is cheap (reference count increment).","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:54:32.062410Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:31:11.073580Z","closed_at":"2026-02-04T17:31:11.073534Z","close_reason":"Wired MetricsLayer into subscriber stack (all 4 branches), added run_id to SyncResult, enriched SyncMeta with run_id + stages Vec, updated print_sync_json to accept MetricsLayer and extract timings","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-1zj6","depends_on_id":"bd-34ek","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1zj6","depends_on_id":"bd-3er","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-1zow","title":"Implement Search screen (state + action + view)","description":"## Background\nThe Search screen provides full-text and semantic search across all indexed documents. 
It supports 3 modes (lexical FTS5, hybrid FTS+vector, semantic vector-only), a split-pane layout with results on the left and preview on the right, and capability-aware mode selection based on available indexes.\n\n## Approach\nState (state/search.rs):\n- SearchState: query (String), query_input (TextInput), query_focused (bool), mode (SearchMode), results (Vec), selected_index (usize), preview (Option), capabilities (SearchCapabilities), generation (u64)\n- SearchMode: Lexical, Hybrid, Semantic\n- SearchCapabilities: has_fts (bool), has_embeddings (bool), embedding_coverage_pct (f32)\n- SearchResult: doc_id, entity_type, entity_iid, project_path, title, snippet, score, mode_used\n- SearchPreview: full document text or entity detail\n\n**Capability detection** (on screen entry):\n- Probe documents_fts table: SELECT COUNT(*) FROM documents_fts_docsize (uses fast B-tree count, not FTS5 virtual table scan — see MEMORY.md perf audit)\n- Probe embeddings: SELECT COUNT(*) FROM embeddings / SELECT COUNT(*) FROM documents to compute coverage pct\n- If has_fts=false: disable Lexical and Hybrid modes, only Semantic available\n- If has_embeddings=false: disable Semantic and Hybrid modes, only Lexical available\n- If both false: show \"No search indexes found. 
Run lore generate-docs and lore embed first.\"\n\n**Score explanation (e key):**\n- Press e on a selected result to toggle a score breakdown panel\n- For Lexical: show FTS5 bm25 raw score\n- For Hybrid: show FTS score, vector score, and RRF combined score with weights\n- For Semantic: show cosine similarity score\n- Panel appears below the selected result row, Esc or e dismisses\n\n**Debounced input (200ms):**\n- Uses Msg::SearchDebounceArmed and Msg::SearchDebounced timer pattern\n- On keystroke in query input: arm debounce timer via Cmd::timer(200ms, Msg::SearchDebounced)\n- On SearchDebounced: execute search with current query text\n- This prevents flooding the search backend on rapid typing\n\nAction (action.rs):\n- fetch_search_capabilities(conn) -> SearchCapabilities: probe FTS and embedding tables\n- execute_search(conn, query, mode, limit) -> Vec: dispatches to correct search backend. Uses existing crate::search module functions.\n- fetch_search_preview(conn, result) -> SearchPreview: loads full entity detail for selected result\n\nView (view/search.rs):\n- Split pane: results list (60%) | preview (40%)\n- Query bar at top with mode indicator (L/H/S)\n- Mode switching: Tab cycles modes (only available modes based on capabilities)\n- Score column shows numeric score; e key expands explanation\n- Empty query shows recent entities instead of empty state\n- Narrow terminal (<100 cols): hide preview pane\n\n## Acceptance Criteria\n- [ ] 3 search modes: Lexical, Hybrid, Semantic\n- [ ] Mode switching via Tab, only available modes selectable based on capability detection\n- [ ] Capability detection probes FTS and embedding tables on screen entry\n- [ ] Graceful degradation: unavailable modes shown as greyed out with reason\n- [ ] \"No search indexes\" message when both FTS and embeddings are empty\n- [ ] 200ms debounce on search input (timer-driven via Msg::SearchDebounceArmed/Fired)\n- [ ] Split pane: results | preview\n- [ ] Enter on result navigates to entity 
detail\n- [ ] Score shown next to each result\n- [ ] e key toggles score explanation panel for selected result\n- [ ] Empty query shows recent entities instead of empty state\n- [ ] Narrow terminal (<100 cols): hide preview pane\n\n## Files\n- MODIFY: crates/lore-tui/src/state/search.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add search functions)\n- CREATE: crates/lore-tui/src/view/search.rs\n\n## TDD Anchor\nRED: Write test_search_capability_detection that creates DB with FTS but no embeddings, asserts has_fts=true, has_embeddings=false, Semantic mode disabled.\nGREEN: Implement fetch_search_capabilities.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_search_capability\n\nAdditional tests:\n- test_debounce_prevents_rapid_search: simulate 5 keystrokes in 100ms, assert only 1 search executed\n- test_score_explanation_lexical: verify bm25 score shown for Lexical mode result\n- test_empty_query_shows_recent: assert recent entities returned when query is empty\n\n## Edge Cases\n- Search query < 2 chars: don't execute search (debounce filter)\n- FTS5 special characters (*, \", -): escape or pass through based on mode\n- Hybrid mode: uses existing RRF implementation from crate::search module\n- Very large result sets: limit to 100 results, show \"more results available\" hint\n- Preview pane on narrow terminal (<100 cols): hide preview, full-width results only\n- FTS count performance: use documents_fts_docsize shadow table for COUNT (19x faster)\n\n## Dependency Context\nUses existing search infrastructure from lore core (crate::search::{FtsQueryMode, to_fts_query} — note private submodules, import via crate::search).\nUses SearchDebounceArmed/SearchDebounced Msg variants from \"Implement core types\" (bd-c9gk).\nUses TaskSupervisor debounce management from \"Implement TaskSupervisor\" (bd-3le2).\nUses AppState composition from \"Implement AppState composition\" 
(bd-1v9m).","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:00:48.862621Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:33.891935Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1zow","depends_on_id":"bd-1mju","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1zow","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-1zow","title":"Implement Search screen (state + action + view)","description":"## Background\nThe Search screen provides full-text and semantic search across all indexed documents. It supports 3 modes (lexical FTS5, hybrid FTS+vector, semantic vector-only), a split-pane layout with results on the left and preview on the right, and capability-aware mode selection based on available indexes.\n\n## Approach\nState (state/search.rs):\n- SearchState: query (String), query_input (TextInput), query_focused (bool), mode (SearchMode), results (Vec), selected_index (usize), preview (Option), capabilities (SearchCapabilities), generation (u64)\n- SearchMode: Lexical, Hybrid, Semantic\n- SearchCapabilities: has_fts (bool), has_embeddings (bool), embedding_coverage_pct (f32)\n- SearchResult: doc_id, entity_type, entity_iid, project_path, title, snippet, score, mode_used\n- SearchPreview: full document text or entity detail\n\n**Capability detection** (on screen entry):\n- Probe documents_fts table: SELECT COUNT(*) FROM documents_fts_docsize (uses fast B-tree count, not FTS5 virtual table scan — see MEMORY.md perf audit)\n- Probe embeddings: SELECT COUNT(*) FROM embeddings / SELECT COUNT(*) FROM documents to compute coverage pct\n- If has_fts=false: disable Lexical and Hybrid modes, only Semantic available\n- If has_embeddings=false: disable Semantic and Hybrid modes, only Lexical available\n- If both false: show \"No search indexes found. 
Run lore generate-docs and lore embed first.\"\n\n**Score explanation (e key):**\n- Press e on a selected result to toggle a score breakdown panel\n- For Lexical: show FTS5 bm25 raw score\n- For Hybrid: show FTS score, vector score, and RRF combined score with weights\n- For Semantic: show cosine similarity score\n- Panel appears below the selected result row, Esc or e dismisses\n\n**Debounced input (200ms):**\n- Uses Msg::SearchDebounceArmed and Msg::SearchDebounced timer pattern\n- On keystroke in query input: arm debounce timer via Cmd::timer(200ms, Msg::SearchDebounced)\n- On SearchDebounced: execute search with current query text\n- This prevents flooding the search backend on rapid typing\n\nAction (action.rs):\n- fetch_search_capabilities(conn) -> SearchCapabilities: probe FTS and embedding tables\n- execute_search(conn, query, mode, limit) -> Vec: dispatches to correct search backend. Uses existing crate::search module functions.\n- fetch_search_preview(conn, result) -> SearchPreview: loads full entity detail for selected result\n\nView (view/search.rs):\n- Split pane: results list (60%) | preview (40%)\n- Query bar at top with mode indicator (L/H/S)\n- Mode switching: Tab cycles modes (only available modes based on capabilities)\n- Score column shows numeric score; e key expands explanation\n- Empty query shows recent entities instead of empty state\n- Narrow terminal (<100 cols): hide preview pane\n\n## Acceptance Criteria\n- [ ] 3 search modes: Lexical, Hybrid, Semantic\n- [ ] Mode switching via Tab, only available modes selectable based on capability detection\n- [ ] Capability detection probes FTS and embedding tables on screen entry\n- [ ] Graceful degradation: unavailable modes shown as greyed out with reason\n- [ ] \"No search indexes\" message when both FTS and embeddings are empty\n- [ ] 200ms debounce on search input (timer-driven via Msg::SearchDebounceArmed/Fired)\n- [ ] Split pane: results | preview\n- [ ] Enter on result navigates to entity 
detail\n- [ ] Score shown next to each result\n- [ ] e key toggles score explanation panel for selected result\n- [ ] Empty query shows recent entities instead of empty state\n- [ ] Narrow terminal (<100 cols): hide preview pane\n\n## Files\n- MODIFY: crates/lore-tui/src/state/search.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add search functions)\n- CREATE: crates/lore-tui/src/view/search.rs\n\n## TDD Anchor\nRED: Write test_search_capability_detection that creates DB with FTS but no embeddings, asserts has_fts=true, has_embeddings=false, Semantic mode disabled.\nGREEN: Implement fetch_search_capabilities.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_search_capability\n\nAdditional tests:\n- test_debounce_prevents_rapid_search: simulate 5 keystrokes in 100ms, assert only 1 search executed\n- test_score_explanation_lexical: verify bm25 score shown for Lexical mode result\n- test_empty_query_shows_recent: assert recent entities returned when query is empty\n\n## Edge Cases\n- Search query < 2 chars: don't execute search (debounce filter)\n- FTS5 special characters (*, \", -): escape or pass through based on mode\n- Hybrid mode: uses existing RRF implementation from crate::search module\n- Very large result sets: limit to 100 results, show \"more results available\" hint\n- Preview pane on narrow terminal (<100 cols): hide preview, full-width results only\n- FTS count performance: use documents_fts_docsize shadow table for COUNT (19x faster)\n\n## Dependency Context\nUses existing search infrastructure from lore core (crate::search::{FtsQueryMode, to_fts_query} — note private submodules, import via crate::search).\nUses SearchDebounceArmed/SearchDebounced Msg variants from \"Implement core types\" (bd-c9gk).\nUses TaskSupervisor debounce management from \"Implement TaskSupervisor\" (bd-3le2).\nUses AppState composition from \"Implement AppState composition\" 
(bd-1v9m).","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:00:48.862621Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:29:26.746150Z","closed_at":"2026-02-18T21:29:26.746088Z","close_reason":"Implemented Search screen: state (SearchMode, SearchCapabilities, SearchState with 27 tests), view (query bar + mode indicator + results list with 9 tests), action (FTS search + capability detection with 9 tests), plus SearchMode type in message.rs (4 tests). Wired into view/mod.rs dispatch.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1zow","depends_on_id":"bd-1mju","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1zwv","title":"Display assignees, due_date, and milestone in lore issues output","description":"## Background\nThe `lore issues ` command displays issue details but omits key metadata that exists in the database: assignees, due dates, and milestones. Users need this information to understand issue context without opening GitLab.\n\n**System fit**: This data is already ingested during issue sync (migration 005) but the show command never queries it.\n\n## Approach\n\nAll changes in `src/cli/commands/show.rs`:\n\n### 1. Update IssueRow struct (line ~119)\nAdd fields to internal row struct:\n```rust\nstruct IssueRow {\n // ... existing 10 fields ...\n due_date: Option, // NEW\n milestone_title: Option, // NEW\n}\n```\n\n### 2. Update find_issue() SQL (line ~137)\nExtend SELECT:\n```sql\nSELECT i.id, i.iid, i.title, i.description, i.state, i.author_username,\n i.created_at, i.updated_at, i.web_url, p.path_with_namespace,\n i.due_date, i.milestone_title -- ADD THESE\nFROM issues i ...\n```\n\nUpdate row mapping to extract columns 10 and 11.\n\n### 3. 
Add get_issue_assignees() (after get_issue_labels ~line 189)\n```rust\nfn get_issue_assignees(conn: &Connection, issue_id: i64) -> Result> {\n let mut stmt = conn.prepare(\n \"SELECT username FROM issue_assignees WHERE issue_id = ? ORDER BY username\"\n )?;\n let assignees = stmt\n .query_map([issue_id], |row| row.get(0))?\n .collect::, _>>()?;\n Ok(assignees)\n}\n```\n\n### 4. Update IssueDetail struct (line ~59)\n```rust\npub struct IssueDetail {\n // ... existing 12 fields ...\n pub assignees: Vec, // NEW\n pub due_date: Option, // NEW\n pub milestone: Option, // NEW\n}\n```\n\n### 5. Update IssueDetailJson struct (line ~770)\nAdd same 3 fields with identical types.\n\n### 6. Update run_show_issue() (line ~89)\n```rust\nlet assignees = get_issue_assignees(&conn, issue.id)?;\n// In return struct:\nassignees,\ndue_date: issue.due_date,\nmilestone: issue.milestone_title,\n```\n\n### 7. Update print_show_issue() (line ~533, after Author line ~548)\n```rust\nif !issue.assignees.is_empty() {\n println!(\"Assignee{}: {}\",\n if issue.assignees.len() > 1 { \"s\" } else { \"\" },\n issue.assignees.iter().map(|a| format!(\"@{}\", a)).collect::>().join(\", \"));\n}\nif let Some(due) = &issue.due_date {\n println!(\"Due: {}\", due);\n}\nif let Some(ms) = &issue.milestone {\n println!(\"Milestone: {}\", ms);\n}\n```\n\n### 8. 
Update From<&IssueDetail> for IssueDetailJson (line ~799)\n```rust\nassignees: issue.assignees.clone(),\ndue_date: issue.due_date.clone(),\nmilestone: issue.milestone.clone(),\n```\n\n## Acceptance Criteria\n- [ ] `cargo test test_get_issue_assignees` passes (3 tests)\n- [ ] `lore issues ` shows Assignees line when assignees exist\n- [ ] `lore issues ` shows Due line when due_date set\n- [ ] `lore issues ` shows Milestone line when milestone set\n- [ ] `lore -J issues ` includes assignees/due_date/milestone in JSON\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n- `src/cli/commands/show.rs` - ALL changes\n\n## TDD Loop\n\n**RED** - Add tests to `src/cli/commands/show.rs` `#[cfg(test)] mod tests`:\n\n```rust\nuse crate::core::db::{create_connection, run_migrations};\nuse std::path::Path;\n\nfn setup_test_db() -> Connection {\n let conn = create_connection(Path::new(\":memory:\")).unwrap();\n run_migrations(&conn).unwrap();\n conn\n}\n\n#[test]\nfn test_get_issue_assignees_empty() {\n let conn = setup_test_db();\n // seed project + issue with no assignees\n let result = get_issue_assignees(&conn, 1).unwrap();\n assert!(result.is_empty());\n}\n\n#[test]\nfn test_get_issue_assignees_multiple_sorted() {\n let conn = setup_test_db();\n // seed with alice, bob\n let result = get_issue_assignees(&conn, 1).unwrap();\n assert_eq!(result, vec![\"alice\", \"bob\"]); // alphabetical\n}\n\n#[test]\nfn test_get_issue_assignees_single() {\n let conn = setup_test_db();\n // seed with charlie only\n let result = get_issue_assignees(&conn, 1).unwrap();\n assert_eq!(result, vec![\"charlie\"]);\n}\n```\n\n**GREEN** - Implement get_issue_assignees() and struct updates\n\n**VERIFY**: `cargo test test_get_issue_assignees && cargo clippy --all-targets -- -D warnings`\n\n## Edge Cases\n- Empty assignees list -> don't print Assignees line\n- NULL due_date -> don't print Due line \n- NULL milestone_title -> don't print Milestone line\n- Single vs multiple assignees -> 
\"Assignee\" vs \"Assignees\" grammar","status":"closed","priority":1,"issue_type":"feature","created_at":"2026-02-05T15:16:00.105830Z","created_by":"tayloreernisse","updated_at":"2026-02-05T15:26:08.147202Z","closed_at":"2026-02-05T15:26:08.147154Z","close_reason":"Implemented: assignees, due_date, milestone now display in lore issues . All 7 new tests pass.","compaction_level":0,"original_size":0,"labels":["ISSUE"]} {"id":"bd-208","title":"[CP1] Issue ingestion module","description":"## Background\n\nThe issue ingestion module fetches and stores issues with cursor-based incremental sync. It is the primary data ingestion component, establishing the pattern reused for MR ingestion in CP2. The module handles tuple-cursor semantics, raw payload storage, label extraction, and tracking which issues need discussion sync.\n\n## Approach\n\n### Module: src/ingestion/issues.rs\n\n### Key Structs\n\n```rust\n#[derive(Debug, Default)]\npub struct IngestIssuesResult {\n pub fetched: usize,\n pub upserted: usize,\n pub labels_created: usize,\n pub issues_needing_discussion_sync: Vec,\n}\n\n#[derive(Debug, Clone)]\npub struct IssueForDiscussionSync {\n pub local_issue_id: i64,\n pub iid: i64,\n pub updated_at: i64, // ms epoch\n}\n```\n\n### Main Function\n\n```rust\npub async fn ingest_issues(\n conn: &Connection,\n client: &GitLabClient,\n config: &Config,\n project_id: i64, // Local DB project ID\n gitlab_project_id: i64,\n) -> Result\n```\n\n### Logic (Step by Step)\n\n1. **Get current cursor** from sync_cursors table:\n```sql\nSELECT updated_at_cursor, tie_breaker_id\nFROM sync_cursors\nWHERE project_id = ? AND resource_type = 'issues'\n```\n\n2. **Call pagination method** with cursor rewind:\n```rust\nlet issues_stream = client.paginate_issues(\n gitlab_project_id,\n cursor.updated_at_cursor,\n config.sync.cursor_rewind_seconds,\n);\n```\n\n3. 
**Apply local filtering** for tuple cursor semantics:\n```rust\n// Skip if issue.updated_at < cursor_updated_at\n// Skip if issue.updated_at == cursor_updated_at AND issue.gitlab_id <= cursor_gitlab_id\nfn passes_cursor_filter(issue: &GitLabIssue, cursor: &SyncCursor) -> bool {\n if issue.updated_at < cursor.updated_at_cursor {\n return false;\n }\n if issue.updated_at == cursor.updated_at_cursor \n && issue.gitlab_id <= cursor.tie_breaker_id {\n return false;\n }\n true\n}\n```\n\n4. **For each issue passing filter**:\n```rust\n// Begin transaction (unchecked_transaction for rusqlite)\nlet tx = conn.unchecked_transaction()?;\n\n// Store raw payload (compressed based on config)\nlet payload_id = store_raw_payload(&tx, &issue_json, config.storage.compress_raw_payloads)?;\n\n// Transform and upsert issue\nlet issue_row = transform_issue(&issue)?;\nupsert_issue(&tx, &issue_row, project_id, payload_id)?;\nlet local_issue_id = get_local_issue_id(&tx, project_id, issue.iid)?;\n\n// Clear existing label links (stale removal!)\ntx.execute(\"DELETE FROM issue_labels WHERE issue_id = ?\", [local_issue_id])?;\n\n// Extract and upsert labels\nfor label_name in &issue_row.label_names {\n let label_id = upsert_label(&tx, project_id, label_name)?;\n link_issue_label(&tx, local_issue_id, label_id)?;\n}\n\ntx.commit()?;\n```\n\n5. **Incremental cursor update** every 100 issues:\n```rust\nif batch_count % 100 == 0 {\n update_sync_cursor(conn, project_id, \"issues\", last_updated_at, last_gitlab_id)?;\n}\n```\n\n6. **Final cursor update** after all issues processed\n\n7. 
**Determine issues needing discussion sync**:\n```sql\nSELECT id, iid, updated_at\nFROM issues\nWHERE project_id = ?\n AND updated_at > COALESCE(discussions_synced_for_updated_at, 0)\n```\n\n### Helper Functions\n\n```rust\nfn store_raw_payload(conn, json: &Value, compress: bool) -> Result\nfn upsert_issue(conn, issue: &IssueRow, project_id: i64, payload_id: i64) -> Result<()>\nfn get_local_issue_id(conn, project_id: i64, iid: i64) -> Result\nfn upsert_label(conn, project_id: i64, name: &str) -> Result\nfn link_issue_label(conn, issue_id: i64, label_id: i64) -> Result<()>\nfn update_sync_cursor(conn, project_id: i64, resource: &str, updated_at: i64, gitlab_id: i64) -> Result<()>\n```\n\n### Critical Invariant\n\nStale label links MUST be removed on resync. The \"DELETE then INSERT\" pattern ensures GitLab reality is reflected locally. If an issue had labels [A, B] and now has [A, C], the B link must be removed.\n\n## Acceptance Criteria\n\n- [ ] `ingest_issues` returns IngestIssuesResult with all counts\n- [ ] Cursor fetched from sync_cursors at start\n- [ ] Cursor rewind applied before API call\n- [ ] Local filtering skips already-processed issues\n- [ ] Each issue wrapped in transaction for atomicity\n- [ ] Raw payload stored with correct compression\n- [ ] Issue upserted (INSERT OR REPLACE pattern)\n- [ ] Existing label links deleted before new links inserted\n- [ ] Labels upserted (INSERT OR IGNORE by project+name)\n- [ ] Cursor updated every 100 issues (crash recovery)\n- [ ] Final cursor update after all issues\n- [ ] issues_needing_discussion_sync populated correctly\n\n## Files\n\n- src/ingestion/mod.rs (add `pub mod issues;`)\n- src/ingestion/issues.rs (create)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/issue_ingestion_tests.rs\n#[tokio::test] async fn ingests_issues_from_stream()\n#[tokio::test] async fn applies_cursor_filter_correctly()\n#[tokio::test] async fn updates_cursor_every_100_issues()\n#[tokio::test] async fn 
stores_raw_payload_for_each_issue()\n#[tokio::test] async fn upserts_issues_correctly()\n\n// tests/label_linkage_tests.rs\n#[tokio::test] async fn extracts_and_stores_labels()\n#[tokio::test] async fn removes_stale_label_links_on_resync()\n#[tokio::test] async fn handles_empty_labels_array()\n\n// tests/discussion_eligibility_tests.rs\n#[tokio::test] async fn identifies_issues_needing_discussion_sync()\n#[tokio::test] async fn skips_issues_with_current_watermark()\n```\n\nGREEN: Implement ingest_issues with all helper functions\n\nVERIFY: `cargo test issue_ingestion && cargo test label_linkage && cargo test discussion_eligibility`\n\n## Edge Cases\n\n- Empty issues stream - return result with all zeros\n- Cursor at epoch 0 - fetch all issues (no filtering)\n- Issue with no labels - empty Vec, no label links created\n- Issue with 50+ labels - all should be linked\n- Crash mid-batch - cursor at last 100-boundary, some issues re-fetched\n- Label already exists - upsert via INSERT OR IGNORE\n- Same issue fetched twice (due to rewind) - upsert handles it","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.245404Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:52:38.003964Z","closed_at":"2026-01-25T22:52:38.003868Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-208","depends_on_id":"bd-2iq","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-208","depends_on_id":"bd-3nd","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-208","depends_on_id":"bd-xhz","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-20e","title":"Define TimelineEvent model and TimelineEventType enum","description":"## Background\n\nThe TimelineEvent model is the foundational data type for Gate 3's timeline feature. 
All pipeline stages (seed, expand, collect, interleave) produce or consume TimelineEvents. This must be defined first because every downstream bead (bd-32q, bd-ypa, bd-3as, bd-dty, bd-2f2) depends on these types.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 3.3 (Event Model).\n\n## Codebase Context\n\n- Migration 011 created: resource_state_events, resource_label_events, resource_milestone_events, entity_references, pending_dependent_fetches\n- source_method CHECK constraint: `'api' | 'note_parse' | 'description_parse'` (NOT spec's 'api_closes_issues' etc.)\n- reference_type CHECK constraint: `'closes' | 'mentioned' | 'related'`\n- LATEST_SCHEMA_VERSION = 14\n\n## Approach\n\nCreate `src/core/timeline.rs` with the following types:\n\n```rust\n/// The core timeline event. All pipeline stages produce or consume these.\n/// Spec ref: Section 3.3 \"Event Model\"\n#[derive(Debug, Clone, Serialize)]\npub struct TimelineEvent {\n pub timestamp: i64, // ms epoch UTC\n pub entity_type: String, // \"issue\" | \"merge_request\"\n pub entity_id: i64, // local DB id (internal, not in JSON output)\n pub entity_iid: i64,\n pub project_path: String,\n pub event_type: TimelineEventType,\n pub summary: String, // human-readable one-liner\n pub actor: Option, // username or None for system\n pub url: Option, // web URL for the event source\n pub is_seed: bool, // true if from seed phase, false if expanded\n}\n\n/// Per spec Section 3.3. 
Serde tagged enum for JSON output.\n/// IMPORTANT: entity_type is String (not &'static str) because serde Serialize\n/// requires owned types for struct fields when deriving.\n#[derive(Debug, Clone, Serialize)]\n#[serde(tag = \"kind\", rename_all = \"snake_case\")]\npub enum TimelineEventType {\n Created,\n StateChanged { state: String }, // spec: just the target state\n LabelAdded { label: String },\n LabelRemoved { label: String },\n MilestoneSet { milestone: String },\n MilestoneRemoved { milestone: String },\n Merged, // spec: unit variant\n NoteEvidence {\n note_id: i64, // spec: required\n snippet: String, // first ~200 chars of matching note body\n discussion_id: Option, // spec: optional\n },\n CrossReferenced { target: String }, // compact target ref like \"\\!567\" or \"#234\"\n}\n\n/// Internal entity reference used across pipeline stages.\n#[derive(Debug, Clone, Serialize)]\npub struct EntityRef {\n pub entity_type: String, // String not &'static str — needed for Serialize\n pub entity_id: i64,\n pub entity_iid: i64,\n pub project_path: String,\n}\n\n/// An entity discovered via BFS expansion.\n/// Spec ref: Section 3.5 \"expanded_entities\" JSON structure.\n#[derive(Debug, Clone, Serialize)]\npub struct ExpandedEntityRef {\n pub entity_ref: EntityRef,\n pub depth: u32,\n pub via_from: EntityRef, // the entity that referenced this one\n pub via_reference_type: String, // \"closes\", \"mentioned\", \"related\"\n pub via_source_method: String, // \"api\", \"note_parse\", \"description_parse\"\n}\n\n/// Reference to an unsynced external entity.\n/// Spec ref: Section 3.5 \"unresolved_references\" JSON structure.\n#[derive(Debug, Clone, Serialize)]\npub struct UnresolvedRef {\n pub source: EntityRef,\n pub target_project: Option,\n pub target_type: String,\n pub target_iid: i64,\n pub reference_type: String,\n}\n\n/// Complete result from the timeline pipeline.\n#[derive(Debug, Clone, Serialize)]\npub struct TimelineResult {\n pub query: String,\n pub 
events: Vec,\n pub seed_entities: Vec,\n pub expanded_entities: Vec,\n pub unresolved_references: Vec,\n}\n```\n\nImplement `Ord` on `TimelineEvent` for chronological sort: primary key `timestamp`, tiebreak by `entity_id` then event_type discriminant.\n\nAlso implement `PartialEq`, `Eq`, `PartialOrd` (required by Ord).\n\nRegister in `src/core/mod.rs`: `pub mod timeline;`\n\n## Acceptance Criteria\n\n- [ ] `src/core/timeline.rs` compiles with no warnings\n- [ ] All struct fields use `String` not `&'static str` (required for `#[derive(Serialize)]`)\n- [ ] `TimelineEventType` has exactly 9 variants matching spec Section 3.3\n- [ ] `NoteEvidence` has `note_id: i64`, `snippet: String`, `discussion_id: Option`\n- [ ] `ExpandedEntityRef.via_source_method` documents codebase values: api, note_parse, description_parse\n- [ ] `Ord` impl sorts by (timestamp, entity_id, event_type discriminant)\n- [ ] `PartialEq`, `Eq`, `PartialOrd` derived or implemented\n- [ ] Module registered in `src/core/mod.rs`\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/core/timeline.rs` (NEW)\n- `src/core/mod.rs` (add `pub mod timeline;`)\n\n## TDD Loop\n\nRED: Create `src/core/timeline.rs` with `#[cfg(test)] mod tests`:\n- `test_timeline_event_sort_by_timestamp` - events sort chronologically\n- `test_timeline_event_sort_tiebreak` - same-timestamp events sort stably\n- `test_timeline_event_type_serializes_tagged` - serde JSON uses `kind` tag\n- `test_note_evidence_has_note_id` - note_id present in serialized output\n\nGREEN: Implement the types and Ord trait.\n\nVERIFY: `cargo test --lib -- timeline`\n\n## Edge Cases\n\n- Ord must be consistent and total for all valid TimelineEvent pairs\n- NoteEvidence snippet truncated to 200 chars at construction, not in the type\n- entity_type uses String to satisfy serde Serialize derive requirements\n- url field: constructed from project_path + entity_type + iid; None for entities 
without web_url","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:33:08.569126Z","created_by":"tayloreernisse","updated_at":"2026-02-05T21:43:02.449502Z","closed_at":"2026-02-05T21:43:02.449454Z","close_reason":"Completed: Created src/core/timeline.rs with TimelineEvent, TimelineEventType (9 variants), EntityRef, ExpandedEntityRef, UnresolvedRef, TimelineResult. Ord impl sorts by (timestamp, entity_id, event_type discriminant). entity_id skipped in serde output. 6 tests pass. All quality gates pass.","compaction_level":0,"original_size":0,"labels":["gate-3","phase-b","types"],"dependencies":[{"issue_id":"bd-20e","depends_on_id":"bd-ike","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -118,7 +118,7 @@ {"id":"bd-26lp","title":"Implement CLI integration (lore tui command + binary delegation)","description":"## Background\nThe lore CLI binary needs a tui subcommand that launches the lore-tui binary. This is runtime binary delegation — lore finds lore-tui via PATH lookup and execs it, passing through relevant flags. Zero compile-time dependency from lore to lore-tui. The TUI is the human interface; the CLI is the robot/script interface.\n\n## Approach\nAdd a tui subcommand to the lore CLI:\n\n**CLI side** (`src/cli/tui.rs`):\n- Add `Tui` variant to the main CLI enum with flags: --config, --sync, --fresh, --render-mode, --ascii, --no-alt-screen\n- Implementation: resolve lore-tui binary via PATH lookup (std::process::Command with \"lore-tui\")\n- Pass through all flags as CLI arguments\n- If lore-tui not found in PATH, print helpful error: \"lore-tui binary not found. 
Install with: cargo install --path crates/lore-tui\"\n- Exec (not spawn+wait) using std::os::unix::process::CommandExt::exec() for clean process replacement on Unix\n\n**Binary naming**: The binary is `lore-tui` (hyphenated), matching the crate name.\n\n## Acceptance Criteria\n- [ ] lore tui launches lore-tui binary from PATH\n- [ ] All flags (--config, --sync, --fresh, --render-mode, --ascii, --no-alt-screen) are passed through\n- [ ] Missing binary produces helpful error with install instructions\n- [ ] Uses exec() on Unix for clean process replacement (no zombie parent)\n- [ ] Robot mode: lore --robot tui returns JSON error if binary not found\n- [ ] lore tui --help shows TUI-specific flags\n\n## Files\n- CREATE: src/cli/tui.rs\n- MODIFY: src/cli/mod.rs (add tui subcommand to CLI enum)\n- MODIFY: src/main.rs (add match arm for Tui variant)\n\n## TDD Anchor\nRED: Write `test_tui_binary_not_found_error` that asserts the error message includes install instructions when lore-tui is not in PATH.\nGREEN: Implement the binary lookup and error handling.\nVERIFY: cargo test tui_binary -- --nocapture\n\nAdditional tests:\n- test_tui_flag_passthrough (verify all flags are forwarded)\n- test_tui_robot_mode_json_error (structured error when binary missing)\n\n## Edge Cases\n- lore-tui binary exists but is not executable — should produce clear error\n- PATH contains multiple lore-tui versions — uses first match (standard PATH behavior)\n- Windows: exec() not available — fall back to spawn+wait+exit with same code\n- User runs lore tui in robot mode — should fail with structured JSON error (TUI is human-only)\n\n## Dependency Context\nDepends on bd-2iqk (Doctor + Stats screens) for phase ordering. 
The CLI integration is one of the last Phase 4 tasks because it requires lore-tui to be substantially complete for the delegation to be useful.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:39.602970Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.449333Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-26lp","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-26lp","depends_on_id":"bd-2iqk","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2711","title":"WHO: Reviews mode query (query_reviews)","description":"## Background\n\nReviews mode answers \"What review patterns does person X have?\" by analyzing the **prefix** convention in DiffNote bodies (e.g., **suggestion**: ..., **question**: ..., **nit**: ...). Only counts DiffNotes on MRs the user did NOT author (m.author_username != ?1).\n\n## Approach\n\n### Three queries:\n1. **Total DiffNotes**: COUNT(*) of DiffNotes by user on others' MRs\n2. **Distinct MRs reviewed**: COUNT(DISTINCT m.id) \n3. 
**Category extraction**: SQL-level prefix parsing + Rust normalization\n\n### Category extraction SQL:\n```sql\nSELECT\n SUBSTR(ltrim(n.body), 3, INSTR(SUBSTR(ltrim(n.body), 3), '**') - 1) AS raw_prefix,\n COUNT(*) AS cnt\nFROM notes n\nJOIN discussions d ON n.discussion_id = d.id\nJOIN merge_requests m ON d.merge_request_id = m.id\nWHERE n.author_username = ?1\n AND n.note_type = 'DiffNote' AND n.is_system = 0\n AND m.author_username != ?1\n AND ltrim(n.body) LIKE '**%**%' -- only bodies with **prefix** pattern\n AND n.created_at >= ?2\n AND (?3 IS NULL OR n.project_id = ?3)\nGROUP BY raw_prefix ORDER BY cnt DESC\n```\n\nKey: `ltrim(n.body)` tolerates leading whitespace before **prefix** (common in practice).\n\n### normalize_review_prefix() in Rust:\n```rust\nfn normalize_review_prefix(raw: &str) -> String {\n let s = raw.trim().trim_end_matches(':').trim().to_lowercase();\n // Strip parentheticals like \"(non-blocking)\"\n let s = if let Some(idx) = s.find('(') { s[..idx].trim().to_string() } else { s };\n // Merge nit/nitpick variants\n match s.as_str() {\n \"nitpick\" | \"nit\" => \"nit\".to_string(),\n other => other.to_string(),\n }\n}\n```\n\n### HashMap merge for normalized categories, then sort by count DESC\n\n### ReviewsResult struct:\n```rust\npub struct ReviewsResult {\n pub username: String,\n pub total_diffnotes: u32,\n pub categorized_count: u32,\n pub mrs_reviewed: u32,\n pub categories: Vec,\n}\npub struct ReviewCategory { pub name: String, pub count: u32, pub percentage: f64 }\n```\n\nNo LIMIT needed — categories are naturally bounded (few distinct prefixes).\n\n## Files\n\n- `src/cli/commands/who.rs`\n\n## TDD Loop\n\nRED:\n```\ntest_reviews_query — insert 3 DiffNotes (2 with **prefix**, 1 without); verify total=3, categorized=2, categories.len()=2\ntest_normalize_review_prefix — \"suggestion\" \"Suggestion:\" \"suggestion (non-blocking):\" \"Nitpick:\" \"nit (non-blocking):\" \"question\" \"TODO:\"\n```\n\nGREEN: Implement query_reviews + 
normalize_review_prefix\nVERIFY: `cargo test -- reviews`\n\n## Acceptance Criteria\n\n- [ ] test_reviews_query passes (total=3, categorized=2)\n- [ ] test_normalize_review_prefix passes (nit/nitpick merge, parenthetical strip)\n- [ ] Only counts DiffNotes on MRs user did NOT author\n- [ ] Default since window: 6m\n\n## Edge Cases\n\n- Self-authored MRs excluded (m.author_username != ?1) — user's notes on own MRs are not \"reviews\"\n- ltrim() handles leading whitespace before **prefix**\n- Empty raw_prefix after normalization filtered out (!normalized.is_empty())\n- Percentage calculated from categorized_count (not total_diffnotes)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:40:53.350210Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.599252Z","closed_at":"2026-02-08T04:10:29.599217Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2711","depends_on_id":"bd-2ldg","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2711","depends_on_id":"bd-34rr","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-296a","title":"NOTE-1E: Composite query index and author_id column (migration 022)","description":"## Background\nThe notes table needs composite covering indexes for the new query_notes() function, plus the author_id column for immutable identity (NOTE-0D). Combined in a single migration to avoid an extra migration step. Migration slot 022 is available (021 = work_item_status, 023 = issue_detail_fields already exists).\n\n## Approach\nCreate migrations/022_notes_query_index.sql with:\n\n1. 
Composite index for author-scoped queries (most common pattern):\n CREATE INDEX IF NOT EXISTS idx_notes_user_created\n ON notes(project_id, author_username COLLATE NOCASE, created_at DESC, id DESC)\n WHERE is_system = 0;\n\n2. Composite index for project-scoped date-range queries:\n CREATE INDEX IF NOT EXISTS idx_notes_project_created\n ON notes(project_id, created_at DESC, id DESC)\n WHERE is_system = 0;\n\n3. Discussion JOIN indexes (check if they already exist first):\n CREATE INDEX IF NOT EXISTS idx_discussions_issue_id ON discussions(issue_id);\n CREATE INDEX IF NOT EXISTS idx_discussions_mr_id ON discussions(merge_request_id);\n\n4. Immutable author identity column (for NOTE-0D):\n ALTER TABLE notes ADD COLUMN author_id INTEGER;\n CREATE INDEX IF NOT EXISTS idx_notes_author_id ON notes(author_id) WHERE author_id IS NOT NULL;\n\nRegister in src/core/db.rs MIGRATIONS array as (\"022\", include_str!(\"../../migrations/022_notes_query_index.sql\")). Insert BEFORE the existing (\"023\", ...) entry. 
LATEST_SCHEMA_VERSION auto-increments via MIGRATIONS.len().\n\n## Files\n- CREATE: migrations/022_notes_query_index.sql\n- MODIFY: src/core/db.rs (add (\"022\", include_str!(...)) to MIGRATIONS array, insert at position before \"023\" entry around line 73)\n\n## TDD Anchor\nRED: test_migration_022_indexes_exist — run_migrations on in-memory DB, verify 4 new indexes exist in sqlite_master.\nGREEN: Create migration file with all CREATE INDEX statements.\nVERIFY: cargo test migration_022 -- --nocapture\n\n## Acceptance Criteria\n- [ ] Migration 022 creates idx_notes_user_created partial index\n- [ ] Migration 022 creates idx_notes_project_created partial index\n- [ ] Migration 022 creates idx_discussions_issue_id (or is no-op if exists)\n- [ ] Migration 022 creates idx_discussions_mr_id (or is no-op if exists)\n- [ ] Migration 022 adds author_id INTEGER column to notes\n- [ ] Migration 022 creates idx_notes_author_id partial index\n- [ ] MIGRATIONS array in db.rs includes (\"022\", ...) before (\"023\", ...)\n- [ ] Existing tests still pass with new migration\n- [ ] Test verifying all indexes exist passes\n\n## Edge Cases\n- Partial indexes exclude system notes (is_system = 0) — filters 30-50% of notes\n- COLLATE NOCASE on author_username matches the query's case-insensitive comparison\n- author_id is nullable (existing notes won't have it until re-synced)\n- IF NOT EXISTS on all CREATE INDEX statements makes migration idempotent","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:18.127989Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.435624Z","closed_at":"2026-02-12T18:13:15.435576Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"],"dependencies":[{"issue_id":"bd-296a","depends_on_id":"bd-jbfw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-29qw","title":"Implement Timeline screen (state + action + 
view)","description":"## Background\nThe Timeline screen renders a chronological event stream from the 5-stage timeline pipeline (SEED -> HYDRATE -> EXPAND -> COLLECT -> RENDER). Events are color-coded by type and can be scoped to an entity, author, or time range.\n\n## Approach\nState (state/timeline.rs):\n- TimelineState: events (Vec), query (String), query_input (TextInput), query_focused (bool), selected_index (usize), scroll_offset (usize), scope (TimelineScope)\n- TimelineScope: All, Entity(EntityKey), Author(String), DateRange(DateTime, DateTime)\n\nAction (action.rs):\n- fetch_timeline(conn, scope, limit, clock) -> Vec: runs the timeline pipeline against DB\n\nView (view/timeline.rs):\n- Vertical event stream with timestamp gutter on the left\n- Color-coded event types: Created(green), Updated(yellow), Closed(red), Merged(purple), Commented(blue), Labeled(cyan), Milestoned(orange)\n- Each event: timestamp | entity ref | event description\n- Entity refs navigable via Enter\n- Query bar for filtering by text or entity\n- Keyboard: j/k scroll, Enter navigate to entity, / focus query, g+g top\n\n## Acceptance Criteria\n- [ ] Timeline renders chronological event stream\n- [ ] Events color-coded by type\n- [ ] Entity references navigable\n- [ ] Scope filters: all, per-entity, per-author, date range\n- [ ] Query bar filters events\n- [ ] Keyboard navigation works (j/k/Enter/Esc)\n- [ ] Timestamps use injected Clock\n\n## Files\n- MODIFY: crates/lore-tui/src/state/timeline.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_timeline)\n- CREATE: crates/lore-tui/src/view/timeline.rs\n\n## TDD Anchor\nRED: Write test_fetch_timeline_scoped that creates issues with events, calls fetch_timeline with Entity scope, asserts only that entity's events returned.\nGREEN: Implement fetch_timeline with scope filtering.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_timeline\n\n## Edge Cases\n- Timeline pipeline may not be fully 
implemented in core yet — degrade gracefully if SEED/HYDRATE/EXPAND stages are not available, fall back to raw events\n- Very long timelines: VirtualizedList or lazy loading for performance\n- Events with identical timestamps: stable sort by entity type, then iid\n\n## Dependency Context\nUses timeline pipeline types from src/core/timeline.rs if available.\nUses Clock for timestamp rendering from \"Implement Clock trait\" task.\nUses EntityKey navigation from \"Implement core types\" task.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:05.605968Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:33.993830Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-29qw","depends_on_id":"bd-1zow","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-29qw","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-29qw","title":"Implement Timeline screen (state + action + view)","description":"## Background\nThe Timeline screen renders a chronological event stream from the 5-stage timeline pipeline (SEED -> HYDRATE -> EXPAND -> COLLECT -> RENDER). 
Events are color-coded by type and can be scoped to an entity, author, or time range.\n\n## Approach\nState (state/timeline.rs):\n- TimelineState: events (Vec), query (String), query_input (TextInput), query_focused (bool), selected_index (usize), scroll_offset (usize), scope (TimelineScope)\n- TimelineScope: All, Entity(EntityKey), Author(String), DateRange(DateTime, DateTime)\n\nAction (action.rs):\n- fetch_timeline(conn, scope, limit, clock) -> Vec: runs the timeline pipeline against DB\n\nView (view/timeline.rs):\n- Vertical event stream with timestamp gutter on the left\n- Color-coded event types: Created(green), Updated(yellow), Closed(red), Merged(purple), Commented(blue), Labeled(cyan), Milestoned(orange)\n- Each event: timestamp | entity ref | event description\n- Entity refs navigable via Enter\n- Query bar for filtering by text or entity\n- Keyboard: j/k scroll, Enter navigate to entity, / focus query, g+g top\n\n## Acceptance Criteria\n- [ ] Timeline renders chronological event stream\n- [ ] Events color-coded by type\n- [ ] Entity references navigable\n- [ ] Scope filters: all, per-entity, per-author, date range\n- [ ] Query bar filters events\n- [ ] Keyboard navigation works (j/k/Enter/Esc)\n- [ ] Timestamps use injected Clock\n\n## Files\n- MODIFY: crates/lore-tui/src/state/timeline.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_timeline)\n- CREATE: crates/lore-tui/src/view/timeline.rs\n\n## TDD Anchor\nRED: Write test_fetch_timeline_scoped that creates issues with events, calls fetch_timeline with Entity scope, asserts only that entity's events returned.\nGREEN: Implement fetch_timeline with scope filtering.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_timeline\n\n## Edge Cases\n- Timeline pipeline may not be fully implemented in core yet — degrade gracefully if SEED/HYDRATE/EXPAND stages are not available, fall back to raw events\n- Very long timelines: VirtualizedList or lazy loading for 
performance\n- Events with identical timestamps: stable sort by entity type, then iid\n\n## Dependency Context\nUses timeline pipeline types from src/core/timeline.rs if available.\nUses Clock for timestamp rendering from \"Implement Clock trait\" task.\nUses EntityKey navigation from \"Implement core types\" task.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:05.605968Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:46:49.941242Z","closed_at":"2026-02-18T21:46:49.941051Z","close_reason":"Timeline screen complete: TimelineState (scope/generation/selection/scroll), action functions (4 event source collectors querying resource event tables), view renderer (color-coded event stream with scrolling), all wired into screen dispatch. 518 tests pass, clippy clean, fmt clean.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-29qw","depends_on_id":"bd-1zow","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-29wn","title":"Split app.rs into app/ module (model + key dispatch)","description":"app.rs is 712 lines and will grow as screens are added. Split into crates/lore-tui/src/app/mod.rs (LoreApp struct, new(), init()), app/update.rs (update() method, key dispatch, message handling), app/view.rs (view() delegation). Key dispatch is the largest section and the primary growth point. Keep public API identical via re-exports.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T21:24:16.854321Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:53:10.448834Z","closed_at":"2026-02-18T18:53:10.448649Z","close_reason":"Split app.rs into app/mod.rs, app/update.rs, app/tests.rs. All 177 tests pass.","compaction_level":0,"original_size":0,"labels":["TUI"]} {"id":"bd-2ac","title":"Create migration 009_embeddings.sql","description":"## Background\nMigration 009 creates the embedding storage layer for Gate B. 
It introduces a sqlite-vec vec0 virtual table for vector search and an embedding_metadata table for tracking provenance per chunk. Unlike migrations 007-008, this migration REQUIRES sqlite-vec to be loaded before it can be applied. The migration runner in db.rs must load the sqlite-vec extension first.\n\n## Approach\nCreate `migrations/009_embeddings.sql` per PRD Section 1.3.\n\n**Tables:**\n1. `embeddings` — vec0 virtual table with `embedding float[768]`\n2. `embedding_metadata` — tracks per-chunk provenance with composite PK (document_id, chunk_index)\n3. Orphan cleanup trigger: `documents_embeddings_ad` — deletes ALL chunk embeddings when a document is deleted using range deletion `[doc_id * 1000, (doc_id + 1) * 1000)`\n\n**Critical: sqlite-vec loading:**\nThe migration runner in `src/core/db.rs` must load sqlite-vec BEFORE applying any migrations. This means adding extension loading to the `create_connection()` or `run_migrations()` function. sqlite-vec is loaded via:\n```rust\nconn.load_extension_enable()?;\nconn.load_extension(\"vec0\", None)?; // or platform-specific path\nconn.load_extension_disable()?;\n```\n\nRegister migration 9 in `src/core/db.rs` MIGRATIONS array.\n\n## Acceptance Criteria\n- [ ] `migrations/009_embeddings.sql` file exists\n- [ ] `embeddings` vec0 virtual table created with `embedding float[768]`\n- [ ] `embedding_metadata` table has composite PK (document_id, chunk_index)\n- [ ] `embedding_metadata.document_id` has FK to documents(id) ON DELETE CASCADE\n- [ ] Error tracking fields: last_error, attempt_count, last_attempt_at\n- [ ] Orphan cleanup trigger: deletes embeddings WHERE rowid in [doc_id*1000, (doc_id+1)*1000)\n- [ ] Index on embedding_metadata(last_error) WHERE last_error IS NOT NULL\n- [ ] Index on embedding_metadata(document_id)\n- [ ] Schema version 9 recorded\n- [ ] Migration runner loads sqlite-vec before applying migrations\n- [ ] `cargo build` succeeds\n\n## Files\n- `migrations/009_embeddings.sql` — new file (copy 
exact SQL from PRD Section 1.3)\n- `src/core/db.rs` — add migration 9 to MIGRATIONS array; add sqlite-vec extension loading\n\n## TDD Loop\nRED: Register migration in db.rs, `cargo test migration_tests` fails\nGREEN: Create SQL file + add extension loading\nVERIFY: `cargo test migration_tests && cargo build`\n\n## Edge Cases\n- sqlite-vec not installed: migration fails with clear error (not a silent skip)\n- Migration applied without sqlite-vec loaded: `CREATE VIRTUAL TABLE` fails with \"no such module: vec0\"\n- Documents deleted before embeddings: trigger fires but vec0 DELETE on empty range is safe\n- vec0 doesn't support FK cascades: that's why we need the explicit trigger","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:33.958178Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:22:26.478290Z","closed_at":"2026-01-30T17:22:26.478229Z","close_reason":"Completed: migration 009_embeddings.sql with vec0 table, embedding_metadata with composite PK, orphan cleanup trigger, registered in db.rs","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2ac","depends_on_id":"bd-221","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2am8","title":"OBSERV: Enhance sync-status to show recent runs with metrics","description":"## Background\nsync_status currently queries sync_runs but always gets zero rows (nothing writes to the table). After bd-23a4 wires up SyncRunRecorder, rows will exist. This bead enhances the display to show recent runs with metrics.\n\n## Approach\n### src/cli/commands/sync_status.rs\n\n1. 
Change get_last_sync_run() (line ~66) to get_recent_sync_runs() returning last N:\n```rust\nfn get_recent_sync_runs(conn: &Connection, limit: usize) -> Result> {\n let mut stmt = conn.prepare(\n \"SELECT id, started_at, finished_at, status, command, error,\n run_id, total_items_processed, total_errors, metrics_json\n FROM sync_runs\n ORDER BY started_at DESC\n LIMIT ?1\",\n )?;\n // ... map rows to SyncRunInfo\n}\n```\n\n2. Extend SyncRunInfo to include new fields:\n```rust\npub struct SyncRunInfo {\n pub id: i64,\n pub started_at: i64,\n pub finished_at: Option,\n pub status: String,\n pub command: String,\n pub error: Option,\n pub run_id: Option, // NEW\n pub total_items_processed: i64, // NEW\n pub total_errors: i64, // NEW\n pub stages: Option>, // NEW: parsed from metrics_json\n}\n```\n\n3. Parse metrics_json into Vec:\n```rust\nlet stages: Option> = row.get::<_, Option>(9)?\n .and_then(|json| serde_json::from_str(&json).ok());\n```\n\n4. Interactive output (new format):\n```\nRecent sync runs:\n Run a1b2c3 | 2026-02-04 14:32 | 45.2s | 235 items | 1 error\n Run d4e5f6 | 2026-02-03 14:30 | 38.1s | 220 items | 0 errors\n Run g7h8i9 | 2026-02-02 14:29 | 42.7s | 228 items | 0 errors\n```\n\n5. Robot JSON output: runs array with stages parsed from metrics_json:\n```json\n{\n \"ok\": true,\n \"data\": {\n \"runs\": [{ \"run_id\": \"...\", \"stages\": [...] }],\n \"cursors\": [...],\n \"summary\": {...}\n }\n}\n```\n\n6. 
Add --run flag to sync-status subcommand for single-run detail view (shows full stage breakdown).\n\n## Acceptance Criteria\n- [ ] lore sync-status shows last 10 runs (not just 1) with run_id, duration, items, errors\n- [ ] lore --robot sync-status JSON includes runs array with stages parsed from metrics_json\n- [ ] lore sync-status --run a1b2c3 shows single run detail with full stage breakdown\n- [ ] When no runs exist, shows appropriate \"No sync runs recorded\" message\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/cli/commands/sync_status.rs (rewrite query, extend structs, update display)\n\n## TDD Loop\nRED:\n - test_sync_status_shows_runs: insert 3 sync_runs rows, call print function, assert all 3 shown\n - test_sync_status_json_includes_stages: insert row with metrics_json, verify robot JSON has stages\n - test_sync_status_empty: no rows, verify graceful message\nGREEN: Rewrite get_last_sync_run -> get_recent_sync_runs, extend SyncRunInfo, update output\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- metrics_json is NULL (old rows or failed runs): stages field is null/empty in output\n- metrics_json is malformed: serde_json::from_str fails silently (.ok()), stages is None\n- Duration calculation: finished_at - started_at in ms. 
If finished_at is NULL (running), show \"in progress\"","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:54:51.467705Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:43:07.306504Z","closed_at":"2026-02-04T17:43:07.306425Z","close_reason":"Enhanced sync-status: shows last 10 runs with run_id, duration, items, errors, parsed stages; JSON includes full stages array","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-2am8","depends_on_id":"bd-23a4","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2am8","depends_on_id":"bd-3pz","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -160,7 +160,7 @@ {"id":"bd-2no","title":"Write integration tests","description":"## Background\nIntegration tests verify that modules work together with a real SQLite database. They test FTS search (stemming, empty results), embedding storage (sqlite-vec ops), hybrid search (combined retrieval), and sync orchestration (full pipeline). Each test creates a fresh in-memory DB with migrations applied.\n\n## Approach\nCreate integration test files in `tests/`:\n\n**1. tests/fts_search.rs:**\n- Create DB, apply migrations 001-008\n- Insert test documents via SQL\n- Verify FTS5 triggers fired (documents_fts has matching count)\n- Search with various queries: stemming, prefix, empty, special chars\n- Verify result ranking (BM25 ordering)\n- Verify snippet generation\n\n**2. tests/embedding.rs:**\n- Create DB, apply migrations 001-009 (requires sqlite-vec)\n- Insert test documents + embeddings with known vectors\n- Verify KNN search returns nearest neighbors\n- Verify chunk deduplication\n- Verify orphan cleanup trigger (delete document -> embeddings gone)\n\n**3. 
tests/hybrid_search.rs:**\n- Create DB, apply all migrations\n- Insert documents + embeddings\n- Test all three modes: lexical, semantic, hybrid\n- Verify RRF ranking produces expected order\n- Test graceful degradation (no embeddings -> FTS fallback)\n- Test adaptive recall with filters\n\n**4. tests/sync.rs:**\n- Test sync orchestration with mock/stub GitLab responses\n- Verify pipeline stages execute in order\n- Verify lock acquisition/release\n- Verify --no-embed and --no-docs flags\n\n**Test fixtures:**\n- Deterministic embedding vectors (no Ollama required): e.g., [1.0, 0.0, 0.0, ...] for doc1, [0.0, 1.0, 0.0, ...] for doc2\n- Known documents with predictable search results\n- Fixed timestamps for reproducibility\n\n## Acceptance Criteria\n- [ ] FTS search tests pass (stemming, prefix, empty, special chars)\n- [ ] Embedding tests pass (KNN, dedup, orphan cleanup)\n- [ ] Hybrid search tests pass (all 3 modes, graceful degradation)\n- [ ] Sync tests pass (pipeline orchestration)\n- [ ] All tests use in-memory DB (no file I/O)\n- [ ] No external dependencies (no Ollama, no GitLab) — use fixtures/stubs\n- [ ] `cargo test --test fts_search --test embedding --test hybrid_search --test sync` passes\n\n## Files\n- `tests/fts_search.rs` — new file\n- `tests/embedding.rs` — new file\n- `tests/hybrid_search.rs` — new file\n- `tests/sync.rs` — new file\n- `tests/fixtures/` — optional: test helper functions (shared DB setup)\n\n## TDD Loop\nThese ARE integration tests — they verify the combined behavior of multiple beads.\nVERIFY: `cargo test --test fts_search && cargo test --test embedding && cargo test --test hybrid_search && cargo test --test sync`\n\n## Edge Cases\n- sqlite-vec not available: embedding tests should skip gracefully (or require feature flag)\n- In-memory DB with WAL mode: may behave differently than file DB — test both if critical\n- Concurrent test execution: each test creates its own DB (no shared 
state)","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-30T15:27:21.751019Z","created_by":"tayloreernisse","updated_at":"2026-01-30T18:11:12.432092Z","closed_at":"2026-01-30T18:11:12.432036Z","close_reason":"Integration tests: 10 FTS search tests (stemming, empty, special chars, ordering, triggers, null title), 5 embedding tests (KNN, limit, dedup, orphan trigger, empty DB), 6 hybrid search tests (lexical mode, FTS-only, graceful degradation, RRF ranking, filters, mode variants). 310 total tests pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2no","depends_on_id":"bd-1x6","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2no","depends_on_id":"bd-3eu","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2no","depends_on_id":"bd-3lu","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2no","depends_on_id":"bd-am7","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2nx","title":"OBSERV Epic: Phase 1 - Verbosity Flags + Structured File Logging","description":"Foundation layer for observability. 
Add -v/-vv/-vvv CLI flags, dual-layer tracing subscriber (stderr + file), daily log rotation via tracing-appender, log retention cleanup, --log-format json flag, and LoggingConfig.\n\nDepends on: nothing (first phase)\nUnblocks: Phase 2, and transitively all other phases\n\nFiles: Cargo.toml, src/cli/mod.rs, src/main.rs, src/core/config.rs, src/core/paths.rs, src/cli/commands/doctor.rs\n\nAcceptance criteria (PRD Section 6.1):\n- JSON log files written to ~/.local/share/lore/logs/ with zero config\n- -v/-vv/-vvv control stderr verbosity per table in PRD 4.3\n- RUST_LOG overrides -v for both layers\n- --log-format json emits JSON on stderr\n- Daily rotation, retention cleanup on startup\n- --quiet suppresses stderr, does NOT affect file layer\n- lore doctor reports log directory info","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-02-04T15:53:00.987774Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:15:09.465732Z","closed_at":"2026-02-04T17:15:09.465684Z","close_reason":"Phase 1 complete: dual-layer subscriber, -v/--verbose flags, --log-format json, LoggingConfig, get_log_dir(), log retention, doctor diagnostics","compaction_level":0,"original_size":0,"labels":["observability"]} {"id":"bd-2o49","title":"Epic: TUI Phase 5.6 — CLI/TUI Parity Pack","description":"## Background\nPhase 5.6 ensures the TUI displays the same data as the CLI robot mode, preventing drift between interfaces. 
Tests compare TUI query results against CLI --robot output for counts, list data, detail data, and search results.\n\n## Acceptance Criteria\n- [ ] Dashboard counts match lore --robot count output\n- [ ] Issue/MR list data matches lore --robot issues/mrs output\n- [ ] Issue/MR detail data matches lore --robot issues/mrs output\n- [ ] Search results identity (same IDs, same order) matches lore --robot search output\n- [ ] Terminal safety sanitization applied consistently in TUI and CLI","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:05:36.087371Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.586917Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2o49","depends_on_id":"bd-1b6k","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-2og9","title":"Implement entity cache + render cache","description":"## Background\nEntity cache provides near-instant detail view reopens during Enter/Esc drill workflows by caching IssueDetail/MrDetail payloads. Render cache prevents per-frame recomputation of expensive render artifacts (markdown to styled text, discussion tree shaping). 
Both use bounded LRU eviction with selective invalidation.\n\n## Approach\n\n### Entity Cache (entity_cache.rs)\n\n```rust\nuse std::collections::HashMap;\n\npub struct EntityCache {\n entries: HashMap, // value + last-access tick\n capacity: usize,\n tick: u64,\n}\n\nimpl EntityCache {\n pub fn new(capacity: usize) -> Self;\n pub fn get(&mut self, key: &EntityKey) -> Option<&V>; // updates tick\n pub fn put(&mut self, key: EntityKey, value: V); // evicts oldest if at capacity\n pub fn invalidate(&mut self, keys: &[EntityKey]); // selective by key set\n}\n```\n\n- `EntityKey` is `(EntityType, i64)` from core types (bd-c9gk) — e.g., `(EntityType::Issue, 42)`\n- Default capacity: 64 entries (sufficient for typical drill-in/out workflows)\n- LRU eviction: on `put()` when at capacity, find entry with lowest tick and remove it\n- `get()` bumps the access tick to keep recently-accessed entries alive\n- `invalidate()` takes a slice of changed keys (from sync results) and removes only those entries — NOT a blanket clear\n\n### Render Cache (render_cache.rs)\n\n```rust\npub struct RenderCacheKey {\n content_hash: u64, // FxHash of source content\n terminal_width: u16, // width affects line wrapping\n}\n\npub struct RenderCache {\n entries: HashMap,\n capacity: usize,\n}\n\nimpl RenderCache {\n pub fn new(capacity: usize) -> Self;\n pub fn get(&self, key: &RenderCacheKey) -> Option<&V>;\n pub fn put(&mut self, key: RenderCacheKey, value: V);\n pub fn invalidate_width(&mut self, keep_width: u16); // remove entries NOT matching this width\n pub fn invalidate_all(&mut self); // theme change = full clear\n}\n```\n\n- Default capacity: 256 entries\n- Used for: markdown->styled text, discussion tree layout, issue body rendering\n- `content_hash` uses `std::hash::Hasher` with FxHash (or std DefaultHasher) on source text\n- `invalidate_width(keep_width)`: on terminal resize, remove entries cached at old width\n- `invalidate_all()`: on theme change, clear everything (colors 
changed)\n- Both caches are NOT thread-safe (single-threaded TUI event loop). No Arc/Mutex needed.\n\n### Integration Point\nBoth caches live as fields on the main LoreApp struct. Cache miss falls through to normal DB query transparently — the action functions check cache first, query DB on miss, populate cache on return.\n\n## Acceptance Criteria\n- [ ] EntityCache::get returns Some for recently put items\n- [ ] EntityCache::put evicts the least-recently-accessed entry when at capacity\n- [ ] EntityCache::invalidate removes only the specified keys, leaves others intact\n- [ ] EntityCache capacity defaults to 64\n- [ ] RenderCache::get returns Some for matching (hash, width) pair\n- [ ] RenderCache::invalidate_width removes entries with non-matching width\n- [ ] RenderCache::invalidate_all clears everything\n- [ ] RenderCache capacity defaults to 256\n- [ ] Both caches are Send (no Rc, no raw pointers) but NOT required to be Sync\n- [ ] No unsafe code\n\n## Files\n- CREATE: crates/lore-tui/src/entity_cache.rs\n- CREATE: crates/lore-tui/src/render_cache.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add `pub mod entity_cache; pub mod render_cache;`)\n\n## TDD Anchor\nRED: Write `test_entity_cache_lru_eviction` that creates EntityCache with capacity 3, puts 4 items, asserts first item (lowest tick) is evicted and the other 3 remain.\nGREEN: Implement LRU eviction using tick-based tracking.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml entity_cache\n\nAdditional tests:\n- test_entity_cache_get_bumps_tick (accessed item survives eviction over older untouched items)\n- test_entity_cache_invalidate_selective (removes only specified keys)\n- test_entity_cache_invalidate_nonexistent_key (no panic)\n- test_render_cache_width_invalidation (entries at old width removed, current width kept)\n- test_render_cache_invalidate_all (empty after call)\n- test_render_cache_capacity_eviction\n\n## Edge Cases\n- Invalidating an EntityKey not in the cache is a no-op (no 
panic)\n- Zero-capacity cache: all gets return None, all puts are no-ops (degenerate but safe)\n- RenderCacheKey equality: two different strings can have the same hash (collision) — accept this; worst case is a wrong cached render that gets corrected on next invalidation\n- Entity cache should NOT be prewarmed synchronously during sync — sync results just invalidate stale entries, and the next view() call repopulates on demand\n\n## Dependency Context\nDepends on bd-c9gk (core types) for EntityKey type definition.\nBoth caches are integrated into LoreApp (bd-6pmy) as struct fields.\nAction functions (from Phase 2/3 screen beads) check cache before querying DB.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:03:25.520201Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.626204Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2og9","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2og9","depends_on_id":"bd-c9gk","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-2og9","title":"Implement entity cache + render cache","description":"## Background\nEntity cache provides near-instant detail view reopens during Enter/Esc drill workflows by caching IssueDetail/MrDetail payloads. Render cache prevents per-frame recomputation of expensive render artifacts (markdown to styled text, discussion tree shaping). 
Both use bounded LRU eviction with selective invalidation.\n\n## Approach\n\n### Entity Cache (entity_cache.rs)\n\n```rust\nuse std::collections::HashMap;\n\npub struct EntityCache {\n entries: HashMap, // value + last-access tick\n capacity: usize,\n tick: u64,\n}\n\nimpl EntityCache {\n pub fn new(capacity: usize) -> Self;\n pub fn get(&mut self, key: &EntityKey) -> Option<&V>; // updates tick\n pub fn put(&mut self, key: EntityKey, value: V); // evicts oldest if at capacity\n pub fn invalidate(&mut self, keys: &[EntityKey]); // selective by key set\n}\n```\n\n- `EntityKey` is `(EntityType, i64)` from core types (bd-c9gk) — e.g., `(EntityType::Issue, 42)`\n- Default capacity: 64 entries (sufficient for typical drill-in/out workflows)\n- LRU eviction: on `put()` when at capacity, find entry with lowest tick and remove it\n- `get()` bumps the access tick to keep recently-accessed entries alive\n- `invalidate()` takes a slice of changed keys (from sync results) and removes only those entries — NOT a blanket clear\n\n### Render Cache (render_cache.rs)\n\n```rust\npub struct RenderCacheKey {\n content_hash: u64, // FxHash of source content\n terminal_width: u16, // width affects line wrapping\n}\n\npub struct RenderCache {\n entries: HashMap,\n capacity: usize,\n}\n\nimpl RenderCache {\n pub fn new(capacity: usize) -> Self;\n pub fn get(&self, key: &RenderCacheKey) -> Option<&V>;\n pub fn put(&mut self, key: RenderCacheKey, value: V);\n pub fn invalidate_width(&mut self, keep_width: u16); // remove entries NOT matching this width\n pub fn invalidate_all(&mut self); // theme change = full clear\n}\n```\n\n- Default capacity: 256 entries\n- Used for: markdown->styled text, discussion tree layout, issue body rendering\n- `content_hash` uses `std::hash::Hasher` with FxHash (or std DefaultHasher) on source text\n- `invalidate_width(keep_width)`: on terminal resize, remove entries cached at old width\n- `invalidate_all()`: on theme change, clear everything (colors 
changed)\n- Both caches are NOT thread-safe (single-threaded TUI event loop). No Arc/Mutex needed.\n\n### Integration Point\nBoth caches live as fields on the main LoreApp struct. Cache miss falls through to normal DB query transparently — the action functions check cache first, query DB on miss, populate cache on return.\n\n## Acceptance Criteria\n- [ ] EntityCache::get returns Some for recently put items\n- [ ] EntityCache::put evicts the least-recently-accessed entry when at capacity\n- [ ] EntityCache::invalidate removes only the specified keys, leaves others intact\n- [ ] EntityCache capacity defaults to 64\n- [ ] RenderCache::get returns Some for matching (hash, width) pair\n- [ ] RenderCache::invalidate_width removes entries with non-matching width\n- [ ] RenderCache::invalidate_all clears everything\n- [ ] RenderCache capacity defaults to 256\n- [ ] Both caches are Send (no Rc, no raw pointers) but NOT required to be Sync\n- [ ] No unsafe code\n\n## Files\n- CREATE: crates/lore-tui/src/entity_cache.rs\n- CREATE: crates/lore-tui/src/render_cache.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add `pub mod entity_cache; pub mod render_cache;`)\n\n## TDD Anchor\nRED: Write `test_entity_cache_lru_eviction` that creates EntityCache with capacity 3, puts 4 items, asserts first item (lowest tick) is evicted and the other 3 remain.\nGREEN: Implement LRU eviction using tick-based tracking.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml entity_cache\n\nAdditional tests:\n- test_entity_cache_get_bumps_tick (accessed item survives eviction over older untouched items)\n- test_entity_cache_invalidate_selective (removes only specified keys)\n- test_entity_cache_invalidate_nonexistent_key (no panic)\n- test_render_cache_width_invalidation (entries at old width removed, current width kept)\n- test_render_cache_invalidate_all (empty after call)\n- test_render_cache_capacity_eviction\n\n## Edge Cases\n- Invalidating an EntityKey not in the cache is a no-op (no 
panic)\n- Zero-capacity cache: all gets return None, all puts are no-ops (degenerate but safe)\n- RenderCacheKey equality: two different strings can have the same hash (collision) — accept this; worst case is a wrong cached render that gets corrected on next invalidation\n- Entity cache should NOT be prewarmed synchronously during sync — sync results just invalidate stale entries, and the next view() call repopulates on demand\n\n## Dependency Context\nDepends on bd-c9gk (core types) for EntityKey type definition.\nBoth caches are integrated into LoreApp (bd-6pmy) as struct fields.\nAction functions (from Phase 2/3 screen beads) check cache before querying DB.","status":"in_progress","priority":2,"issue_type":"task","created_at":"2026-02-12T17:03:25.520201Z","created_by":"tayloreernisse","updated_at":"2026-02-19T03:51:49.836234Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2og9","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2og9","depends_on_id":"bd-c9gk","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2px","title":"[CP1] Epic: Issue Ingestion","description":"Ingest all issues, labels, and issue discussions from configured GitLab repositories with resumable cursor-based incremental sync. 
This establishes the core data ingestion pattern reused for MRs in CP2.\n\n## Success Criteria\n- gi ingest --type=issues fetches all issues (count matches GitLab UI)\n- Labels extracted from issue payloads (name-only)\n- Label linkage reflects current GitLab state (removed labels unlinked on re-sync)\n- Issue discussions fetched per-issue (dependent sync)\n- Cursor-based sync is resumable (re-running fetches 0 new items)\n- Discussion sync skips unchanged issues (per-issue watermark)\n- Sync tracking records all runs\n- Single-flight lock prevents concurrent runs\n\n## Internal Gates\n- Gate A: Issues only (cursor + upsert + raw payloads + list/count/show)\n- Gate B: Labels correct (stale-link removal verified)\n- Gate C: Dependent discussion sync (watermark prevents redundant refetch)\n- Gate D: Resumability proof (kill mid-run, rerun; bounded redo)\n\nReference: docs/prd/checkpoint-1.md","status":"tombstone","priority":1,"issue_type":"epic","created_at":"2026-01-25T15:42:13.167698Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.638609Z","closed_at":"2026-01-25T17:02:01.638609Z","deleted_at":"2026-01-25T17:02:01.638606Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"epic","compaction_level":0,"original_size":0} {"id":"bd-2rk9","title":"WHO: CLI skeleton — WhoArgs, Commands::Who, dispatch arm","description":"## Background\n\nWire up the CLI plumbing so `lore who --help` works and dispatch reaches the who module. This is pure boilerplate — no query logic yet.\n\n## Approach\n\n### 1. 
src/cli/mod.rs — WhoArgs struct (after TimelineArgs, ~line 195)\n\n```rust\n#[derive(Parser)]\n#[command(after_help = \"\\x1b[1mExamples:\\x1b[0m\n lore who src/features/auth/ # Who knows about this area?\n lore who @asmith # What is asmith working on?\n lore who @asmith --reviews # What review patterns does asmith have?\n lore who --active # What discussions need attention?\n lore who --overlap src/features/auth/ # Who else is touching these files?\n lore who --path README.md # Expert lookup for a root file\")]\npub struct WhoArgs {\n /// Username or file path (path if contains /)\n pub target: Option,\n\n /// Force expert mode for a file/directory path (handles root files like README.md, Makefile)\n #[arg(long, help_heading = \"Mode\", conflicts_with_all = [\"active\", \"overlap\", \"reviews\"])]\n pub path: Option,\n\n /// Show active unresolved discussions\n #[arg(long, help_heading = \"Mode\", conflicts_with_all = [\"target\", \"overlap\", \"reviews\", \"path\"])]\n pub active: bool,\n\n /// Find users with MRs/notes touching this file path\n #[arg(long, help_heading = \"Mode\", conflicts_with_all = [\"target\", \"active\", \"reviews\", \"path\"])]\n pub overlap: Option,\n\n /// Show review pattern analysis (requires username target)\n #[arg(long, help_heading = \"Mode\", requires = \"target\", conflicts_with_all = [\"active\", \"overlap\", \"path\"])]\n pub reviews: bool,\n\n /// Time window (7d, 2w, 6m, YYYY-MM-DD). Default varies by mode.\n #[arg(long, help_heading = \"Filters\")]\n pub since: Option,\n\n /// Scope to a project (supports fuzzy matching)\n #[arg(short = 'p', long, help_heading = \"Filters\")]\n pub project: Option,\n\n /// Maximum results per section (1..=500)\n #[arg(short = 'n', long = \"limit\", default_value = \"20\",\n value_parser = clap::value_parser!(u16).range(1..=500),\n help_heading = \"Output\")]\n pub limit: u16,\n}\n```\n\n### 2. Commands enum — add Who(WhoArgs) after Timeline, before hidden List\n\n### 3. 
src/cli/commands/mod.rs — add `pub mod who;` and re-exports:\n```rust\npub use who::{run_who, print_who_human, print_who_json, WhoRun};\n```\n\n### 4. src/main.rs — dispatch arm + handler:\n```rust\nSome(Commands::Who(args)) => handle_who(cli.config.as_deref(), args, robot_mode),\n```\n\n### 5. src/cli/commands/who.rs — stub file with signatures that compile\n\n## Files\n\n- `src/cli/mod.rs` — WhoArgs struct + Commands::Who variant\n- `src/cli/commands/mod.rs` — pub mod who + re-exports\n- `src/main.rs` — dispatch arm + handle_who function + imports\n- `src/cli/commands/who.rs` — CREATE stub file\n\n## TDD Loop\n\nRED: `cargo check --all-targets` fails (missing who module)\nGREEN: Create stub who.rs with empty/todo!() implementations, wire up all 4 files\nVERIFY: `cargo check --all-targets && cargo run -- who --help`\n\n## Acceptance Criteria\n\n- [ ] `cargo check --all-targets` passes\n- [ ] `lore who --help` displays all flags with correct grouping (Mode, Filters, Output)\n- [ ] `lore who --active --overlap foo` rejected by clap (conflicts_with)\n- [ ] `lore who --reviews` rejected by clap (requires target)\n- [ ] WhoArgs is pub and importable from lore::cli\n\n## Edge Cases\n\n- conflicts_with_all on --path must NOT include \"target\" (--path is used alongside positional target in some cases... actually no, --path replaces target — check the plan: it conflicts with active/overlap/reviews but NOT target. Wait, looking at the plan: --path does NOT conflict with target. But if both target and --path are provided, --path takes priority in resolve_mode. The clap struct allows both.)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:39:58.436660Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.594923Z","closed_at":"2026-02-08T04:10:29.594882Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. 
All quality gates pass.","compaction_level":0,"original_size":0} {"id":"bd-2rqs","title":"Dynamic shell completions for file paths (lore complete-path)","description":"## Background\n\nTab-completion for lore commands currently only covers static subcommand/flag names via clap_complete v4 (src/main.rs handle_completions(), line ~1667). Users frequently type file paths (for who --path, file-history) and entity IIDs (for issues, mrs, show) manually. Dynamic completions would allow tab-completing these from the local SQLite database.\n\n**Pattern:** kubectl, gh, docker all use hidden subcommands for dynamic completions. clap_complete v4 has a custom completer API that can shell out to these hidden subcommands.\n\n## Codebase Context\n\n- **Static completions**: Commands::Completions variant in src/cli/mod.rs, handled by handle_completions() in src/main.rs (line ~1667) using clap_complete::generate()\n- **clap_complete v4**: Already in Cargo.toml. Supports custom completer API for dynamic values.\n- **Commands taking IIDs**: IssuesArgs (iid: Option), MrsArgs (iid: Option), Drift (for: EntityRef), Show (hidden, takes entity ref)\n- **path_resolver**: src/core/path_resolver.rs (245 lines). build_path_query() (lines 71-187) and suffix_probe() (lines 192-240) resolve partial paths against mr_file_changes. SuffixResult::Ambiguous(Vec) returns multiple matches — perfect for completions.\n- **who --path**: WhoArgs has `path: Option` field, already uses path_resolver\n- **DB access**: create_connection() from src/core/db.rs, config loading from src/core/config.rs\n- **Performance**: Must complete in <100ms. SQLite queries against indexed columns are sub-ms.\n\n## Approach\n\n### 1. 
Hidden Subcommands (src/cli/mod.rs)\n\nAdd hidden subcommands that query the DB and print completion candidates:\n\n```rust\n/// Hidden: emit file path completions for shell integration\n#[command(name = \"complete-path\", hide = true)]\nCompletePath {\n /// Partial path prefix to complete\n prefix: String,\n /// Project scope\n #[arg(short = 'p', long)]\n project: Option,\n},\n\n/// Hidden: emit issue IID completions\n#[command(name = \"complete-issue\", hide = true)]\nCompleteIssue {\n /// Partial IID prefix\n prefix: String,\n #[arg(short = 'p', long)]\n project: Option,\n},\n\n/// Hidden: emit MR IID completions\n#[command(name = \"complete-mr\", hide = true)]\nCompleteMr {\n /// Partial IID prefix\n prefix: String,\n #[arg(short = 'p', long)]\n project: Option,\n},\n```\n\n### 2. Completion Handlers (src/cli/commands/completions.rs NEW)\n\n```rust\npub fn complete_path(conn: &Connection, prefix: &str, project_id: Option) -> Result> {\n // Use suffix_probe() from path_resolver if prefix looks like a suffix (no leading /)\n // Otherwise: SELECT DISTINCT new_path FROM mr_file_changes WHERE new_path LIKE ?||'%' LIMIT 50\n // Also check old_path for rename awareness\n}\n\npub fn complete_issue(conn: &Connection, prefix: &str, project_id: Option) -> Result> {\n // SELECT iid, title FROM issues WHERE CAST(iid AS TEXT) LIKE ?||'%' ORDER BY updated_at DESC LIMIT 30\n // Output: \"123\\tFix login bug\" (tab-separated for shell description)\n}\n\npub fn complete_mr(conn: &Connection, prefix: &str, project_id: Option) -> Result> {\n // SELECT iid, title FROM merge_requests WHERE CAST(iid AS TEXT) LIKE ?||'%' ORDER BY updated_at DESC LIMIT 30\n // Output: \"456\\tAdd OAuth support\"\n}\n```\n\n### 3. Wire in main.rs\n\nAdd match arms for CompletePath, CompleteIssue, CompleteMr. Each:\n1. Opens DB connection (read-only)\n2. Resolves project if -p given\n3. Calls completion handler\n4. Prints one candidate per line to stdout\n5. Exits 0\n\n### 4. 
Shell Integration\n\nUpdate handle_completions() to generate shell scripts that call the hidden subcommands. For fish:\n```fish\ncomplete -c lore -n '__fish_seen_subcommand_from issues' -a '(lore complete-issue \"\")'\ncomplete -c lore -n '__fish_seen_subcommand_from who' -l path -a '(lore complete-path (commandline -ct))'\n```\n\nSimilar for bash (using `_lore_complete()` function) and zsh.\n\n## Acceptance Criteria\n\n- [ ] `lore complete-path \"src/co\"` prints matching file paths from mr_file_changes\n- [ ] `lore complete-issue \"12\"` prints matching issue IIDs with titles\n- [ ] `lore complete-mr \"45\"` prints matching MR IIDs with titles\n- [ ] All three hidden subcommands respect -p for project scoping\n- [ ] All three complete in <100ms (SQLite indexed queries)\n- [ ] Empty prefix returns recent/popular results (not all rows)\n- [ ] Hidden subcommands don't appear in --help or completions themselves\n- [ ] Shell completion scripts (fish, bash, zsh) call hidden subcommands for dynamic values\n- [ ] Static completions (subcommands, flags) still work as before\n- [ ] No DB connection attempted if DB doesn't exist (graceful degradation — return no completions)\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n- [ ] `cargo fmt --check` passes\n\n## Files\n\n- MODIFY: src/cli/mod.rs (add CompletePath, CompleteIssue, CompleteMr hidden variants)\n- CREATE: src/cli/commands/completions.rs (complete_path, complete_issue, complete_mr handlers)\n- MODIFY: src/cli/commands/mod.rs (add pub mod completions)\n- MODIFY: src/main.rs (match arms for hidden subcommands + update handle_completions shell scripts)\n\n## TDD Anchor\n\nRED:\n- test_complete_path_suffix_match (in-memory DB with mr_file_changes rows, verify suffix matching returns correct paths)\n- test_complete_issue_prefix (in-memory DB with issues, verify IID prefix filtering)\n- test_complete_mr_prefix (same for MRs)\n- test_complete_empty_prefix_returns_recent 
(verify limited results ordered by updated_at DESC)\n\nGREEN: Implement completion handlers with SQL queries.\n\nVERIFY: cargo test --lib -- completions && cargo check --all-targets\n\n## Edge Cases\n\n- DB doesn't exist yet (first run before sync): return empty completions, exit 0 (not error)\n- mr_file_changes empty (sync hasn't run with --fetch-mr-diffs): complete-path returns nothing, no error\n- Very long prefix with no matches: empty output, exit 0\n- Special characters in paths (spaces, brackets): shell quoting handled by completion framework\n- Project ambiguous with -p: exit 18, same as other commands (resolve_project pattern)\n- IID prefix \"0\": return nothing (no issues/MRs have iid=0)\n\n## Dependency Context\n\n- **path_resolver** (src/core/path_resolver.rs): provides suffix_probe() which returns SuffixResult::Exact/Ambiguous/NotFound — reuse for complete-path instead of raw SQL when prefix looks like a suffix\n- **mr_file_changes** (migration 016): provides new_path/old_path columns for file path completions\n- **clap_complete v4** (Cargo.toml): provides generate() for static completions and custom completer API for dynamic shell integration","status":"open","priority":3,"issue_type":"feature","created_at":"2026-02-13T16:31:48.589428Z","created_by":"tayloreernisse","updated_at":"2026-02-17T16:51:21.891406Z","compaction_level":0,"original_size":0,"labels":["cli-ux","gate-4"]} @@ -170,7 +170,7 @@ {"id":"bd-2tr4","title":"Epic: TUI Phase 1 — Foundation","description":"## Background\nPhase 1 builds the foundational infrastructure that all screens depend on: the full LoreApp Model implementation with key dispatch, navigation stack, task supervisor for async work management, theme configuration, common widgets, and the state/action architecture. 
Phase 1 deliverables are the skeleton that Phase 2 screens plug into.\n\n## Acceptance Criteria\n- [ ] LoreApp update() dispatches all Msg variants through 5-stage key pipeline\n- [ ] NavigationStack supports push/pop/forward/jump with state preservation\n- [ ] TaskSupervisor manages background tasks with dedup, cancellation, and generation IDs\n- [ ] Theme renders correctly with adaptive light/dark colors\n- [ ] Status bar, breadcrumb, loading, error toast, and help overlay widgets render\n- [ ] CommandRegistry is the single source of truth for keybindings/help/palette\n- [ ] AppState composition with per-screen states and LoadState map\n\n## Scope\nBlocked by Phase 0 (Toolchain Gate). Blocks Phase 2 (Core Screens).","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-02-12T16:55:02.650495Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:54:04.268740Z","closed_at":"2026-02-18T18:54:04.268696Z","close_reason":"All 7 acceptance criteria met: 5-stage key pipeline, NavigationStack, TaskSupervisor, Theme, common widgets, CommandRegistry, AppState composition. 177 tests pass. Unblocks Phase 2.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2tr4","depends_on_id":"bd-1cj0","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2ug","title":"[CP1] gi ingest --type=issues command","description":"CLI command to orchestrate issue ingestion.\n\n## Module\nsrc/cli/commands/ingest.rs\n\n## Clap Definition\n#[derive(Subcommand)]\npub enum Commands {\n Ingest {\n #[arg(long, value_parser = [\"issues\", \"merge_requests\"])]\n r#type: String,\n \n #[arg(long)]\n project: Option,\n \n #[arg(long)]\n force: bool,\n },\n}\n\n## Implementation\n1. Acquire app lock with heartbeat (respect --force for stale lock)\n2. Create sync_run record (status='running')\n3. 
For each configured project (or filtered --project):\n - Call orchestrator to ingest issues and discussions\n - Show progress (spinner or progress bar)\n4. Update sync_run (status='succeeded', metrics_json with counts)\n5. Release lock\n\n## Output Format\nIngesting issues...\n\n group/project-one: 1,234 issues fetched, 45 new labels\n\nFetching discussions (312 issues with updates)...\n\n group/project-one: 312 issues → 1,234 discussions, 5,678 notes\n\nTotal: 1,234 issues, 1,234 discussions, 5,678 notes (excluding 1,234 system notes)\nSkipped discussion sync for 922 unchanged issues.\n\n## Error Handling\n- Lock acquisition failure: exit with DatabaseLockError message\n- Network errors: show GitLabNetworkError, exit non-zero\n- Rate limiting: respect backoff, show progress\n\nFiles: src/cli/commands/ingest.rs, src/cli/commands/mod.rs\nTests: tests/integration/sync_runs_tests.rs\nDone when: Full issue + discussion ingestion works end-to-end","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T16:57:58.552504Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.875613Z","closed_at":"2026-01-25T17:02:01.875613Z","deleted_at":"2026-01-25T17:02:01.875607Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-2um","title":"[CP1] Epic: Issue Ingestion","description":"Ingest all issues, labels, and issue discussions from configured GitLab repositories with resumable cursor-based incremental sync. 
This checkpoint establishes the core data ingestion pattern that will be reused for MRs in Checkpoint 2.\n\n## Success Criteria\n- gi ingest --type=issues fetches all issues (count matches GitLab UI)\n- Labels extracted from issue payloads (name-only)\n- Label linkage reflects current GitLab state (removed labels unlinked on re-sync)\n- Issue discussions fetched per-issue (dependent sync)\n- Cursor-based sync is resumable (re-running fetches 0 new items)\n- Discussion sync skips unchanged issues (per-issue watermark)\n- Sync tracking records all runs (sync_runs table)\n- Single-flight lock prevents concurrent runs\n\n## Internal Gates\n- **Gate A**: Issues only - cursor + upsert + raw payloads + list/count/show working\n- **Gate B**: Labels correct - stale-link removal verified; label count matches GitLab\n- **Gate C**: Dependent discussion sync - watermark prevents redundant refetch; concurrency bounded\n- **Gate D**: Resumability proof - kill mid-run, rerun; bounded redo and no redundant discussion refetch\n\n## Reference\ndocs/prd/checkpoint-1.md","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-01-25T17:02:38.075224Z","created_by":"tayloreernisse","updated_at":"2026-01-25T23:27:15.347364Z","closed_at":"2026-01-25T23:27:15.347317Z","close_reason":"CP1 Issue Ingestion complete: all sub-tasks done, 71 tests pass, CLI commands working","compaction_level":0,"original_size":0} -{"id":"bd-2uzm","title":"Implement Trace screen (file -> MR -> issue chain drill-down)","description":"## Background\nThe Trace screen answers \"Why was this code introduced?\" by building file -> MR -> issue -> discussion chains. It wraps run_trace() from src/core/trace.rs (added in v0.8.1) in an interactive TUI view where users can drill down into any linked entity. 
The CLI prints flat output; the TUI makes the chain navigable.\n\nThe core query accepts a file path (with optional :line suffix), resolves renames via BFS, finds MRs that touched the file, links issues via entity_references, and extracts DiffNote discussions. Each result is a TraceChain: MR metadata + linked issues + relevant discussions.\n\n## Data Shapes (from src/core/trace.rs)\n\n```rust\npub struct TraceResult {\n pub path: String,\n pub resolved_paths: Vec, // rename chain via BFS\n pub renames_followed: bool,\n pub trace_chains: Vec,\n pub total_chains: usize,\n}\n\npub struct TraceChain {\n pub mr_iid: i64,\n pub mr_title: String,\n pub mr_state: String, // merged/opened/closed\n pub mr_author: String,\n pub change_type: String, // added/modified/deleted/renamed\n pub merged_at_iso: Option,\n pub updated_at_iso: String,\n pub web_url: Option,\n pub issues: Vec, // linked via entity_references\n pub discussions: Vec, // DiffNote threads on this file\n}\n\npub struct TraceIssue {\n pub iid: i64, pub title: String, pub state: String,\n pub reference_type: String, pub web_url: Option,\n}\n\npub struct TraceDiscussion {\n pub discussion_id: String, pub mr_iid: i64,\n pub author_username: String, pub body: String,\n pub path: String, pub created_at_iso: String,\n}\n```\n\nrun_trace() signature (src/core/trace.rs):\n```rust\npub fn run_trace(\n conn: &Connection,\n project_id: Option,\n path: &str,\n follow_renames: bool,\n include_discussions: bool,\n limit: usize,\n) -> Result\n```\n\nparse_trace_path() (src/cli/commands/trace.rs, made pub by bd-1f5b):\n```rust\npub fn parse_trace_path(input: &str) -> (String, Option)\n```\n\n## Approach\n\n**Screen enum** (message.rs):\nAdd Screen::Trace variant (no parameters — path is entered on-screen). Label: \"Trace\". Breadcrumb: \"Trace\".\n\n**Path autocomplete**: Query DISTINCT new_path from mr_file_changes (scoped to project_id if set) for fuzzy matching as user types. Cache results on first focus. 
SQL:\n```sql\nSELECT DISTINCT new_path FROM mr_file_changes\nWHERE project_id = ?1 ORDER BY new_path\n```\nStore as Vec in TraceState. Filter client-side with case-insensitive substring match.\n\n**State** (state/trace.rs):\n```rust\n#[derive(Debug, Default)]\npub struct TraceState {\n pub path_input: String,\n pub path_focused: bool,\n pub line_filter: Option, // from :line suffix\n pub result: Option,\n pub selected_chain_index: usize,\n pub expanded_chains: HashSet, // multiple can be expanded\n pub follow_renames: bool, // default true\n pub include_discussions: bool, // default true\n pub scroll_offset: u16,\n pub known_paths: Vec, // autocomplete cache\n pub autocomplete_matches: Vec, // filtered suggestions\n pub autocomplete_index: usize,\n}\n```\n\n**Action** (action.rs):\n- fetch_trace(conn, project_id, path, follow_renames, include_discussions, limit) -> Result: calls run_trace() directly from src/core/trace.rs\n- fetch_known_paths(conn, project_id) -> Result, LoreError>: queries mr_file_changes for autocomplete\n\n**View** (view/trace.rs):\n- Top: path input with autocomplete dropdown + toggle indicators [renames: on] [discussions: on]\n- If renames followed: rename chain breadcrumb (old_path -> ... 
-> new_path) in dimmed text\n- Main area: scrollable list of TraceChain entries:\n - Collapsed: MR state icon + !iid + title + author + change_type + date (single line)\n - Expanded: indented sections for linked issues and discussion snippets\n - Issues: state icon + #iid + title + reference_type\n - Discussions: @author + date + body preview (first 2 lines, truncated at 120 chars)\n- Keyboard:\n - j/k: scroll chains\n - Enter: toggle expand/collapse on selected chain\n - Enter on highlighted issue: navigate to IssueDetail(EntityKey)\n - Enter on highlighted MR line: navigate to MrDetail(EntityKey)\n - /: focus path input\n - Tab: cycle autocomplete suggestions when path focused\n - r: toggle follow_renames (re-fetches)\n - d: toggle include_discussions (re-fetches)\n - q: back\n\n**Contextual entry points** (wired from other screens):\n- MR Detail: when cursor is on a file path in the file changes list, t opens Trace pre-filled with that path\n- Issue Detail: if discussion references a file path, t opens Trace for that path\n- Requires MrDetailState and IssueDetailState to expose selected_file_path() -> Option\n\n## Acceptance Criteria\n- [ ] Screen::Trace added to message.rs Screen enum with label \"Trace\" and breadcrumb\n- [ ] TraceState struct with all fields, Default impl\n- [ ] Path input with autocomplete dropdown from mr_file_changes (fuzzy substring match)\n- [ ] :line suffix parsing via parse_trace_path (line_filter stored but used for future highlighting)\n- [ ] Rename chain displayed as breadcrumb when renames_followed is true\n- [ ] TraceChain list with expand/collapse — multiple chains expandable simultaneously\n- [ ] MR state icons: merged (purple), opened (green), closed (red) — matching CLI theme\n- [ ] Enter on issue row navigates to IssueDetail(EntityKey::issue(project_id, iid))\n- [ ] Enter on MR header navigates to MrDetail(EntityKey::mr(project_id, iid))\n- [ ] r toggles follow_renames, d toggles include_discussions — both trigger re-fetch\n- 
[ ] Empty state: \"No trace chains found\" with hint \"Run 'lore sync' to fetch MR file changes\"\n- [ ] Contextual navigation: t on file path in MR Detail opens Trace pre-filled\n- [ ] Registered in command palette (label \"Trace file\", keywords [\"trace\", \"provenance\", \"why\"])\n- [ ] AppState.has_text_focus() updated to include trace.path_focused\n- [ ] AppState.blur_text_focus() updated to include trace.path_focused = false\n\n## Files\n- MODIFY: crates/lore-tui/src/message.rs (add Screen::Trace variant + label + is_detail_or_entity)\n- CREATE: crates/lore-tui/src/state/trace.rs (TraceState struct + Default)\n- MODIFY: crates/lore-tui/src/state/mod.rs (pub mod trace, pub use TraceState, add to AppState, update has_text_focus/blur_text_focus)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_trace, fetch_known_paths)\n- CREATE: crates/lore-tui/src/view/trace.rs (render_trace fn)\n- MODIFY: crates/lore-tui/src/view/mod.rs (add Screen::Trace dispatch arm in render_screen)\n- MODIFY: crates/lore-tui/src/view/mr_detail.rs (add t keybinding for contextual trace — deferred if mr_detail not yet implemented)\n\n## TDD Anchor\nRED: Write test_fetch_trace_returns_chain in action tests. Setup: in-memory DB, insert project, MR, mr_file_changes row (new_path=\"src/main.rs\"), entity_reference linking MR to issue. Call fetch_trace(conn, Some(project_id), \"src/main.rs\", true, true, 50). 
Assert: result.trace_chains.len() == 1, result.trace_chains[0].issues.len() == 1.\nGREEN: Implement fetch_trace calling run_trace from src/core/trace.rs.\nVERIFY: cargo test -p lore-tui trace -- --nocapture\n\nAdditional tests:\n- test_trace_empty_result: path \"nonexistent.rs\" returns total_chains=0\n- test_trace_rename_chain: insert rename chain A->B->C, query A, assert resolved_paths contains all 3\n- test_trace_discussion_toggle: include_discussions=false returns empty discussions vec per chain\n- test_parse_trace_path_with_line: \"src/main.rs:42\" -> (\"src/main.rs\", Some(42))\n- test_parse_trace_path_no_line: \"src/main.rs\" -> (\"src/main.rs\", None)\n- test_autocomplete_filters_paths: known_paths=[\"src/a.rs\",\"src/b.rs\",\"lib/c.rs\"], input=\"src/\" -> matches=[\"src/a.rs\",\"src/b.rs\"]\n\n## Edge Cases\n- File path not in any MR: empty state with sync hint\n- Very long rename chains (>5 paths): show first 2 + \"... N more\" + last path\n- Hundreds of trace chains: limit default 50, show \"showing 50 of N\" footer\n- Path with Windows drive letter (C:/foo.rs): parse_trace_path handles this correctly\n- Autocomplete with thousands of paths: substring filter is O(n) but fast enough for <100k paths\n- Project scope: if global_scope.project_id is set, pass it to run_trace and autocomplete query\n- Contextual entry from MR Detail: if MR Detail screen not yet implemented, defer the t keybinding to a follow-up\n\n## Dependency Context\n- bd-1f5b (blocks): Makes parse_trace_path() pub in src/cli/commands/trace.rs. Without this, TUI must reimplement the parser.\n- src/core/trace.rs: run_trace() is already pub — no changes needed. 
TUI calls it directly.\n- src/core/file_history.rs: resolve_rename_chain() used transitively by run_trace — TUI does not call it directly.\n- Navigation: uses NavigationStack.push(Screen::IssueDetail(key)) and Screen::MrDetail(key) from crates/lore-tui/src/navigation.rs.\n- AppState composition: TraceState added as field in AppState struct (state/mod.rs line ~154-174). has_text_focus and blur_text_focus at lines 194-207 must include trace.path_focused.\n- Contextual entry: requires MrDetailState to expose the currently selected file path. If MR Detail is not yet built, the contextual keybinding is deferred.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-18T18:13:47.076070Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:33:32.709165Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2uzm","depends_on_id":"bd-1f5b","type":"blocks","created_at":"2026-02-18T18:14:33.294262Z","created_by":"tayloreernisse"},{"issue_id":"bd-2uzm","depends_on_id":"bd-nwux","type":"parent-child","created_at":"2026-02-18T18:13:47.079630Z","created_by":"tayloreernisse"}]} +{"id":"bd-2uzm","title":"Implement Trace screen (file -> MR -> issue chain drill-down)","description":"## Background\nThe Trace screen answers \"Why was this code introduced?\" by building file -> MR -> issue -> discussion chains. It wraps run_trace() from src/core/trace.rs (added in v0.8.1) in an interactive TUI view where users can drill down into any linked entity. The CLI prints flat output; the TUI makes the chain navigable.\n\nThe core query accepts a file path (with optional :line suffix), resolves renames via BFS, finds MRs that touched the file, links issues via entity_references, and extracts DiffNote discussions. 
Each result is a TraceChain: MR metadata + linked issues + relevant discussions.\n\n## Data Shapes (from src/core/trace.rs)\n\n```rust\npub struct TraceResult {\n pub path: String,\n pub resolved_paths: Vec, // rename chain via BFS\n pub renames_followed: bool,\n pub trace_chains: Vec,\n pub total_chains: usize,\n}\n\npub struct TraceChain {\n pub mr_iid: i64,\n pub mr_title: String,\n pub mr_state: String, // merged/opened/closed\n pub mr_author: String,\n pub change_type: String, // added/modified/deleted/renamed\n pub merged_at_iso: Option,\n pub updated_at_iso: String,\n pub web_url: Option,\n pub issues: Vec, // linked via entity_references\n pub discussions: Vec, // DiffNote threads on this file\n}\n\npub struct TraceIssue {\n pub iid: i64, pub title: String, pub state: String,\n pub reference_type: String, pub web_url: Option,\n}\n\npub struct TraceDiscussion {\n pub discussion_id: String, pub mr_iid: i64,\n pub author_username: String, pub body: String,\n pub path: String, pub created_at_iso: String,\n}\n```\n\nrun_trace() signature (src/core/trace.rs):\n```rust\npub fn run_trace(\n conn: &Connection,\n project_id: Option,\n path: &str,\n follow_renames: bool,\n include_discussions: bool,\n limit: usize,\n) -> Result\n```\n\nparse_trace_path() (src/cli/commands/trace.rs, made pub by bd-1f5b):\n```rust\npub fn parse_trace_path(input: &str) -> (String, Option)\n```\n\n## Approach\n\n**Screen enum** (message.rs):\nAdd Screen::Trace variant (no parameters — path is entered on-screen). Label: \"Trace\". Breadcrumb: \"Trace\".\n\n**Path autocomplete**: Query DISTINCT new_path from mr_file_changes (scoped to project_id if set) for fuzzy matching as user types. Cache results on first focus. SQL:\n```sql\nSELECT DISTINCT new_path FROM mr_file_changes\nWHERE project_id = ?1 ORDER BY new_path\n```\nStore as Vec in TraceState. 
Filter client-side with case-insensitive substring match.\n\n**State** (state/trace.rs):\n```rust\n#[derive(Debug, Default)]\npub struct TraceState {\n pub path_input: String,\n pub path_focused: bool,\n pub line_filter: Option, // from :line suffix\n pub result: Option,\n pub selected_chain_index: usize,\n pub expanded_chains: HashSet, // multiple can be expanded\n pub follow_renames: bool, // default true\n pub include_discussions: bool, // default true\n pub scroll_offset: u16,\n pub known_paths: Vec, // autocomplete cache\n pub autocomplete_matches: Vec, // filtered suggestions\n pub autocomplete_index: usize,\n}\n```\n\n**Action** (action.rs):\n- fetch_trace(conn, project_id, path, follow_renames, include_discussions, limit) -> Result: calls run_trace() directly from src/core/trace.rs\n- fetch_known_paths(conn, project_id) -> Result, LoreError>: queries mr_file_changes for autocomplete\n\n**View** (view/trace.rs):\n- Top: path input with autocomplete dropdown + toggle indicators [renames: on] [discussions: on]\n- If renames followed: rename chain breadcrumb (old_path -> ... 
-> new_path) in dimmed text\n- Main area: scrollable list of TraceChain entries:\n - Collapsed: MR state icon + !iid + title + author + change_type + date (single line)\n - Expanded: indented sections for linked issues and discussion snippets\n - Issues: state icon + #iid + title + reference_type\n - Discussions: @author + date + body preview (first 2 lines, truncated at 120 chars)\n- Keyboard:\n - j/k: scroll chains\n - Enter: toggle expand/collapse on selected chain\n - Enter on highlighted issue: navigate to IssueDetail(EntityKey)\n - Enter on highlighted MR line: navigate to MrDetail(EntityKey)\n - /: focus path input\n - Tab: cycle autocomplete suggestions when path focused\n - r: toggle follow_renames (re-fetches)\n - d: toggle include_discussions (re-fetches)\n - q: back\n\n**Contextual entry points** (wired from other screens):\n- MR Detail: when cursor is on a file path in the file changes list, t opens Trace pre-filled with that path\n- Issue Detail: if discussion references a file path, t opens Trace for that path\n- Requires MrDetailState and IssueDetailState to expose selected_file_path() -> Option\n\n## Acceptance Criteria\n- [ ] Screen::Trace added to message.rs Screen enum with label \"Trace\" and breadcrumb\n- [ ] TraceState struct with all fields, Default impl\n- [ ] Path input with autocomplete dropdown from mr_file_changes (fuzzy substring match)\n- [ ] :line suffix parsing via parse_trace_path (line_filter stored but used for future highlighting)\n- [ ] Rename chain displayed as breadcrumb when renames_followed is true\n- [ ] TraceChain list with expand/collapse — multiple chains expandable simultaneously\n- [ ] MR state icons: merged (purple), opened (green), closed (red) — matching CLI theme\n- [ ] Enter on issue row navigates to IssueDetail(EntityKey::issue(project_id, iid))\n- [ ] Enter on MR header navigates to MrDetail(EntityKey::mr(project_id, iid))\n- [ ] r toggles follow_renames, d toggles include_discussions — both trigger re-fetch\n- 
[ ] Empty state: \"No trace chains found\" with hint \"Run 'lore sync' to fetch MR file changes\"\n- [ ] Contextual navigation: t on file path in MR Detail opens Trace pre-filled\n- [ ] Registered in command palette (label \"Trace file\", keywords [\"trace\", \"provenance\", \"why\"])\n- [ ] AppState.has_text_focus() updated to include trace.path_focused\n- [ ] AppState.blur_text_focus() updated to include trace.path_focused = false\n\n## Files\n- MODIFY: crates/lore-tui/src/message.rs (add Screen::Trace variant + label + is_detail_or_entity)\n- CREATE: crates/lore-tui/src/state/trace.rs (TraceState struct + Default)\n- MODIFY: crates/lore-tui/src/state/mod.rs (pub mod trace, pub use TraceState, add to AppState, update has_text_focus/blur_text_focus)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_trace, fetch_known_paths)\n- CREATE: crates/lore-tui/src/view/trace.rs (render_trace fn)\n- MODIFY: crates/lore-tui/src/view/mod.rs (add Screen::Trace dispatch arm in render_screen)\n- MODIFY: crates/lore-tui/src/view/mr_detail.rs (add t keybinding for contextual trace — deferred if mr_detail not yet implemented)\n\n## TDD Anchor\nRED: Write test_fetch_trace_returns_chain in action tests. Setup: in-memory DB, insert project, MR, mr_file_changes row (new_path=\"src/main.rs\"), entity_reference linking MR to issue. Call fetch_trace(conn, Some(project_id), \"src/main.rs\", true, true, 50). 
Assert: result.trace_chains.len() == 1, result.trace_chains[0].issues.len() == 1.\nGREEN: Implement fetch_trace calling run_trace from src/core/trace.rs.\nVERIFY: cargo test -p lore-tui trace -- --nocapture\n\nAdditional tests:\n- test_trace_empty_result: path \"nonexistent.rs\" returns total_chains=0\n- test_trace_rename_chain: insert rename chain A->B->C, query A, assert resolved_paths contains all 3\n- test_trace_discussion_toggle: include_discussions=false returns empty discussions vec per chain\n- test_parse_trace_path_with_line: \"src/main.rs:42\" -> (\"src/main.rs\", Some(42))\n- test_parse_trace_path_no_line: \"src/main.rs\" -> (\"src/main.rs\", None)\n- test_autocomplete_filters_paths: known_paths=[\"src/a.rs\",\"src/b.rs\",\"lib/c.rs\"], input=\"src/\" -> matches=[\"src/a.rs\",\"src/b.rs\"]\n\n## Edge Cases\n- File path not in any MR: empty state with sync hint\n- Very long rename chains (>5 paths): show first 2 + \"... N more\" + last path\n- Hundreds of trace chains: limit default 50, show \"showing 50 of N\" footer\n- Path with Windows drive letter (C:/foo.rs): parse_trace_path handles this correctly\n- Autocomplete with thousands of paths: substring filter is O(n) but fast enough for <100k paths\n- Project scope: if global_scope.project_id is set, pass it to run_trace and autocomplete query\n- Contextual entry from MR Detail: if MR Detail screen not yet implemented, defer the t keybinding to a follow-up\n\n## Dependency Context\n- bd-1f5b (blocks): Makes parse_trace_path() pub in src/cli/commands/trace.rs. Without this, TUI must reimplement the parser.\n- src/core/trace.rs: run_trace() is already pub — no changes needed. 
TUI calls it directly.\n- src/core/file_history.rs: resolve_rename_chain() used transitively by run_trace — TUI does not call it directly.\n- Navigation: uses NavigationStack.push(Screen::IssueDetail(key)) and Screen::MrDetail(key) from crates/lore-tui/src/navigation.rs.\n- AppState composition: TraceState added as field in AppState struct (state/mod.rs line ~154-174). has_text_focus and blur_text_focus at lines 194-207 must include trace.path_focused.\n- Contextual entry: requires MrDetailState to expose the currently selected file path. If MR Detail is not yet built, the contextual keybinding is deferred.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-18T18:13:47.076070Z","created_by":"tayloreernisse","updated_at":"2026-02-19T03:50:41.546948Z","closed_at":"2026-02-19T03:50:41.546751Z","close_reason":"Trace screen complete: view/trace.rs + wiring. 586 TUI tests pass.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2uzm","depends_on_id":"bd-1f5b","type":"blocks","created_at":"2026-02-18T18:14:33.294262Z","created_by":"tayloreernisse"},{"issue_id":"bd-2uzm","depends_on_id":"bd-nwux","type":"parent-child","created_at":"2026-02-18T18:13:47.079630Z","created_by":"tayloreernisse"}]} {"id":"bd-2w1p","title":"Add half-life fields and config validation to ScoringConfig","description":"## Background\nThe flat-weight ScoringConfig (config.rs:155-167) has only 3 fields: author_weight (25), reviewer_weight (10), note_bonus (1). Time-decay scoring needs half-life parameters, a reviewer split (participated vs assigned-only), closed MR discount, substantive-note threshold, and bot filtering.\n\n## Approach\nExtend the existing ScoringConfig struct at config.rs:155. Add new fields with #[serde(default)] and camelCase rename to match existing convention (authorWeight, reviewerWeight, noteBonus). Extend the Default impl at config.rs:169 with new defaults. 
Extend validate_scoring() at config.rs:274-291 (currently validates 3 weights >= 0).\n\n### New fields to add:\n```rust\n#[serde(rename = \"reviewerAssignmentWeight\")]\npub reviewer_assignment_weight: i64, // default: 3\n#[serde(rename = \"authorHalfLifeDays\")]\npub author_half_life_days: u32, // default: 180\n#[serde(rename = \"reviewerHalfLifeDays\")]\npub reviewer_half_life_days: u32, // default: 90\n#[serde(rename = \"reviewerAssignmentHalfLifeDays\")]\npub reviewer_assignment_half_life_days: u32, // default: 45\n#[serde(rename = \"noteHalfLifeDays\")]\npub note_half_life_days: u32, // default: 45\n#[serde(rename = \"closedMrMultiplier\")]\npub closed_mr_multiplier: f64, // default: 0.5\n#[serde(rename = \"reviewerMinNoteChars\")]\npub reviewer_min_note_chars: u32, // default: 20\n#[serde(rename = \"excludedUsernames\")]\npub excluded_usernames: Vec, // default: vec![]\n```\n\n### Validation additions to validate_scoring() (config.rs:274):\n- All *_half_life_days must be > 0 AND <= 3650\n- All *_weight / *_bonus must be >= 0\n- reviewer_assignment_weight must be >= 0\n- closed_mr_multiplier must be finite (not NaN/Inf) AND in (0.0, 1.0]\n- reviewer_min_note_chars must be >= 0 AND <= 4096\n- excluded_usernames entries must be non-empty strings\n- Return LoreError::ConfigInvalid with clear message on failure\n\n## TDD Loop\n\n### RED (write first):\n```rust\n#[test]\nfn test_config_validation_rejects_zero_half_life() {\n let mut cfg = ScoringConfig::default();\n assert!(validate_scoring(&cfg).is_ok());\n cfg.author_half_life_days = 0;\n assert!(validate_scoring(&cfg).is_err());\n cfg.author_half_life_days = 180;\n cfg.reviewer_half_life_days = 0;\n assert!(validate_scoring(&cfg).is_err());\n cfg.reviewer_half_life_days = 90;\n cfg.closed_mr_multiplier = 0.0;\n assert!(validate_scoring(&cfg).is_err());\n cfg.closed_mr_multiplier = 1.5;\n assert!(validate_scoring(&cfg).is_err());\n cfg.closed_mr_multiplier = 1.0;\n 
assert!(validate_scoring(&cfg).is_ok());\n}\n\n#[test]\nfn test_config_validation_rejects_absurd_half_life() {\n let mut cfg = ScoringConfig::default();\n cfg.author_half_life_days = 5000; // > 3650 cap\n assert!(validate_scoring(&cfg).is_err());\n cfg.author_half_life_days = 3650; // boundary: valid\n assert!(validate_scoring(&cfg).is_ok());\n cfg.reviewer_min_note_chars = 5000; // > 4096 cap\n assert!(validate_scoring(&cfg).is_err());\n cfg.reviewer_min_note_chars = 4096; // boundary: valid\n assert!(validate_scoring(&cfg).is_ok());\n}\n\n#[test]\nfn test_config_validation_rejects_nan_multiplier() {\n let mut cfg = ScoringConfig::default();\n cfg.closed_mr_multiplier = f64::NAN;\n assert!(validate_scoring(&cfg).is_err());\n cfg.closed_mr_multiplier = f64::INFINITY;\n assert!(validate_scoring(&cfg).is_err());\n cfg.closed_mr_multiplier = f64::NEG_INFINITY;\n assert!(validate_scoring(&cfg).is_err());\n}\n```\n\n### GREEN: Add fields to struct + Default impl + validation rules.\n### VERIFY: cargo test -p lore -- test_config_validation\n\n## Acceptance Criteria\n- [ ] test_config_validation_rejects_zero_half_life passes\n- [ ] test_config_validation_rejects_absurd_half_life passes\n- [ ] test_config_validation_rejects_nan_multiplier passes\n- [ ] ScoringConfig::default() returns correct values for all 11 fields\n- [ ] cargo check --all-targets passes\n- [ ] Existing config deserialization works (#[serde(default)] fills new fields)\n- [ ] validate_scoring() is pub(crate) or accessible from config.rs test module\n\n## Files\n- MODIFY: src/core/config.rs (struct at line 155, Default impl at line 169, validate_scoring at line 274)\n\n## Edge Cases\n- f64 comparison: use .is_finite() for NaN/Inf check, > 0.0 and <= 1.0 for range\n- Vec default: use Vec::new()\n- Upper bounds prevent silent misconfig (5000-day half-life effectively disables 
decay)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T16:59:14.654469Z","created_by":"tayloreernisse","updated_at":"2026-02-12T21:01:21.744442Z","closed_at":"2026-02-12T21:01:21.744205Z","close_reason":"Completed: added 8 new fields to ScoringConfig, extended Default impl, and added validation for half-life bounds, closed_mr_multiplier, reviewer_min_note_chars, and excluded_usernames. All 19 config tests pass.","compaction_level":0,"original_size":0,"labels":["scoring"]} {"id":"bd-2wpf","title":"Ship timeline CLI with human and robot renderers","description":"## Problem\nThe timeline pipeline (5-stage SEED->HYDRATE->EXPAND->COLLECT->RENDER) is implemented but not wired to the CLI. This is one of lore's most unique features — chronological narrative reconstruction from resource events, cross-references, and notes — and it is invisible to users and agents.\n\n## Current State\n- Types defined: src/core/timeline.rs (TimelineEvent, TimelineSeed, etc.)\n- Seed stage: src/core/timeline_seed.rs (FTS search -> seed entities)\n- Expand stage: src/core/timeline_expand.rs (cross-reference expansion)\n- Collect stage: src/core/timeline_collect.rs (event gathering from resource events + notes)\n- CLI command structure: src/cli/commands/timeline.rs (exists but incomplete)\n- Remaining beads: bd-1nf (CLI wiring), bd-2f2 (human renderer), bd-dty (robot renderer)\n\n## Acceptance Criteria\n1. lore timeline 'authentication refactor' works end-to-end:\n - Searches for matching entities (SEED)\n - Fetches raw data (HYDRATE)\n - Expands via cross-references (EXPAND with --depth flag, default 1)\n - Collects events chronologically (COLLECT)\n - Renders human-readable narrative (RENDER)\n2. 
Human renderer output:\n - Chronological event stream with timestamps\n - Color-coded by event type (state change, label change, note, reference)\n - Actor names with role context\n - Grouped by day/week for readability\n - Evidence snippets from notes (first 200 chars)\n3. Robot renderer output (--robot / -J):\n - JSON array of events with: timestamp, event_type, actor, entity_ref, body/snippet, metadata\n - Seed entities listed separately (what matched the query)\n - Expansion depth metadata (how far from seed)\n - Total event count and time range\n4. CLI flags:\n - --project (scope to project)\n - --since (time range)\n - --depth N (expansion depth, default 1, max 3)\n - --expand-mentions (follow mention references, not just closes/related)\n - -n LIMIT (max events)\n5. Performance: timeline for a single issue with 50 events renders in <200ms\n\n## Relationship to Existing Beads\nThis supersedes/unifies: bd-1nf (CLI wiring), bd-2f2 (human renderer), bd-dty (robot renderer). Those can be closed when this ships.\n\n## Files to Modify\n- src/cli/commands/timeline.rs (CLI wiring, flag parsing, output dispatch)\n- src/core/timeline.rs (may need RENDER stage types)\n- New: src/cli/render/timeline_human.rs or inline in timeline.rs\n- New: src/cli/render/timeline_robot.rs or inline in timeline.rs","status":"closed","priority":1,"issue_type":"feature","created_at":"2026-02-12T15:46:16.246889Z","created_by":"tayloreernisse","updated_at":"2026-02-12T15:50:43.885226Z","closed_at":"2026-02-12T15:50:43.885180Z","close_reason":"Already implemented: run_timeline(), print_timeline(), print_timeline_json_with_meta(), handle_timeline() all exist and are fully wired. 
Code audit 2026-02-12.","compaction_level":0,"original_size":0,"labels":["cli","cli-imp"],"dependencies":[{"issue_id":"bd-2wpf","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2x2h","title":"Implement Sync screen (running + summary modes + progress coalescer)","description":"## Background\nThe Sync screen provides real-time progress visualization during data synchronization. The TUI drives sync directly via lore library calls (not subprocess) — this gives direct access to progress callbacks, proper error propagation, and cooperative cancellation via CancelToken. The TUI is the primary human interface; the CLI serves robots/scripts.\n\nAfter sync completes, the screen transitions to a summary view showing exact changed entity counts. A progress coalescer prevents render thrashing by batching rapid progress updates.\n\nDesign principle: the TUI is self-contained. It does NOT detect or react to external CLI sync operations. 
If someone runs lore sync externally, the TUI's natural re-query on navigation handles stale data implicitly.\n\n## Approach\nCreate state, action, and view modules for the Sync screen:\n\n**State** (crates/lore-tui/src/screen/sync/state.rs):\n- SyncScreenMode enum: FullScreen, Inline (for use from Bootstrap screen)\n- SyncState enum: Idle, Running(SyncProgress), Complete(SyncSummary), Error(String)\n- SyncProgress: per-lane progress (issues, MRs, discussions, notes, events, statuses) with counts and ETA\n- SyncSummary: changed entity counts (new, updated, deleted per type), duration, errors\n- ProgressCoalescer: buffers progress updates, emits at most every 100ms to prevent render thrash\n\n**sync_delta_ledger** (crates/lore-tui/src/screen/sync/delta_ledger.rs):\n- SyncDeltaLedger: in-memory per-run record of changed entity IDs\n- Fields: new_issue_iids (Vec), updated_issue_iids (Vec), new_mr_iids (Vec), updated_mr_iids (Vec)\n- record_change(entity_type, iid, change_kind) — called by sync progress callback\n- summary() -> SyncSummary — produces the final counts for the summary view\n- Purpose: after sync completes, the dashboard and list screens can use the ledger to highlight \"new since last sync\" items\n\n**Action** (crates/lore-tui/src/screen/sync/action.rs):\n- start_sync(db: &DbManager, config: &Config, cancel: CancelToken) -> Cmd\n- Calls lore library ingestion functions directly: ingest_issues, ingest_mrs, ingest_discussions, etc.\n- Progress callback sends Msg::SyncProgress(lane, count, total) via channel\n- On completion sends Msg::SyncComplete(SyncSummary)\n- On cancel sends Msg::SyncCancelled(partial_summary)\n\n**Per-project fault isolation:** If sync for one project fails, continue syncing other projects. Collect per-project errors and display in summary view. 
Don't abort entire sync on single project failure.\n\n**View** (crates/lore-tui/src/screen/sync/view.rs):\n- Running view: per-lane progress bars with counts/totals, overall ETA, cancel hint (Esc)\n- Stream stats footer: show items/sec throughput for active lanes\n- Summary view: table of entity types with new/updated/deleted columns, total duration, per-project error list\n- Error view: error message with retry option\n- Inline mode: compact single-line progress for embedding in Bootstrap screen\n\nThe Sync screen uses TaskSupervisor for the background sync task with cooperative cancellation.\n\n## Acceptance Criteria\n- [ ] Sync screen launches sync via lore library calls (NOT subprocess)\n- [ ] Per-lane progress bars update in real-time during sync\n- [ ] ProgressCoalescer batches updates to at most 10/second (100ms floor)\n- [ ] Esc cancels sync cooperatively via CancelToken, shows partial summary\n- [ ] Sync completion transitions to summary view with accurate change counts\n- [ ] Summary view shows new/updated/deleted counts per entity type\n- [ ] Error during sync shows error message with retry option\n- [ ] Sync task registered with TaskSupervisor (dedup by TaskKey::Sync)\n- [ ] Per-project fault isolation: single project failure doesn't abort entire sync\n- [ ] SyncDeltaLedger records changed entity IDs for post-sync highlighting\n- [ ] Stream stats footer shows items/sec throughput\n- [ ] ScreenMode::Inline renders compact single-line progress for Bootstrap embedding\n- [ ] Unit tests for ProgressCoalescer batching behavior\n- [ ] Unit tests for SyncDeltaLedger record/summary\n- [ ] Integration test: mock sync with FakeClock verifies progress -> summary transition\n\n## Files\n- CREATE: crates/lore-tui/src/screen/sync/state.rs\n- CREATE: crates/lore-tui/src/screen/sync/action.rs\n- CREATE: crates/lore-tui/src/screen/sync/view.rs\n- CREATE: crates/lore-tui/src/screen/sync/delta_ledger.rs\n- CREATE: crates/lore-tui/src/screen/sync/mod.rs\n- MODIFY: 
crates/lore-tui/src/screen/mod.rs (add pub mod sync)\n\n## TDD Anchor\nRED: Write test_progress_coalescer_batches_rapid_updates that sends 50 progress updates in 10ms and asserts coalescer emits at most 1.\nGREEN: Implement ProgressCoalescer with configurable floor interval.\nVERIFY: cargo test -p lore-tui sync -- --nocapture\n\nAdditional tests:\n- test_sync_cancel_produces_partial_summary\n- test_sync_complete_produces_full_summary\n- test_sync_error_shows_retry\n- test_sync_dedup_prevents_double_launch\n- test_delta_ledger_records_changes: record 5 new issues and 3 updated MRs, assert summary counts\n- test_per_project_fault_isolation: simulate one project failure, verify others complete\n\n## Edge Cases\n- Sync cancelled immediately after start — partial summary with zero counts is valid\n- Network timeout during sync — error state with last-known progress preserved\n- Very large sync (100k+ entities) — progress coalescer prevents render thrash\n- Sync started while another sync TaskKey::Sync exists — TaskSupervisor dedup rejects it\n- Inline mode from Bootstrap: compact rendering, no full progress bars\n\n## Dependency Context\nUses TaskSupervisor from bd-3le2 for dedup and cancellation. Uses DbManager from bd-2kop for database access. Uses lore library ingestion module directly for sync operations. 
Used by Bootstrap screen (bd-3ty8) in inline mode.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:09.481354Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.266057Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2x2h","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2x2h","depends_on_id":"bd-3le2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2x2h","depends_on_id":"bd-u7se","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -240,8 +240,8 @@ {"id":"bd-3ng","title":"[CP0] Database setup with migrations and app lock","description":"## Background\n\nThe database is the backbone of gitlab-inbox. SQLite with WAL mode for performance, foreign keys for integrity, and proper pragmas for reliability. Migrations allow schema evolution. App lock prevents concurrent sync corruption.\n\nReference: docs/prd/checkpoint-0.md sections \"Database Schema\", \"SQLite Runtime Pragmas\", \"App Lock Mechanism\"\n\n## Approach\n\n**src/core/db.ts:**\n```typescript\nimport Database from 'better-sqlite3';\nimport { join, dirname } from 'node:path';\nimport { mkdirSync, readdirSync, readFileSync } from 'node:fs';\nimport { getDbPath } from './paths';\nimport { dbLogger } from './logger';\n\nexport function createConnection(dbPath: string): Database.Database {\n mkdirSync(dirname(dbPath), { recursive: true });\n const db = new Database(dbPath);\n \n // Production-grade pragmas\n db.pragma('journal_mode = WAL');\n db.pragma('synchronous = NORMAL');\n db.pragma('foreign_keys = ON');\n db.pragma('busy_timeout = 5000');\n db.pragma('temp_store = MEMORY');\n \n return db;\n}\n\nexport function runMigrations(db: Database.Database, migrationsDir: string): void {\n // Create schema_version table if not exists\n // Read migration files sorted by version\n // Apply 
migrations not yet applied\n // Track in schema_version table\n}\n```\n\n**migrations/001_initial.sql:**\nFull schema with tables: schema_version, projects, sync_runs, app_locks, sync_cursors, raw_payloads\n\n**src/core/lock.ts:**\nAppLock class with:\n- acquire(force?): acquires lock or throws DatabaseLockError\n- release(): releases lock and stops heartbeat\n- Heartbeat timer that updates heartbeat_at every N seconds\n- Stale lock detection (heartbeat_at > staleLockMinutes ago)\n\n## Acceptance Criteria\n\n- [ ] createConnection() creates parent directories if missing\n- [ ] WAL mode verified: `db.pragma('journal_mode')` returns 'wal'\n- [ ] Foreign keys verified: `db.pragma('foreign_keys')` returns 1\n- [ ] busy_timeout verified: `db.pragma('busy_timeout')` returns 5000\n- [ ] 001_initial.sql creates all 6 tables\n- [ ] schema_version shows version 1 after migration\n- [ ] AppLock.acquire() succeeds for first caller\n- [ ] AppLock.acquire() throws DatabaseLockError for second concurrent caller\n- [ ] Stale lock (heartbeat > 10 min old) can be taken over\n- [ ] tests/unit/db.test.ts passes (8 tests)\n- [ ] tests/integration/app-lock.test.ts passes (6 tests)\n\n## Files\n\nCREATE:\n- src/core/db.ts\n- src/core/lock.ts\n- migrations/001_initial.sql\n- tests/unit/db.test.ts\n- tests/integration/app-lock.test.ts\n\n## TDD Loop\n\nRED:\n```typescript\n// tests/unit/db.test.ts\ndescribe('Database', () => {\n it('creates database file if not exists')\n it('applies migrations in order')\n it('sets WAL journal mode')\n it('enables foreign keys')\n it('sets busy_timeout=5000')\n it('sets synchronous=NORMAL')\n it('sets temp_store=MEMORY')\n it('tracks schema version')\n})\n\n// tests/integration/app-lock.test.ts\ndescribe('App Lock', () => {\n it('acquires lock successfully')\n it('updates heartbeat during operation')\n it('detects stale lock and recovers')\n it('refuses concurrent acquisition')\n it('allows force override')\n it('releases lock on 
completion')\n})\n```\n\nGREEN: Implement db.ts, lock.ts, 001_initial.sql\n\nVERIFY: \n```bash\nnpm run test -- tests/unit/db.test.ts\nnpm run test -- tests/integration/app-lock.test.ts\n```\n\n## Edge Cases\n\n- Migration file with syntax error should rollback and throw MigrationError\n- Lock heartbeat timer must be unref()'d to not block process exit\n- Database file permissions - fail clearly if not writable\n- Concurrent lock tests need separate database files","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:49.481012Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:08:38.612669Z","closed_at":"2026-01-25T03:08:38.612543Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3ng","depends_on_id":"bd-epj","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3o0i","title":"NOTE-2E: Generate-docs full rebuild support for notes","description":"## Background\nlore generate-docs --full seeds all issues, MRs, and discussions into the dirty queue. Notes must be seeded too. The seeding happens in src/cli/commands/generate_docs.rs at lines 38-41.\n\n## Approach\nIn src/cli/commands/generate_docs.rs, the run_generate_docs() function (line 25) calls seed_dirty() for each source type when full=true (lines 38-41):\n result.seeded += seed_dirty(&conn, SourceType::Issue, project_filter)?;\n result.seeded += seed_dirty(&conn, SourceType::MergeRequest, project_filter)?;\n result.seeded += seed_dirty(&conn, SourceType::Discussion, project_filter)?;\n\nThe seed_dirty() function (line 61) maps SourceType to table name and does:\n INSERT INTO dirty_sources SELECT source_type, id, now FROM {table} ...\n\nFor notes, the table name is 'notes' but we need to exclude system notes (is_system = 0). The current seed_dirty function doesn't support WHERE filters. 
Two options:\n\nOption A (preferred): Add a separate seed_dirty_notes() function that handles the is_system filter:\n fn seed_dirty_notes(conn: &Connection, project_filter: Option<&str>) -> Result {\n INSERT INTO dirty_sources (source_type, source_id, queued_at)\n SELECT 'note', n.id, ?1 FROM notes n WHERE n.is_system = 0\n [AND n.project_id IN (SELECT id FROM projects WHERE path_with_namespace LIKE ?)]\n ON CONFLICT(source_type, source_id) DO UPDATE SET queued_at = excluded.queued_at, attempt_count = 0, last_attempt_at = NULL, last_error = NULL, next_attempt_at = NULL\n }\n\nOption B: Extend seed_dirty() to accept optional WHERE clause.\n\nAdd the call in run_generate_docs() after the existing 3 seed_dirty calls:\n result.seeded += seed_dirty_notes(&conn, project_filter)?;\n\n## Files\n- MODIFY: src/cli/commands/generate_docs.rs (add seed_dirty_notes function, call it in run_generate_docs at line 41)\n\n## TDD Anchor\nRED: test_full_seed_includes_notes — setup project, 3 non-system + 1 system note, call run_generate_docs with full=true, assert 3 note entries in dirty_sources.\nGREEN: Implement seed_dirty_notes and wire into run_generate_docs.\nVERIFY: cargo test full_seed_includes_notes -- --nocapture\nTests: test_note_document_count_stable_after_second_generate_docs_full (second run is idempotent)\n\n## Acceptance Criteria\n- [ ] generate-docs --full seeds non-system notes into dirty queue\n- [ ] System notes excluded from seeding (is_system = 0 filter)\n- [ ] Project filter works (--project flag scopes to one project)\n- [ ] Second full rebuild is idempotent (same document count, ON CONFLICT resets attempt counters)\n- [ ] Both tests pass\n\n## Dependency Context\n- Depends on NOTE-2D (bd-2ezb): regenerator must handle SourceType::Note so that seeded dirty entries can actually be processed\n\n## Edge Cases\n- Empty database: no notes = no-op, no errors\n- All system notes: no entries seeded\n- project_filter with no matching notes: 0 seeded, no 
error","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:25.747719Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.553418Z","closed_at":"2026-02-12T18:13:15.553372Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"]} {"id":"bd-3pk","title":"OBSERV Epic: Phase 5 - Rate Limit + Retry Instrumentation","description":"Enhanced logging in GitLab HTTP client for rate limits and retries. Structured fields on retry/rate-limit events. StageTiming gets rate_limit_hits and retries counters.\n\nDepends on: Phase 2 (spans for context)\nParallel with: Phase 3\n\nFiles: src/gitlab/client.rs, src/core/metrics.rs\n\nAcceptance criteria (PRD Section 6.5):\n- 429 events log at INFO with path, attempt, retry_after_secs, status_code\n- Retry events log with path, attempt, error\n- StageTiming includes rate_limit_hits and retries (omitted when zero)\n- lore -v sync shows retry activity on stderr\n- Rate limit counts included in metrics_json","status":"closed","priority":2,"issue_type":"epic","created_at":"2026-02-04T15:53:27.517023Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:31:35.239820Z","closed_at":"2026-02-04T17:31:35.239776Z","close_reason":"All Phase 5 tasks complete: structured rate-limit logging (bd-12ae) and rate_limit_hits/retries counters in StageTiming (bd-3vqk)","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-3pk","depends_on_id":"bd-2ni","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} -{"id":"bd-3pm2","title":"Add required TUI indexes via migration","description":"## Background\nThe TUI requires covering indexes for keyset pagination and efficient entity lookups. These must be added via the migration system in the main lore crate before TUI GA. 
Without them, list screens will full-scan on M-tier datasets.\n\n## Approach\nAdd a new migration to `src/core/db.rs` MIGRATIONS array containing exactly 9 CREATE INDEX IF NOT EXISTS statements:\n\n**List pagination indexes (4):**\n1. `idx_issues_list_default ON issues(project_id, state, updated_at DESC, iid DESC)` — covers default issue list sort\n2. `idx_mrs_list_default ON merge_requests(project_id, state, updated_at DESC, iid DESC)` — covers default MR list sort\n3. `idx_discussions_entity ON discussions(project_id, entity_type, entity_iid, created_at DESC)` — covers discussion list for an entity\n4. `idx_notes_discussion ON notes(discussion_id, created_at ASC)` — covers note list within a discussion\n\n**Filter-path indexes (5):**\n5. `idx_issues_author ON issues(author_id, state)` — author filter\n6. `idx_issues_assignee ON issues(assignee_id, state)` — assignee filter\n7. `idx_mrs_author ON merge_requests(author_id, state)` — author filter\n8. `idx_mrs_reviewer ON merge_requests(reviewer_id, state)` — reviewer filter (first reviewer)\n9. `idx_mrs_target_branch ON merge_requests(target_branch, state)` — branch filter\n\nMigration SQL pattern:\n```sql\nCREATE INDEX IF NOT EXISTS idx_issues_list_default\n ON issues(project_id, state, updated_at DESC, iid DESC);\n-- ... repeat for all 9\n```\n\nBump `LATEST_SCHEMA_VERSION` = `MIGRATIONS.len() as i32` (automatic).\n\nAdd EXPLAIN QUERY PLAN verification tests to confirm the optimizer uses these indexes for the top TUI queries.\n\n## Acceptance Criteria\n- [ ] Migration adds exactly 9 indexes via CREATE INDEX IF NOT EXISTS\n- [ ] Migration is appended to MIGRATIONS array (not inserted in the middle)\n- [ ] LATEST_SCHEMA_VERSION increments by 1\n- [ ] Existing data is not affected (indexes are additive)\n- [ ] EXPLAIN QUERY PLAN for `SELECT * FROM issues WHERE project_id=? AND state=? 
ORDER BY updated_at DESC, iid DESC LIMIT 50` shows SEARCH using idx_issues_list_default\n- [ ] EXPLAIN QUERY PLAN for `SELECT * FROM merge_requests WHERE project_id=? AND state=? ORDER BY updated_at DESC, iid DESC LIMIT 50` shows SEARCH using idx_mrs_list_default\n- [ ] EXPLAIN QUERY PLAN for `SELECT * FROM discussions WHERE project_id=? AND entity_type=? AND entity_iid=? ORDER BY created_at DESC` shows SEARCH using idx_discussions_entity\n- [ ] EXPLAIN QUERY PLAN for `SELECT * FROM notes WHERE discussion_id=? ORDER BY created_at ASC` shows SEARCH using idx_notes_discussion\n- [ ] No full table scan (SCAN TABLE) on issues or merge_requests under default TUI filters\n- [ ] Migration passes on empty DB (fresh install)\n- [ ] Migration passes on DB with existing data (upgrade path)\n\n## Files\n- MODIFY: src/core/db.rs (add migration to MIGRATIONS array)\n- CREATE: tests/tui_index_verification.rs (EXPLAIN QUERY PLAN tests)\n\n## TDD Anchor\nRED: Write `test_tui_indexes_exist` that runs all migrations on in-memory DB, queries `SELECT name FROM sqlite_master WHERE type='index' AND name LIKE 'idx_%'` and asserts all 9 index names are present.\nGREEN: Add migration with 9 CREATE INDEX IF NOT EXISTS statements.\nVERIFY: cargo test test_tui_indexes\n\nAdditional tests:\n- test_explain_issues_list_uses_index (EXPLAIN QUERY PLAN output contains \"idx_issues_list_default\")\n- test_explain_mrs_list_uses_index\n- test_explain_discussions_entity_uses_index\n- test_explain_notes_discussion_uses_index\n- test_migration_idempotent (run migrations twice, no error)\n\n## Edge Cases\n- CREATE INDEX IF NOT EXISTS: safe for re-runs and idempotent migration\n- Indexes on large tables may take significant time during first migration on production DBs (1.5GB) — consider PRAGMA busy_timeout\n- Migration version must be bumped correctly: `LATEST_SCHEMA_VERSION = MIGRATIONS.len() as i32` handles this automatically\n- SQLite may choose a different index if statistics differ — EXPLAIN tests 
verify the optimizer's actual choice\n- Filter-path indexes use (column, state) not (state, column) because the leading column is more selective\n\n## Dependency Context\nThis modifies the main lore crate (src/core/db.rs), NOT lore-tui. It must be merged before TUI screens rely on these indexes for performance SLOs.\nDependents: bd-35g5 (Dashboard state) and all list screen beads consume these indexes.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:06:08.180922Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:28.753547Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3pm2","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} -{"id":"bd-3pxe","title":"Epic: TUI Phase 2.5 — Vertical Slice Gate","description":"## Background\nPhase 2.5 validates that the core screens work together end-to-end: Dashboard -> Issue List -> Issue Detail -> Sync flows correctly, performance SLOs are met, and there are no stuck-input bugs or cancel latency issues. 
This is a quality gate before investing in power features.\n\n## Acceptance Criteria\n- [ ] Dashboard + IssueList + IssueDetail + Sync screens integrated and navigable\n- [ ] p95 nav latency < 75ms on M-tier fixtures\n- [ ] Zero stuck-input-mode bugs across full flow\n- [ ] Cancel latency p95 < 2s\n- [ ] Bootstrap screen handles empty/incompatible databases gracefully","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T16:59:47.016586Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.211922Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3pxe","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-3pm2","title":"Add required TUI indexes via migration","description":"## Background\nThe TUI requires covering indexes for keyset pagination and efficient entity lookups. These must be added via the migration system in the main lore crate before TUI GA. Without them, list screens will full-scan on M-tier datasets.\n\n## Approach\nAdd a new migration to `src/core/db.rs` MIGRATIONS array containing exactly 9 CREATE INDEX IF NOT EXISTS statements:\n\n**List pagination indexes (4):**\n1. `idx_issues_list_default ON issues(project_id, state, updated_at DESC, iid DESC)` — covers default issue list sort\n2. `idx_mrs_list_default ON merge_requests(project_id, state, updated_at DESC, iid DESC)` — covers default MR list sort\n3. `idx_discussions_entity ON discussions(project_id, entity_type, entity_iid, created_at DESC)` — covers discussion list for an entity\n4. `idx_notes_discussion ON notes(discussion_id, created_at ASC)` — covers note list within a discussion\n\n**Filter-path indexes (5):**\n5. `idx_issues_author ON issues(author_id, state)` — author filter\n6. `idx_issues_assignee ON issues(assignee_id, state)` — assignee filter\n7. `idx_mrs_author ON merge_requests(author_id, state)` — author filter\n8. 
`idx_mrs_reviewer ON merge_requests(reviewer_id, state)` — reviewer filter (first reviewer)\n9. `idx_mrs_target_branch ON merge_requests(target_branch, state)` — branch filter\n\nMigration SQL pattern:\n```sql\nCREATE INDEX IF NOT EXISTS idx_issues_list_default\n ON issues(project_id, state, updated_at DESC, iid DESC);\n-- ... repeat for all 9\n```\n\nBump `LATEST_SCHEMA_VERSION` = `MIGRATIONS.len() as i32` (automatic).\n\nAdd EXPLAIN QUERY PLAN verification tests to confirm the optimizer uses these indexes for the top TUI queries.\n\n## Acceptance Criteria\n- [ ] Migration adds exactly 9 indexes via CREATE INDEX IF NOT EXISTS\n- [ ] Migration is appended to MIGRATIONS array (not inserted in the middle)\n- [ ] LATEST_SCHEMA_VERSION increments by 1\n- [ ] Existing data is not affected (indexes are additive)\n- [ ] EXPLAIN QUERY PLAN for `SELECT * FROM issues WHERE project_id=? AND state=? ORDER BY updated_at DESC, iid DESC LIMIT 50` shows SEARCH using idx_issues_list_default\n- [ ] EXPLAIN QUERY PLAN for `SELECT * FROM merge_requests WHERE project_id=? AND state=? ORDER BY updated_at DESC, iid DESC LIMIT 50` shows SEARCH using idx_mrs_list_default\n- [ ] EXPLAIN QUERY PLAN for `SELECT * FROM discussions WHERE project_id=? AND entity_type=? AND entity_iid=? ORDER BY created_at DESC` shows SEARCH using idx_discussions_entity\n- [ ] EXPLAIN QUERY PLAN for `SELECT * FROM notes WHERE discussion_id=? 
ORDER BY created_at ASC` shows SEARCH using idx_notes_discussion\n- [ ] No full table scan (SCAN TABLE) on issues or merge_requests under default TUI filters\n- [ ] Migration passes on empty DB (fresh install)\n- [ ] Migration passes on DB with existing data (upgrade path)\n\n## Files\n- MODIFY: src/core/db.rs (add migration to MIGRATIONS array)\n- CREATE: tests/tui_index_verification.rs (EXPLAIN QUERY PLAN tests)\n\n## TDD Anchor\nRED: Write `test_tui_indexes_exist` that runs all migrations on in-memory DB, queries `SELECT name FROM sqlite_master WHERE type='index' AND name LIKE 'idx_%'` and asserts all 9 index names are present.\nGREEN: Add migration with 9 CREATE INDEX IF NOT EXISTS statements.\nVERIFY: cargo test test_tui_indexes\n\nAdditional tests:\n- test_explain_issues_list_uses_index (EXPLAIN QUERY PLAN output contains \"idx_issues_list_default\")\n- test_explain_mrs_list_uses_index\n- test_explain_discussions_entity_uses_index\n- test_explain_notes_discussion_uses_index\n- test_migration_idempotent (run migrations twice, no error)\n\n## Edge Cases\n- CREATE INDEX IF NOT EXISTS: safe for re-runs and idempotent migration\n- Indexes on large tables may take significant time during first migration on production DBs (1.5GB) — consider PRAGMA busy_timeout\n- Migration version must be bumped correctly: `LATEST_SCHEMA_VERSION = MIGRATIONS.len() as i32` handles this automatically\n- SQLite may choose a different index if statistics differ — EXPLAIN tests verify the optimizer's actual choice\n- Filter-path indexes use (column, state) not (state, column) because the leading column is more selective\n\n## Dependency Context\nThis modifies the main lore crate (src/core/db.rs), NOT lore-tui. 
It must be merged before TUI screens rely on these indexes for performance SLOs.\nDependents: bd-35g5 (Dashboard state) and all list screen beads consume these indexes.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:06:08.180922Z","created_by":"tayloreernisse","updated_at":"2026-02-19T03:37:41.905613Z","closed_at":"2026-02-19T03:37:41.905395Z","close_reason":"Migration 027 already implements the required TUI indexes. 8 covering indexes added. EXPLAIN tests deferred to follow-up.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3pm2","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-3pxe","title":"Epic: TUI Phase 2.5 — Vertical Slice Gate","description":"## Background\nPhase 2.5 validates that the core screens work together end-to-end: Dashboard -> Issue List -> Issue Detail -> Sync flows correctly, performance SLOs are met, and there are no stuck-input bugs or cancel latency issues. 
This is a quality gate before investing in power features.\n\n## Acceptance Criteria\n- [ ] Dashboard + IssueList + IssueDetail + Sync screens integrated and navigable\n- [ ] p95 nav latency < 75ms on M-tier fixtures\n- [ ] Zero stuck-input-mode bugs across full flow\n- [ ] Cancel latency p95 < 2s\n- [ ] Bootstrap screen handles empty/incompatible databases gracefully","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-02-12T16:59:47.016586Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:06:30.827404Z","closed_at":"2026-02-18T21:06:30.827352Z","close_reason":"Phase 2.5 vertical slice gate complete: all core screens integrated, 417 tests passing, bootstrap + schema preflight, stale guard, input fuzz, render all screens","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3pxe","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-3pz","title":"OBSERV Epic: Phase 4 - Sync History Enrichment","description":"Wire up sync_runs INSERT/UPDATE lifecycle (table exists but nothing writes to it), schema migration 014, enhanced sync-status with recent runs and metrics.\n\nDepends on: Phase 3 (needs Vec to store in metrics_json)\nUnblocks: nothing (terminal phase)\n\nFiles: migrations/014_sync_runs_enrichment.sql (new), src/core/sync_run.rs (new), src/cli/commands/sync.rs, src/cli/commands/ingest.rs, src/cli/commands/sync_status.rs\n\nAcceptance criteria (PRD Section 6.4):\n- lore sync creates sync_runs row with status=running, updated to succeeded/failed\n- sync_runs.run_id matches log files and robot JSON\n- metrics_json contains serialized Vec\n- lore sync-status shows last 10 runs with metrics\n- Failed syncs record error and partial metrics\n- Migration 014 applies 
cleanly","status":"closed","priority":2,"issue_type":"epic","created_at":"2026-02-04T15:53:27.469149Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:43:07.375047Z","closed_at":"2026-02-04T17:43:07.375Z","close_reason":"Phase 4 complete: migration 014, SyncRunRecorder, wiring, sync-status enhancement","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-3pz","depends_on_id":"bd-3er","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-3q2","title":"Implement search filters module","description":"## Background\nSearch filters are applied post-retrieval to narrow results by source type, author, project, date, labels, and file paths. The filter module must preserve ranking order from the search pipeline (FTS/RRF scores). It uses SQLite's JSON1 extension (json_each) to pass ranked document IDs efficiently and maintain their original order.\n\n## Approach\nCreate `src/search/filters.rs` per PRD Section 3.3. The full implementation is specified in the PRD including the SQL query.\n\n**Key types:**\n- `SearchFilters` struct with all filter fields + `has_any_filter()` + `clamp_limit()`\n- `PathFilter` enum: `Prefix(String)` (trailing `/`) or `Exact(String)`\n\n**Core function:**\n```rust\npub fn apply_filters(\n conn: &Connection,\n document_ids: &[i64],\n filters: &SearchFilters,\n) -> Result>\n```\n\n**SQL pattern (JSON1 for ordered ID passing):**\n```sql\nSELECT d.id\nFROM json_each(?) AS j\nJOIN documents d ON d.id = j.value\nWHERE 1=1\n AND d.source_type = ? -- if source_type filter set\n AND d.author_username = ? -- if author filter set\n -- ... dynamic WHERE clauses\nORDER BY j.key -- preserves ranking order\nLIMIT ?\n```\n\n**Filter logic:**\n- Labels: AND logic via `EXISTS (SELECT 1 FROM document_labels dl WHERE dl.document_id = d.id AND dl.label_name = ?)`\n- Path prefix: `LIKE ? 
ESCAPE '\\\\'` with escaped wildcards\n- Path exact: `= ?`\n- Limit: clamped to [1, 100], default 20\n\n## Acceptance Criteria\n- [ ] source_type filter works (issue, merge_request, discussion)\n- [ ] author filter: exact username match\n- [ ] project_id filter: restricts to single project\n- [ ] after filter: created_at >= value\n- [ ] updated_after filter: updated_at >= value\n- [ ] labels filter: AND logic (all specified labels must be present)\n- [ ] path exact filter: matches exact path string\n- [ ] path prefix filter: trailing `/` triggers LIKE with escaped wildcards\n- [ ] Ranking order preserved (ORDER BY j.key from json_each)\n- [ ] Limit clamped: 0 -> 20 (default), 200 -> 100 (max)\n- [ ] Empty document_ids returns empty Vec\n- [ ] Multiple filters compose correctly (all applied via AND)\n- [ ] `cargo test filters` passes\n\n## Files\n- `src/search/filters.rs` — new file\n- `src/search/mod.rs` — add `pub use filters::{SearchFilters, PathFilter, apply_filters};`\n\n## TDD Loop\nRED: Tests in `filters.rs` `#[cfg(test)] mod tests`:\n- `test_no_filters` — all docs returned up to limit\n- `test_source_type_filter` — only issues returned\n- `test_author_filter` — exact match\n- `test_labels_and_logic` — must have ALL specified labels\n- `test_path_exact` — matches exact path\n- `test_path_prefix` — trailing slash matches prefix\n- `test_limit_clamping` — 0 -> 20, 200 -> 100\n- `test_ranking_preserved` — output order matches input order\n- `test_has_any_filter` — true when any filter set, false when default\nGREEN: Implement apply_filters with dynamic SQL\nVERIFY: `cargo test filters`\n\n## Edge Cases\n- Path containing SQL LIKE wildcards (`%`, `_`): must be escaped before LIKE\n- Empty labels list: no label filter applied (not \"must have zero labels\")\n- `has_any_filter()` returns false for default SearchFilters (no filters set)\n- Large document_ids array (1000+): JSON1 handles 
efficiently","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:13.042512Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:24:38.402483Z","closed_at":"2026-01-30T17:24:38.402302Z","close_reason":"Completed: SearchFilters with has_any_filter/clamp_limit, PathFilter enum, apply_filters with dynamic SQL + json_each ordering, escape_like, 8 tests pass","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3q2","depends_on_id":"bd-36p","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-3qm","title":"[CP1] Final validation - tests, smoke tests, integrity checks","description":"Run all tests and perform data integrity checks.\n\nValidation steps:\n1. Run all unit tests (vitest)\n2. Run all integration tests\n3. Run ESLint\n4. Run TypeScript strict check\n5. Manual smoke tests per PRD table\n6. Data integrity SQL checks:\n - Issue count matches GitLab\n - Every issue has raw_payload\n - Labels in junction exist in labels table\n - sync_cursors has entry per project\n - Re-run fetches 0 new items\n - Discussion count > 0\n - Every discussion has >= 1 note\n - individual_note=true has exactly 1 note\n\nFiles: All CP1 files\nDone when: All gate criteria from Definition of Done pass","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:20:51.994183Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.152852Z","closed_at":"2026-01-25T15:21:35.152852Z","deleted_at":"2026-01-25T15:21:35.152849Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} @@ -252,7 +252,7 @@ {"id":"bd-3sh","title":"Add 'lore count events' command with robot mode","description":"## Background\nNeed to verify event ingestion and report counts by type. The existing count command (src/cli/commands/count.rs) handles issues, mrs, discussions, notes with both human and robot output. 
This adds 'events' as a new count subcommand.\n\n## Approach\nExtend the existing count command in src/cli/commands/count.rs:\n\n1. Add CountTarget::Events variant (or string match) in the count dispatcher\n2. Query each event table with GROUP BY entity type:\n```sql\nSELECT \n CASE WHEN issue_id IS NOT NULL THEN 'issue' ELSE 'merge_request' END as entity_type,\n COUNT(*) as count\nFROM resource_state_events\nGROUP BY entity_type;\n-- (repeat for label and milestone events)\n```\n\n3. Human output: table format\n```\nEvent Type Issues MRs Total\nState events 1,234 567 1,801\nLabel events 2,345 890 3,235\nMilestone events 456 123 579\nTotal 4,035 1,580 5,615\n```\n\n4. Robot JSON:\n```json\n{\n \"ok\": true,\n \"data\": {\n \"state_events\": {\"issue\": 1234, \"merge_request\": 567, \"total\": 1801},\n \"label_events\": {\"issue\": 2345, \"merge_request\": 890, \"total\": 3235},\n \"milestone_events\": {\"issue\": 456, \"merge_request\": 123, \"total\": 579},\n \"total\": 5615\n }\n}\n```\n\n5. 
Register in CLI: add \"events\" to count's entity_type argument in src/cli/mod.rs\n\n## Acceptance Criteria\n- [ ] `lore count events` shows correct counts by event type and entity type\n- [ ] Robot JSON matches the schema above\n- [ ] Works with empty tables (all zeros)\n- [ ] Does not error if migration 011 hasn't been applied (graceful degradation or \"no event tables\" message)\n\n## Files\n- src/cli/commands/count.rs (add events counting logic)\n- src/cli/mod.rs (add \"events\" to count's accepted entity types)\n\n## TDD Loop\nRED: tests/count_tests.rs (or extend existing):\n- `test_count_events_empty_tables` - verify all zeros on fresh DB\n- `test_count_events_with_data` - seed state + label events, verify correct counts\n- `test_count_events_robot_json` - verify JSON structure\n\nGREEN: Add the events branch to count command\n\nVERIFY: `cargo test count -- --nocapture`\n\n## Edge Cases\n- Tables don't exist if user hasn't run migrate — check table existence first or catch the error\n- COUNT with GROUP BY returns no rows for empty tables — need to handle missing entity types as 0","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-02T21:31:57.379702Z","created_by":"tayloreernisse","updated_at":"2026-02-03T16:21:21.408874Z","closed_at":"2026-02-03T16:21:21.408806Z","close_reason":"Added 'events' to count CLI parser, run_count_events function, print_event_count (table format) and print_event_count_json (structured JSON). 
Wired into handle_count in main.rs.","compaction_level":0,"original_size":0,"labels":["cli","gate-1","phase-b"],"dependencies":[{"issue_id":"bd-3sh","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-3sh","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-3t1b","title":"Implement MR Detail (state + action + view)","description":"## Background\nThe MR Detail shows a single merge request with file changes, diff discussions (position-specific comments), and general discussions. Same progressive hydration pattern as Issue Detail. MR detail has additional sections: file change list and diff-context notes.\n\n## Approach\nState (state/mr_detail.rs):\n- MrDetailState: current_key (Option), metadata (Option), discussions (Vec), diff_discussions (Vec), file_changes (Vec), cross_refs (Vec), tree_state (TreePersistState), scroll_offset, active_tab (MrTab: Overview|Files|Discussions)\n- MrMetadata: iid, title, description, state, author, reviewer, assignee, labels, target_branch, source_branch, created_at, updated_at, web_url, draft, merge_status\n- FileChange: old_path, new_path, change_type (added/modified/deleted/renamed), diff_line_count\n- DiffDiscussion: file_path, old_line, new_line, notes (Vec)\n\nAction (action.rs):\n- fetch_mr_detail(conn, key, clock) -> Result: uses with_read_snapshot\n\nView (view/mr_detail.rs):\n- render_mr_detail(frame, state, area, theme): header, tab bar (Overview|Files|Discussions), tab content\n- Overview tab: description + cross-refs\n- Files tab: file change list with change type indicators (+/-/~)\n- Discussions tab: general discussions + diff discussions grouped by file\n\n## Acceptance Criteria\n- [ ] MR metadata loads in Phase 1\n- [ ] Tab navigation between Overview, Files, Discussions\n- [ ] File changes list shows change type and line count\n- [ ] Diff discussions grouped by file path\n- [ ] General 
discussions rendered in tree widget\n- [ ] Cross-references navigable (related issues, etc.)\n- [ ] All text sanitized via sanitize_for_terminal()\n- [ ] Esc returns to MR List with state preserved\n\n## Files\n- MODIFY: crates/lore-tui/src/state/mr_detail.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_mr_detail)\n- CREATE: crates/lore-tui/src/view/mr_detail.rs\n\n## TDD Anchor\nRED: Write test_fetch_mr_detail in action.rs that inserts an MR with 3 file changes, calls fetch_mr_detail, asserts 3 files returned.\nGREEN: Implement fetch_mr_detail with file change query.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_mr_detail\n\n## Edge Cases\n- MR with no file changes (draft MR created without pushes): show \"No file changes\" message\n- Diff discussions referencing deleted files: show file path with strikethrough style\n- Very large MRs (hundreds of files): paginate file list, don't load all at once\n\n## Dependency Context\nUses discussion tree and cross-ref widgets from \"Implement discussion tree + cross-reference widgets\" task.\nUses same patterns as \"Implement Issue Detail\" task.\nUses MrDetailState from \"Implement AppState composition\" task.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:59:38.427124Z","created_by":"tayloreernisse","updated_at":"2026-02-18T20:36:38.457188Z","closed_at":"2026-02-18T20:36:38.457090Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3t1b","depends_on_id":"bd-1d6z","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-3t1b","depends_on_id":"bd-2kr0","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-3t6r","title":"Epic: TUI Phase 5 — Polish","description":"## Background\nPhase 5 adds polish features: responsive breakpoints for all screens, session state persistence (resume where you left off), single-instance locking, 
entity/render caches for performance, text width handling for Unicode, snapshot tests, and terminal compatibility test matrix.\n\n## Acceptance Criteria\n- [ ] All screens adapt to terminal width with responsive breakpoints\n- [ ] Session state persisted and restored on relaunch\n- [ ] Single-instance lock prevents concurrent TUI launches\n- [ ] Entity cache enables near-instant detail view reopens\n- [ ] Snapshot tests produce deterministic output with FakeClock\n- [ ] Terminal compat verified across iTerm2, tmux, Alacritty, kitty","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:02:47.178645Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.435708Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3t6r","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} -{"id":"bd-3ty8","title":"Implement Bootstrap screen + schema preflight","description":"## Background\nThe Bootstrap screen handles first-launch and incompatible-database scenarios. Before entering the TUI event loop, a schema preflight check validates the database is compatible. If not, an actionable error is shown. The Bootstrap screen also guides users through initial sync if the database is empty.\n\n## Approach\n- Schema preflight in lib.rs: check schema version before creating LoreApp. If incompatible, print error with lore migrate suggestion and exit non-zero.\n- Bootstrap screen (Screen::Bootstrap): shown when database has zero issues/MRs. Shows: \"No data found. 
Run sync to get started.\" with option to start sync inline.\n- State: BootstrapState { has_data: bool, schema_ok: bool, config_valid: bool }\n- Action: check_data_readiness(conn) -> DataReadiness { has_issues: bool, has_mrs: bool, has_documents: bool, schema_version: i32 }\n\n## Acceptance Criteria\n- [ ] Schema preflight yields actionable error for incompatible DB versions\n- [ ] Bootstrap screen shown when database is empty\n- [ ] Bootstrap guides user to start sync\n- [ ] After sync completes, Bootstrap auto-transitions to Dashboard\n- [ ] Non-zero exit code on schema incompatibility\n\n## Files\n- CREATE: crates/lore-tui/src/state/bootstrap.rs\n- CREATE: crates/lore-tui/src/view/bootstrap.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add schema preflight check)\n- MODIFY: crates/lore-tui/src/action.rs (add check_data_readiness)\n\n## TDD Anchor\nRED: Write test_schema_preflight_rejects_old that creates DB at schema version 1, asserts preflight returns error.\nGREEN: Implement schema version check.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_schema_preflight\n\n## Edge Cases\n- Database file doesn't exist: create it, then show Bootstrap\n- Database locked by another process: show DbBusy error with suggestion\n- Config file missing: show error with lore init suggestion","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:00:02.185699Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:28.671769Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3ty8","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-3ty8","depends_on_id":"bd-6pmy","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-3ty8","title":"Implement Bootstrap screen + schema preflight","description":"## Background\nThe Bootstrap screen handles first-launch and incompatible-database scenarios. 
Before entering the TUI event loop, a schema preflight check validates the database is compatible. If not, an actionable error is shown. The Bootstrap screen also guides users through initial sync if the database is empty.\n\n## Approach\n- Schema preflight in lib.rs: check schema version before creating LoreApp. If incompatible, print error with lore migrate suggestion and exit non-zero.\n- Bootstrap screen (Screen::Bootstrap): shown when database has zero issues/MRs. Shows: \"No data found. Run sync to get started.\" with option to start sync inline.\n- State: BootstrapState { has_data: bool, schema_ok: bool, config_valid: bool }\n- Action: check_data_readiness(conn) -> DataReadiness { has_issues: bool, has_mrs: bool, has_documents: bool, schema_version: i32 }\n\n## Acceptance Criteria\n- [ ] Schema preflight yields actionable error for incompatible DB versions\n- [ ] Bootstrap screen shown when database is empty\n- [ ] Bootstrap guides user to start sync\n- [ ] After sync completes, Bootstrap auto-transitions to Dashboard\n- [ ] Non-zero exit code on schema incompatibility\n\n## Files\n- CREATE: crates/lore-tui/src/state/bootstrap.rs\n- CREATE: crates/lore-tui/src/view/bootstrap.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add schema preflight check)\n- MODIFY: crates/lore-tui/src/action.rs (add check_data_readiness)\n\n## TDD Anchor\nRED: Write test_schema_preflight_rejects_old that creates DB at schema version 1, asserts preflight returns error.\nGREEN: Implement schema version check.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_schema_preflight\n\n## Edge Cases\n- Database file doesn't exist: create it, then show Bootstrap\n- Database locked by another process: show DbBusy error with suggestion\n- Config file missing: show error with lore init 
suggestion","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:00:02.185699Z","created_by":"tayloreernisse","updated_at":"2026-02-18T20:58:27.145495Z","closed_at":"2026-02-18T20:58:27.145430Z","close_reason":"Implemented Bootstrap screen + schema preflight: state/bootstrap.rs (DataReadiness, SchemaCheck, BootstrapState), action.rs (check_schema_version, check_data_readiness, MINIMUM_SCHEMA_VERSION), view/bootstrap.rs (render_bootstrap), lib.rs (schema_preflight), update.rs (SyncStarted/SyncCompleted bootstrap auto-transition). 405 tests passing, clippy clean.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3ty8","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-3ty8","depends_on_id":"bd-6pmy","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-3vqk","title":"OBSERV: Add rate_limit_hits and retries counters to StageTiming","description":"## Background\nMetricsLayer counts span timing but doesn't yet count rate-limit hits and retries. These counters complete the observability picture, showing HOW MUCH time was spent waiting vs. working.\n\n## Approach\n### src/core/metrics.rs - StageTiming struct\n\nAdd two new fields:\n```rust\n#[derive(Debug, Clone, Serialize)]\npub struct StageTiming {\n // ... existing fields ...\n #[serde(skip_serializing_if = \"is_zero\")]\n pub rate_limit_hits: usize,\n #[serde(skip_serializing_if = \"is_zero\")]\n pub retries: usize,\n}\n```\n\n### src/core/metrics.rs - MetricsLayer\n\nThe structured log events from bd-12ae use info!() with specific fields (status_code=429, \"Rate limited, retrying\"). MetricsLayer needs to count these events within each span.\n\nAdd to SpanData:\n```rust\nstruct SpanData {\n // ... 
existing fields ...\n rate_limit_hits: usize,\n retries: usize,\n}\n```\n\nAdd on_event() to MetricsLayer:\n```rust\nfn on_event(&self, event: &tracing::Event<'_>, ctx: Context<'_, S>) {\n // Check if event message contains rate-limit or retry indicators\n // Increment counters on the current span\n if let Some(span_ref) = ctx.event_span(event) {\n let id = span_ref.id();\n if let Some(data) = self.spans.lock().unwrap().get_mut(&id.into_u64()) {\n let mut visitor = EventVisitor::default();\n event.record(&mut visitor);\n\n if visitor.status_code == Some(429) {\n data.rate_limit_hits += 1;\n }\n if visitor.is_retry {\n data.retries += 1;\n }\n }\n }\n}\n```\n\nThe EventVisitor checks for status_code=429 and message containing \"retrying\" to classify events.\n\nOn span close, propagate counts to parent (bubble up):\n```rust\nfn on_close(&self, id: Id, _ctx: Context<'_, S>) {\n if let Some(data) = self.spans.lock().unwrap().remove(&id.into_u64()) {\n let timing = StageTiming {\n // ... existing fields ...\n rate_limit_hits: data.rate_limit_hits,\n retries: data.retries,\n };\n // ... 
push to completed\n }\n}\n```\n\n## Acceptance Criteria\n- [ ] StageTiming has rate_limit_hits and retries fields\n- [ ] Fields omitted when zero in JSON serialization\n- [ ] MetricsLayer counts 429 events as rate_limit_hits\n- [ ] MetricsLayer counts retry events as retries\n- [ ] Counts bubble up to parent spans in extract_timings()\n- [ ] Rate limit counts appear in metrics_json stored in sync_runs\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/core/metrics.rs (add fields to StageTiming, add on_event to MetricsLayer, add EventVisitor)\n\n## TDD Loop\nRED:\n - test_stage_timing_rate_limit_counts: simulate 3 rate-limit events, extract, assert rate_limit_hits=3\n - test_stage_timing_retry_counts: simulate 2 retries, extract, assert retries=2\n - test_rate_limit_fields_omitted_when_zero: StageTiming with zero counts, serialize, assert no keys\nGREEN: Add fields to StageTiming, implement on_event in MetricsLayer\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- Events outside any span: ctx.event_span() returns None. Skip counting. This shouldn't happen in practice since all GitLab calls happen within stage spans.\n- Event classification: rely on structured fields (status_code=429) not message text. More reliable and less fragile.\n- Count bubbling: parent stage should aggregate child counts. 
In extract_timings(), sum children's counts into parent.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:55:02.523778Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:25:25.456758Z","closed_at":"2026-02-04T17:25:25.456708Z","close_reason":"Implemented rate_limit_hits and retries counters in StageTiming with skip_serializing_if for zero values","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-3vqk","depends_on_id":"bd-12ae","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-3vqk","depends_on_id":"bd-1o4h","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-3vqk","depends_on_id":"bd-3pk","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-4qd","title":"Write unit tests for core algorithms","description":"## Background\nUnit tests verify the core algorithms in isolation: document extraction formatting, FTS query sanitization, RRF scoring, content hashing, backoff curves, and filter helpers. These tests don't require a database or external services — they test pure functions and logic.\n\n## Approach\nAdd #[cfg(test)] mod tests blocks to each module:\n\n**1. src/documents/extractor.rs:**\n- test_source_type_parse_all_aliases — every alias resolves correctly\n- test_source_type_parse_unknown — returns None\n- test_source_type_as_str_roundtrip — as_str matches parse input\n- test_content_hash_deterministic — same input = same hash\n- test_list_hash_order_independent — sorted before hashing\n- test_list_hash_empty — empty vec produces consistent hash\n\n**2. src/documents/truncation.rs:**\n- test_truncation_edge_cases (per bd-18t TDD Loop)\n\n**3. 
src/search/fts.rs:**\n- test_to_fts_query_basic — \"auth error\" -> quoted tokens\n- test_to_fts_query_prefix — \"auth*\" preserves prefix\n- test_to_fts_query_special_chars — \"C++\" quoted correctly\n- test_to_fts_query_dash — \"-DWITH_SSL\" quoted (not NOT operator)\n- test_to_fts_query_internal_quotes — escaped by doubling\n- test_to_fts_query_empty — empty string returns empty\n\n**4. src/search/rrf.rs:**\n- test_rrf_dual_list — docs in both lists score higher\n- test_rrf_normalization — best score = 1.0\n- test_rrf_empty — empty returns empty\n\n**5. src/core/backoff.rs:**\n- test_exponential_curve — delays double each attempt\n- test_cap_at_one_hour — high attempt_count capped\n- test_jitter_range — within [0.9, 1.1) factor\n\n**6. src/search/filters.rs:**\n- test_has_any_filter — true/false for various filter combos\n- test_clamp_limit — 0->20, 200->100, 50->50\n- test_path_filter_from_str — trailing slash = Prefix\n\n**7. src/search/hybrid.rs (hydration round-trip):**\n- test_single_round_trip_query — verify hydration SQL produces correct structure\n\n## Acceptance Criteria\n- [ ] All edge cases covered per PRD acceptance criteria\n- [ ] Tests are unit tests (no DB, no network, no Ollama)\n- [ ] `cargo test` passes with all new tests\n- [ ] No test depends on execution order\n- [ ] Tests cover: document extractor formats, truncation, RRF, hashing, FTS sanitization, backoff, filters\n\n## Files\n- In-module tests in: extractor.rs, truncation.rs, fts.rs, rrf.rs, backoff.rs, filters.rs, hybrid.rs\n\n## TDD Loop\nThese tests ARE the TDD loop for their respective beads. 
Each implementation bead should write its tests first (RED), then implement (GREEN).\nVERIFY: `cargo test`\n\n## Edge Cases\n- Tests with Unicode: include emoji, CJK characters in truncation tests\n- Tests with empty strings: empty queries, empty content, empty labels\n- Tests with boundary values: limit=0, limit=100, limit=101","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-30T15:27:21.712924Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:46:00.059346Z","closed_at":"2026-01-30T17:46:00.059292Z","close_reason":"All acceptance criteria tests already exist across modules. 276 tests passing (189 unit + 87 integration).","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-4qd","depends_on_id":"bd-18t","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-4qd","depends_on_id":"bd-1k1","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-4qd","depends_on_id":"bd-36p","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-4qd","depends_on_id":"bd-3ez","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-4qd","depends_on_id":"bd-mem","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-5ofk","title":"Implement theme configuration (ftui ThemeBuilder)","description":"## Background\nThe TUI uses FrankenTUI's Theme struct with 19 semantic AdaptiveColor slots for consistent styling. Each slot takes an AdaptiveColor::adaptive(light, dark) pair for automatic light/dark mode switching via terminal background detection. The palette is Flexoki by Steph Ango — an ink-inspired color scheme designed in Oklab perceptual color space for balanced contrast in both modes.\n\n## Approach\nCreate `crates/lore-tui/src/theme.rs` with:\n\n### build_theme() -> Theme\nUse `Theme::builder()` from `ftui_style::theme::{Theme, ThemeBuilder, AdaptiveColor}`. 
Each slot gets `AdaptiveColor::adaptive(light, dark)` with Flexoki hex values:\n\n**Flexoki Base Tones:**\n- Paper #FFFCF0, Base-50 #F2F0E5, Base-100 #E6E4D9, Base-150 #DAD8CE\n- Base-200 #CECDC3, Base-300 #B7B5AC, Base-400 #9F9D96, Base-500 #878580\n- Base-600 #6F6E69, Base-700 #575653, Base-800 #403E3C, Base-850 #343331\n- Base-900 #282726, Base-950 #1C1B1A, Black #100F0F\n\n**Flexoki Accent Colors (light-600 / dark-400):**\n- Red: #AF3029 / #D14D41, Orange: #BC5215 / #DA702C\n- Yellow: #AD8301 / #D0A215, Green: #66800B / #879A39\n- Cyan: #24837B / #3AA99F, Blue: #205EA6 / #4385BE\n- Purple: #5E409D / #8B7EC8, Magenta: #A02F6F / #CE5D97\n\n**19-Slot Mapping** (light / dark):\n\n| Slot | Light | Dark |\n|------|-------|------|\n| primary | Blue-600 #205EA6 | Blue-400 #4385BE |\n| secondary | Cyan-600 #24837B | Cyan-400 #3AA99F |\n| accent | Purple-600 #5E409D | Purple-400 #8B7EC8 |\n| background | Paper #FFFCF0 | Black #100F0F |\n| surface | Base-50 #F2F0E5 | Base-900 #282726 |\n| overlay | Base-100 #E6E4D9 | Base-850 #343331 |\n| text | Base-700 #575653 | Base-200 #CECDC3 |\n| text_muted | Base-500 #878580 | Base-500 #878580 |\n| text_subtle | Base-400 #9F9D96 | Base-600 #6F6E69 |\n| success | Green-600 #66800B | Green-400 #879A39 |\n| warning | Yellow-600 #AD8301 | Yellow-400 #D0A215 |\n| error | Red-600 #AF3029 | Red-400 #D14D41 |\n| info | Blue-600 #205EA6 | Blue-400 #4385BE |\n| border | Base-300 #B7B5AC | Base-700 #575653 |\n| border_focused | Blue-600 #205EA6 | Blue-400 #4385BE |\n| selection_bg | Base-100 #E6E4D9 | Base-800 #403E3C |\n| selection_fg | Base-700 #575653 | Base-100 #E6E4D9 |\n| scrollbar_track | Base-50 #F2F0E5 | Base-900 #282726 |\n| scrollbar_thumb | Base-300 #B7B5AC | Base-700 #575653 |\n\nCode pattern:\n```rust\nuse ftui_style::theme::{Theme, AdaptiveColor};\nuse ftui_style::Color;\n\npub fn build_theme() -> Theme {\n Theme::builder()\n .primary(AdaptiveColor::adaptive(\n Color::rgb(0x20, 0x5E, 0xA6), // Blue-600\n Color::rgb(0x43, 0x85, 
0xBE), // Blue-400\n ))\n // ... all 19 slots ...\n .build()\n}\n```\n\n### State Colors\nHelper functions for GitLab entity states:\n- `state_color(state: &str) -> Color` — opened: Green-400 #879A39, closed: Red-400 #D14D41, merged: Purple-400 #8B7EC8, locked: Yellow-400 #D0A215\n- These return fixed Color (not AdaptiveColor) since state colors should be consistent\n\n### Event Type Colors\nFor timeline rendering:\n- created: Green, updated: Blue, closed: Red, merged: Purple, commented: Cyan, labeled: Orange, milestoned: Yellow\n\n### label_style(hex_color: &str) -> Style\nConverts GitLab label hex strings (e.g., \"#FF0000\") to ftui Style:\n- Parse hex to RGB using `u8::from_str_radix`\n- Return `Style::default().fg(Color::rgb(r, g, b))`\n- Invalid hex: return `Style::default().fg(text_muted_color)` as fallback\n\n## Acceptance Criteria\n- [ ] build_theme() returns Theme with all 19 slots using Flexoki AdaptiveColor::adaptive values\n- [ ] Theme::builder() chain compiles and build() returns Theme\n- [ ] state_color(\"opened\") returns Green-400, state_color(\"closed\") returns Red-400, state_color(\"merged\") returns Purple-400, state_color(\"locked\") returns Yellow-400\n- [ ] label_style(\"#FF0000\") returns Style with fg Color::rgb(255, 0, 0)\n- [ ] label_style(\"invalid\") returns Style with muted fallback color (no panic)\n- [ ] label_style(\"#ff0000\") handles lowercase hex\n\n## Files\n- CREATE: crates/lore-tui/src/theme.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add `pub mod theme;`)\n\n## TDD Anchor\nRED: Write `test_build_theme_all_slots_set` that calls build_theme(), resolves for dark mode, and asserts primary == Color::rgb(0x43, 0x85, 0xBE).\nGREEN: Implement build_theme() with full Flexoki mapping.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml theme\n\nAdditional tests:\n- test_theme_light_mode_primary (resolves to #205EA6)\n- test_state_color_opened_is_green\n- test_state_color_unknown_returns_muted\n- test_label_style_valid_hex\n- 
test_label_style_invalid_hex_fallback\n- test_label_style_lowercase_hex\n\n## Edge Cases\n- Terminal may not support true color (RGB) — AdaptiveColor handles fallback via ftui's backend detection\n- Label colors from GitLab may include or omit the # prefix — handle both\n- Empty string label color — return fallback\n- State color for unknown states (e.g., \"all\") — return text_muted as default\n\n## Dependency Context\nDepends on bd-3ddw (scaffold) for the crate structure to exist and ftui-style dependency in Cargo.toml.\nDependents: bd-26f2 (common widgets) consumes build_theme() to style status bar, breadcrumb, etc.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:55:42.582468Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:02:52.171185Z","closed_at":"2026-02-12T20:02:52.171134Z","close_reason":"Implemented Flexoki theme: build_theme() with 19 AdaptiveColor slots, state_color(), event_color(), label_style(). 16 tests passing, clippy clean.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-5ofk","depends_on_id":"bd-3ddw","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} @@ -271,7 +271,7 @@ {"id":"bd-apmo","title":"OBSERV: Create migration 014 for sync_runs enrichment","description":"## Background\nThe sync_runs table (created in migration 001) has columns id, started_at, heartbeat_at, finished_at, status, command, error, metrics_json but NOTHING writes to it. 
This migration adds columns for the observability correlation ID and aggregate counts, enabling queryable sync history.\n\n## Approach\nCreate migrations/014_sync_runs_enrichment.sql:\n\n```sql\n-- Migration 014: sync_runs enrichment for observability\n-- Adds correlation ID and aggregate counts for queryable sync history\n\nALTER TABLE sync_runs ADD COLUMN run_id TEXT;\nALTER TABLE sync_runs ADD COLUMN total_items_processed INTEGER DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN total_errors INTEGER DEFAULT 0;\n\n-- Index for correlation queries (find run by run_id from logs)\nCREATE INDEX IF NOT EXISTS idx_sync_runs_run_id ON sync_runs(run_id);\n```\n\nMigration naming convention: check migrations/ directory. Current latest is 013_resource_event_watermarks.sql. Next is 014.\n\nNote: SQLite ALTER TABLE ADD COLUMN is always safe -- it sets NULL for existing rows. DEFAULT 0 applies to new INSERTs only.\n\n## Acceptance Criteria\n- [ ] Migration 014 applies cleanly on a fresh DB (all migrations 001-014)\n- [ ] Migration 014 applies cleanly on existing DB with 001-013 already applied\n- [ ] sync_runs table has run_id TEXT column\n- [ ] sync_runs table has total_items_processed INTEGER DEFAULT 0 column\n- [ ] sync_runs table has total_errors INTEGER DEFAULT 0 column\n- [ ] idx_sync_runs_run_id index exists\n- [ ] Existing sync_runs rows (if any) have NULL run_id, 0 for counts\n- [ ] cargo clippy --all-targets -- -D warnings passes (no code changes, but verify migration is picked up)\n\n## Files\n- migrations/014_sync_runs_enrichment.sql (new file)\n\n## TDD Loop\nRED:\n - test_migration_014_applies: apply all migrations on fresh in-memory DB, query sync_runs schema\n - test_migration_014_idempotent: CREATE INDEX IF NOT EXISTS makes re-run safe; ALTER TABLE ADD COLUMN is NOT idempotent in SQLite (will error). 
Consider: skip this test or use IF NOT EXISTS workaround\nGREEN: Create migration file\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- ALTER TABLE ADD COLUMN in SQLite: NOT idempotent. Running migration twice will error \"duplicate column name.\" The migration system should prevent re-runs, but IF NOT EXISTS is not available for ALTER TABLE in SQLite. Rely on migration tracking.\n- Migration numbering conflict: if another PR adds 014 first, renumber to 015. Check before merging.\n- metrics_json already exists (from migration 001): we don't touch it. The new columns supplement it with queryable aggregates.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:54:51.311879Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:34:05.309761Z","closed_at":"2026-02-04T17:34:05.309714Z","close_reason":"Created migration 014 adding run_id TEXT, total_items_processed INTEGER, total_errors INTEGER to sync_runs, with idx_sync_runs_run_id index","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-apmo","depends_on_id":"bd-3pz","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-arka","title":"Extend SyncRunRecorder with surgical mode lifecycle methods","description":"## Background\nThe existing `SyncRunRecorder` in `src/core/sync_run.rs` manages sync run lifecycle with three methods: `start()` (creates row, returns Self), `succeed(self, ...)` (consumes self, sets succeeded), and `fail(self, ...)` (consumes self, sets failed). Both `succeed()` and `fail()` take ownership of `self` — this is intentional to prevent double-finalization.\n\nSurgical sync needs additional lifecycle methods to:\n1. Set surgical-specific metadata (mode, phase, IIDs JSON) after `start()`\n2. Record per-entity results (increment counters, store entity-level outcomes)\n3. Cancel a run (distinct from failure — user-initiated or timeout)\n4. 
Update phase progression during the surgical pipeline\n\nThese methods operate on the columns added by migration 027 (bead bd-tiux).\n\n## Approach\n\n### Step 1: Add `set_surgical_metadata` method\n\nCalled once after `start()` to set the surgical mode columns:\n\n```rust\npub fn set_surgical_metadata(\n &self,\n conn: &Connection,\n mode: &str,\n phase: &str,\n iids_json: &str,\n) -> Result<()> {\n conn.execute(\n \"UPDATE sync_runs SET mode = ?1, phase = ?2, surgical_iids_json = ?3 WHERE id = ?4\",\n rusqlite::params![mode, phase, iids_json, self.row_id],\n )?;\n Ok(())\n}\n```\n\nTakes `&self` (not `self`) because the recorder continues to be used after metadata is set.\n\n### Step 2: Add `update_phase` method\n\nCalled as the surgical pipeline progresses through phases:\n\n```rust\npub fn update_phase(&self, conn: &Connection, phase: &str) -> Result<()> {\n conn.execute(\n \"UPDATE sync_runs SET phase = ?1, heartbeat_at = ?2 WHERE id = ?3\",\n rusqlite::params![phase, now_ms(), self.row_id],\n )?;\n Ok(())\n}\n```\n\n### Step 3: Add `record_entity_result` method\n\nCalled after each entity (issue or MR) is processed to increment counters:\n\n```rust\npub fn record_entity_result(\n &self,\n conn: &Connection,\n entity_type: &str,\n stage: &str,\n) -> Result<()> {\n let column = match (entity_type, stage) {\n (\"issue\", \"fetched\") => \"issues_fetched\",\n (\"issue\", \"ingested\") => \"issues_ingested\",\n (\"mr\", \"fetched\") => \"mrs_fetched\",\n (\"mr\", \"ingested\") => \"mrs_ingested\",\n (\"issue\" | \"mr\", \"skipped_stale\") => \"skipped_stale\",\n (\"doc\", \"regenerated\") => \"docs_regenerated\",\n (\"doc\", \"embedded\") => \"docs_embedded\",\n (_, \"warning\") => \"warnings_count\",\n _ => return Ok(()), // Unknown combinations are silently ignored\n };\n conn.execute(\n &format!(\"UPDATE sync_runs SET {column} = {column} + 1 WHERE id = ?1\"),\n rusqlite::params![self.row_id],\n )?;\n Ok(())\n}\n```\n\nNote: The column name comes from a 
hardcoded match, NOT from user input — no SQL injection risk.\n\n### Step 4: Add `cancel` method\n\nConsumes self (like succeed/fail) to finalize the run as cancelled:\n\n```rust\npub fn cancel(self, conn: &Connection, reason: &str) -> Result<()> {\n let now = now_ms();\n conn.execute(\n \"UPDATE sync_runs SET finished_at = ?1, cancelled_at = ?2, status = 'cancelled', error = ?3 WHERE id = ?4\",\n rusqlite::params![now, now, reason, self.row_id],\n )?;\n Ok(())\n}\n```\n\nTakes `self` (ownership) like `succeed()` and `fail()` — prevents further use after cancellation.\n\n### Step 5: Expose `row_id` getter\n\nThe orchestrator (bd-1i4i) may need the row_id for logging/tracing:\n\n```rust\npub fn row_id(&self) -> i64 {\n self.row_id\n}\n```\n\n## Acceptance Criteria\n- [ ] `set_surgical_metadata(&self, conn, mode, phase, iids_json)` writes mode/phase/surgical_iids_json columns\n- [ ] `update_phase(&self, conn, phase)` updates phase and heartbeat_at\n- [ ] `record_entity_result(&self, conn, entity_type, stage)` increments the correct counter column\n- [ ] `record_entity_result` silently ignores unknown entity_type/stage combinations\n- [ ] `cancel(self, conn, reason)` consumes self, sets status='cancelled', finished_at, cancelled_at, error\n- [ ] `row_id()` returns the internal row_id\n- [ ] `succeed(self, ...)` still works after `set_surgical_metadata` + `record_entity_result` calls\n- [ ] `fail(self, ...)` still works after `set_surgical_metadata` + `update_phase` calls\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n- [ ] All existing sync_run tests continue to pass\n\n## Files\n- MODIFY: src/core/sync_run.rs (add methods to SyncRunRecorder impl block)\n- MODIFY: src/core/sync_run_tests.rs (add new tests)\n\n## TDD Anchor\nRED: Write tests in `src/core/sync_run_tests.rs`:\n\n```rust\n#[test]\nfn surgical_lifecycle_start_metadata_succeed() {\n let conn = setup_test_db();\n let recorder = 
SyncRunRecorder::start(&conn, \"sync\", \"surg001\").unwrap();\n let row_id = recorder.row_id();\n\n recorder.set_surgical_metadata(\n &conn, \"surgical\", \"preflight\", r#\"{\"issues\":[7,8],\"mrs\":[101]}\"#,\n ).unwrap();\n\n recorder.update_phase(&conn, \"ingest\").unwrap();\n recorder.record_entity_result(&conn, \"issue\", \"fetched\").unwrap();\n recorder.record_entity_result(&conn, \"issue\", \"fetched\").unwrap();\n recorder.record_entity_result(&conn, \"issue\", \"ingested\").unwrap();\n recorder.record_entity_result(&conn, \"mr\", \"fetched\").unwrap();\n recorder.record_entity_result(&conn, \"mr\", \"ingested\").unwrap();\n\n recorder.succeed(&conn, &[], 3, 0).unwrap();\n\n let (mode, phase, iids, issues_fetched, mrs_fetched, issues_ingested, mrs_ingested, status): (\n String, String, String, i64, i64, i64, i64, String,\n ) = conn.query_row(\n \"SELECT mode, phase, surgical_iids_json, issues_fetched, mrs_fetched, issues_ingested, mrs_ingested, status\n FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?, r.get(3)?, r.get(4)?, r.get(5)?, r.get(6)?, r.get(7)?)),\n ).unwrap();\n\n assert_eq!(mode, \"surgical\");\n assert_eq!(phase, \"ingest\"); // Last phase set before succeed\n assert!(iids.contains(\"101\"));\n assert_eq!(issues_fetched, 2);\n assert_eq!(mrs_fetched, 1);\n assert_eq!(issues_ingested, 1);\n assert_eq!(mrs_ingested, 1);\n assert_eq!(status, \"succeeded\");\n}\n\n#[test]\nfn surgical_lifecycle_cancel() {\n let conn = setup_test_db();\n let recorder = SyncRunRecorder::start(&conn, \"sync\", \"cancel01\").unwrap();\n let row_id = recorder.row_id();\n\n recorder.set_surgical_metadata(&conn, \"surgical\", \"preflight\", \"{}\").unwrap();\n recorder.cancel(&conn, \"User requested cancellation\").unwrap();\n\n let (status, error, cancelled_at, finished_at): (String, Option, Option, Option) = conn.query_row(\n \"SELECT status, error, cancelled_at, finished_at FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| 
Ok((r.get(0)?, r.get(1)?, r.get(2)?, r.get(3)?)),\n ).unwrap();\n\n assert_eq!(status, \"cancelled\");\n assert_eq!(error.as_deref(), Some(\"User requested cancellation\"));\n assert!(cancelled_at.is_some());\n assert!(finished_at.is_some());\n}\n\n#[test]\nfn record_entity_result_ignores_unknown() {\n let conn = setup_test_db();\n let recorder = SyncRunRecorder::start(&conn, \"sync\", \"unk001\").unwrap();\n // Should not panic or error on unknown combinations\n recorder.record_entity_result(&conn, \"widget\", \"exploded\").unwrap();\n}\n\n#[test]\nfn record_entity_result_json_counters() {\n let conn = setup_test_db();\n let recorder = SyncRunRecorder::start(&conn, \"sync\", \"cnt001\").unwrap();\n let row_id = recorder.row_id();\n\n recorder.record_entity_result(&conn, \"doc\", \"regenerated\").unwrap();\n recorder.record_entity_result(&conn, \"doc\", \"regenerated\").unwrap();\n recorder.record_entity_result(&conn, \"doc\", \"embedded\").unwrap();\n recorder.record_entity_result(&conn, \"issue\", \"skipped_stale\").unwrap();\n\n let (docs_regen, docs_embed, skipped): (i64, i64, i64) = conn.query_row(\n \"SELECT docs_regenerated, docs_embedded, skipped_stale FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?)),\n ).unwrap();\n\n assert_eq!(docs_regen, 2);\n assert_eq!(docs_embed, 1);\n assert_eq!(skipped, 1);\n}\n```\n\nGREEN: Add all methods to `SyncRunRecorder`.\nVERIFY: `cargo test surgical_lifecycle && cargo test record_entity_result`\n\n## Edge Cases\n- `succeed()` and `fail()` consume `self` — the compiler enforces that no methods are called after finalization. `cancel()` also consumes self for the same reason.\n- `set_surgical_metadata`, `update_phase`, and `record_entity_result` take `&self` — they can be called multiple times before finalization.\n- The `record_entity_result` match uses a hardcoded column name derived from known string constants, not user input. 
The `format!` is safe because the column name is always one of the hardcoded strings.\n- `record_entity_result` silently returns Ok(()) for unknown entity_type/stage combos rather than erroring — this avoids breaking the pipeline for non-critical telemetry.\n- Phase is NOT overwritten by `succeed()`/`fail()`/`cancel()` — the last phase set via `update_phase()` is preserved as the \"phase at completion\" for observability.\n\n## Dependency Context\nDepends on bd-tiux (migration 027) for the surgical columns to exist. Downstream beads bd-1i4i (orchestrator) and bd-3jqx (integration tests) use these methods.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:13:50.827946Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:04:15.562997Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-arka","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-arka","depends_on_id":"bd-3jqx","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-b51e","title":"WHO: Overlap mode query (query_overlap)","description":"## Background\n\nOverlap mode answers \"Who else has MRs/notes touching my files?\" — helps identify potential reviewers, collaborators, or conflicting work at a path. 
Tracks author and reviewer roles separately for richer signal.\n\n## Approach\n\n### SQL: two static variants (prefix/exact) with reviewer + author UNION ALL\n\nBoth branches return: username, role, touch_count (COUNT DISTINCT m.id), last_seen_at, mr_refs (GROUP_CONCAT of project-qualified refs).\n\nKey differences from Expert:\n- No scoring formula — just touch_count ranking\n- mr_refs collected for actionable output (group/project!iid format)\n- Rust-side merge needed (can't fully aggregate in SQL due to HashSet dedup of mr_refs across branches)\n\n### Reviewer branch includes:\n- Self-review exclusion: `n.author_username != m.author_username`\n- MR state filter: `m.state IN ('opened','merged')`\n- Project-qualified refs: `GROUP_CONCAT(DISTINCT (p.path_with_namespace || '!' || m.iid))`\n\n### Rust accumulator pattern:\n```rust\nstruct OverlapAcc {\n username: String,\n author_touch_count: u32,\n review_touch_count: u32,\n touch_count: u32,\n last_seen_at: i64,\n mr_refs: HashSet, // O(1) dedup from the start\n}\n// Build HashMap from rows\n// Convert to Vec, sort, bound mr_refs\n```\n\n### Bounded mr_refs:\n```rust\nconst MAX_MR_REFS_PER_USER: usize = 50;\nlet mr_refs_total = mr_refs.len() as u32;\nlet mr_refs_truncated = mr_refs.len() > MAX_MR_REFS_PER_USER;\n```\n\n### Deterministic sort: touch_count DESC, last_seen_at DESC, username ASC\n\n### format_overlap_role():\n```rust\nfn format_overlap_role(user: &OverlapUser) -> &'static str {\n match (user.author_touch_count > 0, user.review_touch_count > 0) {\n (true, true) => \"A+R\", (true, false) => \"A\",\n (false, true) => \"R\", (false, false) => \"-\",\n }\n}\n```\n\n### OverlapResult/OverlapUser structs include path_match (\"exact\"/\"prefix\"), truncated bool, per-user mr_refs_total + mr_refs_truncated\n\n## Files\n\n- `src/cli/commands/who.rs`\n\n## TDD Loop\n\nRED:\n```\ntest_overlap_dual_roles — user is author of MR 1 and reviewer of MR 2 at same path; verify A+R role, both touch counts > 0, mr_refs 
contain \"team/backend!\"\ntest_overlap_multi_project_mr_refs — same iid 100 in two projects; verify both \"team/backend!100\" and \"team/frontend!100\" present\ntest_overlap_excludes_self_review_notes — author comments on own MR; review_touch_count must be 0\n```\n\nGREEN: Implement query_overlap with both SQL variants + accumulator\nVERIFY: `cargo test -- overlap`\n\n## Acceptance Criteria\n\n- [ ] test_overlap_dual_roles passes (A+R role detection)\n- [ ] test_overlap_multi_project_mr_refs passes (project-qualified refs unique)\n- [ ] test_overlap_excludes_self_review_notes passes\n- [ ] Default since window: 30d\n- [ ] mr_refs sorted alphabetically for deterministic output\n- [ ] touch_count uses coherent units (COUNT DISTINCT m.id on BOTH branches)\n\n## Edge Cases\n\n- Both branches count MRs (not DiffNotes) for coherent touch_count — mixing units produces misleading totals\n- mr_refs from GROUP_CONCAT may contain duplicates across branches — HashSet handles dedup\n- Project scoping on n.project_id (not m.project_id) for index alignment\n- mr_refs sorted before output (HashSet iteration is nondeterministic)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:40:46.729921Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.598708Z","closed_at":"2026-02-08T04:10:29.598673Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. 
All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-b51e","depends_on_id":"bd-2ldg","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-b51e","depends_on_id":"bd-34rr","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} -{"id":"bd-bcte","title":"Implement filter DSL parser state machine","description":"## Background\n\nThe Issue List and MR List filter bars accept typed filter expressions (e.g., `state:opened author:@asmith label:\"high priority\" -milestone:v2.0`). The PRD Appendix B defines a full state machine: Inactive -> Active -> FieldSelect/FreeText -> ValueInput. The parser needs to handle field:value pairs, negation prefix (`-`), quoted values with spaces, bare text as free-text search, and inline error diagnostics when an unrecognized field name is typed. This is a substantial subsystem that the entity table filter bar widget (bd-18qs) depends on for its core functionality.\n\n## Approach\n\nCreate a `filter_dsl.rs` module with:\n\n1. **FilterToken enum** — `Field { name: String, value: String, negated: bool }` | `FreeText(String)` | `Error { position: usize, message: String }`\n2. **`parse_filter(input: &str) -> Vec`** — Tokenizer that handles:\n - `field:value` — recognized fields: state, author, assignee, label, milestone, since, project (issue); + reviewer, draft, target, source (MR)\n - `-field:value` — negation prefix strips the `-` and sets `negated: true`\n - `field:\"quoted value\"` — double-quoted values preserve spaces\n - bare words — collected as `FreeText` tokens\n - unrecognized field names — produce `Error` token with position and message\n3. 
**FilterBarState** state machine:\n - `Inactive` — filter bar not focused\n - `Active(Typing)` — user typing, no suggestion yet\n - `Active(Suggesting)` — 200ms pause triggers field name suggestions\n - `FieldSelect` — dropdown showing recognized field names after `:`\n - `ValueInput` — context-dependent completions (e.g., state values: opened/closed/all)\n4. **`apply_issue_filter(tokens: &[FilterToken]) -> IssueFilterParams`** — converts tokens to query parameters\n5. **`apply_mr_filter(tokens: &[FilterToken]) -> MrFilterParams`** — MR variant with reviewer, draft, target/source fields\n\n## Acceptance Criteria\n- [ ] `parse_filter(\"state:opened\")` returns one Field token with name=\"state\", value=\"opened\", negated=false\n- [ ] `parse_filter(\"-label:bug\")` returns one Field with negated=true\n- [ ] `parse_filter('author:\"Jane Doe\"')` returns one Field with value=\"Jane Doe\" (quotes stripped)\n- [ ] `parse_filter(\"foo:bar\")` where \"foo\" is not a recognized field returns Error token with position\n- [ ] `parse_filter(\"state:opened some text\")` returns Field + FreeText tokens\n- [ ] `parse_filter(\"\")` returns empty vec\n- [ ] FilterBarState transitions match the Appendix B state machine diagram\n- [ ] apply_issue_filter correctly maps all 7 issue fields (state, author, assignee, label, milestone, since, project)\n- [ ] apply_mr_filter correctly maps additional MR fields (reviewer, draft, target, source)\n- [ ] Inline error diagnostics include the character position of the unrecognized field\n\n## Files\n- CREATE: crates/lore-tui/src/widgets/filter_dsl.rs\n- MODIFY: crates/lore-tui/src/widgets/mod.rs (add `pub mod filter_dsl;`)\n\n## TDD Anchor\nRED: Write `test_parse_simple_field_value` that asserts `parse_filter(\"state:opened\")` returns `[Field { name: \"state\", value: \"opened\", negated: false }]`.\nGREEN: Implement the tokenizer for the simplest case.\nVERIFY: cargo test -p lore-tui parse_simple\n\nAdditional tests:\n- test_parse_negation\n- 
test_parse_quoted_value\n- test_parse_unrecognized_field_produces_error\n- test_parse_mixed_tokens\n- test_parse_empty_input\n- test_apply_issue_filter_maps_all_fields\n- test_apply_mr_filter_maps_additional_fields\n- test_filter_bar_state_transitions\n\n## Edge Cases\n- Unclosed quote (`author:\"Jane`) — treat rest of input as the value, produce warning token\n- Empty value (`state:`) — produce Error token, not a Field with empty value\n- Multiple colons (`field:val:ue`) — first colon splits, rest is part of value\n- Unicode in field values (`author:@rene`) — must handle multi-byte chars correctly\n- Very long filter strings (>1000 chars) — must not allocate unbounded; truncate with error\n\n## Dependency Context\n- Depends on bd-18qs (entity table + filter bar widgets) which provides the TextInput widget and filter bar rendering. This bead provides the PARSER that bd-18qs's filter bar CALLS.\n- Consumed by bd-3ei1 (Issue List) and bd-2kr0 (MR List) for converting user filter input into query parameters.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:37.516695Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:29:47.312394Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-bcte","depends_on_id":"bd-18qs","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-bcte","title":"Implement filter DSL parser state machine","description":"## Background\n\nThe Issue List and MR List filter bars accept typed filter expressions (e.g., `state:opened author:@asmith label:\"high priority\" -milestone:v2.0`). The PRD Appendix B defines a full state machine: Inactive -> Active -> FieldSelect/FreeText -> ValueInput. The parser needs to handle field:value pairs, negation prefix (`-`), quoted values with spaces, bare text as free-text search, and inline error diagnostics when an unrecognized field name is typed. 
This is a substantial subsystem that the entity table filter bar widget (bd-18qs) depends on for its core functionality.\n\n## Approach\n\nCreate a `filter_dsl.rs` module with:\n\n1. **FilterToken enum** — `Field { name: String, value: String, negated: bool }` | `FreeText(String)` | `Error { position: usize, message: String }`\n2. **`parse_filter(input: &str) -> Vec`** — Tokenizer that handles:\n - `field:value` — recognized fields: state, author, assignee, label, milestone, since, project (issue); + reviewer, draft, target, source (MR)\n - `-field:value` — negation prefix strips the `-` and sets `negated: true`\n - `field:\"quoted value\"` — double-quoted values preserve spaces\n - bare words — collected as `FreeText` tokens\n - unrecognized field names — produce `Error` token with position and message\n3. **FilterBarState** state machine:\n - `Inactive` — filter bar not focused\n - `Active(Typing)` — user typing, no suggestion yet\n - `Active(Suggesting)` — 200ms pause triggers field name suggestions\n - `FieldSelect` — dropdown showing recognized field names after `:`\n - `ValueInput` — context-dependent completions (e.g., state values: opened/closed/all)\n4. **`apply_issue_filter(tokens: &[FilterToken]) -> IssueFilterParams`** — converts tokens to query parameters\n5. 
**`apply_mr_filter(tokens: &[FilterToken]) -> MrFilterParams`** — MR variant with reviewer, draft, target/source fields\n\n## Acceptance Criteria\n- [ ] `parse_filter(\"state:opened\")` returns one Field token with name=\"state\", value=\"opened\", negated=false\n- [ ] `parse_filter(\"-label:bug\")` returns one Field with negated=true\n- [ ] `parse_filter('author:\"Jane Doe\"')` returns one Field with value=\"Jane Doe\" (quotes stripped)\n- [ ] `parse_filter(\"foo:bar\")` where \"foo\" is not a recognized field returns Error token with position\n- [ ] `parse_filter(\"state:opened some text\")` returns Field + FreeText tokens\n- [ ] `parse_filter(\"\")` returns empty vec\n- [ ] FilterBarState transitions match the Appendix B state machine diagram\n- [ ] apply_issue_filter correctly maps all 7 issue fields (state, author, assignee, label, milestone, since, project)\n- [ ] apply_mr_filter correctly maps additional MR fields (reviewer, draft, target, source)\n- [ ] Inline error diagnostics include the character position of the unrecognized field\n\n## Files\n- CREATE: crates/lore-tui/src/widgets/filter_dsl.rs\n- MODIFY: crates/lore-tui/src/widgets/mod.rs (add `pub mod filter_dsl;`)\n\n## TDD Anchor\nRED: Write `test_parse_simple_field_value` that asserts `parse_filter(\"state:opened\")` returns `[Field { name: \"state\", value: \"opened\", negated: false }]`.\nGREEN: Implement the tokenizer for the simplest case.\nVERIFY: cargo test -p lore-tui parse_simple\n\nAdditional tests:\n- test_parse_negation\n- test_parse_quoted_value\n- test_parse_unrecognized_field_produces_error\n- test_parse_mixed_tokens\n- test_parse_empty_input\n- test_apply_issue_filter_maps_all_fields\n- test_apply_mr_filter_maps_additional_fields\n- test_filter_bar_state_transitions\n\n## Edge Cases\n- Unclosed quote (`author:\"Jane`) — treat rest of input as the value, produce warning token\n- Empty value (`state:`) — produce Error token, not a Field with empty value\n- Multiple colons 
(`field:val:ue`) — first colon splits, rest is part of value\n- Unicode in field values (`author:@rene`) — must handle multi-byte chars correctly\n- Very long filter strings (>1000 chars) — must not allocate unbounded; truncate with error\n\n## Dependency Context\n- Depends on bd-18qs (entity table + filter bar widgets) which provides the TextInput widget and filter bar rendering. This bead provides the PARSER that bd-18qs's filter bar CALLS.\n- Consumed by bd-3ei1 (Issue List) and bd-2kr0 (MR List) for converting user filter input into query parameters.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:37.516695Z","created_by":"tayloreernisse","updated_at":"2026-02-19T03:38:19.796410Z","closed_at":"2026-02-19T03:38:19.796224Z","close_reason":"Already implemented in filter_dsl.rs with parser, field validation, negation, quoted values, free-text, and 11 tests.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-bcte","depends_on_id":"bd-18qs","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-bjo","title":"Implement vector search function","description":"## Background\nVector search queries the sqlite-vec virtual table for nearest-neighbor documents. Because documents may have multiple chunks, the raw KNN results need deduplication by document_id (keeping the best/lowest distance per document). 
The function over-fetches 3x to ensure enough unique documents after dedup.\n\n## Approach\nCreate `src/search/vector.rs`:\n\n```rust\npub struct VectorResult {\n pub document_id: i64,\n pub distance: f64, // Lower = closer match\n}\n\n/// Search documents using sqlite-vec KNN query.\n/// Over-fetches 3x limit to handle chunk dedup.\npub fn search_vector(\n conn: &Connection,\n query_embedding: &[f32], // 768-dim embedding of search query\n limit: usize,\n) -> Result>\n```\n\n**SQL (KNN query):**\n```sql\nSELECT rowid, distance\nFROM embeddings\nWHERE embedding MATCH ?\n AND k = ?\nORDER BY distance\n```\n\n**Algorithm:**\n1. Convert query_embedding to raw LE bytes\n2. Execute KNN with k = limit * 3 (over-fetch for dedup)\n3. Decode each rowid via decode_rowid() -> (document_id, chunk_index)\n4. Group by document_id, keep minimum distance (best chunk)\n5. Sort by distance ascending\n6. Take first `limit` results\n\n## Acceptance Criteria\n- [ ] Returns deduplicated document-level results (not chunk-level)\n- [ ] Best chunk distance kept per document (lowest distance wins)\n- [ ] KNN with k parameter (3x limit)\n- [ ] Query embedding passed as raw LE bytes\n- [ ] Results sorted by distance ascending (closest first)\n- [ ] Returns at most `limit` results\n- [ ] Empty embeddings table returns empty Vec\n- [ ] `cargo build` succeeds\n\n## Files\n- `src/search/vector.rs` — new file\n- `src/search/mod.rs` — add `pub use vector::{search_vector, VectorResult};`\n\n## TDD Loop\nRED: Integration tests need sqlite-vec + seeded embeddings:\n- `test_vector_search_basic` — finds nearest document\n- `test_vector_search_dedup` — multi-chunk doc returns once with best distance\n- `test_vector_search_empty` — empty table returns empty\n- `test_vector_search_limit` — respects limit parameter\nGREEN: Implement search_vector\nVERIFY: `cargo test vector`\n\n## Edge Cases\n- All chunks belong to same document: returns single result\n- Query embedding wrong dimension: sqlite-vec may error 
— handle gracefully\n- Over-fetch returns fewer than limit unique docs: return what we have\n- Distance = 0.0: exact match (valid result)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:50.270357Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:44:56.233611Z","closed_at":"2026-01-30T17:44:56.233512Z","close_reason":"Implemented search_vector with KNN query, 3x over-fetch, chunk dedup. 3 tests pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-bjo","depends_on_id":"bd-1y8","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-bjo","depends_on_id":"bd-2ac","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-c9gk","title":"Implement core types (Msg, Screen, EntityKey, AppError, InputMode)","description":"## Background\nThe core types form the message-passing backbone of the Elm Architecture. Every user action and async result flows through the Msg enum. Screen identifies navigation targets. EntityKey provides safe cross-project entity identity. AppError enables structured error display. 
InputMode controls key dispatch routing.\n\n## Approach\nCreate crates/lore-tui/src/message.rs with:\n- Msg enum (~40 variants): RawEvent, Tick, Resize, NavigateTo, GoBack, GoForward, GoHome, JumpBack, JumpForward, OpenCommandPalette, CloseCommandPalette, CommandPaletteInput, CommandPaletteSelect, IssueListLoaded{generation, rows}, IssueListFilterChanged, IssueListSortChanged, IssueSelected, MrListLoaded{generation, rows}, MrListFilterChanged, MrSelected, IssueDetailLoaded{generation, key, detail}, MrDetailLoaded{generation, key, detail}, DiscussionsLoaded{generation, discussions}, SearchQueryChanged, SearchRequestStarted{generation, query}, SearchExecuted{generation, results}, SearchResultSelected, SearchModeChanged, SearchCapabilitiesLoaded, TimelineLoaded, TimelineEntitySelected, WhoResultLoaded, WhoModeChanged, SyncStarted, SyncProgress, SyncProgressBatch, SyncLogLine, SyncBackpressureDrop, SyncCompleted, SyncCancelled, SyncFailed, SyncStreamStats, SearchDebounceArmed, SearchDebounceFired, DashboardLoaded, Error, ShowHelp, ShowCliEquivalent, OpenInBrowser, BlurTextInput, ScrollToTopCurrentScreen, Quit\n- impl From for Msg (FrankenTUI requirement) — maps Resize, Tick, and wraps everything else in RawEvent\n- Screen enum: Dashboard, IssueList, IssueDetail(EntityKey), MrList, MrDetail(EntityKey), Search, Timeline, Who, Sync, Stats, Doctor, Bootstrap\n- Screen::label() -> &str and Screen::is_detail_or_entity() -> bool\n- EntityKey { project_id: i64, iid: i64, kind: EntityKind } with EntityKey::issue() and EntityKey::mr() constructors\n- EntityKind enum: Issue, MergeRequest\n- AppError enum: DbBusy, DbCorruption(String), NetworkRateLimited{retry_after_secs}, NetworkUnavailable, AuthFailed, ParseError(String), Internal(String) with Display impl\n- InputMode enum: Normal, Text, Palette, GoPrefix{started_at: Instant} with Default -> Normal\n\n## Acceptance Criteria\n- [ ] Msg enum compiles with all ~40 variants\n- [ ] From impl converts Resize->Msg::Resize, 
Tick->Msg::Tick, other->Msg::RawEvent\n- [ ] Screen enum has all 12 variants with label() and is_detail_or_entity() methods\n- [ ] EntityKey::issue() and EntityKey::mr() constructors work correctly\n- [ ] EntityKey derives Debug, Clone, PartialEq, Eq, Hash\n- [ ] AppError Display shows user-friendly messages for each variant\n- [ ] InputMode defaults to Normal\n\n## Files\n- CREATE: crates/lore-tui/src/message.rs\n\n## TDD Anchor\nRED: Write test_entity_key_equality that asserts EntityKey::issue(1, 42) == EntityKey::issue(1, 42) and EntityKey::issue(1, 42) != EntityKey::mr(1, 42).\nGREEN: Implement EntityKey with derives.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_entity_key\n\n## Edge Cases\n- Generation fields (u64) in Msg variants are critical for stale result detection — must be present on all async result variants\n- EntityKey equality must include both project_id AND iid AND kind — bare iid is unsafe with multi-project datasets\n- AppError::NetworkRateLimited retry_after_secs is Option — GitLab may not provide Retry-After header","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:53:37.143607Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:47:33.171785Z","closed_at":"2026-02-12T19:47:33.171570Z","close_reason":"All core types implemented with 11 passing tests: Msg (~40 variants with From), Screen (12 variants with label/is_detail_or_entity), EntityKey (project_id+iid+kind with Hash), AppError (Display impl), InputMode (Default=Normal). 
Clippy clean.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-c9gk","depends_on_id":"bd-3ddw","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-cbo","title":"[CP1] Cargo.toml updates - async-stream and futures","description":"Add required dependencies for async pagination streams.\n\n## Changes\nAdd to Cargo.toml:\n- async-stream = \"0.3\"\n- futures = \"0.3\"\n\n## Why\nThe pagination methods use async generators which require async-stream crate.\nfutures crate provides StreamExt for consuming the streams.\n\n## Done When\n- cargo check passes with new deps\n- No unused dependency warnings\n\nFiles: Cargo.toml","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:42:31.143927Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.661666Z","closed_at":"2026-01-25T17:02:01.661666Z","deleted_at":"2026-01-25T17:02:01.661662Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} @@ -304,7 +304,7 @@ {"id":"bd-mem","title":"Implement shared backoff utility","description":"## Background\nBoth `dirty_sources` and `pending_discussion_fetches` tables use exponential backoff with `next_attempt_at` timestamps. Without a shared utility, each module would duplicate the backoff curve logic, risking drift. 
The shared backoff module ensures consistent retry behavior across all queue consumers in Gate C.\n\n## Approach\nCreate `src/core/backoff.rs` per PRD Section 6.X.\n\n**IMPORTANT — PRD-exact signature and implementation:**\n```rust\nuse rand::Rng;\n\n/// Compute next_attempt_at with exponential backoff and jitter.\n///\n/// Formula: now + min(3600000, 1000 * 2^attempt_count) * (0.9 to 1.1)\n/// - Capped at 1 hour to prevent runaway delays\n/// - ±10% jitter prevents synchronized retries after outages\n///\n/// Used by:\n/// - `dirty_sources` retry scheduling (document regeneration failures)\n/// - `pending_discussion_fetches` retry scheduling (API fetch failures)\n///\n/// Having one implementation prevents subtle divergence between queues\n/// (e.g., different caps or jitter ranges).\npub fn compute_next_attempt_at(now: i64, attempt_count: i64) -> i64 {\n // Cap attempt_count to prevent overflow (2^30 > 1 hour anyway)\n let capped_attempts = attempt_count.min(30) as u32;\n let base_delay_ms = 1000_i64.saturating_mul(1 << capped_attempts);\n let capped_delay_ms = base_delay_ms.min(3_600_000); // 1 hour cap\n\n // Add ±10% jitter\n let jitter_factor = rand::thread_rng().gen_range(0.9..=1.1);\n let delay_with_jitter = (capped_delay_ms as f64 * jitter_factor) as i64;\n\n now + delay_with_jitter\n}\n```\n\n**Key PRD details (must match exactly):**\n- `attempt_count` parameter is `i64` (not `u32`) — matches SQLite integer type from DB columns\n- Overflow prevention: `.min(30) as u32` caps before shift\n- Base delay: `1000_i64.saturating_mul(1 << capped_attempts)` — uses `saturating_mul` for safety\n- Cap: `3_600_000` (1 hour)\n- Jitter: `gen_range(0.9..=1.1)` — inclusive range\n- Return: `i64` (milliseconds epoch)\n\n**Cargo.toml change:** Add `rand = \"0.8\"` to `[dependencies]`.\n\n## Acceptance Criteria\n- [ ] Single shared implementation used by both dirty_tracker and discussion_queue\n- [ ] Signature: `pub fn compute_next_attempt_at(now: i64, attempt_count: i64) -> 
i64`\n- [ ] attempt_count is i64 (matches SQLite column type), not u32\n- [ ] Overflow prevention: `.min(30) as u32` before shift\n- [ ] Base delay uses `1000_i64.saturating_mul(1 << capped_attempts)`\n- [ ] Cap at 1 hour (3,600,000 ms)\n- [ ] Jitter: `gen_range(0.9..=1.1)` inclusive range\n- [ ] Exponential curve: 1s, 2s, 4s, 8s, ... up to 1h cap\n- [ ] `cargo test backoff` passes\n\n## Files\n- `src/core/backoff.rs` — new file\n- `src/core/mod.rs` — add `pub mod backoff;`\n- `Cargo.toml` — add `rand = \"0.8\"`\n\n## TDD Loop\nRED: `src/core/backoff.rs` with `#[cfg(test)] mod tests`:\n- `test_exponential_curve` — verify delays double each attempt (within jitter range)\n- `test_cap_at_one_hour` — attempt 20+ still produces delay <= MAX_DELAY_MS * 1.1\n- `test_jitter_range` — run 100 iterations, all delays within [0.9x, 1.1x] of base\n- `test_first_retry_is_about_one_second` — attempt 1 produces ~1000ms delay\n- `test_overflow_safety` — very large attempt_count doesn't panic\nGREEN: Implement compute_next_attempt_at()\nVERIFY: `cargo test backoff`\n\n## Edge Cases\n- `attempt_count` > 30: `.min(30)` caps, saturating_mul prevents overflow\n- `attempt_count` = 0: not used in practice (callers pass `attempt_count + 1`)\n- `attempt_count` = 1: delay is ~1 second (first retry)\n- Negative attempt_count: `.min(30)` still works, shift of negative-as-u32 wraps but saturating_mul handles it","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-30T15:27:09.474Z","created_by":"tayloreernisse","updated_at":"2026-01-30T16:57:24.900137Z","closed_at":"2026-01-30T16:57:24.899942Z","close_reason":"Completed: compute_next_attempt_at with exp backoff (1s base, 1h cap, +-10% jitter), i64 params matching SQLite, overflow-safe, 5 tests pass","compaction_level":0,"original_size":0} {"id":"bd-mk3","title":"Update ingest command for merge_requests type","description":"## Background\nCLI entry point for MR ingestion. Routes `--type=merge_requests` to the orchestrator. 
Must ensure `--full` resets both MR cursor AND discussion watermarks. This is the user-facing command that kicks off the entire MR sync pipeline.\n\n## Approach\nUpdate `src/cli/commands/ingest.rs` to handle `merge_requests` type:\n1. Add `merge_requests` branch to the resource type match statement\n2. Validate resource type early with helpful error message\n3. Pass `full` flag through to orchestrator (it handles the watermark reset internally)\n\n## Files\n- `src/cli/commands/ingest.rs` - Add merge_requests branch to `run_ingest`\n\n## Acceptance Criteria\n- [ ] `gi ingest --type=merge_requests` runs MR ingestion successfully\n- [ ] `gi ingest --type=merge_requests --full` resets cursor AND discussion watermarks\n- [ ] `gi ingest --type=invalid` returns helpful error listing valid types\n- [ ] Progress output shows MR counts, discussion counts, and skip counts\n- [ ] Default type remains `issues` for backward compatibility\n- [ ] `cargo test ingest_command` passes\n\n## TDD Loop\nRED: `gi ingest --type=merge_requests` -> \"invalid type: merge_requests\"\nGREEN: Add merge_requests to match statement in run_ingest\nVERIFY: `gi ingest --type=merge_requests --help` shows merge_requests as valid\n\n## Function Signature\n```rust\npub async fn run_ingest(\n config: &Config,\n args: &IngestArgs,\n) -> Result<(), GiError>\n```\n\n## IngestArgs Reference (existing)\n```rust\n#[derive(Parser, Debug)]\npub struct IngestArgs {\n /// Resource type to ingest\n #[arg(long, short = 't', default_value = \"issues\")]\n pub r#type: String,\n \n /// Filter to specific project (by path or ID)\n #[arg(long, short = 'p')]\n pub project: Option,\n \n /// Force run even if another ingest is in progress\n #[arg(long, short = 'f')]\n pub force: bool,\n \n /// Full sync - reset cursor and refetch all\n #[arg(long)]\n pub full: bool,\n}\n```\n\n## Code Change\n```rust\nuse crate::core::errors::GiError;\nuse crate::ingestion::orchestrator::Orchestrator;\n\npub async fn run_ingest(\n config: 
&Config,\n args: &IngestArgs,\n) -> Result<(), GiError> {\n let resource_type = args.r#type.as_str();\n \n // Validate resource type early\n match resource_type {\n \"issues\" | \"merge_requests\" => {}\n _ => {\n return Err(GiError::InvalidArgument {\n name: \"type\".to_string(),\n value: resource_type.to_string(),\n expected: \"issues or merge_requests\".to_string(),\n });\n }\n }\n \n // Acquire single-flight lock (unless --force)\n if !args.force {\n acquire_ingest_lock(config, resource_type)?;\n }\n \n // Get projects to ingest (filtered if --project specified)\n let projects = get_projects_to_ingest(config, args.project.as_deref())?;\n \n for project in projects {\n println!(\"Ingesting {} for {}...\", resource_type, project.path);\n \n let orchestrator = Orchestrator::new(\n &config,\n project.id,\n project.gitlab_id,\n )?;\n \n let result = orchestrator.run_ingestion(resource_type, args.full).await?;\n \n // Print results based on resource type\n match resource_type {\n \"issues\" => {\n println!(\" {}: {} issues fetched, {} upserted\",\n project.path, result.issues_fetched, result.issues_upserted);\n }\n \"merge_requests\" => {\n println!(\" {}: {} MRs fetched, {} new labels, {} assignees, {} reviewers\",\n project.path,\n result.mrs_fetched,\n result.labels_created,\n result.assignees_linked,\n result.reviewers_linked,\n );\n println!(\" Discussions: {} synced, {} notes ({} DiffNotes)\",\n result.discussions_synced,\n result.notes_synced,\n result.diffnotes_count,\n );\n if result.mrs_skipped_discussion_sync > 0 {\n println!(\" Skipped discussion sync for {} unchanged MRs\",\n result.mrs_skipped_discussion_sync);\n }\n if result.failed_discussion_syncs > 0 {\n eprintln!(\" Warning: {} MRs failed discussion sync (will retry next run)\",\n result.failed_discussion_syncs);\n }\n }\n _ => unreachable!(),\n }\n }\n \n // Release lock\n if !args.force {\n release_ingest_lock(config, resource_type)?;\n }\n \n Ok(())\n}\n```\n\n## Output Format\n```\nIngesting 
merge_requests for group/project-one...\n group/project-one: 567 MRs fetched, 12 new labels, 89 assignees, 45 reviewers\n Discussions: 456 synced, 1,234 notes (89 DiffNotes)\n Skipped discussion sync for 444 unchanged MRs\n\nTotal: 567 MRs, 456 discussions, 1,234 notes\n```\n\n## Full Sync Behavior\nWhen `--full` is passed:\n1. MR cursor reset to NULL (handled by `ingest_merge_requests` with `full_sync: true`)\n2. Discussion watermarks reset to NULL (handled by `reset_discussion_watermarks` called from ingestion)\n3. All MRs re-fetched from GitLab API\n4. All discussions re-fetched for every MR\n\n## Error Types (from GiError enum)\n```rust\n// In src/core/errors.rs\npub enum GiError {\n InvalidArgument {\n name: String,\n value: String,\n expected: String,\n },\n LockError {\n resource: String,\n message: String,\n },\n // ... other variants\n}\n```\n\n## Edge Cases\n- Default type is `issues` for backward compatibility with CP1\n- Project filter (`--project`) can limit to specific project by path or ID\n- Force flag (`--force`) bypasses single-flight lock for debugging\n- If no projects configured, return helpful error about running `gi project add` first\n- Empty project (no MRs): completes successfully with \"0 MRs fetched\"","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:43.034952Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:28:52.711235Z","closed_at":"2026-01-27T00:28:52.711166Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-mk3","depends_on_id":"bd-10f","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-nu0d","title":"Implement resize storm + rapid keypress + event fuzz tests","description":"## Background\nStress tests verify the TUI handles adverse input conditions without panic: rapid terminal resizes, fast keypress sequences, and randomized event traces. 
The event fuzz suite uses deterministic seed replay for reproducibility.\n\n## Approach\nResize storm:\n- Send 100 resize events in rapid succession (varying sizes from 20x10 to 300x80)\n- Assert no panic, no layout corruption, final render is valid for final size\n- FrankenTUI's BOCPD resize coalescing should handle this — verify it works\n\nRapid keypress:\n- Send 50 key events in <100ms: mix of navigation, filter input, mode switches\n- Assert no panic, no stuck input mode, final state is consistent\n- Verify Ctrl+C always exits regardless of state\n\nEvent fuzz (deterministic):\n- Generate 10k randomized event traces from: key events, resize events, paste events, tick events\n- Use seeded RNG for reproducibility\n- Replay each trace, check invariants after each event:\n - Navigation stack depth >= 1 (always has at least Dashboard)\n - InputMode transitions are valid (no impossible state combinations)\n - No panic\n - LoadState transitions are valid (no Idle->Refreshing without LoadingInitial first for initial load)\n- On invariant violation: log seed + event index for reproduction\n\n## Acceptance Criteria\n- [ ] 100 rapid resizes: no panic, valid final render\n- [ ] 50 rapid keys: no stuck input mode, Ctrl+C exits\n- [ ] 10k fuzz traces: zero invariant violations\n- [ ] Fuzz tests deterministically reproducible via seed\n- [ ] Navigation invariant: stack always has at least Dashboard\n- [ ] InputMode invariant: valid transitions only\n\n## Files\n- CREATE: crates/lore-tui/tests/stress_tests.rs\n- CREATE: crates/lore-tui/tests/fuzz_tests.rs\n\n## TDD Anchor\nRED: Write test_resize_storm_no_panic that sends 100 resize events to LoreApp, asserts no panic.\nGREEN: Ensure view() handles all terminal sizes gracefully.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_resize_storm\n\n## Edge Cases\n- Zero-size terminal (0x0): must not panic, skip rendering\n- Very large terminal (500x200): must not allocate unbounded memory\n- Paste events can 
contain arbitrary bytes including control chars — sanitize\n- Fuzz seed must be logged at test start for reproduction\n\n## Dependency Context\nUses LoreApp from \"Implement LoreApp Model\" task.\nUses NavigationStack from \"Implement NavigationStack\" task.\nUses FakeClock for deterministic time in fuzz tests.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:04:42.012118Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:38.299688Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-nu0d","depends_on_id":"bd-1b6k","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-nu0d","depends_on_id":"bd-2nfs","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} -{"id":"bd-nwux","title":"Epic: TUI Phase 3 — Power Features","description":"## Background\nPhase 3 adds the power-user screens: Search (3 modes with preview), Timeline (5-stage pipeline visualization), Who (5 expert/workload modes), Command Palette (fuzzy match), Trace (code provenance drill-down), and File History (per-file MR timeline). These screens leverage the foundation from Phases 1-2.\n\nThe Trace and File History screens were added after v0.8.0 introduced `lore trace` and `lore file-history` CLI commands. 
They provide interactive drill-down into code provenance chains (file -> MR -> issue -> discussion) and per-file change timelines with rename tracking.\n\n## Acceptance Criteria\n- [ ] Search supports lexical, hybrid, and semantic modes with split-pane preview\n- [ ] Search capability detection enables/disables modes based on available indexes\n- [ ] Timeline renders chronological event stream with color-coded event types\n- [ ] Who supports Expert, Workload, Reviews, Active, and Overlap modes (with include-closed toggle)\n- [ ] Command palette provides fuzzy-match access to all commands\n- [ ] Trace screen shows file -> MR -> issue -> discussion chains with interactive drill-down\n- [ ] File History screen shows per-file MR timeline with rename chain and DiffNote snippets","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:00:27.375421Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:07:05.438191Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-nwux","depends_on_id":"bd-3pxe","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-nwux","title":"Epic: TUI Phase 3 — Power Features","description":"## Background\nPhase 3 adds the power-user screens: Search (3 modes with preview), Timeline (5-stage pipeline visualization), Who (5 expert/workload modes), Command Palette (fuzzy match), Trace (code provenance drill-down), and File History (per-file MR timeline). These screens leverage the foundation from Phases 1-2.\n\nThe Trace and File History screens were added after v0.8.0 introduced `lore trace` and `lore file-history` CLI commands. 
They provide interactive drill-down into code provenance chains (file -> MR -> issue -> discussion) and per-file change timelines with rename tracking.\n\n## Acceptance Criteria\n- [ ] Search supports lexical, hybrid, and semantic modes with split-pane preview\n- [ ] Search capability detection enables/disables modes based on available indexes\n- [ ] Timeline renders chronological event stream with color-coded event types\n- [ ] Who supports Expert, Workload, Reviews, Active, and Overlap modes (with include-closed toggle)\n- [ ] Command palette provides fuzzy-match access to all commands\n- [ ] Trace screen shows file -> MR -> issue -> discussion chains with interactive drill-down\n- [ ] File History screen shows per-file MR timeline with rename chain and DiffNote snippets","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:00:27.375421Z","created_by":"tayloreernisse","updated_at":"2026-02-19T03:51:04.284313Z","closed_at":"2026-02-19T03:51:04.284262Z","close_reason":"All Phase 3 screens complete: Search, Timeline, Who, Command Palette (prior sessions) + File History (bd-1up1) + Trace (bd-2uzm). 586 TUI tests pass.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-nwux","depends_on_id":"bd-3pxe","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-o7b","title":"[CP1] gi show issue command","description":"## Background\n\nThe `gi show issue ` command displays detailed information about a single issue including metadata, description, labels, and all discussions with their notes. 
It provides a complete view similar to the GitLab web UI.\n\n## Approach\n\n### Module: src/cli/commands/show.rs\n\n### Clap Definition\n\n```rust\n#[derive(Args)]\npub struct ShowArgs {\n /// Entity type\n #[arg(value_parser = [\"issue\", \"mr\"])]\n pub entity: String,\n\n /// Entity IID\n pub iid: i64,\n\n /// Project path (required if ambiguous)\n #[arg(long)]\n pub project: Option,\n}\n```\n\n### Handler Function\n\n```rust\npub async fn handle_show(args: ShowArgs, conn: &Connection) -> Result<()>\n```\n\n### Logic (for entity=\"issue\")\n\n1. **Find issue**: Query by iid, optionally filtered by project\n - If multiple projects have same iid, require --project or error\n2. **Load metadata**: title, state, author, created_at, updated_at, web_url\n3. **Load labels**: JOIN through issue_labels to labels table\n4. **Load discussions**: All discussions for this issue\n5. **Load notes**: All notes for each discussion, ordered by position\n6. **Format output**: Rich display with sections\n\n### Output Format (matches PRD)\n\n```\nIssue #1234: Authentication redesign\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\nProject: group/project-one\nState: opened\nAuthor: @johndoe\nCreated: 2024-01-15\nUpdated: 2024-03-20\nLabels: enhancement, auth\nURL: https://gitlab.example.com/group/project-one/-/issues/1234\n\nDescription:\n We need to redesign the authentication flow to support...\n\nDiscussions (5):\n\n @janedoe (2024-01-16):\n I agree we should move to JWT-based auth...\n\n @johndoe (2024-01-16):\n What about refresh token strategy?\n\n @bobsmith (2024-01-17):\n Have we considered OAuth2?\n```\n\n### Queries\n\n```sql\n-- Find issue\nSELECT i.*, p.path as project_path\nFROM issues i\nJOIN projects p ON i.project_id = p.id\nWHERE i.iid = ? AND (p.path = ? OR ? 
IS NULL)\n\n-- Get labels\nSELECT l.name FROM labels l\nJOIN issue_labels il ON l.id = il.label_id\nWHERE il.issue_id = ?\n\n-- Get discussions with notes\nSELECT d.*, n.* FROM discussions d\nJOIN notes n ON d.id = n.discussion_id\nWHERE d.issue_id = ?\nORDER BY d.first_note_at, n.position\n```\n\n## Acceptance Criteria\n\n- [ ] Shows issue metadata (title, state, author, dates, URL)\n- [ ] Shows labels as comma-separated list\n- [ ] Shows description (truncated if very long)\n- [ ] Shows discussions grouped with notes indented\n- [ ] Handles --project filter correctly\n- [ ] Errors clearly if iid is ambiguous without --project\n\n## Files\n\n- src/cli/commands/mod.rs (add `pub mod show;`)\n- src/cli/commands/show.rs (create)\n- src/cli/mod.rs (add Show variant to Commands enum)\n\n## TDD Loop\n\nRED:\n```rust\n#[tokio::test] async fn show_issue_displays_metadata()\n#[tokio::test] async fn show_issue_displays_labels()\n#[tokio::test] async fn show_issue_displays_discussions()\n#[tokio::test] async fn show_issue_requires_project_when_ambiguous()\n```\n\nGREEN: Implement handler with queries and formatting\n\nVERIFY: `cargo test show_issue`\n\n## Edge Cases\n\n- Issue with no labels - show \"Labels: (none)\"\n- Issue with no discussions - show \"Discussions: (none)\"\n- Issue with very long description - truncate with \"...\"\n- System notes in discussions - filter out or show with [system] prefix\n- Individual notes (not threaded) - show without reply indentation","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-25T17:02:38.384702Z","created_by":"tayloreernisse","updated_at":"2026-01-25T23:05:25.688102Z","closed_at":"2026-01-25T23:05:25.688043Z","close_reason":"Implemented gi show issue command with metadata, labels, and discussions 
display","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-o7b","depends_on_id":"bd-208","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-o7b","depends_on_id":"bd-hbo","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-ozy","title":"[CP1] Ingestion orchestrator","description":"## Background\n\nThe ingestion orchestrator coordinates issue sync followed by dependent discussion sync. It implements the CP1 canonical pattern: fetch issues, identify which need discussion sync (updated_at advanced), then execute discussion sync with bounded concurrency.\n\n## Approach\n\n### Module: src/ingestion/orchestrator.rs\n\n### Main Function\n\n```rust\npub async fn ingest_project_issues(\n conn: &Connection,\n client: &GitLabClient,\n config: &Config,\n project_id: i64, // Local DB project ID\n gitlab_project_id: i64,\n) -> Result\n\n#[derive(Debug, Default)]\npub struct IngestProjectResult {\n pub issues_fetched: usize,\n pub issues_upserted: usize,\n pub labels_created: usize,\n pub discussions_fetched: usize,\n pub notes_fetched: usize,\n pub system_notes_count: usize,\n pub issues_skipped_discussion_sync: usize,\n}\n```\n\n### Orchestration Steps\n\n1. **Call issue ingestion**: `ingest_issues(conn, client, config, project_id, gitlab_project_id)`\n2. **Get issues needing discussion sync**: From IngestIssuesResult.issues_needing_discussion_sync\n3. **Execute bounded discussion sync**:\n - Use `tokio::task::LocalSet` for single-threaded runtime\n - Respect `config.sync.dependent_concurrency` (default: 5)\n - For each IssueForDiscussionSync:\n - Call `ingest_issue_discussions(...)`\n - Aggregate results\n4. 
**Calculate skipped count**: total_issues - issues_needing_discussion_sync.len()\n\n### Bounded Concurrency Pattern\n\n```rust\nuse futures::stream::{self, StreamExt};\n\nlet local_set = LocalSet::new();\nlocal_set.run_until(async {\n stream::iter(issues_needing_sync)\n .map(|issue| async {\n ingest_issue_discussions(\n conn, client, config,\n project_id, gitlab_project_id,\n issue.iid, issue.local_issue_id, issue.updated_at,\n ).await\n })\n .buffer_unordered(config.sync.dependent_concurrency)\n .try_collect::>()\n .await\n}).await\n```\n\nNote: Single-threaded runtime means concurrency is I/O-bound, not parallel execution.\n\n## Acceptance Criteria\n\n- [ ] Orchestrator calls issue ingestion first\n- [ ] Only issues with updated_at > discussions_synced_for_updated_at get discussion sync\n- [ ] Bounded concurrency respects dependent_concurrency config\n- [ ] Results aggregated from both issue and discussion ingestion\n- [ ] issues_skipped_discussion_sync accurately reflects unchanged issues\n\n## Files\n\n- src/ingestion/mod.rs (add `pub mod orchestrator;`)\n- src/ingestion/orchestrator.rs (create)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/orchestrator_tests.rs\n#[tokio::test] async fn orchestrates_issue_then_discussion_sync()\n#[tokio::test] async fn skips_discussion_sync_for_unchanged_issues()\n#[tokio::test] async fn respects_bounded_concurrency()\n#[tokio::test] async fn aggregates_results_correctly()\n```\n\nGREEN: Implement orchestrator with bounded concurrency\n\nVERIFY: `cargo test orchestrator`\n\n## Edge Cases\n\n- All issues unchanged - no discussion sync calls\n- All issues new - all get discussion sync\n- dependent_concurrency=1 - sequential discussion fetches\n- Issue ingestion fails - orchestrator returns error, no discussion 
sync","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.289941Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:54:07.447647Z","closed_at":"2026-01-25T22:54:07.447577Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-ozy","depends_on_id":"bd-208","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-ozy","depends_on_id":"bd-hbo","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-pgdw","title":"OBSERV: Add root tracing span with run_id to sync and ingest","description":"## Background\nA root tracing span per command invocation provides the top of the span hierarchy. All child spans (ingest_issues, fetch_pages, etc.) inherit the run_id field, making every log line within a run filterable by jq.\n\n## Approach\nIn run_sync() (src/cli/commands/sync.rs:54), after generating run_id, create a root span:\n\n```rust\npub async fn run_sync(config: &Config, options: SyncOptions) -> Result {\n let run_id = &uuid::Uuid::new_v4().to_string()[..8];\n let _root = tracing::info_span!(\"sync\", %run_id).entered();\n // ... existing sync pipeline code\n}\n```\n\nIn run_ingest() (src/cli/commands/ingest.rs:107), same pattern:\n\n```rust\npub async fn run_ingest(...) -> Result {\n let run_id = &uuid::Uuid::new_v4().to_string()[..8];\n let _root = tracing::info_span!(\"ingest\", %run_id, resource_type).entered();\n // ... existing ingest code\n}\n```\n\nCRITICAL: The _root guard must live for the entire function scope. If it drops early (e.g., shadowed or moved into a block), child spans lose their parent context. Use let _root (underscore prefix) to signal intentional unused binding that's kept alive for its Drop impl.\n\nFor async functions, use .entered() NOT .enter(). In async Rust, Span::enter() returns a guard that is NOT Send, which prevents the future from being sent across threads. 
However, .entered() on an info_span! creates an Entered which is also !Send. For async, prefer:\n\n```rust\nlet root_span = tracing::info_span!(\"sync\", %run_id);\nasync move {\n // ... body\n}.instrument(root_span).await\n```\n\nOr use #[instrument] on the function itself with the run_id field.\n\n## Acceptance Criteria\n- [ ] Root span established for every sync and ingest invocation\n- [ ] run_id appears in span context of all child log lines\n- [ ] jq 'select(.spans[]? | .run_id)' can extract all lines from a run\n- [ ] Span is active for entire function duration (not dropped early)\n- [ ] Works correctly with async/await (span propagated across .await points)\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/cli/commands/sync.rs (add root span in run_sync, line ~54)\n- src/cli/commands/ingest.rs (add root span in run_ingest, line ~107)\n\n## TDD Loop\nRED: test_root_span_propagates_run_id (capture JSON log output, verify run_id in span context)\nGREEN: Add root spans to run_sync and run_ingest\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- Async span propagation: .entered() is !Send. For async functions, use .instrument() or #[instrument]. The run_sync function is async (line 54: pub async fn run_sync).\n- Nested command calls: run_sync calls run_ingest internally. If both create root spans, we get a nested hierarchy: sync > ingest. This is correct behavior -- the ingest span becomes a child of sync.\n- Span storage: tracing-subscriber registry handles span storage automatically. 
No manual setup needed beyond adding the layer.","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:54:07.771605Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:19:33.006274Z","closed_at":"2026-02-04T17:19:33.006227Z","close_reason":"Added root tracing spans with run_id to run_sync() and run_ingest() using .instrument() pattern for async compatibility","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-pgdw","depends_on_id":"bd-2ni","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-pgdw","depends_on_id":"bd-37qw","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} @@ -316,13 +316,13 @@ {"id":"bd-tfh3","title":"WHO: Comprehensive test suite","description":"## Background\n\n20+ tests covering mode resolution, path query construction, SQL queries, and edge cases. All tests use in-memory SQLite with run_migrations().\n\n## Approach\n\n### Test helpers (shared across all tests):\n```rust\nfn setup_test_db() -> Connection {\n let conn = create_connection(Path::new(\":memory:\")).unwrap();\n run_migrations(&conn).unwrap();\n conn\n}\nfn insert_project(conn, id, path) // gitlab_project_id=id*100, web_url from path\nfn insert_mr(conn, id, project_id, iid, author, state) // gitlab_id=id*10, timestamps=now_ms()\nfn insert_issue(conn, id, project_id, iid, author) // state='opened'\nfn insert_discussion(conn, id, project_id, mr_id, issue_id, resolvable, resolved)\n#[allow(clippy::too_many_arguments)]\nfn insert_diffnote(conn, id, discussion_id, project_id, author, file_path, body)\nfn insert_assignee(conn, issue_id, username)\nfn insert_reviewer(conn, mr_id, username)\n```\n\n### Test list with key assertions:\n\n**Mode resolution:**\n- test_is_file_path_discrimination: src/auth/ -> Expert, asmith -> Workload, @asmith -> Workload, asmith+--reviews -> Reviews, --path README.md -> Expert, --path Makefile -> Expert\n\n**Path 
queries:**\n- test_build_path_query: trailing/ -> prefix, no-dot-no-slash -> prefix, file.ext -> exact, root.md -> exact, .github/workflows/ -> prefix, v1.2/auth/ -> prefix, test_files/ -> escaped prefix\n- test_build_path_query_exact_does_not_escape: README_with_underscore.md -> raw (no \\\\_)\n- test_path_flag_dotless_root_file_is_exact: Makefile -> exact, Dockerfile -> exact\n- test_build_path_query_dotless_subdir_file_uses_db_probe: src/Dockerfile with DB data -> exact; without -> prefix\n- test_build_path_query_probe_is_project_scoped: data in proj 1, unscoped -> exact; scoped proj 2 -> prefix; scoped proj 1 -> exact\n- test_escape_like: normal->normal, has_underscore->has\\\\_underscore, has%percent->has\\\\%percent\n- test_normalize_repo_path: ./src/ -> src/, /src/ -> src/, ././src -> src, backslash conversion, // collapse, whitespace trim\n\n**Queries:**\n- test_expert_query: 3 experts ranked correctly, reviewer_b first\n- test_expert_excludes_self_review_notes: author_a review_mr_count=0, author_mr_count>0\n- test_expert_truncation: limit=2 truncated=true len=2; limit=10 truncated=false\n- test_workload_query: assigned_issues.len()=1, authored_mrs.len()=1\n- test_reviews_query: total=3, categorized=2, categories.len()=2\n- test_normalize_review_prefix: suggestion/Suggestion:/nit/nitpick/non-blocking/TODO\n- test_active_query: total=1, discussions.len()=1, note_count=2 (NOT 1), discussion_id>0\n- test_active_participants_sorted: [\"alpha_user\", \"zebra_user\"]\n- test_overlap_dual_roles: A+R role, both touch counts >0, mr_refs contain project path\n- test_overlap_multi_project_mr_refs: team/backend!100 AND team/frontend!100 present\n- test_overlap_excludes_self_review_notes: review_touch_count=0\n- test_lookup_project_path: round-trip \"team/backend\"\n\n## Files\n\n- `src/cli/commands/who.rs` (inside #[cfg(test)] mod tests)\n\n## TDD Loop\n\nTests are written alongside each query bead (RED phase). 
This bead tracks the full test suite as a verification gate.\nVERIFY: `cargo test -- who`\n\n## Acceptance Criteria\n\n- [ ] All 20+ tests pass\n- [ ] cargo test -- who shows 0 failures\n- [ ] No clippy warnings from test code (use #[allow(clippy::too_many_arguments)] on insert_diffnote)\n\n## Edge Cases\n\n- In-memory DB includes migration 017 (indexes created but no real data perf benefit)\n- Test timestamps use now_ms() — tests are time-independent (since_ms=0 in most queries)\n- insert_mr uses gitlab_id=id*10 to avoid conflicts","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:41:25.839065Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.601284Z","closed_at":"2026-02-08T04:10:29.601248Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-tfh3","depends_on_id":"bd-1rdi","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-tfh3","depends_on_id":"bd-2711","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-tfh3","depends_on_id":"bd-3mj2","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-tfh3","depends_on_id":"bd-b51e","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-tfh3","depends_on_id":"bd-m7k1","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-tfh3","depends_on_id":"bd-s3rc","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-tfh3","depends_on_id":"bd-zqpf","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-tir","title":"Implement generic dependent fetch queue (enqueue + drain)","description":"## Background\nThe pending_dependent_fetches table (migration 011) provides a generic job 
queue for all dependent resource fetches across Gates 1, 2, and 4. This module implements the queue operations: enqueue, claim, complete, fail, and stale lock reclamation. It generalizes the existing discussion_queue.rs pattern.\n\n## Approach\nCreate src/core/dependent_queue.rs with:\n\n```rust\nuse rusqlite::Connection;\nuse super::error::Result;\n\n/// A pending job from the dependent fetch queue.\npub struct PendingJob {\n pub id: i64,\n pub project_id: i64,\n pub entity_type: String, // \"issue\" | \"merge_request\"\n pub entity_iid: i64,\n pub entity_local_id: i64,\n pub job_type: String, // \"resource_events\" | \"mr_closes_issues\" | \"mr_diffs\"\n pub payload_json: Option,\n pub attempts: i32,\n}\n\n/// Enqueue a dependent fetch job. Idempotent via UNIQUE constraint (INSERT OR IGNORE).\npub fn enqueue_job(\n conn: &Connection,\n project_id: i64,\n entity_type: &str,\n entity_iid: i64,\n entity_local_id: i64,\n job_type: &str,\n payload_json: Option<&str>,\n) -> Result // returns true if actually inserted (not deduped)\n\n/// Claim a batch of jobs for processing. Atomically sets locked_at.\n/// Only claims jobs where locked_at IS NULL AND (next_retry_at IS NULL OR next_retry_at <= now).\npub fn claim_jobs(\n conn: &Connection,\n job_type: &str,\n batch_size: usize,\n) -> Result>\n\n/// Mark a job as complete (DELETE the row).\npub fn complete_job(conn: &Connection, job_id: i64) -> Result<()>\n\n/// Mark a job as failed. 
Increment attempts, set next_retry_at with exponential backoff, clear locked_at.\n/// Backoff: 30s * 2^(attempts-1), capped at 480s.\npub fn fail_job(conn: &Connection, job_id: i64, error: &str) -> Result<()>\n\n/// Reclaim stale locks (locked_at older than threshold).\n/// Returns count of reclaimed jobs.\npub fn reclaim_stale_locks(conn: &Connection, stale_threshold_minutes: u32) -> Result\n\n/// Count pending jobs by job_type (for stats/progress).\npub fn count_pending_jobs(conn: &Connection) -> Result>\n```\n\nRegister in src/core/mod.rs: `pub mod dependent_queue;`\n\n**Key implementation details:**\n- claim_jobs uses a two-step approach: SELECT ids WHERE available, then UPDATE SET locked_at for those ids. Use a single transaction.\n- enqueued_at = current time in ms epoch UTC\n- locked_at = current time in ms epoch UTC when claimed\n- Backoff formula: next_retry_at = now + min(30_000 * 2^(attempts-1), 480_000) ms\n\n## Acceptance Criteria\n- [ ] enqueue_job is idempotent (INSERT OR IGNORE on UNIQUE constraint)\n- [ ] enqueue_job returns true on insert, false on dedup\n- [ ] claim_jobs only claims unlocked, non-retrying jobs\n- [ ] claim_jobs respects batch_size limit\n- [ ] complete_job DELETEs the row\n- [ ] fail_job increments attempts, sets next_retry_at, clears locked_at, records last_error\n- [ ] Backoff: 30s, 60s, 120s, 240s, 480s (capped)\n- [ ] reclaim_stale_locks clears locked_at for jobs older than threshold\n- [ ] count_pending_jobs returns accurate counts by job_type\n\n## Files\n- src/core/dependent_queue.rs (new)\n- src/core/mod.rs (add `pub mod dependent_queue;`)\n\n## TDD Loop\nRED: tests/dependent_queue_tests.rs (new):\n- `test_enqueue_job_basic` - enqueue a job, verify it exists\n- `test_enqueue_job_idempotent` - enqueue same job twice, verify single row\n- `test_claim_jobs_batch` - enqueue 5, claim 3, verify 3 returned and locked\n- `test_claim_jobs_skips_locked` - lock a job, claim again, verify it's skipped\n- 
`test_claim_jobs_respects_retry_at` - set next_retry_at in future, verify skipped\n- `test_claim_jobs_includes_retryable` - set next_retry_at in past, verify claimed\n- `test_complete_job_deletes` - complete a job, verify gone\n- `test_fail_job_backoff` - fail 3 times, verify exponential next_retry_at values\n- `test_reclaim_stale_locks` - set old locked_at, reclaim, verify cleared\n\nSetup: create_test_db() with migrations 001-011, seed project + issue.\n\nGREEN: Implement all functions\n\nVERIFY: `cargo test dependent_queue -- --nocapture`\n\n## Edge Cases\n- claim_jobs with batch_size=0 should return empty vec (not error)\n- enqueue_job with invalid job_type will be rejected by CHECK constraint — map rusqlite error to LoreError\n- fail_job on a non-existent job_id should be a no-op (job may have been completed by another path)\n- reclaim_stale_locks with 0 threshold would reclaim everything — ensure threshold is reasonable (minimum 1 min)\n- Timestamps must use consistent ms epoch UTC (not seconds)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:31:57.290181Z","created_by":"tayloreernisse","updated_at":"2026-02-03T16:19:14.222626Z","closed_at":"2026-02-03T16:19:14.222579Z","close_reason":"Implemented PendingJob struct, enqueue_job, claim_jobs, complete_job, fail_job (with exponential backoff), reclaim_stale_locks, count_pending_jobs in src/core/dependent_queue.rs.","compaction_level":0,"original_size":0,"labels":["gate-1","phase-b","queue"],"dependencies":[{"issue_id":"bd-tir","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-tir","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-tiux","title":"Add sync_runs migration 027 for surgical mode columns","description":"## Background\nThe `sync_runs` table (created in migration 001, enriched in 014) tracks sync run lifecycle for observability and crash 
recovery. Surgical sync needs additional columns to track its distinct mode, phase progression, IID targeting, and per-stage counters. This is a schema-only change — no Rust struct changes beyond registering the migration SQL file.\n\nThe migration system uses a `MIGRATIONS` array in `src/core/db.rs`. Each entry is a `(version, sql_file_name)` tuple. SQL files live in `src/core/migrations/`. The current latest migration is 026 (`026_scoring_indexes.sql`), so this will be migration 027. `LATEST_SCHEMA_VERSION` is computed as `MIGRATIONS.len() as i32` and automatically becomes 27.\n\n## Approach\n\n### Step 1: Create migration SQL file: `src/core/migrations/027_surgical_sync_runs.sql`\n\n```sql\n-- Migration 027: Extend sync_runs for surgical sync observability\n-- Adds mode/phase tracking and surgical-specific counters.\n\nALTER TABLE sync_runs ADD COLUMN mode TEXT;\nALTER TABLE sync_runs ADD COLUMN phase TEXT;\nALTER TABLE sync_runs ADD COLUMN surgical_iids_json TEXT;\nALTER TABLE sync_runs ADD COLUMN issues_fetched INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN mrs_fetched INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN issues_ingested INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN mrs_ingested INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN skipped_stale INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN docs_regenerated INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN docs_embedded INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN warnings_count INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN cancelled_at INTEGER;\n\nCREATE INDEX IF NOT EXISTS idx_sync_runs_mode_started\n ON sync_runs(mode, started_at DESC);\nCREATE INDEX IF NOT EXISTS idx_sync_runs_status_phase_started\n ON sync_runs(status, phase, started_at DESC);\n```\n\n**Column semantics:**\n- `mode`: \"standard\" or \"surgical\" (NULL for pre-migration rows)\n- `phase`: preflight, ingest, 
dependents, docs, embed, done, failed, cancelled\n- `surgical_iids_json`: JSON like `{\"issues\":[7,8],\"mrs\":[101]}`\n- Counter columns: integers with DEFAULT 0 for backward compat\n- `cancelled_at`: ms-epoch timestamp, NULL unless cancelled\n\n### Step 2: Register in MIGRATIONS array (src/core/db.rs)\n\nAdd to the `MIGRATIONS` array (currently 26 entries ending with `026_scoring_indexes.sql`):\n\n```rust\n(27, include_str!(\"migrations/027_surgical_sync_runs.sql\")),\n```\n\n## Acceptance Criteria\n- [ ] File `src/core/migrations/027_surgical_sync_runs.sql` exists with all ALTER TABLE and CREATE INDEX statements\n- [ ] Migration 027 is registered in MIGRATIONS array in `src/core/db.rs`\n- [ ] `LATEST_SCHEMA_VERSION` evaluates to 27\n- [ ] Migration runs successfully on fresh databases (in-memory test)\n- [ ] Pre-existing sync_runs rows are unaffected (NULL mode/phase, 0 counters)\n- [ ] New columns accept expected values via INSERT and SELECT round-trip\n- [ ] NULL defaults work for mode, phase, surgical_iids_json, cancelled_at\n- [ ] DEFAULT 0 works for all counter columns\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo test` passes (all migration tests use in-memory DB)\n\n## Files\n- CREATE: src/core/migrations/027_surgical_sync_runs.sql\n- MODIFY: src/core/db.rs (add entry to MIGRATIONS array)\n\n## TDD Anchor\nRED: Write tests in `src/core/sync_run_tests.rs` (which is already `#[path]`-included from `sync_run.rs`):\n\n```rust\n#[test]\nfn sync_run_surgical_columns_exist() {\n let conn = setup_test_db();\n conn.execute(\n \"INSERT INTO sync_runs (started_at, heartbeat_at, status, command, mode, phase, surgical_iids_json)\n VALUES (1000, 1000, 'running', 'sync', 'surgical', 'preflight', '{\\\"issues\\\":[7],\\\"mrs\\\":[]}')\",\n [],\n ).unwrap();\n let (mode, phase, iids_json): (String, String, String) = conn.query_row(\n \"SELECT mode, phase, surgical_iids_json FROM sync_runs WHERE mode = 'surgical'\",\n [],\n |r| Ok((r.get(0)?, r.get(1)?, 
r.get(2)?)),\n ).unwrap();\n assert_eq!(mode, \"surgical\");\n assert_eq!(phase, \"preflight\");\n assert!(iids_json.contains(\"7\"));\n}\n\n#[test]\nfn sync_run_counter_defaults_are_zero() {\n let conn = setup_test_db();\n conn.execute(\n \"INSERT INTO sync_runs (started_at, heartbeat_at, status, command)\n VALUES (2000, 2000, 'running', 'sync')\",\n [],\n ).unwrap();\n let row_id = conn.last_insert_rowid();\n let (issues_fetched, mrs_fetched, docs_regenerated, warnings_count): (i64, i64, i64, i64) = conn.query_row(\n \"SELECT issues_fetched, mrs_fetched, docs_regenerated, warnings_count FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?, r.get(3)?)),\n ).unwrap();\n assert_eq!(issues_fetched, 0);\n assert_eq!(mrs_fetched, 0);\n assert_eq!(docs_regenerated, 0);\n assert_eq!(warnings_count, 0);\n}\n\n#[test]\nfn sync_run_nullable_columns_default_to_null() {\n let conn = setup_test_db();\n conn.execute(\n \"INSERT INTO sync_runs (started_at, heartbeat_at, status, command)\n VALUES (3000, 3000, 'running', 'sync')\",\n [],\n ).unwrap();\n let row_id = conn.last_insert_rowid();\n let (mode, phase, cancelled_at): (Option, Option, Option) = conn.query_row(\n \"SELECT mode, phase, cancelled_at FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?)),\n ).unwrap();\n assert!(mode.is_none());\n assert!(phase.is_none());\n assert!(cancelled_at.is_none());\n}\n\n#[test]\nfn sync_run_counter_round_trip() {\n let conn = setup_test_db();\n conn.execute(\n \"INSERT INTO sync_runs (started_at, heartbeat_at, status, command, mode, issues_fetched, mrs_ingested, docs_embedded)\n VALUES (4000, 4000, 'succeeded', 'sync', 'surgical', 3, 2, 5)\",\n [],\n ).unwrap();\n let row_id = conn.last_insert_rowid();\n let (issues_fetched, mrs_ingested, docs_embedded): (i64, i64, i64) = conn.query_row(\n \"SELECT issues_fetched, mrs_ingested, docs_embedded FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, 
r.get(2)?)),\n ).unwrap();\n assert_eq!(issues_fetched, 3);\n assert_eq!(mrs_ingested, 2);\n assert_eq!(docs_embedded, 5);\n}\n```\n\nGREEN: Create the SQL file and register the migration.\nVERIFY: `cargo test sync_run_surgical && cargo test sync_run_counter && cargo test sync_run_nullable`\n\n## Edge Cases\n- SQLite ALTER TABLE ADD COLUMN requires DEFAULT for NOT NULL columns. All counter columns use `DEFAULT 0`.\n- mode/phase/surgical_iids_json/cancelled_at are nullable TEXT/INTEGER — no DEFAULT needed.\n- Pre-migration rows get NULL for new nullable columns and 0 for counter columns — backward compatible.\n- The indexes (`idx_sync_runs_mode_started`, `idx_sync_runs_status_phase_started`) use `IF NOT EXISTS` for idempotency.\n\n## Dependency Context\nThis is a leaf/foundation bead with no upstream dependencies. Downstream bead bd-arka (SyncRunRecorder extensions) depends on these columns existing to write surgical mode lifecycle data.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:13:19.914672Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:03:28.195017Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-tiux","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-tiux","depends_on_id":"bd-arka","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} -{"id":"bd-u7se","title":"Implement Who screen (5 modes: expert/workload/reviews/active/overlap)","description":"## Background\nThe Who screen is the people explorer, showing contributor expertise and workload across 5 modes. 
Each mode renders differently: Expert shows file-path expertise scores, Workload shows issue/MR assignment counts, Reviews shows review activity, Active shows recent contributors, Overlap shows shared file knowledge.\n\nOn master, the who command was refactored from a single who.rs into src/cli/commands/who/ module with types.rs, expert.rs, workload.rs, reviews.rs, active.rs, overlap.rs. Types are cleanly separated in types.rs. Query functions are currently pub(super) — bd-1f5b promotes them to pub and moves types to core.\n\n## Data Shapes (from src/cli/commands/who/types.rs on master)\n\nResult types are per-mode:\n- WhoResult enum: Expert(ExpertResult), Workload(WorkloadResult), Reviews(ReviewsResult), Active(ActiveResult), Overlap(OverlapResult)\n- ExpertResult: path_query, path_match, experts Vec, truncated — Expert has username, score, components, mr_refs, details\n- WorkloadResult: username, assigned_issues, authored_mrs, reviewing_mrs, unresolved_discussions (each with truncated flag)\n- ReviewsResult: username, total_diffnotes, categorized_count, mrs_reviewed, categories Vec\n- ActiveResult: discussions Vec, total_unresolved_in_window, truncated\n- OverlapResult: path_query, path_match, users Vec, truncated\n\nAfter bd-1f5b, these live in src/core/who_types.rs.\n\n## Query Function Signatures (after bd-1f5b promotes visibility)\n\n```rust\n// expert.rs — path-based file expertise\npub fn query_expert(conn, path, project_id, since_ms, as_of_ms, limit, scoring: &ScoringConfig, detail, explain_score, include_bots) -> Result\n\n// workload.rs — username-based assignment view\npub fn query_workload(conn, username, project_id, since_ms: Option, limit, include_closed: bool) -> Result\n\n// reviews.rs — username-based review activity\npub fn query_reviews(conn, username, project_id, since_ms) -> Result\n\n// active.rs — recent unresolved discussions\npub fn query_active(conn, project_id, since_ms, limit, include_closed: bool) -> Result\n\n// overlap.rs — shared 
file knowledge between contributors\npub fn query_overlap(conn, path, project_id, since_ms, limit) -> Result\n```\n\nNote: include_closed only affects query_workload and query_active. Expert, Reviews, and Overlap ignore it.\n\n## Approach\n\n**State** (state/who.rs):\n- WhoState: mode (WhoMode), result (Option), path (String), path_input (TextInput), username_input (TextInput), path_focused (bool), username_focused (bool), selected_index (usize), include_closed (bool), scroll_offset (u16)\n- WhoMode enum: Expert, Workload, Reviews, Active, Overlap\n- Expert and Overlap modes need a path input. Workload and Reviews need a username input. Active needs neither.\n\n**Action** (action.rs):\n- fetch_who_expert(conn, path, project_id, since_ms, limit, scoring) -> Result\n- fetch_who_workload(conn, username, project_id, since_ms, limit, include_closed) -> Result\n- fetch_who_reviews(conn, username, project_id, since_ms) -> Result\n- fetch_who_active(conn, project_id, since_ms, limit, include_closed) -> Result\n- fetch_who_overlap(conn, path, project_id, since_ms, limit) -> Result\nEach wraps the corresponding query_* function from who module.\n\n**View** (view/who.rs):\n- Mode tabs at top: E(xpert) | W(orkload) | R(eviews) | A(ctive) | O(verlap)\n- Input area adapts to mode: path input for Expert/Overlap, username input for Workload/Reviews, hidden for Active\n- Expert: sorted table of authors by expertise score + bar chart\n- Workload: sections for assigned issues, authored MRs, reviewing MRs, unresolved discussions\n- Reviews: table of review categories with counts and percentages\n- Active: time-sorted list of recent unresolved discussions with participants\n- Overlap: table of users with author/review touch counts\n- Keyboard: 1-5 or Tab to switch modes, j/k scroll, / focus input, c toggle include-closed, q back\n- Status bar indicator shows [closed: on/off] when include_closed is toggled\n- Truncation indicators: when result.truncated is true, show \"showing N of 
more\" footer\n\n## Acceptance Criteria\n- [ ] 5 modes switchable via Tab or number keys\n- [ ] Expert mode: path input filters by file path, shows expertise scores in table with bar chart\n- [ ] Workload mode: username input, shows 4 sections (assigned issues, authored MRs, reviewing MRs, unresolved discussions)\n- [ ] Reviews mode: username input, shows review category breakdown table\n- [ ] Active mode: no input needed, shows recent unresolved discussions sorted by last_note_at\n- [ ] Overlap mode: path input, shows table of users with touch counts\n- [ ] Toggle for include-closed (c key) with visual indicator — re-fetches only Workload and Active modes\n- [ ] Truncation footer when results exceed limit\n- [ ] Enter on a person in Expert/Overlap navigates to Workload for that username\n- [ ] Enter on an entity in Workload/Active navigates to IssueDetail or MrDetail\n\n## Files\n- MODIFY: crates/lore-tui/src/state/who.rs (expand from current 12-line stub)\n- MODIFY: crates/lore-tui/src/state/mod.rs (update WhoState import, add to has_text_focus/blur_text_focus)\n- MODIFY: crates/lore-tui/src/message.rs (replace placeholder WhoResult with import from core, add WhoMode enum, add Msg::WhoModeChanged, Msg::WhoIncludeClosedToggled)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_who_* functions)\n- CREATE: crates/lore-tui/src/view/who.rs\n- MODIFY: crates/lore-tui/src/view/mod.rs (add who view dispatch)\n\n## TDD Anchor\nRED: Write test_fetch_who_expert_returns_result that opens in-memory DB, inserts test MR + file changes + notes, calls fetch_who_expert(\"src/\"), asserts ExpertResult with one expert.\nGREEN: Implement fetch_who_expert calling query_expert from who module.\nVERIFY: cargo test -p lore-tui who -- --nocapture\n\nAdditional tests:\n- test_who_mode_switching: cycle through 5 modes, assert input field visibility changes\n- test_include_closed_only_affects_workload_active: toggle include_closed, verify Expert/Reviews/Overlap dont re-fetch\n- 
test_who_empty_result: mode with no data shows empty state message\n- test_who_truncation_indicator: result with truncated=true shows footer\n\n## Edge Cases\n- Empty results for any mode: show \"No data\" message with mode-specific hint\n- Expert mode with no diff notes: explain that expert data requires diff notes to be synced\n- Very long file paths: truncate from left (show ...path/to/file.rs)\n- include_closed toggle re-fetches immediately for Workload/Active, no-op for other modes\n- Workload unresolved_discussions may reference closed entities — include_closed=true shows them\n- ScoringConfig accessed from Config (available to TUI via db.rs module)\n\n## Dependency Context\n- bd-1f5b (blocks): Promotes query_expert, query_workload, query_reviews, query_active, query_overlap to pub and moves types to src/core/who_types.rs. Without this, TUI cannot call who queries.\n- Current WhoState stub (12 lines) in state/who.rs references message::WhoResult placeholder — must be replaced with core types.\n- AppState.has_text_focus() in state/mod.rs:194-198 must be updated to include who path_focused and username_focused.\n- AppState.blur_text_focus() in state/mod.rs:202-206 must be updated similarly.\n- Navigation from Expert/Overlap rows: Enter on a username should push Screen::Who with mode=Workload pre-filled — requires passing username to WhoState.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:22.734056Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:32:30.621517Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-u7se","depends_on_id":"bd-29qw","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-u7se","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-u7se","title":"Implement Who screen (5 modes: expert/workload/reviews/active/overlap)","description":"## Background\nThe Who screen 
is the people explorer, showing contributor expertise and workload across 5 modes. Each mode renders differently: Expert shows file-path expertise scores, Workload shows issue/MR assignment counts, Reviews shows review activity, Active shows recent contributors, Overlap shows shared file knowledge.\n\nOn master, the who command was refactored from a single who.rs into src/cli/commands/who/ module with types.rs, expert.rs, workload.rs, reviews.rs, active.rs, overlap.rs. Types are cleanly separated in types.rs. Query functions are currently pub(super) — bd-1f5b promotes them to pub and moves types to core.\n\n## Data Shapes (from src/cli/commands/who/types.rs on master)\n\nResult types are per-mode:\n- WhoResult enum: Expert(ExpertResult), Workload(WorkloadResult), Reviews(ReviewsResult), Active(ActiveResult), Overlap(OverlapResult)\n- ExpertResult: path_query, path_match, experts Vec, truncated — Expert has username, score, components, mr_refs, details\n- WorkloadResult: username, assigned_issues, authored_mrs, reviewing_mrs, unresolved_discussions (each with truncated flag)\n- ReviewsResult: username, total_diffnotes, categorized_count, mrs_reviewed, categories Vec\n- ActiveResult: discussions Vec, total_unresolved_in_window, truncated\n- OverlapResult: path_query, path_match, users Vec, truncated\n\nAfter bd-1f5b, these live in src/core/who_types.rs.\n\n## Query Function Signatures (after bd-1f5b promotes visibility)\n\n```rust\n// expert.rs — path-based file expertise\npub fn query_expert(conn, path, project_id, since_ms, as_of_ms, limit, scoring: &ScoringConfig, detail, explain_score, include_bots) -> Result\n\n// workload.rs — username-based assignment view\npub fn query_workload(conn, username, project_id, since_ms: Option, limit, include_closed: bool) -> Result\n\n// reviews.rs — username-based review activity\npub fn query_reviews(conn, username, project_id, since_ms) -> Result\n\n// active.rs — recent unresolved discussions\npub fn query_active(conn, 
project_id, since_ms, limit, include_closed: bool) -> Result\n\n// overlap.rs — shared file knowledge between contributors\npub fn query_overlap(conn, path, project_id, since_ms, limit) -> Result\n```\n\nNote: include_closed only affects query_workload and query_active. Expert, Reviews, and Overlap ignore it.\n\n## Approach\n\n**State** (state/who.rs):\n- WhoState: mode (WhoMode), result (Option), path (String), path_input (TextInput), username_input (TextInput), path_focused (bool), username_focused (bool), selected_index (usize), include_closed (bool), scroll_offset (u16)\n- WhoMode enum: Expert, Workload, Reviews, Active, Overlap\n- Expert and Overlap modes need a path input. Workload and Reviews need a username input. Active needs neither.\n\n**Action** (action.rs):\n- fetch_who_expert(conn, path, project_id, since_ms, limit, scoring) -> Result\n- fetch_who_workload(conn, username, project_id, since_ms, limit, include_closed) -> Result\n- fetch_who_reviews(conn, username, project_id, since_ms) -> Result\n- fetch_who_active(conn, project_id, since_ms, limit, include_closed) -> Result\n- fetch_who_overlap(conn, path, project_id, since_ms, limit) -> Result\nEach wraps the corresponding query_* function from who module.\n\n**View** (view/who.rs):\n- Mode tabs at top: E(xpert) | W(orkload) | R(eviews) | A(ctive) | O(verlap)\n- Input area adapts to mode: path input for Expert/Overlap, username input for Workload/Reviews, hidden for Active\n- Expert: sorted table of authors by expertise score + bar chart\n- Workload: sections for assigned issues, authored MRs, reviewing MRs, unresolved discussions\n- Reviews: table of review categories with counts and percentages\n- Active: time-sorted list of recent unresolved discussions with participants\n- Overlap: table of users with author/review touch counts\n- Keyboard: 1-5 or Tab to switch modes, j/k scroll, / focus input, c toggle include-closed, q back\n- Status bar indicator shows [closed: on/off] when include_closed is 
toggled\n- Truncation indicators: when result.truncated is true, show \"showing N of more\" footer\n\n## Acceptance Criteria\n- [ ] 5 modes switchable via Tab or number keys\n- [ ] Expert mode: path input filters by file path, shows expertise scores in table with bar chart\n- [ ] Workload mode: username input, shows 4 sections (assigned issues, authored MRs, reviewing MRs, unresolved discussions)\n- [ ] Reviews mode: username input, shows review category breakdown table\n- [ ] Active mode: no input needed, shows recent unresolved discussions sorted by last_note_at\n- [ ] Overlap mode: path input, shows table of users with touch counts\n- [ ] Toggle for include-closed (c key) with visual indicator — re-fetches only Workload and Active modes\n- [ ] Truncation footer when results exceed limit\n- [ ] Enter on a person in Expert/Overlap navigates to Workload for that username\n- [ ] Enter on an entity in Workload/Active navigates to IssueDetail or MrDetail\n\n## Files\n- MODIFY: crates/lore-tui/src/state/who.rs (expand from current 12-line stub)\n- MODIFY: crates/lore-tui/src/state/mod.rs (update WhoState import, add to has_text_focus/blur_text_focus)\n- MODIFY: crates/lore-tui/src/message.rs (replace placeholder WhoResult with import from core, add WhoMode enum, add Msg::WhoModeChanged, Msg::WhoIncludeClosedToggled)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_who_* functions)\n- CREATE: crates/lore-tui/src/view/who.rs\n- MODIFY: crates/lore-tui/src/view/mod.rs (add who view dispatch)\n\n## TDD Anchor\nRED: Write test_fetch_who_expert_returns_result that opens in-memory DB, inserts test MR + file changes + notes, calls fetch_who_expert(\"src/\"), asserts ExpertResult with one expert.\nGREEN: Implement fetch_who_expert calling query_expert from who module.\nVERIFY: cargo test -p lore-tui who -- --nocapture\n\nAdditional tests:\n- test_who_mode_switching: cycle through 5 modes, assert input field visibility changes\n- 
test_include_closed_only_affects_workload_active: toggle include_closed, verify Expert/Reviews/Overlap dont re-fetch\n- test_who_empty_result: mode with no data shows empty state message\n- test_who_truncation_indicator: result with truncated=true shows footer\n\n## Edge Cases\n- Empty results for any mode: show \"No data\" message with mode-specific hint\n- Expert mode with no diff notes: explain that expert data requires diff notes to be synced\n- Very long file paths: truncate from left (show ...path/to/file.rs)\n- include_closed toggle re-fetches immediately for Workload/Active, no-op for other modes\n- Workload unresolved_discussions may reference closed entities — include_closed=true shows them\n- ScoringConfig accessed from Config (available to TUI via db.rs module)\n\n## Dependency Context\n- bd-1f5b (blocks): Promotes query_expert, query_workload, query_reviews, query_active, query_overlap to pub and moves types to src/core/who_types.rs. Without this, TUI cannot call who queries.\n- Current WhoState stub (12 lines) in state/who.rs references message::WhoResult placeholder — must be replaced with core types.\n- AppState.has_text_focus() in state/mod.rs:194-198 must be updated to include who path_focused and username_focused.\n- AppState.blur_text_focus() in state/mod.rs:202-206 must be updated similarly.\n- Navigation from Expert/Overlap rows: Enter on a username should push Screen::Who with mode=Workload pre-filled — requires passing username to WhoState.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:22.734056Z","created_by":"tayloreernisse","updated_at":"2026-02-19T03:34:44.779093Z","closed_at":"2026-02-19T03:34:44.778985Z","close_reason":"Who screen complete: state (17 tests), action (5 bridge funcs, 5 tests), view (5 modes, 8 tests), wiring done. 
All quality gates pass.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-u7se","depends_on_id":"bd-29qw","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-v6i","title":"[CP1] gi ingest --type=issues command","description":"## Background\n\nThe `gi ingest --type=issues` command is the main entry point for issue ingestion. It acquires a single-flight lock, calls the orchestrator for each configured project, and outputs progress/summary to the user.\n\n## Approach\n\n### Module: src/cli/commands/ingest.rs\n\n### Clap Definition\n\n```rust\n#[derive(Args)]\npub struct IngestArgs {\n /// Resource type to ingest\n #[arg(long, value_parser = [\"issues\", \"merge_requests\"])]\n pub r#type: String,\n\n /// Filter to single project\n #[arg(long)]\n pub project: Option,\n\n /// Override stale sync lock\n #[arg(long)]\n pub force: bool,\n}\n```\n\n### Handler Function\n\n```rust\npub async fn handle_ingest(args: IngestArgs, config: &Config) -> Result<()>\n```\n\n### Logic\n\n1. **Acquire single-flight lock**: `acquire_sync_lock(conn, args.force)?`\n2. **Get projects to sync**:\n - If `args.project` specified, filter to that one\n - Otherwise, get all configured projects from DB\n3. **For each project**:\n - Print \"Ingesting issues for {project_path}...\"\n - Call `ingest_project_issues(conn, client, config, project_id, gitlab_project_id)`\n - Print \"{N} issues fetched, {M} new labels\"\n4. **Print discussion sync summary**:\n - \"Fetching discussions ({N} issues with updates)...\"\n - \"{N} discussions, {M} notes (excluding {K} system notes)\"\n - \"Skipped discussion sync for {N} unchanged issues.\"\n5. 
**Release lock**: Lock auto-released when handler returns\n\n### Output Format (matches PRD)\n\n```\nIngesting issues...\n\n group/project-one: 1,234 issues fetched, 45 new labels\n\nFetching discussions (312 issues with updates)...\n\n group/project-one: 312 issues → 1,234 discussions, 5,678 notes\n\nTotal: 1,234 issues, 1,234 discussions, 5,678 notes (excluding 1,234 system notes)\nSkipped discussion sync for 922 unchanged issues.\n```\n\n## Acceptance Criteria\n\n- [ ] Clap args parse --type, --project, --force correctly\n- [ ] Single-flight lock acquired before sync starts\n- [ ] Lock error message is clear if concurrent run attempted\n- [ ] Progress output shows per-project counts\n- [ ] Summary includes unchanged issues skipped count\n- [ ] --force flag allows overriding stale lock\n\n## Files\n\n- src/cli/commands/mod.rs (add `pub mod ingest;`)\n- src/cli/commands/ingest.rs (create)\n- src/cli/mod.rs (add Ingest variant to Commands enum)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/cli_ingest_tests.rs\n#[tokio::test] async fn ingest_issues_acquires_lock()\n#[tokio::test] async fn ingest_issues_fails_on_concurrent_run()\n#[tokio::test] async fn ingest_issues_respects_project_filter()\n#[tokio::test] async fn ingest_issues_force_overrides_stale_lock()\n```\n\nGREEN: Implement handler with lock and orchestrator calls\n\nVERIFY: `cargo test cli_ingest`\n\n## Edge Cases\n\n- No projects configured - return early with helpful message\n- Project filter matches nothing - error with \"project not found\"\n- Lock already held - clear error \"Sync already in progress\"\n- Ctrl-C during sync - lock should be released (via Drop or SIGINT 
handler)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.312565Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:56:44.090142Z","closed_at":"2026-01-25T22:56:44.090086Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-v6i","depends_on_id":"bd-ozy","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-v6tc","title":"Description","description":"This is a test","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:52:04.745618Z","updated_at":"2026-02-12T16:52:10.755235Z","closed_at":"2026-02-12T16:52:10.755188Z","close_reason":"test artifacts","compaction_level":0,"original_size":0} {"id":"bd-wcja","title":"Extend SyncResult with surgical mode fields for robot output","description":"## Background\n\nRobot mode (`--robot`) serializes `SyncResult` as JSON for machine consumers. Currently `SyncResult` (lines 31-52 of `src/cli/commands/sync.rs`) only has fields for normal full sync. Surgical sync needs additional metadata in the JSON response: whether surgical mode was active, which IIDs were requested, per-entity outcomes, and whether it was a preflight-only run. 
These must be `Option` fields so normal sync serialization is unchanged (serde `skip_serializing_if = \"Option::is_none\"`).\n\n## Approach\n\nAdd four `Option` fields to the existing `SyncResult` struct:\n\n```rust\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub surgical_mode: Option,\n\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub surgical_iids: Option,\n\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub entity_results: Option>,\n\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub preflight_only: Option,\n```\n\nDefine two new supporting structs in the same file:\n\n```rust\n#[derive(Debug, Default, Serialize)]\npub struct SurgicalIids {\n pub issues: Vec,\n pub merge_requests: Vec,\n}\n\n#[derive(Debug, Serialize)]\npub struct EntitySyncResult {\n pub entity_type: String, // \"issue\" or \"merge_request\"\n pub iid: u64,\n pub outcome: String, // \"synced\", \"skipped_toctou\", \"failed\", \"not_found\"\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub error: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub toctou_reason: Option,\n}\n```\n\nBecause `SyncResult` derives `Default`, the new `Option` fields default to `None` automatically. Non-surgical callers need zero changes.\n\n## Acceptance Criteria\n\n1. `SyncResult` compiles with all four new `Option` fields\n2. `SurgicalIids` and `EntitySyncResult` are defined with `Serialize` derive\n3. Serializing a `SyncResult` with surgical fields set produces JSON with `surgical_mode`, `surgical_iids`, `entity_results`, `preflight_only` keys\n4. Serializing a default `SyncResult` (all `None`) produces JSON identical to current output (no surgical keys)\n5. `SyncResult::default()` still works without specifying new fields\n6. 
All existing tests pass unchanged\n\n## Files\n\n- `src/cli/commands/sync.rs` — add fields to `SyncResult`, define `SurgicalIids` and `EntitySyncResult`\n\n## TDD Anchor\n\nAdd a test module or extend the existing one in `src/cli/commands/sync.rs` (or a new `sync_tests.rs` file):\n\n```rust\n#[cfg(test)]\nmod surgical_result_tests {\n use super::*;\n\n #[test]\n fn sync_result_default_omits_surgical_fields() {\n let result = SyncResult::default();\n let json = serde_json::to_value(&result).unwrap();\n assert!(json.get(\"surgical_mode\").is_none());\n assert!(json.get(\"surgical_iids\").is_none());\n assert!(json.get(\"entity_results\").is_none());\n assert!(json.get(\"preflight_only\").is_none());\n }\n\n #[test]\n fn sync_result_with_surgical_fields_serializes_correctly() {\n let result = SyncResult {\n surgical_mode: Some(true),\n surgical_iids: Some(SurgicalIids {\n issues: vec![7, 42],\n merge_requests: vec![10],\n }),\n entity_results: Some(vec![\n EntitySyncResult {\n entity_type: \"issue\".to_string(),\n iid: 7,\n outcome: \"synced\".to_string(),\n error: None,\n toctou_reason: None,\n },\n EntitySyncResult {\n entity_type: \"issue\".to_string(),\n iid: 42,\n outcome: \"skipped_toctou\".to_string(),\n error: None,\n toctou_reason: Some(\"updated_at changed\".to_string()),\n },\n ]),\n preflight_only: Some(false),\n ..SyncResult::default()\n };\n let json = serde_json::to_value(&result).unwrap();\n assert_eq!(json[\"surgical_mode\"], true);\n assert_eq!(json[\"surgical_iids\"][\"issues\"], serde_json::json!([7, 42]));\n assert_eq!(json[\"entity_results\"].as_array().unwrap().len(), 2);\n assert_eq!(json[\"entity_results\"][1][\"outcome\"], \"skipped_toctou\");\n assert_eq!(json[\"preflight_only\"], false);\n }\n\n #[test]\n fn entity_sync_result_omits_none_fields() {\n let entity = EntitySyncResult {\n entity_type: \"merge_request\".to_string(),\n iid: 10,\n outcome: \"synced\".to_string(),\n error: None,\n toctou_reason: None,\n };\n let json = 
serde_json::to_value(&entity).unwrap();\n assert!(json.get(\"error\").is_none());\n assert!(json.get(\"toctou_reason\").is_none());\n assert!(json.get(\"entity_type\").is_some());\n }\n}\n```\n\n## Edge Cases\n\n- `entity_results: Some(vec![])` — empty vec serializes as `[]`, not omitted. This is correct for \"surgical mode ran but had no entities to process.\"\n- `surgical_iids` with empty vecs — valid for edge case where user passes `--issue` but all IIDs are filtered out before sync.\n- Ensure `EntitySyncResult.outcome` uses a fixed set of string values. Consider a future enum, but `String` is fine for initial implementation to keep serialization simple.\n\n## Dependency Context\n\n- **No upstream dependencies** — this bead only adds struct fields, no behavioral changes.\n- **Downstream**: bd-1i4i (orchestrator) populates these fields. bd-3bec (wiring) passes them through.\n- The `#[derive(Default)]` on `SyncResult` means all `Option` fields are `None` by default, so this is a fully additive change.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:17:03.915330Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:02:01.980946Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-wcja","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-wnuo","title":"Implement performance benchmark fixtures (S/M/L tiers)","description":"## Background\nTiered performance fixtures validate latency at three data scales. S and M tiers are CI-enforced gates; L tier is advisory. 
Fixtures are synthetic SQLite databases with realistic data distributions.\n\n## Approach\nFixture generator (benches/ or tests/fixtures/):\n- S-tier: 10k issues, 5k MRs, 50k notes, 10k docs\n- M-tier: 100k issues, 50k MRs, 500k notes, 50k docs\n- L-tier: 250k issues, 100k MRs, 1M notes, 100k docs\n- Realistic distributions: state (60% closed, 30% opened, 10% other), authors from pool of 50 names, labels from pool of 20, dates spanning 2 years\n\nBenchmarks:\n- p95 first-paint latency: Dashboard load, Issue List load, MR List load\n- p95 keyset pagination: next page fetch\n- p95 search latency: lexical and hybrid modes\n- Memory ceiling: RSS after full dashboard + list load\n- SLO assertions per tier (see Phase 0 criteria)\n\nRequired indexes must be present in fixture DBs:\n- idx_issues_list_default, idx_mrs_list_default, idx_discussions_entity, idx_notes_discussion\n\n## Acceptance Criteria\n- [ ] S-tier fixture generated with correct counts\n- [ ] M-tier fixture generated with correct counts\n- [ ] L-tier fixture generated (on-demand, not CI)\n- [ ] p95 first-paint < 50ms (S), < 75ms (M), < 150ms (L)\n- [ ] p95 keyset pagination < 50ms (S), < 75ms (M), < 100ms (L)\n- [ ] p95 search latency < 100ms (S), < 200ms (M), < 400ms (L)\n- [ ] Memory < 150MB RSS (S), < 250MB RSS (M)\n- [ ] All required indexes present in fixtures\n- [ ] EXPLAIN QUERY PLAN shows index usage for top 10 queries\n\n## Files\n- CREATE: crates/lore-tui/benches/perf_benchmarks.rs\n- CREATE: crates/lore-tui/tests/fixtures/generate_fixtures.rs\n\n## TDD Anchor\nRED: Write benchmark_dashboard_load_s_tier that generates S-tier fixture, measures Dashboard load time, asserts p95 < 50ms.\nGREEN: Implement fetch_dashboard with efficient queries.\nVERIFY: cargo bench --manifest-path crates/lore-tui/Cargo.toml\n\n## Edge Cases\n- Fixture generation must be deterministic (seeded RNG) for reproducible benchmarks\n- CI machines may be slower — use generous multipliers or relative thresholds\n- S-tier fits in 
memory; M-tier requires WAL mode for concurrent access\n- Benchmark warmup: discard first 5 iterations\n\n## Dependency Context\nUses all action.rs query functions from Phase 2/3 tasks.\nUses DbManager from \"Implement DbManager\" task.\nUses required index migrations from the main lore crate.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:05:12.867291Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:38.463811Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-wnuo","depends_on_id":"bd-1b6k","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-wnuo","depends_on_id":"bd-3eis","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-wrw1","title":"Implement CLI/TUI parity tests (counts, lists, detail, search, sanitization)","description":"## Background\nParity tests ensure the TUI and CLI show the same data. Both interfaces query the same SQLite database, but through different code paths (TUI action functions vs CLI command handlers). Drift can occur when query functions are duplicated or modified independently. These tests catch drift by running both code paths against the same in-memory DB and comparing results.\n\n## Approach\n\n### Test Strategy: Library-Level (Same Process)\nTests run in the same process with a shared in-memory SQLite DB. No binary execution, no JSON parsing, no process spawning. 
Both TUI action functions and CLI query functions are called as library code.\n\nSetup pattern:\n```rust\nuse lore::core::db::{create_connection, run_migrations};\nuse std::path::Path;\n\nfn setup_parity_db() -> rusqlite::Connection {\n let conn = create_connection(Path::new(\":memory:\")).unwrap();\n run_migrations(&conn).unwrap();\n insert_fixture_data(&conn); // shared fixture with known counts\n conn\n}\n```\n\n### Fixture Data\nCreate a deterministic fixture with known quantities:\n- 1 project (gitlab_project_id=1, path_with_namespace=\"group/repo\", web_url=\"https://gitlab.example.com/group/repo\")\n- 15 issues (5 opened, 5 closed, 5 with various states)\n- 10 merge_requests (3 opened, 3 merged, 2 closed, 2 draft)\n- 30 discussions (20 for issues, 10 for MRs)\n- 60 notes (2 per discussion)\n- Insert via direct SQL (same pattern as existing tests in src/core/db.rs)\n\n### Parity Checks\n\n**Dashboard Count Parity:**\n- TUI: call the dashboard fetch function that returns entity counts\n- CLI: call the same count query functions used by `lore --robot count`\n- Assert: issue_count, mr_count, discussion_count, note_count all match\n\n**Issue List Parity:**\n- TUI: call issue list action with default filter (state=all, limit=50, sort=updated_at DESC)\n- CLI: call the issue list query used by `lore --robot issues`\n- Assert: same IIDs in same order, same state values for each\n\n**MR List Parity:**\n- TUI: call MR list action with default filter\n- CLI: call the MR list query used by `lore --robot mrs`\n- Assert: same IIDs in same order, same state values, same draft flags\n\n**Issue Detail Parity:**\n- TUI: call issue detail fetch for a specific IID\n- CLI: call the issue detail query used by `lore --robot issues `\n- Assert: same metadata fields (title, state, author, labels, created_at, updated_at), same discussion count\n\n**Search Parity:**\n- TUI: call search action with a known query term\n- CLI: call the search function used by `lore --robot search`\n- 
Assert: same document IDs returned in same rank order\n\n**Sanitization Parity:**\n- Insert an issue with ANSI escape sequences in the title: \"Normal \\x1b[31mRED\\x1b[0m text\"\n- TUI: fetch and sanitize via terminal safety module\n- CLI: fetch and render via robot mode (which strips ANSI)\n- Assert: both produce clean output without raw escape sequences\n\n## Acceptance Criteria\n- [ ] Dashboard counts: TUI == CLI for issues, MRs, discussions, notes on shared fixture\n- [ ] Issue list: TUI returns same IIDs in same order as CLI query function\n- [ ] MR list: TUI returns same IIDs in same order as CLI query function\n- [ ] Issue detail: TUI metadata matches CLI for title, state, author, discussion count\n- [ ] Search results: same document IDs in same rank order\n- [ ] Sanitization: both strip ANSI escape sequences from issue titles\n- [ ] All tests use in-memory DB (no file I/O, no binary spawning)\n- [ ] Tests are deterministic (fixed fixture, no wall clock dependency)\n\n## Files\n- CREATE: crates/lore-tui/tests/parity_tests.rs\n\n## TDD Anchor\nRED: Write `test_dashboard_count_parity` that creates shared fixture DB, calls both TUI dashboard fetch and CLI count query functions, asserts all counts equal.\nGREEN: Ensure TUI query functions exist and match CLI query logic.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml parity\n\nAdditional tests:\n- test_issue_list_parity\n- test_mr_list_parity\n- test_issue_detail_parity\n- test_search_parity\n- test_sanitization_parity\n\n## Edge Cases\n- CLI and TUI may use different default sort orders — normalize to same ORDER BY in test setup\n- CLI list commands default to limit=50, TUI may default to page size — test with explicit limit\n- Fixture must include edge cases: NULL labels, empty descriptions, issues with work item status set\n- Schema version must match between both code paths (same migration version)\n- FTS index must be populated for search parity (call generate-docs equivalent on 
fixture)\n\n## Dependency Context\n- Uses TUI action functions from Phase 2/3 screen beads (must exist as library code)\n- Uses CLI query functions from src/cli/ (already exist as `lore` library exports)\n- Uses lore::core::db for shared DB setup\n- Uses terminal safety module (bd-3ir1) for sanitization comparison\n- Depends on bd-14hv (soak tests) being complete per phase ordering","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:05:51.620596Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:38.629958Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-wrw1","depends_on_id":"bd-14hv","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-wrw1","depends_on_id":"bd-2o49","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} -{"id":"bd-wzqi","title":"Implement Command Palette (state + view)","description":"## Background\nThe Command Palette is a modal overlay (Ctrl+P) that provides fuzzy-match access to all commands. 
It uses FrankenTUI's built-in CommandPalette widget and is populated from the CommandRegistry.\n\n## Approach\nState (state/command_palette.rs):\n- CommandPaletteState: wraps ftui CommandPalette widget state\n- input (String), filtered_commands (Vec), selected_index (usize), visible (bool)\n\nView (view/command_palette.rs):\n- Modal overlay centered on screen (60% width, 50% height)\n- Text input at top for fuzzy search\n- Scrollable list of matching commands with keybinding hints\n- Enter executes selected command, Esc closes palette\n- Fuzzy matching: subsequence match on command label and help text\n\nIntegration:\n- Ctrl+P from any screen opens palette (handled in interpret_key stage 2)\n- execute_palette_action() in app.rs converts selected command to Msg\n\n## Acceptance Criteria\n- [ ] Ctrl+P opens palette from any screen in Normal mode\n- [ ] Fuzzy matching filters commands as user types\n- [ ] Commands show label + keybinding + help text\n- [ ] Enter executes selected command\n- [ ] Esc closes palette without action\n- [ ] Palette populated from CommandRegistry (single source of truth)\n- [ ] Modal renders on top of current screen content\n\n## Files\n- MODIFY: crates/lore-tui/src/state/command_palette.rs (expand from stub)\n- CREATE: crates/lore-tui/src/view/command_palette.rs\n\n## TDD Anchor\nRED: Write test_palette_fuzzy_match that creates registry with 5 commands, filters with \"iss\", asserts Issue-related commands match.\nGREEN: Implement fuzzy matching on command labels.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_palette_fuzzy\n\n## Edge Cases\n- Empty search shows all commands\n- Very long command labels: truncate with ellipsis\n- Command not available on current screen: show but gray out\n- Palette should not steal focus from text inputs — only opens in Normal mode\n\n## Dependency Context\nUses CommandRegistry from \"Implement CommandRegistry\" task.\nUses ftui CommandPalette widget from FrankenTUI.\nUses 
InputMode::Palette from \"Implement core types\" task.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:37.250065Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.175286Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-wzqi","depends_on_id":"bd-35g5","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-wzqi","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-wzqi","title":"Implement Command Palette (state + view)","description":"## Background\nThe Command Palette is a modal overlay (Ctrl+P) that provides fuzzy-match access to all commands. It uses FrankenTUI's built-in CommandPalette widget and is populated from the CommandRegistry.\n\n## Approach\nState (state/command_palette.rs):\n- CommandPaletteState: wraps ftui CommandPalette widget state\n- input (String), filtered_commands (Vec), selected_index (usize), visible (bool)\n\nView (view/command_palette.rs):\n- Modal overlay centered on screen (60% width, 50% height)\n- Text input at top for fuzzy search\n- Scrollable list of matching commands with keybinding hints\n- Enter executes selected command, Esc closes palette\n- Fuzzy matching: subsequence match on command label and help text\n\nIntegration:\n- Ctrl+P from any screen opens palette (handled in interpret_key stage 2)\n- execute_palette_action() in app.rs converts selected command to Msg\n\n## Acceptance Criteria\n- [ ] Ctrl+P opens palette from any screen in Normal mode\n- [ ] Fuzzy matching filters commands as user types\n- [ ] Commands show label + keybinding + help text\n- [ ] Enter executes selected command\n- [ ] Esc closes palette without action\n- [ ] Palette populated from CommandRegistry (single source of truth)\n- [ ] Modal renders on top of current screen content\n\n## Files\n- MODIFY: crates/lore-tui/src/state/command_palette.rs (expand from stub)\n- 
CREATE: crates/lore-tui/src/view/command_palette.rs\n\n## TDD Anchor\nRED: Write test_palette_fuzzy_match that creates registry with 5 commands, filters with \"iss\", asserts Issue-related commands match.\nGREEN: Implement fuzzy matching on command labels.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_palette_fuzzy\n\n## Edge Cases\n- Empty search shows all commands\n- Very long command labels: truncate with ellipsis\n- Command not available on current screen: show but gray out\n- Palette should not steal focus from text inputs — only opens in Normal mode\n\n## Dependency Context\nUses CommandRegistry from \"Implement CommandRegistry\" task.\nUses ftui CommandPalette widget from FrankenTUI.\nUses InputMode::Palette from \"Implement core types\" task.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:37.250065Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:12:33.957535Z","closed_at":"2026-02-18T21:12:33.957486Z","close_reason":"Command palette implemented: fuzzy matching state (13 tests), modal overlay view (6 tests), full keyboard handling (Esc/Enter/Up/Down/Backspace/typing), wired into view/mod.rs overlay layer","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-wzqi","depends_on_id":"bd-35g5","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-x8oq","title":"Write surgical_tests.rs with TDD test suite","description":"## Background\n\nThe surgical sync module (`src/ingestion/surgical.rs` from bd-3sez) needs a comprehensive test suite. Tests use in-memory SQLite (no real GitLab or Ollama) and wiremock for HTTP mocks. 
The test file lives at `src/ingestion/surgical_tests.rs` and is included via `#[cfg(test)] #[path = \"surgical_tests.rs\"] mod tests;` in surgical.rs.\n\nKey testing constraints:\n- In-memory DB pattern: `create_connection(Path::new(\":memory:\"))` + `run_migrations(&conn)`\n- Test project insert: `INSERT INTO projects (gitlab_project_id, path_with_namespace, web_url)` (no `name`/`last_seen_at` columns)\n- `GitLabIssue` required fields: `id`, `iid`, `project_id`, `title`, `state`, `created_at`, `updated_at`, `author`, `web_url`\n- `GitLabMergeRequest` adds: `source_branch`, `target_branch`, `draft`, `merge_status`, `reviewers`\n- `updated_at` is `String` (ISO 8601) in GitLab types, e.g. `\"2026-02-17T12:00:00.000+00:00\"`\n- `SourceType` enum variants: `Issue`, `MergeRequest`, `Discussion`, `Note`\n- `dirty_sources` table: `(source_type TEXT, source_id INTEGER)` primary key\n\n## Approach\n\nCreate `src/ingestion/surgical_tests.rs` with:\n\n### Test Helpers\n- `setup_db() -> Connection` — in-memory DB with migrations + test project row\n- `make_test_issue(iid: i64, updated_at: &str) -> GitLabIssue` — minimal valid JSON fixture\n- `make_test_mr(iid: i64, updated_at: &str) -> GitLabMergeRequest` — minimal valid JSON fixture\n- `get_db_updated_at(conn, table, iid) -> Option` — helper to query DB updated_at for assertions\n- `get_dirty_keys(conn) -> Vec<(String, i64)>` — query dirty_sources for assertions\n\n### Sync Tests (13)\n1. `test_ingest_issue_by_iid_upserts_and_marks_dirty` — fresh issue ingest, verify DB row + dirty_sources entry\n2. `test_ingest_mr_by_iid_upserts_and_marks_dirty` — fresh MR ingest, verify DB row + dirty_sources entry\n3. `test_toctou_skips_stale_issue` — insert issue at T1, call ingest with payload at T1, assert skipped_stale=true and no dirty mark\n4. `test_toctou_skips_stale_mr` — same for MRs\n5. `test_toctou_allows_newer_issue` — DB has T1, payload has T2 (T2 > T1), assert upserted=true\n6. `test_toctou_allows_newer_mr` — same for MRs\n7. 
`test_is_stale_parses_iso8601` — unit test: `\"2026-02-17T12:00:00.000+00:00\"` parses to correct ms-epoch\n8. `test_is_stale_handles_none_db_value` — first ingest, no DB row, assert not stale\n9. `test_is_stale_with_z_suffix` — `\"2026-02-17T12:00:00Z\"` also parses correctly\n10. `test_ingest_issue_returns_dirty_source_keys` — verify `dirty_source_keys` contains `(SourceType::Issue, local_id)`\n11. `test_ingest_mr_returns_dirty_source_keys` — verify MR dirty source keys\n12. `test_ingest_issue_updates_existing` — ingest same IID twice with newer updated_at, verify update\n13. `test_ingest_mr_updates_existing` — same for MRs\n\n### Async Preflight Test (1, wiremock)\n14. `test_preflight_fetch_returns_issues_and_mrs` — wiremock GET `/projects/:id/issues?iids[]=42` returns 200 with fixture, verify PreflightResult.issues has 1 entry\n\n### Integration Stubs (4, for bd-3jqx)\n15. `test_surgical_cancellation_during_preflight` — stub: signal.cancel() before preflight, verify early return\n16. `test_surgical_timeout_during_fetch` — stub: wiremock delay exceeds timeout\n17. `test_surgical_embed_isolation` — stub: verify only surgical docs get embedded\n18. `test_surgical_payload_integrity` — stub: verify ingested data matches GitLab payload exactly\n\n## Acceptance Criteria\n\n- [ ] All 13 sync tests pass with in-memory SQLite\n- [ ] Async preflight test passes with wiremock\n- [ ] 4 integration stubs compile and are marked `#[ignore]` (implemented in bd-3jqx)\n- [ ] Test helpers produce valid GitLabIssue/GitLabMergeRequest fixtures that pass `transform_issue`/`transform_merge_request`\n- [ ] No flaky tests: deterministic timestamps, no real network calls\n- [ ] File wired into surgical.rs via `#[cfg(test)] #[path = \"surgical_tests.rs\"] mod tests;`\n\n## Files\n\n- `src/ingestion/surgical_tests.rs` (NEW)\n- `src/ingestion/surgical.rs` (add `#[cfg(test)]` module path — created in bd-3sez)\n\n## TDD Anchor\n\nThis bead IS the test suite. 
Tests are written first (TDD red phase), then bd-3sez implements the production code to make them pass (green phase). Specific test signatures:\n\n```rust\n#[test]\nfn test_ingest_issue_by_iid_upserts_and_marks_dirty() {\n let conn = setup_db();\n let issue = make_test_issue(42, \"2026-02-17T12:00:00.000+00:00\");\n let config = Config::default();\n let result = ingest_issue_by_iid(&conn, &config, /*project_id=*/1, &issue).unwrap();\n assert!(result.upserted);\n assert!(!result.skipped_stale);\n let dirty = get_dirty_keys(&conn);\n assert!(dirty.contains(&(\"issue\".to_string(), /*local_id from DB*/)));\n}\n\n#[test]\nfn test_toctou_skips_stale_issue() {\n let conn = setup_db();\n let issue = make_test_issue(42, \"2026-02-17T12:00:00.000+00:00\");\n ingest_issue_by_iid(&conn, &Config::default(), 1, &issue).unwrap();\n // Ingest same timestamp again\n let result = ingest_issue_by_iid(&conn, &Config::default(), 1, &issue).unwrap();\n assert!(result.skipped_stale);\n}\n\n#[tokio::test]\nasync fn test_preflight_fetch_returns_issues_and_mrs() {\n let mock = MockServer::start().await;\n // ... 
wiremock setup ...\n}\n```\n\n## Edge Cases\n\n- `make_test_issue` must produce all required fields (`id`, `iid`, `project_id`, `title`, `state`, `created_at`, `updated_at`, `author` with `username` and `id`, `web_url`) or `transform_issue` will fail\n- `make_test_mr` additionally needs `source_branch`, `target_branch`, `draft`, `merge_status`, `reviewers`\n- ISO 8601 fixtures must use `+00:00` suffix (GitLab format), not `Z`\n- Integration stubs must be `#[ignore]` so they do not fail CI before bd-3jqx implements them\n- Test DB needs `run_migrations` to create all tables including `dirty_sources`, `documents`, `issues`, `merge_requests`\n\n## Dependency Context\n\n- **Blocked by bd-3sez**: Cannot compile tests until surgical.rs module exists (circular co-dependency — develop together)\n- **Blocks bd-3jqx**: Integration test stubs are implemented in that bead\n- **No other blockers**: Uses only in-memory DB and wiremock, no external dependencies","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:15:05.498388Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:02:42.840151Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"]} {"id":"bd-xhz","title":"[CP1] GitLab client pagination methods","description":"## Background\n\nGitLab pagination methods enable fetching large result sets (issues, discussions) as async streams. 
The client uses `x-next-page` headers to determine continuation and applies cursor rewind for tuple-based incremental sync.\n\n## Approach\n\nAdd pagination methods to GitLabClient using `async-stream` crate:\n\n### Methods to Add\n\n```rust\nimpl GitLabClient {\n /// Paginate through issues for a project.\n pub fn paginate_issues(\n &self,\n gitlab_project_id: i64,\n updated_after: Option, // ms epoch cursor\n cursor_rewind_seconds: u32,\n ) -> Pin> + Send + '_>>\n\n /// Paginate through discussions for an issue.\n pub fn paginate_issue_discussions(\n &self,\n gitlab_project_id: i64,\n issue_iid: i64,\n ) -> Pin> + Send + '_>>\n\n /// Make request and return response with headers for pagination.\n async fn request_with_headers(\n &self,\n path: &str,\n params: &[(&str, String)],\n ) -> Result<(T, HeaderMap)>\n}\n```\n\n### Pagination Logic\n\n1. Start at page 1, per_page=100\n2. For issues: add scope=all, state=all, order_by=updated_at, sort=asc\n3. Apply cursor rewind: `updated_after = cursor - rewind_seconds` (clamped to 0)\n4. Yield each item from response\n5. Check `x-next-page` header for continuation\n6. 
Stop when header is empty/absent OR response is empty\n\n### Cursor Rewind\n\n```rust\nif let Some(ts) = updated_after {\n let rewind_ms = (cursor_rewind_seconds as i64) * 1000;\n let rewound = (ts - rewind_ms).max(0); // Clamp to avoid underflow\n // Convert to ISO 8601 for updated_after param\n}\n```\n\n## Acceptance Criteria\n\n- [ ] `paginate_issues` returns Stream of GitLabIssue\n- [ ] `paginate_issues` adds scope=all, state=all, order_by=updated_at, sort=asc\n- [ ] `paginate_issues` applies cursor rewind with max(0) clamping\n- [ ] `paginate_issue_discussions` returns Stream of GitLabDiscussion\n- [ ] Both methods follow x-next-page header until empty\n- [ ] Both methods stop on empty response (fallback)\n- [ ] `request_with_headers` returns (T, HeaderMap) tuple\n\n## Files\n\n- src/gitlab/client.rs (edit - add methods)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/pagination_tests.rs\n#[tokio::test] async fn fetches_all_pages_when_multiple_exist()\n#[tokio::test] async fn respects_per_page_parameter()\n#[tokio::test] async fn follows_x_next_page_header_until_empty()\n#[tokio::test] async fn falls_back_to_empty_page_stop_if_headers_missing()\n#[tokio::test] async fn applies_cursor_rewind_for_tuple_semantics()\n#[tokio::test] async fn clamps_negative_rewind_to_zero()\n```\n\nGREEN: Implement pagination methods with async-stream\n\nVERIFY: `cargo test pagination`\n\n## Edge Cases\n\n- cursor_updated_at near zero - rewind must not underflow (use max(0))\n- GitLab returns empty x-next-page - treat as end of pages\n- GitLab omits pagination headers entirely - use empty response as stop condition\n- DateTime conversion fails - omit updated_after and fetch all (safe fallback)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.222168Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:28:39.192876Z","closed_at":"2026-01-25T22:28:39.192815Z","close_reason":"Implemented paginate_issues and paginate_issue_discussions with 
async-stream, cursor rewind with max(0) clamping, x-next-page header following, 4 unit tests passing","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-xhz","depends_on_id":"bd-1np","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-xhz","depends_on_id":"bd-2ys","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-xsgw","title":"NOTE-TEST2: Another test bead","description":"type: task","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:58:53.392214Z","updated_at":"2026-02-12T16:59:02.051710Z","closed_at":"2026-02-12T16:59:02.051663Z","close_reason":"test","compaction_level":0,"original_size":0} diff --git a/.beads/last-touched b/.beads/last-touched index f520793..fb3bff6 100644 --- a/.beads/last-touched +++ b/.beads/last-touched @@ -1 +1 @@ -bd-2kr0 +bd-2og9 diff --git a/.liquid-mail.toml b/.liquid-mail.toml new file mode 100644 index 0000000..e0ea2b4 --- /dev/null +++ b/.liquid-mail.toml @@ -0,0 +1,13 @@ +# Liquid Mail config (TOML) +# +# Prefer env vars for secrets: +# LIQUID_MAIL_HONCHO_API_KEY +# LIQUID_MAIL_HONCHO_WORKSPACE_ID +# +[honcho] +api_key = ""  # REDACTED: a real key was committed here — rotate it and set LIQUID_MAIL_HONCHO_API_KEY instead +# workspace_id is optional. +# If omitted, Liquid Mail defaults it to the repo name (git root folder name). +# Honcho uses get-or-create semantics for workspaces, so it will be created on first use. +# workspace_id = "my-repo" +base_url = "https://api.honcho.dev" diff --git a/AGENTS.md b/AGENTS.md index 139acfc..1d48b9a 100644 --- a/AGENTS.md +++ b/AGENTS.md @@ -818,3 +818,157 @@ Parse: `file:line:col` → location | 💡 → how to fix | Exit 0/1 → pass/fa - ❌ Full scan per edit → ✅ Scope to file - ❌ Fix symptom (`if (x) { x.y }`) → ✅ Root cause (`x?.y`) ```` + + +## Integrating Liquid Mail with Beads + +**Beads** manages task status, priority, and dependencies (`br` CLI). 
+**Liquid Mail** provides the shared log—progress, decisions, and context that survives sessions. + +### Conventions + +- **Single source of truth**: Beads owns task state; Liquid Mail owns conversation/decisions +- **Shared identifiers**: Include the Beads issue ID in posts (e.g., `[lm-jht] Topic validation rules`) +- **Decisions before action**: Post `DECISION:` messages before risky changes, not after +- **Identity in user updates**: In every user-facing reply, include your window-name (derived from `LIQUID_MAIL_WINDOW_ID`) so humans can distinguish concurrent agents. + +### Typical Flow + +**1. Pick ready work (Beads)** +```bash +br ready # Find available work (no blockers) +br show lm-jht # Review details +br update lm-jht --status in_progress +``` + +**2. Check context (Liquid Mail)** +```bash +liquid-mail notify # See what changed since last session +liquid-mail query "lm-jht" # Find prior discussion on this issue +``` + +**3. Work and log progress (topic required)** + +The `--topic` flag is required for your first post. After that, the topic is pinned to your window. +```bash +liquid-mail post --topic auth-system "[lm-jht] START: Reviewing current topic id patterns" +liquid-mail post "[lm-jht] FINDING: IDs like lm3189... are being used as topic names" +liquid-mail post "[lm-jht] NEXT: Add validation + rename guidance" +``` + +**4. Decisions before risky changes** +```bash +liquid-mail post --decision "[lm-jht] DECISION: Reject UUID-like topic names; require slugs" +# Then implement +``` + +### Decision Conflicts (Preflight) + +When you post a decision (via `--decision` or a `DECISION:` line), Liquid Mail can preflight-check for conflicts with prior decisions **in the same topic**. + +- If a conflict is detected, `liquid-mail post` fails with `DECISION_CONFLICT`. +- Review prior decisions: `liquid-mail decisions --topic `. +- If you intend to supersede the old decision, re-run with `--yes` and include what changed and why. + +**5. 
Complete (Beads is authority)** +```bash +br close lm-jht # Mark complete in Beads +liquid-mail post "[lm-jht] Completed: Topic validation shipped in 177267d" +``` + +### Posting Format + +- **Short** (5-15 lines, not walls of text) +- **Prefixed** with ALL-CAPS tags: `FINDING:`, `DECISION:`, `QUESTION:`, `NEXT:` +- **Include file paths** so others can jump in: `src/services/auth.ts:42` +- **Include issue IDs** in brackets: `[lm-jht]` +- **User-facing replies**: include `AGENT: ` near the top. Get it with `liquid-mail window name`. + +### Topics (Required) + +Liquid Mail organizes messages into **topics** (Honcho sessions). Topics are **soft boundaries**—search spans all topics by default. + +**Rule:** `liquid-mail post` requires a topic: +- Provide `--topic `, OR +- Post inside a window that already has a pinned topic. + +Topic names must be: +- 4–50 characters +- lowercase letters/numbers with hyphens +- start with a letter, end with a letter/number +- no consecutive hyphens +- not reserved (`all`, `new`, `help`, `merge`, `rename`, `list`) +- not UUID-like (`lm<32-hex>` or standard UUIDs) + +Good examples: `auth-system`, `db-system`, `dashboards` + +Commands: + +- **List topics (newest first)**: `liquid-mail topics` +- **Find context across topics**: `liquid-mail query "auth"`, then pick a topic name +- **Rename a topic (alias)**: `liquid-mail topic rename ` +- **Merge two topics into a new one**: `liquid-mail topic merge --into ` + +Examples (component topic + Beads id in the subject): +```bash +liquid-mail post --topic auth-system "[lm-jht] START: Investigating token refresh failures" +liquid-mail post --topic auth-system "[lm-jht] FINDING: refresh happens in middleware, not service layer" +liquid-mail post --topic auth-system --decision "[lm-jht] DECISION: Move refresh logic into AuthService" + +liquid-mail post --topic dashboards "[lm-1p5] START: Adding latency panel" +``` + +### Context Refresh (Before New Work / After Redirects) + +If you see redirect/merge 
messages, refresh context before acting: +```bash +liquid-mail notify +liquid-mail window status --json +liquid-mail summarize --topic +liquid-mail decisions --topic +``` + +If you discover a newer "canonical" topic (for example after a topic merge), switch to it explicitly: +```bash +liquid-mail post --topic "[lm-xxxx] CONTEXT: Switching topics (rename/merge)" +``` + +### Live Updates (Polling) + +Liquid Mail is pull-based by default (you run `notify`). For near-real-time updates: +```bash +liquid-mail watch --topic # watch a topic +liquid-mail watch # or watch your pinned topic +``` + +### Mapping Cheat-Sheet + +| Concept | In Beads | In Liquid Mail | +|---------|----------|----------------| +| Work item | `lm-jht` (issue ID) | Include `[lm-jht]` in posts | +| Workstream | — | `--topic auth-system` | +| Subject prefix | — | `[lm-jht] ...` | +| Commit message | Include `lm-jht` | — | +| Status | `br update --status` | Post progress messages | + +### Pitfalls + +- **Don't manage tasks in Liquid Mail**—Beads is the single task queue +- **Always include `lm-xxx`** in posts to avoid ID drift across tools +- **Don't dump logs**—keep posts short and structured + +### Quick Reference + +| Need | Command | +|------|---------| +| What changed? 
| `liquid-mail notify` | +| Log progress | `liquid-mail post "[lm-xxx] ..."` | +| Before risky change | `liquid-mail post --decision "[lm-xxx] DECISION: ..."` | +| Find history | `liquid-mail query "search term"` | +| Prior decisions | `liquid-mail decisions --topic ` | +| Show config | `liquid-mail config` | +| List topics | `liquid-mail topics` | +| Rename topic | `liquid-mail topic rename ` | +| Merge topics | `liquid-mail topic merge --into ` | +| Polling watch | `liquid-mail watch [--topic ]` | + diff --git a/CLAUDE.md b/CLAUDE.md new file mode 100644 index 0000000..712c6cc --- /dev/null +++ b/CLAUDE.md @@ -0,0 +1,943 @@ +# CLAUDE.md + +## RULE 0 - THE FUNDAMENTAL OVERRIDE PREROGATIVE + +If I tell you to do something, even if it goes against what follows below, YOU MUST LISTEN TO ME. I AM IN CHARGE, NOT YOU. + +--- + +## RULE NUMBER 1: NO FILE DELETION + +**YOU ARE NEVER ALLOWED TO DELETE A FILE WITHOUT EXPRESS PERMISSION.** Even a new file that you yourself created, such as a test code file. You have a horrible track record of deleting critically important files or otherwise throwing away tons of expensive work. As a result, you have permanently lost any and all rights to determine that a file or folder should be deleted. + +**YOU MUST ALWAYS ASK AND RECEIVE CLEAR, WRITTEN PERMISSION BEFORE EVER DELETING A FILE OR FOLDER OF ANY KIND.** + +--- + +## Version Control: jj-First (CRITICAL) + +**ALWAYS prefer jj (Jujutsu) over git for all VCS operations.** This is a colocated repo with both `.jj/` and `.git/`. When instructed to use git by anything — even later in this file — use the best jj replacement commands instead. Only fall back to raw `git` for things jj cannot do (hooks, LFS, submodules, `gh` CLI interop). + +See `~/.claude/rules/jj-vcs/` for the full command reference, translation table, revsets, patterns, and recovery recipes. 
+ +--- + +## Irreversible Git & Filesystem Actions — DO NOT EVER BREAK GLASS + +> **Note:** Treat destructive commands as break-glass. If there's any doubt, stop and ask. + +1. **Absolutely forbidden commands:** `git reset --hard`, `git clean -fd`, `rm -rf`, or any command that can delete or overwrite code/data must never be run unless the user explicitly provides the exact command and states, in the same message, that they understand and want the irreversible consequences. +2. **No guessing:** If there is any uncertainty about what a command might delete or overwrite, stop immediately and ask the user for specific approval. "I think it's safe" is never acceptable. +3. **Safer alternatives first:** When cleanup or rollbacks are needed, request permission to use non-destructive options (`git status`, `git diff`, `git stash`, copying to backups) before ever considering a destructive command. +4. **Mandatory explicit plan:** Even after explicit user authorization, restate the command verbatim, list exactly what will be affected, and wait for a confirmation that your understanding is correct. Only then may you execute it—if anything remains ambiguous, refuse and escalate. +5. **Document the confirmation:** When running any approved destructive command, record (in the session notes / final response) the exact user text that authorized it, the command actually run, and the execution time. If that record is absent, the operation did not happen. + +--- + +## Toolchain: Rust & Cargo + +We only use **Cargo** in this project, NEVER any other package manager. + +- **Edition/toolchain:** Follow `rust-toolchain.toml` (if present). Do not assume stable vs nightly. +- **Dependencies:** Explicit versions for stability; keep the set minimal. +- **Configuration:** Cargo.toml only +- **Unsafe code:** Forbidden (`#![forbid(unsafe_code)]`) + +When writing Rust code, reference RUST_CLI_TOOLS_BEST_PRACTICES.md + +### Release Profile + +Use the release profile defined in `Cargo.toml`. 
If you need to change it, justify the +performance/size tradeoff and how it impacts determinism and cancellation behavior. + +--- + +## Code Editing Discipline + +### No Script-Based Changes + +**NEVER** run a script that processes/changes code files in this repo. Brittle regex-based transformations create far more problems than they solve. + +- **Always make code changes manually**, even when there are many instances +- For many simple changes: use parallel subagents +- For subtle/complex changes: do them methodically yourself + +### No File Proliferation + +If you want to change something or add a feature, **revise existing code files in place**. + +**NEVER** create variations like: +- `mainV2.rs` +- `main_improved.rs` +- `main_enhanced.rs` + +New files are reserved for **genuinely new functionality** that makes zero sense to include in any existing file. The bar for creating new files is **incredibly high**. + +--- + +## Backwards Compatibility + +We do not care about backwards compatibility—we're in early development with no users. We want to do things the **RIGHT** way with **NO TECH DEBT**. + +- Never create "compatibility shims" +- Never create wrapper functions for deprecated APIs +- Just fix the code directly + +--- + +## Compiler Checks (CRITICAL) + +**After any substantive code changes, you MUST verify no errors were introduced:** + +```bash +# Check for compiler errors and warnings +cargo check --all-targets + +# Check for clippy lints (pedantic + nursery are enabled) +cargo clippy --all-targets -- -D warnings + +# Verify formatting +cargo fmt --check +``` + +If you see errors, **carefully understand and resolve each issue**. Read sufficient context to fix them the RIGHT way. 
+ +--- + +## Testing + +### Unit & Property Tests + +```bash +# Run all tests +cargo test + +# Run with output +cargo test -- --nocapture +``` + +When adding or changing primitives, add tests that assert the core invariants: + +- no task leaks +- no obligation leaks +- losers are drained after races +- region close implies quiescence + +Prefer deterministic lab-runtime tests for concurrency-sensitive behavior. + +--- + +--- + +## Beads (br) — Dependency-Aware Issue Tracking + +Beads provides a lightweight, dependency-aware issue database and CLI (`br` / beads_rust) for selecting "ready work," setting priorities, and tracking status. It complements Liquid Mail's shared log for progress, decisions, and cross-session context. + +**Note:** `br` is non-invasive—it never executes git commands directly. You must run git commands manually after `br sync --flush-only`. + +### Conventions + +- **Single source of truth:** Beads for task status/priority/dependencies; Liquid Mail for conversation/decisions +- **Shared identifiers:** Include the Beads issue ID in posts (e.g., `[br-123] Topic validation rules`) +- **Decisions before action:** Post `DECISION:` messages before risky changes, not after + +### Typical Agent Flow + +1. **Pick ready work (Beads):** + ```bash + br ready --json # Choose highest priority, no blockers + ``` + +2. **Check context (Liquid Mail):** + ```bash + liquid-mail notify # See what changed since last session + liquid-mail query "br-123" # Find prior discussion on this issue + ``` + +3. **Work and log progress:** + ```bash + liquid-mail post --topic "[br-123] START: " + liquid-mail post "[br-123] FINDING: " + liquid-mail post --decision "[br-123] DECISION: " + ``` + +4. 
**Complete (Beads is authority):** + ```bash + br close br-123 --reason "Completed" + liquid-mail post "[br-123] Completed: " + ``` + +### Mapping Cheat Sheet + +| Concept | In Beads | In Liquid Mail | +|---------|----------|----------------| +| Work item | `br-###` (issue ID) | Include `[br-###]` in posts | +| Workstream | — | `--topic auth-system` | +| Subject prefix | — | `[br-###] ...` | +| Commit message | Include `br-###` | — | +| Status | `br update --status` | Post progress messages | + +--- + +## bv — Graph-Aware Triage Engine + +bv is a graph-aware triage engine for Beads projects (`.beads/beads.jsonl`). It computes PageRank, betweenness, critical path, cycles, HITS, eigenvector, and k-core metrics deterministically. + +**Scope boundary:** bv handles *what to work on* (triage, priority, planning). For agent-to-agent coordination (progress logging, decisions, cross-session context), use Liquid Mail. + +**CRITICAL: Use ONLY `--robot-*` flags. Bare `bv` launches an interactive TUI that blocks your session.** + +### The Workflow: Start With Triage + +**`bv --robot-triage` is your single entry point.** It returns: +- `quick_ref`: at-a-glance counts + top 3 picks +- `recommendations`: ranked actionable items with scores, reasons, unblock info +- `quick_wins`: low-effort high-impact items +- `blockers_to_clear`: items that unblock the most downstream work +- `project_health`: status/type/priority distributions, graph metrics +- `commands`: copy-paste shell commands for next steps + +```bash +bv --robot-triage # THE MEGA-COMMAND: start here +bv --robot-next # Minimal: just the single top pick + claim command +``` + +### Command Reference + +**Planning:** +| Command | Returns | +|---------|---------| +| `--robot-plan` | Parallel execution tracks with `unblocks` lists | +| `--robot-priority` | Priority misalignment detection with confidence | + +**Graph Analysis:** +| Command | Returns | +|---------|---------| +| `--robot-insights` | Full metrics: PageRank, 
betweenness, HITS, eigenvector, critical path, cycles, k-core, articulation points, slack | +| `--robot-label-health` | Per-label health: `health_level`, `velocity_score`, `staleness`, `blocked_count` | +| `--robot-label-flow` | Cross-label dependency: `flow_matrix`, `dependencies`, `bottleneck_labels` | +| `--robot-label-attention [--attention-limit=N]` | Attention-ranked labels | + +**History & Change Tracking:** +| Command | Returns | +|---------|---------| +| `--robot-history` | Bead-to-commit correlations | +| `--robot-diff --diff-since ` | Changes since ref: new/closed/modified issues, cycles | + +**Other:** +| Command | Returns | +|---------|---------| +| `--robot-burndown ` | Sprint burndown, scope changes, at-risk items | +| `--robot-forecast ` | ETA predictions with dependency-aware scheduling | +| `--robot-alerts` | Stale issues, blocking cascades, priority mismatches | +| `--robot-suggest` | Hygiene: duplicates, missing deps, label suggestions | +| `--robot-graph [--graph-format=json\|dot\|mermaid]` | Dependency graph export | +| `--export-graph ` | Interactive HTML visualization | + +### Scoping & Filtering + +```bash +bv --robot-plan --label backend # Scope to label's subgraph +bv --robot-insights --as-of HEAD~30 # Historical point-in-time +bv --recipe actionable --robot-plan # Pre-filter: ready to work +bv --recipe high-impact --robot-triage # Pre-filter: top PageRank +bv --robot-triage --robot-triage-by-track # Group by parallel work streams +bv --robot-triage --robot-triage-by-label # Group by domain +``` + +### Understanding Robot Output + +**All robot JSON includes:** +- `data_hash` — Fingerprint of source beads.jsonl +- `status` — Per-metric state: `computed|approx|timeout|skipped` + elapsed ms +- `as_of` / `as_of_commit` — Present when using `--as-of` + +**Two-phase analysis:** +- **Phase 1 (instant):** degree, topo sort, density +- **Phase 2 (async, 500ms timeout):** PageRank, betweenness, HITS, eigenvector, cycles + +### jq Quick Reference + 
+```bash +bv --robot-triage | jq '.quick_ref' # At-a-glance summary +bv --robot-triage | jq '.recommendations[0]' # Top recommendation +bv --robot-plan | jq '.plan.summary.highest_impact' # Best unblock target +bv --robot-insights | jq '.status' # Check metric readiness +bv --robot-insights | jq '.Cycles' # Circular deps (must fix!) +``` + +--- + +## UBS — Ultimate Bug Scanner + +**Golden Rule:** `ubs ` before every commit. Exit 0 = safe. Exit >0 = fix & re-run. + +### Commands + +```bash +ubs file.rs file2.rs # Specific files (< 1s) — USE THIS +ubs $(jj diff --name-only) # Changed files — before commit +ubs --only=rust,toml src/ # Language filter (3-5x faster) +ubs --ci --fail-on-warning . # CI mode — before PR +ubs . # Whole project (ignores target/, Cargo.lock) +``` + +### Output Format + +``` +⚠️ Category (N errors) + file.rs:42:5 – Issue description + 💡 Suggested fix +Exit code: 1 +``` + +Parse: `file:line:col` → location | 💡 → how to fix | Exit 0/1 → pass/fail + +### Fix Workflow + +1. Read finding → category + fix suggestion +2. Navigate `file:line:col` → view context +3. Verify real issue (not false positive) +4. Fix root cause (not symptom) +5. Re-run `ubs ` → exit 0 +6. Commit + +### Bug Severity + +- **Critical (always fix):** Memory safety, use-after-free, data races, SQL injection +- **Important (production):** Unwrap panics, resource leaks, overflow checks +- **Contextual (judgment):** TODO/FIXME, println! debugging + +--- + +## ast-grep vs ripgrep + +**Use `ast-grep` when structure matters.** It parses code and matches AST nodes, ignoring comments/strings, and can **safely rewrite** code. + +- Refactors/codemods: rename APIs, change import forms +- Policy checks: enforce patterns across a repo +- Editor/automation: LSP mode, `--json` output + +**Use `ripgrep` when text is enough.** Fastest way to grep literals/regex. 
+ +- Recon: find strings, TODOs, log lines, config values +- Pre-filter: narrow candidate files before ast-grep + +### Rule of Thumb + +- Need correctness or **applying changes** → `ast-grep` +- Need raw speed or **hunting text** → `rg` +- Often combine: `rg` to shortlist files, then `ast-grep` to match/modify + +### Rust Examples + +```bash +# Find structured code (ignores comments) +ast-grep run -l Rust -p 'fn $NAME($$$ARGS) -> $RET { $$$BODY }' + +# Find all unwrap() calls +ast-grep run -l Rust -p '$EXPR.unwrap()' + +# Quick textual hunt +rg -n 'println!' -t rust + +# Combine speed + precision +rg -l -t rust 'unwrap\(' | xargs ast-grep run -l Rust -p '$X.unwrap()' --json +``` + +--- + +## Morph Warp Grep — AI-Powered Code Search + +**Use `mcp__morph-mcp__warp_grep` for exploratory "how does X work?" questions.** An AI agent expands your query, greps the codebase, reads relevant files, and returns precise line ranges with full context. + +**Use `ripgrep` for targeted searches.** When you know exactly what you're looking for. + +**Use `ast-grep` for structural patterns.** When you need AST precision for matching/rewriting. + +### When to Use What + +| Scenario | Tool | Why | +|----------|------|-----| +| "How is pattern matching implemented?" | `warp_grep` | Exploratory; don't know where to start | +| "Where is the quick reject filter?" | `warp_grep` | Need to understand architecture | +| "Find all uses of `Regex::new`" | `ripgrep` | Targeted literal search | +| "Find files with `println!`" | `ripgrep` | Simple pattern | +| "Replace all `unwrap()` with `expect()`" | `ast-grep` | Structural refactor | + +### warp_grep Usage + +``` +mcp__morph-mcp__warp_grep( + repoPath: "/path/to/dcg", + query: "How does the safe pattern whitelist work?" +) +``` + +Returns structured results with file paths, line ranges, and extracted code snippets. 
+ +### Anti-Patterns + +- **Don't** use `warp_grep` to find a specific function name → use `ripgrep` +- **Don't** use `ripgrep` to understand "how does X work" → wastes time with manual reads +- **Don't** use `ripgrep` for codemods → risks collateral edits + + + +--- + +## Beads Workflow Integration + +This project uses [beads_viewer](https://github.com/Dicklesworthstone/beads_viewer) for issue tracking. Issues are stored in `.beads/` and tracked in version control. + +**Note:** `br` is non-invasive—it never executes VCS commands directly. You must commit manually after `br sync --flush-only`. + +### Essential Commands + +```bash +# View issues (launches TUI - avoid in automated sessions) +bv + +# CLI commands for agents (use these instead) +br ready # Show issues ready to work (no blockers) +br list --status=open # All open issues +br show # Full issue details with dependencies +br create --title="..." --type=task --priority=2 +br update --status=in_progress +br close --reason="Completed" +br close # Close multiple issues at once +br sync --flush-only # Export to JSONL (then: jj commit -m "Update beads") +``` + +### Workflow Pattern + +1. **Start**: Run `br ready` to find actionable work +2. **Claim**: Use `br update --status=in_progress` +3. **Work**: Implement the task +4. **Complete**: Use `br close ` +5. **Sync**: Run `br sync --flush-only`, then `git add .beads/ && git commit -m "Update beads"` + +### Key Concepts + +- **Dependencies**: Issues can block other issues. `br ready` shows only unblocked work. +- **Priority**: P0=critical, P1=high, P2=medium, P3=low, P4=backlog (use numbers, not words) +- **Types**: task, bug, feature, epic, question, docs +- **Blocking**: `br dep add ` to add dependencies + +### Session Protocol + +**Before ending any session, run this checklist (solo/lead only — workers skip VCS):** + +```bash +jj status # Check what changed +br sync --flush-only # Export beads to JSONL +jj commit -m "..." 
# Commit code and beads (jj auto-tracks all changes) +jj bookmark set -r @- # Point bookmark at committed work +jj git push -b # Push to remote +``` + +### Best Practices + +- Check `br ready` at session start to find available work +- Update status as you work (in_progress → closed) +- Create new issues with `br create` when you discover tasks +- Use descriptive titles and set appropriate priority/type +- Always run `br sync --flush-only` then commit before ending session (jj auto-tracks .beads/) + + + +## Landing the Plane (Session Completion) + +**When ending a work session**, you MUST complete ALL steps below. Work is NOT complete until push succeeds. + +**WHO RUNS THIS:** Solo agents run it themselves. In multi-agent sessions, ONLY the team lead runs this. Workers skip VCS entirely. + +**MANDATORY WORKFLOW:** + +1. **File issues for remaining work** - Create issues for anything that needs follow-up +2. **Run quality gates** (if code changed) - Tests, linters, builds +3. **Update issue status** - Close finished work, update in-progress items +4. **PUSH TO REMOTE** - This is MANDATORY: + ```bash + jj git fetch # Get latest remote state + jj rebase -d trunk() # Rebase onto latest trunk if needed + br sync --flush-only # Export beads to JSONL + jj commit -m "Update beads" # Commit (jj auto-tracks .beads/ changes) + jj bookmark set -r @- # Point bookmark at committed work + jj git push -b # Push to remote + jj log -r '' # Verify bookmark position + ``` +5. **Clean up** - Abandon empty orphan changes if any (`jj abandon `) +6. **Verify** - All changes committed AND pushed +7. 
**Hand off** - Provide context for next session
+
+**CRITICAL RULES:**
+- Work is NOT complete until `jj git push` succeeds
+- NEVER stop before pushing - that leaves work stranded locally
+- NEVER say "ready to push when you are" - YOU must push
+- If push fails, resolve and retry until it succeeds
+
+---
+
+## cass — Cross-Agent Session Search
+
+`cass` indexes prior agent conversations (Claude Code, Codex, Cursor, Gemini, ChatGPT, etc.) so we can reuse solved problems.
+
+**Rules:** Never run bare `cass` (TUI). Always use `--robot` or `--json`.
+
+### Examples
+
+```bash
+cass health
+cass search "async runtime" --robot --limit 5
+cass view /path/to/session.jsonl -n 42 --json
+cass expand /path/to/session.jsonl -n 42 -C 3 --json
+cass capabilities --json
+cass robot-docs guide
+```
+
+### Tips
+
+- Use `--fields minimal` for lean output
+- Filter by agent with `--agent`
+- Use `--days N` to limit to recent history
+
+stdout is data-only, stderr is diagnostics; exit code 0 means success.
+
+Treat cass as a way to avoid re-solving problems other agents already handled.
+
+---
+
+## Note for Codex/GPT-5.2
+
+You constantly bother me and stop working with concerned questions that look similar to this:
+
+```
+Unexpected changes (need guidance)
+
+- Working tree still shows edits I did not make in Cargo.toml, Cargo.lock, src/runtime.rs, src/scope.rs. Please advise whether to keep/commit/revert these before any further work. I did not touch them.
+
+Next steps (pick one)
+
+1. Decide how to handle the unrelated modified files above so we can resume cleanly.
+```
+
+NEVER EVER DO THAT AGAIN. The answer is literally ALWAYS the same: those are changes created by the potentially dozens of other agents working on the project at the same time. This is not only a common occurrence, it happens multiple times PER MINUTE. The way to deal with it is simple: you NEVER, under ANY CIRCUMSTANCE, stash, revert, overwrite, or otherwise disturb in ANY way the work of other agents. 
Just treat those changes identically to changes that you yourself made. Just fool yourself into thinking YOU made the changes and simply don't recall it for some reason. + +--- + +## Note on Built-in TODO Functionality + +Also, if I ask you to explicitly use your built-in TODO functionality, don't complain about this and say you need to use beads. You can use built-in TODOs if I tell you specifically to do so. Always comply with such orders. + +## TDD Requirements + +Test-first development is mandatory: +1. **RED** - Write failing test first +2. **GREEN** - Minimal implementation to pass +3. **REFACTOR** - Clean up while green + +## Key Patterns + +Find the simplest solution that meets all acceptance criteria. +Use third party libraries whenever there's a well-maintained, active, and widely adopted solution (for example, date-fns for TS date math) +Build extensible pieces of logic that can easily be integrated with other pieces. +DRY principles should be loosely held. +Architecture MUST be clear and well thought-out. Ask the user for clarification whenever ambiguity is discovered around architecture, or you think a better approach than planned exists. + +--- + +## Third-Party Library Usage + +If you aren't 100% sure how to use a third-party library, **SEARCH ONLINE** to find the latest documentation and mid-2025 best practices. + +--- + +## Gitlore Robot Mode + +The `lore` CLI has a robot mode optimized for AI agent consumption with compact JSON output, structured errors with machine-actionable recovery steps, meaningful exit codes, response timing metadata, field selection for token efficiency, and TTY auto-detection. + +### Activation + +```bash +# Explicit flag +lore --robot issues -n 10 + +# JSON shorthand (-J) +lore -J issues -n 10 + +# Auto-detection (when stdout is not a TTY) +lore issues | jq . 
+ +# Environment variable +LORE_ROBOT=1 lore issues +``` + +### Robot Mode Commands + +```bash +# List issues/MRs with JSON output +lore --robot issues -n 10 +lore --robot mrs -s opened + +# Filter issues by work item status (case-insensitive) +lore --robot issues --status "In progress" + +# List with field selection (reduces token usage ~60%) +lore --robot issues --fields minimal +lore --robot mrs --fields iid,title,state,draft + +# Show detailed entity info +lore --robot issues 123 +lore --robot mrs 456 -p group/repo + +# Count entities +lore --robot count issues +lore --robot count discussions --for mr + +# Search indexed documents +lore --robot search "authentication bug" + +# Check sync status +lore --robot status + +# Run full sync pipeline +lore --robot sync + +# Run sync without resource events +lore --robot sync --no-events + +# Surgical sync: specific entities by IID +lore --robot sync --issue 42 -p group/repo +lore --robot sync --mr 99 --mr 100 -p group/repo + +# Run ingestion only +lore --robot ingest issues + +# Trace why code was introduced +lore --robot trace src/main.rs -p group/repo + +# File-level MR history +lore --robot file-history src/auth/ -p group/repo + +# Manage cron-based auto-sync (Unix) +lore --robot cron status +lore --robot cron install --interval 15 + +# Token management +lore --robot token show + +# Check environment health +lore --robot doctor + +# Document and index statistics +lore --robot stats + +# Quick health pre-flight check (exit 0 = healthy, 19 = unhealthy) +lore --robot health + +# Generate searchable documents from ingested data +lore --robot generate-docs + +# Generate vector embeddings via Ollama +lore --robot embed + +# Agent self-discovery manifest (all commands, flags, exit codes, response schemas) +lore robot-docs + +# Version information +lore --robot version +``` + +### Response Format + +All commands return compact JSON with a uniform envelope and timing metadata: + +```json 
+{"ok":true,"data":{...},"meta":{"elapsed_ms":42}} +``` + +Errors return structured JSON to stderr with machine-actionable recovery steps: + +```json +{"error":{"code":"CONFIG_NOT_FOUND","message":"...","suggestion":"Run 'lore init'","actions":["lore init"]}} +``` + +The `actions` array contains executable shell commands for automated recovery. It is omitted when empty. + +### Field Selection + +The `--fields` flag on `issues` and `mrs` list commands controls which fields appear in the JSON response: + +```bash +lore -J issues --fields minimal # Preset: iid, title, state, updated_at_iso +lore -J mrs --fields iid,title,state,draft,labels # Custom field list +``` + +### Exit Codes + +| Code | Meaning | +|------|---------| +| 0 | Success | +| 1 | Internal error / not implemented | +| 2 | Usage error (invalid flags or arguments) | +| 3 | Config invalid | +| 4 | Token not set | +| 5 | GitLab auth failed | +| 6 | Resource not found | +| 7 | Rate limited | +| 8 | Network error | +| 9 | Database locked | +| 10 | Database error | +| 11 | Migration failed | +| 12 | I/O error | +| 13 | Transform error | +| 14 | Ollama unavailable | +| 15 | Ollama model not found | +| 16 | Embedding failed | +| 17 | Not found (entity does not exist) | +| 18 | Ambiguous match (use `-p` to specify project) | +| 19 | Health check failed | +| 20 | Config not found | + +### Configuration Precedence + +1. CLI flags (highest priority) +2. Environment variables (`LORE_ROBOT`, `GITLAB_TOKEN`, `LORE_CONFIG_PATH`) +3. Config file (`~/.config/lore/config.json`) +4. 
Built-in defaults (lowest priority) + +### Best Practices + +- Use `lore --robot` or `lore -J` for all agent interactions +- Check exit codes for error handling +- Parse JSON errors from stderr; use `actions` array for automated recovery +- Use `--fields minimal` to reduce token usage (~60% fewer tokens) +- Use `-n` / `--limit` to control response size +- Use `-q` / `--quiet` to suppress progress bars and non-essential output +- Use `--color never` in non-TTY automation for ANSI-free output +- Use `-v` / `-vv` / `-vvv` for increasing verbosity (debug/trace logging) +- Use `--log-format json` for machine-readable log output to stderr +- TTY detection handles piped commands automatically +- Use `lore --robot health` as a fast pre-flight check before queries +- Use `lore robot-docs` for response schema discovery +- The `-p` flag supports fuzzy project matching (suffix and substring) + +--- + +## Read/Write Split: lore vs glab + +| Operation | Tool | Why | +|-----------|------|-----| +| List issues/MRs | lore | Richer: includes status, discussions, closing MRs | +| View issue/MR detail | lore | Pre-joined discussions, work-item status | +| Search across entities | lore | FTS5 + vector hybrid search | +| Expert/workload analysis | lore | who command — no glab equivalent | +| Timeline reconstruction | lore | Chronological narrative — no glab equivalent | +| Create/update/close | glab | Write operations | +| Approve/merge MR | glab | Write operations | +| CI/CD pipelines | glab | Not in lore scope | + +````markdown +## UBS Quick Reference for AI Agents + +UBS stands for "Ultimate Bug Scanner": **The AI Coding Agent's Secret Weapon: Flagging Likely Bugs for Fixing Early On** + +**Install:** `curl -sSL https://raw.githubusercontent.com/Dicklesworthstone/ultimate_bug_scanner/master/install.sh | bash` + +**Golden Rule:** `ubs ` before every commit. Exit 0 = safe. Exit >0 = fix & re-run. 
+ +**Commands:** +```bash +ubs file.ts file2.py # Specific files (< 1s) — USE THIS +ubs $(git diff --name-only --cached) # Staged files — before commit +ubs --only=js,python src/ # Language filter (3-5x faster) +ubs --ci --fail-on-warning . # CI mode — before PR +ubs --help # Full command reference +ubs sessions --entries 1 # Tail the latest install session log +ubs . # Whole project (ignores things like .venv and node_modules automatically) +``` + +**Output Format:** +``` +⚠️ Category (N errors) + file.ts:42:5 – Issue description + 💡 Suggested fix +Exit code: 1 +``` +Parse: `file:line:col` → location | 💡 → how to fix | Exit 0/1 → pass/fail + +**Fix Workflow:** +1. Read finding → category + fix suggestion +2. Navigate `file:line:col` → view context +3. Verify real issue (not false positive) +4. Fix root cause (not symptom) +5. Re-run `ubs ` → exit 0 +6. Commit + +**Speed Critical:** Scope to changed files. `ubs src/file.ts` (< 1s) vs `ubs .` (30s). Never full scan for small edits. + +**Bug Severity:** +- **Critical** (always fix): Null safety, XSS/injection, async/await, memory leaks +- **Important** (production): Type narrowing, division-by-zero, resource leaks +- **Contextual** (judgment): TODO/FIXME, console logs + +**Anti-Patterns:** +- ❌ Ignore findings → ✅ Investigate each +- ❌ Full scan per edit → ✅ Scope to file +- ❌ Fix symptom (`if (x) { x.y }`) → ✅ Root cause (`x?.y`) +```` + + +## Integrating Liquid Mail with Beads + +**Beads** manages task status, priority, and dependencies (`br` CLI). +**Liquid Mail** provides the shared log—progress, decisions, and context that survives sessions. 
+ +### Conventions + +- **Single source of truth**: Beads owns task state; Liquid Mail owns conversation/decisions +- **Shared identifiers**: Include the Beads issue ID in posts (e.g., `[lm-jht] Topic validation rules`) +- **Decisions before action**: Post `DECISION:` messages before risky changes, not after +- **Identity in user updates**: In every user-facing reply, include your window-name (derived from `LIQUID_MAIL_WINDOW_ID`) so humans can distinguish concurrent agents. + +### Typical Flow + +**1. Pick ready work (Beads)** +```bash +br ready # Find available work (no blockers) +br show lm-jht # Review details +br update lm-jht --status in_progress +``` + +**2. Check context (Liquid Mail)** +```bash +liquid-mail notify # See what changed since last session +liquid-mail query "lm-jht" # Find prior discussion on this issue +``` + +**3. Work and log progress (topic required)** + +The `--topic` flag is required for your first post. After that, the topic is pinned to your window. +```bash +liquid-mail post --topic auth-system "[lm-jht] START: Reviewing current topic id patterns" +liquid-mail post "[lm-jht] FINDING: IDs like lm3189... are being used as topic names" +liquid-mail post "[lm-jht] NEXT: Add validation + rename guidance" +``` + +**4. Decisions before risky changes** +```bash +liquid-mail post --decision "[lm-jht] DECISION: Reject UUID-like topic names; require slugs" +# Then implement +``` + +### Decision Conflicts (Preflight) + +When you post a decision (via `--decision` or a `DECISION:` line), Liquid Mail can preflight-check for conflicts with prior decisions **in the same topic**. + +- If a conflict is detected, `liquid-mail post` fails with `DECISION_CONFLICT`. +- Review prior decisions: `liquid-mail decisions --topic `. +- If you intend to supersede the old decision, re-run with `--yes` and include what changed and why. + +**5. 
Complete (Beads is authority)** +```bash +br close lm-jht # Mark complete in Beads +liquid-mail post "[lm-jht] Completed: Topic validation shipped in 177267d" +``` + +### Posting Format + +- **Short** (5-15 lines, not walls of text) +- **Prefixed** with ALL-CAPS tags: `FINDING:`, `DECISION:`, `QUESTION:`, `NEXT:` +- **Include file paths** so others can jump in: `src/services/auth.ts:42` +- **Include issue IDs** in brackets: `[lm-jht]` +- **User-facing replies**: include `AGENT: ` near the top. Get it with `liquid-mail window name`. + +### Topics (Required) + +Liquid Mail organizes messages into **topics** (Honcho sessions). Topics are **soft boundaries**—search spans all topics by default. + +**Rule:** `liquid-mail post` requires a topic: +- Provide `--topic `, OR +- Post inside a window that already has a pinned topic. + +Topic names must be: +- 4–50 characters +- lowercase letters/numbers with hyphens +- start with a letter, end with a letter/number +- no consecutive hyphens +- not reserved (`all`, `new`, `help`, `merge`, `rename`, `list`) +- not UUID-like (`lm<32-hex>` or standard UUIDs) + +Good examples: `auth-system`, `db-system`, `dashboards` + +Commands: + +- **List topics (newest first)**: `liquid-mail topics` +- **Find context across topics**: `liquid-mail query "auth"`, then pick a topic name +- **Rename a topic (alias)**: `liquid-mail topic rename ` +- **Merge two topics into a new one**: `liquid-mail topic merge --into ` + +Examples (component topic + Beads id in the subject): +```bash +liquid-mail post --topic auth-system "[lm-jht] START: Investigating token refresh failures" +liquid-mail post --topic auth-system "[lm-jht] FINDING: refresh happens in middleware, not service layer" +liquid-mail post --topic auth-system --decision "[lm-jht] DECISION: Move refresh logic into AuthService" + +liquid-mail post --topic dashboards "[lm-1p5] START: Adding latency panel" +``` + +### Context Refresh (Before New Work / After Redirects) + +If you see redirect/merge 
messages, refresh context before acting: +```bash +liquid-mail notify +liquid-mail window status --json +liquid-mail summarize --topic +liquid-mail decisions --topic +``` + +If you discover a newer "canonical" topic (for example after a topic merge), switch to it explicitly: +```bash +liquid-mail post --topic "[lm-xxxx] CONTEXT: Switching topics (rename/merge)" +``` + +### Live Updates (Polling) + +Liquid Mail is pull-based by default (you run `notify`). For near-real-time updates: +```bash +liquid-mail watch --topic # watch a topic +liquid-mail watch # or watch your pinned topic +``` + +### Mapping Cheat-Sheet + +| Concept | In Beads | In Liquid Mail | +|---------|----------|----------------| +| Work item | `lm-jht` (issue ID) | Include `[lm-jht]` in posts | +| Workstream | — | `--topic auth-system` | +| Subject prefix | — | `[lm-jht] ...` | +| Commit message | Include `lm-jht` | — | +| Status | `br update --status` | Post progress messages | + +### Pitfalls + +- **Don't manage tasks in Liquid Mail**—Beads is the single task queue +- **Always include `lm-xxx`** in posts to avoid ID drift across tools +- **Don't dump logs**—keep posts short and structured + +### Quick Reference + +| Need | Command | +|------|---------| +| What changed? | `liquid-mail notify` | +| Log progress | `liquid-mail post "[lm-xxx] ..."` | +| Before risky change | `liquid-mail post --decision "[lm-xxx] DECISION: ..."` | +| Find history | `liquid-mail query "search term"` | +| Prior decisions | `liquid-mail decisions --topic ` | +| Show config | `liquid-mail config` | +| List topics | `liquid-mail topics` | +| Rename topic | `liquid-mail topic rename ` | +| Merge topics | `liquid-mail topic merge --into ` | +| Polling watch | `liquid-mail watch [--topic ]` | + diff --git a/crates/lore-tui/src/action.rs b/crates/lore-tui/src/action.rs deleted file mode 100644 index beb471d..0000000 --- a/crates/lore-tui/src/action.rs +++ /dev/null @@ -1,2835 +0,0 @@ -#![allow(dead_code)] - -//! 
Action layer — pure data-fetching functions for TUI screens. -//! -//! Actions query the local SQLite database and return data structs. -//! They never touch terminal state, never spawn tasks, and use injected -//! [`Clock`] for time calculations (deterministic tests). - -use anyhow::{Context, Result}; -use rusqlite::Connection; - -use crate::clock::Clock; -use crate::state::dashboard::{ - DashboardData, EntityCounts, LastSyncInfo, ProjectSyncInfo, RecentActivityItem, -}; -use crate::state::issue_list::{ - IssueCursor, IssueFilter, IssueListPage, IssueListRow, SortField, SortOrder, -}; -use crate::state::mr_detail::{FileChange, FileChangeType, MrDetailData, MrMetadata}; -use crate::state::mr_list::{MrCursor, MrFilter, MrListPage, MrListRow, MrSortField, MrSortOrder}; - -// --------------------------------------------------------------------------- -// Dashboard -// --------------------------------------------------------------------------- - -/// Fetch all data for the dashboard screen. -/// -/// Runs aggregation queries for entity counts, per-project sync freshness, -/// recent activity, and the last sync run summary. -pub fn fetch_dashboard(conn: &Connection, clock: &dyn Clock) -> Result { - let counts = fetch_entity_counts(conn)?; - let projects = fetch_project_sync_info(conn, clock)?; - let recent = fetch_recent_activity(conn, clock)?; - let last_sync = fetch_last_sync(conn)?; - - Ok(DashboardData { - counts, - projects, - recent, - last_sync, - }) -} - -/// Count all entities in the database. 
-fn fetch_entity_counts(conn: &Connection) -> Result { - let issues_total: i64 = conn - .query_row("SELECT COUNT(*) FROM issues", [], |r| r.get(0)) - .context("counting issues")?; - - let issues_open: i64 = conn - .query_row( - "SELECT COUNT(*) FROM issues WHERE state = 'opened'", - [], - |r| r.get(0), - ) - .context("counting open issues")?; - - let mrs_total: i64 = conn - .query_row("SELECT COUNT(*) FROM merge_requests", [], |r| r.get(0)) - .context("counting merge requests")?; - - let mrs_open: i64 = conn - .query_row( - "SELECT COUNT(*) FROM merge_requests WHERE state = 'opened'", - [], - |r| r.get(0), - ) - .context("counting open merge requests")?; - - let discussions: i64 = conn - .query_row("SELECT COUNT(*) FROM discussions", [], |r| r.get(0)) - .context("counting discussions")?; - - let notes_total: i64 = conn - .query_row("SELECT COUNT(*) FROM notes", [], |r| r.get(0)) - .context("counting notes")?; - - let notes_system: i64 = conn - .query_row("SELECT COUNT(*) FROM notes WHERE is_system = 1", [], |r| { - r.get(0) - }) - .context("counting system notes")?; - - let notes_system_pct = if notes_total > 0 { - u8::try_from(notes_system * 100 / notes_total).unwrap_or(100) - } else { - 0 - }; - - let documents: i64 = conn - .query_row("SELECT COUNT(*) FROM documents", [], |r| r.get(0)) - .context("counting documents")?; - - let embeddings: i64 = conn - .query_row("SELECT COUNT(*) FROM embedding_metadata", [], |r| r.get(0)) - .context("counting embeddings")?; - - #[allow(clippy::cast_sign_loss)] // SQL COUNT(*) is always >= 0 - Ok(EntityCounts { - issues_open: issues_open as u64, - issues_total: issues_total as u64, - mrs_open: mrs_open as u64, - mrs_total: mrs_total as u64, - discussions: discussions as u64, - notes_total: notes_total as u64, - notes_system_pct, - documents: documents as u64, - embeddings: embeddings as u64, - }) -} - -/// Per-project sync freshness based on the most recent sync_runs entry. 
-fn fetch_project_sync_info(conn: &Connection, clock: &dyn Clock) -> Result> { - let now_ms = clock.now_ms(); - - let mut stmt = conn - .prepare( - "SELECT p.path_with_namespace, - MAX(sr.finished_at) as last_sync_ms - FROM projects p - LEFT JOIN sync_runs sr ON sr.status = 'succeeded' - AND sr.finished_at IS NOT NULL - GROUP BY p.id - ORDER BY p.path_with_namespace", - ) - .context("preparing project sync query")?; - - let rows = stmt - .query_map([], |row| { - let path: String = row.get(0)?; - let last_sync_ms: Option = row.get(1)?; - Ok((path, last_sync_ms)) - }) - .context("querying project sync info")?; - - let mut result = Vec::new(); - for row in rows { - let (path, last_sync_ms) = row.context("reading project sync row")?; - let minutes_since_sync = match last_sync_ms { - Some(ms) => { - let elapsed_ms = now_ms.saturating_sub(ms); - u64::try_from(elapsed_ms / 60_000).unwrap_or(u64::MAX) - } - None => u64::MAX, // Never synced. - }; - result.push(ProjectSyncInfo { - path, - minutes_since_sync, - }); - } - - Ok(result) -} - -/// Recent activity: the 20 most recently updated issues and MRs. 
-fn fetch_recent_activity(conn: &Connection, clock: &dyn Clock) -> Result> { - let now_ms = clock.now_ms(); - - let mut stmt = conn - .prepare( - "SELECT entity_type, iid, title, state, updated_at FROM ( - SELECT 'issue' AS entity_type, iid, title, state, updated_at - FROM issues - UNION ALL - SELECT 'mr' AS entity_type, iid, title, state, updated_at - FROM merge_requests - ) - ORDER BY updated_at DESC - LIMIT 20", - ) - .context("preparing recent activity query")?; - - let rows = stmt - .query_map([], |row| { - let entity_type: String = row.get(0)?; - let iid: i64 = row.get(1)?; - let title: String = row.get::<_, Option>(2)?.unwrap_or_default(); - let state: String = row.get::<_, Option>(3)?.unwrap_or_default(); - let updated_at: i64 = row.get(4)?; - Ok((entity_type, iid, title, state, updated_at)) - }) - .context("querying recent activity")?; - - let mut result = Vec::new(); - for row in rows { - let (entity_type, iid, title, state, updated_at) = - row.context("reading recent activity row")?; - let elapsed_ms = now_ms.saturating_sub(updated_at); - let minutes_ago = u64::try_from(elapsed_ms / 60_000).unwrap_or(u64::MAX); - result.push(RecentActivityItem { - entity_type, - iid: iid as u64, - title, - state, - minutes_ago, - }); - } - - Ok(result) -} - -/// The most recent sync run summary. 
-fn fetch_last_sync(conn: &Connection) -> Result> { - let result = conn.query_row( - "SELECT status, finished_at, command, error - FROM sync_runs - ORDER BY id DESC - LIMIT 1", - [], - |row| { - Ok(LastSyncInfo { - status: row.get(0)?, - finished_at: row.get(1)?, - command: row.get(2)?, - error: row.get(3)?, - }) - }, - ); - - match result { - Ok(info) => Ok(Some(info)), - Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), - Err(e) => Err(e).context("querying last sync run"), - } -} - -// --------------------------------------------------------------------------- -// Issue List -// --------------------------------------------------------------------------- - -/// Page size for issue list queries. -const ISSUE_PAGE_SIZE: usize = 50; - -/// Fetch a page of issues matching the given filter and sort. -/// -/// Uses keyset pagination: when `cursor` is `Some`, returns rows after -/// (less-than for DESC, greater-than for ASC) the cursor boundary. -/// When `snapshot_fence` is `Some`, limits results to rows updated_at <= fence -/// to prevent newly synced items from shifting the page window. 
-pub fn fetch_issue_list( - conn: &Connection, - filter: &IssueFilter, - sort_field: SortField, - sort_order: SortOrder, - cursor: Option<&IssueCursor>, - snapshot_fence: Option, -) -> Result { - // -- Build dynamic WHERE conditions and params -------------------------- - let mut conditions: Vec = Vec::new(); - let mut params: Vec> = Vec::new(); - - // Filter: project_id - if let Some(pid) = filter.project_id { - conditions.push("i.project_id = ?".into()); - params.push(Box::new(pid)); - } - - // Filter: state - if let Some(ref state) = filter.state { - conditions.push("i.state = ?".into()); - params.push(Box::new(state.clone())); - } - - // Filter: author - if let Some(ref author) = filter.author { - conditions.push("i.author_username = ?".into()); - params.push(Box::new(author.clone())); - } - - // Filter: label (via join) - let label_join = if let Some(ref label) = filter.label { - conditions.push("fl.name = ?".into()); - params.push(Box::new(label.clone())); - "JOIN issue_labels fil ON fil.issue_id = i.id \ - JOIN labels fl ON fl.id = fil.label_id" - } else { - "" - }; - - // Filter: free_text (LIKE on title) - if let Some(ref text) = filter.free_text { - conditions.push("i.title LIKE ?".into()); - params.push(Box::new(format!("%{text}%"))); - } - - // Snapshot fence - if let Some(fence) = snapshot_fence { - conditions.push("i.updated_at <= ?".into()); - params.push(Box::new(fence)); - } - - // -- Count query (before cursor filter) --------------------------------- - let where_clause = if conditions.is_empty() { - String::new() - } else { - format!("WHERE {}", conditions.join(" AND ")) - }; - - let count_sql = format!( - "SELECT COUNT(DISTINCT i.id) FROM issues i \ - JOIN projects p ON p.id = i.project_id \ - {label_join} {where_clause}" - ); - let count_params: Vec<&dyn rusqlite::types::ToSql> = - params.iter().map(|b| b.as_ref()).collect(); - - let total_count: i64 = conn - .query_row(&count_sql, count_params.as_slice(), |r| r.get(0)) - .context("counting 
issues for list")?; - - // -- Keyset cursor condition ------------------------------------------- - let (sort_col, sort_dir) = sort_column_and_dir(sort_field, sort_order); - let cursor_op = if sort_dir == "DESC" { "<" } else { ">" }; - - if let Some(c) = cursor { - conditions.push(format!("({sort_col}, i.iid) {cursor_op} (?, ?)")); - params.push(Box::new(c.updated_at)); - params.push(Box::new(c.iid)); - } - - // -- Data query --------------------------------------------------------- - let where_clause_full = if conditions.is_empty() { - String::new() - } else { - format!("WHERE {}", conditions.join(" AND ")) - }; - - let data_sql = format!( - "SELECT p.path_with_namespace, i.iid, i.title, i.state, \ - i.author_username, i.updated_at, \ - GROUP_CONCAT(DISTINCT l.name) AS label_names \ - FROM issues i \ - JOIN projects p ON p.id = i.project_id \ - {label_join} \ - LEFT JOIN issue_labels il ON il.issue_id = i.id \ - LEFT JOIN labels l ON l.id = il.label_id \ - {where_clause_full} \ - GROUP BY i.id \ - ORDER BY {sort_col} {sort_dir}, i.iid {sort_dir} \ - LIMIT ?" 
- ); - - // +1 to detect if there's a next page - let fetch_limit = (ISSUE_PAGE_SIZE + 1) as i64; - params.push(Box::new(fetch_limit)); - - let all_params: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|b| b.as_ref()).collect(); - - let mut stmt = conn - .prepare(&data_sql) - .context("preparing issue list query")?; - - let rows_result = stmt - .query_map(all_params.as_slice(), |row| { - let project_path: String = row.get(0)?; - let iid: i64 = row.get(1)?; - let title: String = row.get::<_, Option>(2)?.unwrap_or_default(); - let state: String = row.get::<_, Option>(3)?.unwrap_or_default(); - let author: String = row.get::<_, Option>(4)?.unwrap_or_default(); - let updated_at: i64 = row.get(5)?; - let label_names: Option = row.get(6)?; - - let labels = label_names - .map(|s| s.split(',').map(String::from).collect()) - .unwrap_or_default(); - - Ok(IssueListRow { - project_path, - iid, - title, - state, - author, - labels, - updated_at, - }) - }) - .context("querying issue list")?; - - let mut rows: Vec = Vec::new(); - for row in rows_result { - rows.push(row.context("reading issue list row")?); - } - - // Determine next cursor from the last row (if we got more than page size) - let has_next = rows.len() > ISSUE_PAGE_SIZE; - if has_next { - rows.truncate(ISSUE_PAGE_SIZE); - } - - let next_cursor = if has_next { - rows.last().map(|r| IssueCursor { - updated_at: r.updated_at, - iid: r.iid, - }) - } else { - None - }; - - #[allow(clippy::cast_sign_loss)] - Ok(IssueListPage { - rows, - next_cursor, - total_count: total_count as u64, - }) -} - -/// Map sort field + order to SQL column name and direction keyword. 
-fn sort_column_and_dir(field: SortField, order: SortOrder) -> (&'static str, &'static str) { - let col = match field { - SortField::UpdatedAt => "i.updated_at", - SortField::Iid => "i.iid", - SortField::Title => "i.title", - SortField::State => "i.state", - SortField::Author => "i.author_username", - }; - let dir = match order { - SortOrder::Desc => "DESC", - SortOrder::Asc => "ASC", - }; - (col, dir) -} - -// --------------------------------------------------------------------------- -// MR List -// --------------------------------------------------------------------------- - -/// Page size for MR list queries. -const MR_PAGE_SIZE: usize = 50; - -/// Fetch a page of merge requests matching the given filter and sort. -/// -/// Uses keyset pagination and snapshot fence — same pattern as issues. -pub fn fetch_mr_list( - conn: &Connection, - filter: &MrFilter, - sort_field: MrSortField, - sort_order: MrSortOrder, - cursor: Option<&MrCursor>, - snapshot_fence: Option, -) -> Result { - // -- Build dynamic WHERE conditions and params -------------------------- - let mut conditions: Vec = Vec::new(); - let mut params: Vec> = Vec::new(); - - if let Some(pid) = filter.project_id { - conditions.push("m.project_id = ?".into()); - params.push(Box::new(pid)); - } - - if let Some(ref state) = filter.state { - conditions.push("m.state = ?".into()); - params.push(Box::new(state.clone())); - } - - if let Some(ref author) = filter.author { - conditions.push("m.author_username = ?".into()); - params.push(Box::new(author.clone())); - } - - if let Some(draft) = filter.draft { - conditions.push("m.draft = ?".into()); - params.push(Box::new(i64::from(draft))); - } - - if let Some(ref target) = filter.target_branch { - conditions.push("m.target_branch = ?".into()); - params.push(Box::new(target.clone())); - } - - if let Some(ref source) = filter.source_branch { - conditions.push("m.source_branch = ?".into()); - params.push(Box::new(source.clone())); - } - - // Filter: reviewer (via join 
on mr_reviewers) - let reviewer_join = if let Some(ref reviewer) = filter.reviewer { - conditions.push("rv.username = ?".into()); - params.push(Box::new(reviewer.clone())); - "JOIN mr_reviewers rv ON rv.merge_request_id = m.id" - } else { - "" - }; - - // Filter: label (via join on mr_labels + labels) - let label_join = if let Some(ref label) = filter.label { - conditions.push("fl.name = ?".into()); - params.push(Box::new(label.clone())); - "JOIN mr_labels fil ON fil.merge_request_id = m.id \ - JOIN labels fl ON fl.id = fil.label_id" - } else { - "" - }; - - // Filter: free_text (LIKE on title) - if let Some(ref text) = filter.free_text { - conditions.push("m.title LIKE ?".into()); - params.push(Box::new(format!("%{text}%"))); - } - - // Snapshot fence - if let Some(fence) = snapshot_fence { - conditions.push("m.updated_at <= ?".into()); - params.push(Box::new(fence)); - } - - // -- Count query (before cursor filter) --------------------------------- - let where_clause = if conditions.is_empty() { - String::new() - } else { - format!("WHERE {}", conditions.join(" AND ")) - }; - - let count_sql = format!( - "SELECT COUNT(DISTINCT m.id) FROM merge_requests m \ - JOIN projects p ON p.id = m.project_id \ - {reviewer_join} {label_join} {where_clause}" - ); - let count_params: Vec<&dyn rusqlite::types::ToSql> = - params.iter().map(|b| b.as_ref()).collect(); - - let total_count: i64 = conn - .query_row(&count_sql, count_params.as_slice(), |r| r.get(0)) - .context("counting MRs for list")?; - - // -- Keyset cursor condition ------------------------------------------- - let (sort_col, sort_dir) = mr_sort_column_and_dir(sort_field, sort_order); - let cursor_op = if sort_dir == "DESC" { "<" } else { ">" }; - - if let Some(c) = cursor { - conditions.push(format!("({sort_col}, m.iid) {cursor_op} (?, ?)")); - params.push(Box::new(c.updated_at)); - params.push(Box::new(c.iid)); - } - - // -- Data query --------------------------------------------------------- - let 
where_clause_full = if conditions.is_empty() { - String::new() - } else { - format!("WHERE {}", conditions.join(" AND ")) - }; - - let data_sql = format!( - "SELECT p.path_with_namespace, m.iid, m.title, m.state, \ - m.author_username, m.target_branch, m.updated_at, m.draft, \ - GROUP_CONCAT(DISTINCT l.name) AS label_names \ - FROM merge_requests m \ - JOIN projects p ON p.id = m.project_id \ - {reviewer_join} \ - {label_join} \ - LEFT JOIN mr_labels ml ON ml.merge_request_id = m.id \ - LEFT JOIN labels l ON l.id = ml.label_id \ - {where_clause_full} \ - GROUP BY m.id \ - ORDER BY {sort_col} {sort_dir}, m.iid {sort_dir} \ - LIMIT ?" - ); - - let fetch_limit = (MR_PAGE_SIZE + 1) as i64; - params.push(Box::new(fetch_limit)); - - let all_params: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|b| b.as_ref()).collect(); - - let mut stmt = conn.prepare(&data_sql).context("preparing MR list query")?; - - let rows_result = stmt - .query_map(all_params.as_slice(), |row| { - let project_path: String = row.get(0)?; - let iid: i64 = row.get(1)?; - let title: String = row.get::<_, Option>(2)?.unwrap_or_default(); - let state: String = row.get::<_, Option>(3)?.unwrap_or_default(); - let author: String = row.get::<_, Option>(4)?.unwrap_or_default(); - let target_branch: String = row.get::<_, Option>(5)?.unwrap_or_default(); - let updated_at: i64 = row.get(6)?; - let draft_int: i64 = row.get(7)?; - let label_names: Option = row.get(8)?; - - let labels = label_names - .map(|s| s.split(',').map(String::from).collect()) - .unwrap_or_default(); - - Ok(MrListRow { - project_path, - iid, - title, - state, - author, - target_branch, - labels, - updated_at, - draft: draft_int != 0, - }) - }) - .context("querying MR list")?; - - let mut rows: Vec = Vec::new(); - for row in rows_result { - rows.push(row.context("reading MR list row")?); - } - - let has_next = rows.len() > MR_PAGE_SIZE; - if has_next { - rows.truncate(MR_PAGE_SIZE); - } - - let next_cursor = if has_next { - 
rows.last().map(|r| MrCursor { - updated_at: r.updated_at, - iid: r.iid, - }) - } else { - None - }; - - #[allow(clippy::cast_sign_loss)] - Ok(MrListPage { - rows, - next_cursor, - total_count: total_count as u64, - }) -} - -/// Map MR sort field + order to SQL column name and direction keyword. -fn mr_sort_column_and_dir(field: MrSortField, order: MrSortOrder) -> (&'static str, &'static str) { - let col = match field { - MrSortField::UpdatedAt => "m.updated_at", - MrSortField::Iid => "m.iid", - MrSortField::Title => "m.title", - MrSortField::State => "m.state", - MrSortField::Author => "m.author_username", - MrSortField::TargetBranch => "m.target_branch", - }; - let dir = match order { - MrSortOrder::Desc => "DESC", - MrSortOrder::Asc => "ASC", - }; - (col, dir) -} - -// --------------------------------------------------------------------------- -// Issue Detail -// --------------------------------------------------------------------------- - -use crate::message::EntityKey; -use crate::state::issue_detail::{IssueDetailData, IssueMetadata}; -use crate::view::common::cross_ref::{CrossRef, CrossRefKind}; -use crate::view::common::discussion_tree::{DiscussionNode, NoteNode}; - -/// Fetch issue metadata and cross-references (Phase 1 load). -/// -/// Runs inside a single read transaction for snapshot consistency. -/// Returns metadata + cross-refs; discussions are loaded separately. -pub fn fetch_issue_detail(conn: &Connection, key: &EntityKey) -> Result { - let metadata = fetch_issue_metadata(conn, key)?; - let cross_refs = fetch_issue_cross_refs(conn, key)?; - Ok(IssueDetailData { - metadata, - cross_refs, - }) -} - -/// Fetch issue metadata from the local DB. 
-fn fetch_issue_metadata(conn: &Connection, key: &EntityKey) -> Result { - let row = conn - .query_row( - "SELECT i.iid, p.path_with_namespace, i.title, - COALESCE(i.description, ''), i.state, i.author_username, - COALESCE(i.milestone_title, ''), - i.due_date, i.created_at, i.updated_at, - COALESCE(i.web_url, ''), - (SELECT COUNT(*) FROM discussions d - WHERE d.issue_id = i.id AND d.noteable_type = 'Issue') - FROM issues i - JOIN projects p ON p.id = i.project_id - WHERE i.project_id = ?1 AND i.iid = ?2", - rusqlite::params![key.project_id, key.iid], - |row| { - Ok(IssueMetadata { - iid: row.get(0)?, - project_path: row.get(1)?, - title: row.get(2)?, - description: row.get(3)?, - state: row.get(4)?, - author: row.get::<_, Option>(5)?.unwrap_or_default(), - assignees: Vec::new(), // Fetched separately below. - labels: Vec::new(), // Fetched separately below. - milestone: { - let m: String = row.get(6)?; - if m.is_empty() { None } else { Some(m) } - }, - due_date: row.get(7)?, - created_at: row.get(8)?, - updated_at: row.get(9)?, - web_url: row.get(10)?, - discussion_count: row.get::<_, i64>(11)? as usize, - }) - }, - ) - .context("fetching issue metadata")?; - - // Fetch assignees. - let mut assignees_stmt = conn - .prepare("SELECT username FROM issue_assignees WHERE issue_id = (SELECT id FROM issues WHERE project_id = ?1 AND iid = ?2)") - .context("preparing assignees query")?; - let assignees: Vec = assignees_stmt - .query_map(rusqlite::params![key.project_id, key.iid], |r| r.get(0)) - .context("fetching assignees")? - .filter_map(Result::ok) - .collect(); - - // Fetch labels. - let mut labels_stmt = conn - .prepare( - "SELECT l.name FROM issue_labels il - JOIN labels l ON l.id = il.label_id - WHERE il.issue_id = (SELECT id FROM issues WHERE project_id = ?1 AND iid = ?2) - ORDER BY l.name", - ) - .context("preparing labels query")?; - let labels: Vec = labels_stmt - .query_map(rusqlite::params![key.project_id, key.iid], |r| r.get(0)) - .context("fetching labels")? 
- .filter_map(Result::ok) - .collect(); - - Ok(IssueMetadata { - assignees, - labels, - ..row - }) -} - -/// Fetch cross-references for an issue from the entity_references table. -fn fetch_issue_cross_refs(conn: &Connection, key: &EntityKey) -> Result> { - let mut stmt = conn - .prepare( - "SELECT er.reference_type, er.target_entity_type, er.target_entity_id, - er.target_entity_iid, er.target_project_path, - CASE - WHEN er.target_entity_type = 'issue' - THEN (SELECT title FROM issues WHERE id = er.target_entity_id) - WHEN er.target_entity_type = 'merge_request' - THEN (SELECT title FROM merge_requests WHERE id = er.target_entity_id) - ELSE NULL - END as entity_title, - CASE - WHEN er.target_entity_id IS NOT NULL - THEN (SELECT project_id FROM issues WHERE id = er.target_entity_id - UNION ALL - SELECT project_id FROM merge_requests WHERE id = er.target_entity_id - LIMIT 1) - ELSE NULL - END as target_project_id - FROM entity_references er - WHERE er.source_entity_type = 'issue' - AND er.source_entity_id = (SELECT id FROM issues WHERE project_id = ?1 AND iid = ?2) - ORDER BY er.reference_type, er.target_entity_iid", - ) - .context("preparing cross-ref query")?; - - let refs = stmt - .query_map(rusqlite::params![key.project_id, key.iid], |row| { - let ref_type: String = row.get(0)?; - let target_type: String = row.get(1)?; - let target_id: Option = row.get(2)?; - let target_iid: Option = row.get(3)?; - let target_path: Option = row.get(4)?; - let title: Option = row.get(5)?; - let target_project_id: Option = row.get(6)?; - - let kind = match (ref_type.as_str(), target_type.as_str()) { - ("closes", "merge_request") => CrossRefKind::ClosingMr, - ("related", "issue") => CrossRefKind::RelatedIssue, - _ => CrossRefKind::MentionedIn, - }; - - let iid = target_iid.unwrap_or(0); - let project_id = target_project_id.unwrap_or(key.project_id); - - let entity_key = match target_type.as_str() { - "merge_request" => EntityKey::mr(project_id, iid), - _ => 
EntityKey::issue(project_id, iid), - }; - - let label = title.unwrap_or_else(|| { - let prefix = if target_type == "merge_request" { - "!" - } else { - "#" - }; - let path = target_path.unwrap_or_default(); - if path.is_empty() { - format!("{prefix}{iid}") - } else { - format!("{path}{prefix}{iid}") - } - }); - - let navigable = target_id.is_some(); - - Ok(CrossRef { - kind, - entity_key, - label, - navigable, - }) - }) - .context("fetching cross-refs")? - .filter_map(Result::ok) - .collect(); - - Ok(refs) -} - -/// Fetch discussions for an issue (Phase 2 async load). -/// -/// Returns `DiscussionNode` tree suitable for the discussion tree widget. -pub fn fetch_issue_discussions(conn: &Connection, key: &EntityKey) -> Result> { - let issue_id: i64 = conn - .query_row( - "SELECT id FROM issues WHERE project_id = ?1 AND iid = ?2", - rusqlite::params![key.project_id, key.iid], - |r| r.get(0), - ) - .context("looking up issue id")?; - - let mut disc_stmt = conn - .prepare( - "SELECT d.id, d.gitlab_discussion_id, d.resolvable, d.resolved - FROM discussions d - WHERE d.issue_id = ?1 AND d.noteable_type = 'Issue' - ORDER BY d.first_note_at ASC, d.id ASC", - ) - .context("preparing discussions query")?; - - let mut note_stmt = conn - .prepare( - "SELECT n.author_username, n.body, n.created_at, n.is_system, - n.note_type, n.position_new_path, n.position_new_line - FROM notes n - WHERE n.discussion_id = ?1 - ORDER BY n.position ASC, n.created_at ASC", - ) - .context("preparing notes query")?; - - let discussions: Vec = disc_stmt - .query_map(rusqlite::params![issue_id], |row| { - Ok(( - row.get::<_, i64>(0)?, // id - row.get::<_, String>(1)?, // gitlab_discussion_id - row.get::<_, bool>(2)?, // resolvable - row.get::<_, bool>(3)?, // resolved - )) - }) - .context("fetching discussions")? 
- .filter_map(Result::ok) - .map(|(disc_db_id, discussion_id, resolvable, resolved)| { - let notes: Vec = note_stmt - .query_map(rusqlite::params![disc_db_id], |row| { - Ok(NoteNode { - author: row.get::<_, Option>(0)?.unwrap_or_default(), - body: row.get::<_, Option>(1)?.unwrap_or_default(), - created_at: row.get(2)?, - is_system: row.get(3)?, - is_diff_note: row.get::<_, Option>(4)?.as_deref() - == Some("DiffNote"), - diff_file_path: row.get(5)?, - diff_new_line: row.get(6)?, - }) - }) - .map(|rows| rows.filter_map(Result::ok).collect()) - .unwrap_or_default(); - - DiscussionNode { - discussion_id, - notes, - resolvable, - resolved, - } - }) - .collect(); - - Ok(discussions) -} - -// --------------------------------------------------------------------------- -// MR Detail -// --------------------------------------------------------------------------- - -/// Fetch MR metadata + cross-refs + file changes (Phase 1 composite). -pub fn fetch_mr_detail(conn: &Connection, key: &EntityKey) -> Result { - let metadata = fetch_mr_metadata(conn, key)?; - let cross_refs = fetch_mr_cross_refs(conn, key)?; - let file_changes = fetch_mr_file_changes(conn, key)?; - Ok(MrDetailData { - metadata, - cross_refs, - file_changes, - }) -} - -/// Fetch MR metadata from the local DB. 
-fn fetch_mr_metadata(conn: &Connection, key: &EntityKey) -> Result { - let row = conn - .query_row( - "SELECT m.iid, p.path_with_namespace, m.title, - COALESCE(m.description, ''), m.state, m.draft, - m.author_username, m.source_branch, m.target_branch, - COALESCE(m.detailed_merge_status, ''), - m.created_at, m.updated_at, m.merged_at, - COALESCE(m.web_url, ''), - (SELECT COUNT(*) FROM discussions d WHERE d.merge_request_id = m.id) AS disc_count, - (SELECT COUNT(*) FROM mr_file_changes fc WHERE fc.merge_request_id = m.id) AS fc_count - FROM merge_requests m - JOIN projects p ON p.id = m.project_id - WHERE m.project_id = ?1 AND m.iid = ?2", - rusqlite::params![key.project_id, key.iid], - |row| { - Ok(MrMetadata { - iid: row.get(0)?, - project_path: row.get(1)?, - title: row.get::<_, Option>(2)?.unwrap_or_default(), - description: row.get(3)?, - state: row.get::<_, Option>(4)?.unwrap_or_default(), - draft: row.get(5)?, - author: row.get::<_, Option>(6)?.unwrap_or_default(), - assignees: Vec::new(), - reviewers: Vec::new(), - labels: Vec::new(), - source_branch: row.get::<_, Option>(7)?.unwrap_or_default(), - target_branch: row.get::<_, Option>(8)?.unwrap_or_default(), - merge_status: row.get(9)?, - created_at: row.get(10)?, - updated_at: row.get(11)?, - merged_at: row.get(12)?, - web_url: row.get(13)?, - discussion_count: row.get::<_, i64>(14)? as usize, - file_change_count: row.get::<_, i64>(15)? as usize, - }) - }, - ) - .context("fetching MR metadata")?; - - // Fetch assignees. - let mut assignees_stmt = conn - .prepare( - "SELECT username FROM mr_assignees - WHERE merge_request_id = ( - SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2 - ) - ORDER BY username", - ) - .context("preparing assignees query")?; - let assignees: Vec = assignees_stmt - .query_map(rusqlite::params![key.project_id, key.iid], |row| row.get(0)) - .context("fetching assignees")? - .filter_map(Result::ok) - .collect(); - - // Fetch reviewers. 
- let mut reviewers_stmt = conn - .prepare( - "SELECT username FROM mr_reviewers - WHERE merge_request_id = ( - SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2 - ) - ORDER BY username", - ) - .context("preparing reviewers query")?; - let reviewers: Vec = reviewers_stmt - .query_map(rusqlite::params![key.project_id, key.iid], |row| row.get(0)) - .context("fetching reviewers")? - .filter_map(Result::ok) - .collect(); - - // Fetch labels. - let mut labels_stmt = conn - .prepare( - "SELECT l.name FROM mr_labels ml - JOIN labels l ON ml.label_id = l.id - WHERE ml.merge_request_id = ( - SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2 - ) - ORDER BY l.name", - ) - .context("preparing labels query")?; - let labels: Vec = labels_stmt - .query_map(rusqlite::params![key.project_id, key.iid], |row| row.get(0)) - .context("fetching labels")? - .filter_map(Result::ok) - .collect(); - - let mut result = row; - result.assignees = assignees; - result.reviewers = reviewers; - result.labels = labels; - Ok(result) -} - -/// Fetch cross-references for an MR. 
-fn fetch_mr_cross_refs(conn: &Connection, key: &EntityKey) -> Result> { - let mut stmt = conn - .prepare( - "SELECT er.reference_type, er.target_entity_type, - er.target_entity_id, er.target_entity_iid, - er.target_project_path, - CASE - WHEN er.target_entity_type = 'issue' - THEN (SELECT title FROM issues WHERE id = er.target_entity_id) - WHEN er.target_entity_type = 'merge_request' - THEN (SELECT title FROM merge_requests WHERE id = er.target_entity_id) - ELSE NULL - END as entity_title, - CASE - WHEN er.target_entity_id IS NOT NULL - THEN (SELECT project_id FROM issues WHERE id = er.target_entity_id - UNION ALL - SELECT project_id FROM merge_requests WHERE id = er.target_entity_id - LIMIT 1) - ELSE NULL - END as target_project_id - FROM entity_references er - WHERE er.source_entity_type = 'merge_request' - AND er.source_entity_id = (SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2) - ORDER BY er.reference_type, er.target_entity_iid", - ) - .context("preparing MR cross-refs query")?; - - let refs: Vec = stmt - .query_map(rusqlite::params![key.project_id, key.iid], |row| { - let ref_type: String = row.get(0)?; - let target_type: String = row.get(1)?; - let _target_id: Option = row.get(2)?; - let target_iid: Option = row.get(3)?; - let target_path: Option = row.get(4)?; - let title: Option = row.get(5)?; - let target_project_id: Option = row.get(6)?; - - let kind = match (ref_type.as_str(), target_type.as_str()) { - ("closes", "issue") => CrossRefKind::ClosingMr, - ("related", "issue") => CrossRefKind::RelatedIssue, - _ => CrossRefKind::MentionedIn, - }; - - let iid = target_iid.unwrap_or(0); - let project_id = target_project_id.unwrap_or(key.project_id); - - let entity_key = match target_type.as_str() { - "merge_request" => EntityKey::mr(project_id, iid), - _ => EntityKey::issue(project_id, iid), - }; - - let label = title.unwrap_or_else(|| { - let prefix = if target_type == "merge_request" { - "!" 
- } else { - "#" - }; - let path = target_path.clone().unwrap_or_default(); - if path.is_empty() { - format!("{prefix}{iid}") - } else { - format!("{path}{prefix}{iid}") - } - }); - - Ok(CrossRef { - kind, - entity_key, - label, - navigable: target_project_id.is_some(), - }) - }) - .context("fetching MR cross-refs")? - .filter_map(Result::ok) - .collect(); - - Ok(refs) -} - -/// Fetch file changes for an MR. -fn fetch_mr_file_changes(conn: &Connection, key: &EntityKey) -> Result> { - let mut stmt = conn - .prepare( - "SELECT fc.old_path, fc.new_path, fc.change_type - FROM mr_file_changes fc - WHERE fc.merge_request_id = ( - SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2 - ) - ORDER BY fc.new_path", - ) - .context("preparing file changes query")?; - - let changes: Vec = stmt - .query_map(rusqlite::params![key.project_id, key.iid], |row| { - Ok(FileChange { - old_path: row.get(0)?, - new_path: row.get(1)?, - change_type: FileChangeType::parse_db(&row.get::<_, String>(2).unwrap_or_default()), - }) - }) - .context("fetching file changes")? - .filter_map(Result::ok) - .collect(); - - Ok(changes) -} - -/// Fetch discussions for an MR (Phase 2 async load). 
-pub fn fetch_mr_discussions(conn: &Connection, key: &EntityKey) -> Result> { - let mr_id: i64 = conn - .query_row( - "SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2", - rusqlite::params![key.project_id, key.iid], - |row| row.get(0), - ) - .context("looking up MR id for discussions")?; - - let mut disc_stmt = conn - .prepare( - "SELECT d.id, d.gitlab_discussion_id, d.resolvable, d.resolved - FROM discussions d - WHERE d.merge_request_id = ?1 - ORDER BY d.first_note_at ASC", - ) - .context("preparing MR discussions query")?; - - let mut note_stmt = conn - .prepare( - "SELECT n.author_username, n.body, n.created_at, n.is_system, - n.note_type, n.position_new_path, n.position_new_line - FROM notes n - WHERE n.discussion_id = ?1 - ORDER BY n.position ASC, n.created_at ASC", - ) - .context("preparing MR notes query")?; - - let discussions: Vec = disc_stmt - .query_map(rusqlite::params![mr_id], |row| { - Ok(( - row.get::<_, i64>(0)?, // id - row.get::<_, String>(1)?, // gitlab_discussion_id - row.get::<_, bool>(2)?, // resolvable - row.get::<_, bool>(3)?, // resolved - )) - }) - .context("fetching MR discussions")? 
- .filter_map(Result::ok) - .map(|(disc_db_id, discussion_id, resolvable, resolved)| { - let notes: Vec = note_stmt - .query_map(rusqlite::params![disc_db_id], |row| { - Ok(NoteNode { - author: row.get::<_, Option>(0)?.unwrap_or_default(), - body: row.get::<_, Option>(1)?.unwrap_or_default(), - created_at: row.get(2)?, - is_system: row.get(3)?, - is_diff_note: row.get::<_, Option>(4)?.as_deref() - == Some("DiffNote"), - diff_file_path: row.get(5)?, - diff_new_line: row.get(6)?, - }) - }) - .map(|rows| rows.filter_map(Result::ok).collect()) - .unwrap_or_default(); - - DiscussionNode { - discussion_id, - notes, - resolvable, - resolved, - } - }) - .collect(); - - Ok(discussions) -} - -// --------------------------------------------------------------------------- -// Tests -// --------------------------------------------------------------------------- - -#[cfg(test)] -mod tests { - use super::*; - use crate::clock::FakeClock; - - /// Create the minimal schema needed for dashboard queries. - fn create_dashboard_schema(conn: &Connection) { - conn.execute_batch( - " - CREATE TABLE projects ( - id INTEGER PRIMARY KEY, - gitlab_project_id INTEGER UNIQUE NOT NULL, - path_with_namespace TEXT NOT NULL - ); - CREATE TABLE issues ( - id INTEGER PRIMARY KEY, - gitlab_id INTEGER UNIQUE NOT NULL, - project_id INTEGER NOT NULL, - iid INTEGER NOT NULL, - title TEXT, - state TEXT NOT NULL, - author_username TEXT, - created_at INTEGER NOT NULL, - updated_at INTEGER NOT NULL, - last_seen_at INTEGER NOT NULL - ); - CREATE TABLE merge_requests ( - id INTEGER PRIMARY KEY, - gitlab_id INTEGER UNIQUE NOT NULL, - project_id INTEGER NOT NULL, - iid INTEGER NOT NULL, - title TEXT, - state TEXT, - author_username TEXT, - created_at INTEGER, - updated_at INTEGER, - last_seen_at INTEGER NOT NULL - ); - CREATE TABLE discussions ( - id INTEGER PRIMARY KEY, - gitlab_discussion_id TEXT NOT NULL, - project_id INTEGER NOT NULL, - noteable_type TEXT NOT NULL, - last_seen_at INTEGER NOT NULL - ); - 
            CREATE TABLE notes (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                discussion_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                is_system INTEGER NOT NULL DEFAULT 0,
                author_username TEXT,
                body TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE documents (
                id INTEGER PRIMARY KEY,
                source_type TEXT NOT NULL,
                source_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                content_text TEXT NOT NULL,
                content_hash TEXT NOT NULL
            );
            CREATE TABLE embedding_metadata (
                document_id INTEGER NOT NULL,
                chunk_index INTEGER NOT NULL DEFAULT 0,
                model TEXT NOT NULL,
                dims INTEGER NOT NULL,
                document_hash TEXT NOT NULL,
                chunk_hash TEXT NOT NULL,
                created_at INTEGER NOT NULL,
                PRIMARY KEY(document_id, chunk_index)
            );
            CREATE TABLE sync_runs (
                id INTEGER PRIMARY KEY,
                started_at INTEGER NOT NULL,
                heartbeat_at INTEGER NOT NULL,
                finished_at INTEGER,
                status TEXT NOT NULL,
                command TEXT NOT NULL,
                error TEXT
            );
            ",
        )
        .expect("create dashboard schema");
    }

    /// Insert a test issue.
    // gitlab_id is derived as iid * 100 to keep the UNIQUE constraint
    // satisfied across repeated inserts with distinct iids.
    fn insert_issue(conn: &Connection, iid: i64, state: &str, updated_at: i64) {
        conn.execute(
            "INSERT INTO issues (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at)
             VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)",
            rusqlite::params![iid * 100, iid, format!("Issue {iid}"), state, updated_at],
        )
        .expect("insert issue");
    }

    /// Insert a test MR.
    // The +50 offset keeps MR gitlab_ids disjoint from issue gitlab_ids.
    fn insert_mr(conn: &Connection, iid: i64, state: &str, updated_at: i64) {
        conn.execute(
            "INSERT INTO merge_requests (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at)
             VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)",
            rusqlite::params![iid * 100 + 50, iid, format!("MR {iid}"), state, updated_at],
        )
        .expect("insert mr");
    }

    // -----------------------------------------------------------------------
    // TDD Anchor: entity counts
    // -----------------------------------------------------------------------

    #[test]
    fn test_fetch_dashboard_counts() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);

        // 5 issues: 3 open, 2 closed.
        let now_ms = 1_700_000_000_000_i64;
        insert_issue(&conn, 1, "opened", now_ms - 10_000);
        insert_issue(&conn, 2, "opened", now_ms - 20_000);
        insert_issue(&conn, 3, "opened", now_ms - 30_000);
        insert_issue(&conn, 4, "closed", now_ms - 40_000);
        insert_issue(&conn, 5, "closed", now_ms - 50_000);

        let clock = FakeClock::from_ms(now_ms);
        let data = fetch_dashboard(&conn, &clock).unwrap();

        assert_eq!(data.counts.issues_open, 3);
        assert_eq!(data.counts.issues_total, 5);
    }

    #[test]
    fn test_fetch_dashboard_mr_counts() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);

        // 4 MRs: 2 opened, 1 merged, 1 closed -> mrs_open counts only "opened".
        let now_ms = 1_700_000_000_000_i64;
        insert_mr(&conn, 1, "opened", now_ms);
        insert_mr(&conn, 2, "merged", now_ms);
        insert_mr(&conn, 3, "opened", now_ms);
        insert_mr(&conn, 4, "closed", now_ms);

        let clock = FakeClock::from_ms(now_ms);
        let data = fetch_dashboard(&conn, &clock).unwrap();

        assert_eq!(data.counts.mrs_open, 2);
        assert_eq!(data.counts.mrs_total, 4);
    }

    #[test]
    fn test_fetch_dashboard_empty_database() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);

        let clock = FakeClock::from_ms(1_700_000_000_000);
        let data = fetch_dashboard(&conn, &clock).unwrap();

        // Everything zero/empty on a fresh schema — no division-by-zero in
        // the system-note percentage either.
        assert_eq!(data.counts.issues_open, 0);
        assert_eq!(data.counts.issues_total, 0);
        assert_eq!(data.counts.mrs_open, 0);
        assert_eq!(data.counts.mrs_total, 0);
        assert_eq!(data.counts.notes_system_pct, 0);
        assert!(data.projects.is_empty());
        assert!(data.recent.is_empty());
        assert!(data.last_sync.is_none());
    }

    #[test]
    fn test_fetch_dashboard_notes_system_pct() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);

        // 4 notes: 1 system, 3 user -> 25% system.
        for i in 0..4 {
            conn.execute(
                "INSERT INTO notes (gitlab_id, discussion_id, project_id, is_system, created_at, updated_at, last_seen_at)
                 VALUES (?1, 1, 1, ?2, 1000, 1000, 1000)",
                rusqlite::params![i, if i == 0 { 1 } else { 0 }],
            )
            .unwrap();
        }

        let clock = FakeClock::from_ms(1_700_000_000_000);
        let data = fetch_dashboard(&conn, &clock).unwrap();

        assert_eq!(data.counts.notes_total, 4);
        assert_eq!(data.counts.notes_system_pct, 25);
    }

    #[test]
    fn test_fetch_dashboard_project_sync_info() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);

        conn.execute(
            "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'group/alpha')",
            [],
        )
        .unwrap();
        conn.execute(
            "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (2, 'group/beta')",
            [],
        )
        .unwrap();

        // Sync ran 30 minutes ago. sync_runs is global (no project_id),
        // so all projects see the same last-sync time.
        let now_ms = 1_700_000_000_000_i64;
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command)
             VALUES (?1, ?1, ?2, 'succeeded', 'sync')",
            [now_ms - 30 * 60_000, now_ms - 30 * 60_000],
        )
        .unwrap();

        let clock = FakeClock::from_ms(now_ms);
        let data = fetch_dashboard(&conn, &clock).unwrap();

        assert_eq!(data.projects.len(), 2);
        assert_eq!(data.projects[0].path, "group/alpha");
        assert_eq!(data.projects[0].minutes_since_sync, 30);
        assert_eq!(data.projects[1].path, "group/beta");
        assert_eq!(data.projects[1].minutes_since_sync, 30); // Same: sync_runs is global.
    }

    #[test]
    fn test_fetch_dashboard_recent_activity_ordered() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);

        // Interleave issues and MRs; the feed must come back newest-first.
        let now_ms = 1_700_000_000_000_i64;
        insert_issue(&conn, 1, "opened", now_ms - 60_000); // 1 min ago
        insert_mr(&conn, 1, "merged", now_ms - 120_000); // 2 min ago
        insert_issue(&conn, 2, "closed", now_ms - 180_000); // 3 min ago

        let clock = FakeClock::from_ms(now_ms);
        let data = fetch_dashboard(&conn, &clock).unwrap();

        assert_eq!(data.recent.len(), 3);
        assert_eq!(data.recent[0].entity_type, "issue");
        assert_eq!(data.recent[0].iid, 1);
        assert_eq!(data.recent[0].minutes_ago, 1);
        assert_eq!(data.recent[1].entity_type, "mr");
        assert_eq!(data.recent[1].minutes_ago, 2);
        assert_eq!(data.recent[2].entity_type, "issue");
        assert_eq!(data.recent[2].minutes_ago, 3);
    }

    #[test]
    fn test_fetch_dashboard_last_sync() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);

        // Two runs: an older failed one, then a newer successful one.
        // last_sync must reflect the most recent run only.
        let now_ms = 1_700_000_000_000_i64;
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command, error)
             VALUES (?1, ?1, ?2, 'failed', 'sync', 'network timeout')",
            [now_ms - 60_000, now_ms - 50_000],
        )
        .unwrap();
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command)
             VALUES (?1, ?1, ?2, 'succeeded', 'sync')",
            [now_ms - 30_000, now_ms - 20_000],
        )
        .unwrap();

        let clock = FakeClock::from_ms(now_ms);
        let data = fetch_dashboard(&conn, &clock).unwrap();

        let sync = data.last_sync.unwrap();
        assert_eq!(sync.status, "succeeded");
        assert_eq!(sync.command, "sync");
        assert!(sync.error.is_none());
    }

    // -----------------------------------------------------------------------
    // Issue list
    // -----------------------------------------------------------------------

    /// Extended schema that adds labels + issue_labels for issue list tests.
    fn create_issue_list_schema(conn: &Connection) {
        create_dashboard_schema(conn);
        conn.execute_batch(
            "
            CREATE TABLE labels (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER,
                project_id INTEGER NOT NULL,
                name TEXT NOT NULL,
                color TEXT,
                description TEXT
            );
            CREATE TABLE issue_labels (
                issue_id INTEGER NOT NULL,
                label_id INTEGER NOT NULL,
                PRIMARY KEY(issue_id, label_id)
            );
            ",
        )
        .expect("create issue list schema");
    }

    /// Insert a test issue with an author.
    fn insert_issue_full(conn: &Connection, iid: i64, state: &str, author: &str, updated_at: i64) {
        conn.execute(
            "INSERT INTO issues (gitlab_id, project_id, iid, title, state, author_username, created_at, updated_at, last_seen_at)
             VALUES (?1, 1, ?2, ?3, ?4, ?5, ?6, ?6, ?6)",
            rusqlite::params![
                iid * 100,
                iid,
                format!("Issue {iid}"),
                state,
                author,
                updated_at
            ],
        )
        .expect("insert issue full");
    }

    /// Attach a label to an issue.
    // NOTE(review): `labels.name` has no UNIQUE constraint, so the
    // INSERT OR IGNORE below never actually conflicts — attaching the same
    // label name twice creates a duplicate labels row, and the SELECT picks
    // the first. Tests still pass, but confirm this is intended.
    fn attach_label(conn: &Connection, issue_iid: i64, label_name: &str) {
        // Find issue id.
        let issue_id: i64 = conn
            .query_row("SELECT id FROM issues WHERE iid = ?", [issue_iid], |r| {
                r.get(0)
            })
            .expect("find issue");

        // Ensure label exists.
        conn.execute(
            "INSERT OR IGNORE INTO labels (project_id, name) VALUES (1, ?)",
            [label_name],
        )
        .expect("insert label");
        let label_id: i64 = conn
            .query_row("SELECT id FROM labels WHERE name = ?", [label_name], |r| {
                r.get(0)
            })
            .expect("find label");

        conn.execute(
            "INSERT INTO issue_labels (issue_id, label_id) VALUES (?, ?)",
            [issue_id, label_id],
        )
        .expect("attach label");
    }

    // Shared fixture: 5 issues (3 open / 2 closed), three authors, four
    // label attachments. updated_at decreases with iid, so iid 1 is newest.
    fn setup_issue_list_data(conn: &Connection) {
        let base = 1_700_000_000_000_i64;
        conn.execute(
            "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'group/project')",
            [],
        )
        .unwrap();

        insert_issue_full(conn, 1, "opened", "alice", base - 10_000);
        insert_issue_full(conn, 2, "opened", "bob", base - 20_000);
        insert_issue_full(conn, 3, "closed", "alice", base - 30_000);
        insert_issue_full(conn, 4, "opened", "charlie", base - 40_000);
        insert_issue_full(conn, 5, "closed", "bob", base - 50_000);

        attach_label(conn, 1, "bug");
        attach_label(conn, 1, "critical");
        attach_label(conn, 2, "feature");
        attach_label(conn, 4, "bug");
    }

    #[test]
    fn test_fetch_issue_list_basic() {
        let conn = Connection::open_in_memory().unwrap();
        create_issue_list_schema(&conn);
        setup_issue_list_data(&conn);

        let filter = IssueFilter::default();
        let page = fetch_issue_list(
            &conn,
            &filter,
            SortField::UpdatedAt,
            SortOrder::Desc,
            None,
            None,
        )
        .unwrap();

        assert_eq!(page.total_count, 5);
        assert_eq!(page.rows.len(), 5);
        // Newest first.
        assert_eq!(page.rows[0].iid, 1);
        assert_eq!(page.rows[4].iid, 5);
        assert!(page.next_cursor.is_none());
    }

    #[test]
    fn test_fetch_issue_list_filter_state() {
        let conn = Connection::open_in_memory().unwrap();
        create_issue_list_schema(&conn);
        setup_issue_list_data(&conn);

        let filter = IssueFilter {
            state: Some("opened".into()),
            ..Default::default()
        };
        let page = fetch_issue_list(
            &conn,
            &filter,
            SortField::UpdatedAt,
            SortOrder::Desc,
            None,
            None,
        )
        .unwrap();

        assert_eq!(page.total_count, 3);
        assert_eq!(page.rows.len(), 3);
        assert!(page.rows.iter().all(|r| r.state == "opened"));
    }

    #[test]
    fn test_fetch_issue_list_filter_author() {
        let conn = Connection::open_in_memory().unwrap();
        create_issue_list_schema(&conn);
        setup_issue_list_data(&conn);

        let filter = IssueFilter {
            author: Some("alice".into()),
            ..Default::default()
        };
        let page = fetch_issue_list(
            &conn,
            &filter,
            SortField::UpdatedAt,
            SortOrder::Desc,
            None,
            None,
        )
        .unwrap();

        assert_eq!(page.total_count, 2);
        assert_eq!(page.rows.len(), 2);
        assert!(page.rows.iter().all(|r| r.author == "alice"));
    }

    #[test]
    fn test_fetch_issue_list_filter_label() {
        let conn = Connection::open_in_memory().unwrap();
        create_issue_list_schema(&conn);
        setup_issue_list_data(&conn);

        let filter = IssueFilter {
            label: Some("bug".into()),
            ..Default::default()
        };
        let page = fetch_issue_list(
            &conn,
            &filter,
            SortField::UpdatedAt,
            SortOrder::Desc,
            None,
            None,
        )
        .unwrap();

        assert_eq!(page.total_count, 2); // issues 1 and 4
        assert_eq!(page.rows.len(), 2);
    }

    #[test]
    fn test_fetch_issue_list_labels_aggregated() {
        let conn = Connection::open_in_memory().unwrap();
        create_issue_list_schema(&conn);
        setup_issue_list_data(&conn);

        let filter = IssueFilter::default();
        let page = fetch_issue_list(
            &conn,
            &filter,
            SortField::UpdatedAt,
            SortOrder::Desc,
            None,
            None,
        )
        .unwrap();

        // Issue 1 has labels "bug" and "critical".
        let issue1 = page.rows.iter().find(|r| r.iid == 1).unwrap();
        assert_eq!(issue1.labels.len(), 2);
        assert!(issue1.labels.contains(&"bug".to_string()));
        assert!(issue1.labels.contains(&"critical".to_string()));

        // Issue 5 has no labels.
        let issue5 = page.rows.iter().find(|r| r.iid == 5).unwrap();
        assert!(issue5.labels.is_empty());
    }

    #[test]
    fn test_fetch_issue_list_sort_ascending() {
        let conn = Connection::open_in_memory().unwrap();
        create_issue_list_schema(&conn);
        setup_issue_list_data(&conn);

        let filter = IssueFilter::default();
        let page = fetch_issue_list(
            &conn,
            &filter,
            SortField::UpdatedAt,
            SortOrder::Asc,
            None,
            None,
        )
        .unwrap();

        // Oldest first.
        assert_eq!(page.rows[0].iid, 5);
        assert_eq!(page.rows[4].iid, 1);
    }

    #[test]
    fn test_fetch_issue_list_snapshot_fence() {
        let conn = Connection::open_in_memory().unwrap();
        create_issue_list_schema(&conn);
        setup_issue_list_data(&conn);

        let base = 1_700_000_000_000_i64;
        // Fence at base-25000: should exclude issues 1 (at base-10000) and 2 (at base-20000).
        let fence = base - 25_000;
        let filter = IssueFilter::default();
        let page = fetch_issue_list(
            &conn,
            &filter,
            SortField::UpdatedAt,
            SortOrder::Desc,
            None,
            Some(fence),
        )
        .unwrap();

        assert_eq!(page.total_count, 3);
        assert_eq!(page.rows.len(), 3);
        assert!(page.rows.iter().all(|r| r.updated_at <= fence));
    }

    #[test]
    fn test_fetch_issue_list_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_issue_list_schema(&conn);
        conn.execute(
            "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'g/p')",
            [],
        )
        .unwrap();

        let page = fetch_issue_list(
            &conn,
            &IssueFilter::default(),
            SortField::UpdatedAt,
            SortOrder::Desc,
            None,
            None,
        )
        .unwrap();

        assert_eq!(page.total_count, 0);
        assert!(page.rows.is_empty());
        assert!(page.next_cursor.is_none());
    }

    #[test]
    fn test_fetch_issue_list_free_text() {
        let conn = Connection::open_in_memory().unwrap();
        create_issue_list_schema(&conn);
        setup_issue_list_data(&conn);

        let filter = IssueFilter {
            free_text: Some("Issue 3".into()),
            ..Default::default()
        };
        let page = fetch_issue_list(
            &conn,
            &filter,
            SortField::UpdatedAt,
            SortOrder::Desc,
            None,
            None,
        )
        .unwrap();

        assert_eq!(page.total_count, 1);
        assert_eq!(page.rows[0].iid, 3);
    }

    // -----------------------------------------------------------------------
    // MR list
    // -----------------------------------------------------------------------

    /// Extended schema adding mr_labels, mr_reviewers for MR list tests.
- fn create_mr_list_schema(conn: &Connection) { - create_dashboard_schema(conn); - conn.execute_batch( - " - CREATE TABLE labels ( - id INTEGER PRIMARY KEY, - gitlab_id INTEGER, - project_id INTEGER NOT NULL, - name TEXT NOT NULL, - color TEXT, - description TEXT - ); - CREATE TABLE mr_labels ( - merge_request_id INTEGER NOT NULL, - label_id INTEGER NOT NULL, - PRIMARY KEY(merge_request_id, label_id) - ); - CREATE TABLE mr_reviewers ( - merge_request_id INTEGER NOT NULL, - username TEXT NOT NULL, - PRIMARY KEY(merge_request_id, username) - ); - ALTER TABLE merge_requests ADD COLUMN draft INTEGER NOT NULL DEFAULT 0; - ALTER TABLE merge_requests ADD COLUMN target_branch TEXT; - ALTER TABLE merge_requests ADD COLUMN source_branch TEXT; - ", - ) - .expect("create MR list schema"); - } - - /// Insert a test MR with full fields. - fn insert_mr_full( - conn: &Connection, - iid: i64, - state: &str, - author: &str, - target_branch: &str, - draft: bool, - updated_at: i64, - ) { - conn.execute( - "INSERT INTO merge_requests \ - (gitlab_id, project_id, iid, title, state, author_username, \ - target_branch, draft, created_at, updated_at, last_seen_at) \ - VALUES (?1, 1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?8, ?8)", - rusqlite::params![ - iid * 100 + 50, - iid, - format!("MR {iid}"), - state, - author, - target_branch, - i64::from(draft), - updated_at, - ], - ) - .expect("insert mr full"); - } - - /// Attach a label to an MR. 
- fn attach_mr_label(conn: &Connection, mr_iid: i64, label_name: &str) { - let mr_id: i64 = conn - .query_row( - "SELECT id FROM merge_requests WHERE iid = ?", - [mr_iid], - |r| r.get(0), - ) - .expect("find mr"); - - conn.execute( - "INSERT OR IGNORE INTO labels (project_id, name) VALUES (1, ?)", - [label_name], - ) - .expect("insert label"); - let label_id: i64 = conn - .query_row("SELECT id FROM labels WHERE name = ?", [label_name], |r| { - r.get(0) - }) - .expect("find label"); - - conn.execute( - "INSERT INTO mr_labels (merge_request_id, label_id) VALUES (?, ?)", - [mr_id, label_id], - ) - .expect("attach mr label"); - } - - /// Add a reviewer to an MR. - fn add_mr_reviewer(conn: &Connection, mr_iid: i64, username: &str) { - let mr_id: i64 = conn - .query_row( - "SELECT id FROM merge_requests WHERE iid = ?", - [mr_iid], - |r| r.get(0), - ) - .expect("find mr"); - - conn.execute( - "INSERT INTO mr_reviewers (merge_request_id, username) VALUES (?, ?)", - rusqlite::params![mr_id, username], - ) - .expect("add mr reviewer"); - } - - fn setup_mr_list_data(conn: &Connection) { - let base = 1_700_000_000_000_i64; - conn.execute( - "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'group/project')", - [], - ) - .unwrap(); - - insert_mr_full(conn, 1, "opened", "alice", "main", false, base - 10_000); - insert_mr_full(conn, 2, "opened", "bob", "main", true, base - 20_000); - insert_mr_full(conn, 3, "merged", "alice", "develop", false, base - 30_000); - insert_mr_full(conn, 4, "opened", "charlie", "main", true, base - 40_000); - insert_mr_full(conn, 5, "closed", "bob", "release", false, base - 50_000); - - attach_mr_label(conn, 1, "backend"); - attach_mr_label(conn, 1, "urgent"); - attach_mr_label(conn, 2, "frontend"); - attach_mr_label(conn, 4, "backend"); - - add_mr_reviewer(conn, 1, "diana"); - add_mr_reviewer(conn, 2, "diana"); - add_mr_reviewer(conn, 3, "edward"); - } - - #[test] - fn test_fetch_mr_list_basic() { - let conn = 
Connection::open_in_memory().unwrap(); - create_mr_list_schema(&conn); - setup_mr_list_data(&conn); - - let filter = MrFilter::default(); - let page = fetch_mr_list( - &conn, - &filter, - MrSortField::UpdatedAt, - MrSortOrder::Desc, - None, - None, - ) - .unwrap(); - - assert_eq!(page.total_count, 5); - assert_eq!(page.rows.len(), 5); - assert_eq!(page.rows[0].iid, 1); // newest first - assert_eq!(page.rows[4].iid, 5); - assert!(page.next_cursor.is_none()); - } - - #[test] - fn test_fetch_mr_list_filter_state() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_list_schema(&conn); - setup_mr_list_data(&conn); - - let filter = MrFilter { - state: Some("opened".into()), - ..Default::default() - }; - let page = fetch_mr_list( - &conn, - &filter, - MrSortField::UpdatedAt, - MrSortOrder::Desc, - None, - None, - ) - .unwrap(); - - assert_eq!(page.total_count, 3); - assert!(page.rows.iter().all(|r| r.state == "opened")); - } - - #[test] - fn test_fetch_mr_list_filter_draft() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_list_schema(&conn); - setup_mr_list_data(&conn); - - let filter = MrFilter { - draft: Some(true), - ..Default::default() - }; - let page = fetch_mr_list( - &conn, - &filter, - MrSortField::UpdatedAt, - MrSortOrder::Desc, - None, - None, - ) - .unwrap(); - - assert_eq!(page.total_count, 2); // MRs 2 and 4 - assert!(page.rows.iter().all(|r| r.draft)); - } - - #[test] - fn test_fetch_mr_list_filter_target_branch() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_list_schema(&conn); - setup_mr_list_data(&conn); - - let filter = MrFilter { - target_branch: Some("main".into()), - ..Default::default() - }; - let page = fetch_mr_list( - &conn, - &filter, - MrSortField::UpdatedAt, - MrSortOrder::Desc, - None, - None, - ) - .unwrap(); - - assert_eq!(page.total_count, 3); // MRs 1, 2, 4 - assert!(page.rows.iter().all(|r| r.target_branch == "main")); - } - - #[test] - fn test_fetch_mr_list_filter_reviewer() { - let 
conn = Connection::open_in_memory().unwrap(); - create_mr_list_schema(&conn); - setup_mr_list_data(&conn); - - let filter = MrFilter { - reviewer: Some("diana".into()), - ..Default::default() - }; - let page = fetch_mr_list( - &conn, - &filter, - MrSortField::UpdatedAt, - MrSortOrder::Desc, - None, - None, - ) - .unwrap(); - - assert_eq!(page.total_count, 2); // MRs 1 and 2 - } - - #[test] - fn test_fetch_mr_list_filter_label() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_list_schema(&conn); - setup_mr_list_data(&conn); - - let filter = MrFilter { - label: Some("backend".into()), - ..Default::default() - }; - let page = fetch_mr_list( - &conn, - &filter, - MrSortField::UpdatedAt, - MrSortOrder::Desc, - None, - None, - ) - .unwrap(); - - assert_eq!(page.total_count, 2); // MRs 1 and 4 - } - - #[test] - fn test_fetch_mr_list_labels_aggregated() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_list_schema(&conn); - setup_mr_list_data(&conn); - - let filter = MrFilter::default(); - let page = fetch_mr_list( - &conn, - &filter, - MrSortField::UpdatedAt, - MrSortOrder::Desc, - None, - None, - ) - .unwrap(); - - let mr1 = page.rows.iter().find(|r| r.iid == 1).unwrap(); - assert_eq!(mr1.labels.len(), 2); - assert!(mr1.labels.contains(&"backend".to_string())); - assert!(mr1.labels.contains(&"urgent".to_string())); - - let mr5 = page.rows.iter().find(|r| r.iid == 5).unwrap(); - assert!(mr5.labels.is_empty()); - } - - #[test] - fn test_fetch_mr_list_sort_ascending() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_list_schema(&conn); - setup_mr_list_data(&conn); - - let filter = MrFilter::default(); - let page = fetch_mr_list( - &conn, - &filter, - MrSortField::UpdatedAt, - MrSortOrder::Asc, - None, - None, - ) - .unwrap(); - - assert_eq!(page.rows[0].iid, 5); // oldest first - assert_eq!(page.rows[4].iid, 1); - } - - #[test] - fn test_fetch_mr_list_snapshot_fence() { - let conn = Connection::open_in_memory().unwrap(); - 
create_mr_list_schema(&conn); - setup_mr_list_data(&conn); - - let base = 1_700_000_000_000_i64; - let fence = base - 25_000; - let filter = MrFilter::default(); - let page = fetch_mr_list( - &conn, - &filter, - MrSortField::UpdatedAt, - MrSortOrder::Desc, - None, - Some(fence), - ) - .unwrap(); - - assert_eq!(page.total_count, 3); - assert!(page.rows.iter().all(|r| r.updated_at <= fence)); - } - - #[test] - fn test_fetch_mr_list_empty() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_list_schema(&conn); - conn.execute( - "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'g/p')", - [], - ) - .unwrap(); - - let page = fetch_mr_list( - &conn, - &MrFilter::default(), - MrSortField::UpdatedAt, - MrSortOrder::Desc, - None, - None, - ) - .unwrap(); - - assert_eq!(page.total_count, 0); - assert!(page.rows.is_empty()); - assert!(page.next_cursor.is_none()); - } - - #[test] - fn test_fetch_mr_list_free_text() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_list_schema(&conn); - setup_mr_list_data(&conn); - - let filter = MrFilter { - free_text: Some("MR 3".into()), - ..Default::default() - }; - let page = fetch_mr_list( - &conn, - &filter, - MrSortField::UpdatedAt, - MrSortOrder::Desc, - None, - None, - ) - .unwrap(); - - assert_eq!(page.total_count, 1); - assert_eq!(page.rows[0].iid, 3); - } - - // ----------------------------------------------------------------------- - // Issue Detail helpers - // ----------------------------------------------------------------------- - - fn create_issue_detail_schema(conn: &Connection) { - conn.execute_batch( - " - CREATE TABLE projects ( - id INTEGER PRIMARY KEY, - gitlab_project_id INTEGER UNIQUE NOT NULL, - path_with_namespace TEXT NOT NULL - ); - CREATE TABLE issues ( - id INTEGER PRIMARY KEY, - gitlab_id INTEGER UNIQUE NOT NULL, - project_id INTEGER NOT NULL, - iid INTEGER NOT NULL, - title TEXT NOT NULL, - description TEXT, - state TEXT NOT NULL DEFAULT 'opened', - 
author_username TEXT, - milestone_title TEXT, - due_date TEXT, - created_at INTEGER NOT NULL, - updated_at INTEGER NOT NULL, - web_url TEXT, - UNIQUE(project_id, iid) - ); - CREATE TABLE issue_assignees ( - issue_id INTEGER NOT NULL, - username TEXT NOT NULL, - UNIQUE(issue_id, username) - ); - CREATE TABLE labels ( - id INTEGER PRIMARY KEY, - project_id INTEGER NOT NULL, - name TEXT NOT NULL - ); - CREATE TABLE issue_labels ( - issue_id INTEGER NOT NULL, - label_id INTEGER NOT NULL, - UNIQUE(issue_id, label_id) - ); - CREATE TABLE discussions ( - id INTEGER PRIMARY KEY, - gitlab_discussion_id TEXT NOT NULL, - project_id INTEGER NOT NULL, - issue_id INTEGER, - merge_request_id INTEGER, - noteable_type TEXT NOT NULL, - resolvable INTEGER NOT NULL DEFAULT 0, - resolved INTEGER NOT NULL DEFAULT 0, - first_note_at INTEGER - ); - CREATE TABLE notes ( - id INTEGER PRIMARY KEY, - gitlab_id INTEGER UNIQUE NOT NULL, - discussion_id INTEGER NOT NULL, - project_id INTEGER NOT NULL, - note_type TEXT, - is_system INTEGER NOT NULL DEFAULT 0, - author_username TEXT, - body TEXT, - created_at INTEGER NOT NULL, - updated_at INTEGER NOT NULL, - position INTEGER, - position_new_path TEXT, - position_new_line INTEGER - ); - CREATE TABLE entity_references ( - id INTEGER PRIMARY KEY, - project_id INTEGER NOT NULL, - source_entity_type TEXT NOT NULL, - source_entity_id INTEGER NOT NULL, - target_entity_type TEXT NOT NULL, - target_entity_id INTEGER, - target_project_path TEXT, - target_entity_iid INTEGER, - reference_type TEXT NOT NULL, - source_method TEXT NOT NULL DEFAULT 'api', - created_at INTEGER NOT NULL DEFAULT 0 - ); - CREATE TABLE merge_requests ( - id INTEGER PRIMARY KEY, - gitlab_id INTEGER UNIQUE NOT NULL, - project_id INTEGER NOT NULL, - iid INTEGER NOT NULL, - title TEXT NOT NULL, - state TEXT NOT NULL DEFAULT 'opened', - UNIQUE(project_id, iid) - ); - ", - ) - .unwrap(); - } - - fn setup_issue_detail_data(conn: &Connection) { - // Project. 
- conn.execute( - "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'group/project')", - [], - ) - .unwrap(); - - // Issue. - conn.execute( - "INSERT INTO issues (id, gitlab_id, project_id, iid, title, description, state, author_username, milestone_title, due_date, created_at, updated_at, web_url) - VALUES (1, 1000, 1, 42, 'Fix authentication flow', 'Detailed description here', 'opened', 'alice', 'v1.0', '2026-03-01', 1700000000000, 1700000060000, 'https://gitlab.com/group/project/-/issues/42')", - [], - ) - .unwrap(); - - // Assignees. - conn.execute( - "INSERT INTO issue_assignees (issue_id, username) VALUES (1, 'bob')", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO issue_assignees (issue_id, username) VALUES (1, 'charlie')", - [], - ) - .unwrap(); - - // Labels. - conn.execute( - "INSERT INTO labels (id, project_id, name) VALUES (1, 1, 'backend')", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO labels (id, project_id, name) VALUES (2, 1, 'urgent')", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO issue_labels (issue_id, label_id) VALUES (1, 1)", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO issue_labels (issue_id, label_id) VALUES (1, 2)", - [], - ) - .unwrap(); - - // Discussions + notes. 
- conn.execute( - "INSERT INTO discussions (id, gitlab_discussion_id, project_id, issue_id, noteable_type, resolvable, resolved, first_note_at) - VALUES (1, 'disc-aaa', 1, 1, 'Issue', 0, 0, 1700000010000)", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO notes (id, gitlab_id, discussion_id, project_id, author_username, body, created_at, updated_at, position, is_system, note_type) - VALUES (1, 10001, 1, 1, 'alice', 'This looks good overall', 1700000010000, 1700000010000, 0, 0, 'DiscussionNote')", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO notes (id, gitlab_id, discussion_id, project_id, author_username, body, created_at, updated_at, position, is_system, note_type) - VALUES (2, 10002, 1, 1, 'bob', 'Agreed, but see my comment below', 1700000020000, 1700000020000, 1, 0, 'DiscussionNote')", - [], - ) - .unwrap(); - - // System note discussion. - conn.execute( - "INSERT INTO discussions (id, gitlab_discussion_id, project_id, issue_id, noteable_type, first_note_at) - VALUES (2, 'disc-bbb', 1, 1, 'Issue', 1700000030000)", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO notes (id, gitlab_id, discussion_id, project_id, author_username, body, created_at, updated_at, position, is_system, note_type) - VALUES (3, 10003, 2, 1, 'system', 'changed the description', 1700000030000, 1700000030000, 0, 1, NULL)", - [], - ) - .unwrap(); - - // Closing MR cross-ref. 
- conn.execute( - "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state) - VALUES (1, 2000, 1, 10, 'Fix auth MR', 'opened')", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO entity_references (project_id, source_entity_type, source_entity_id, target_entity_type, target_entity_id, target_entity_iid, reference_type) - VALUES (1, 'issue', 1, 'merge_request', 1, 10, 'closes')", - [], - ) - .unwrap(); - } - - // ----------------------------------------------------------------------- - // Issue Detail tests - // ----------------------------------------------------------------------- - - #[test] - fn test_fetch_issue_detail_basic() { - let conn = Connection::open_in_memory().unwrap(); - create_issue_detail_schema(&conn); - setup_issue_detail_data(&conn); - - let key = EntityKey::issue(1, 42); - let data = fetch_issue_detail(&conn, &key).unwrap(); - - assert_eq!(data.metadata.iid, 42); - assert_eq!(data.metadata.title, "Fix authentication flow"); - assert_eq!(data.metadata.state, "opened"); - assert_eq!(data.metadata.author, "alice"); - assert_eq!(data.metadata.project_path, "group/project"); - assert_eq!(data.metadata.milestone, Some("v1.0".to_string())); - assert_eq!(data.metadata.due_date, Some("2026-03-01".to_string())); - assert_eq!( - data.metadata.web_url, - "https://gitlab.com/group/project/-/issues/42" - ); - } - - #[test] - fn test_fetch_issue_detail_assignees() { - let conn = Connection::open_in_memory().unwrap(); - create_issue_detail_schema(&conn); - setup_issue_detail_data(&conn); - - let key = EntityKey::issue(1, 42); - let data = fetch_issue_detail(&conn, &key).unwrap(); - - assert_eq!(data.metadata.assignees.len(), 2); - assert!(data.metadata.assignees.contains(&"bob".to_string())); - assert!(data.metadata.assignees.contains(&"charlie".to_string())); - } - - #[test] - fn test_fetch_issue_detail_labels() { - let conn = Connection::open_in_memory().unwrap(); - create_issue_detail_schema(&conn); - setup_issue_detail_data(&conn); - - 
let key = EntityKey::issue(1, 42); - let data = fetch_issue_detail(&conn, &key).unwrap(); - - assert_eq!(data.metadata.labels, vec!["backend", "urgent"]); - } - - #[test] - fn test_fetch_issue_detail_cross_refs() { - let conn = Connection::open_in_memory().unwrap(); - create_issue_detail_schema(&conn); - setup_issue_detail_data(&conn); - - let key = EntityKey::issue(1, 42); - let data = fetch_issue_detail(&conn, &key).unwrap(); - - assert_eq!(data.cross_refs.len(), 1); - assert_eq!(data.cross_refs[0].kind, CrossRefKind::ClosingMr); - assert_eq!(data.cross_refs[0].entity_key, EntityKey::mr(1, 10)); - assert_eq!(data.cross_refs[0].label, "Fix auth MR"); - assert!(data.cross_refs[0].navigable); - } - - #[test] - fn test_fetch_issue_detail_discussion_count() { - let conn = Connection::open_in_memory().unwrap(); - create_issue_detail_schema(&conn); - setup_issue_detail_data(&conn); - - let key = EntityKey::issue(1, 42); - let data = fetch_issue_detail(&conn, &key).unwrap(); - - assert_eq!(data.metadata.discussion_count, 2); - } - - #[test] - fn test_fetch_issue_discussions_basic() { - let conn = Connection::open_in_memory().unwrap(); - create_issue_detail_schema(&conn); - setup_issue_detail_data(&conn); - - let key = EntityKey::issue(1, 42); - let discussions = fetch_issue_discussions(&conn, &key).unwrap(); - - assert_eq!(discussions.len(), 2); - } - - #[test] - fn test_fetch_issue_discussions_notes() { - let conn = Connection::open_in_memory().unwrap(); - create_issue_detail_schema(&conn); - setup_issue_detail_data(&conn); - - let key = EntityKey::issue(1, 42); - let discussions = fetch_issue_discussions(&conn, &key).unwrap(); - - // First discussion has 2 notes. 
- assert_eq!(discussions[0].notes.len(), 2); - assert_eq!(discussions[0].notes[0].author, "alice"); - assert_eq!(discussions[0].notes[0].body, "This looks good overall"); - assert_eq!(discussions[0].notes[1].author, "bob"); - assert!(!discussions[0].notes[0].is_system); - } - - #[test] - fn test_fetch_issue_discussions_system_note() { - let conn = Connection::open_in_memory().unwrap(); - create_issue_detail_schema(&conn); - setup_issue_detail_data(&conn); - - let key = EntityKey::issue(1, 42); - let discussions = fetch_issue_discussions(&conn, &key).unwrap(); - - // Second discussion is a system note. - assert_eq!(discussions[1].notes.len(), 1); - assert!(discussions[1].notes[0].is_system); - assert_eq!(discussions[1].notes[0].body, "changed the description"); - } - - #[test] - fn test_fetch_issue_discussions_ordering() { - let conn = Connection::open_in_memory().unwrap(); - create_issue_detail_schema(&conn); - setup_issue_detail_data(&conn); - - let key = EntityKey::issue(1, 42); - let discussions = fetch_issue_discussions(&conn, &key).unwrap(); - - // Ordered by first_note_at. 
- assert_eq!(discussions[0].discussion_id, "disc-aaa"); - assert_eq!(discussions[1].discussion_id, "disc-bbb"); - } - - #[test] - fn test_fetch_issue_detail_not_found() { - let conn = Connection::open_in_memory().unwrap(); - create_issue_detail_schema(&conn); - setup_issue_detail_data(&conn); - - let key = EntityKey::issue(1, 999); - let result = fetch_issue_detail(&conn, &key); - assert!(result.is_err()); - } - - #[test] - fn test_fetch_issue_detail_no_description() { - let conn = Connection::open_in_memory().unwrap(); - create_issue_detail_schema(&conn); - - conn.execute( - "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'g/p')", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO issues (id, gitlab_id, project_id, iid, title, description, state, created_at, updated_at) - VALUES (1, 1000, 1, 1, 'No desc', NULL, 'opened', 0, 0)", - [], - ) - .unwrap(); - - let key = EntityKey::issue(1, 1); - let data = fetch_issue_detail(&conn, &key).unwrap(); - assert_eq!(data.metadata.description, ""); - } - - // ----------------------------------------------------------------------- - // MR Detail Tests - // ----------------------------------------------------------------------- - - fn create_mr_detail_schema(conn: &Connection) { - create_issue_detail_schema(conn); - // Add MR-specific columns and tables on top of the base schema. - conn.execute_batch( - " - -- Add columns to merge_requests that the detail query needs. 
- ALTER TABLE merge_requests ADD COLUMN description TEXT; - ALTER TABLE merge_requests ADD COLUMN draft INTEGER NOT NULL DEFAULT 0; - ALTER TABLE merge_requests ADD COLUMN author_username TEXT; - ALTER TABLE merge_requests ADD COLUMN source_branch TEXT; - ALTER TABLE merge_requests ADD COLUMN target_branch TEXT; - ALTER TABLE merge_requests ADD COLUMN detailed_merge_status TEXT; - ALTER TABLE merge_requests ADD COLUMN created_at INTEGER; - ALTER TABLE merge_requests ADD COLUMN updated_at INTEGER; - ALTER TABLE merge_requests ADD COLUMN merged_at INTEGER; - ALTER TABLE merge_requests ADD COLUMN web_url TEXT; - - CREATE TABLE mr_assignees ( - merge_request_id INTEGER NOT NULL, - username TEXT NOT NULL, - UNIQUE(merge_request_id, username) - ); - CREATE TABLE mr_reviewers ( - merge_request_id INTEGER NOT NULL, - username TEXT NOT NULL, - UNIQUE(merge_request_id, username) - ); - CREATE TABLE mr_labels ( - merge_request_id INTEGER NOT NULL, - label_id INTEGER NOT NULL, - UNIQUE(merge_request_id, label_id) - ); - CREATE TABLE mr_file_changes ( - id INTEGER PRIMARY KEY, - merge_request_id INTEGER NOT NULL, - project_id INTEGER NOT NULL, - old_path TEXT, - new_path TEXT NOT NULL, - change_type TEXT NOT NULL - ); - ", - ) - .unwrap(); - } - - fn setup_mr_detail_data(conn: &Connection) { - // Project (if not already inserted). - conn.execute( - "INSERT OR IGNORE INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'group/project')", - [], - ) - .unwrap(); - - // MR. - conn.execute( - "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, description, state, draft, author_username, source_branch, target_branch, detailed_merge_status, created_at, updated_at, merged_at, web_url) - VALUES (1, 2000, 1, 10, 'Fix auth flow', 'MR description', 'opened', 0, 'alice', 'fix-auth', 'main', 'mergeable', 1700000000000, 1700000060000, NULL, 'https://gitlab.com/group/project/-/merge_requests/10')", - [], - ) - .unwrap(); - - // Assignees. 
- conn.execute( - "INSERT INTO mr_assignees (merge_request_id, username) VALUES (1, 'bob')", - [], - ) - .unwrap(); - - // Reviewers. - conn.execute( - "INSERT INTO mr_reviewers (merge_request_id, username) VALUES (1, 'carol')", - [], - ) - .unwrap(); - - // Labels. - conn.execute( - "INSERT OR IGNORE INTO labels (id, project_id, name) VALUES (10, 1, 'backend')", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO mr_labels (merge_request_id, label_id) VALUES (1, 10)", - [], - ) - .unwrap(); - - // File changes. - conn.execute( - "INSERT INTO mr_file_changes (merge_request_id, project_id, old_path, new_path, change_type) - VALUES (1, 1, NULL, 'src/auth.rs', 'modified')", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO mr_file_changes (merge_request_id, project_id, old_path, new_path, change_type) - VALUES (1, 1, NULL, 'src/lib.rs', 'added')", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO mr_file_changes (merge_request_id, project_id, old_path, new_path, change_type) - VALUES (1, 1, 'src/old.rs', 'src/new.rs', 'renamed')", - [], - ) - .unwrap(); - - // Discussion with a note. - conn.execute( - "INSERT INTO discussions (id, gitlab_discussion_id, project_id, merge_request_id, noteable_type, resolvable, resolved, first_note_at) - VALUES (1, 'mr_disc_1', 1, 1, 'MergeRequest', 1, 0, 1700000010000)", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO notes (id, gitlab_id, discussion_id, project_id, note_type, is_system, author_username, body, created_at, updated_at, position, position_new_path, position_new_line) - VALUES (1, 5001, 1, 1, 'DiffNote', 0, 'alice', 'Please fix this', 1700000010000, 1700000010000, 0, 'src/auth.rs', 42)", - [], - ) - .unwrap(); - - // Cross-reference (MR closes issue). 
- conn.execute( - "INSERT INTO issues (id, gitlab_id, project_id, iid, title, state, created_at, updated_at) - VALUES (1, 1000, 1, 5, 'Auth bug', 'opened', 0, 0)", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO entity_references (project_id, source_entity_type, source_entity_id, target_entity_type, target_entity_id, target_project_path, target_entity_iid, reference_type, source_method) - VALUES (1, 'merge_request', 1, 'issue', 1, 'group/project', 5, 'closes', 'api')", - [], - ) - .unwrap(); - } - - #[test] - fn test_fetch_mr_detail_basic_metadata() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_detail_schema(&conn); - setup_mr_detail_data(&conn); - - let key = EntityKey::mr(1, 10); - let data = fetch_mr_detail(&conn, &key).unwrap(); - - assert_eq!(data.metadata.iid, 10); - assert_eq!(data.metadata.title, "Fix auth flow"); - assert_eq!(data.metadata.description, "MR description"); - assert_eq!(data.metadata.state, "opened"); - assert!(!data.metadata.draft); - assert_eq!(data.metadata.author, "alice"); - assert_eq!(data.metadata.source_branch, "fix-auth"); - assert_eq!(data.metadata.target_branch, "main"); - assert_eq!(data.metadata.merge_status, "mergeable"); - assert!(data.metadata.merged_at.is_none()); - assert_eq!( - data.metadata.web_url, - "https://gitlab.com/group/project/-/merge_requests/10" - ); - } - - #[test] - fn test_fetch_mr_detail_assignees_reviewers_labels() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_detail_schema(&conn); - setup_mr_detail_data(&conn); - - let key = EntityKey::mr(1, 10); - let data = fetch_mr_detail(&conn, &key).unwrap(); - - assert_eq!(data.metadata.assignees, vec!["bob"]); - assert_eq!(data.metadata.reviewers, vec!["carol"]); - assert_eq!(data.metadata.labels, vec!["backend"]); - } - - #[test] - fn test_fetch_mr_detail_file_changes() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_detail_schema(&conn); - setup_mr_detail_data(&conn); - - let key = EntityKey::mr(1, 
10); - let data = fetch_mr_detail(&conn, &key).unwrap(); - - assert_eq!(data.file_changes.len(), 3); - assert_eq!(data.metadata.file_change_count, 3); - - // Ordered by new_path. - assert_eq!(data.file_changes[0].new_path, "src/auth.rs"); - assert_eq!(data.file_changes[0].change_type, FileChangeType::Modified); - - assert_eq!(data.file_changes[1].new_path, "src/lib.rs"); - assert_eq!(data.file_changes[1].change_type, FileChangeType::Added); - - assert_eq!(data.file_changes[2].new_path, "src/new.rs"); - assert_eq!(data.file_changes[2].change_type, FileChangeType::Renamed); - assert_eq!(data.file_changes[2].old_path.as_deref(), Some("src/old.rs")); - } - - #[test] - fn test_fetch_mr_detail_cross_refs() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_detail_schema(&conn); - setup_mr_detail_data(&conn); - - let key = EntityKey::mr(1, 10); - let data = fetch_mr_detail(&conn, &key).unwrap(); - - assert_eq!(data.cross_refs.len(), 1); - assert_eq!(data.cross_refs[0].kind, CrossRefKind::ClosingMr); - assert_eq!(data.cross_refs[0].label, "Auth bug"); - } - - #[test] - fn test_fetch_mr_discussions() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_detail_schema(&conn); - setup_mr_detail_data(&conn); - - let key = EntityKey::mr(1, 10); - let discussions = fetch_mr_discussions(&conn, &key).unwrap(); - - assert_eq!(discussions.len(), 1); - assert_eq!(discussions[0].discussion_id, "mr_disc_1"); - assert!(discussions[0].resolvable); - assert!(!discussions[0].resolved); - assert_eq!(discussions[0].notes.len(), 1); - assert_eq!(discussions[0].notes[0].author, "alice"); - assert_eq!(discussions[0].notes[0].body, "Please fix this"); - assert!(discussions[0].notes[0].is_diff_note); - assert_eq!( - discussions[0].notes[0].diff_file_path.as_deref(), - Some("src/auth.rs") - ); - assert_eq!(discussions[0].notes[0].diff_new_line, Some(42)); - } - - #[test] - fn test_fetch_mr_detail_not_found() { - let conn = Connection::open_in_memory().unwrap(); - 
create_mr_detail_schema(&conn); - - // Insert project but no MR. - conn.execute( - "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'g/p')", - [], - ) - .unwrap(); - - let key = EntityKey::mr(1, 99); - assert!(fetch_mr_detail(&conn, &key).is_err()); - } - - #[test] - fn test_fetch_mr_detail_no_file_changes() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_detail_schema(&conn); - - conn.execute( - "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'g/p')", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state, created_at, updated_at, web_url) - VALUES (1, 2000, 1, 10, 'Empty MR', 'opened', 0, 0, '')", - [], - ) - .unwrap(); - - let key = EntityKey::mr(1, 10); - let data = fetch_mr_detail(&conn, &key).unwrap(); - assert!(data.file_changes.is_empty()); - assert_eq!(data.metadata.file_change_count, 0); - } - - #[test] - fn test_fetch_mr_detail_draft() { - let conn = Connection::open_in_memory().unwrap(); - create_mr_detail_schema(&conn); - - conn.execute( - "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'g/p')", - [], - ) - .unwrap(); - conn.execute( - "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state, draft, created_at, updated_at, web_url) - VALUES (1, 2000, 1, 10, 'Draft: WIP', 'opened', 1, 0, 0, '')", - [], - ) - .unwrap(); - - let key = EntityKey::mr(1, 10); - let data = fetch_mr_detail(&conn, &key).unwrap(); - assert!(data.metadata.draft); - } -} diff --git a/crates/lore-tui/src/action/bootstrap.rs b/crates/lore-tui/src/action/bootstrap.rs new file mode 100644 index 0000000..900cb6e --- /dev/null +++ b/crates/lore-tui/src/action/bootstrap.rs @@ -0,0 +1,298 @@ +#![allow(dead_code)] + +use anyhow::{Context, Result}; +use rusqlite::Connection; + +use crate::state::bootstrap::{DataReadiness, SchemaCheck}; + +/// Minimum schema version required by this TUI version. 
+pub const MINIMUM_SCHEMA_VERSION: i32 = 20; + +/// Check the schema version of the database. +/// +/// Returns [`SchemaCheck::NoDB`] if the `schema_version` table doesn't exist, +/// [`SchemaCheck::Incompatible`] if the version is below the minimum, +/// or [`SchemaCheck::Compatible`] if all is well. +pub fn check_schema_version(conn: &Connection, minimum: i32) -> SchemaCheck { + // Check if schema_version table exists. + let table_exists: bool = conn + .query_row( + "SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='schema_version'", + [], + |r| r.get::<_, i64>(0), + ) + .map(|c| c > 0) + .unwrap_or(false); + + if !table_exists { + return SchemaCheck::NoDB; + } + + // Read the current version. + match conn.query_row("SELECT version FROM schema_version LIMIT 1", [], |r| { + r.get::<_, i32>(0) + }) { + Ok(version) if version >= minimum => SchemaCheck::Compatible { version }, + Ok(found) => SchemaCheck::Incompatible { found, minimum }, + Err(_) => SchemaCheck::NoDB, + } +} + +/// Check whether the database has enough data to skip the bootstrap screen. +/// +/// Counts issues, merge requests, and search documents. The `documents` table +/// may not exist on older schemas, so its absence is treated as "no documents." +pub fn check_data_readiness(conn: &Connection) -> Result { + let has_issues: bool = conn + .query_row("SELECT EXISTS(SELECT 1 FROM issues LIMIT 1)", [], |r| { + r.get(0) + }) + .context("checking issues")?; + + let has_mrs: bool = conn + .query_row( + "SELECT EXISTS(SELECT 1 FROM merge_requests LIMIT 1)", + [], + |r| r.get(0), + ) + .context("checking merge requests")?; + + // documents table may not exist yet (created by generate-docs). 
+ let has_documents: bool = conn + .query_row("SELECT EXISTS(SELECT 1 FROM documents LIMIT 1)", [], |r| { + r.get(0) + }) + .unwrap_or(false); + + let schema_version = conn + .query_row("SELECT version FROM schema_version LIMIT 1", [], |r| { + r.get::<_, i32>(0) + }) + .unwrap_or(0); + + Ok(DataReadiness { + has_issues, + has_mrs, + has_documents, + schema_version, + }) +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Create the minimal schema needed for bootstrap / data-readiness queries. + fn create_dashboard_schema(conn: &Connection) { + conn.execute_batch( + " + CREATE TABLE projects ( + id INTEGER PRIMARY KEY, + gitlab_project_id INTEGER UNIQUE NOT NULL, + path_with_namespace TEXT NOT NULL + ); + CREATE TABLE issues ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT NOT NULL, + author_username TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE merge_requests ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT, + author_username TEXT, + created_at INTEGER, + updated_at INTEGER, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE discussions ( + id INTEGER PRIMARY KEY, + gitlab_discussion_id TEXT NOT NULL, + project_id INTEGER NOT NULL, + noteable_type TEXT NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE notes ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + discussion_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + is_system INTEGER NOT NULL DEFAULT 0, + author_username TEXT, + body TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE documents ( + id INTEGER PRIMARY KEY, + source_type TEXT NOT NULL, + source_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + content_text TEXT NOT NULL, + 
content_hash TEXT NOT NULL + ); + CREATE TABLE embedding_metadata ( + document_id INTEGER NOT NULL, + chunk_index INTEGER NOT NULL DEFAULT 0, + model TEXT NOT NULL, + dims INTEGER NOT NULL, + document_hash TEXT NOT NULL, + chunk_hash TEXT NOT NULL, + created_at INTEGER NOT NULL, + PRIMARY KEY(document_id, chunk_index) + ); + CREATE TABLE sync_runs ( + id INTEGER PRIMARY KEY, + started_at INTEGER NOT NULL, + heartbeat_at INTEGER NOT NULL, + finished_at INTEGER, + status TEXT NOT NULL, + command TEXT NOT NULL, + error TEXT + ); + ", + ) + .expect("create dashboard schema"); + } + + fn insert_issue(conn: &Connection, iid: i64, state: &str, updated_at: i64) { + conn.execute( + "INSERT INTO issues (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at) + VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)", + rusqlite::params![iid * 100, iid, format!("Issue {iid}"), state, updated_at], + ) + .expect("insert issue"); + } + + fn insert_mr(conn: &Connection, iid: i64, state: &str, updated_at: i64) { + conn.execute( + "INSERT INTO merge_requests (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at) + VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)", + rusqlite::params![iid * 100 + 50, iid, format!("MR {iid}"), state, updated_at], + ) + .expect("insert mr"); + } + + /// TDD anchor test from bead spec. 
+ #[test] + fn test_schema_preflight_rejects_old() { + let conn = Connection::open_in_memory().unwrap(); + conn.execute_batch( + "CREATE TABLE schema_version (version INTEGER); + INSERT INTO schema_version (version) VALUES (1);", + ) + .unwrap(); + + let result = check_schema_version(&conn, 20); + assert!(matches!( + result, + SchemaCheck::Incompatible { + found: 1, + minimum: 20 + } + )); + } + + #[test] + fn test_schema_preflight_accepts_compatible() { + let conn = Connection::open_in_memory().unwrap(); + conn.execute_batch( + "CREATE TABLE schema_version (version INTEGER); + INSERT INTO schema_version (version) VALUES (26);", + ) + .unwrap(); + + let result = check_schema_version(&conn, 20); + assert!(matches!(result, SchemaCheck::Compatible { version: 26 })); + } + + #[test] + fn test_schema_preflight_exact_minimum() { + let conn = Connection::open_in_memory().unwrap(); + conn.execute_batch( + "CREATE TABLE schema_version (version INTEGER); + INSERT INTO schema_version (version) VALUES (20);", + ) + .unwrap(); + + let result = check_schema_version(&conn, 20); + assert!(matches!(result, SchemaCheck::Compatible { version: 20 })); + } + + #[test] + fn test_schema_preflight_no_db() { + let conn = Connection::open_in_memory().unwrap(); + let result = check_schema_version(&conn, 20); + assert!(matches!(result, SchemaCheck::NoDB)); + } + + #[test] + fn test_schema_preflight_empty_schema_version_table() { + let conn = Connection::open_in_memory().unwrap(); + conn.execute_batch("CREATE TABLE schema_version (version INTEGER);") + .unwrap(); + + let result = check_schema_version(&conn, 20); + assert!(matches!(result, SchemaCheck::NoDB)); + } + + #[test] + fn test_check_data_readiness_empty() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + conn.execute_batch( + "CREATE TABLE schema_version (version INTEGER); + INSERT INTO schema_version (version) VALUES (26);", + ) + .unwrap(); + + let readiness = 
check_data_readiness(&conn).unwrap(); + assert!(!readiness.has_issues); + assert!(!readiness.has_mrs); + assert!(!readiness.has_documents); + assert_eq!(readiness.schema_version, 26); + assert!(!readiness.has_any_data()); + } + + #[test] + fn test_check_data_readiness_with_data() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + conn.execute_batch( + "CREATE TABLE schema_version (version INTEGER); + INSERT INTO schema_version (version) VALUES (26);", + ) + .unwrap(); + + insert_issue(&conn, 1, "opened", 1_700_000_000_000); + insert_mr(&conn, 1, "merged", 1_700_000_000_000); + + let readiness = check_data_readiness(&conn).unwrap(); + assert!(readiness.has_issues); + assert!(readiness.has_mrs); + assert!(!readiness.has_documents); + assert_eq!(readiness.schema_version, 26); + assert!(readiness.has_any_data()); + } + + #[test] + fn test_check_data_readiness_documents_table_missing() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + // No documents table — should still work. + + let readiness = check_data_readiness(&conn).unwrap(); + assert!(!readiness.has_documents); + } +} diff --git a/crates/lore-tui/src/action/dashboard.rs b/crates/lore-tui/src/action/dashboard.rs new file mode 100644 index 0000000..bd840d6 --- /dev/null +++ b/crates/lore-tui/src/action/dashboard.rs @@ -0,0 +1,485 @@ +#![allow(dead_code)] + +use anyhow::{Context, Result}; +use rusqlite::Connection; + +use crate::clock::Clock; +use crate::state::dashboard::{ + DashboardData, EntityCounts, LastSyncInfo, ProjectSyncInfo, RecentActivityItem, +}; + +/// Fetch all data for the dashboard screen. +/// +/// Runs aggregation queries for entity counts, per-project sync freshness, +/// recent activity, and the last sync run summary. 
+pub fn fetch_dashboard(conn: &Connection, clock: &dyn Clock) -> Result { + let counts = fetch_entity_counts(conn)?; + let projects = fetch_project_sync_info(conn, clock)?; + let recent = fetch_recent_activity(conn, clock)?; + let last_sync = fetch_last_sync(conn)?; + + Ok(DashboardData { + counts, + projects, + recent, + last_sync, + }) +} + +/// Count all entities in the database. +fn fetch_entity_counts(conn: &Connection) -> Result { + let issues_total: i64 = conn + .query_row("SELECT COUNT(*) FROM issues", [], |r| r.get(0)) + .context("counting issues")?; + + let issues_open: i64 = conn + .query_row( + "SELECT COUNT(*) FROM issues WHERE state = 'opened'", + [], + |r| r.get(0), + ) + .context("counting open issues")?; + + let mrs_total: i64 = conn + .query_row("SELECT COUNT(*) FROM merge_requests", [], |r| r.get(0)) + .context("counting merge requests")?; + + let mrs_open: i64 = conn + .query_row( + "SELECT COUNT(*) FROM merge_requests WHERE state = 'opened'", + [], + |r| r.get(0), + ) + .context("counting open merge requests")?; + + let discussions: i64 = conn + .query_row("SELECT COUNT(*) FROM discussions", [], |r| r.get(0)) + .context("counting discussions")?; + + let notes_total: i64 = conn + .query_row("SELECT COUNT(*) FROM notes", [], |r| r.get(0)) + .context("counting notes")?; + + let notes_system: i64 = conn + .query_row("SELECT COUNT(*) FROM notes WHERE is_system = 1", [], |r| { + r.get(0) + }) + .context("counting system notes")?; + + let notes_system_pct = if notes_total > 0 { + u8::try_from(notes_system * 100 / notes_total).unwrap_or(100) + } else { + 0 + }; + + let documents: i64 = conn + .query_row("SELECT COUNT(*) FROM documents", [], |r| r.get(0)) + .context("counting documents")?; + + let embeddings: i64 = conn + .query_row("SELECT COUNT(*) FROM embedding_metadata", [], |r| r.get(0)) + .context("counting embeddings")?; + + #[allow(clippy::cast_sign_loss)] // SQL COUNT(*) is always >= 0 + Ok(EntityCounts { + issues_open: issues_open as u64, + 
issues_total: issues_total as u64, + mrs_open: mrs_open as u64, + mrs_total: mrs_total as u64, + discussions: discussions as u64, + notes_total: notes_total as u64, + notes_system_pct, + documents: documents as u64, + embeddings: embeddings as u64, + }) +} + +/// Per-project sync freshness based on the most recent sync_runs entry. +fn fetch_project_sync_info(conn: &Connection, clock: &dyn Clock) -> Result> { + let now_ms = clock.now_ms(); + + let mut stmt = conn + .prepare( + "SELECT p.path_with_namespace, + MAX(sr.finished_at) as last_sync_ms + FROM projects p + LEFT JOIN sync_runs sr ON sr.status = 'succeeded' + AND sr.finished_at IS NOT NULL + GROUP BY p.id + ORDER BY p.path_with_namespace", + ) + .context("preparing project sync query")?; + + let rows = stmt + .query_map([], |row| { + let path: String = row.get(0)?; + let last_sync_ms: Option = row.get(1)?; + Ok((path, last_sync_ms)) + }) + .context("querying project sync info")?; + + let mut result = Vec::new(); + for row in rows { + let (path, last_sync_ms) = row.context("reading project sync row")?; + let minutes_since_sync = match last_sync_ms { + Some(ms) => { + let elapsed_ms = now_ms.saturating_sub(ms); + u64::try_from(elapsed_ms / 60_000).unwrap_or(u64::MAX) + } + None => u64::MAX, // Never synced. + }; + result.push(ProjectSyncInfo { + path, + minutes_since_sync, + }); + } + + Ok(result) +} + +/// Recent activity: the 20 most recently updated issues and MRs. 
+fn fetch_recent_activity(conn: &Connection, clock: &dyn Clock) -> Result> { + let now_ms = clock.now_ms(); + + let mut stmt = conn + .prepare( + "SELECT entity_type, iid, title, state, updated_at FROM ( + SELECT 'issue' AS entity_type, iid, title, state, updated_at + FROM issues + UNION ALL + SELECT 'mr' AS entity_type, iid, title, state, updated_at + FROM merge_requests + ) + ORDER BY updated_at DESC + LIMIT 20", + ) + .context("preparing recent activity query")?; + + let rows = stmt + .query_map([], |row| { + let entity_type: String = row.get(0)?; + let iid: i64 = row.get(1)?; + let title: String = row.get::<_, Option>(2)?.unwrap_or_default(); + let state: String = row.get::<_, Option>(3)?.unwrap_or_default(); + let updated_at: i64 = row.get(4)?; + Ok((entity_type, iid, title, state, updated_at)) + }) + .context("querying recent activity")?; + + let mut result = Vec::new(); + for row in rows { + let (entity_type, iid, title, state, updated_at) = + row.context("reading recent activity row")?; + let elapsed_ms = now_ms.saturating_sub(updated_at); + let minutes_ago = u64::try_from(elapsed_ms / 60_000).unwrap_or(u64::MAX); + result.push(RecentActivityItem { + entity_type, + iid: iid as u64, + title, + state, + minutes_ago, + }); + } + + Ok(result) +} + +/// The most recent sync run summary. +fn fetch_last_sync(conn: &Connection) -> Result> { + let result = conn.query_row( + "SELECT status, finished_at, command, error + FROM sync_runs + ORDER BY id DESC + LIMIT 1", + [], + |row| { + Ok(LastSyncInfo { + status: row.get(0)?, + finished_at: row.get(1)?, + command: row.get(2)?, + error: row.get(3)?, + }) + }, + ); + + match result { + Ok(info) => Ok(Some(info)), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(e) => Err(e).context("querying last sync run"), + } +} + +#[cfg(test)] +mod tests { + use super::*; + use crate::clock::FakeClock; + + /// Create the minimal schema needed for dashboard queries. 
+ fn create_dashboard_schema(conn: &Connection) { + conn.execute_batch( + " + CREATE TABLE projects ( + id INTEGER PRIMARY KEY, + gitlab_project_id INTEGER UNIQUE NOT NULL, + path_with_namespace TEXT NOT NULL + ); + CREATE TABLE issues ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT NOT NULL, + author_username TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE merge_requests ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT, + author_username TEXT, + created_at INTEGER, + updated_at INTEGER, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE discussions ( + id INTEGER PRIMARY KEY, + gitlab_discussion_id TEXT NOT NULL, + project_id INTEGER NOT NULL, + noteable_type TEXT NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE notes ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + discussion_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + is_system INTEGER NOT NULL DEFAULT 0, + author_username TEXT, + body TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE documents ( + id INTEGER PRIMARY KEY, + source_type TEXT NOT NULL, + source_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + content_text TEXT NOT NULL, + content_hash TEXT NOT NULL + ); + CREATE TABLE embedding_metadata ( + document_id INTEGER NOT NULL, + chunk_index INTEGER NOT NULL DEFAULT 0, + model TEXT NOT NULL, + dims INTEGER NOT NULL, + document_hash TEXT NOT NULL, + chunk_hash TEXT NOT NULL, + created_at INTEGER NOT NULL, + PRIMARY KEY(document_id, chunk_index) + ); + CREATE TABLE sync_runs ( + id INTEGER PRIMARY KEY, + started_at INTEGER NOT NULL, + heartbeat_at INTEGER NOT NULL, + finished_at INTEGER, + status TEXT NOT NULL, + command TEXT 
NOT NULL, + error TEXT + ); + ", + ) + .expect("create dashboard schema"); + } + + /// Insert a test issue. + fn insert_issue(conn: &Connection, iid: i64, state: &str, updated_at: i64) { + conn.execute( + "INSERT INTO issues (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at) + VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)", + rusqlite::params![iid * 100, iid, format!("Issue {iid}"), state, updated_at], + ) + .expect("insert issue"); + } + + /// Insert a test MR. + fn insert_mr(conn: &Connection, iid: i64, state: &str, updated_at: i64) { + conn.execute( + "INSERT INTO merge_requests (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at) + VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)", + rusqlite::params![iid * 100 + 50, iid, format!("MR {iid}"), state, updated_at], + ) + .expect("insert mr"); + } + + #[test] + fn test_fetch_dashboard_counts() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + // 5 issues: 3 open, 2 closed. 
+ let now_ms = 1_700_000_000_000_i64; + insert_issue(&conn, 1, "opened", now_ms - 10_000); + insert_issue(&conn, 2, "opened", now_ms - 20_000); + insert_issue(&conn, 3, "opened", now_ms - 30_000); + insert_issue(&conn, 4, "closed", now_ms - 40_000); + insert_issue(&conn, 5, "closed", now_ms - 50_000); + + let clock = FakeClock::from_ms(now_ms); + let data = fetch_dashboard(&conn, &clock).unwrap(); + + assert_eq!(data.counts.issues_open, 3); + assert_eq!(data.counts.issues_total, 5); + } + + #[test] + fn test_fetch_dashboard_mr_counts() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + let now_ms = 1_700_000_000_000_i64; + insert_mr(&conn, 1, "opened", now_ms); + insert_mr(&conn, 2, "merged", now_ms); + insert_mr(&conn, 3, "opened", now_ms); + insert_mr(&conn, 4, "closed", now_ms); + + let clock = FakeClock::from_ms(now_ms); + let data = fetch_dashboard(&conn, &clock).unwrap(); + + assert_eq!(data.counts.mrs_open, 2); + assert_eq!(data.counts.mrs_total, 4); + } + + #[test] + fn test_fetch_dashboard_empty_database() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + let clock = FakeClock::from_ms(1_700_000_000_000); + let data = fetch_dashboard(&conn, &clock).unwrap(); + + assert_eq!(data.counts.issues_open, 0); + assert_eq!(data.counts.issues_total, 0); + assert_eq!(data.counts.mrs_open, 0); + assert_eq!(data.counts.mrs_total, 0); + assert_eq!(data.counts.notes_system_pct, 0); + assert!(data.projects.is_empty()); + assert!(data.recent.is_empty()); + assert!(data.last_sync.is_none()); + } + + #[test] + fn test_fetch_dashboard_notes_system_pct() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + // 4 notes: 1 system, 3 user -> 25% system. 
+ for i in 0..4 { + conn.execute( + "INSERT INTO notes (gitlab_id, discussion_id, project_id, is_system, created_at, updated_at, last_seen_at) + VALUES (?1, 1, 1, ?2, 1000, 1000, 1000)", + rusqlite::params![i, if i == 0 { 1 } else { 0 }], + ) + .unwrap(); + } + + let clock = FakeClock::from_ms(1_700_000_000_000); + let data = fetch_dashboard(&conn, &clock).unwrap(); + + assert_eq!(data.counts.notes_total, 4); + assert_eq!(data.counts.notes_system_pct, 25); + } + + #[test] + fn test_fetch_dashboard_project_sync_info() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + conn.execute( + "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'group/alpha')", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (2, 'group/beta')", + [], + ) + .unwrap(); + + // Sync ran 30 minutes ago. sync_runs is global (no project_id), + // so all projects see the same last-sync time. + let now_ms = 1_700_000_000_000_i64; + conn.execute( + "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command) + VALUES (?1, ?1, ?2, 'succeeded', 'sync')", + [now_ms - 30 * 60_000, now_ms - 30 * 60_000], + ) + .unwrap(); + + let clock = FakeClock::from_ms(now_ms); + let data = fetch_dashboard(&conn, &clock).unwrap(); + + assert_eq!(data.projects.len(), 2); + assert_eq!(data.projects[0].path, "group/alpha"); + assert_eq!(data.projects[0].minutes_since_sync, 30); + assert_eq!(data.projects[1].path, "group/beta"); + assert_eq!(data.projects[1].minutes_since_sync, 30); // Same: sync_runs is global. 
+ } + + #[test] + fn test_fetch_dashboard_recent_activity_ordered() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + let now_ms = 1_700_000_000_000_i64; + insert_issue(&conn, 1, "opened", now_ms - 60_000); // 1 min ago + insert_mr(&conn, 1, "merged", now_ms - 120_000); // 2 min ago + insert_issue(&conn, 2, "closed", now_ms - 180_000); // 3 min ago + + let clock = FakeClock::from_ms(now_ms); + let data = fetch_dashboard(&conn, &clock).unwrap(); + + assert_eq!(data.recent.len(), 3); + assert_eq!(data.recent[0].entity_type, "issue"); + assert_eq!(data.recent[0].iid, 1); + assert_eq!(data.recent[0].minutes_ago, 1); + assert_eq!(data.recent[1].entity_type, "mr"); + assert_eq!(data.recent[1].minutes_ago, 2); + assert_eq!(data.recent[2].entity_type, "issue"); + assert_eq!(data.recent[2].minutes_ago, 3); + } + + #[test] + fn test_fetch_dashboard_last_sync() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + let now_ms = 1_700_000_000_000_i64; + conn.execute( + "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command, error) + VALUES (?1, ?1, ?2, 'failed', 'sync', 'network timeout')", + [now_ms - 60_000, now_ms - 50_000], + ) + .unwrap(); + conn.execute( + "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command) + VALUES (?1, ?1, ?2, 'succeeded', 'sync')", + [now_ms - 30_000, now_ms - 20_000], + ) + .unwrap(); + + let clock = FakeClock::from_ms(now_ms); + let data = fetch_dashboard(&conn, &clock).unwrap(); + + let sync = data.last_sync.unwrap(); + assert_eq!(sync.status, "succeeded"); + assert_eq!(sync.command, "sync"); + assert!(sync.error.is_none()); + } +} diff --git a/crates/lore-tui/src/action/file_history.rs b/crates/lore-tui/src/action/file_history.rs new file mode 100644 index 0000000..065b15c --- /dev/null +++ b/crates/lore-tui/src/action/file_history.rs @@ -0,0 +1,383 @@ +#![allow(dead_code)] + +//! 
File History screen actions — query MRs that touched a file path. +//! +//! Wraps the SQL queries from `lore::cli::commands::file_history` but uses +//! an injected `Connection` (TUI manages its own DB connection). + +use anyhow::Result; +use rusqlite::Connection; + +use lore::core::file_history::resolve_rename_chain; + +use crate::state::file_history::{FileDiscussion, FileHistoryMr, FileHistoryResult}; + +/// Maximum rename chain BFS depth. +const MAX_RENAME_HOPS: usize = 10; + +/// Default result limit. +const DEFAULT_LIMIT: usize = 50; + +/// Fetch file history: MRs that touched a file path, with optional rename resolution. +pub fn fetch_file_history( + conn: &Connection, + project_id: Option, + path: &str, + follow_renames: bool, + merged_only: bool, + include_discussions: bool, +) -> Result { + // Resolve rename chain unless disabled. + let (all_paths, renames_followed) = if !follow_renames { + (vec![path.to_string()], false) + } else if let Some(pid) = project_id { + let chain = resolve_rename_chain(conn, pid, path, MAX_RENAME_HOPS)?; + let followed = chain.len() > 1; + (chain, followed) + } else { + // Without project scope, can't resolve renames. + (vec![path.to_string()], false) + }; + + let paths_searched = all_paths.len(); + + // Build IN clause placeholders. 
+ let placeholders: Vec = (0..all_paths.len()) + .map(|i| format!("?{}", i + 2)) + .collect(); + let in_clause = placeholders.join(", "); + + let merged_filter = if merged_only { + " AND mr.state = 'merged'" + } else { + "" + }; + + let project_filter = if project_id.is_some() { + "AND mfc.project_id = ?1" + } else { + "" + }; + + let limit_param = all_paths.len() + 2; + let sql = format!( + "SELECT DISTINCT \ + mr.iid, mr.title, mr.state, mr.author_username, \ + mfc.change_type, mr.merged_at, mr.updated_at, mr.merge_commit_sha \ + FROM mr_file_changes mfc \ + JOIN merge_requests mr ON mr.id = mfc.merge_request_id \ + WHERE mfc.new_path IN ({in_clause}) {project_filter} {merged_filter} \ + ORDER BY COALESCE(mr.merged_at, mr.updated_at) DESC \ + LIMIT ?{limit_param}" + ); + + let mut stmt = conn.prepare(&sql)?; + + // Bind: ?1=project_id, ?2..?N+1=paths, ?N+2=limit. + let mut params: Vec> = Vec::new(); + params.push(Box::new(project_id.unwrap_or(0))); + for p in &all_paths { + params.push(Box::new(p.clone())); + } + params.push(Box::new(DEFAULT_LIMIT as i64)); + + let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|p| p.as_ref()).collect(); + + let merge_requests: Vec = stmt + .query_map(param_refs.as_slice(), |row| { + Ok(FileHistoryMr { + iid: row.get(0)?, + title: row.get(1)?, + state: row.get(2)?, + author_username: row.get(3)?, + change_type: row.get(4)?, + merged_at_ms: row.get(5)?, + updated_at_ms: row.get::<_, i64>(6)?, + merge_commit_sha: row.get(7)?, + }) + })? + .filter_map(std::result::Result::ok) + .collect(); + + let total_mrs = merge_requests.len(); + + // Optionally fetch DiffNote discussions. + let discussions = if include_discussions && !merge_requests.is_empty() { + fetch_file_discussions(conn, &all_paths, project_id)? 
+ } else { + Vec::new() + }; + + Ok(FileHistoryResult { + path: path.to_string(), + rename_chain: all_paths, + renames_followed, + merge_requests, + discussions, + total_mrs, + paths_searched, + }) +} + +/// Fetch DiffNote discussions referencing the given file paths. +fn fetch_file_discussions( + conn: &Connection, + paths: &[String], + project_id: Option, +) -> Result> { + let placeholders: Vec = (0..paths.len()).map(|i| format!("?{}", i + 2)).collect(); + let in_clause = placeholders.join(", "); + + let project_filter = if project_id.is_some() { + "AND d.project_id = ?1" + } else { + "" + }; + + let sql = format!( + "SELECT d.gitlab_discussion_id, n.author_username, n.body, n.new_path, n.created_at \ + FROM notes n \ + JOIN discussions d ON d.id = n.discussion_id \ + WHERE n.new_path IN ({in_clause}) {project_filter} \ + AND n.is_system = 0 \ + ORDER BY n.created_at DESC \ + LIMIT 50" + ); + + let mut stmt = conn.prepare(&sql)?; + + let mut params: Vec> = Vec::new(); + params.push(Box::new(project_id.unwrap_or(0))); + for p in paths { + params.push(Box::new(p.clone())); + } + + let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|p| p.as_ref()).collect(); + + let discussions: Vec = stmt + .query_map(param_refs.as_slice(), |row| { + let body: String = row.get(2)?; + let snippet = if body.len() > 200 { + format!("{}...", &body[..body.floor_char_boundary(200)]) + } else { + body + }; + Ok(FileDiscussion { + discussion_id: row.get(0)?, + author_username: row.get(1)?, + body_snippet: snippet, + path: row.get(3)?, + created_at_ms: row.get(4)?, + }) + })? + .filter_map(std::result::Result::ok) + .collect(); + + Ok(discussions) +} + +/// Fetch distinct file paths from mr_file_changes for autocomplete. 
+pub fn fetch_file_history_paths(conn: &Connection, project_id: Option) -> Result> { + let sql = if project_id.is_some() { + "SELECT DISTINCT new_path FROM mr_file_changes WHERE project_id = ?1 ORDER BY new_path LIMIT 5000" + } else { + "SELECT DISTINCT new_path FROM mr_file_changes ORDER BY new_path LIMIT 5000" + }; + + let mut stmt = conn.prepare(sql)?; + let paths: Vec = if let Some(pid) = project_id { + stmt.query_map([pid], |row| row.get(0))? + .filter_map(std::result::Result::ok) + .collect() + } else { + stmt.query_map([], |row| row.get(0))? + .filter_map(std::result::Result::ok) + .collect() + }; + + Ok(paths) +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + /// Minimal schema for file history queries. + fn create_file_history_schema(conn: &Connection) { + conn.execute_batch( + " + CREATE TABLE projects ( + id INTEGER PRIMARY KEY, + gitlab_project_id INTEGER UNIQUE NOT NULL, + path_with_namespace TEXT NOT NULL + ); + CREATE TABLE merge_requests ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT, + author_id INTEGER, + author_username TEXT, + draft INTEGER NOT NULL DEFAULT 0, + created_at INTEGER, + updated_at INTEGER, + merged_at INTEGER, + merge_commit_sha TEXT, + web_url TEXT, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE mr_file_changes ( + id INTEGER PRIMARY KEY, + merge_request_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + new_path TEXT NOT NULL, + old_path TEXT, + change_type TEXT NOT NULL + ); + CREATE TABLE discussions ( + id INTEGER PRIMARY KEY, + gitlab_discussion_id TEXT NOT NULL, + project_id INTEGER NOT NULL, + noteable_type TEXT NOT NULL, + issue_id INTEGER, + merge_request_id INTEGER, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE notes ( + id INTEGER 
PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                discussion_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                is_system INTEGER NOT NULL DEFAULT 0,
                author_username TEXT,
                body TEXT,
                note_type TEXT,
                new_path TEXT,
                old_path TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            ",
        )
        .expect("create file history schema");
    }

    #[test]
    fn test_fetch_file_history_empty_db() {
        // An empty database yields an empty (but well-formed) history result.
        let conn = Connection::open_in_memory().unwrap();
        create_file_history_schema(&conn);

        let history = fetch_file_history(&conn, None, "src/lib.rs", false, false, false).unwrap();
        assert!(history.merge_requests.is_empty());
        assert_eq!(history.total_mrs, 0);
        assert_eq!(history.path, "src/lib.rs");
    }

    #[test]
    fn test_fetch_file_history_returns_mrs() {
        let conn = Connection::open_in_memory().unwrap();
        create_file_history_schema(&conn);

        // Seed one project, one merged MR, and one file change touching it.
        conn.execute(
            "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'grp/repo')",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state, author_username, updated_at, last_seen_at) \
             VALUES (1, 1000, 1, 42, 'Fix auth', 'merged', 'alice', 1700000000000, 1700000000000)",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, new_path, change_type) \
             VALUES (1, 1, 'src/auth.rs', 'modified')",
            [],
        )
        .unwrap();

        let history =
            fetch_file_history(&conn, Some(1), "src/auth.rs", false, false, false).unwrap();
        assert_eq!(history.merge_requests.len(), 1);
        assert_eq!(history.merge_requests[0].iid, 42);
        assert_eq!(history.merge_requests[0].title, "Fix auth");
        assert_eq!(history.merge_requests[0].change_type, "modified");
    }

    #[test]
    fn test_fetch_file_history_merged_only() {
        let conn = Connection::open_in_memory().unwrap();
        create_file_history_schema(&conn);

        conn.execute(
            "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'grp/repo')",
            [],
        ).unwrap();
        // One merged MR and one still-open MR, both touching the same file.
        conn.execute(
            "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state, author_username, updated_at, last_seen_at) \
             VALUES (1, 1000, 1, 42, 'Merged MR', 'merged', 'alice', 1700000000000, 1700000000000)",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state, author_username, updated_at, last_seen_at) \
             VALUES (2, 1001, 1, 43, 'Open MR', 'opened', 'bob', 1700000000000, 1700000000000)",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, new_path, change_type) VALUES (1, 1, 'src/lib.rs', 'modified')",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, new_path, change_type) VALUES (2, 1, 'src/lib.rs', 'modified')",
            [],
        ).unwrap();

        // Without merged_only both MRs come back.
        let all = fetch_file_history(&conn, Some(1), "src/lib.rs", false, false, false).unwrap();
        assert_eq!(all.merge_requests.len(), 2);

        // With merged_only only the merged one survives.
        let merged = fetch_file_history(&conn, Some(1), "src/lib.rs", false, true, false).unwrap();
        assert_eq!(merged.merge_requests.len(), 1);
        assert_eq!(merged.merge_requests[0].state, "merged");
    }

    #[test]
    fn test_fetch_file_history_paths_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_file_history_schema(&conn);

        let paths = fetch_file_history_paths(&conn, None).unwrap();
        assert!(paths.is_empty());
    }

    #[test]
    fn test_fetch_file_history_paths_returns_distinct() {
        let conn = Connection::open_in_memory().unwrap();
        create_file_history_schema(&conn);

        // 'src/a.rs' appears twice; the path list must deduplicate it.
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, new_path, change_type) VALUES (1, 1, 'src/a.rs', 'modified')",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, new_path, change_type) VALUES (2, 1, 'src/a.rs', 'modified')",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, new_path, change_type) VALUES (3, 1, 'src/b.rs', 'added')",
            [],
        ).unwrap();

        let paths = fetch_file_history_paths(&conn, None).unwrap();
        assert_eq!(paths, vec!["src/a.rs", "src/b.rs"]);
    }
}

// ===== crates/lore-tui/src/action/issue_detail.rs =====
#![allow(dead_code)]

use anyhow::{Context, Result};
use rusqlite::Connection;

use crate::message::EntityKey;
use crate::state::issue_detail::{IssueDetailData, IssueMetadata};
use crate::view::common::cross_ref::{CrossRef, CrossRefKind};
use crate::view::common::discussion_tree::{DiscussionNode, NoteNode};

/// Fetch issue metadata and cross-references (Phase 1 load).
///
/// Runs inside a single read transaction for snapshot consistency.
/// Returns metadata + cross-refs; discussions are loaded separately.
+pub fn fetch_issue_detail(conn: &Connection, key: &EntityKey) -> Result { + let metadata = fetch_issue_metadata(conn, key)?; + let cross_refs = fetch_issue_cross_refs(conn, key)?; + Ok(IssueDetailData { + metadata, + cross_refs, + }) +} + +/// Fetch issue metadata from the local DB. +fn fetch_issue_metadata(conn: &Connection, key: &EntityKey) -> Result { + let row = conn + .query_row( + "SELECT i.iid, p.path_with_namespace, i.title, + COALESCE(i.description, ''), i.state, i.author_username, + COALESCE(i.milestone_title, ''), + i.due_date, i.created_at, i.updated_at, + COALESCE(i.web_url, ''), + (SELECT COUNT(*) FROM discussions d + WHERE d.issue_id = i.id AND d.noteable_type = 'Issue') + FROM issues i + JOIN projects p ON p.id = i.project_id + WHERE i.project_id = ?1 AND i.iid = ?2", + rusqlite::params![key.project_id, key.iid], + |row| { + Ok(IssueMetadata { + iid: row.get(0)?, + project_path: row.get(1)?, + title: row.get(2)?, + description: row.get(3)?, + state: row.get(4)?, + author: row.get::<_, Option>(5)?.unwrap_or_default(), + assignees: Vec::new(), // Fetched separately below. + labels: Vec::new(), // Fetched separately below. + milestone: { + let m: String = row.get(6)?; + if m.is_empty() { None } else { Some(m) } + }, + due_date: row.get(7)?, + created_at: row.get(8)?, + updated_at: row.get(9)?, + web_url: row.get(10)?, + discussion_count: row.get::<_, i64>(11)? as usize, + }) + }, + ) + .context("fetching issue metadata")?; + + // Fetch assignees. + let mut assignees_stmt = conn + .prepare("SELECT username FROM issue_assignees WHERE issue_id = (SELECT id FROM issues WHERE project_id = ?1 AND iid = ?2)") + .context("preparing assignees query")?; + let assignees: Vec = assignees_stmt + .query_map(rusqlite::params![key.project_id, key.iid], |r| r.get(0)) + .context("fetching assignees")? + .collect::, _>>() + .context("reading assignee row")?; + + // Fetch labels. 
+ let mut labels_stmt = conn + .prepare( + "SELECT l.name FROM issue_labels il + JOIN labels l ON l.id = il.label_id + WHERE il.issue_id = (SELECT id FROM issues WHERE project_id = ?1 AND iid = ?2) + ORDER BY l.name", + ) + .context("preparing labels query")?; + let labels: Vec = labels_stmt + .query_map(rusqlite::params![key.project_id, key.iid], |r| r.get(0)) + .context("fetching labels")? + .collect::, _>>() + .context("reading label row")?; + + Ok(IssueMetadata { + assignees, + labels, + ..row + }) +} + +/// Fetch cross-references for an issue from the entity_references table. +fn fetch_issue_cross_refs(conn: &Connection, key: &EntityKey) -> Result> { + let mut stmt = conn + .prepare( + "SELECT er.reference_type, er.target_entity_type, er.target_entity_id, + er.target_entity_iid, er.target_project_path, + CASE + WHEN er.target_entity_type = 'issue' + THEN (SELECT title FROM issues WHERE id = er.target_entity_id) + WHEN er.target_entity_type = 'merge_request' + THEN (SELECT title FROM merge_requests WHERE id = er.target_entity_id) + ELSE NULL + END as entity_title, + CASE + WHEN er.target_entity_id IS NOT NULL + THEN (SELECT project_id FROM issues WHERE id = er.target_entity_id + UNION ALL + SELECT project_id FROM merge_requests WHERE id = er.target_entity_id + LIMIT 1) + ELSE NULL + END as target_project_id + FROM entity_references er + WHERE er.source_entity_type = 'issue' + AND er.source_entity_id = (SELECT id FROM issues WHERE project_id = ?1 AND iid = ?2) + ORDER BY er.reference_type, er.target_entity_iid", + ) + .context("preparing cross-ref query")?; + + let refs = stmt + .query_map(rusqlite::params![key.project_id, key.iid], |row| { + let ref_type: String = row.get(0)?; + let target_type: String = row.get(1)?; + let target_id: Option = row.get(2)?; + let target_iid: Option = row.get(3)?; + let target_path: Option = row.get(4)?; + let title: Option = row.get(5)?; + let target_project_id: Option = row.get(6)?; + + let kind = match (ref_type.as_str(), 
target_type.as_str()) { + ("closes", "merge_request") => CrossRefKind::ClosingMr, + ("related", "issue") => CrossRefKind::RelatedIssue, + _ => CrossRefKind::MentionedIn, + }; + + let iid = target_iid.unwrap_or(0); + let project_id = target_project_id.unwrap_or(key.project_id); + + let entity_key = match target_type.as_str() { + "merge_request" => EntityKey::mr(project_id, iid), + _ => EntityKey::issue(project_id, iid), + }; + + let label = title.unwrap_or_else(|| { + let prefix = if target_type == "merge_request" { + "!" + } else { + "#" + }; + let path = target_path.unwrap_or_default(); + if path.is_empty() { + format!("{prefix}{iid}") + } else { + format!("{path}{prefix}{iid}") + } + }); + + let navigable = target_id.is_some(); + + Ok(CrossRef { + kind, + entity_key, + label, + navigable, + }) + }) + .context("fetching cross-refs")? + .collect::, _>>() + .context("reading cross-ref row")?; + + Ok(refs) +} + +/// Fetch discussions for an issue (Phase 2 async load). +/// +/// Returns `DiscussionNode` tree suitable for the discussion tree widget. 
pub fn fetch_issue_discussions(conn: &Connection, key: &EntityKey) -> Result<Vec<DiscussionNode>> {
    // Resolve the issue's local row id first; errors if the issue is unknown.
    let issue_id: i64 = conn
        .query_row(
            "SELECT id FROM issues WHERE project_id = ?1 AND iid = ?2",
            rusqlite::params![key.project_id, key.iid],
            |r| r.get(0),
        )
        .context("looking up issue id")?;

    let mut disc_stmt = conn
        .prepare(
            "SELECT d.id, d.gitlab_discussion_id, d.resolvable, d.resolved
             FROM discussions d
             WHERE d.issue_id = ?1 AND d.noteable_type = 'Issue'
             ORDER BY d.first_note_at ASC, d.id ASC",
        )
        .context("preparing discussions query")?;

    let mut note_stmt = conn
        .prepare(
            "SELECT n.author_username, n.body, n.created_at, n.is_system,
                    n.note_type, n.position_new_path, n.position_new_line
             FROM notes n
             WHERE n.discussion_id = ?1
             ORDER BY n.position ASC, n.created_at ASC",
        )
        .context("preparing notes query")?;

    // Materialize the discussion headers before running the per-discussion
    // note query (two prepared statements cannot run interleaved borrows).
    let disc_rows: Vec<_> = disc_stmt
        .query_map(rusqlite::params![issue_id], |row| {
            Ok((
                row.get::<_, i64>(0)?,    // id
                row.get::<_, String>(1)?, // gitlab_discussion_id
                row.get::<_, bool>(2)?,   // resolvable
                row.get::<_, bool>(3)?,   // resolved
            ))
        })
        .context("fetching discussions")?
        .collect::<Result<Vec<_>, _>>()
        .context("reading discussion row")?;

    let mut discussions = Vec::new();
    for (disc_db_id, discussion_id, resolvable, resolved) in disc_rows {
        let notes: Vec<NoteNode> = note_stmt
            .query_map(rusqlite::params![disc_db_id], |row| {
                Ok(NoteNode {
                    author: row.get::<_, Option<String>>(0)?.unwrap_or_default(),
                    body: row.get::<_, Option<String>>(1)?.unwrap_or_default(),
                    created_at: row.get(2)?,
                    is_system: row.get(3)?,
                    // GitLab marks inline code comments with note_type 'DiffNote'.
                    is_diff_note: row.get::<_, Option<String>>(4)?.as_deref() == Some("DiffNote"),
                    diff_file_path: row.get(5)?,
                    diff_new_line: row.get(6)?,
                })
            })
            .context("fetching notes")?
            .collect::<Result<Vec<_>, _>>()
            .context("reading note row")?;

        discussions.push(DiscussionNode {
            discussion_id,
            notes,
            resolvable,
            resolved,
        });
    }

    Ok(discussions)
}

#[cfg(test)]
mod tests {
    use super::*;

    /// Minimal schema mirroring the production tables these queries touch.
    fn create_issue_detail_schema(conn: &Connection) {
        conn.execute_batch(
            "
            CREATE TABLE projects (
                id INTEGER PRIMARY KEY,
                gitlab_project_id INTEGER UNIQUE NOT NULL,
                path_with_namespace TEXT NOT NULL
            );
            CREATE TABLE issues (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT NOT NULL,
                description TEXT,
                state TEXT NOT NULL DEFAULT 'opened',
                author_username TEXT,
                milestone_title TEXT,
                due_date TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                web_url TEXT,
                UNIQUE(project_id, iid)
            );
            CREATE TABLE issue_assignees (
                issue_id INTEGER NOT NULL,
                username TEXT NOT NULL,
                UNIQUE(issue_id, username)
            );
            CREATE TABLE labels (
                id INTEGER PRIMARY KEY,
                project_id INTEGER NOT NULL,
                name TEXT NOT NULL
            );
            CREATE TABLE issue_labels (
                issue_id INTEGER NOT NULL,
                label_id INTEGER NOT NULL,
                UNIQUE(issue_id, label_id)
            );
            CREATE TABLE discussions (
                id INTEGER PRIMARY KEY,
                gitlab_discussion_id TEXT NOT NULL,
                project_id INTEGER NOT NULL,
                issue_id INTEGER,
                merge_request_id INTEGER,
                noteable_type TEXT NOT NULL,
                resolvable INTEGER NOT NULL DEFAULT 0,
                resolved INTEGER NOT NULL DEFAULT 0,
                first_note_at INTEGER
            );
            CREATE TABLE notes (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                discussion_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                note_type TEXT,
                is_system INTEGER NOT NULL DEFAULT 0,
                author_username TEXT,
                body TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                position INTEGER,
                position_new_path TEXT,
                position_new_line INTEGER
            );
            CREATE TABLE entity_references (
                id INTEGER PRIMARY KEY,
                project_id INTEGER NOT NULL,
source_entity_type TEXT NOT NULL, + source_entity_id INTEGER NOT NULL, + target_entity_type TEXT NOT NULL, + target_entity_id INTEGER, + target_project_path TEXT, + target_entity_iid INTEGER, + reference_type TEXT NOT NULL, + source_method TEXT NOT NULL DEFAULT 'api', + created_at INTEGER NOT NULL DEFAULT 0 + ); + CREATE TABLE merge_requests ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT NOT NULL, + state TEXT NOT NULL DEFAULT 'opened', + UNIQUE(project_id, iid) + ); + ", + ) + .unwrap(); + } + + fn setup_issue_detail_data(conn: &Connection) { + // Project. + conn.execute( + "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'group/project')", + [], + ) + .unwrap(); + + // Issue. + conn.execute( + "INSERT INTO issues (id, gitlab_id, project_id, iid, title, description, state, author_username, milestone_title, due_date, created_at, updated_at, web_url) + VALUES (1, 1000, 1, 42, 'Fix authentication flow', 'Detailed description here', 'opened', 'alice', 'v1.0', '2026-03-01', 1700000000000, 1700000060000, 'https://gitlab.com/group/project/-/issues/42')", + [], + ) + .unwrap(); + + // Assignees. + conn.execute( + "INSERT INTO issue_assignees (issue_id, username) VALUES (1, 'bob')", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO issue_assignees (issue_id, username) VALUES (1, 'charlie')", + [], + ) + .unwrap(); + + // Labels. + conn.execute( + "INSERT INTO labels (id, project_id, name) VALUES (1, 1, 'backend')", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO labels (id, project_id, name) VALUES (2, 1, 'urgent')", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO issue_labels (issue_id, label_id) VALUES (1, 1)", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO issue_labels (issue_id, label_id) VALUES (1, 2)", + [], + ) + .unwrap(); + + // Discussions + notes. 
+ conn.execute( + "INSERT INTO discussions (id, gitlab_discussion_id, project_id, issue_id, noteable_type, resolvable, resolved, first_note_at) + VALUES (1, 'disc-aaa', 1, 1, 'Issue', 0, 0, 1700000010000)", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO notes (id, gitlab_id, discussion_id, project_id, author_username, body, created_at, updated_at, position, is_system, note_type) + VALUES (1, 10001, 1, 1, 'alice', 'This looks good overall', 1700000010000, 1700000010000, 0, 0, 'DiscussionNote')", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO notes (id, gitlab_id, discussion_id, project_id, author_username, body, created_at, updated_at, position, is_system, note_type) + VALUES (2, 10002, 1, 1, 'bob', 'Agreed, but see my comment below', 1700000020000, 1700000020000, 1, 0, 'DiscussionNote')", + [], + ) + .unwrap(); + + // System note discussion. + conn.execute( + "INSERT INTO discussions (id, gitlab_discussion_id, project_id, issue_id, noteable_type, first_note_at) + VALUES (2, 'disc-bbb', 1, 1, 'Issue', 1700000030000)", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO notes (id, gitlab_id, discussion_id, project_id, author_username, body, created_at, updated_at, position, is_system, note_type) + VALUES (3, 10003, 2, 1, 'system', 'changed the description', 1700000030000, 1700000030000, 0, 1, NULL)", + [], + ) + .unwrap(); + + // Closing MR cross-ref. 
+ conn.execute( + "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state) + VALUES (1, 2000, 1, 10, 'Fix auth MR', 'opened')", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO entity_references (project_id, source_entity_type, source_entity_id, target_entity_type, target_entity_id, target_entity_iid, reference_type) + VALUES (1, 'issue', 1, 'merge_request', 1, 10, 'closes')", + [], + ) + .unwrap(); + } + + #[test] + fn test_fetch_issue_detail_basic() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_detail_schema(&conn); + setup_issue_detail_data(&conn); + + let key = EntityKey::issue(1, 42); + let data = fetch_issue_detail(&conn, &key).unwrap(); + + assert_eq!(data.metadata.iid, 42); + assert_eq!(data.metadata.title, "Fix authentication flow"); + assert_eq!(data.metadata.state, "opened"); + assert_eq!(data.metadata.author, "alice"); + assert_eq!(data.metadata.project_path, "group/project"); + assert_eq!(data.metadata.milestone, Some("v1.0".to_string())); + assert_eq!(data.metadata.due_date, Some("2026-03-01".to_string())); + assert_eq!( + data.metadata.web_url, + "https://gitlab.com/group/project/-/issues/42" + ); + } + + #[test] + fn test_fetch_issue_detail_assignees() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_detail_schema(&conn); + setup_issue_detail_data(&conn); + + let key = EntityKey::issue(1, 42); + let data = fetch_issue_detail(&conn, &key).unwrap(); + + assert_eq!(data.metadata.assignees.len(), 2); + assert!(data.metadata.assignees.contains(&"bob".to_string())); + assert!(data.metadata.assignees.contains(&"charlie".to_string())); + } + + #[test] + fn test_fetch_issue_detail_labels() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_detail_schema(&conn); + setup_issue_detail_data(&conn); + + let key = EntityKey::issue(1, 42); + let data = fetch_issue_detail(&conn, &key).unwrap(); + + assert_eq!(data.metadata.labels, vec!["backend", "urgent"]); + } + + #[test] + fn 
test_fetch_issue_detail_cross_refs() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_detail_schema(&conn); + setup_issue_detail_data(&conn); + + let key = EntityKey::issue(1, 42); + let data = fetch_issue_detail(&conn, &key).unwrap(); + + assert_eq!(data.cross_refs.len(), 1); + assert_eq!(data.cross_refs[0].kind, CrossRefKind::ClosingMr); + assert_eq!(data.cross_refs[0].entity_key, EntityKey::mr(1, 10)); + assert_eq!(data.cross_refs[0].label, "Fix auth MR"); + assert!(data.cross_refs[0].navigable); + } + + #[test] + fn test_fetch_issue_detail_discussion_count() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_detail_schema(&conn); + setup_issue_detail_data(&conn); + + let key = EntityKey::issue(1, 42); + let data = fetch_issue_detail(&conn, &key).unwrap(); + + assert_eq!(data.metadata.discussion_count, 2); + } + + #[test] + fn test_fetch_issue_discussions_basic() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_detail_schema(&conn); + setup_issue_detail_data(&conn); + + let key = EntityKey::issue(1, 42); + let discussions = fetch_issue_discussions(&conn, &key).unwrap(); + + assert_eq!(discussions.len(), 2); + } + + #[test] + fn test_fetch_issue_discussions_notes() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_detail_schema(&conn); + setup_issue_detail_data(&conn); + + let key = EntityKey::issue(1, 42); + let discussions = fetch_issue_discussions(&conn, &key).unwrap(); + + // First discussion has 2 notes. 
+ assert_eq!(discussions[0].notes.len(), 2); + assert_eq!(discussions[0].notes[0].author, "alice"); + assert_eq!(discussions[0].notes[0].body, "This looks good overall"); + assert_eq!(discussions[0].notes[1].author, "bob"); + assert!(!discussions[0].notes[0].is_system); + } + + #[test] + fn test_fetch_issue_discussions_system_note() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_detail_schema(&conn); + setup_issue_detail_data(&conn); + + let key = EntityKey::issue(1, 42); + let discussions = fetch_issue_discussions(&conn, &key).unwrap(); + + // Second discussion is a system note. + assert_eq!(discussions[1].notes.len(), 1); + assert!(discussions[1].notes[0].is_system); + assert_eq!(discussions[1].notes[0].body, "changed the description"); + } + + #[test] + fn test_fetch_issue_discussions_ordering() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_detail_schema(&conn); + setup_issue_detail_data(&conn); + + let key = EntityKey::issue(1, 42); + let discussions = fetch_issue_discussions(&conn, &key).unwrap(); + + // Ordered by first_note_at. 
+ assert_eq!(discussions[0].discussion_id, "disc-aaa"); + assert_eq!(discussions[1].discussion_id, "disc-bbb"); + } + + #[test] + fn test_fetch_issue_detail_not_found() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_detail_schema(&conn); + setup_issue_detail_data(&conn); + + let key = EntityKey::issue(1, 999); + let result = fetch_issue_detail(&conn, &key); + assert!(result.is_err()); + } + + #[test] + fn test_fetch_issue_detail_no_description() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_detail_schema(&conn); + + conn.execute( + "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'g/p')", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO issues (id, gitlab_id, project_id, iid, title, description, state, created_at, updated_at) + VALUES (1, 1000, 1, 1, 'No desc', NULL, 'opened', 0, 0)", + [], + ) + .unwrap(); + + let key = EntityKey::issue(1, 1); + let data = fetch_issue_detail(&conn, &key).unwrap(); + assert_eq!(data.metadata.description, ""); + } +} diff --git a/crates/lore-tui/src/action/issue_list.rs b/crates/lore-tui/src/action/issue_list.rs new file mode 100644 index 0000000..9e665bc --- /dev/null +++ b/crates/lore-tui/src/action/issue_list.rs @@ -0,0 +1,532 @@ +#![allow(dead_code)] + +use anyhow::{Context, Result}; +use rusqlite::Connection; + +use crate::state::issue_list::{ + IssueCursor, IssueFilter, IssueListPage, IssueListRow, SortField, SortOrder, +}; + +/// Page size for issue list queries. +const ISSUE_PAGE_SIZE: usize = 50; + +/// Fetch a page of issues matching the given filter and sort. +/// +/// Uses keyset pagination: when `cursor` is `Some`, returns rows after +/// (less-than for DESC, greater-than for ASC) the cursor boundary. +/// When `snapshot_fence` is `Some`, limits results to rows updated_at <= fence +/// to prevent newly synced items from shifting the page window. 
+pub fn fetch_issue_list( + conn: &Connection, + filter: &IssueFilter, + sort_field: SortField, + sort_order: SortOrder, + cursor: Option<&IssueCursor>, + snapshot_fence: Option, +) -> Result { + // -- Build dynamic WHERE conditions and params -------------------------- + let mut conditions: Vec = Vec::new(); + let mut params: Vec> = Vec::new(); + + // Filter: project_id + if let Some(pid) = filter.project_id { + conditions.push("i.project_id = ?".into()); + params.push(Box::new(pid)); + } + + // Filter: state + if let Some(ref state) = filter.state { + conditions.push("i.state = ?".into()); + params.push(Box::new(state.clone())); + } + + // Filter: author + if let Some(ref author) = filter.author { + conditions.push("i.author_username = ?".into()); + params.push(Box::new(author.clone())); + } + + // Filter: label (via join) + let label_join = if let Some(ref label) = filter.label { + conditions.push("fl.name = ?".into()); + params.push(Box::new(label.clone())); + "JOIN issue_labels fil ON fil.issue_id = i.id \ + JOIN labels fl ON fl.id = fil.label_id" + } else { + "" + }; + + // Filter: free_text (LIKE on title) + if let Some(ref text) = filter.free_text { + conditions.push("i.title LIKE ?".into()); + params.push(Box::new(format!("%{text}%"))); + } + + // Snapshot fence + if let Some(fence) = snapshot_fence { + conditions.push("i.updated_at <= ?".into()); + params.push(Box::new(fence)); + } + + // -- Count query (before cursor filter) --------------------------------- + let where_clause = if conditions.is_empty() { + String::new() + } else { + format!("WHERE {}", conditions.join(" AND ")) + }; + + let count_sql = format!( + "SELECT COUNT(DISTINCT i.id) FROM issues i \ + JOIN projects p ON p.id = i.project_id \ + {label_join} {where_clause}" + ); + let count_params: Vec<&dyn rusqlite::types::ToSql> = + params.iter().map(|b| b.as_ref()).collect(); + + let total_count: i64 = conn + .query_row(&count_sql, count_params.as_slice(), |r| r.get(0)) + .context("counting 
issues for list")?; + + // -- Keyset cursor condition ------------------------------------------- + let (sort_col, sort_dir) = sort_column_and_dir(sort_field, sort_order); + let cursor_op = if sort_dir == "DESC" { "<" } else { ">" }; + + if let Some(c) = cursor { + conditions.push(format!("({sort_col}, i.iid) {cursor_op} (?, ?)")); + params.push(Box::new(c.updated_at)); + params.push(Box::new(c.iid)); + } + + // -- Data query --------------------------------------------------------- + let where_clause_full = if conditions.is_empty() { + String::new() + } else { + format!("WHERE {}", conditions.join(" AND ")) + }; + + let data_sql = format!( + "SELECT p.path_with_namespace, i.iid, i.title, i.state, \ + i.author_username, i.updated_at, \ + GROUP_CONCAT(DISTINCT l.name) AS label_names \ + FROM issues i \ + JOIN projects p ON p.id = i.project_id \ + {label_join} \ + LEFT JOIN issue_labels il ON il.issue_id = i.id \ + LEFT JOIN labels l ON l.id = il.label_id \ + {where_clause_full} \ + GROUP BY i.id \ + ORDER BY {sort_col} {sort_dir}, i.iid {sort_dir} \ + LIMIT ?" 
+ ); + + // +1 to detect if there's a next page + let fetch_limit = (ISSUE_PAGE_SIZE + 1) as i64; + params.push(Box::new(fetch_limit)); + + let all_params: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|b| b.as_ref()).collect(); + + let mut stmt = conn + .prepare(&data_sql) + .context("preparing issue list query")?; + + let rows_result = stmt + .query_map(all_params.as_slice(), |row| { + let project_path: String = row.get(0)?; + let iid: i64 = row.get(1)?; + let title: String = row.get::<_, Option>(2)?.unwrap_or_default(); + let state: String = row.get::<_, Option>(3)?.unwrap_or_default(); + let author: String = row.get::<_, Option>(4)?.unwrap_or_default(); + let updated_at: i64 = row.get(5)?; + let label_names: Option = row.get(6)?; + + let labels = label_names + .map(|s| s.split(',').map(String::from).collect()) + .unwrap_or_default(); + + Ok(IssueListRow { + project_path, + iid, + title, + state, + author, + labels, + updated_at, + }) + }) + .context("querying issue list")?; + + let mut rows: Vec = Vec::new(); + for row in rows_result { + rows.push(row.context("reading issue list row")?); + } + + // Determine next cursor from the last row (if we got more than page size) + let has_next = rows.len() > ISSUE_PAGE_SIZE; + if has_next { + rows.truncate(ISSUE_PAGE_SIZE); + } + + let next_cursor = if has_next { + rows.last().map(|r| IssueCursor { + updated_at: r.updated_at, + iid: r.iid, + }) + } else { + None + }; + + #[allow(clippy::cast_sign_loss)] + Ok(IssueListPage { + rows, + next_cursor, + total_count: total_count as u64, + }) +} + +/// Map sort field + order to SQL column name and direction keyword. 
+fn sort_column_and_dir(field: SortField, order: SortOrder) -> (&'static str, &'static str) { + let col = match field { + SortField::UpdatedAt => "i.updated_at", + SortField::Iid => "i.iid", + SortField::Title => "i.title", + SortField::State => "i.state", + SortField::Author => "i.author_username", + }; + let dir = match order { + SortOrder::Desc => "DESC", + SortOrder::Asc => "ASC", + }; + (col, dir) +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Create the minimal schema needed for issue list queries. + fn create_issue_list_schema(conn: &Connection) { + conn.execute_batch( + " + CREATE TABLE projects ( + id INTEGER PRIMARY KEY, + gitlab_project_id INTEGER UNIQUE NOT NULL, + path_with_namespace TEXT NOT NULL + ); + CREATE TABLE issues ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT NOT NULL, + author_username TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE labels ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER, + project_id INTEGER NOT NULL, + name TEXT NOT NULL, + color TEXT, + description TEXT + ); + CREATE TABLE issue_labels ( + issue_id INTEGER NOT NULL, + label_id INTEGER NOT NULL, + PRIMARY KEY(issue_id, label_id) + ); + ", + ) + .expect("create issue list schema"); + } + + /// Insert a test issue with an author. + fn insert_issue_full(conn: &Connection, iid: i64, state: &str, author: &str, updated_at: i64) { + conn.execute( + "INSERT INTO issues (gitlab_id, project_id, iid, title, state, author_username, created_at, updated_at, last_seen_at) + VALUES (?1, 1, ?2, ?3, ?4, ?5, ?6, ?6, ?6)", + rusqlite::params![ + iid * 100, + iid, + format!("Issue {iid}"), + state, + author, + updated_at + ], + ) + .expect("insert issue full"); + } + + /// Attach a label to an issue. + fn attach_label(conn: &Connection, issue_iid: i64, label_name: &str) { + // Find issue id. 
+ let issue_id: i64 = conn + .query_row("SELECT id FROM issues WHERE iid = ?", [issue_iid], |r| { + r.get(0) + }) + .expect("find issue"); + + // Ensure label exists. + conn.execute( + "INSERT OR IGNORE INTO labels (project_id, name) VALUES (1, ?)", + [label_name], + ) + .expect("insert label"); + let label_id: i64 = conn + .query_row("SELECT id FROM labels WHERE name = ?", [label_name], |r| { + r.get(0) + }) + .expect("find label"); + + conn.execute( + "INSERT INTO issue_labels (issue_id, label_id) VALUES (?, ?)", + [issue_id, label_id], + ) + .expect("attach label"); + } + + fn setup_issue_list_data(conn: &Connection) { + let base = 1_700_000_000_000_i64; + conn.execute( + "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'group/project')", + [], + ) + .unwrap(); + + insert_issue_full(conn, 1, "opened", "alice", base - 10_000); + insert_issue_full(conn, 2, "opened", "bob", base - 20_000); + insert_issue_full(conn, 3, "closed", "alice", base - 30_000); + insert_issue_full(conn, 4, "opened", "charlie", base - 40_000); + insert_issue_full(conn, 5, "closed", "bob", base - 50_000); + + attach_label(conn, 1, "bug"); + attach_label(conn, 1, "critical"); + attach_label(conn, 2, "feature"); + attach_label(conn, 4, "bug"); + } + + #[test] + fn test_fetch_issue_list_basic() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let filter = IssueFilter::default(); + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 5); + assert_eq!(page.rows.len(), 5); + // Newest first. 
+ assert_eq!(page.rows[0].iid, 1); + assert_eq!(page.rows[4].iid, 5); + assert!(page.next_cursor.is_none()); + } + + #[test] + fn test_fetch_issue_list_filter_state() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let filter = IssueFilter { + state: Some("opened".into()), + ..Default::default() + }; + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 3); + assert_eq!(page.rows.len(), 3); + assert!(page.rows.iter().all(|r| r.state == "opened")); + } + + #[test] + fn test_fetch_issue_list_filter_author() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let filter = IssueFilter { + author: Some("alice".into()), + ..Default::default() + }; + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 2); + assert_eq!(page.rows.len(), 2); + assert!(page.rows.iter().all(|r| r.author == "alice")); + } + + #[test] + fn test_fetch_issue_list_filter_label() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let filter = IssueFilter { + label: Some("bug".into()), + ..Default::default() + }; + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 2); // issues 1 and 4 + assert_eq!(page.rows.len(), 2); + } + + #[test] + fn test_fetch_issue_list_labels_aggregated() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let filter = IssueFilter::default(); + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Desc, + None, + None, + ) + .unwrap(); + 
+ // Issue 1 has labels "bug" and "critical". + let issue1 = page.rows.iter().find(|r| r.iid == 1).unwrap(); + assert_eq!(issue1.labels.len(), 2); + assert!(issue1.labels.contains(&"bug".to_string())); + assert!(issue1.labels.contains(&"critical".to_string())); + + // Issue 5 has no labels. + let issue5 = page.rows.iter().find(|r| r.iid == 5).unwrap(); + assert!(issue5.labels.is_empty()); + } + + #[test] + fn test_fetch_issue_list_sort_ascending() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let filter = IssueFilter::default(); + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Asc, + None, + None, + ) + .unwrap(); + + // Oldest first. + assert_eq!(page.rows[0].iid, 5); + assert_eq!(page.rows[4].iid, 1); + } + + #[test] + fn test_fetch_issue_list_snapshot_fence() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let base = 1_700_000_000_000_i64; + // Fence at base-25000: should exclude issues 1 (at base-10000) and 2 (at base-20000). 
+ let fence = base - 25_000; + let filter = IssueFilter::default(); + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Desc, + None, + Some(fence), + ) + .unwrap(); + + assert_eq!(page.total_count, 3); + assert_eq!(page.rows.len(), 3); + assert!(page.rows.iter().all(|r| r.updated_at <= fence)); + } + + #[test] + fn test_fetch_issue_list_empty() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + conn.execute( + "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'g/p')", + [], + ) + .unwrap(); + + let page = fetch_issue_list( + &conn, + &IssueFilter::default(), + SortField::UpdatedAt, + SortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 0); + assert!(page.rows.is_empty()); + assert!(page.next_cursor.is_none()); + } + + #[test] + fn test_fetch_issue_list_free_text() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let filter = IssueFilter { + free_text: Some("Issue 3".into()), + ..Default::default() + }; + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 1); + assert_eq!(page.rows[0].iid, 3); + } +} diff --git a/crates/lore-tui/src/action/mod.rs b/crates/lore-tui/src/action/mod.rs new file mode 100644 index 0000000..c0b18f1 --- /dev/null +++ b/crates/lore-tui/src/action/mod.rs @@ -0,0 +1,29 @@ +//! Action layer — pure data-fetching functions for TUI screens. +//! +//! Actions query the local SQLite database and return data structs. +//! They never touch terminal state, never spawn tasks, and use injected +//! [`Clock`] for time calculations (deterministic tests). 
+ +mod bootstrap; +mod dashboard; +mod file_history; +mod issue_detail; +mod issue_list; +mod mr_detail; +mod mr_list; +mod search; +mod timeline; +mod trace; +mod who; + +pub use bootstrap::*; +pub use dashboard::*; +pub use file_history::*; +pub use issue_detail::*; +pub use issue_list::*; +pub use mr_detail::*; +pub use mr_list::*; +pub use search::*; +pub use timeline::*; +pub use trace::*; +pub use who::*; diff --git a/crates/lore-tui/src/action/mr_detail.rs b/crates/lore-tui/src/action/mr_detail.rs new file mode 100644 index 0000000..172b1cc --- /dev/null +++ b/crates/lore-tui/src/action/mr_detail.rs @@ -0,0 +1,694 @@ +#![allow(dead_code)] + +use anyhow::{Context, Result}; +use rusqlite::Connection; + +use crate::message::EntityKey; +use crate::state::mr_detail::{FileChange, FileChangeType, MrDetailData, MrMetadata}; +use crate::view::common::cross_ref::{CrossRef, CrossRefKind}; +use crate::view::common::discussion_tree::{DiscussionNode, NoteNode}; + +/// Fetch MR metadata + cross-refs + file changes (Phase 1 composite). +pub fn fetch_mr_detail(conn: &Connection, key: &EntityKey) -> Result { + let metadata = fetch_mr_metadata(conn, key)?; + let cross_refs = fetch_mr_cross_refs(conn, key)?; + let file_changes = fetch_mr_file_changes(conn, key)?; + Ok(MrDetailData { + metadata, + cross_refs, + file_changes, + }) +} + +/// Fetch MR metadata from the local DB. 
+fn fetch_mr_metadata(conn: &Connection, key: &EntityKey) -> Result { + let row = conn + .query_row( + "SELECT m.iid, p.path_with_namespace, m.title, + COALESCE(m.description, ''), m.state, m.draft, + m.author_username, m.source_branch, m.target_branch, + COALESCE(m.detailed_merge_status, ''), + m.created_at, m.updated_at, m.merged_at, + COALESCE(m.web_url, ''), + (SELECT COUNT(*) FROM discussions d WHERE d.merge_request_id = m.id) AS disc_count, + (SELECT COUNT(*) FROM mr_file_changes fc WHERE fc.merge_request_id = m.id) AS fc_count + FROM merge_requests m + JOIN projects p ON p.id = m.project_id + WHERE m.project_id = ?1 AND m.iid = ?2", + rusqlite::params![key.project_id, key.iid], + |row| { + Ok(MrMetadata { + iid: row.get(0)?, + project_path: row.get(1)?, + title: row.get::<_, Option>(2)?.unwrap_or_default(), + description: row.get(3)?, + state: row.get::<_, Option>(4)?.unwrap_or_default(), + draft: row.get(5)?, + author: row.get::<_, Option>(6)?.unwrap_or_default(), + assignees: Vec::new(), + reviewers: Vec::new(), + labels: Vec::new(), + source_branch: row.get::<_, Option>(7)?.unwrap_or_default(), + target_branch: row.get::<_, Option>(8)?.unwrap_or_default(), + merge_status: row.get(9)?, + created_at: row.get(10)?, + updated_at: row.get(11)?, + merged_at: row.get(12)?, + web_url: row.get(13)?, + discussion_count: row.get::<_, i64>(14)? as usize, + file_change_count: row.get::<_, i64>(15)? as usize, + }) + }, + ) + .context("fetching MR metadata")?; + + // Fetch assignees. + let mut assignees_stmt = conn + .prepare( + "SELECT username FROM mr_assignees + WHERE merge_request_id = ( + SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2 + ) + ORDER BY username", + ) + .context("preparing assignees query")?; + let assignees: Vec = assignees_stmt + .query_map(rusqlite::params![key.project_id, key.iid], |row| row.get(0)) + .context("fetching assignees")? + .collect::, _>>() + .context("reading assignee row")?; + + // Fetch reviewers. 
+ let mut reviewers_stmt = conn + .prepare( + "SELECT username FROM mr_reviewers + WHERE merge_request_id = ( + SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2 + ) + ORDER BY username", + ) + .context("preparing reviewers query")?; + let reviewers: Vec = reviewers_stmt + .query_map(rusqlite::params![key.project_id, key.iid], |row| row.get(0)) + .context("fetching reviewers")? + .collect::, _>>() + .context("reading reviewer row")?; + + // Fetch labels. + let mut labels_stmt = conn + .prepare( + "SELECT l.name FROM mr_labels ml + JOIN labels l ON ml.label_id = l.id + WHERE ml.merge_request_id = ( + SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2 + ) + ORDER BY l.name", + ) + .context("preparing labels query")?; + let labels: Vec = labels_stmt + .query_map(rusqlite::params![key.project_id, key.iid], |row| row.get(0)) + .context("fetching labels")? + .collect::, _>>() + .context("reading label row")?; + + let mut result = row; + result.assignees = assignees; + result.reviewers = reviewers; + result.labels = labels; + Ok(result) +} + +/// Fetch cross-references for an MR. 
+fn fetch_mr_cross_refs(conn: &Connection, key: &EntityKey) -> Result> { + let mut stmt = conn + .prepare( + "SELECT er.reference_type, er.target_entity_type, + er.target_entity_id, er.target_entity_iid, + er.target_project_path, + CASE + WHEN er.target_entity_type = 'issue' + THEN (SELECT title FROM issues WHERE id = er.target_entity_id) + WHEN er.target_entity_type = 'merge_request' + THEN (SELECT title FROM merge_requests WHERE id = er.target_entity_id) + ELSE NULL + END as entity_title, + CASE + WHEN er.target_entity_id IS NOT NULL + THEN (SELECT project_id FROM issues WHERE id = er.target_entity_id + UNION ALL + SELECT project_id FROM merge_requests WHERE id = er.target_entity_id + LIMIT 1) + ELSE NULL + END as target_project_id + FROM entity_references er + WHERE er.source_entity_type = 'merge_request' + AND er.source_entity_id = (SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2) + ORDER BY er.reference_type, er.target_entity_iid", + ) + .context("preparing MR cross-refs query")?; + + let refs: Vec = stmt + .query_map(rusqlite::params![key.project_id, key.iid], |row| { + let ref_type: String = row.get(0)?; + let target_type: String = row.get(1)?; + let _target_id: Option = row.get(2)?; + let target_iid: Option = row.get(3)?; + let target_path: Option = row.get(4)?; + let title: Option = row.get(5)?; + let target_project_id: Option = row.get(6)?; + + let kind = match (ref_type.as_str(), target_type.as_str()) { + ("closes", "issue") => CrossRefKind::ClosingMr, + ("related", "issue") => CrossRefKind::RelatedIssue, + _ => CrossRefKind::MentionedIn, + }; + + let iid = target_iid.unwrap_or(0); + let project_id = target_project_id.unwrap_or(key.project_id); + + let entity_key = match target_type.as_str() { + "merge_request" => EntityKey::mr(project_id, iid), + _ => EntityKey::issue(project_id, iid), + }; + + let label = title.unwrap_or_else(|| { + let prefix = if target_type == "merge_request" { + "!" 
+ } else { + "#" + }; + let path = target_path.clone().unwrap_or_default(); + if path.is_empty() { + format!("{prefix}{iid}") + } else { + format!("{path}{prefix}{iid}") + } + }); + + Ok(CrossRef { + kind, + entity_key, + label, + navigable: target_project_id.is_some(), + }) + }) + .context("fetching MR cross-refs")? + .collect::, _>>() + .context("reading cross-ref row")?; + + Ok(refs) +} + +/// Fetch file changes for an MR. +fn fetch_mr_file_changes(conn: &Connection, key: &EntityKey) -> Result> { + let mut stmt = conn + .prepare( + "SELECT fc.old_path, fc.new_path, fc.change_type + FROM mr_file_changes fc + WHERE fc.merge_request_id = ( + SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2 + ) + ORDER BY fc.new_path", + ) + .context("preparing file changes query")?; + + let changes: Vec = stmt + .query_map(rusqlite::params![key.project_id, key.iid], |row| { + Ok(FileChange { + old_path: row.get(0)?, + new_path: row.get(1)?, + change_type: FileChangeType::parse_db(&row.get::<_, String>(2).unwrap_or_default()), + }) + }) + .context("fetching file changes")? + .collect::, _>>() + .context("reading file change row")?; + + Ok(changes) +} + +/// Fetch discussions for an MR (Phase 2 async load). 
+pub fn fetch_mr_discussions(conn: &Connection, key: &EntityKey) -> Result> { + let mr_id: i64 = conn + .query_row( + "SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2", + rusqlite::params![key.project_id, key.iid], + |row| row.get(0), + ) + .context("looking up MR id for discussions")?; + + let mut disc_stmt = conn + .prepare( + "SELECT d.id, d.gitlab_discussion_id, d.resolvable, d.resolved + FROM discussions d + WHERE d.merge_request_id = ?1 + ORDER BY d.first_note_at ASC", + ) + .context("preparing MR discussions query")?; + + let mut note_stmt = conn + .prepare( + "SELECT n.author_username, n.body, n.created_at, n.is_system, + n.note_type, n.position_new_path, n.position_new_line + FROM notes n + WHERE n.discussion_id = ?1 + ORDER BY n.position ASC, n.created_at ASC", + ) + .context("preparing MR notes query")?; + + let disc_rows: Vec<_> = disc_stmt + .query_map(rusqlite::params![mr_id], |row| { + Ok(( + row.get::<_, i64>(0)?, // id + row.get::<_, String>(1)?, // gitlab_discussion_id + row.get::<_, bool>(2)?, // resolvable + row.get::<_, bool>(3)?, // resolved + )) + }) + .context("fetching MR discussions")? + .collect::, _>>() + .context("reading discussion row")?; + + let mut discussions = Vec::new(); + for (disc_db_id, discussion_id, resolvable, resolved) in disc_rows { + let notes: Vec = note_stmt + .query_map(rusqlite::params![disc_db_id], |row| { + Ok(NoteNode { + author: row.get::<_, Option>(0)?.unwrap_or_default(), + body: row.get::<_, Option>(1)?.unwrap_or_default(), + created_at: row.get(2)?, + is_system: row.get(3)?, + is_diff_note: row.get::<_, Option>(4)?.as_deref() == Some("DiffNote"), + diff_file_path: row.get(5)?, + diff_new_line: row.get(6)?, + }) + }) + .context("fetching notes")? 
+ .collect::, _>>() + .context("reading note row")?; + + discussions.push(DiscussionNode { + discussion_id, + notes, + resolvable, + resolved, + }); + } + + Ok(discussions) +} + +#[cfg(test)] +mod tests { + use super::*; + + fn create_issue_detail_schema(conn: &Connection) { + conn.execute_batch( + " + CREATE TABLE projects ( + id INTEGER PRIMARY KEY, + gitlab_project_id INTEGER UNIQUE NOT NULL, + path_with_namespace TEXT NOT NULL + ); + CREATE TABLE issues ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT NOT NULL, + description TEXT, + state TEXT NOT NULL DEFAULT 'opened', + author_username TEXT, + milestone_title TEXT, + due_date TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + web_url TEXT, + UNIQUE(project_id, iid) + ); + CREATE TABLE issue_assignees ( + issue_id INTEGER NOT NULL, + username TEXT NOT NULL, + UNIQUE(issue_id, username) + ); + CREATE TABLE labels ( + id INTEGER PRIMARY KEY, + project_id INTEGER NOT NULL, + name TEXT NOT NULL + ); + CREATE TABLE issue_labels ( + issue_id INTEGER NOT NULL, + label_id INTEGER NOT NULL, + UNIQUE(issue_id, label_id) + ); + CREATE TABLE discussions ( + id INTEGER PRIMARY KEY, + gitlab_discussion_id TEXT NOT NULL, + project_id INTEGER NOT NULL, + issue_id INTEGER, + merge_request_id INTEGER, + noteable_type TEXT NOT NULL, + resolvable INTEGER NOT NULL DEFAULT 0, + resolved INTEGER NOT NULL DEFAULT 0, + first_note_at INTEGER + ); + CREATE TABLE notes ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + discussion_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + note_type TEXT, + is_system INTEGER NOT NULL DEFAULT 0, + author_username TEXT, + body TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + position INTEGER, + position_new_path TEXT, + position_new_line INTEGER + ); + CREATE TABLE entity_references ( + id INTEGER PRIMARY KEY, + project_id INTEGER NOT NULL, + 
source_entity_type TEXT NOT NULL, + source_entity_id INTEGER NOT NULL, + target_entity_type TEXT NOT NULL, + target_entity_id INTEGER, + target_project_path TEXT, + target_entity_iid INTEGER, + reference_type TEXT NOT NULL, + source_method TEXT NOT NULL DEFAULT 'api', + created_at INTEGER NOT NULL DEFAULT 0 + ); + CREATE TABLE merge_requests ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT NOT NULL, + state TEXT NOT NULL DEFAULT 'opened', + UNIQUE(project_id, iid) + ); + ", + ) + .unwrap(); + } + + fn create_mr_detail_schema(conn: &Connection) { + create_issue_detail_schema(conn); + // Add MR-specific columns and tables on top of the base schema. + conn.execute_batch( + " + -- Add columns to merge_requests that the detail query needs. + ALTER TABLE merge_requests ADD COLUMN description TEXT; + ALTER TABLE merge_requests ADD COLUMN draft INTEGER NOT NULL DEFAULT 0; + ALTER TABLE merge_requests ADD COLUMN author_username TEXT; + ALTER TABLE merge_requests ADD COLUMN source_branch TEXT; + ALTER TABLE merge_requests ADD COLUMN target_branch TEXT; + ALTER TABLE merge_requests ADD COLUMN detailed_merge_status TEXT; + ALTER TABLE merge_requests ADD COLUMN created_at INTEGER; + ALTER TABLE merge_requests ADD COLUMN updated_at INTEGER; + ALTER TABLE merge_requests ADD COLUMN merged_at INTEGER; + ALTER TABLE merge_requests ADD COLUMN web_url TEXT; + + CREATE TABLE mr_assignees ( + merge_request_id INTEGER NOT NULL, + username TEXT NOT NULL, + UNIQUE(merge_request_id, username) + ); + CREATE TABLE mr_reviewers ( + merge_request_id INTEGER NOT NULL, + username TEXT NOT NULL, + UNIQUE(merge_request_id, username) + ); + CREATE TABLE mr_labels ( + merge_request_id INTEGER NOT NULL, + label_id INTEGER NOT NULL, + UNIQUE(merge_request_id, label_id) + ); + CREATE TABLE mr_file_changes ( + id INTEGER PRIMARY KEY, + merge_request_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + old_path TEXT, + 
new_path TEXT NOT NULL, + change_type TEXT NOT NULL + ); + ", + ) + .unwrap(); + } + + fn setup_mr_detail_data(conn: &Connection) { + // Project (if not already inserted). + conn.execute( + "INSERT OR IGNORE INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'group/project')", + [], + ) + .unwrap(); + + // MR. + conn.execute( + "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, description, state, draft, author_username, source_branch, target_branch, detailed_merge_status, created_at, updated_at, merged_at, web_url) + VALUES (1, 2000, 1, 10, 'Fix auth flow', 'MR description', 'opened', 0, 'alice', 'fix-auth', 'main', 'mergeable', 1700000000000, 1700000060000, NULL, 'https://gitlab.com/group/project/-/merge_requests/10')", + [], + ) + .unwrap(); + + // Assignees. + conn.execute( + "INSERT INTO mr_assignees (merge_request_id, username) VALUES (1, 'bob')", + [], + ) + .unwrap(); + + // Reviewers. + conn.execute( + "INSERT INTO mr_reviewers (merge_request_id, username) VALUES (1, 'carol')", + [], + ) + .unwrap(); + + // Labels. + conn.execute( + "INSERT OR IGNORE INTO labels (id, project_id, name) VALUES (10, 1, 'backend')", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO mr_labels (merge_request_id, label_id) VALUES (1, 10)", + [], + ) + .unwrap(); + + // File changes. + conn.execute( + "INSERT INTO mr_file_changes (merge_request_id, project_id, old_path, new_path, change_type) + VALUES (1, 1, NULL, 'src/auth.rs', 'modified')", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO mr_file_changes (merge_request_id, project_id, old_path, new_path, change_type) + VALUES (1, 1, NULL, 'src/lib.rs', 'added')", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO mr_file_changes (merge_request_id, project_id, old_path, new_path, change_type) + VALUES (1, 1, 'src/old.rs', 'src/new.rs', 'renamed')", + [], + ) + .unwrap(); + + // Discussion with a note. 
+ conn.execute( + "INSERT INTO discussions (id, gitlab_discussion_id, project_id, merge_request_id, noteable_type, resolvable, resolved, first_note_at) + VALUES (1, 'mr_disc_1', 1, 1, 'MergeRequest', 1, 0, 1700000010000)", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO notes (id, gitlab_id, discussion_id, project_id, note_type, is_system, author_username, body, created_at, updated_at, position, position_new_path, position_new_line) + VALUES (1, 5001, 1, 1, 'DiffNote', 0, 'alice', 'Please fix this', 1700000010000, 1700000010000, 0, 'src/auth.rs', 42)", + [], + ) + .unwrap(); + + // Cross-reference (MR closes issue). + conn.execute( + "INSERT INTO issues (id, gitlab_id, project_id, iid, title, state, created_at, updated_at) + VALUES (1, 1000, 1, 5, 'Auth bug', 'opened', 0, 0)", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO entity_references (project_id, source_entity_type, source_entity_id, target_entity_type, target_entity_id, target_project_path, target_entity_iid, reference_type, source_method) + VALUES (1, 'merge_request', 1, 'issue', 1, 'group/project', 5, 'closes', 'api')", + [], + ) + .unwrap(); + } + + #[test] + fn test_fetch_mr_detail_basic_metadata() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_detail_schema(&conn); + setup_mr_detail_data(&conn); + + let key = EntityKey::mr(1, 10); + let data = fetch_mr_detail(&conn, &key).unwrap(); + + assert_eq!(data.metadata.iid, 10); + assert_eq!(data.metadata.title, "Fix auth flow"); + assert_eq!(data.metadata.description, "MR description"); + assert_eq!(data.metadata.state, "opened"); + assert!(!data.metadata.draft); + assert_eq!(data.metadata.author, "alice"); + assert_eq!(data.metadata.source_branch, "fix-auth"); + assert_eq!(data.metadata.target_branch, "main"); + assert_eq!(data.metadata.merge_status, "mergeable"); + assert!(data.metadata.merged_at.is_none()); + assert_eq!( + data.metadata.web_url, + "https://gitlab.com/group/project/-/merge_requests/10" + ); + } + + #[test] + 
fn test_fetch_mr_detail_assignees_reviewers_labels() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_detail_schema(&conn); + setup_mr_detail_data(&conn); + + let key = EntityKey::mr(1, 10); + let data = fetch_mr_detail(&conn, &key).unwrap(); + + assert_eq!(data.metadata.assignees, vec!["bob"]); + assert_eq!(data.metadata.reviewers, vec!["carol"]); + assert_eq!(data.metadata.labels, vec!["backend"]); + } + + #[test] + fn test_fetch_mr_detail_file_changes() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_detail_schema(&conn); + setup_mr_detail_data(&conn); + + let key = EntityKey::mr(1, 10); + let data = fetch_mr_detail(&conn, &key).unwrap(); + + assert_eq!(data.file_changes.len(), 3); + assert_eq!(data.metadata.file_change_count, 3); + + // Ordered by new_path. + assert_eq!(data.file_changes[0].new_path, "src/auth.rs"); + assert_eq!(data.file_changes[0].change_type, FileChangeType::Modified); + + assert_eq!(data.file_changes[1].new_path, "src/lib.rs"); + assert_eq!(data.file_changes[1].change_type, FileChangeType::Added); + + assert_eq!(data.file_changes[2].new_path, "src/new.rs"); + assert_eq!(data.file_changes[2].change_type, FileChangeType::Renamed); + assert_eq!(data.file_changes[2].old_path.as_deref(), Some("src/old.rs")); + } + + #[test] + fn test_fetch_mr_detail_cross_refs() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_detail_schema(&conn); + setup_mr_detail_data(&conn); + + let key = EntityKey::mr(1, 10); + let data = fetch_mr_detail(&conn, &key).unwrap(); + + assert_eq!(data.cross_refs.len(), 1); + assert_eq!(data.cross_refs[0].kind, CrossRefKind::ClosingMr); + assert_eq!(data.cross_refs[0].label, "Auth bug"); + } + + #[test] + fn test_fetch_mr_discussions() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_detail_schema(&conn); + setup_mr_detail_data(&conn); + + let key = EntityKey::mr(1, 10); + let discussions = fetch_mr_discussions(&conn, &key).unwrap(); + + 
assert_eq!(discussions.len(), 1); + assert_eq!(discussions[0].discussion_id, "mr_disc_1"); + assert!(discussions[0].resolvable); + assert!(!discussions[0].resolved); + assert_eq!(discussions[0].notes.len(), 1); + assert_eq!(discussions[0].notes[0].author, "alice"); + assert_eq!(discussions[0].notes[0].body, "Please fix this"); + assert!(discussions[0].notes[0].is_diff_note); + assert_eq!( + discussions[0].notes[0].diff_file_path.as_deref(), + Some("src/auth.rs") + ); + assert_eq!(discussions[0].notes[0].diff_new_line, Some(42)); + } + + #[test] + fn test_fetch_mr_detail_not_found() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_detail_schema(&conn); + + // Insert project but no MR. + conn.execute( + "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'g/p')", + [], + ) + .unwrap(); + + let key = EntityKey::mr(1, 99); + assert!(fetch_mr_detail(&conn, &key).is_err()); + } + + #[test] + fn test_fetch_mr_detail_no_file_changes() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_detail_schema(&conn); + + conn.execute( + "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'g/p')", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state, created_at, updated_at, web_url) + VALUES (1, 2000, 1, 10, 'Empty MR', 'opened', 0, 0, '')", + [], + ) + .unwrap(); + + let key = EntityKey::mr(1, 10); + let data = fetch_mr_detail(&conn, &key).unwrap(); + assert!(data.file_changes.is_empty()); + assert_eq!(data.metadata.file_change_count, 0); + } + + #[test] + fn test_fetch_mr_detail_draft() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_detail_schema(&conn); + + conn.execute( + "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'g/p')", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state, draft, created_at, updated_at, 
web_url) + VALUES (1, 2000, 1, 10, 'Draft: WIP', 'opened', 1, 0, 0, '')", + [], + ) + .unwrap(); + + let key = EntityKey::mr(1, 10); + let data = fetch_mr_detail(&conn, &key).unwrap(); + assert!(data.metadata.draft); + } +} diff --git a/crates/lore-tui/src/action/mr_list.rs b/crates/lore-tui/src/action/mr_list.rs new file mode 100644 index 0000000..c71871a --- /dev/null +++ b/crates/lore-tui/src/action/mr_list.rs @@ -0,0 +1,629 @@ +#![allow(dead_code)] + +use anyhow::{Context, Result}; +use rusqlite::Connection; + +use crate::state::mr_list::{MrCursor, MrFilter, MrListPage, MrListRow, MrSortField, MrSortOrder}; + +/// Page size for MR list queries. +const MR_PAGE_SIZE: usize = 50; + +/// Fetch a page of merge requests matching the given filter and sort. +/// +/// Uses keyset pagination and snapshot fence — same pattern as issues. +pub fn fetch_mr_list( + conn: &Connection, + filter: &MrFilter, + sort_field: MrSortField, + sort_order: MrSortOrder, + cursor: Option<&MrCursor>, + snapshot_fence: Option, +) -> Result { + // -- Build dynamic WHERE conditions and params -------------------------- + let mut conditions: Vec = Vec::new(); + let mut params: Vec> = Vec::new(); + + if let Some(pid) = filter.project_id { + conditions.push("m.project_id = ?".into()); + params.push(Box::new(pid)); + } + + if let Some(ref state) = filter.state { + conditions.push("m.state = ?".into()); + params.push(Box::new(state.clone())); + } + + if let Some(ref author) = filter.author { + conditions.push("m.author_username = ?".into()); + params.push(Box::new(author.clone())); + } + + if let Some(draft) = filter.draft { + conditions.push("m.draft = ?".into()); + params.push(Box::new(i64::from(draft))); + } + + if let Some(ref target) = filter.target_branch { + conditions.push("m.target_branch = ?".into()); + params.push(Box::new(target.clone())); + } + + if let Some(ref source) = filter.source_branch { + conditions.push("m.source_branch = ?".into()); + params.push(Box::new(source.clone())); + 
    }

    // Filter: reviewer (via join on mr_reviewers)
    // Each branch pushes its WHERE condition and its parameter together, so
    // positional `?` placeholders stay aligned with `params` ordering.
    let reviewer_join = if let Some(ref reviewer) = filter.reviewer {
        conditions.push("rv.username = ?".into());
        params.push(Box::new(reviewer.clone()));
        "JOIN mr_reviewers rv ON rv.merge_request_id = m.id"
    } else {
        ""
    };

    // Filter: label (via join on mr_labels + labels)
    // Aliases `fil`/`fl` are deliberately distinct from the `ml`/`l` LEFT
    // JOINs used later to aggregate display labels.
    let label_join = if let Some(ref label) = filter.label {
        conditions.push("fl.name = ?".into());
        params.push(Box::new(label.clone()));
        "JOIN mr_labels fil ON fil.merge_request_id = m.id \
         JOIN labels fl ON fl.id = fil.label_id"
    } else {
        ""
    };

    // Filter: free_text (LIKE on title)
    if let Some(ref text) = filter.free_text {
        conditions.push("m.title LIKE ?".into());
        params.push(Box::new(format!("%{text}%")));
    }

    // Snapshot fence
    if let Some(fence) = snapshot_fence {
        conditions.push("m.updated_at <= ?".into());
        params.push(Box::new(fence));
    }

    // -- Count query (before cursor filter) ---------------------------------
    // The count runs before the cursor condition is appended, so
    // `total_count` reflects the full filtered set, not just this page.
    let where_clause = if conditions.is_empty() {
        String::new()
    } else {
        format!("WHERE {}", conditions.join(" AND "))
    };

    let count_sql = format!(
        "SELECT COUNT(DISTINCT m.id) FROM merge_requests m \
         JOIN projects p ON p.id = m.project_id \
         {reviewer_join} {label_join} {where_clause}"
    );
    let count_params: Vec<&dyn rusqlite::types::ToSql> =
        params.iter().map(|b| b.as_ref()).collect();

    let total_count: i64 = conn
        .query_row(&count_sql, count_params.as_slice(), |r| r.get(0))
        .context("counting MRs for list")?;

    // -- Keyset cursor condition -------------------------------------------
    // NOTE(review): the row-value comparison always binds `c.updated_at` as
    // the first cursor value, even when `sort_col` is a non-timestamp column
    // (title/state/author/target_branch). Keyset pagination is therefore
    // only correct for the UpdatedAt/Iid sorts — confirm, or extend MrCursor
    // to carry the active sort key.
    // NOTE(review): `(a, b) < (?, ?)` row-value syntax requires SQLite 3.15+
    // — confirm the minimum supported SQLite version.
    let (sort_col, sort_dir) = mr_sort_column_and_dir(sort_field, sort_order);
    let cursor_op = if sort_dir == "DESC" { "<" } else { ">" };

    if let Some(c) = cursor {
        conditions.push(format!("({sort_col}, m.iid) {cursor_op} (?, ?)"));
        params.push(Box::new(c.updated_at));
        params.push(Box::new(c.iid));
    }

    // -- Data query ---------------------------------------------------------
    let where_clause_full = if conditions.is_empty() {
        String::new()
    } else {
        format!("WHERE {}", conditions.join(" AND "))
    };

    let data_sql = format!(
        "SELECT p.path_with_namespace, m.iid, m.title, m.state, \
         m.author_username, m.target_branch, m.updated_at, m.draft, \
         GROUP_CONCAT(DISTINCT l.name) AS label_names \
         FROM merge_requests m \
         JOIN projects p ON p.id = m.project_id \
         {reviewer_join} \
         {label_join} \
         LEFT JOIN mr_labels ml ON ml.merge_request_id = m.id \
         LEFT JOIN labels l ON l.id = ml.label_id \
         {where_clause_full} \
         GROUP BY m.id \
         ORDER BY {sort_col} {sort_dir}, m.iid {sort_dir} \
         LIMIT ?"
    );

    // Fetch one extra row beyond the page size to detect whether a next
    // page exists without a second COUNT query.
    let fetch_limit = (MR_PAGE_SIZE + 1) as i64;
    params.push(Box::new(fetch_limit));

    let all_params: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|b| b.as_ref()).collect();

    let mut stmt = conn.prepare(&data_sql).context("preparing MR list query")?;

    let rows_result = stmt
        .query_map(all_params.as_slice(), |row| {
            let project_path: String = row.get(0)?;
            let iid: i64 = row.get(1)?;
            let title: String = row.get::<_, Option<String>>(2)?.unwrap_or_default();
            let state: String = row.get::<_, Option<String>>(3)?.unwrap_or_default();
            let author: String = row.get::<_, Option<String>>(4)?.unwrap_or_default();
            let target_branch: String = row.get::<_, Option<String>>(5)?.unwrap_or_default();
            let updated_at: i64 = row.get(6)?;
            let draft_int: i64 = row.get(7)?;
            let label_names: Option<String> = row.get(8)?;

            // GROUP_CONCAT joins with "," — assumes label names contain no
            // commas; TODO confirm against the label sync layer.
            let labels = label_names
                .map(|s| s.split(',').map(String::from).collect())
                .unwrap_or_default();

            Ok(MrListRow {
                project_path,
                iid,
                title,
                state,
                author,
                target_branch,
                labels,
                updated_at,
                draft: draft_int != 0,
            })
        })
        .context("querying MR list")?;

    let mut rows: Vec<MrListRow> = Vec::new();
    for row in rows_result {
        rows.push(row.context("reading MR list row")?);
    }

    // The sentinel row (if present) proves a next page exists; drop it
    // before building the cursor from the last *visible* row.
    let has_next = rows.len() > MR_PAGE_SIZE;
    if has_next {
rows.truncate(MR_PAGE_SIZE); + } + + let next_cursor = if has_next { + rows.last().map(|r| MrCursor { + updated_at: r.updated_at, + iid: r.iid, + }) + } else { + None + }; + + #[allow(clippy::cast_sign_loss)] + Ok(MrListPage { + rows, + next_cursor, + total_count: total_count as u64, + }) +} + +/// Map MR sort field + order to SQL column name and direction keyword. +fn mr_sort_column_and_dir(field: MrSortField, order: MrSortOrder) -> (&'static str, &'static str) { + let col = match field { + MrSortField::UpdatedAt => "m.updated_at", + MrSortField::Iid => "m.iid", + MrSortField::Title => "m.title", + MrSortField::State => "m.state", + MrSortField::Author => "m.author_username", + MrSortField::TargetBranch => "m.target_branch", + }; + let dir = match order { + MrSortOrder::Desc => "DESC", + MrSortOrder::Asc => "ASC", + }; + (col, dir) +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Create the schema needed for MR list tests. + fn create_mr_list_schema(conn: &Connection) { + conn.execute_batch( + " + CREATE TABLE projects ( + id INTEGER PRIMARY KEY, + gitlab_project_id INTEGER UNIQUE NOT NULL, + path_with_namespace TEXT NOT NULL + ); + CREATE TABLE merge_requests ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT, + author_username TEXT, + created_at INTEGER, + updated_at INTEGER, + last_seen_at INTEGER NOT NULL, + draft INTEGER NOT NULL DEFAULT 0, + target_branch TEXT, + source_branch TEXT + ); + CREATE TABLE labels ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER, + project_id INTEGER NOT NULL, + name TEXT NOT NULL, + color TEXT, + description TEXT + ); + CREATE TABLE mr_labels ( + merge_request_id INTEGER NOT NULL, + label_id INTEGER NOT NULL, + PRIMARY KEY(merge_request_id, label_id) + ); + CREATE TABLE mr_reviewers ( + merge_request_id INTEGER NOT NULL, + username TEXT NOT NULL, + PRIMARY KEY(merge_request_id, username) + ); + ", + ) + .expect("create MR list 
schema"); + } + + /// Insert a test MR with full fields. + fn insert_mr_full( + conn: &Connection, + iid: i64, + state: &str, + author: &str, + target_branch: &str, + draft: bool, + updated_at: i64, + ) { + conn.execute( + "INSERT INTO merge_requests \ + (gitlab_id, project_id, iid, title, state, author_username, \ + target_branch, draft, created_at, updated_at, last_seen_at) \ + VALUES (?1, 1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?8, ?8)", + rusqlite::params![ + iid * 100 + 50, + iid, + format!("MR {iid}"), + state, + author, + target_branch, + i64::from(draft), + updated_at, + ], + ) + .expect("insert mr full"); + } + + /// Attach a label to an MR. + fn attach_mr_label(conn: &Connection, mr_iid: i64, label_name: &str) { + let mr_id: i64 = conn + .query_row( + "SELECT id FROM merge_requests WHERE iid = ?", + [mr_iid], + |r| r.get(0), + ) + .expect("find mr"); + + conn.execute( + "INSERT OR IGNORE INTO labels (project_id, name) VALUES (1, ?)", + [label_name], + ) + .expect("insert label"); + let label_id: i64 = conn + .query_row("SELECT id FROM labels WHERE name = ?", [label_name], |r| { + r.get(0) + }) + .expect("find label"); + + conn.execute( + "INSERT INTO mr_labels (merge_request_id, label_id) VALUES (?, ?)", + [mr_id, label_id], + ) + .expect("attach mr label"); + } + + /// Add a reviewer to an MR. 
+ fn add_mr_reviewer(conn: &Connection, mr_iid: i64, username: &str) { + let mr_id: i64 = conn + .query_row( + "SELECT id FROM merge_requests WHERE iid = ?", + [mr_iid], + |r| r.get(0), + ) + .expect("find mr"); + + conn.execute( + "INSERT INTO mr_reviewers (merge_request_id, username) VALUES (?, ?)", + rusqlite::params![mr_id, username], + ) + .expect("add mr reviewer"); + } + + fn setup_mr_list_data(conn: &Connection) { + let base = 1_700_000_000_000_i64; + conn.execute( + "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'group/project')", + [], + ) + .unwrap(); + + insert_mr_full(conn, 1, "opened", "alice", "main", false, base - 10_000); + insert_mr_full(conn, 2, "opened", "bob", "main", true, base - 20_000); + insert_mr_full(conn, 3, "merged", "alice", "develop", false, base - 30_000); + insert_mr_full(conn, 4, "opened", "charlie", "main", true, base - 40_000); + insert_mr_full(conn, 5, "closed", "bob", "release", false, base - 50_000); + + attach_mr_label(conn, 1, "backend"); + attach_mr_label(conn, 1, "urgent"); + attach_mr_label(conn, 2, "frontend"); + attach_mr_label(conn, 4, "backend"); + + add_mr_reviewer(conn, 1, "diana"); + add_mr_reviewer(conn, 2, "diana"); + add_mr_reviewer(conn, 3, "edward"); + } + + #[test] + fn test_fetch_mr_list_basic() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter::default(); + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 5); + assert_eq!(page.rows.len(), 5); + assert_eq!(page.rows[0].iid, 1); // newest first + assert_eq!(page.rows[4].iid, 5); + assert!(page.next_cursor.is_none()); + } + + #[test] + fn test_fetch_mr_list_filter_state() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter { + state: 
Some("opened".into()), + ..Default::default() + }; + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 3); + assert!(page.rows.iter().all(|r| r.state == "opened")); + } + + #[test] + fn test_fetch_mr_list_filter_draft() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter { + draft: Some(true), + ..Default::default() + }; + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 2); // MRs 2 and 4 + assert!(page.rows.iter().all(|r| r.draft)); + } + + #[test] + fn test_fetch_mr_list_filter_target_branch() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter { + target_branch: Some("main".into()), + ..Default::default() + }; + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 3); // MRs 1, 2, 4 + assert!(page.rows.iter().all(|r| r.target_branch == "main")); + } + + #[test] + fn test_fetch_mr_list_filter_reviewer() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter { + reviewer: Some("diana".into()), + ..Default::default() + }; + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 2); // MRs 1 and 2 + } + + #[test] + fn test_fetch_mr_list_filter_label() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter { + label: Some("backend".into()), + ..Default::default() + }; + let page = fetch_mr_list( + 
&conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 2); // MRs 1 and 4 + } + + #[test] + fn test_fetch_mr_list_labels_aggregated() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter::default(); + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + let mr1 = page.rows.iter().find(|r| r.iid == 1).unwrap(); + assert_eq!(mr1.labels.len(), 2); + assert!(mr1.labels.contains(&"backend".to_string())); + assert!(mr1.labels.contains(&"urgent".to_string())); + + let mr5 = page.rows.iter().find(|r| r.iid == 5).unwrap(); + assert!(mr5.labels.is_empty()); + } + + #[test] + fn test_fetch_mr_list_sort_ascending() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter::default(); + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Asc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.rows[0].iid, 5); // oldest first + assert_eq!(page.rows[4].iid, 1); + } + + #[test] + fn test_fetch_mr_list_snapshot_fence() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let base = 1_700_000_000_000_i64; + let fence = base - 25_000; + let filter = MrFilter::default(); + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + Some(fence), + ) + .unwrap(); + + assert_eq!(page.total_count, 3); + assert!(page.rows.iter().all(|r| r.updated_at <= fence)); + } + + #[test] + fn test_fetch_mr_list_empty() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + conn.execute( + "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'g/p')", + [], + ) + .unwrap(); + + let page = 
fetch_mr_list( + &conn, + &MrFilter::default(), + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 0); + assert!(page.rows.is_empty()); + assert!(page.next_cursor.is_none()); + } + + #[test] + fn test_fetch_mr_list_free_text() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter { + free_text: Some("MR 3".into()), + ..Default::default() + }; + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 1); + assert_eq!(page.rows[0].iid, 3); + } +} diff --git a/crates/lore-tui/src/action/search.rs b/crates/lore-tui/src/action/search.rs new file mode 100644 index 0000000..632e673 --- /dev/null +++ b/crates/lore-tui/src/action/search.rs @@ -0,0 +1,361 @@ +#![allow(dead_code)] + +use anyhow::{Context, Result}; +use rusqlite::Connection; + +use crate::message::{EntityKey, EntityKind, SearchMode, SearchResult}; +use crate::state::search::SearchCapabilities; + +/// Probe the database to detect available search indexes. +/// +/// Checks for FTS5 documents and embedding metadata. Returns capabilities +/// that the UI uses to gate available search modes. +pub fn fetch_search_capabilities(conn: &Connection) -> Result { + // FTS: check if documents_fts has rows via the docsize shadow table + // (B-tree, not virtual table scan). + let has_fts = conn + .query_row( + "SELECT EXISTS(SELECT 1 FROM documents_fts_docsize LIMIT 1)", + [], + |r| r.get::<_, bool>(0), + ) + .unwrap_or(false); + + // Embeddings: count rows in embedding_metadata. + let embedding_count: i64 = conn + .query_row("SELECT COUNT(*) FROM embedding_metadata", [], |r| r.get(0)) + .unwrap_or(0); + + let has_embeddings = embedding_count > 0; + + // Coverage: embeddings / documents percentage. 
+ let doc_count: i64 = conn + .query_row("SELECT COUNT(*) FROM documents", [], |r| r.get(0)) + .unwrap_or(0); + + let embedding_coverage_pct = if doc_count > 0 { + (embedding_count as f32 / doc_count as f32 * 100.0).min(100.0) + } else { + 0.0 + }; + + Ok(SearchCapabilities { + has_fts, + has_embeddings, + embedding_coverage_pct, + }) +} + +/// Execute a search query against the local database. +/// +/// Dispatches to the correct search backend based on mode: +/// - Lexical: FTS5 only (documents_fts) +/// - Hybrid: FTS5 + vector merge via RRF +/// - Semantic: vector cosine similarity only +/// +/// Returns results sorted by score descending. +pub fn execute_search( + conn: &Connection, + query: &str, + mode: SearchMode, + limit: usize, +) -> Result> { + if query.trim().is_empty() { + return Ok(Vec::new()); + } + + match mode { + SearchMode::Lexical => execute_fts_search(conn, query, limit), + SearchMode::Hybrid | SearchMode::Semantic => { + // Hybrid and Semantic require the full search pipeline from the + // core crate (async, Ollama client). For now, fall back to FTS + // for Hybrid and return empty for Semantic-only. + // TODO: Wire up async search dispatch when core search is integrated. + if mode == SearchMode::Hybrid { + execute_fts_search(conn, query, limit) + } else { + Ok(Vec::new()) + } + } + } +} + +/// FTS5 full-text search against the documents table. +fn execute_fts_search(conn: &Connection, query: &str, limit: usize) -> Result> { + // Sanitize the query for FTS5 (escape special chars, wrap terms in quotes). + let safe_query = sanitize_fts_query(query); + if safe_query.is_empty() { + return Ok(Vec::new()); + } + + // Resolve project_path via JOIN through projects table. + // Resolve iid via JOIN through the source entity table (issues or merge_requests). + // snippet column 1 = content_text (column 0 is title). 
+ let mut stmt = conn + .prepare( + "SELECT d.source_type, d.source_id, d.title, d.project_id, + p.path_with_namespace, + snippet(documents_fts, 1, '>>>', '<<<', '...', 32) AS snip, + bm25(documents_fts) AS score, + COALESCE(i.iid, mr.iid) AS entity_iid + FROM documents_fts + JOIN documents d ON documents_fts.rowid = d.id + JOIN projects p ON p.id = d.project_id + LEFT JOIN issues i ON d.source_type = 'issue' AND i.id = d.source_id + LEFT JOIN merge_requests mr ON d.source_type = 'merge_request' AND mr.id = d.source_id + WHERE documents_fts MATCH ?1 + ORDER BY score + LIMIT ?2", + ) + .context("preparing FTS search query")?; + + let rows = stmt + .query_map(rusqlite::params![safe_query, limit as i64], |row| { + let source_type: String = row.get(0)?; + let _source_id: i64 = row.get(1)?; + let title: String = row.get::<_, Option>(2)?.unwrap_or_default(); + let project_id: i64 = row.get(3)?; + let project_path: String = row.get::<_, Option>(4)?.unwrap_or_default(); + let snippet: String = row.get::<_, Option>(5)?.unwrap_or_default(); + let score: f64 = row.get(6)?; + let entity_iid: Option = row.get(7)?; + Ok(( + source_type, + project_id, + title, + project_path, + snippet, + score, + entity_iid, + )) + }) + .context("executing FTS search")?; + + let mut results = Vec::new(); + for row in rows { + let (source_type, project_id, title, project_path, snippet, score, entity_iid) = + row.context("reading FTS search row")?; + + let kind = match source_type.as_str() { + "issue" => EntityKind::Issue, + "merge_request" | "mr" => EntityKind::MergeRequest, + _ => continue, // Skip unknown source types (discussion, note). + }; + + // Skip if we couldn't resolve the entity's iid (orphaned document). + let Some(iid) = entity_iid else { + continue; + }; + + let key = EntityKey { + project_id, + iid, + kind, + }; + + results.push(SearchResult { + key, + title, + score: score.abs(), // bm25 returns negative scores; lower = better. 
+ snippet, + project_path, + }); + } + + Ok(results) +} + +/// Sanitize a user query for FTS5 MATCH syntax. +/// +/// Wraps individual terms in double quotes to prevent FTS5 syntax errors +/// from user-typed operators (AND, OR, NOT, *, etc.). +fn sanitize_fts_query(query: &str) -> String { + query + .split_whitespace() + .map(|term| { + // Strip any existing quotes and re-wrap. + let clean = term.replace('"', ""); + if clean.is_empty() { + String::new() + } else { + format!("\"{clean}\"") + } + }) + .filter(|s| !s.is_empty()) + .collect::>() + .join(" ") +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Create the minimal schema needed for search queries. + fn create_dashboard_schema(conn: &Connection) { + conn.execute_batch( + " + CREATE TABLE projects ( + id INTEGER PRIMARY KEY, + gitlab_project_id INTEGER UNIQUE NOT NULL, + path_with_namespace TEXT NOT NULL + ); + CREATE TABLE issues ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT NOT NULL, + author_username TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE merge_requests ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT, + author_username TEXT, + created_at INTEGER, + updated_at INTEGER, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE discussions ( + id INTEGER PRIMARY KEY, + gitlab_discussion_id TEXT NOT NULL, + project_id INTEGER NOT NULL, + noteable_type TEXT NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE notes ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + discussion_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + is_system INTEGER NOT NULL DEFAULT 0, + author_username TEXT, + body TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE 
TABLE documents ( + id INTEGER PRIMARY KEY, + source_type TEXT NOT NULL, + source_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + content_text TEXT NOT NULL, + content_hash TEXT NOT NULL + ); + CREATE TABLE embedding_metadata ( + document_id INTEGER NOT NULL, + chunk_index INTEGER NOT NULL DEFAULT 0, + model TEXT NOT NULL, + dims INTEGER NOT NULL, + document_hash TEXT NOT NULL, + chunk_hash TEXT NOT NULL, + created_at INTEGER NOT NULL, + PRIMARY KEY(document_id, chunk_index) + ); + CREATE TABLE sync_runs ( + id INTEGER PRIMARY KEY, + started_at INTEGER NOT NULL, + heartbeat_at INTEGER NOT NULL, + finished_at INTEGER, + status TEXT NOT NULL, + command TEXT NOT NULL, + error TEXT + ); + ", + ) + .expect("create dashboard schema"); + } + + #[test] + fn test_sanitize_fts_query_wraps_terms() { + let result = sanitize_fts_query("hello world"); + assert_eq!(result, r#""hello" "world""#); + } + + #[test] + fn test_sanitize_fts_query_strips_quotes() { + let result = sanitize_fts_query(r#""hello" "world""#); + assert_eq!(result, r#""hello" "world""#); + } + + #[test] + fn test_sanitize_fts_query_empty() { + assert_eq!(sanitize_fts_query(""), ""); + assert_eq!(sanitize_fts_query(" "), ""); + } + + #[test] + fn test_sanitize_fts_query_special_chars() { + // FTS5 operators should be safely wrapped in quotes. + let result = sanitize_fts_query("NOT AND OR"); + assert_eq!(result, r#""NOT" "AND" "OR""#); + } + + #[test] + fn test_fetch_search_capabilities_no_tables() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + let caps = fetch_search_capabilities(&conn).unwrap(); + assert!(!caps.has_fts); + assert!(!caps.has_embeddings); + assert!(!caps.has_any_index()); + } + + #[test] + fn test_fetch_search_capabilities_with_fts() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + // Create FTS table and its shadow table. 
+ conn.execute_batch( + "CREATE VIRTUAL TABLE documents_fts USING fts5(content); + INSERT INTO documents_fts(content) VALUES ('test document');", + ) + .unwrap(); + + let caps = fetch_search_capabilities(&conn).unwrap(); + assert!(caps.has_fts); + assert!(!caps.has_embeddings); + } + + #[test] + fn test_fetch_search_capabilities_with_embeddings() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + // Insert a document so coverage calculation works. + conn.execute_batch( + "INSERT INTO documents(id, source_type, source_id, project_id, content_text, content_hash) + VALUES (1, 'issue', 1, 1, 'body text', 'abc'); + INSERT INTO embedding_metadata(document_id, chunk_index, model, dims, document_hash, chunk_hash, created_at) + VALUES (1, 0, 'test', 384, 'abc', 'def', 1700000000);", + ) + .unwrap(); + + let caps = fetch_search_capabilities(&conn).unwrap(); + assert!(!caps.has_fts); + assert!(caps.has_embeddings); + assert!(caps.embedding_coverage_pct > 0.0); + } + + #[test] + fn test_execute_search_empty_query_returns_empty() { + let conn = Connection::open_in_memory().unwrap(); + let results = execute_search(&conn, "", SearchMode::Lexical, 10).unwrap(); + assert!(results.is_empty()); + } + + #[test] + fn test_execute_search_whitespace_only_returns_empty() { + let conn = Connection::open_in_memory().unwrap(); + let results = execute_search(&conn, " ", SearchMode::Lexical, 10).unwrap(); + assert!(results.is_empty()); + } +} diff --git a/crates/lore-tui/src/action/timeline.rs b/crates/lore-tui/src/action/timeline.rs new file mode 100644 index 0000000..dc11ff3 --- /dev/null +++ b/crates/lore-tui/src/action/timeline.rs @@ -0,0 +1,845 @@ +#![allow(dead_code)] + +use anyhow::{Context, Result}; +use rusqlite::Connection; + +use crate::message::{EntityKey, EntityKind, TimelineEvent, TimelineEventKind}; +use crate::state::timeline::TimelineScope; + +/// Internal filter resolved from a [`TimelineScope`]. 
+/// +/// Translates the user-facing scope (which uses `EntityKey` with project_id + iid) +/// into internal DB ids for efficient querying. +enum TimelineFilter { + /// No filtering — return all events. + All, + /// Filter to events for a specific issue (internal DB id). + Issue(i64), + /// Filter to events for a specific MR (internal DB id). + MergeRequest(i64), + /// Filter to events by a specific actor. + Actor(String), +} + +/// Resolve a [`TimelineScope`] into a concrete [`TimelineFilter`]. +fn resolve_timeline_scope(conn: &Connection, scope: &TimelineScope) -> Result { + match scope { + TimelineScope::All => Ok(TimelineFilter::All), + TimelineScope::Entity(key) => { + let (table, kind_label) = match key.kind { + EntityKind::Issue => ("issues", "issue"), + EntityKind::MergeRequest => ("merge_requests", "merge request"), + }; + let sql = format!("SELECT id FROM {table} WHERE project_id = ?1 AND iid = ?2"); + let id: i64 = conn + .query_row(&sql, rusqlite::params![key.project_id, key.iid], |r| { + r.get(0) + }) + .with_context(|| { + format!( + "resolving {kind_label} #{} in project {}", + key.iid, key.project_id + ) + })?; + match key.kind { + EntityKind::Issue => Ok(TimelineFilter::Issue(id)), + EntityKind::MergeRequest => Ok(TimelineFilter::MergeRequest(id)), + } + } + TimelineScope::Author(name) => Ok(TimelineFilter::Actor(name.clone())), + } +} + +/// Fetch timeline events from raw resource event tables. +/// +/// Queries `issues`/`merge_requests` for Created events, plus +/// `resource_state_events`, `resource_label_events`, and +/// `resource_milestone_events` for lifecycle events. Results are sorted +/// by timestamp descending (most recent first) and truncated to `limit`. 
+pub fn fetch_timeline_events( + conn: &Connection, + scope: &TimelineScope, + limit: usize, +) -> Result> { + let filter = resolve_timeline_scope(conn, scope)?; + let mut events = Vec::new(); + + collect_tl_created_events(conn, &filter, &mut events)?; + collect_tl_state_events(conn, &filter, &mut events)?; + collect_tl_label_events(conn, &filter, &mut events)?; + collect_tl_milestone_events(conn, &filter, &mut events)?; + + // Sort by timestamp descending (most recent first), with stable tiebreak. + events.sort_by(|a, b| { + b.timestamp_ms + .cmp(&a.timestamp_ms) + .then_with(|| a.entity_key.kind.cmp(&b.entity_key.kind)) + .then_with(|| a.entity_key.iid.cmp(&b.entity_key.iid)) + }); + + events.truncate(limit); + Ok(events) +} + +/// Collect Created events from issues and merge_requests tables. +fn collect_tl_created_events( + conn: &Connection, + filter: &TimelineFilter, + events: &mut Vec, +) -> Result<()> { + // Issue created events. + if !matches!(filter, TimelineFilter::MergeRequest(_)) { + let (where_clause, params) = match filter { + TimelineFilter::All => ( + "1=1".to_string(), + Vec::>::new(), + ), + TimelineFilter::Issue(id) => ( + "i.id = ?1".to_string(), + vec![Box::new(*id) as Box], + ), + TimelineFilter::Actor(name) => ( + "i.author_username = ?1".to_string(), + vec![Box::new(name.clone()) as Box], + ), + TimelineFilter::MergeRequest(_) => unreachable!(), + }; + + let sql = format!( + "SELECT i.created_at, i.iid, i.title, i.author_username, i.project_id, p.path_with_namespace + FROM issues i + JOIN projects p ON p.id = i.project_id + WHERE {where_clause}" + ); + + let mut stmt = conn + .prepare(&sql) + .context("preparing issue created query")?; + let param_refs: Vec<&dyn rusqlite::types::ToSql> = + params.iter().map(AsRef::as_ref).collect(); + let rows = stmt + .query_map(param_refs.as_slice(), |row| { + Ok(( + row.get::<_, i64>(0)?, + row.get::<_, i64>(1)?, + row.get::<_, Option>(2)?, + row.get::<_, Option>(3)?, + row.get::<_, i64>(4)?, + 
row.get::<_, String>(5)?, + )) + }) + .context("querying issue created events")?; + + for row in rows { + let (created_at, iid, title, author, project_id, project_path) = + row.context("reading issue created row")?; + let title_str = title.as_deref().unwrap_or("(untitled)"); + events.push(TimelineEvent { + timestamp_ms: created_at, + entity_key: EntityKey::issue(project_id, iid), + event_kind: TimelineEventKind::Created, + summary: format!("Issue #{iid} created: {title_str}"), + detail: title, + actor: author, + project_path, + }); + } + } + + // MR created events. + if !matches!(filter, TimelineFilter::Issue(_)) { + let (where_clause, params) = match filter { + TimelineFilter::All => ( + "1=1".to_string(), + Vec::>::new(), + ), + TimelineFilter::MergeRequest(id) => ( + "mr.id = ?1".to_string(), + vec![Box::new(*id) as Box], + ), + TimelineFilter::Actor(name) => ( + "mr.author_username = ?1".to_string(), + vec![Box::new(name.clone()) as Box], + ), + TimelineFilter::Issue(_) => unreachable!(), + }; + + let sql = format!( + "SELECT mr.created_at, mr.iid, mr.title, mr.author_username, mr.project_id, p.path_with_namespace + FROM merge_requests mr + JOIN projects p ON p.id = mr.project_id + WHERE {where_clause}" + ); + + let mut stmt = conn.prepare(&sql).context("preparing MR created query")?; + let param_refs: Vec<&dyn rusqlite::types::ToSql> = + params.iter().map(AsRef::as_ref).collect(); + let rows = stmt + .query_map(param_refs.as_slice(), |row| { + Ok(( + row.get::<_, i64>(0)?, + row.get::<_, i64>(1)?, + row.get::<_, Option>(2)?, + row.get::<_, Option>(3)?, + row.get::<_, i64>(4)?, + row.get::<_, String>(5)?, + )) + }) + .context("querying MR created events")?; + + for row in rows { + let (created_at, iid, title, author, project_id, project_path) = + row.context("reading MR created row")?; + let title_str = title.as_deref().unwrap_or("(untitled)"); + events.push(TimelineEvent { + timestamp_ms: created_at, + entity_key: EntityKey::mr(project_id, iid), + event_kind: 
TimelineEventKind::Created, + summary: format!("MR !{iid} created: {title_str}"), + detail: title, + actor: author, + project_path, + }); + } + } + + Ok(()) +} + +/// Helper: build WHERE clause and params for resource event tables. +/// +/// Resource event tables have `issue_id` and `merge_request_id` columns +/// (exactly one is non-NULL per row), plus `actor_username`. +fn resource_event_where(filter: &TimelineFilter) -> (String, Vec>) { + match filter { + TimelineFilter::All => ("1=1".to_string(), Vec::new()), + TimelineFilter::Issue(id) => ( + "e.issue_id = ?1".to_string(), + vec![Box::new(*id) as Box], + ), + TimelineFilter::MergeRequest(id) => ( + "e.merge_request_id = ?1".to_string(), + vec![Box::new(*id) as Box], + ), + TimelineFilter::Actor(name) => ( + "e.actor_username = ?1".to_string(), + vec![Box::new(name.clone()) as Box], + ), + } +} + +/// Resolve a resource event row's entity to an EntityKey. +fn resolve_event_entity( + issue_id: Option, + mr_id: Option, + issue_iid: Option, + mr_iid: Option, + issue_project_id: Option, + mr_project_id: Option, +) -> Option<(EntityKey, i64)> { + if let (Some(iid), Some(pid)) = (issue_iid, issue_project_id) { + Some((EntityKey::issue(pid, iid), pid)) + } else if let (Some(iid), Some(pid)) = (mr_iid, mr_project_id) { + Some((EntityKey::mr(pid, iid), pid)) + } else { + // Orphaned event — entity was deleted. + let _ = (issue_id, mr_id); // suppress unused warnings + None + } +} + +/// Collect state change events from `resource_state_events`. 
+fn collect_tl_state_events( + conn: &Connection, + filter: &TimelineFilter, + events: &mut Vec, +) -> Result<()> { + let (where_clause, params) = resource_event_where(filter); + + let sql = format!( + "SELECT e.created_at, e.state, e.actor_username, + e.issue_id, e.merge_request_id, + i.iid, mr.iid, i.project_id, mr.project_id, + COALESCE(pi.path_with_namespace, pm.path_with_namespace) AS project_path + FROM resource_state_events e + LEFT JOIN issues i ON i.id = e.issue_id + LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id + LEFT JOIN projects pi ON pi.id = i.project_id + LEFT JOIN projects pm ON pm.id = mr.project_id + WHERE {where_clause}" + ); + + let mut stmt = conn.prepare(&sql).context("preparing state events query")?; + let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(AsRef::as_ref).collect(); + let rows = stmt + .query_map(param_refs.as_slice(), |row| { + Ok(( + row.get::<_, i64>(0)?, + row.get::<_, String>(1)?, + row.get::<_, Option>(2)?, + row.get::<_, Option>(3)?, + row.get::<_, Option>(4)?, + row.get::<_, Option>(5)?, + row.get::<_, Option>(6)?, + row.get::<_, Option>(7)?, + row.get::<_, Option>(8)?, + row.get::<_, Option>(9)?, + )) + }) + .context("querying state events")?; + + for row in rows { + let ( + created_at, + state, + actor, + issue_id, + mr_id, + issue_iid, + mr_iid, + issue_pid, + mr_pid, + project_path, + ) = row.context("reading state event row")?; + + let Some((entity_key, _pid)) = + resolve_event_entity(issue_id, mr_id, issue_iid, mr_iid, issue_pid, mr_pid) + else { + continue; + }; + + let (event_kind, summary) = if state == "merged" { + ( + TimelineEventKind::Merged, + format!("MR !{} merged", entity_key.iid), + ) + } else { + ( + TimelineEventKind::StateChanged, + format!("State changed to {state}"), + ) + }; + + events.push(TimelineEvent { + timestamp_ms: created_at, + entity_key, + event_kind, + summary, + detail: Some(state), + actor, + project_path: project_path.unwrap_or_default(), + }); + } + + 
Ok(()) +} + +/// Collect label change events from `resource_label_events`. +fn collect_tl_label_events( + conn: &Connection, + filter: &TimelineFilter, + events: &mut Vec, +) -> Result<()> { + let (where_clause, params) = resource_event_where(filter); + + let sql = format!( + "SELECT e.created_at, e.action, e.label_name, e.actor_username, + e.issue_id, e.merge_request_id, + i.iid, mr.iid, i.project_id, mr.project_id, + COALESCE(pi.path_with_namespace, pm.path_with_namespace) AS project_path + FROM resource_label_events e + LEFT JOIN issues i ON i.id = e.issue_id + LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id + LEFT JOIN projects pi ON pi.id = i.project_id + LEFT JOIN projects pm ON pm.id = mr.project_id + WHERE {where_clause}" + ); + + let mut stmt = conn.prepare(&sql).context("preparing label events query")?; + let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(AsRef::as_ref).collect(); + let rows = stmt + .query_map(param_refs.as_slice(), |row| { + Ok(( + row.get::<_, i64>(0)?, + row.get::<_, String>(1)?, + row.get::<_, String>(2)?, + row.get::<_, Option>(3)?, + row.get::<_, Option>(4)?, + row.get::<_, Option>(5)?, + row.get::<_, Option>(6)?, + row.get::<_, Option>(7)?, + row.get::<_, Option>(8)?, + row.get::<_, Option>(9)?, + row.get::<_, Option>(10)?, + )) + }) + .context("querying label events")?; + + for row in rows { + let ( + created_at, + action, + label_name, + actor, + issue_id, + mr_id, + issue_iid, + mr_iid, + issue_pid, + mr_pid, + project_path, + ) = row.context("reading label event row")?; + + let Some((entity_key, _pid)) = + resolve_event_entity(issue_id, mr_id, issue_iid, mr_iid, issue_pid, mr_pid) + else { + continue; + }; + + let (event_kind, summary) = match action.as_str() { + "add" => ( + TimelineEventKind::LabelAdded, + format!("Label added: {label_name}"), + ), + "remove" => ( + TimelineEventKind::LabelRemoved, + format!("Label removed: {label_name}"), + ), + _ => continue, + }; + + events.push(TimelineEvent { 
+ timestamp_ms: created_at, + entity_key, + event_kind, + summary, + detail: Some(label_name), + actor, + project_path: project_path.unwrap_or_default(), + }); + } + + Ok(()) +} + +/// Collect milestone change events from `resource_milestone_events`. +fn collect_tl_milestone_events( + conn: &Connection, + filter: &TimelineFilter, + events: &mut Vec, +) -> Result<()> { + let (where_clause, params) = resource_event_where(filter); + + let sql = format!( + "SELECT e.created_at, e.action, e.milestone_title, e.actor_username, + e.issue_id, e.merge_request_id, + i.iid, mr.iid, i.project_id, mr.project_id, + COALESCE(pi.path_with_namespace, pm.path_with_namespace) AS project_path + FROM resource_milestone_events e + LEFT JOIN issues i ON i.id = e.issue_id + LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id + LEFT JOIN projects pi ON pi.id = i.project_id + LEFT JOIN projects pm ON pm.id = mr.project_id + WHERE {where_clause}" + ); + + let mut stmt = conn + .prepare(&sql) + .context("preparing milestone events query")?; + let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(AsRef::as_ref).collect(); + let rows = stmt + .query_map(param_refs.as_slice(), |row| { + Ok(( + row.get::<_, i64>(0)?, + row.get::<_, String>(1)?, + row.get::<_, String>(2)?, + row.get::<_, Option>(3)?, + row.get::<_, Option>(4)?, + row.get::<_, Option>(5)?, + row.get::<_, Option>(6)?, + row.get::<_, Option>(7)?, + row.get::<_, Option>(8)?, + row.get::<_, Option>(9)?, + row.get::<_, Option>(10)?, + )) + }) + .context("querying milestone events")?; + + for row in rows { + let ( + created_at, + action, + milestone_title, + actor, + issue_id, + mr_id, + issue_iid, + mr_iid, + issue_pid, + mr_pid, + project_path, + ) = row.context("reading milestone event row")?; + + let Some((entity_key, _pid)) = + resolve_event_entity(issue_id, mr_id, issue_iid, mr_iid, issue_pid, mr_pid) + else { + continue; + }; + + let (event_kind, summary) = match action.as_str() { + "add" => ( + 
TimelineEventKind::MilestoneSet, + format!("Milestone set: {milestone_title}"), + ), + "remove" => ( + TimelineEventKind::MilestoneRemoved, + format!("Milestone removed: {milestone_title}"), + ), + _ => continue, + }; + + events.push(TimelineEvent { + timestamp_ms: created_at, + entity_key, + event_kind, + summary, + detail: Some(milestone_title), + actor, + project_path: project_path.unwrap_or_default(), + }); + } + + Ok(()) +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Create the minimal schema needed for timeline queries. + fn create_dashboard_schema(conn: &Connection) { + conn.execute_batch( + " + CREATE TABLE projects ( + id INTEGER PRIMARY KEY, + gitlab_project_id INTEGER UNIQUE NOT NULL, + path_with_namespace TEXT NOT NULL + ); + CREATE TABLE issues ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT NOT NULL, + author_username TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE merge_requests ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT, + author_username TEXT, + created_at INTEGER, + updated_at INTEGER, + last_seen_at INTEGER NOT NULL + ); + ", + ) + .expect("create dashboard schema"); + } + + fn insert_issue(conn: &Connection, iid: i64, state: &str, updated_at: i64) { + conn.execute( + "INSERT INTO issues (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at) + VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)", + rusqlite::params![iid * 100, iid, format!("Issue {iid}"), state, updated_at], + ) + .expect("insert issue"); + } + + fn insert_mr(conn: &Connection, iid: i64, state: &str, updated_at: i64) { + conn.execute( + "INSERT INTO merge_requests (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at) + VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)", + 
rusqlite::params![iid * 100 + 50, iid, format!("MR {iid}"), state, updated_at], + ) + .expect("insert mr"); + } + + /// Add resource event tables to an existing schema. + fn add_resource_event_tables(conn: &Connection) { + conn.execute_batch( + " + CREATE TABLE IF NOT EXISTS resource_state_events ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + issue_id INTEGER, + merge_request_id INTEGER, + state TEXT NOT NULL, + actor_gitlab_id INTEGER, + actor_username TEXT, + created_at INTEGER NOT NULL, + source_commit TEXT, + source_merge_request_iid INTEGER + ); + CREATE TABLE IF NOT EXISTS resource_label_events ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + issue_id INTEGER, + merge_request_id INTEGER, + action TEXT NOT NULL, + label_name TEXT NOT NULL, + actor_gitlab_id INTEGER, + actor_username TEXT, + created_at INTEGER NOT NULL + ); + CREATE TABLE IF NOT EXISTS resource_milestone_events ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + issue_id INTEGER, + merge_request_id INTEGER, + action TEXT NOT NULL, + milestone_title TEXT NOT NULL, + milestone_id INTEGER, + actor_gitlab_id INTEGER, + actor_username TEXT, + created_at INTEGER NOT NULL + ); + ", + ) + .expect("create resource event tables"); + } + + /// Create a full timeline test schema (dashboard schema + resource events). + fn create_timeline_schema(conn: &Connection) { + create_dashboard_schema(conn); + add_resource_event_tables(conn); + // Insert a project for test entities. 
+ conn.execute( + "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'group/project')", + [], + ) + .expect("insert test project"); + } + + fn insert_state_event( + conn: &Connection, + gitlab_id: i64, + issue_id: Option, + mr_id: Option, + state: &str, + actor: &str, + created_at: i64, + ) { + conn.execute( + "INSERT INTO resource_state_events (gitlab_id, project_id, issue_id, merge_request_id, state, actor_username, created_at) + VALUES (?1, 1, ?2, ?3, ?4, ?5, ?6)", + rusqlite::params![gitlab_id, issue_id, mr_id, state, actor, created_at], + ) + .expect("insert state event"); + } + + #[allow(clippy::too_many_arguments)] + fn insert_label_event( + conn: &Connection, + gitlab_id: i64, + issue_id: Option, + mr_id: Option, + action: &str, + label: &str, + actor: &str, + created_at: i64, + ) { + conn.execute( + "INSERT INTO resource_label_events (gitlab_id, project_id, issue_id, merge_request_id, action, label_name, actor_username, created_at) + VALUES (?1, 1, ?2, ?3, ?4, ?5, ?6, ?7)", + rusqlite::params![gitlab_id, issue_id, mr_id, action, label, actor, created_at], + ) + .expect("insert label event"); + } + + #[test] + fn test_fetch_timeline_scoped() { + let conn = Connection::open_in_memory().unwrap(); + create_timeline_schema(&conn); + + // Create two issues. + let now = 1_700_000_000_000_i64; + insert_issue(&conn, 1, "opened", now - 100_000); + insert_issue(&conn, 2, "opened", now - 50_000); + + // Get internal IDs. + let issue1_id: i64 = conn + .query_row("SELECT id FROM issues WHERE iid = 1", [], |r| r.get(0)) + .unwrap(); + let issue2_id: i64 = conn + .query_row("SELECT id FROM issues WHERE iid = 2", [], |r| r.get(0)) + .unwrap(); + + // State events: issue 1 closed, issue 2 label added. 
+ insert_state_event( + &conn, + 1, + Some(issue1_id), + None, + "closed", + "alice", + now - 80_000, + ); + insert_label_event( + &conn, + 2, + Some(issue2_id), + None, + "add", + "bug", + "bob", + now - 30_000, + ); + + // Fetch scoped to issue 1. + let scope = TimelineScope::Entity(EntityKey::issue(1, 1)); + let events = fetch_timeline_events(&conn, &scope, 100).unwrap(); + + // Should only have issue 1's events: Created + StateChanged. + assert_eq!(events.len(), 2); + for event in &events { + assert_eq!(event.entity_key.iid, 1, "All events should be for issue #1"); + } + // Most recent first. + assert!(events[0].timestamp_ms >= events[1].timestamp_ms); + } + + #[test] + fn test_fetch_timeline_all_scope() { + let conn = Connection::open_in_memory().unwrap(); + create_timeline_schema(&conn); + + let now = 1_700_000_000_000_i64; + insert_issue(&conn, 1, "opened", now - 100_000); + insert_issue(&conn, 2, "opened", now - 50_000); + + let events = fetch_timeline_events(&conn, &TimelineScope::All, 100).unwrap(); + + // Should have Created events for both issues. + assert_eq!(events.len(), 2); + } + + #[test] + fn test_fetch_timeline_author_scope() { + let conn = Connection::open_in_memory().unwrap(); + create_timeline_schema(&conn); + + let now = 1_700_000_000_000_i64; + insert_issue(&conn, 1, "opened", now - 100_000); // default: no author_username in insert_issue + + let issue1_id: i64 = conn + .query_row("SELECT id FROM issues WHERE iid = 1", [], |r| r.get(0)) + .unwrap(); + + // State events by different actors. + insert_state_event( + &conn, + 1, + Some(issue1_id), + None, + "closed", + "alice", + now - 80_000, + ); + insert_state_event( + &conn, + 2, + Some(issue1_id), + None, + "reopened", + "bob", + now - 60_000, + ); + + let scope = TimelineScope::Author("alice".into()); + let events = fetch_timeline_events(&conn, &scope, 100).unwrap(); + + // Should only get alice's state event (Created events don't have author set via insert_issue). 
+ assert!(events.iter().all(|e| e.actor.as_deref() == Some("alice"))); + } + + #[test] + fn test_fetch_timeline_respects_limit() { + let conn = Connection::open_in_memory().unwrap(); + create_timeline_schema(&conn); + + let now = 1_700_000_000_000_i64; + for i in 1..=10 { + insert_issue(&conn, i, "opened", now - (i * 10_000)); + } + + let events = fetch_timeline_events(&conn, &TimelineScope::All, 3).unwrap(); + assert_eq!(events.len(), 3); + } + + #[test] + fn test_fetch_timeline_sorted_most_recent_first() { + let conn = Connection::open_in_memory().unwrap(); + create_timeline_schema(&conn); + + let now = 1_700_000_000_000_i64; + insert_issue(&conn, 1, "opened", now - 200_000); + insert_issue(&conn, 2, "opened", now - 100_000); + insert_issue(&conn, 3, "opened", now - 300_000); + + let events = fetch_timeline_events(&conn, &TimelineScope::All, 100).unwrap(); + + for window in events.windows(2) { + assert!( + window[0].timestamp_ms >= window[1].timestamp_ms, + "Events should be sorted most-recent-first" + ); + } + } + + #[test] + fn test_fetch_timeline_state_merged_is_merged_kind() { + let conn = Connection::open_in_memory().unwrap(); + create_timeline_schema(&conn); + + let now = 1_700_000_000_000_i64; + insert_mr(&conn, 1, "merged", now - 100_000); + + let mr_id: i64 = conn + .query_row("SELECT id FROM merge_requests WHERE iid = 1", [], |r| { + r.get(0) + }) + .unwrap(); + + insert_state_event(&conn, 1, None, Some(mr_id), "merged", "alice", now - 50_000); + + let scope = TimelineScope::Entity(EntityKey::mr(1, 1)); + let events = fetch_timeline_events(&conn, &scope, 100).unwrap(); + + let merged_events: Vec<_> = events + .iter() + .filter(|e| e.event_kind == TimelineEventKind::Merged) + .collect(); + assert_eq!(merged_events.len(), 1); + assert_eq!(merged_events[0].summary, "MR !1 merged"); + } + + #[test] + fn test_fetch_timeline_empty_db() { + let conn = Connection::open_in_memory().unwrap(); + create_timeline_schema(&conn); + + let events = 
fetch_timeline_events(&conn, &TimelineScope::All, 100).unwrap(); + assert!(events.is_empty()); + } +} diff --git a/crates/lore-tui/src/action/trace.rs b/crates/lore-tui/src/action/trace.rs new file mode 100644 index 0000000..1782114 --- /dev/null +++ b/crates/lore-tui/src/action/trace.rs @@ -0,0 +1,234 @@ +#![allow(dead_code)] + +//! Trace screen actions — fetch file provenance chains from the local database. +//! +//! Wraps `run_trace()` from `lore::core::trace` and provides an autocomplete +//! path query for the input field. + +use anyhow::Result; +use rusqlite::Connection; + +use lore::core::trace::{self, TraceResult}; + +/// Default limit for trace chain results in TUI queries. +const DEFAULT_LIMIT: usize = 50; + +/// Fetch trace chains for a file path. +/// +/// Wraps [`trace::run_trace()`] with TUI defaults. +pub fn fetch_trace( + conn: &Connection, + project_id: Option, + path: &str, + follow_renames: bool, + include_discussions: bool, +) -> Result { + Ok(trace::run_trace( + conn, + project_id, + path, + follow_renames, + include_discussions, + DEFAULT_LIMIT, + )?) +} + +/// Fetch known file paths from `mr_file_changes` for autocomplete. +/// +/// Returns distinct `new_path` values scoped to the given project (or all +/// projects if `None`), sorted alphabetically. 
+pub fn fetch_known_paths(conn: &Connection, project_id: Option) -> Result> { + let mut paths = if let Some(pid) = project_id { + let mut stmt = conn.prepare( + "SELECT DISTINCT new_path FROM mr_file_changes WHERE project_id = ?1 ORDER BY new_path", + )?; + let rows = stmt.query_map([pid], |row| row.get::<_, String>(0))?; + rows.filter_map(Result::ok).collect::>() + } else { + let mut stmt = + conn.prepare("SELECT DISTINCT new_path FROM mr_file_changes ORDER BY new_path")?; + let rows = stmt.query_map([], |row| row.get::<_, String>(0))?; + rows.filter_map(Result::ok).collect::>() + }; + paths.sort(); + paths.dedup(); + Ok(paths) +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Minimal schema for trace queries. + fn create_trace_schema(conn: &Connection) { + conn.execute_batch( + " + CREATE TABLE projects ( + id INTEGER PRIMARY KEY, + gitlab_project_id INTEGER UNIQUE NOT NULL, + path_with_namespace TEXT NOT NULL + ); + CREATE TABLE merge_requests ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT, + author_username TEXT, + draft INTEGER NOT NULL DEFAULT 0, + created_at INTEGER, + updated_at INTEGER, + merged_at INTEGER, + closed_at INTEGER, + web_url TEXT, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE mr_file_changes ( + id INTEGER PRIMARY KEY, + merge_request_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + new_path TEXT NOT NULL, + old_path TEXT, + change_type TEXT NOT NULL + ); + CREATE TABLE entity_references ( + id INTEGER PRIMARY KEY, + source_entity_type TEXT NOT NULL, + source_entity_id INTEGER NOT NULL, + target_entity_type TEXT NOT NULL, + target_entity_id INTEGER, + target_iid INTEGER NOT NULL, + project_id INTEGER NOT NULL, + reference_type TEXT NOT NULL + ); + CREATE TABLE issues ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT NOT NULL, + 
author_username TEXT, + web_url TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE discussions ( + id INTEGER PRIMARY KEY, + gitlab_discussion_id TEXT NOT NULL, + project_id INTEGER NOT NULL, + noteable_type TEXT NOT NULL, + issue_id INTEGER, + merge_request_id INTEGER, + resolvable INTEGER NOT NULL DEFAULT 0, + resolved INTEGER NOT NULL DEFAULT 0, + last_note_at INTEGER NOT NULL DEFAULT 0, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE notes ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + discussion_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + is_system INTEGER NOT NULL DEFAULT 0, + author_username TEXT, + body TEXT, + note_type TEXT, + position_new_path TEXT, + position_old_path TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE INDEX idx_mfc_new_path_project_mr + ON mr_file_changes(new_path, project_id, merge_request_id); + CREATE INDEX idx_mfc_old_path_project_mr + ON mr_file_changes(old_path, project_id, merge_request_id); + ", + ) + .expect("create trace schema"); + } + + #[test] + fn test_fetch_trace_empty_db() { + let conn = Connection::open_in_memory().unwrap(); + create_trace_schema(&conn); + + let result = fetch_trace(&conn, None, "src/main.rs", true, true).unwrap(); + assert!(result.trace_chains.is_empty()); + assert_eq!(result.total_chains, 0); + } + + #[test] + fn test_fetch_trace_with_mr() { + let conn = Connection::open_in_memory().unwrap(); + create_trace_schema(&conn); + + // Insert a project, MR, and file change. 
+ conn.execute_batch( + " + INSERT INTO projects(id, gitlab_project_id, path_with_namespace) + VALUES (1, 100, 'group/project'); + INSERT INTO merge_requests(id, gitlab_id, project_id, iid, title, state, author_username, updated_at, last_seen_at) + VALUES (1, 200, 1, 42, 'Add main.rs', 'merged', 'alice', 1700000000000, 1700000000000); + INSERT INTO mr_file_changes(id, merge_request_id, project_id, new_path, change_type) + VALUES (1, 1, 1, 'src/main.rs', 'added'); + ", + ) + .unwrap(); + + let result = fetch_trace(&conn, Some(1), "src/main.rs", true, false).unwrap(); + assert_eq!(result.trace_chains.len(), 1); + assert_eq!(result.trace_chains[0].mr_iid, 42); + assert_eq!(result.trace_chains[0].mr_author, "alice"); + assert_eq!(result.trace_chains[0].change_type, "added"); + } + + #[test] + fn test_fetch_known_paths_empty() { + let conn = Connection::open_in_memory().unwrap(); + create_trace_schema(&conn); + + let paths = fetch_known_paths(&conn, None).unwrap(); + assert!(paths.is_empty()); + } + + #[test] + fn test_fetch_known_paths_with_data() { + let conn = Connection::open_in_memory().unwrap(); + create_trace_schema(&conn); + + conn.execute_batch( + " + INSERT INTO mr_file_changes(id, merge_request_id, project_id, new_path, change_type) + VALUES (1, 1, 1, 'src/b.rs', 'added'), + (2, 1, 1, 'src/a.rs', 'modified'), + (3, 2, 1, 'src/b.rs', 'modified'); + ", + ) + .unwrap(); + + let paths = fetch_known_paths(&conn, None).unwrap(); + assert_eq!(paths, vec!["src/a.rs", "src/b.rs"]); + } + + #[test] + fn test_fetch_known_paths_scoped_to_project() { + let conn = Connection::open_in_memory().unwrap(); + create_trace_schema(&conn); + + conn.execute_batch( + " + INSERT INTO mr_file_changes(id, merge_request_id, project_id, new_path, change_type) + VALUES (1, 1, 1, 'src/a.rs', 'added'), + (2, 2, 2, 'src/b.rs', 'added'); + ", + ) + .unwrap(); + + let paths = fetch_known_paths(&conn, Some(1)).unwrap(); + assert_eq!(paths, vec!["src/a.rs"]); + } +} diff --git 
a/crates/lore-tui/src/action/who.rs b/crates/lore-tui/src/action/who.rs new file mode 100644 index 0000000..1303e4a --- /dev/null +++ b/crates/lore-tui/src/action/who.rs @@ -0,0 +1,285 @@ +#![allow(dead_code)] + +//! Who screen actions — fetch people-intelligence data from the local database. +//! +//! Each function wraps a `query_*` function from `lore::cli::commands::who` +//! and returns the appropriate [`WhoResult`] variant. + +use anyhow::Result; +use rusqlite::Connection; + +use lore::cli::commands::who; +use lore::core::config::ScoringConfig; +use lore::core::who_types::WhoResult; + +/// Default limit for result rows in TUI who queries. +const DEFAULT_LIMIT: usize = 20; + +/// Default time window: 6 months in milliseconds. +const SIX_MONTHS_MS: i64 = 180 * 24 * 60 * 60 * 1000; + +/// Fetch expert results for a file path. +pub fn fetch_who_expert( + conn: &Connection, + path: &str, + project_id: Option, + scoring: &ScoringConfig, + now_ms: i64, +) -> Result { + let since_ms = now_ms - SIX_MONTHS_MS; + let result = who::query_expert( + conn, + path, + project_id, + since_ms, + now_ms, + DEFAULT_LIMIT, + scoring, + false, // detail + false, // explain_score + false, // include_bots + )?; + Ok(WhoResult::Expert(result)) +} + +/// Fetch workload summary for a username. +pub fn fetch_who_workload( + conn: &Connection, + username: &str, + project_id: Option, + include_closed: bool, +) -> Result { + let result = who::query_workload( + conn, + username, + project_id, + None, // since_ms — show all for workload + DEFAULT_LIMIT, + include_closed, + )?; + Ok(WhoResult::Workload(result)) +} + +/// Fetch review activity breakdown for a username. +pub fn fetch_who_reviews( + conn: &Connection, + username: &str, + project_id: Option, + now_ms: i64, +) -> Result { + let since_ms = now_ms - SIX_MONTHS_MS; + let result = who::query_reviews(conn, username, project_id, since_ms)?; + Ok(WhoResult::Reviews(result)) +} + +/// Fetch recent active (unresolved) discussions. 
+pub fn fetch_who_active( + conn: &Connection, + project_id: Option, + include_closed: bool, + now_ms: i64, +) -> Result { + // Active mode default window: 7 days. + let seven_days_ms: i64 = 7 * 24 * 60 * 60 * 1000; + let since_ms = now_ms - seven_days_ms; + let result = who::query_active(conn, project_id, since_ms, DEFAULT_LIMIT, include_closed)?; + Ok(WhoResult::Active(result)) +} + +/// Fetch overlap (shared file knowledge) for a path. +pub fn fetch_who_overlap( + conn: &Connection, + path: &str, + project_id: Option, + now_ms: i64, +) -> Result { + let since_ms = now_ms - SIX_MONTHS_MS; + let result = who::query_overlap(conn, path, project_id, since_ms, DEFAULT_LIMIT)?; + Ok(WhoResult::Overlap(result)) +} + +#[cfg(test)] +mod tests { + use super::*; + + /// Minimal schema for who queries (matches the real DB schema). + fn create_who_schema(conn: &Connection) { + conn.execute_batch( + " + CREATE TABLE projects ( + id INTEGER PRIMARY KEY, + gitlab_project_id INTEGER UNIQUE NOT NULL, + path_with_namespace TEXT NOT NULL + ); + CREATE TABLE issues ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT NOT NULL, + author_username TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE merge_requests ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT, + author_username TEXT, + draft INTEGER NOT NULL DEFAULT 0, + created_at INTEGER, + updated_at INTEGER, + merged_at INTEGER, + closed_at INTEGER, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE issue_assignees ( + issue_id INTEGER NOT NULL, + username TEXT NOT NULL, + PRIMARY KEY(issue_id, username) + ); + CREATE TABLE mr_reviewers ( + merge_request_id INTEGER NOT NULL, + username TEXT NOT NULL, + PRIMARY KEY(merge_request_id, username) + ); + CREATE TABLE 
mr_file_changes ( + id INTEGER PRIMARY KEY, + merge_request_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + new_path TEXT NOT NULL, + old_path TEXT, + change_type TEXT NOT NULL + ); + CREATE TABLE discussions ( + id INTEGER PRIMARY KEY, + gitlab_discussion_id TEXT NOT NULL, + project_id INTEGER NOT NULL, + noteable_type TEXT NOT NULL, + issue_id INTEGER, + merge_request_id INTEGER, + resolvable INTEGER NOT NULL DEFAULT 0, + resolved INTEGER NOT NULL DEFAULT 0, + last_note_at INTEGER NOT NULL DEFAULT 0, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE notes ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + discussion_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + is_system INTEGER NOT NULL DEFAULT 0, + author_username TEXT, + body TEXT, + note_type TEXT, + position_new_path TEXT, + position_old_path TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + last_seen_at INTEGER NOT NULL + ); + -- Indexes needed by who queries + CREATE INDEX idx_notes_diffnote_path_created + ON notes(position_new_path, created_at) + WHERE note_type = 'DiffNote' AND is_system = 0; + CREATE INDEX idx_notes_old_path_author + ON notes(position_old_path, author_username) + WHERE note_type = 'DiffNote' AND is_system = 0; + CREATE INDEX idx_mfc_new_path_project_mr + ON mr_file_changes(new_path, project_id, merge_request_id); + CREATE INDEX idx_mfc_old_path_project_mr + ON mr_file_changes(old_path, project_id, merge_request_id); + ", + ) + .expect("create who schema"); + } + + fn default_scoring() -> ScoringConfig { + ScoringConfig::default() + } + + fn now_ms() -> i64 { + 1_700_000_000_000 // Fixed timestamp for deterministic tests. 
+ } + + #[test] + fn test_fetch_who_expert_empty_db_returns_empty() { + let conn = Connection::open_in_memory().unwrap(); + create_who_schema(&conn); + + let result = fetch_who_expert(&conn, "src/", None, &default_scoring(), now_ms()).unwrap(); + match result { + WhoResult::Expert(expert) => { + assert!(expert.experts.is_empty()); + assert!(!expert.truncated); + } + _ => panic!("Expected Expert variant"), + } + } + + #[test] + fn test_fetch_who_workload_empty_db_returns_empty() { + let conn = Connection::open_in_memory().unwrap(); + create_who_schema(&conn); + + let result = fetch_who_workload(&conn, "alice", None, false).unwrap(); + match result { + WhoResult::Workload(wl) => { + assert_eq!(wl.username, "alice"); + assert!(wl.assigned_issues.is_empty()); + assert!(wl.authored_mrs.is_empty()); + } + _ => panic!("Expected Workload variant"), + } + } + + #[test] + fn test_fetch_who_reviews_empty_db_returns_empty() { + let conn = Connection::open_in_memory().unwrap(); + create_who_schema(&conn); + + let result = fetch_who_reviews(&conn, "alice", None, now_ms()).unwrap(); + match result { + WhoResult::Reviews(rev) => { + assert_eq!(rev.username, "alice"); + assert_eq!(rev.total_diffnotes, 0); + } + _ => panic!("Expected Reviews variant"), + } + } + + #[test] + fn test_fetch_who_active_empty_db_returns_empty() { + let conn = Connection::open_in_memory().unwrap(); + create_who_schema(&conn); + + let result = fetch_who_active(&conn, None, false, now_ms()).unwrap(); + match result { + WhoResult::Active(active) => { + assert!(active.discussions.is_empty()); + assert_eq!(active.total_unresolved_in_window, 0); + } + _ => panic!("Expected Active variant"), + } + } + + #[test] + fn test_fetch_who_overlap_empty_db_returns_empty() { + let conn = Connection::open_in_memory().unwrap(); + create_who_schema(&conn); + + let result = fetch_who_overlap(&conn, "src/", None, now_ms()).unwrap(); + match result { + WhoResult::Overlap(overlap) => { + assert!(overlap.users.is_empty()); + 
assert!(!overlap.truncated); + } + _ => panic!("Expected Overlap variant"), + } + } +} diff --git a/crates/lore-tui/src/app/tests.rs b/crates/lore-tui/src/app/tests.rs index f35d8e8..6fa8016 100644 --- a/crates/lore-tui/src/app/tests.rs +++ b/crates/lore-tui/src/app/tests.rs @@ -139,6 +139,39 @@ fn test_g_then_i_navigates_to_issues() { assert!(app.navigation.is_at(&Screen::IssueList)); } +#[test] +fn test_g_then_s_on_bootstrap_starts_sync_in_place() { + let mut app = test_app(); + app.update(Msg::NavigateTo(Screen::Bootstrap)); + + // First key: 'g' + let key_g = KeyEvent::new(KeyCode::Char('g')); + app.update(Msg::RawEvent(Event::Key(key_g))); + + // Second key: 's' + let key_s = KeyEvent::new(KeyCode::Char('s')); + app.update(Msg::RawEvent(Event::Key(key_s))); + + assert!(app.navigation.is_at(&Screen::Bootstrap)); + assert!(app.state.bootstrap.sync_started); + assert!(matches!(app.input_mode, InputMode::Normal)); +} + +#[test] +fn test_g_then_s_from_dashboard_navigates_to_sync_screen() { + let mut app = test_app(); + + // First key: 'g' + let key_g = KeyEvent::new(KeyCode::Char('g')); + app.update(Msg::RawEvent(Event::Key(key_g))); + + // Second key: 's' + let key_s = KeyEvent::new(KeyCode::Char('s')); + app.update(Msg::RawEvent(Event::Key(key_s))); + + assert!(app.navigation.is_at(&Screen::Sync)); +} + #[test] fn test_go_prefix_timeout_cancels() { let clock = FakeClock::new(chrono::Utc::now()); @@ -328,3 +361,19 @@ fn test_default_is_new() { assert!(app.navigation.is_at(&Screen::Dashboard)); assert!(matches!(app.input_mode, InputMode::Normal)); } + +#[test] +fn test_sync_completed_from_bootstrap_resets_navigation_and_state() { + let mut app = test_app(); + + app.update(Msg::NavigateTo(Screen::Bootstrap)); + app.update(Msg::SyncStarted); + assert!(app.state.bootstrap.sync_started); + assert!(app.navigation.is_at(&Screen::Bootstrap)); + + app.update(Msg::SyncCompleted { elapsed_ms: 1234 }); + + assert!(app.navigation.is_at(&Screen::Dashboard)); + 
assert_eq!(app.navigation.depth(), 1); + assert!(!app.state.bootstrap.sync_started); +} diff --git a/crates/lore-tui/src/app/update.rs b/crates/lore-tui/src/app/update.rs index 52f6e63..b492f7c 100644 --- a/crates/lore-tui/src/app/update.rs +++ b/crates/lore-tui/src/app/update.rs @@ -125,13 +125,44 @@ impl LoreApp { } /// Handle keys in Palette mode. - fn handle_palette_mode_key(&mut self, key: &KeyEvent, _screen: &Screen) -> Cmd { - if key.code == KeyCode::Escape { - self.input_mode = InputMode::Normal; - return Cmd::none(); + fn handle_palette_mode_key(&mut self, key: &KeyEvent, screen: &Screen) -> Cmd { + match key.code { + KeyCode::Escape => { + self.state.command_palette.close(); + self.input_mode = InputMode::Normal; + Cmd::none() + } + KeyCode::Enter => { + if let Some(cmd_id) = self.state.command_palette.selected_command_id() { + self.state.command_palette.close(); + self.input_mode = InputMode::Normal; + self.execute_command(cmd_id, screen) + } else { + Cmd::none() + } + } + KeyCode::Up => { + self.state.command_palette.select_prev(); + Cmd::none() + } + KeyCode::Down => { + self.state.command_palette.select_next(); + Cmd::none() + } + KeyCode::Backspace => { + self.state + .command_palette + .delete_back(&self.command_registry, screen); + Cmd::none() + } + KeyCode::Char(c) => { + self.state + .command_palette + .insert_char(c, &self.command_registry, screen); + Cmd::none() + } + _ => Cmd::none(), } - // Palette key dispatch will be expanded in the palette widget phase. - Cmd::none() } /// Handle the second key of a g-prefix sequence. @@ -153,7 +184,7 @@ impl LoreApp { } /// Execute a command by ID. 
- fn execute_command(&mut self, id: &str, _screen: &Screen) -> Cmd { + fn execute_command(&mut self, id: &str, screen: &Screen) -> Cmd { match id { "quit" => Cmd::quit(), "go_back" => { @@ -166,7 +197,10 @@ impl LoreApp { } "command_palette" => { self.input_mode = InputMode::Palette; - self.state.command_palette.query_focused = true; + let screen = self.navigation.current().clone(); + self.state + .command_palette + .open(&self.command_registry, &screen); Cmd::none() } "open_in_browser" => { @@ -183,7 +217,16 @@ impl LoreApp { "go_search" => self.navigate_to(Screen::Search), "go_timeline" => self.navigate_to(Screen::Timeline), "go_who" => self.navigate_to(Screen::Who), - "go_sync" => self.navigate_to(Screen::Sync), + "go_file_history" => self.navigate_to(Screen::FileHistory), + "go_trace" => self.navigate_to(Screen::Trace), + "go_sync" => { + if screen == &Screen::Bootstrap { + self.state.bootstrap.sync_started = true; + Cmd::none() + } else { + self.navigate_to(Screen::Sync) + } + } "jump_back" => { self.navigation.jump_back(); Cmd::none() @@ -239,11 +282,7 @@ impl LoreApp { pub(crate) fn handle_msg(&mut self, msg: Msg) -> Cmd { // Record in crash context. self.crash_context.push(CrashEvent::MsgDispatched { - msg_name: format!("{msg:?}") - .split('(') - .next() - .unwrap_or("?") - .to_string(), + msg_name: msg.variant_name().to_string(), screen: self.navigation.current().label().to_string(), }); @@ -351,16 +390,24 @@ impl LoreApp { Cmd::none() } Msg::DiscussionsLoaded { - generation, + generation: _, key, discussions, } => { - let screen = Screen::IssueDetail(key.clone()); - if self - .supervisor - .is_current(&TaskKey::LoadScreen(screen.clone()), generation) - { - self.state.issue_detail.apply_discussions(discussions); + // Progressive hydration: the parent detail task already called + // supervisor.complete(), so is_current() would return false. + // Instead, check that the detail state still expects this key. 
+ match key.kind { + crate::message::EntityKind::Issue => { + if self.state.issue_detail.current_key.as_ref() == Some(&key) { + self.state.issue_detail.apply_discussions(discussions); + } + } + crate::message::EntityKind::MergeRequest => { + if self.state.mr_detail.current_key.as_ref() == Some(&key) { + self.state.mr_detail.apply_discussions(discussions); + } + } } Cmd::none() } @@ -384,6 +431,86 @@ impl LoreApp { Cmd::none() } + // --- Sync lifecycle (Bootstrap auto-transition) --- + Msg::SyncStarted => { + if *self.navigation.current() == Screen::Bootstrap { + self.state.bootstrap.sync_started = true; + } + Cmd::none() + } + Msg::SyncCompleted { .. } => { + // If we came from Bootstrap, replace nav history with Dashboard. + if *self.navigation.current() == Screen::Bootstrap { + self.state.bootstrap.sync_started = false; + self.navigation.reset_to(Screen::Dashboard); + + // Trigger a fresh dashboard load without preserving Bootstrap in history. + let dashboard = Screen::Dashboard; + let load_state = if self.state.load_state.was_visited(&dashboard) { + LoadState::Refreshing + } else { + LoadState::LoadingInitial + }; + self.state.set_loading(dashboard.clone(), load_state); + let _handle = self.supervisor.submit(TaskKey::LoadScreen(dashboard)); + } + Cmd::none() + } + + // --- Who screen --- + Msg::WhoResultLoaded { generation, result } => { + if self + .supervisor + .is_current(&TaskKey::LoadScreen(Screen::Who), generation) + { + self.state.who.apply_results(generation, *result); + self.state.set_loading(Screen::Who, LoadState::Idle); + self.supervisor + .complete(&TaskKey::LoadScreen(Screen::Who), generation); + } + Cmd::none() + } + Msg::WhoModeChanged => { + // Mode tab changed — view will re-render from state. 
+ Cmd::none() + } + + // --- File History screen --- + Msg::FileHistoryLoaded { generation, result } => { + if self + .supervisor + .is_current(&TaskKey::LoadScreen(Screen::FileHistory), generation) + { + self.state.file_history.apply_results(generation, *result); + self.state.set_loading(Screen::FileHistory, LoadState::Idle); + self.supervisor + .complete(&TaskKey::LoadScreen(Screen::FileHistory), generation); + } + Cmd::none() + } + Msg::FileHistoryKnownPathsLoaded { paths } => { + self.state.file_history.known_paths = paths; + Cmd::none() + } + + // --- Trace screen --- + Msg::TraceResultLoaded { generation, result } => { + if self + .supervisor + .is_current(&TaskKey::LoadScreen(Screen::Trace), generation) + { + self.state.trace.apply_result(generation, *result); + self.state.set_loading(Screen::Trace, LoadState::Idle); + self.supervisor + .complete(&TaskKey::LoadScreen(Screen::Trace), generation); + } + Cmd::none() + } + Msg::TraceKnownPathsLoaded { paths } => { + self.state.trace.known_paths = paths; + Cmd::none() + } + // All other message variants: no-op for now. // Future phases will fill these in as screens are implemented. _ => Cmd::none(), diff --git a/crates/lore-tui/src/entity_cache.rs b/crates/lore-tui/src/entity_cache.rs new file mode 100644 index 0000000..b76f299 --- /dev/null +++ b/crates/lore-tui/src/entity_cache.rs @@ -0,0 +1,232 @@ +//! Bounded LRU entity cache for near-instant detail view reopens. +//! +//! Caches `IssueDetail` / `MrDetail` payloads keyed on [`EntityKey`]. +//! Tick-based LRU eviction keeps the most-recently-accessed entries alive +//! while bounding memory usage. Selective invalidation removes only +//! stale entries after a sync, rather than flushing the whole cache. +//! +//! Single-threaded (TUI event loop) — no `Arc`/`Mutex` needed. + +use std::collections::HashMap; + +use crate::message::EntityKey; + +/// Default entity cache capacity (sufficient for drill-in/out workflows). 
+const DEFAULT_CAPACITY: usize = 64; + +/// Bounded LRU cache keyed on [`EntityKey`]. +/// +/// Each entry stores its value alongside a monotonic tick recording the +/// last access time. On capacity overflow, the entry with the lowest +/// tick (least recently used) is evicted. +pub struct EntityCache { + entries: HashMap, + capacity: usize, + tick: u64, +} + +impl EntityCache { + /// Create a new cache with the default capacity (64). + #[must_use] + pub fn new() -> Self { + Self { + entries: HashMap::with_capacity(DEFAULT_CAPACITY), + capacity: DEFAULT_CAPACITY, + tick: 0, + } + } + + /// Create a new cache with the given capacity. + /// + /// # Panics + /// Panics if `capacity` is zero. + #[must_use] + pub fn with_capacity(capacity: usize) -> Self { + assert!(capacity > 0, "EntityCache capacity must be > 0"); + Self { + entries: HashMap::with_capacity(capacity), + capacity, + tick: 0, + } + } + + /// Look up an entry, bumping its access tick to keep it alive. + pub fn get(&mut self, key: &EntityKey) -> Option<&V> { + self.tick += 1; + let tick = self.tick; + self.entries.get_mut(key).map(|(val, t)| { + *t = tick; + &*val + }) + } + + /// Insert an entry, evicting the least-recently-accessed entry if at capacity. + pub fn put(&mut self, key: EntityKey, value: V) { + self.tick += 1; + let tick = self.tick; + + // If key already exists, just update in place. + if let Some(entry) = self.entries.get_mut(&key) { + *entry = (value, tick); + return; + } + + // Evict LRU if at capacity. + if self.entries.len() >= self.capacity { + if let Some(lru_key) = self + .entries + .iter() + .min_by_key(|(_, (_, t))| *t) + .map(|(k, _)| k.clone()) + { + self.entries.remove(&lru_key); + } + } + + self.entries.insert(key, (value, tick)); + } + + /// Remove only the specified keys, leaving all other entries intact. + pub fn invalidate(&mut self, keys: &[EntityKey]) { + for key in keys { + self.entries.remove(key); + } + } + + /// Number of entries currently cached. 
+ #[must_use] + pub fn len(&self) -> usize { + self.entries.len() + } + + /// Whether the cache is empty. + #[must_use] + pub fn is_empty(&self) -> bool { + self.entries.is_empty() + } +} + +impl Default for EntityCache { + fn default() -> Self { + Self::new() + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::message::EntityKey; + + fn issue(iid: i64) -> EntityKey { + EntityKey::issue(1, iid) + } + + fn mr(iid: i64) -> EntityKey { + EntityKey::mr(1, iid) + } + + #[test] + fn test_get_returns_recently_put_item() { + let mut cache = EntityCache::with_capacity(4); + cache.put(issue(1), "issue-1"); + assert_eq!(cache.get(&issue(1)), Some(&"issue-1")); + } + + #[test] + fn test_get_returns_none_for_missing_key() { + let mut cache: EntityCache<&str> = EntityCache::with_capacity(4); + assert_eq!(cache.get(&issue(99)), None); + } + + #[test] + fn test_lru_eviction_removes_least_recently_used() { + let mut cache = EntityCache::with_capacity(3); + cache.put(issue(1), "a"); // tick 1 + cache.put(issue(2), "b"); // tick 2 + cache.put(issue(3), "c"); // tick 3 + + // Access issue(1) to bump its tick above issue(2). + cache.get(&issue(1)); // tick 4 -> issue(1) now most recent + + // Insert a 4th item: should evict issue(2) (tick 2, lowest). 
+ cache.put(issue(4), "d"); // tick 5 + + assert_eq!(cache.get(&issue(1)), Some(&"a"), "issue(1) should survive (recently accessed)"); + assert_eq!(cache.get(&issue(2)), None, "issue(2) should be evicted (LRU)"); + assert_eq!(cache.get(&issue(3)), Some(&"c"), "issue(3) should survive"); + assert_eq!(cache.get(&issue(4)), Some(&"d"), "issue(4) just inserted"); + } + + #[test] + fn test_put_overwrites_existing_key() { + let mut cache = EntityCache::with_capacity(4); + cache.put(issue(1), "v1"); + cache.put(issue(1), "v2"); + assert_eq!(cache.get(&issue(1)), Some(&"v2")); + assert_eq!(cache.len(), 1); + } + + #[test] + fn test_invalidate_removes_only_specified_keys() { + let mut cache = EntityCache::with_capacity(8); + cache.put(issue(1), "a"); + cache.put(issue(2), "b"); + cache.put(mr(3), "c"); + cache.put(mr(4), "d"); + + cache.invalidate(&[issue(2), mr(4)]); + + assert_eq!(cache.get(&issue(1)), Some(&"a"), "issue(1) not invalidated"); + assert_eq!(cache.get(&issue(2)), None, "issue(2) was invalidated"); + assert_eq!(cache.get(&mr(3)), Some(&"c"), "mr(3) not invalidated"); + assert_eq!(cache.get(&mr(4)), None, "mr(4) was invalidated"); + } + + #[test] + fn test_invalidate_with_nonexistent_keys_is_noop() { + let mut cache = EntityCache::with_capacity(4); + cache.put(issue(1), "a"); + cache.invalidate(&[issue(99), mr(99)]); + assert_eq!(cache.len(), 1); + } + + #[test] + fn test_default_capacity_is_64() { + let cache: EntityCache = EntityCache::new(); + assert_eq!(cache.capacity, DEFAULT_CAPACITY); + assert_eq!(cache.capacity, 64); + } + + #[test] + fn test_len_and_is_empty() { + let mut cache = EntityCache::with_capacity(4); + assert!(cache.is_empty()); + assert_eq!(cache.len(), 0); + + cache.put(issue(1), "a"); + assert!(!cache.is_empty()); + assert_eq!(cache.len(), 1); + } + + #[test] + #[should_panic(expected = "capacity must be > 0")] + fn test_zero_capacity_panics() { + let _: EntityCache = EntityCache::with_capacity(0); + } + + #[test] + fn 
test_mixed_entity_kinds() { + let mut cache = EntityCache::with_capacity(4); + // Same iid, different kinds — should be separate entries. + cache.put(issue(42), "issue-42"); + cache.put(mr(42), "mr-42"); + + assert_eq!(cache.get(&issue(42)), Some(&"issue-42")); + assert_eq!(cache.get(&mr(42)), Some(&"mr-42")); + assert_eq!(cache.len(), 2); + } +} diff --git a/crates/lore-tui/src/lib.rs b/crates/lore-tui/src/lib.rs index 2852aea..fe277e1 100644 --- a/crates/lore-tui/src/lib.rs +++ b/crates/lore-tui/src/lib.rs @@ -31,6 +31,10 @@ pub mod view; // View layer: render_screen + common widgets (bd-26f2) pub mod action; // Data-fetching actions for TUI screens (bd-35g5+) pub mod filter_dsl; // Filter DSL tokenizer for list screen filter bars (bd-18qs) +// Phase 4 modules. +pub mod entity_cache; // Bounded LRU entity cache for detail view reopens (bd-2og9) +pub mod render_cache; // Bounded render cache for expensive per-frame computations (bd-2og9) + /// Options controlling how the TUI launches. #[derive(Debug, Clone)] pub struct LaunchOptions { @@ -52,6 +56,14 @@ pub struct LaunchOptions { /// /// Loads config from `options.config_path` (or default location), /// opens the database read-only, and enters the FrankenTUI event loop. +/// +/// ## Preflight sequence +/// +/// 1. **Schema preflight** — validate the database schema version before +/// creating the app. If incompatible, print an actionable error and exit +/// with a non-zero code. +/// 2. **Data readiness** — check whether the database has any entity data. +/// If empty, start on the Bootstrap screen; otherwise start on Dashboard. pub fn launch_tui(options: LaunchOptions) -> Result<()> { let _options = options; // Phase 1 will wire this to LoreApp + App::fullscreen().run() @@ -59,6 +71,30 @@ pub fn launch_tui(options: LaunchOptions) -> Result<()> { Ok(()) } +/// Run the schema preflight check. +/// +/// Returns `Ok(())` if the schema is compatible, or an error with an +/// actionable message if it's not. 
The caller should exit non-zero on error. +pub fn schema_preflight(conn: &rusqlite::Connection) -> Result<()> { + use state::bootstrap::SchemaCheck; + + match action::check_schema_version(conn, action::MINIMUM_SCHEMA_VERSION) { + SchemaCheck::Compatible { .. } => Ok(()), + SchemaCheck::NoDB => { + anyhow::bail!( + "No lore database found.\n\ + Run 'lore init' to create a config, then 'lore sync' to fetch data." + ); + } + SchemaCheck::Incompatible { found, minimum } => { + anyhow::bail!( + "Database schema version {found} is too old (minimum: {minimum}).\n\ + Run 'lore migrate' to upgrade, or 'lore sync' to rebuild." + ); + } + } +} + /// Launch the TUI with an initial sync pass. /// /// Runs `lore sync` in the background while displaying a progress screen, diff --git a/crates/lore-tui/src/message.rs b/crates/lore-tui/src/message.rs index c54a6b0..833db5d 100644 --- a/crates/lore-tui/src/message.rs +++ b/crates/lore-tui/src/message.rs @@ -18,7 +18,7 @@ use ftui::Event; // --------------------------------------------------------------------------- /// Distinguishes issue vs merge request in an [`EntityKey`]. 
-#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, PartialOrd, Ord)] pub enum EntityKind { Issue, MergeRequest, @@ -84,6 +84,8 @@ pub enum Screen { Search, Timeline, Who, + Trace, + FileHistory, Sync, Stats, Doctor, @@ -103,6 +105,8 @@ impl Screen { Self::Search => "Search", Self::Timeline => "Timeline", Self::Who => "Who", + Self::Trace => "Trace", + Self::FileHistory => "File History", Self::Sync => "Sync", Self::Stats => "Stats", Self::Doctor => "Doctor", @@ -285,6 +289,24 @@ pub enum Msg { }, WhoModeChanged, + // --- Trace --- + TraceResultLoaded { + generation: u64, + result: Box, + }, + TraceKnownPathsLoaded { + paths: Vec, + }, + + // --- File History --- + FileHistoryLoaded { + generation: u64, + result: Box, + }, + FileHistoryKnownPathsLoaded { + paths: Vec, + }, + // --- Sync --- SyncStarted, SyncProgress { @@ -332,6 +354,72 @@ pub enum Msg { Quit, } +impl Msg { + /// Return the variant name as a static string without formatting payload. + /// + /// Used by crash context to cheaply record which message was dispatched. + pub fn variant_name(&self) -> &'static str { + match self { + Self::RawEvent(_) => "RawEvent", + Self::Tick => "Tick", + Self::Resize { .. } => "Resize", + Self::NavigateTo(_) => "NavigateTo", + Self::GoBack => "GoBack", + Self::GoForward => "GoForward", + Self::GoHome => "GoHome", + Self::JumpBack(_) => "JumpBack", + Self::JumpForward(_) => "JumpForward", + Self::OpenCommandPalette => "OpenCommandPalette", + Self::CloseCommandPalette => "CloseCommandPalette", + Self::CommandPaletteInput(_) => "CommandPaletteInput", + Self::CommandPaletteSelect(_) => "CommandPaletteSelect", + Self::IssueListLoaded { .. } => "IssueListLoaded", + Self::IssueListFilterChanged(_) => "IssueListFilterChanged", + Self::IssueListSortChanged => "IssueListSortChanged", + Self::IssueSelected(_) => "IssueSelected", + Self::MrListLoaded { .. 
} => "MrListLoaded", + Self::MrListFilterChanged(_) => "MrListFilterChanged", + Self::MrSelected(_) => "MrSelected", + Self::IssueDetailLoaded { .. } => "IssueDetailLoaded", + Self::MrDetailLoaded { .. } => "MrDetailLoaded", + Self::DiscussionsLoaded { .. } => "DiscussionsLoaded", + Self::SearchQueryChanged(_) => "SearchQueryChanged", + Self::SearchRequestStarted { .. } => "SearchRequestStarted", + Self::SearchExecuted { .. } => "SearchExecuted", + Self::SearchResultSelected(_) => "SearchResultSelected", + Self::SearchModeChanged => "SearchModeChanged", + Self::SearchCapabilitiesLoaded => "SearchCapabilitiesLoaded", + Self::TimelineLoaded { .. } => "TimelineLoaded", + Self::TimelineEntitySelected(_) => "TimelineEntitySelected", + Self::WhoResultLoaded { .. } => "WhoResultLoaded", + Self::WhoModeChanged => "WhoModeChanged", + Self::TraceResultLoaded { .. } => "TraceResultLoaded", + Self::TraceKnownPathsLoaded { .. } => "TraceKnownPathsLoaded", + Self::FileHistoryLoaded { .. } => "FileHistoryLoaded", + Self::FileHistoryKnownPathsLoaded { .. } => "FileHistoryKnownPathsLoaded", + Self::SyncStarted => "SyncStarted", + Self::SyncProgress { .. } => "SyncProgress", + Self::SyncProgressBatch { .. } => "SyncProgressBatch", + Self::SyncLogLine(_) => "SyncLogLine", + Self::SyncBackpressureDrop => "SyncBackpressureDrop", + Self::SyncCompleted { .. } => "SyncCompleted", + Self::SyncCancelled => "SyncCancelled", + Self::SyncFailed(_) => "SyncFailed", + Self::SyncStreamStats { .. } => "SyncStreamStats", + Self::SearchDebounceArmed { .. } => "SearchDebounceArmed", + Self::SearchDebounceFired { .. } => "SearchDebounceFired", + Self::DashboardLoaded { .. 
} => "DashboardLoaded", + Self::Error(_) => "Error", + Self::ShowHelp => "ShowHelp", + Self::ShowCliEquivalent => "ShowCliEquivalent", + Self::OpenInBrowser => "OpenInBrowser", + Self::BlurTextInput => "BlurTextInput", + Self::ScrollToTopCurrentScreen => "ScrollToTopCurrentScreen", + Self::Quit => "Quit", + } + } +} + /// Convert terminal events into messages. /// /// FrankenTUI requires `From` on the message type so the runtime @@ -373,26 +461,130 @@ pub struct Discussion { pub notes: Vec, } -/// Placeholder for a search result. +// --------------------------------------------------------------------------- +// SearchMode +// --------------------------------------------------------------------------- + +/// Search mode determines which backend index is used. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash, Default)] +pub enum SearchMode { + /// FTS5 only — fast, always available if documents are indexed. + #[default] + Lexical, + /// FTS5 + vector RRF merge — best quality when embeddings exist. + Hybrid, + /// Vector-only cosine similarity — requires Ollama embeddings. + Semantic, +} + +impl SearchMode { + /// Short label for the mode indicator in the query bar. + #[must_use] + pub fn label(self) -> &'static str { + match self { + Self::Lexical => "FTS", + Self::Hybrid => "Hybrid", + Self::Semantic => "Vec", + } + } + + /// Cycle to the next mode, wrapping around. + #[must_use] + pub fn next(self) -> Self { + match self { + Self::Lexical => Self::Hybrid, + Self::Hybrid => Self::Semantic, + Self::Semantic => Self::Lexical, + } + } +} + +impl std::fmt::Display for SearchMode { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + f.write_str(self.label()) + } +} + +/// A search result from the local database. #[derive(Debug, Clone)] pub struct SearchResult { pub key: EntityKey, pub title: String, pub score: f64, + pub snippet: String, + pub project_path: String, } -/// Placeholder for a timeline event. 
+// --------------------------------------------------------------------------- +// TimelineEventKind +// --------------------------------------------------------------------------- + +/// Event kind for color coding in the TUI timeline. +/// +/// Derived from raw resource event tables in the local database. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum TimelineEventKind { + /// Entity was created. + Created, + /// State changed (opened/closed/reopened/locked). + StateChanged, + /// Label added to entity. + LabelAdded, + /// Label removed from entity. + LabelRemoved, + /// Milestone set on entity. + MilestoneSet, + /// Milestone removed from entity. + MilestoneRemoved, + /// Merge request was merged. + Merged, +} + +impl TimelineEventKind { + /// Short display label for the event kind badge. + #[must_use] + pub fn label(self) -> &'static str { + match self { + Self::Created => "Created", + Self::StateChanged => "State", + Self::LabelAdded => "+Label", + Self::LabelRemoved => "-Label", + Self::MilestoneSet => "+Mile", + Self::MilestoneRemoved => "-Mile", + Self::Merged => "Merged", + } + } +} + +// --------------------------------------------------------------------------- +// TimelineEvent +// --------------------------------------------------------------------------- + +/// A timeline event for TUI display. +/// +/// Produced by [`crate::action::fetch_timeline_events`] from raw +/// resource event tables. Contains enough data for the view to +/// render color-coded events with navigable entity references. #[derive(Debug, Clone)] pub struct TimelineEvent { - pub timestamp: String, - pub description: String, + /// Epoch milliseconds (UTC). + pub timestamp_ms: i64, + /// Entity this event belongs to (for navigation). + pub entity_key: EntityKey, + /// Event kind for color coding. + pub event_kind: TimelineEventKind, + /// Human-readable summary (e.g., "State changed to closed"). 
+ pub summary: String, + /// Optional detail text (e.g., label name, new state value). + pub detail: Option, + /// Who performed the action. + pub actor: Option, + /// Project path for display (e.g., "group/project"). + pub project_path: String, } -/// Placeholder for who/people intelligence result. -#[derive(Debug, Clone)] -pub struct WhoResult { - pub experts: Vec, -} +// WhoResult is re-exported from the lore core crate. +pub use lore::core::who_types::WhoResult; // DashboardData moved to crate::state::dashboard (enriched with // EntityCounts, ProjectSyncInfo, RecentActivityItem, LastSyncInfo). @@ -500,4 +692,49 @@ mod tests { let msg = Msg::from(Event::Focus(true)); assert!(matches!(msg, Msg::RawEvent(Event::Focus(true)))); } + + #[test] + fn test_search_mode_labels() { + assert_eq!(SearchMode::Lexical.label(), "FTS"); + assert_eq!(SearchMode::Hybrid.label(), "Hybrid"); + assert_eq!(SearchMode::Semantic.label(), "Vec"); + } + + #[test] + fn test_search_mode_next_cycles() { + assert_eq!(SearchMode::Lexical.next(), SearchMode::Hybrid); + assert_eq!(SearchMode::Hybrid.next(), SearchMode::Semantic); + assert_eq!(SearchMode::Semantic.next(), SearchMode::Lexical); + } + + #[test] + fn test_search_mode_display() { + assert_eq!(format!("{}", SearchMode::Lexical), "FTS"); + assert_eq!(format!("{}", SearchMode::Hybrid), "Hybrid"); + assert_eq!(format!("{}", SearchMode::Semantic), "Vec"); + } + + #[test] + fn test_search_mode_default_is_lexical() { + assert_eq!(SearchMode::default(), SearchMode::Lexical); + } + + // -- TimelineEventKind tests -- + + #[test] + fn test_timeline_event_kind_labels() { + assert_eq!(TimelineEventKind::Created.label(), "Created"); + assert_eq!(TimelineEventKind::StateChanged.label(), "State"); + assert_eq!(TimelineEventKind::LabelAdded.label(), "+Label"); + assert_eq!(TimelineEventKind::LabelRemoved.label(), "-Label"); + assert_eq!(TimelineEventKind::MilestoneSet.label(), "+Mile"); + assert_eq!(TimelineEventKind::MilestoneRemoved.label(), 
"-Mile"); + assert_eq!(TimelineEventKind::Merged.label(), "Merged"); + } + + #[test] + fn test_timeline_event_kind_equality() { + assert_eq!(TimelineEventKind::Created, TimelineEventKind::Created); + assert_ne!(TimelineEventKind::Created, TimelineEventKind::Merged); + } } diff --git a/crates/lore-tui/src/render_cache.rs b/crates/lore-tui/src/render_cache.rs new file mode 100644 index 0000000..1a505a4 --- /dev/null +++ b/crates/lore-tui/src/render_cache.rs @@ -0,0 +1,252 @@ +//! Bounded render cache for expensive per-frame computations. +//! +//! Caches pre-computed render artifacts (markdown to styled text, discussion +//! tree layout, issue body rendering) keyed on `(content_hash, terminal_width)`. +//! Width is part of the key because line wrapping changes with terminal size. +//! +//! Invalidation strategies: +//! - **Width change** (`invalidate_width`): purge entries not matching current width +//! - **Theme change** (`invalidate_all`): full clear (colors changed) +//! +//! Single-threaded (TUI event loop) — no `Arc`/`Mutex` needed. + +use std::collections::HashMap; + +/// Default render cache capacity. +const DEFAULT_CAPACITY: usize = 256; + +/// Cache key: content identity + terminal width that produced the render. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub struct RenderCacheKey { + /// Hash of the source content (e.g., `DefaultHasher` or FxHash of text). + pub content_hash: u64, + /// Terminal width at the time of rendering. + pub terminal_width: u16, +} + +impl RenderCacheKey { + /// Create a new render cache key. + #[must_use] + pub fn new(content_hash: u64, terminal_width: u16) -> Self { + Self { + content_hash, + terminal_width, + } + } +} + +/// Bounded cache for pre-computed render artifacts. +/// +/// Uses simple capacity-bounded insertion. When at capacity, the oldest +/// entry (lowest insertion order) is evicted. 
This is simpler than full +/// LRU because render cache hits tend to be ephemeral — the current +/// frame's renders are the most important. +pub struct RenderCache { + entries: HashMap, + capacity: usize, + tick: u64, +} + +impl RenderCache { + /// Create a new cache with the default capacity (256). + #[must_use] + pub fn new() -> Self { + Self { + entries: HashMap::with_capacity(DEFAULT_CAPACITY), + capacity: DEFAULT_CAPACITY, + tick: 0, + } + } + + /// Create a new cache with the given capacity. + /// + /// # Panics + /// Panics if `capacity` is zero. + #[must_use] + pub fn with_capacity(capacity: usize) -> Self { + assert!(capacity > 0, "RenderCache capacity must be > 0"); + Self { + entries: HashMap::with_capacity(capacity), + capacity, + tick: 0, + } + } + + /// Look up a cached render artifact. + pub fn get(&self, key: &RenderCacheKey) -> Option<&V> { + self.entries.get(key).map(|(v, _)| v) + } + + /// Insert a render artifact, evicting the oldest entry if at capacity. + pub fn put(&mut self, key: RenderCacheKey, value: V) { + self.tick += 1; + let tick = self.tick; + + if let Some(entry) = self.entries.get_mut(&key) { + *entry = (value, tick); + return; + } + + if self.entries.len() >= self.capacity { + if let Some(oldest_key) = self + .entries + .iter() + .min_by_key(|(_, (_, t))| *t) + .map(|(k, _)| *k) + { + self.entries.remove(&oldest_key); + } + } + + self.entries.insert(key, (value, tick)); + } + + /// Remove entries NOT matching the given width (terminal resize). + /// + /// After a resize, only entries rendered at the new width are still valid. + pub fn invalidate_width(&mut self, keep_width: u16) { + self.entries + .retain(|k, _| k.terminal_width == keep_width); + } + + /// Clear the entire cache (theme change — all colors invalidated). + pub fn invalidate_all(&mut self) { + self.entries.clear(); + } + + /// Number of entries currently cached. + #[must_use] + pub fn len(&self) -> usize { + self.entries.len() + } + + /// Whether the cache is empty. 
+ #[must_use] + pub fn is_empty(&self) -> bool { + self.entries.is_empty() + } +} + +impl Default for RenderCache { + fn default() -> Self { + Self::new() + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + fn key(hash: u64, width: u16) -> RenderCacheKey { + RenderCacheKey::new(hash, width) + } + + #[test] + fn test_get_returns_recently_put_item() { + let mut cache = RenderCache::with_capacity(4); + cache.put(key(100, 80), "rendered-a"); + assert_eq!(cache.get(&key(100, 80)), Some(&"rendered-a")); + } + + #[test] + fn test_get_returns_none_for_missing_key() { + let cache: RenderCache<&str> = RenderCache::with_capacity(4); + assert_eq!(cache.get(&key(100, 80)), None); + } + + #[test] + fn test_same_hash_different_width_are_separate() { + let mut cache = RenderCache::with_capacity(4); + cache.put(key(100, 80), "wide"); + cache.put(key(100, 40), "narrow"); + + assert_eq!(cache.get(&key(100, 80)), Some(&"wide")); + assert_eq!(cache.get(&key(100, 40)), Some(&"narrow")); + assert_eq!(cache.len(), 2); + } + + #[test] + fn test_put_overwrites_existing_key() { + let mut cache = RenderCache::with_capacity(4); + cache.put(key(100, 80), "v1"); + cache.put(key(100, 80), "v2"); + assert_eq!(cache.get(&key(100, 80)), Some(&"v2")); + assert_eq!(cache.len(), 1); + } + + #[test] + fn test_eviction_at_capacity() { + let mut cache = RenderCache::with_capacity(2); + cache.put(key(1, 80), "a"); // tick 1 + cache.put(key(2, 80), "b"); // tick 2 + cache.put(key(3, 80), "c"); // tick 3 -> evicts key(1) (tick 1, oldest) + + assert_eq!(cache.get(&key(1, 80)), None, "oldest should be evicted"); + assert_eq!(cache.get(&key(2, 80)), Some(&"b")); + assert_eq!(cache.get(&key(3, 80)), Some(&"c")); + } + + #[test] + fn test_invalidate_width_removes_non_matching() { + let mut cache = RenderCache::with_capacity(8); + 
cache.put(key(1, 80), "a"); + cache.put(key(2, 80), "b"); + cache.put(key(3, 120), "c"); + cache.put(key(4, 40), "d"); + + cache.invalidate_width(80); + + assert_eq!(cache.get(&key(1, 80)), Some(&"a"), "width=80 kept"); + assert_eq!(cache.get(&key(2, 80)), Some(&"b"), "width=80 kept"); + assert_eq!(cache.get(&key(3, 120)), None, "width=120 removed"); + assert_eq!(cache.get(&key(4, 40)), None, "width=40 removed"); + assert_eq!(cache.len(), 2); + } + + #[test] + fn test_invalidate_all_clears_everything() { + let mut cache = RenderCache::with_capacity(8); + cache.put(key(1, 80), "a"); + cache.put(key(2, 120), "b"); + cache.put(key(3, 40), "c"); + + cache.invalidate_all(); + + assert!(cache.is_empty()); + assert_eq!(cache.len(), 0); + } + + #[test] + fn test_default_capacity_is_256() { + let cache: RenderCache = RenderCache::new(); + assert_eq!(cache.capacity, DEFAULT_CAPACITY); + assert_eq!(cache.capacity, 256); + } + + #[test] + fn test_len_and_is_empty() { + let mut cache = RenderCache::with_capacity(4); + assert!(cache.is_empty()); + + cache.put(key(1, 80), "a"); + assert!(!cache.is_empty()); + assert_eq!(cache.len(), 1); + } + + #[test] + #[should_panic(expected = "capacity must be > 0")] + fn test_zero_capacity_panics() { + let _: RenderCache = RenderCache::with_capacity(0); + } + + #[test] + fn test_invalidate_width_on_empty_cache_is_noop() { + let mut cache: RenderCache<&str> = RenderCache::with_capacity(4); + cache.invalidate_width(80); + assert!(cache.is_empty()); + } +} diff --git a/crates/lore-tui/src/state/bootstrap.rs b/crates/lore-tui/src/state/bootstrap.rs new file mode 100644 index 0000000..780e489 --- /dev/null +++ b/crates/lore-tui/src/state/bootstrap.rs @@ -0,0 +1,160 @@ +#![allow(dead_code)] // Phase 2.5: consumed by Bootstrap screen + +//! Bootstrap screen state. +//! +//! Handles first-launch and empty-database scenarios. The schema +//! preflight runs before the TUI event loop; the bootstrap screen +//! 
guides users to sync when no data is available. + +// --------------------------------------------------------------------------- +// DataReadiness +// --------------------------------------------------------------------------- + +/// Result of checking whether the database has enough data to show the TUI. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct DataReadiness { + /// Database has at least one issue. + pub has_issues: bool, + /// Database has at least one merge request. + pub has_mrs: bool, + /// Database has at least one search document. + pub has_documents: bool, + /// Current schema version from the schema_version table. + pub schema_version: i32, +} + +impl DataReadiness { + /// Whether the database has any entity data at all. + #[must_use] + pub fn has_any_data(&self) -> bool { + self.has_issues || self.has_mrs + } +} + +// --------------------------------------------------------------------------- +// SchemaCheck +// --------------------------------------------------------------------------- + +/// Result of schema version validation. +#[derive(Debug, Clone, PartialEq, Eq)] +pub enum SchemaCheck { + /// Schema is at or above the minimum required version. + Compatible { version: i32 }, + /// No database or no schema_version table found. + NoDB, + /// Schema exists but is too old for this TUI version. + Incompatible { found: i32, minimum: i32 }, +} + +// --------------------------------------------------------------------------- +// BootstrapState +// --------------------------------------------------------------------------- + +/// State for the Bootstrap screen. +#[derive(Debug, Default)] +pub struct BootstrapState { + /// Whether a data readiness check has completed. + pub readiness: Option, + /// Whether the user has initiated a sync from the bootstrap screen. + pub sync_started: bool, +} + +impl BootstrapState { + /// Apply a data readiness result. 
+ pub fn apply_readiness(&mut self, readiness: DataReadiness) { + self.readiness = Some(readiness); + } + + /// Whether we have data (and should auto-transition to Dashboard). + #[must_use] + pub fn should_transition_to_dashboard(&self) -> bool { + self.readiness + .as_ref() + .is_some_and(DataReadiness::has_any_data) + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_data_readiness_has_any_data() { + let empty = DataReadiness { + has_issues: false, + has_mrs: false, + has_documents: false, + schema_version: 26, + }; + assert!(!empty.has_any_data()); + + let with_issues = DataReadiness { + has_issues: true, + ..empty.clone() + }; + assert!(with_issues.has_any_data()); + + let with_mrs = DataReadiness { + has_mrs: true, + ..empty + }; + assert!(with_mrs.has_any_data()); + } + + #[test] + fn test_schema_check_variants() { + let compat = SchemaCheck::Compatible { version: 26 }; + assert!(matches!(compat, SchemaCheck::Compatible { version: 26 })); + + let no_db = SchemaCheck::NoDB; + assert!(matches!(no_db, SchemaCheck::NoDB)); + + let incompat = SchemaCheck::Incompatible { + found: 10, + minimum: 20, + }; + assert!(matches!( + incompat, + SchemaCheck::Incompatible { + found: 10, + minimum: 20 + } + )); + } + + #[test] + fn test_bootstrap_state_default() { + let state = BootstrapState::default(); + assert!(state.readiness.is_none()); + assert!(!state.sync_started); + assert!(!state.should_transition_to_dashboard()); + } + + #[test] + fn test_bootstrap_state_apply_readiness_empty() { + let mut state = BootstrapState::default(); + state.apply_readiness(DataReadiness { + has_issues: false, + has_mrs: false, + has_documents: false, + schema_version: 26, + }); + assert!(!state.should_transition_to_dashboard()); + } + + #[test] + fn test_bootstrap_state_apply_readiness_with_data() { 
+ let mut state = BootstrapState::default(); + state.apply_readiness(DataReadiness { + has_issues: true, + has_mrs: false, + has_documents: false, + schema_version: 26, + }); + assert!(state.should_transition_to_dashboard()); + } +} diff --git a/crates/lore-tui/src/state/command_palette.rs b/crates/lore-tui/src/state/command_palette.rs index 63f2562..479d779 100644 --- a/crates/lore-tui/src/state/command_palette.rs +++ b/crates/lore-tui/src/state/command_palette.rs @@ -1,11 +1,304 @@ -#![allow(dead_code)] +//! Command palette state and fuzzy matching. +//! +//! The command palette is a modal overlay (Ctrl+P) that provides fuzzy-match +//! access to all commands. Populated from [`CommandRegistry::palette_entries`]. -//! Command palette state. +use crate::commands::{CommandId, CommandRegistry}; +use crate::message::Screen; + +// --------------------------------------------------------------------------- +// PaletteEntry +// --------------------------------------------------------------------------- + +/// A single entry in the filtered palette list. +#[derive(Debug, Clone)] +pub struct PaletteEntry { + /// Command ID for execution. + pub id: CommandId, + /// Human-readable label. + pub label: &'static str, + /// Keybinding display string (e.g., "g i"). + pub keybinding: Option, + /// Help text / description. + pub help_text: &'static str, +} + +// --------------------------------------------------------------------------- +// CommandPaletteState +// --------------------------------------------------------------------------- /// State for the command palette overlay. #[derive(Debug, Default)] pub struct CommandPaletteState { + /// Current query text. pub query: String, + /// Whether the query input is focused. pub query_focused: bool, + /// Cursor position within the query string (byte offset). + pub cursor: usize, + /// Index of the currently selected entry in `filtered`. pub selected_index: usize, + /// Filtered and scored palette entries. 
+ pub filtered: Vec, +} + +impl CommandPaletteState { + /// Open the palette: reset query, focus input, populate with all commands. + pub fn open(&mut self, registry: &CommandRegistry, screen: &Screen) { + self.query.clear(); + self.cursor = 0; + self.query_focused = true; + self.selected_index = 0; + self.refilter(registry, screen); + } + + /// Close the palette: unfocus and clear state. + pub fn close(&mut self) { + self.query_focused = false; + self.query.clear(); + self.cursor = 0; + self.selected_index = 0; + self.filtered.clear(); + } + + /// Insert a character at the cursor position. + pub fn insert_char(&mut self, c: char, registry: &CommandRegistry, screen: &Screen) { + self.query.insert(self.cursor, c); + self.cursor += c.len_utf8(); + self.selected_index = 0; + self.refilter(registry, screen); + } + + /// Delete the character before the cursor. + pub fn delete_back(&mut self, registry: &CommandRegistry, screen: &Screen) { + if self.cursor > 0 { + // Find the previous character boundary. + let prev = self.query[..self.cursor] + .char_indices() + .next_back() + .map_or(0, |(i, _)| i); + self.query.drain(prev..self.cursor); + self.cursor = prev; + self.selected_index = 0; + self.refilter(registry, screen); + } + } + + /// Move selection up by one. + pub fn select_prev(&mut self) { + self.selected_index = self.selected_index.saturating_sub(1); + } + + /// Move selection down by one. + pub fn select_next(&mut self) { + if !self.filtered.is_empty() { + self.selected_index = (self.selected_index + 1).min(self.filtered.len() - 1); + } + } + + /// Get the currently selected entry's command ID. + #[must_use] + pub fn selected_command_id(&self) -> Option { + self.filtered.get(self.selected_index).map(|e| e.id) + } + + /// Whether the palette is visible/active. + #[must_use] + pub fn is_open(&self) -> bool { + self.query_focused + } + + /// Recompute the filtered list from the registry. 
+ fn refilter(&mut self, registry: &CommandRegistry, screen: &Screen) { + let entries = registry.palette_entries(screen); + let query_lower = self.query.to_lowercase(); + + self.filtered = entries + .into_iter() + .filter(|cmd| { + if query_lower.is_empty() { + return true; + } + fuzzy_match(&query_lower, cmd.label) || fuzzy_match(&query_lower, cmd.help_text) + }) + .map(|cmd| PaletteEntry { + id: cmd.id, + label: cmd.label, + keybinding: cmd.keybinding.as_ref().map(|kb| kb.display()), + help_text: cmd.help_text, + }) + .collect(); + + // Clamp selection. + if !self.filtered.is_empty() { + self.selected_index = self.selected_index.min(self.filtered.len() - 1); + } else { + self.selected_index = 0; + } + } +} + +// --------------------------------------------------------------------------- +// Fuzzy matching +// --------------------------------------------------------------------------- + +/// Subsequence fuzzy match: every character in `query` must appear in `text` +/// in order, case-insensitive. 
+fn fuzzy_match(query: &str, text: &str) -> bool {
+    let text_lower = text.to_lowercase();
+    let mut text_chars = text_lower.chars();
+    for qc in query.chars() {
+        if !text_chars.any(|tc| tc == qc) {
+            return false;
+        }
+    }
+    true
+}
+
+// ---------------------------------------------------------------------------
+// Tests
+// ---------------------------------------------------------------------------
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::commands::build_registry;
+
+    #[test]
+    fn test_fuzzy_match_exact() {
+        assert!(fuzzy_match("quit", "Quit"));
+    }
+
+    #[test]
+    fn test_fuzzy_match_subsequence() {
+        assert!(fuzzy_match("gi", "Go to Issues"));
+        assert!(fuzzy_match("iss", "Go to Issues"));
+    }
+
+    #[test]
+    fn test_fuzzy_match_case_insensitive() {
+        assert!(fuzzy_match("help", "Show keybinding help overlay"));
+    }
+
+    #[test]
+    fn test_fuzzy_match_no_match() {
+        assert!(!fuzzy_match("xyz", "Quit"));
+    }
+
+    #[test]
+    fn test_fuzzy_match_empty_query() {
+        assert!(fuzzy_match("", "anything"));
+    }
+
+    #[test]
+    fn test_palette_open_populates_all() {
+        let registry = build_registry();
+        let mut state = CommandPaletteState::default();
+        state.open(&registry, &Screen::Dashboard);
+
+        assert!(state.query_focused);
+        assert!(state.query.is_empty());
+        assert!(!state.filtered.is_empty());
+        // All palette-eligible commands for Dashboard should be present.
+        let palette_count = registry.palette_entries(&Screen::Dashboard).len();
+        assert_eq!(state.filtered.len(), palette_count);
+    }
+
+    #[test]
+    fn test_palette_filter_narrows_results() {
+        let registry = build_registry();
+        let mut state = CommandPaletteState::default();
+        state.open(&registry, &Screen::Dashboard);
+
+        let all_count = state.filtered.len();
+        state.insert_char('i', &registry, &Screen::Dashboard);
+        state.insert_char('s', &registry, &Screen::Dashboard);
+        state.insert_char('s', &registry, &Screen::Dashboard);
+
+        // "iss" should match "Go to Issues" but not most other commands.
+        assert!(state.filtered.len() < all_count);
+        assert!(state.filtered.iter().any(|e| e.label == "Go to Issues"));
+    }
+
+    #[test]
+    fn test_palette_delete_back_widens_results() {
+        let registry = build_registry();
+        let mut state = CommandPaletteState::default();
+        state.open(&registry, &Screen::Dashboard);
+
+        state.insert_char('q', &registry, &Screen::Dashboard);
+        let narrow_count = state.filtered.len();
+        state.delete_back(&registry, &Screen::Dashboard);
+        // After deleting, query is empty — should show all commands again.
+        assert!(state.filtered.len() > narrow_count);
+    }
+
+    #[test]
+    fn test_palette_select_navigation() {
+        let registry = build_registry();
+        let mut state = CommandPaletteState::default();
+        state.open(&registry, &Screen::Dashboard);
+
+        assert_eq!(state.selected_index, 0);
+        state.select_next();
+        assert_eq!(state.selected_index, 1);
+        state.select_next();
+        assert_eq!(state.selected_index, 2);
+        state.select_prev();
+        assert_eq!(state.selected_index, 1);
+        state.select_prev();
+        assert_eq!(state.selected_index, 0);
+        state.select_prev(); // Should not go below 0.
+        assert_eq!(state.selected_index, 0);
+    }
+
+    #[test]
+    fn test_palette_selected_command_id() {
+        let registry = build_registry();
+        let mut state = CommandPaletteState::default();
+        state.open(&registry, &Screen::Dashboard);
+
+        assert!(state.selected_command_id().is_some());
+    }
+
+    #[test]
+    fn test_palette_close_resets() {
+        let registry = build_registry();
+        let mut state = CommandPaletteState::default();
+        state.open(&registry, &Screen::Dashboard);
+        state.insert_char('q', &registry, &Screen::Dashboard);
+        state.select_next();
+
+        state.close();
+        assert!(!state.query_focused);
+        assert!(state.query.is_empty());
+        assert_eq!(state.selected_index, 0);
+        assert!(state.filtered.is_empty());
+    }
+
+    #[test]
+    fn test_palette_empty_query_no_match_returns_empty() {
+        let registry = build_registry();
+        let mut state = CommandPaletteState::default();
+        state.open(&registry, &Screen::Dashboard);
+
+        // Type something that matches nothing.
+        for c in "zzzzzz".chars() {
+            state.insert_char(c, &registry, &Screen::Dashboard);
+        }
+        assert!(state.filtered.is_empty());
+        assert!(state.selected_command_id().is_none());
+    }
+
+    #[test]
+    fn test_palette_keybinding_display() {
+        let registry = build_registry();
+        let mut state = CommandPaletteState::default();
+        state.open(&registry, &Screen::Dashboard);
+
+        // "Quit" should have keybinding "q".
+        let quit_entry = state.filtered.iter().find(|e| e.id == "quit");
+        assert!(quit_entry.is_some());
+        assert_eq!(quit_entry.unwrap().keybinding.as_deref(), Some("q"));
+    }
+}
diff --git a/crates/lore-tui/src/state/file_history.rs b/crates/lore-tui/src/state/file_history.rs
new file mode 100644
index 0000000..4b46fa6
--- /dev/null
+++ b/crates/lore-tui/src/state/file_history.rs
@@ -0,0 +1,364 @@
+//! File History screen state — per-file MR timeline with rename tracking.
+//!
+//! Shows which MRs touched a file over time, resolving renames via BFS.
+//! Users enter a file path, toggle options (follow renames, merged only,
+//!
show discussions), and browse a chronological MR list.
+
+// ---------------------------------------------------------------------------
+// FileHistoryState
+// ---------------------------------------------------------------------------
+
+/// State for the File History screen.
+#[derive(Debug, Default)]
+pub struct FileHistoryState {
+    /// User-entered file path.
+    pub path_input: String,
+    /// Cursor position within `path_input` (byte offset).
+    pub path_cursor: usize,
+    /// Whether the path input field has keyboard focus.
+    pub path_focused: bool,
+
+    /// The most recent result (None until first query).
+    pub result: Option,
+
+    /// Index of the currently selected MR in the result list.
+    pub selected_mr_index: usize,
+    /// Vertical scroll offset for the MR list.
+    pub scroll_offset: u16,
+
+    /// Whether to follow rename chains (default false; the user toggles it on).
+    pub follow_renames: bool,
+    /// Whether to show only merged MRs (default false).
+    pub merged_only: bool,
+    /// Whether to show inline discussion snippets (default false).
+    pub show_discussions: bool,
+
+    /// Cached list of known file paths for autocomplete.
+    pub known_paths: Vec,
+    /// Filtered autocomplete matches for current input.
+    pub autocomplete_matches: Vec,
+    /// Currently highlighted autocomplete suggestion index.
+    pub autocomplete_index: usize,
+
+    /// Monotonic generation counter for stale-response detection.
+    pub generation: u64,
+    /// Whether a query is currently in-flight.
+    pub loading: bool,
+}
+
+// ---------------------------------------------------------------------------
+// Result types (local to TUI — avoids coupling to CLI command structs)
+// ---------------------------------------------------------------------------
+
+/// Full result of a file-history query.
+#[derive(Debug)]
+pub struct FileHistoryResult {
+    /// The queried file path.
+    pub path: String,
+    /// Resolved rename chain (may be just the original path).
+    pub rename_chain: Vec,
+    /// Whether renames were actually followed.
+ pub renames_followed: bool, + /// MRs that touched any path in the rename chain. + pub merge_requests: Vec, + /// DiffNote discussion snippets on the file (when requested). + pub discussions: Vec, + /// Total MR count (may exceed displayed count if limited). + pub total_mrs: usize, + /// Number of distinct file paths searched. + pub paths_searched: usize, +} + +/// A single MR that touched the file. +#[derive(Debug)] +pub struct FileHistoryMr { + pub iid: i64, + pub title: String, + /// "merged", "opened", or "closed". + pub state: String, + pub author_username: String, + /// "added", "modified", "deleted", or "renamed". + pub change_type: String, + pub merged_at_ms: Option, + pub updated_at_ms: i64, + pub merge_commit_sha: Option, +} + +/// A DiffNote discussion snippet on the file. +#[derive(Debug)] +pub struct FileDiscussion { + pub discussion_id: String, + pub author_username: String, + pub body_snippet: String, + pub path: String, + pub created_at_ms: i64, +} + +// --------------------------------------------------------------------------- +// State methods +// --------------------------------------------------------------------------- + +impl FileHistoryState { + /// Enter the screen: focus the path input. + pub fn enter(&mut self) { + self.path_focused = true; + self.path_cursor = self.path_input.len(); + } + + /// Leave the screen: blur all inputs. + pub fn leave(&mut self) { + self.path_focused = false; + } + + /// Whether any text input has focus. + #[must_use] + pub fn has_text_focus(&self) -> bool { + self.path_focused + } + + /// Blur all inputs. + pub fn blur(&mut self) { + self.path_focused = false; + } + + /// Submit the current path (trigger a query). + /// Returns the generation for stale detection. + pub fn submit(&mut self) -> u64 { + self.loading = true; + self.bump_generation() + } + + /// Apply query results if generation matches. 
+ pub fn apply_results(&mut self, generation: u64, result: FileHistoryResult) { + if generation != self.generation { + return; // Stale response — discard. + } + self.result = Some(result); + self.loading = false; + self.selected_mr_index = 0; + self.scroll_offset = 0; + } + + /// Toggle follow_renames. Returns new generation for re-query. + pub fn toggle_follow_renames(&mut self) -> u64 { + self.follow_renames = !self.follow_renames; + self.bump_generation() + } + + /// Toggle merged_only. Returns new generation for re-query. + pub fn toggle_merged_only(&mut self) -> u64 { + self.merged_only = !self.merged_only; + self.bump_generation() + } + + /// Toggle show_discussions. Returns new generation for re-query. + pub fn toggle_show_discussions(&mut self) -> u64 { + self.show_discussions = !self.show_discussions; + self.bump_generation() + } + + // --- Input field operations --- + + /// Insert a char at cursor. + pub fn insert_char(&mut self, c: char) { + if self.path_focused { + self.path_input.insert(self.path_cursor, c); + self.path_cursor += c.len_utf8(); + } + } + + /// Delete the char before cursor. + pub fn delete_char_before_cursor(&mut self) { + if self.path_focused && self.path_cursor > 0 { + let prev = prev_char_boundary(&self.path_input, self.path_cursor); + self.path_input.drain(prev..self.path_cursor); + self.path_cursor = prev; + } + } + + /// Move cursor left. + pub fn cursor_left(&mut self) { + if self.path_focused && self.path_cursor > 0 { + self.path_cursor = prev_char_boundary(&self.path_input, self.path_cursor); + } + } + + /// Move cursor right. + pub fn cursor_right(&mut self) { + if self.path_focused && self.path_cursor < self.path_input.len() { + self.path_cursor = next_char_boundary(&self.path_input, self.path_cursor); + } + } + + // --- Selection navigation --- + + /// Move selection up. 
+ pub fn select_prev(&mut self) { + self.selected_mr_index = self.selected_mr_index.saturating_sub(1); + } + + /// Move selection down (bounded by result count). + pub fn select_next(&mut self, result_count: usize) { + if result_count > 0 { + self.selected_mr_index = (self.selected_mr_index + 1).min(result_count - 1); + } + } + + /// Ensure the selected row is visible within the viewport. + pub fn ensure_visible(&mut self, viewport_height: usize) { + if viewport_height == 0 { + return; + } + let offset = self.scroll_offset as usize; + if self.selected_mr_index < offset { + self.scroll_offset = self.selected_mr_index as u16; + } else if self.selected_mr_index >= offset + viewport_height { + self.scroll_offset = (self.selected_mr_index - viewport_height + 1) as u16; + } + } + + // --- Internal --- + + fn bump_generation(&mut self) -> u64 { + self.generation += 1; + self.generation + } +} + +/// Find the byte offset of the previous char boundary. +fn prev_char_boundary(s: &str, pos: usize) -> usize { + let mut i = pos.saturating_sub(1); + while i > 0 && !s.is_char_boundary(i) { + i -= 1; + } + i +} + +/// Find the byte offset of the next char boundary. 
+fn next_char_boundary(s: &str, pos: usize) -> usize { + let mut i = pos + 1; + while i < s.len() && !s.is_char_boundary(i) { + i += 1; + } + i +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_default_state() { + let state = FileHistoryState::default(); + assert!(state.path_input.is_empty()); + assert!(!state.path_focused); + assert!(state.result.is_none()); + assert!(!state.follow_renames); // Default false, toggled on by user + assert!(!state.merged_only); + assert!(!state.show_discussions); + assert_eq!(state.generation, 0); + } + + #[test] + fn test_enter_focuses_path() { + let mut state = FileHistoryState { + path_input: "src/lib.rs".into(), + ..FileHistoryState::default() + }; + state.enter(); + assert!(state.path_focused); + assert_eq!(state.path_cursor, 10); + } + + #[test] + fn test_submit_bumps_generation() { + let mut state = FileHistoryState::default(); + let generation = state.submit(); + assert_eq!(generation, 1); + assert!(state.loading); + } + + #[test] + fn test_stale_response_discarded() { + let mut state = FileHistoryState::default(); + let stale_gen = state.submit(); + // Bump again (user toggled an option). + let _new_gen = state.toggle_merged_only(); + // Stale result arrives. + state.apply_results( + stale_gen, + FileHistoryResult { + path: "src/lib.rs".into(), + rename_chain: vec!["src/lib.rs".into()], + renames_followed: false, + merge_requests: vec![], + discussions: vec![], + total_mrs: 0, + paths_searched: 1, + }, + ); + assert!(state.result.is_none()); // Discarded. 
+ } + + #[test] + fn test_toggle_options_bump_generation() { + let mut state = FileHistoryState::default(); + let g1 = state.toggle_follow_renames(); + assert_eq!(g1, 1); + assert!(state.follow_renames); + + let g2 = state.toggle_merged_only(); + assert_eq!(g2, 2); + assert!(state.merged_only); + + let g3 = state.toggle_show_discussions(); + assert_eq!(g3, 3); + assert!(state.show_discussions); + } + + #[test] + fn test_insert_and_delete_char() { + let mut state = FileHistoryState { + path_focused: true, + ..FileHistoryState::default() + }; + state.insert_char('s'); + state.insert_char('r'); + state.insert_char('c'); + assert_eq!(state.path_input, "src"); + assert_eq!(state.path_cursor, 3); + + state.delete_char_before_cursor(); + assert_eq!(state.path_input, "sr"); + assert_eq!(state.path_cursor, 2); + } + + #[test] + fn test_select_prev_next() { + let mut state = FileHistoryState::default(); + state.select_next(5); + assert_eq!(state.selected_mr_index, 1); + state.select_next(5); + assert_eq!(state.selected_mr_index, 2); + state.select_prev(); + assert_eq!(state.selected_mr_index, 1); + state.select_prev(); + assert_eq!(state.selected_mr_index, 0); + state.select_prev(); // Should not underflow. + assert_eq!(state.selected_mr_index, 0); + } + + #[test] + fn test_ensure_visible() { + let mut state = FileHistoryState { + selected_mr_index: 15, + ..FileHistoryState::default() + }; + state.ensure_visible(5); + assert_eq!(state.scroll_offset, 11); // 15 - 5 + 1 + } +} diff --git a/crates/lore-tui/src/state/mod.rs b/crates/lore-tui/src/state/mod.rs index e67271c..8d5383f 100644 --- a/crates/lore-tui/src/state/mod.rs +++ b/crates/lore-tui/src/state/mod.rs @@ -13,8 +13,10 @@ //! [`LoreApp`](crate::app::LoreApp) which dispatches through the //! [`TaskSupervisor`](crate::task_supervisor::TaskSupervisor). 
+pub mod bootstrap; pub mod command_palette; pub mod dashboard; +pub mod file_history; pub mod issue_detail; pub mod issue_list; pub mod mr_detail; @@ -22,6 +24,7 @@ pub mod mr_list; pub mod search; pub mod sync; pub mod timeline; +pub mod trace; pub mod who; use std::collections::{HashMap, HashSet}; @@ -29,8 +32,10 @@ use std::collections::{HashMap, HashSet}; use crate::message::Screen; // Re-export screen states for convenience. +pub use bootstrap::BootstrapState; pub use command_palette::CommandPaletteState; pub use dashboard::DashboardState; +pub use file_history::FileHistoryState; pub use issue_detail::IssueDetailState; pub use issue_list::IssueListState; pub use mr_detail::MrDetailState; @@ -38,6 +43,7 @@ pub use mr_list::MrListState; pub use search::SearchState; pub use sync::SyncState; pub use timeline::TimelineState; +pub use trace::TraceState; pub use who::WhoState; // --------------------------------------------------------------------------- @@ -163,6 +169,7 @@ pub struct ScopeContext { #[derive(Debug, Default)] pub struct AppState { // Per-screen states. + pub bootstrap: BootstrapState, pub dashboard: DashboardState, pub issue_list: IssueListState, pub issue_detail: IssueDetailState, @@ -171,6 +178,8 @@ pub struct AppState { pub search: SearchState, pub timeline: TimelineState, pub who: WhoState, + pub trace: TraceState, + pub file_history: FileHistoryState, pub sync: SyncState, pub command_palette: CommandPaletteState, @@ -205,6 +214,9 @@ impl AppState { || self.mr_list.filter_focused || self.search.query_focused || self.command_palette.query_focused + || self.who.has_text_focus() + || self.trace.has_text_focus() + || self.file_history.has_text_focus() } /// Remove focus from all text inputs. 
@@ -213,6 +225,9 @@ impl AppState { self.mr_list.filter_focused = false; self.search.query_focused = false; self.command_palette.query_focused = false; + self.who.blur(); + self.trace.blur(); + self.file_history.blur(); } } diff --git a/crates/lore-tui/src/state/search.rs b/crates/lore-tui/src/state/search.rs index 3b2cf3b..ec6d46f 100644 --- a/crates/lore-tui/src/state/search.rs +++ b/crates/lore-tui/src/state/search.rs @@ -1,14 +1,569 @@ #![allow(dead_code)] -//! Search screen state. +//! Search screen state — query input, mode selection, capability detection. +//! +//! The search screen supports three modes ([`SearchMode`]): Lexical (FTS5), +//! Hybrid (FTS+vector RRF), and Semantic (vector-only). Available modes are +//! gated by [`SearchCapabilities`], which probes the database on screen entry. -use crate::message::SearchResult; +use crate::message::{SearchMode, SearchResult}; + +// --------------------------------------------------------------------------- +// SearchCapabilities +// --------------------------------------------------------------------------- + +/// What search indexes are available in the local database. +/// +/// Detected once on screen entry by probing FTS and embedding tables. +/// Used to gate which [`SearchMode`] values are selectable. +#[derive(Debug, Clone, PartialEq)] +pub struct SearchCapabilities { + /// FTS5 `documents_fts` table has rows. + pub has_fts: bool, + /// `embedding_metadata` table has rows. + pub has_embeddings: bool, + /// Percentage of documents that have embeddings (0.0–100.0). + pub embedding_coverage_pct: f32, +} + +impl Default for SearchCapabilities { + fn default() -> Self { + Self { + has_fts: false, + has_embeddings: false, + embedding_coverage_pct: 0.0, + } + } +} + +impl SearchCapabilities { + /// Whether the given mode is usable with these capabilities. 
+ #[must_use] + pub fn supports_mode(&self, mode: SearchMode) -> bool { + match mode { + SearchMode::Lexical => self.has_fts, + SearchMode::Hybrid => self.has_fts && self.has_embeddings, + SearchMode::Semantic => self.has_embeddings, + } + } + + /// The best default mode given current capabilities. + #[must_use] + pub fn best_default_mode(&self) -> SearchMode { + if self.has_fts && self.has_embeddings { + SearchMode::Hybrid + } else if self.has_fts { + SearchMode::Lexical + } else if self.has_embeddings { + SearchMode::Semantic + } else { + SearchMode::Lexical // Fallback; UI will show "no indexes" message + } + } + + /// Whether any search index is available at all. + #[must_use] + pub fn has_any_index(&self) -> bool { + self.has_fts || self.has_embeddings + } +} + +// --------------------------------------------------------------------------- +// SearchState +// --------------------------------------------------------------------------- /// State for the search screen. #[derive(Debug, Default)] pub struct SearchState { + /// Current query text. pub query: String, + /// Whether the query input has keyboard focus. pub query_focused: bool, + /// Cursor position within the query string (byte offset). + pub cursor: usize, + /// Active search mode. + pub mode: SearchMode, + /// Available search capabilities (detected on screen entry). + pub capabilities: SearchCapabilities, + /// Current result set. pub results: Vec, + /// Index of the selected result in the list. pub selected_index: usize, + /// Monotonic generation counter for stale-response detection. + pub generation: u64, + /// Whether a search request is in-flight. + pub loading: bool, +} + +impl SearchState { + /// Enter the search screen: focus query, detect capabilities. + pub fn enter(&mut self, capabilities: SearchCapabilities) { + self.query_focused = true; + self.cursor = self.query.len(); + self.capabilities = capabilities; + // Pick the best mode for detected capabilities. 
+ if !self.capabilities.supports_mode(self.mode) { + self.mode = self.capabilities.best_default_mode(); + } + } + + /// Leave the search screen: blur focus. + pub fn leave(&mut self) { + self.query_focused = false; + } + + /// Focus the query input. + pub fn focus_query(&mut self) { + self.query_focused = true; + self.cursor = self.query.len(); + } + + /// Blur the query input. + pub fn blur_query(&mut self) { + self.query_focused = false; + } + + /// Insert a character at the cursor position. + /// + /// Returns the new generation (caller should arm debounce timer). + pub fn insert_char(&mut self, c: char) -> u64 { + self.query.insert(self.cursor, c); + self.cursor += c.len_utf8(); + self.generation += 1; + self.generation + } + + /// Delete the character before the cursor (backspace). + /// + /// Returns the new generation if changed, or `None` if cursor was at start. + pub fn delete_back(&mut self) -> Option { + if self.cursor == 0 { + return None; + } + let prev = self.query[..self.cursor] + .char_indices() + .next_back() + .map_or(0, |(i, _)| i); + self.query.drain(prev..self.cursor); + self.cursor = prev; + self.generation += 1; + Some(self.generation) + } + + /// Move cursor left by one character. + pub fn cursor_left(&mut self) { + if self.cursor > 0 { + self.cursor = self.query[..self.cursor] + .char_indices() + .next_back() + .map_or(0, |(i, _)| i); + } + } + + /// Move cursor right by one character. + pub fn cursor_right(&mut self) { + if self.cursor < self.query.len() { + self.cursor = self.query[self.cursor..] + .chars() + .next() + .map_or(self.query.len(), |ch| self.cursor + ch.len_utf8()); + } + } + + /// Move cursor to the start of the query. + pub fn cursor_home(&mut self) { + self.cursor = 0; + } + + /// Move cursor to the end of the query. + pub fn cursor_end(&mut self) { + self.cursor = self.query.len(); + } + + /// Cycle to the next available search mode (skip unsupported modes). 
+ pub fn cycle_mode(&mut self) { + let start = self.mode; + let mut candidate = start.next(); + // Cycle through at most 3 modes to find a supported one. + for _ in 0..3 { + if self.capabilities.supports_mode(candidate) { + self.mode = candidate; + return; + } + candidate = candidate.next(); + } + // No supported mode found (shouldn't happen if has_any_index is true). + } + + /// Apply search results from an async response. + /// + /// Only applies if the generation matches (stale guard). + pub fn apply_results(&mut self, generation: u64, results: Vec) { + if generation != self.generation { + return; // Stale response — discard. + } + self.results = results; + self.selected_index = 0; + self.loading = false; + } + + /// Move selection up in the results list. + pub fn select_prev(&mut self) { + self.selected_index = self.selected_index.saturating_sub(1); + } + + /// Move selection down in the results list. + pub fn select_next(&mut self) { + if !self.results.is_empty() { + self.selected_index = (self.selected_index + 1).min(self.results.len() - 1); + } + } + + /// Get the currently selected result, if any. 
+ #[must_use] + pub fn selected_result(&self) -> Option<&SearchResult> { + self.results.get(self.selected_index) + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::message::{EntityKey, SearchMode}; + + fn fts_only() -> SearchCapabilities { + SearchCapabilities { + has_fts: true, + has_embeddings: false, + embedding_coverage_pct: 0.0, + } + } + + fn full_caps() -> SearchCapabilities { + SearchCapabilities { + has_fts: true, + has_embeddings: true, + embedding_coverage_pct: 85.0, + } + } + + fn embeddings_only() -> SearchCapabilities { + SearchCapabilities { + has_fts: false, + has_embeddings: true, + embedding_coverage_pct: 100.0, + } + } + + fn no_indexes() -> SearchCapabilities { + SearchCapabilities::default() + } + + fn sample_result(iid: i64) -> SearchResult { + SearchResult { + key: EntityKey::issue(1, iid), + title: format!("Issue #{iid}"), + score: 0.95, + snippet: "matched text here".into(), + project_path: "group/project".into(), + } + } + + // -- SearchCapabilities tests -- + + #[test] + fn test_capabilities_supports_mode_fts_only() { + let caps = fts_only(); + assert!(caps.supports_mode(SearchMode::Lexical)); + assert!(!caps.supports_mode(SearchMode::Hybrid)); + assert!(!caps.supports_mode(SearchMode::Semantic)); + } + + #[test] + fn test_capabilities_supports_mode_full() { + let caps = full_caps(); + assert!(caps.supports_mode(SearchMode::Lexical)); + assert!(caps.supports_mode(SearchMode::Hybrid)); + assert!(caps.supports_mode(SearchMode::Semantic)); + } + + #[test] + fn test_capabilities_supports_mode_embeddings_only() { + let caps = embeddings_only(); + assert!(!caps.supports_mode(SearchMode::Lexical)); + assert!(!caps.supports_mode(SearchMode::Hybrid)); + assert!(caps.supports_mode(SearchMode::Semantic)); + } + + #[test] + fn 
test_capabilities_best_default_hybrid_when_both() { + assert_eq!(full_caps().best_default_mode(), SearchMode::Hybrid); + } + + #[test] + fn test_capabilities_best_default_lexical_when_fts_only() { + assert_eq!(fts_only().best_default_mode(), SearchMode::Lexical); + } + + #[test] + fn test_capabilities_best_default_semantic_when_embeddings_only() { + assert_eq!(embeddings_only().best_default_mode(), SearchMode::Semantic); + } + + #[test] + fn test_capabilities_best_default_lexical_when_none() { + assert_eq!(no_indexes().best_default_mode(), SearchMode::Lexical); + } + + #[test] + fn test_capabilities_has_any_index() { + assert!(fts_only().has_any_index()); + assert!(full_caps().has_any_index()); + assert!(embeddings_only().has_any_index()); + assert!(!no_indexes().has_any_index()); + } + + // -- SearchState tests -- + + #[test] + fn test_enter_focuses_and_preserves_supported_mode() { + let mut state = SearchState::default(); + // Default mode is Lexical, which full_caps supports — preserved. + state.enter(full_caps()); + assert!(state.query_focused); + assert_eq!(state.mode, SearchMode::Lexical); + } + + #[test] + fn test_enter_preserves_mode_if_supported() { + let mut state = SearchState { + mode: SearchMode::Lexical, + ..SearchState::default() + }; + state.enter(full_caps()); + // Lexical is supported by full_caps, so it stays. + assert_eq!(state.mode, SearchMode::Lexical); + } + + #[test] + fn test_enter_overrides_unsupported_mode() { + let mut state = SearchState { + mode: SearchMode::Hybrid, + ..SearchState::default() + }; + state.enter(fts_only()); + // Hybrid requires embeddings, so fallback to Lexical. 
+ assert_eq!(state.mode, SearchMode::Lexical); + } + + #[test] + fn test_insert_char_and_cursor() { + let mut state = SearchState::default(); + let generation1 = state.insert_char('h'); + let generation2 = state.insert_char('i'); + assert_eq!(state.query, "hi"); + assert_eq!(state.cursor, 2); + assert_eq!(generation1, 1); + assert_eq!(generation2, 2); + } + + #[test] + fn test_delete_back() { + let mut state = SearchState::default(); + state.insert_char('a'); + state.insert_char('b'); + state.insert_char('c'); + + let generation = state.delete_back(); + assert!(generation.is_some()); + assert_eq!(state.query, "ab"); + assert_eq!(state.cursor, 2); + } + + #[test] + fn test_delete_back_at_start_returns_none() { + let mut state = SearchState::default(); + state.insert_char('a'); + state.cursor = 0; + assert!(state.delete_back().is_none()); + assert_eq!(state.query, "a"); + } + + #[test] + fn test_cursor_movement() { + let mut state = SearchState::default(); + state.insert_char('a'); + state.insert_char('b'); + state.insert_char('c'); + assert_eq!(state.cursor, 3); + + state.cursor_left(); + assert_eq!(state.cursor, 2); + state.cursor_left(); + assert_eq!(state.cursor, 1); + state.cursor_right(); + assert_eq!(state.cursor, 2); + state.cursor_home(); + assert_eq!(state.cursor, 0); + state.cursor_end(); + assert_eq!(state.cursor, 3); + } + + #[test] + fn test_cursor_left_at_start_is_noop() { + let mut state = SearchState::default(); + state.cursor_left(); + assert_eq!(state.cursor, 0); + } + + #[test] + fn test_cursor_right_at_end_is_noop() { + let mut state = SearchState::default(); + state.insert_char('x'); + state.cursor_right(); + assert_eq!(state.cursor, 1); + } + + #[test] + fn test_cycle_mode_full_caps() { + let mut state = SearchState { + capabilities: full_caps(), + mode: SearchMode::Lexical, + ..SearchState::default() + }; + + state.cycle_mode(); + assert_eq!(state.mode, SearchMode::Hybrid); + state.cycle_mode(); + assert_eq!(state.mode, SearchMode::Semantic); 
+ state.cycle_mode(); + assert_eq!(state.mode, SearchMode::Lexical); + } + + #[test] + fn test_cycle_mode_fts_only_stays_lexical() { + let mut state = SearchState { + capabilities: fts_only(), + mode: SearchMode::Lexical, + ..SearchState::default() + }; + + state.cycle_mode(); + // Hybrid and Semantic unsupported, wraps back to Lexical. + assert_eq!(state.mode, SearchMode::Lexical); + } + + #[test] + fn test_cycle_mode_embeddings_only() { + let mut state = SearchState { + capabilities: embeddings_only(), + mode: SearchMode::Semantic, + ..SearchState::default() + }; + + state.cycle_mode(); + // Lexical and Hybrid unsupported, wraps back to Semantic. + assert_eq!(state.mode, SearchMode::Semantic); + } + + #[test] + fn test_apply_results_matching_generation() { + let mut state = SearchState::default(); + let generation = state.insert_char('q'); + + let results = vec![sample_result(1), sample_result(2)]; + state.apply_results(generation, results); + + assert_eq!(state.results.len(), 2); + assert_eq!(state.selected_index, 0); + assert!(!state.loading); + } + + #[test] + fn test_apply_results_stale_generation_discarded() { + let mut state = SearchState::default(); + state.insert_char('q'); // gen=1 + state.insert_char('u'); // gen=2 + + let stale_results = vec![sample_result(99)]; + state.apply_results(1, stale_results); // gen 1 is stale + + assert!(state.results.is_empty()); + } + + #[test] + fn test_select_prev_next() { + let mut state = SearchState { + results: vec![sample_result(1), sample_result(2), sample_result(3)], + ..SearchState::default() + }; + + assert_eq!(state.selected_index, 0); + state.select_next(); + assert_eq!(state.selected_index, 1); + state.select_next(); + assert_eq!(state.selected_index, 2); + state.select_next(); // Clamps at end. + assert_eq!(state.selected_index, 2); + state.select_prev(); + assert_eq!(state.selected_index, 1); + state.select_prev(); + assert_eq!(state.selected_index, 0); + state.select_prev(); // Clamps at start. 
+ assert_eq!(state.selected_index, 0); + } + + #[test] + fn test_selected_result() { + let mut state = SearchState::default(); + assert!(state.selected_result().is_none()); + + state.results = vec![sample_result(42)]; + let result = state.selected_result().unwrap(); + assert_eq!(result.key.iid, 42); + } + + #[test] + fn test_leave_blurs_focus() { + let mut state = SearchState::default(); + state.enter(fts_only()); + assert!(state.query_focused); + state.leave(); + assert!(!state.query_focused); + } + + #[test] + fn test_focus_query_moves_cursor_to_end() { + let mut state = SearchState { + query: "hello".into(), + cursor: 0, + ..SearchState::default() + }; + state.focus_query(); + assert!(state.query_focused); + assert_eq!(state.cursor, 5); + } + + #[test] + fn test_unicode_cursor_handling() { + let mut state = SearchState::default(); + // Insert a multi-byte character. + state.insert_char('田'); + assert_eq!(state.cursor, 3); // 田 is 3 bytes in UTF-8 + state.insert_char('中'); + assert_eq!(state.cursor, 6); + + state.cursor_left(); + assert_eq!(state.cursor, 3); + state.cursor_right(); + assert_eq!(state.cursor, 6); + + state.delete_back(); + assert_eq!(state.query, "田"); + assert_eq!(state.cursor, 3); + } } diff --git a/crates/lore-tui/src/state/timeline.rs b/crates/lore-tui/src/state/timeline.rs index 04172e4..c08272a 100644 --- a/crates/lore-tui/src/state/timeline.rs +++ b/crates/lore-tui/src/state/timeline.rs @@ -1,12 +1,271 @@ #![allow(dead_code)] -//! Timeline screen state. +//! Timeline screen state — event stream, scope filtering, navigation. +//! +//! The timeline displays a chronological event stream from resource event +//! tables. Events can be scoped to a specific entity, author, or shown +//! globally. [`TimelineScope`] gates the query; [`TimelineState`] manages +//! the scroll position, selected event, and generation counter for +//! stale-response detection. 
-use crate::message::TimelineEvent; +use crate::message::{EntityKey, TimelineEvent}; + +// --------------------------------------------------------------------------- +// TimelineScope +// --------------------------------------------------------------------------- + +/// Scope filter for the timeline event query. +#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub enum TimelineScope { + /// All events across all entities. + #[default] + All, + /// Events for a specific entity (issue or MR). + Entity(EntityKey), + /// Events by a specific actor. + Author(String), +} + +// --------------------------------------------------------------------------- +// TimelineState +// --------------------------------------------------------------------------- /// State for the timeline screen. #[derive(Debug, Default)] pub struct TimelineState { + /// Loaded timeline events (sorted by timestamp, most recent first). pub events: Vec, - pub scroll_offset: u16, + /// Active scope filter. + pub scope: TimelineScope, + /// Index of the selected event in the list. + pub selected_index: usize, + /// Scroll offset for the visible window. + pub scroll_offset: usize, + /// Monotonic generation counter for stale-response detection. + pub generation: u64, + /// Whether a fetch is in-flight. + pub loading: bool, +} + +impl TimelineState { + /// Enter the timeline screen. Bumps generation for fresh data. + pub fn enter(&mut self) -> u64 { + self.generation += 1; + self.loading = true; + self.generation + } + + /// Set the scope filter and bump generation. + /// + /// Returns the new generation (caller should trigger a re-fetch). + pub fn set_scope(&mut self, scope: TimelineScope) -> u64 { + self.scope = scope; + self.generation += 1; + self.loading = true; + self.generation + } + + /// Apply timeline events from an async response. + /// + /// Only applies if the generation matches (stale guard). 
+ pub fn apply_results(&mut self, generation: u64, events: Vec) { + if generation != self.generation { + return; // Stale response — discard. + } + self.events = events; + self.selected_index = 0; + self.scroll_offset = 0; + self.loading = false; + } + + /// Move selection up in the event list. + pub fn select_prev(&mut self) { + self.selected_index = self.selected_index.saturating_sub(1); + } + + /// Move selection down in the event list. + pub fn select_next(&mut self) { + if !self.events.is_empty() { + self.selected_index = (self.selected_index + 1).min(self.events.len() - 1); + } + } + + /// Get the currently selected event, if any. + #[must_use] + pub fn selected_event(&self) -> Option<&TimelineEvent> { + self.events.get(self.selected_index) + } + + /// Ensure the selected index is visible given the viewport height. + /// + /// Adjusts `scroll_offset` so the selected item is within the + /// visible window. + pub fn ensure_visible(&mut self, viewport_height: usize) { + if viewport_height == 0 { + return; + } + if self.selected_index < self.scroll_offset { + self.scroll_offset = self.selected_index; + } else if self.selected_index >= self.scroll_offset + viewport_height { + self.scroll_offset = self.selected_index - viewport_height + 1; + } + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::message::TimelineEventKind; + + fn sample_event(timestamp_ms: i64, iid: i64) -> TimelineEvent { + TimelineEvent { + timestamp_ms, + entity_key: EntityKey::issue(1, iid), + event_kind: TimelineEventKind::Created, + summary: format!("Issue #{iid} created"), + detail: None, + actor: Some("alice".into()), + project_path: "group/project".into(), + } + } + + #[test] + fn test_timeline_scope_default_is_all() { + assert_eq!(TimelineScope::default(), TimelineScope::All); + } + + #[test] + fn 
test_enter_bumps_generation() { + let mut state = TimelineState::default(); + let generation = state.enter(); + assert_eq!(generation, 1); + assert!(state.loading); + } + + #[test] + fn test_set_scope_bumps_generation() { + let mut state = TimelineState::default(); + let gen1 = state.set_scope(TimelineScope::Author("bob".into())); + assert_eq!(gen1, 1); + assert_eq!(state.scope, TimelineScope::Author("bob".into())); + + let gen2 = state.set_scope(TimelineScope::All); + assert_eq!(gen2, 2); + } + + #[test] + fn test_apply_results_matching_generation() { + let mut state = TimelineState::default(); + let generation = state.enter(); + + let events = vec![sample_event(3000, 1), sample_event(2000, 2)]; + state.apply_results(generation, events); + + assert_eq!(state.events.len(), 2); + assert_eq!(state.selected_index, 0); + assert!(!state.loading); + } + + #[test] + fn test_apply_results_stale_generation_discarded() { + let mut state = TimelineState::default(); + state.enter(); // gen=1 + let _gen2 = state.enter(); // gen=2 + + let stale_events = vec![sample_event(1000, 99)]; + state.apply_results(1, stale_events); // gen 1 is stale + + assert!(state.events.is_empty()); + } + + #[test] + fn test_select_prev_next() { + let mut state = TimelineState { + events: vec![ + sample_event(3000, 1), + sample_event(2000, 2), + sample_event(1000, 3), + ], + ..TimelineState::default() + }; + + assert_eq!(state.selected_index, 0); + state.select_next(); + assert_eq!(state.selected_index, 1); + state.select_next(); + assert_eq!(state.selected_index, 2); + state.select_next(); // Clamps at end. + assert_eq!(state.selected_index, 2); + state.select_prev(); + assert_eq!(state.selected_index, 1); + state.select_prev(); + assert_eq!(state.selected_index, 0); + state.select_prev(); // Clamps at start. 
+ assert_eq!(state.selected_index, 0); + } + + #[test] + fn test_selected_event() { + let mut state = TimelineState::default(); + assert!(state.selected_event().is_none()); + + state.events = vec![sample_event(3000, 42)]; + let event = state.selected_event().unwrap(); + assert_eq!(event.entity_key.iid, 42); + } + + #[test] + fn test_ensure_visible_scrolls_down() { + let mut state = TimelineState { + events: vec![ + sample_event(5000, 1), + sample_event(4000, 2), + sample_event(3000, 3), + sample_event(2000, 4), + sample_event(1000, 5), + ], + selected_index: 4, + scroll_offset: 0, + ..TimelineState::default() + }; + state.ensure_visible(3); + assert_eq!(state.scroll_offset, 2); // 4 - 3 + 1 = 2 + } + + #[test] + fn test_ensure_visible_scrolls_up() { + let mut state = TimelineState { + events: vec![ + sample_event(5000, 1), + sample_event(4000, 2), + sample_event(3000, 3), + ], + selected_index: 0, + scroll_offset: 2, + ..TimelineState::default() + }; + state.ensure_visible(3); + assert_eq!(state.scroll_offset, 0); + } + + #[test] + fn test_ensure_visible_zero_viewport() { + let mut state = TimelineState { + scroll_offset: 5, + ..TimelineState::default() + }; + state.ensure_visible(0); + assert_eq!(state.scroll_offset, 5); // Unchanged. + } + + #[test] + fn test_select_next_on_empty_is_noop() { + let mut state = TimelineState::default(); + state.select_next(); + assert_eq!(state.selected_index, 0); + } } diff --git a/crates/lore-tui/src/state/trace.rs b/crates/lore-tui/src/state/trace.rs new file mode 100644 index 0000000..5cf9798 --- /dev/null +++ b/crates/lore-tui/src/state/trace.rs @@ -0,0 +1,556 @@ +//! Trace screen state — file → MR → issue chain drill-down. +//! +//! Users enter a file path, and the trace query resolves rename chains, +//! finds MRs that touched the file, links issues via entity_references, +//! and extracts DiffNote discussions. Each result chain can be +//! expanded/collapsed independently. 
+ +use std::collections::HashSet; + +use lore::core::trace::TraceResult; + +// --------------------------------------------------------------------------- +// TraceState +// --------------------------------------------------------------------------- + +/// State for the Trace screen. +#[derive(Debug, Default)] +pub struct TraceState { + /// User-entered file path (with optional :line suffix). + pub path_input: String, + /// Cursor position within `path_input`. + pub path_cursor: usize, + /// Whether the path input field has keyboard focus. + pub path_focused: bool, + + /// Parsed line filter from `:N` suffix (stored but not yet used for highlighting). + pub line_filter: Option, + + /// The most recent trace result (None until first query). + pub result: Option, + + /// Index of the currently selected chain in the trace result. + pub selected_chain_index: usize, + /// Set of chain indices that are currently expanded. + pub expanded_chains: HashSet, + + /// Whether to follow rename chains in the query (default true). + pub follow_renames: bool, + /// Whether to include DiffNote discussions (default true). + pub include_discussions: bool, + + /// Vertical scroll offset for the chain list. + pub scroll_offset: u16, + + /// Cached list of known file paths for autocomplete. + pub known_paths: Vec, + /// Filtered autocomplete matches for current input. + pub autocomplete_matches: Vec, + /// Currently highlighted autocomplete suggestion index. + pub autocomplete_index: usize, + + /// Generation counter for stale response guard. + pub generation: u64, + /// Whether a query is in flight. + pub loading: bool, +} + +impl TraceState { + /// Initialize defaults for a fresh Trace screen entry. + pub fn enter(&mut self) { + self.path_focused = true; + self.follow_renames = true; + self.include_discussions = true; + } + + /// Clean up when leaving the Trace screen. + pub fn leave(&mut self) { + self.path_focused = false; + } + + /// Submit the current path input as a trace query. 
+ /// + /// Bumps generation, parses the :line suffix, and returns the + /// new generation if the path is non-empty. + pub fn submit(&mut self) -> Option { + let trimmed = self.path_input.trim(); + if trimmed.is_empty() { + return None; + } + + let (path, line) = lore::cli::commands::trace::parse_trace_path(trimmed); + self.path_input = path; + self.path_cursor = self.path_input.len(); + self.line_filter = line; + + self.generation += 1; + self.loading = true; + self.selected_chain_index = 0; + self.expanded_chains.clear(); + self.scroll_offset = 0; + self.path_focused = false; + self.autocomplete_matches.clear(); + + Some(self.generation) + } + + /// Apply a trace result, guarded by generation counter. + pub fn apply_result(&mut self, generation: u64, result: TraceResult) { + if generation != self.generation { + return; // Stale response — discard. + } + self.result = Some(result); + self.loading = false; + } + + /// Toggle the expand/collapse state of the selected chain. + pub fn toggle_expand(&mut self) { + if self.expanded_chains.contains(&self.selected_chain_index) { + self.expanded_chains.remove(&self.selected_chain_index); + } else { + self.expanded_chains.insert(self.selected_chain_index); + } + } + + /// Toggle follow_renames and bump generation (triggers re-fetch). + pub fn toggle_follow_renames(&mut self) -> Option { + self.follow_renames = !self.follow_renames; + self.requery() + } + + /// Toggle include_discussions and bump generation (triggers re-fetch). + pub fn toggle_include_discussions(&mut self) -> Option { + self.include_discussions = !self.include_discussions; + self.requery() + } + + /// Re-query with current settings if path is non-empty. + fn requery(&mut self) -> Option { + if self.path_input.trim().is_empty() { + return None; + } + self.generation += 1; + self.loading = true; + self.selected_chain_index = 0; + self.expanded_chains.clear(); + self.scroll_offset = 0; + Some(self.generation) + } + + /// Select the previous chain. 
+ pub fn select_prev(&mut self) { + if self.selected_chain_index > 0 { + self.selected_chain_index -= 1; + self.ensure_visible(); + } + } + + /// Select the next chain. + pub fn select_next(&mut self) { + let max = self.chain_count().saturating_sub(1); + if self.selected_chain_index < max { + self.selected_chain_index += 1; + self.ensure_visible(); + } + } + + /// Number of trace chains in the current result. + fn chain_count(&self) -> usize { + self.result.as_ref().map_or(0, |r| r.trace_chains.len()) + } + + /// Ensure the selected chain is visible within the scroll viewport. + fn ensure_visible(&mut self) { + let idx = self.selected_chain_index as u16; + if idx < self.scroll_offset { + self.scroll_offset = idx; + } + // Rough viewport — exact height adjusted in render. + } + + /// Whether the text input has focus. + #[must_use] + pub fn has_text_focus(&self) -> bool { + self.path_focused + } + + /// Remove focus from all text inputs. + pub fn blur(&mut self) { + self.path_focused = false; + self.autocomplete_matches.clear(); + } + + /// Focus the path input. + pub fn focus_input(&mut self) { + self.path_focused = true; + self.update_autocomplete(); + } + + // --- Text editing helpers --- + + /// Insert a character at the cursor position. + pub fn insert_char(&mut self, ch: char) { + let byte_pos = self + .path_input + .char_indices() + .nth(self.path_cursor) + .map_or(self.path_input.len(), |(i, _)| i); + self.path_input.insert(byte_pos, ch); + self.path_cursor += 1; + self.update_autocomplete(); + } + + /// Delete the character before the cursor. 
+ pub fn delete_char_before_cursor(&mut self) { + if self.path_cursor == 0 { + return; + } + self.path_cursor -= 1; + let byte_pos = self + .path_input + .char_indices() + .nth(self.path_cursor) + .map_or(self.path_input.len(), |(i, _)| i); + let end = self + .path_input + .char_indices() + .nth(self.path_cursor + 1) + .map_or(self.path_input.len(), |(i, _)| i); + self.path_input.drain(byte_pos..end); + self.update_autocomplete(); + } + + /// Move cursor left. + pub fn cursor_left(&mut self) { + self.path_cursor = self.path_cursor.saturating_sub(1); + } + + /// Move cursor right. + pub fn cursor_right(&mut self) { + let max = self.path_input.chars().count(); + if self.path_cursor < max { + self.path_cursor += 1; + } + } + + // --- Autocomplete --- + + /// Update autocomplete matches based on current input. + pub fn update_autocomplete(&mut self) { + let input_lower = self.path_input.to_lowercase(); + if input_lower.is_empty() { + self.autocomplete_matches.clear(); + self.autocomplete_index = 0; + return; + } + + self.autocomplete_matches = self + .known_paths + .iter() + .filter(|p| p.to_lowercase().contains(&input_lower)) + .take(10) // Limit visible suggestions. + .cloned() + .collect(); + self.autocomplete_index = 0; + } + + /// Cycle to the next autocomplete suggestion. + pub fn autocomplete_next(&mut self) { + if self.autocomplete_matches.is_empty() { + return; + } + self.autocomplete_index = (self.autocomplete_index + 1) % self.autocomplete_matches.len(); + } + + /// Accept the current autocomplete suggestion into the path input. 
+ pub fn accept_autocomplete(&mut self) { + if let Some(match_) = self.autocomplete_matches.get(self.autocomplete_index) { + self.path_input = match_.clone(); + self.path_cursor = self.path_input.chars().count(); + self.autocomplete_matches.clear(); + } + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_trace_state_default() { + let state = TraceState::default(); + assert!(state.path_input.is_empty()); + assert!(!state.path_focused); + assert!(!state.follow_renames); // Default false, enter() sets true. + assert!(state.result.is_none()); + assert_eq!(state.generation, 0); + } + + #[test] + fn test_trace_state_enter_sets_defaults() { + let mut state = TraceState::default(); + state.enter(); + assert!(state.path_focused); + assert!(state.follow_renames); + assert!(state.include_discussions); + } + + #[test] + fn test_submit_empty_returns_none() { + let mut state = TraceState::default(); + assert!(state.submit().is_none()); + assert_eq!(state.generation, 0); + } + + #[test] + fn test_submit_with_path_bumps_generation() { + let mut state = TraceState { + path_input: "src/main.rs".into(), + ..TraceState::default() + }; + let generation = state.submit(); + assert_eq!(generation, Some(1)); + assert_eq!(state.generation, 1); + assert!(state.loading); + assert!(!state.path_focused); + } + + #[test] + fn test_submit_parses_line_suffix() { + let mut state = TraceState { + path_input: "src/main.rs:42".into(), + ..TraceState::default() + }; + state.submit(); + assert_eq!(state.path_input, "src/main.rs"); + assert_eq!(state.line_filter, Some(42)); + } + + #[test] + fn test_apply_result_matching_generation() { + let mut state = TraceState { + path_input: "src/lib.rs".into(), + ..TraceState::default() + }; + state.submit(); // generation = 1 + + let result = TraceResult { + path: 
"src/lib.rs".into(), + resolved_paths: vec![], + renames_followed: false, + trace_chains: vec![], + total_chains: 0, + }; + + state.apply_result(1, result); + assert!(state.result.is_some()); + assert!(!state.loading); + } + + #[test] + fn test_apply_result_stale_generation_discarded() { + let mut state = TraceState { + path_input: "src/lib.rs".into(), + ..TraceState::default() + }; + state.submit(); // generation = 1 + state.path_input = "src/other.rs".into(); + state.submit(); // generation = 2 + + let stale_result = TraceResult { + path: "src/lib.rs".into(), + resolved_paths: vec![], + renames_followed: false, + trace_chains: vec![], + total_chains: 0, + }; + + state.apply_result(1, stale_result); // Stale — should be discarded. + assert!(state.result.is_none()); + assert!(state.loading); // Still loading. + } + + #[test] + fn test_toggle_expand() { + let mut state = TraceState { + selected_chain_index: 2, + ..TraceState::default() + }; + + state.toggle_expand(); + assert!(state.expanded_chains.contains(&2)); + + state.toggle_expand(); + assert!(!state.expanded_chains.contains(&2)); + } + + #[test] + fn test_toggle_follow_renames_requeues() { + let mut state = TraceState { + path_input: "src/main.rs".into(), + path_focused: true, + follow_renames: true, + include_discussions: true, + ..TraceState::default() + }; + assert!(state.follow_renames); + + let generation = state.toggle_follow_renames(); + assert!(!state.follow_renames); + assert_eq!(generation, Some(1)); + assert!(state.loading); + } + + #[test] + fn test_toggle_include_discussions_requeues() { + let mut state = TraceState { + path_input: "src/main.rs".into(), + path_focused: true, + follow_renames: true, + include_discussions: true, + ..TraceState::default() + }; + assert!(state.include_discussions); + + let generation = state.toggle_include_discussions(); + assert!(!state.include_discussions); + assert_eq!(generation, Some(1)); + } + + #[test] + fn test_select_prev_next() { + let mut state = 
TraceState { + result: Some(TraceResult { + path: "x".into(), + resolved_paths: vec![], + renames_followed: false, + trace_chains: vec![ + lore::core::trace::TraceChain { + mr_iid: 1, + mr_title: "a".into(), + mr_state: "merged".into(), + mr_author: "x".into(), + change_type: "modified".into(), + merged_at_iso: None, + updated_at_iso: "2024-01-01".into(), + web_url: None, + issues: vec![], + discussions: vec![], + }, + lore::core::trace::TraceChain { + mr_iid: 2, + mr_title: "b".into(), + mr_state: "merged".into(), + mr_author: "y".into(), + change_type: "added".into(), + merged_at_iso: None, + updated_at_iso: "2024-01-02".into(), + web_url: None, + issues: vec![], + discussions: vec![], + }, + ], + total_chains: 2, + }), + ..TraceState::default() + }; + + assert_eq!(state.selected_chain_index, 0); + state.select_next(); + assert_eq!(state.selected_chain_index, 1); + state.select_next(); // Clamped at max. + assert_eq!(state.selected_chain_index, 1); + state.select_prev(); + assert_eq!(state.selected_chain_index, 0); + state.select_prev(); // Clamped at 0. 
+ assert_eq!(state.selected_chain_index, 0); + } + + #[test] + fn test_insert_char_and_delete() { + let mut state = TraceState::default(); + state.insert_char('a'); + state.insert_char('b'); + state.insert_char('c'); + assert_eq!(state.path_input, "abc"); + assert_eq!(state.path_cursor, 3); + + state.delete_char_before_cursor(); + assert_eq!(state.path_input, "ab"); + assert_eq!(state.path_cursor, 2); + } + + #[test] + fn test_autocomplete_filters() { + let mut state = TraceState { + known_paths: vec!["src/a.rs".into(), "src/b.rs".into(), "lib/c.rs".into()], + path_input: "src/".into(), + ..TraceState::default() + }; + state.update_autocomplete(); + assert_eq!(state.autocomplete_matches.len(), 2); + assert!(state.autocomplete_matches.contains(&"src/a.rs".to_string())); + assert!(state.autocomplete_matches.contains(&"src/b.rs".to_string())); + } + + #[test] + fn test_autocomplete_next_cycles() { + let mut state = TraceState { + known_paths: vec!["a.rs".into(), "ab.rs".into()], + path_input: "a".into(), + ..TraceState::default() + }; + state.update_autocomplete(); + assert_eq!(state.autocomplete_matches.len(), 2); + assert_eq!(state.autocomplete_index, 0); + + state.autocomplete_next(); + assert_eq!(state.autocomplete_index, 1); + + state.autocomplete_next(); + assert_eq!(state.autocomplete_index, 0); // Wrapped. 
+ } + + #[test] + fn test_accept_autocomplete() { + let mut state = TraceState { + known_paths: vec!["src/main.rs".into()], + path_input: "src/".into(), + ..TraceState::default() + }; + state.update_autocomplete(); + assert_eq!(state.autocomplete_matches.len(), 1); + + state.accept_autocomplete(); + assert_eq!(state.path_input, "src/main.rs"); + assert!(state.autocomplete_matches.is_empty()); + } + + #[test] + fn test_has_text_focus() { + let state = TraceState::default(); + assert!(!state.has_text_focus()); + let state = TraceState { + path_focused: true, + ..TraceState::default() + }; + assert!(state.has_text_focus()); + } + + #[test] + fn test_blur_clears_focus_and_autocomplete() { + let mut state = TraceState { + path_focused: true, + autocomplete_matches: vec!["a".into()], + ..TraceState::default() + }; + + state.blur(); + assert!(!state.path_focused); + assert!(state.autocomplete_matches.is_empty()); + } +} diff --git a/crates/lore-tui/src/state/who.rs b/crates/lore-tui/src/state/who.rs index 1f513ed..3e3f020 100644 --- a/crates/lore-tui/src/state/who.rs +++ b/crates/lore-tui/src/state/who.rs @@ -1,12 +1,516 @@ -#![allow(dead_code)] - //! Who (people intelligence) screen state. +//! +//! Manages 5 query modes (Expert, Workload, Reviews, Active, Overlap), +//! input fields (path or username depending on mode), and result display. -use crate::message::WhoResult; +use lore::core::who_types::WhoResult; + +// --------------------------------------------------------------------------- +// WhoMode +// --------------------------------------------------------------------------- + +/// The 5 query modes for the who screen. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum WhoMode { + /// File-path expertise scores. + #[default] + Expert, + /// Issue/MR assignment workload for a username. + Workload, + /// Review activity breakdown for a username. + Reviews, + /// Recent unresolved discussions (no input needed). 
+ Active, + /// Shared file knowledge between contributors. + Overlap, +} + +impl WhoMode { + /// Short label for mode tab rendering. + #[must_use] + pub fn label(self) -> &'static str { + match self { + Self::Expert => "Expert", + Self::Workload => "Workload", + Self::Reviews => "Reviews", + Self::Active => "Active", + Self::Overlap => "Overlap", + } + } + + /// Whether this mode requires a path input. + #[must_use] + pub fn needs_path(self) -> bool { + matches!(self, Self::Expert | Self::Overlap) + } + + /// Whether this mode requires a username input. + #[must_use] + pub fn needs_username(self) -> bool { + matches!(self, Self::Workload | Self::Reviews) + } + + /// Whether include_closed affects this mode's query. + #[must_use] + pub fn affected_by_include_closed(self) -> bool { + matches!(self, Self::Workload | Self::Active) + } + + /// Cycle to the next mode (wraps around). + #[must_use] + pub fn next(self) -> Self { + match self { + Self::Expert => Self::Workload, + Self::Workload => Self::Reviews, + Self::Reviews => Self::Active, + Self::Active => Self::Overlap, + Self::Overlap => Self::Expert, + } + } + + /// All modes in order. + pub const ALL: [Self; 5] = [ + Self::Expert, + Self::Workload, + Self::Reviews, + Self::Active, + Self::Overlap, + ]; + + /// Mode from 1-based number key (1=Expert, 2=Workload, ..., 5=Overlap). + #[must_use] + pub fn from_number(n: u8) -> Option { + match n { + 1 => Some(Self::Expert), + 2 => Some(Self::Workload), + 3 => Some(Self::Reviews), + 4 => Some(Self::Active), + 5 => Some(Self::Overlap), + _ => None, + } + } +} + +// --------------------------------------------------------------------------- +// WhoState +// --------------------------------------------------------------------------- /// State for the who/people screen. #[derive(Debug, Default)] pub struct WhoState { + /// Active query mode. + pub mode: WhoMode, + /// Current result (if any). pub result: Option, - pub scroll_offset: u16, + + // Input fields. 
+ /// Path input text (used by Expert and Overlap modes). + pub path: String, + /// Cursor position within path string (byte offset). + pub path_cursor: usize, + /// Whether the path input has focus. + pub path_focused: bool, + + /// Username input text (used by Workload and Reviews modes). + pub username: String, + /// Cursor position within username string (byte offset). + pub username_cursor: usize, + /// Whether the username input has focus. + pub username_focused: bool, + + /// Toggle: include closed entities in Workload/Active queries. + pub include_closed: bool, + + // Result navigation. + /// Index of the selected row in the result list. + pub selected_index: usize, + /// Vertical scroll offset for the result area. + pub scroll_offset: usize, + + // Async coordination. + /// Monotonic generation counter for stale-response detection. + pub generation: u64, + /// Whether a query is in-flight. + pub loading: bool, +} + +impl WhoState { + /// Enter the who screen: focus the appropriate input. + pub fn enter(&mut self) { + self.focus_input_for_mode(); + } + + /// Leave the who screen: blur all inputs. + pub fn leave(&mut self) { + self.path_focused = false; + self.username_focused = false; + } + + /// Switch to a different mode. Clears result and resets selection. + /// Returns the new generation for stale detection. + pub fn set_mode(&mut self, mode: WhoMode) -> u64 { + if self.mode == mode { + return self.generation; + } + self.mode = mode; + self.result = None; + self.selected_index = 0; + self.scroll_offset = 0; + self.focus_input_for_mode(); + self.bump_generation() + } + + /// Toggle include_closed. Returns new generation if the mode is affected. + pub fn toggle_include_closed(&mut self) -> Option { + self.include_closed = !self.include_closed; + if self.mode.affected_by_include_closed() { + Some(self.bump_generation()) + } else { + None + } + } + + /// Apply query results if generation matches. 
+ pub fn apply_results(&mut self, generation: u64, result: WhoResult) { + if generation != self.generation { + return; // Stale response — discard. + } + self.result = Some(result); + self.loading = false; + self.selected_index = 0; + self.scroll_offset = 0; + } + + /// Submit the current input (trigger a query). + /// Returns the generation for the new query. + pub fn submit(&mut self) -> u64 { + self.loading = true; + self.bump_generation() + } + + // --- Input field operations --- + + /// Insert a char at cursor in the active input field. + pub fn insert_char(&mut self, c: char) { + if self.path_focused { + self.path.insert(self.path_cursor, c); + self.path_cursor += c.len_utf8(); + } else if self.username_focused { + self.username.insert(self.username_cursor, c); + self.username_cursor += c.len_utf8(); + } + } + + /// Delete the char before cursor in the active input field. + pub fn delete_char_before_cursor(&mut self) { + if self.path_focused && self.path_cursor > 0 { + let prev = prev_char_boundary(&self.path, self.path_cursor); + self.path.drain(prev..self.path_cursor); + self.path_cursor = prev; + } else if self.username_focused && self.username_cursor > 0 { + let prev = prev_char_boundary(&self.username, self.username_cursor); + self.username.drain(prev..self.username_cursor); + self.username_cursor = prev; + } + } + + /// Move cursor left in the active input. + pub fn cursor_left(&mut self) { + if self.path_focused && self.path_cursor > 0 { + self.path_cursor = prev_char_boundary(&self.path, self.path_cursor); + } else if self.username_focused && self.username_cursor > 0 { + self.username_cursor = prev_char_boundary(&self.username, self.username_cursor); + } + } + + /// Move cursor right in the active input. 
+ pub fn cursor_right(&mut self) { + if self.path_focused && self.path_cursor < self.path.len() { + self.path_cursor = next_char_boundary(&self.path, self.path_cursor); + } else if self.username_focused && self.username_cursor < self.username.len() { + self.username_cursor = next_char_boundary(&self.username, self.username_cursor); + } + } + + /// Whether any input field has focus. + #[must_use] + pub fn has_text_focus(&self) -> bool { + self.path_focused || self.username_focused + } + + /// Blur all inputs. + pub fn blur(&mut self) { + self.path_focused = false; + self.username_focused = false; + } + + /// Focus the appropriate input for the current mode. + pub fn focus_input_for_mode(&mut self) { + self.path_focused = self.mode.needs_path(); + self.username_focused = self.mode.needs_username(); + // Place cursor at end of text. + if self.path_focused { + self.path_cursor = self.path.len(); + } + if self.username_focused { + self.username_cursor = self.username.len(); + } + } + + // --- Selection navigation --- + + /// Move selection up. + pub fn select_prev(&mut self) { + self.selected_index = self.selected_index.saturating_sub(1); + } + + /// Move selection down (bounded by result count). + pub fn select_next(&mut self, result_count: usize) { + if result_count > 0 { + self.selected_index = (self.selected_index + 1).min(result_count - 1); + } + } + + /// Ensure the selected row is visible within the viewport. + pub fn ensure_visible(&mut self, viewport_height: usize) { + if viewport_height == 0 { + return; + } + if self.selected_index < self.scroll_offset { + self.scroll_offset = self.selected_index; + } else if self.selected_index >= self.scroll_offset + viewport_height { + self.scroll_offset = self.selected_index - viewport_height + 1; + } + } + + // --- Internal --- + + fn bump_generation(&mut self) -> u64 { + self.generation += 1; + self.generation + } +} + +/// Find the byte offset of the previous char boundary. 
+fn prev_char_boundary(s: &str, pos: usize) -> usize { + let mut i = pos.saturating_sub(1); + while i > 0 && !s.is_char_boundary(i) { + i -= 1; + } + i +} + +/// Find the byte offset of the next char boundary. +fn next_char_boundary(s: &str, pos: usize) -> usize { + let mut i = pos + 1; + while i < s.len() && !s.is_char_boundary(i) { + i += 1; + } + i +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_who_mode_defaults_to_expert() { + assert_eq!(WhoMode::default(), WhoMode::Expert); + } + + #[test] + fn test_who_mode_labels() { + assert_eq!(WhoMode::Expert.label(), "Expert"); + assert_eq!(WhoMode::Active.label(), "Active"); + assert_eq!(WhoMode::Overlap.label(), "Overlap"); + } + + #[test] + fn test_who_mode_needs_path() { + assert!(WhoMode::Expert.needs_path()); + assert!(WhoMode::Overlap.needs_path()); + assert!(!WhoMode::Workload.needs_path()); + assert!(!WhoMode::Reviews.needs_path()); + assert!(!WhoMode::Active.needs_path()); + } + + #[test] + fn test_who_mode_needs_username() { + assert!(WhoMode::Workload.needs_username()); + assert!(WhoMode::Reviews.needs_username()); + assert!(!WhoMode::Expert.needs_username()); + assert!(!WhoMode::Active.needs_username()); + } + + #[test] + fn test_who_mode_next_cycles() { + let start = WhoMode::Expert; + let m = start.next().next().next().next().next(); + assert_eq!(m, start); + } + + #[test] + fn test_who_mode_from_number() { + assert_eq!(WhoMode::from_number(1), Some(WhoMode::Expert)); + assert_eq!(WhoMode::from_number(5), Some(WhoMode::Overlap)); + assert_eq!(WhoMode::from_number(0), None); + assert_eq!(WhoMode::from_number(6), None); + } + + #[test] + fn test_who_state_default() { + let state = WhoState::default(); + assert_eq!(state.mode, WhoMode::Expert); + assert!(state.result.is_none()); + assert!(!state.include_closed); + 
assert_eq!(state.generation, 0); + } + + #[test] + fn test_set_mode_bumps_generation() { + let mut state = WhoState::default(); + let generation = state.set_mode(WhoMode::Workload); + assert_eq!(generation, 1); + assert_eq!(state.mode, WhoMode::Workload); + assert!(state.result.is_none()); + assert!(state.username_focused); + assert!(!state.path_focused); + } + + #[test] + fn test_set_mode_same_does_not_bump() { + let mut state = WhoState::default(); + let generation = state.set_mode(WhoMode::Expert); + assert_eq!(generation, 0); // No bump for same mode. + } + + #[test] + fn test_toggle_include_closed_returns_gen_for_affected_modes() { + let state = &mut WhoState { + mode: WhoMode::Workload, + ..WhoState::default() + }; + let generation = state.toggle_include_closed(); + assert!(generation.is_some()); + assert!(state.include_closed); + } + + #[test] + fn test_toggle_include_closed_returns_none_for_unaffected_modes() { + let state = &mut WhoState { + mode: WhoMode::Expert, + ..WhoState::default() + }; + let generation = state.toggle_include_closed(); + assert!(generation.is_none()); + assert!(state.include_closed); + } + + #[test] + fn test_stale_response_guard() { + let mut state = WhoState::default(); + let stale_gen = state.submit(); + // Bump generation again (simulating user changed mode). + let _new_gen = state.set_mode(WhoMode::Active); + // Old response arrives — should be discarded. + state.apply_results( + stale_gen, + WhoResult::Active(lore::core::who_types::ActiveResult { + discussions: vec![], + total_unresolved_in_window: 0, + truncated: false, + }), + ); + assert!(state.result.is_none()); // Stale, discarded. 
+ } + + #[test] + fn test_insert_and_delete_char() { + let mut state = WhoState { + path_focused: true, + ..WhoState::default() + }; + state.insert_char('s'); + state.insert_char('r'); + state.insert_char('c'); + assert_eq!(state.path, "src"); + assert_eq!(state.path_cursor, 3); + + state.delete_char_before_cursor(); + assert_eq!(state.path, "sr"); + assert_eq!(state.path_cursor, 2); + } + + #[test] + fn test_cursor_movement() { + let mut state = WhoState { + username_focused: true, + username: "alice".into(), + username_cursor: 5, + ..WhoState::default() + }; + + state.cursor_left(); + assert_eq!(state.username_cursor, 4); + state.cursor_right(); + assert_eq!(state.username_cursor, 5); + // Right at end is clamped. + state.cursor_right(); + assert_eq!(state.username_cursor, 5); + } + + #[test] + fn test_select_prev_next() { + let mut state = WhoState::default(); + state.select_next(5); + assert_eq!(state.selected_index, 1); + state.select_next(5); + assert_eq!(state.selected_index, 2); + state.select_prev(); + assert_eq!(state.selected_index, 1); + state.select_prev(); + assert_eq!(state.selected_index, 0); + state.select_prev(); // Should not underflow. + assert_eq!(state.selected_index, 0); + } + + #[test] + fn test_ensure_visible() { + let mut state = WhoState { + selected_index: 15, + ..WhoState::default() + }; + state.ensure_visible(5); + assert_eq!(state.scroll_offset, 11); // 15 - 5 + 1 + } + + #[test] + fn test_enter_focuses_correct_input() { + let mut state = WhoState { + mode: WhoMode::Expert, + ..WhoState::default() + }; + state.enter(); + assert!(state.path_focused); + assert!(!state.username_focused); + + state.mode = WhoMode::Reviews; + state.enter(); + assert!(!state.path_focused); + // Reviews needs username. + // focus_input_for_mode is called in enter(). 
+ } + + #[test] + fn test_affected_by_include_closed() { + assert!(WhoMode::Workload.affected_by_include_closed()); + assert!(WhoMode::Active.affected_by_include_closed()); + assert!(!WhoMode::Expert.affected_by_include_closed()); + assert!(!WhoMode::Reviews.affected_by_include_closed()); + assert!(!WhoMode::Overlap.affected_by_include_closed()); + } } diff --git a/crates/lore-tui/src/view/bootstrap.rs b/crates/lore-tui/src/view/bootstrap.rs new file mode 100644 index 0000000..2cd2616 --- /dev/null +++ b/crates/lore-tui/src/view/bootstrap.rs @@ -0,0 +1,134 @@ +#![allow(dead_code)] // Phase 2.5: consumed by render_screen dispatch + +//! Bootstrap screen view. +//! +//! Shown when the database has no entity data. Guides users to run +//! a sync to populate the database. + +use ftui::core::geometry::Rect; +use ftui::render::cell::{Cell, PackedRgba}; +use ftui::render::drawing::Draw; +use ftui::render::frame::Frame; + +use crate::state::bootstrap::BootstrapState; + +// Colors (Flexoki palette). +const TEXT: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3); // tx +const MUTED: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2 +const ACCENT: PackedRgba = PackedRgba::rgb(0xDA, 0x70, 0x2C); // orange + +/// Render the bootstrap screen. +/// +/// Centers a message in the content area, guiding the user to start a sync. +/// When a sync is in progress, shows a "syncing" message instead. +pub fn render_bootstrap(frame: &mut Frame<'_>, state: &BootstrapState, area: Rect) { + if area.width < 10 || area.height < 5 { + return; + } + + let center_y = area.y + area.height / 2; + let max_x = area.x.saturating_add(area.width); + + // Title. + let title = "No data found"; + let title_x = area.x + area.width.saturating_sub(title.len() as u16) / 2; + frame.print_text_clipped( + title_x, + center_y.saturating_sub(2), + title, + Cell { + fg: ACCENT, + ..Cell::default() + }, + max_x, + ); + + if state.sync_started { + // Sync in progress. 
+ let msg = "Syncing data from GitLab..."; + let msg_x = area.x + area.width.saturating_sub(msg.len() as u16) / 2; + frame.print_text_clipped( + msg_x, + center_y, + msg, + Cell { + fg: TEXT, + ..Cell::default() + }, + max_x, + ); + } else { + // Prompt user to start sync. + let msg = "Run sync to get started."; + let msg_x = area.x + area.width.saturating_sub(msg.len() as u16) / 2; + frame.print_text_clipped( + msg_x, + center_y, + msg, + Cell { + fg: TEXT, + ..Cell::default() + }, + max_x, + ); + + let hint = "Press 'g' then 's' to start sync, or 'q' to quit."; + let hint_x = area.x + area.width.saturating_sub(hint.len() as u16) / 2; + frame.print_text_clipped( + hint_x, + center_y + 2, + hint, + Cell { + fg: MUTED, + ..Cell::default() + }, + max_x, + ); + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use ftui::render::grapheme_pool::GraphemePool; + + macro_rules! with_frame { + ($width:expr, $height:expr, |$frame:ident| $body:block) => {{ + let mut pool = GraphemePool::new(); + let mut $frame = Frame::new($width, $height, &mut pool); + $body + }}; + } + + #[test] + fn test_render_bootstrap_no_panic() { + with_frame!(80, 24, |frame| { + let state = BootstrapState::default(); + render_bootstrap(&mut frame, &state, Rect::new(0, 1, 80, 22)); + }); + } + + #[test] + fn test_render_bootstrap_sync_started() { + with_frame!(80, 24, |frame| { + let state = BootstrapState { + sync_started: true, + ..Default::default() + }; + render_bootstrap(&mut frame, &state, Rect::new(0, 1, 80, 22)); + }); + } + + #[test] + fn test_render_bootstrap_tiny_area_noop() { + with_frame!(8, 3, |frame| { + let state = BootstrapState::default(); + render_bootstrap(&mut frame, &state, Rect::new(0, 0, 8, 3)); + // Should not panic — early return for tiny areas. 
+ }); + } +} diff --git a/crates/lore-tui/src/view/command_palette.rs b/crates/lore-tui/src/view/command_palette.rs new file mode 100644 index 0000000..4f73769 --- /dev/null +++ b/crates/lore-tui/src/view/command_palette.rs @@ -0,0 +1,389 @@ +//! Command palette overlay — modal fuzzy-match command picker. +//! +//! Renders a centered modal with a query input at the top and a scrollable +//! list of matching commands below. Keybinding hints are right-aligned. + +use ftui::core::geometry::Rect; +use ftui::render::cell::Cell; +use ftui::render::drawing::{BorderChars, Draw}; +use ftui::render::frame::Frame; + +use crate::state::command_palette::CommandPaletteState; + +use super::{ACCENT, BG_SURFACE, BORDER, TEXT, TEXT_MUTED}; + +fn text_cell_width(text: &str) -> u16 { + text.chars().count().min(u16::MAX as usize) as u16 +} + +fn cursor_cell_offset(query: &str, cursor: usize) -> u16 { + let mut idx = cursor.min(query.len()); + while idx > 0 && !query.is_char_boundary(idx) { + idx -= 1; + } + text_cell_width(&query[..idx]) +} + +// --------------------------------------------------------------------------- +// render_command_palette +// --------------------------------------------------------------------------- + +/// Render the command palette overlay centered on the screen. +/// +/// Only renders if `state.is_open()`. The modal is 60% width, 50% height, +/// capped at 60x20. +pub fn render_command_palette(frame: &mut Frame<'_>, state: &CommandPaletteState, area: Rect) { + if !state.is_open() { + return; + } + if area.height < 5 || area.width < 20 { + return; + } + + // Modal dimensions: 60% of screen, capped. + let modal_width = (area.width * 3 / 5).clamp(30, 60); + let modal_height = (area.height / 2).clamp(6, 20); + + let modal_x = area.x + (area.width.saturating_sub(modal_width)) / 2; + let modal_y = area.y + (area.height.saturating_sub(modal_height)) / 2; + let modal_rect = Rect::new(modal_x, modal_y, modal_width, modal_height); + + // Clear background. 
+ let bg_cell = Cell { + fg: TEXT, + bg: BG_SURFACE, + ..Cell::default() + }; + for y in modal_rect.y..modal_rect.bottom() { + for x in modal_rect.x..modal_rect.right() { + frame.buffer.set(x, y, bg_cell); + } + } + + // Border. + let border_cell = Cell { + fg: BORDER, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.draw_border(modal_rect, BorderChars::ROUNDED, border_cell); + + // Title. + let title = " Command Palette "; + let title_x = modal_x + (modal_width.saturating_sub(title.len() as u16)) / 2; + let title_cell = Cell { + fg: ACCENT, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped(title_x, modal_y, title, title_cell, modal_rect.right()); + + // Inner content area (inside border). + let inner = Rect::new( + modal_x + 2, + modal_y + 1, + modal_width.saturating_sub(4), + modal_height.saturating_sub(2), + ); + if inner.width < 4 || inner.height < 2 { + return; + } + + // --- Query input line --- + let query_y = inner.y; + let prompt = "> "; + let prompt_cell = Cell { + fg: ACCENT, + bg: BG_SURFACE, + ..Cell::default() + }; + let query_start = + frame.print_text_clipped(inner.x, query_y, prompt, prompt_cell, inner.right()); + + let query_display = if state.query.is_empty() { + "Type to filter..." + } else { + &state.query + }; + let query_fg = if state.query.is_empty() { + TEXT_MUTED + } else { + TEXT + }; + let q_cell = Cell { + fg: query_fg, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped(query_start, query_y, query_display, q_cell, inner.right()); + + // Cursor indicator (if query focused and not showing placeholder). + if !state.query.is_empty() { + let cursor_x = query_start.saturating_add(cursor_cell_offset(&state.query, state.cursor)); + if cursor_x < inner.right() { + let cursor_cell = Cell { + fg: BG_SURFACE, + bg: TEXT, + ..Cell::default() + }; + // Draw cursor block. If at end of text, draw a space. + let cursor_char = state + .query + .get(state.cursor..) 
+ .and_then(|s| s.chars().next()) + .unwrap_or(' '); + frame.print_text_clipped( + cursor_x, + query_y, + &cursor_char.to_string(), + cursor_cell, + inner.right(), + ); + } + } + + // --- Separator --- + let sep_y = query_y + 1; + if sep_y >= inner.bottom() { + return; + } + let sep_cell = Cell { + fg: BORDER, + bg: BG_SURFACE, + ..Cell::default() + }; + let sep_line = "─".repeat(inner.width as usize); + frame.print_text_clipped(inner.x, sep_y, &sep_line, sep_cell, inner.right()); + + // --- Results list --- + let list_y = sep_y + 1; + let list_height = inner.bottom().saturating_sub(list_y) as usize; + if list_height == 0 { + return; + } + + if state.filtered.is_empty() { + let msg = if state.query.is_empty() { + "No commands available" + } else { + "No matching commands" + }; + let msg_cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped(inner.x, list_y, msg, msg_cell, inner.right()); + return; + } + + // Scroll so the selected item is always visible. + let scroll_offset = if state.selected_index >= list_height { + state.selected_index - list_height + 1 + } else { + 0 + }; + + let normal_cell = Cell { + fg: TEXT, + bg: BG_SURFACE, + ..Cell::default() + }; + let selected_cell = Cell { + fg: BG_SURFACE, + bg: ACCENT, + ..Cell::default() + }; + let key_cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + let key_selected_cell = Cell { + fg: BG_SURFACE, + bg: ACCENT, + ..Cell::default() + }; + + for (i, entry) in state + .filtered + .iter() + .skip(scroll_offset) + .enumerate() + .take(list_height) + { + let y = list_y + i as u16; + let is_selected = i + scroll_offset == state.selected_index; + + let (label_style, kb_style) = if is_selected { + (selected_cell, key_selected_cell) + } else { + (normal_cell, key_cell) + }; + + // Fill row background for selected item. + if is_selected { + for x in inner.x..inner.right() { + frame.buffer.set(x, y, selected_cell); + } + } + + // Label (left-aligned). 
+ frame.print_text_clipped(inner.x, y, entry.label, label_style, inner.right()); + + // Keybinding (right-aligned). + if let Some(ref kb) = entry.keybinding { + let kb_width = text_cell_width(kb); + let kb_x = inner.right().saturating_sub(kb_width); + if kb_x > inner.x + text_cell_width(entry.label).saturating_add(1) { + frame.print_text_clipped(kb_x, y, kb, kb_style, inner.right()); + } + } + } + + // Scroll indicator. + if state.filtered.len() > list_height { + let indicator = format!( + " {}/{} ", + (scroll_offset + list_height).min(state.filtered.len()), + state.filtered.len() + ); + let ind_x = modal_rect + .right() + .saturating_sub(indicator.len() as u16 + 1); + let ind_y = modal_rect.bottom().saturating_sub(1); + let ind_cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped(ind_x, ind_y, &indicator, ind_cell, modal_rect.right()); + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::commands::build_registry; + use crate::message::Screen; + use crate::state::command_palette::CommandPaletteState; + use ftui::render::grapheme_pool::GraphemePool; + + macro_rules! with_frame { + ($width:expr, $height:expr, |$frame:ident| $body:block) => {{ + let mut pool = GraphemePool::new(); + let mut $frame = Frame::new($width, $height, &mut pool); + $body + }}; + } + + #[test] + fn test_render_palette_closed_is_noop() { + with_frame!(80, 24, |frame| { + let state = CommandPaletteState::default(); + render_command_palette(&mut frame, &state, Rect::new(0, 0, 80, 24)); + // No content rendered when palette is closed. 
+        });
+    }
+
+    #[test]
+    fn test_render_palette_open_no_panic() {
+        with_frame!(80, 24, |frame| {
+            let registry = build_registry();
+            let mut state = CommandPaletteState::default();
+            state.open(&registry, &Screen::Dashboard);
+            render_command_palette(&mut frame, &state, Rect::new(0, 0, 80, 24));
+
+            // Should have rendered content in center area.
+            let has_content = (25..55u16).any(|x| {
+                (8..16u16).any(|y| {
+                    let cell = frame.buffer.get(x, y).unwrap();
+                    !cell.is_empty()
+                })
+            });
+            assert!(has_content, "Expected palette overlay in center area");
+        });
+    }
+
+    #[test]
+    fn test_render_palette_with_query() {
+        with_frame!(80, 24, |frame| {
+            let registry = build_registry();
+            let mut state = CommandPaletteState::default();
+            state.open(&registry, &Screen::Dashboard);
+            state.insert_char('q', &registry, &Screen::Dashboard);
+            render_command_palette(&mut frame, &state, Rect::new(0, 0, 80, 24));
+        });
+    }
+
+    #[test]
+    fn test_render_palette_unicode_cursor_uses_char_offset() {
+        with_frame!(80, 24, |frame| {
+            let registry = build_registry();
+            let mut state = CommandPaletteState::default();
+            state.open(&registry, &Screen::Dashboard);
+            state.insert_char('é', &registry, &Screen::Dashboard);
+            render_command_palette(&mut frame, &state, Rect::new(0, 0, 80, 24));
+
+            let area = Rect::new(0, 0, 80, 24);
+            let modal_width = (area.width * 3 / 5).clamp(30, 60);
+            let modal_height = (area.height / 2).clamp(6, 20);
+            let modal_x = area.x + (area.width.saturating_sub(modal_width)) / 2;
+            let modal_y = area.y + (area.height.saturating_sub(modal_height)) / 2;
+            let inner = Rect::new(
+                modal_x + 2,
+                modal_y + 1,
+                modal_width.saturating_sub(4),
+                modal_height.saturating_sub(2),
+            );
+
+            // Prompt "> " is two cells; one unicode scalar should place cursor at +1.
+            let query_y = inner.y;
+            let cursor_x = inner.x + 3;
+            let cell = frame
+                .buffer
+                .get(cursor_x, query_y)
+                .expect("cursor position must be in bounds");
+            assert_eq!(cell.bg, TEXT);
+        });
+    }
+
+    #[test]
+    fn test_render_palette_with_selection() {
+        with_frame!(80, 24, |frame| {
+            let registry = build_registry();
+            let mut state = CommandPaletteState::default();
+            state.open(&registry, &Screen::Dashboard);
+            state.select_next();
+            state.select_next();
+            render_command_palette(&mut frame, &state, Rect::new(0, 0, 80, 24));
+        });
+    }
+
+    #[test]
+    fn test_render_palette_tiny_terminal_noop() {
+        with_frame!(15, 4, |frame| {
+            let registry = build_registry();
+            let mut state = CommandPaletteState::default();
+            state.open(&registry, &Screen::Dashboard);
+            render_command_palette(&mut frame, &state, Rect::new(0, 0, 15, 4));
+        });
+    }
+
+    #[test]
+    fn test_render_palette_no_results() {
+        with_frame!(80, 24, |frame| {
+            let registry = build_registry();
+            let mut state = CommandPaletteState::default();
+            state.open(&registry, &Screen::Dashboard);
+            for c in "zzzzzz".chars() {
+                state.insert_char(c, &registry, &Screen::Dashboard);
+            }
+            render_command_palette(&mut frame, &state, Rect::new(0, 0, 80, 24));
+        });
+    }
+}
diff --git a/crates/lore-tui/src/view/common/cross_ref.rs b/crates/lore-tui/src/view/common/cross_ref.rs
index 6add2c8..abf926c 100644
--- a/crates/lore-tui/src/view/common/cross_ref.rs
+++ b/crates/lore-tui/src/view/common/cross_ref.rs
@@ -181,13 +181,16 @@ pub fn render_cross_refs(
     // Spacing
     x = frame.print_text_clipped(x, y, " ", badge_style, max_x);
 
-    // Entity prefix + label
+    // Entity prefix + label — derive sigil from entity kind, not ref kind.
let prefix = match cr.kind { - CrossRefKind::ClosingMr | CrossRefKind::MentionedIn => { - format!("!{} ", cr.entity_key.iid) - } - CrossRefKind::RelatedIssue => { - format!("#{} ", cr.entity_key.iid) + CrossRefKind::ClosingMr => format!("!{} ", cr.entity_key.iid), + CrossRefKind::RelatedIssue => format!("#{} ", cr.entity_key.iid), + CrossRefKind::MentionedIn => { + let sigil = match cr.entity_key.kind { + crate::message::EntityKind::MergeRequest => "!", + crate::message::EntityKind::Issue => "#", + }; + format!("{sigil}{} ", cr.entity_key.iid) } }; diff --git a/crates/lore-tui/src/view/file_history.rs b/crates/lore-tui/src/view/file_history.rs new file mode 100644 index 0000000..d43991a --- /dev/null +++ b/crates/lore-tui/src/view/file_history.rs @@ -0,0 +1,578 @@ +#![allow(dead_code)] + +//! File History view — renders per-file MR timeline with rename chains. +//! +//! Layout: +//! ```text +//! +-----------------------------------+ +//! | Path: [src/lib.rs_] [R] [M] [D] | <- path input + option toggles +//! | Rename chain: a.rs -> b.rs -> ... | <- shown when renames followed +//! | 5 merge requests across 2 paths | <- summary line +//! +-----------------------------------+ +//! | > !42 Fix auth @alice modified ... | <- MR list (selected = >) +//! | !39 Refactor @bob renamed ... | +//! | @carol: "This looks off..." | <- inline discussion (if toggled) +//! +-----------------------------------+ +//! | r:renames m:merged d:discussions | <- hint bar +//! +-----------------------------------+ +//! 
``` + +use ftui::render::cell::{Cell, PackedRgba}; +use ftui::render::drawing::Draw; +use ftui::render::frame::Frame; + +use crate::state::file_history::{FileHistoryResult, FileHistoryState}; + +// --------------------------------------------------------------------------- +// Colors (Flexoki palette) +// --------------------------------------------------------------------------- + +const TEXT: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3); // tx +const TEXT_MUTED: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2 +const BG_SURFACE: PackedRgba = PackedRgba::rgb(0x28, 0x28, 0x24); // bg-2 +const ACCENT: PackedRgba = PackedRgba::rgb(0xDA, 0x70, 0x2C); // orange +const GREEN: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39); // green +const CYAN: PackedRgba = PackedRgba::rgb(0x3A, 0xA9, 0x9F); // cyan +const YELLOW: PackedRgba = PackedRgba::rgb(0xD0, 0xA2, 0x15); // yellow +const RED: PackedRgba = PackedRgba::rgb(0xAF, 0x3A, 0x29); // red +const SELECTION_BG: PackedRgba = PackedRgba::rgb(0x34, 0x34, 0x31); // bg-3 + +// --------------------------------------------------------------------------- +// Public entry point +// --------------------------------------------------------------------------- + +/// Render the File History screen. +pub fn render_file_history( + frame: &mut Frame<'_>, + state: &FileHistoryState, + area: ftui::core::geometry::Rect, +) { + if area.width < 10 || area.height < 3 { + return; // Terminal too small. 
+ } + + let x = area.x; + let max_x = area.right(); + let width = area.width; + let mut y = area.y; + + // --- Path input bar --- + render_path_input(frame, state, x, y, width); + y += 1; + + if area.height < 5 { + return; + } + + // --- Option toggles indicator --- + render_toggle_indicators(frame, state, x, y, width); + y += 1; + + // --- Loading indicator --- + if state.loading { + render_loading(frame, x, y, max_x); + return; + } + + let Some(result) = &state.result else { + render_empty_state(frame, x, y, max_x); + return; + }; + + // --- Rename chain (if followed) --- + if result.renames_followed && result.rename_chain.len() > 1 { + render_rename_chain(frame, &result.rename_chain, x, y, max_x); + y += 1; + } + + // --- Summary line --- + render_summary(frame, result, x, y, max_x); + y += 1; + + if result.merge_requests.is_empty() { + render_no_results(frame, x, y, max_x); + return; + } + + // Reserve 1 row for hint bar at the bottom. + let hint_y = area.bottom().saturating_sub(1); + let list_height = hint_y.saturating_sub(y) as usize; + + if list_height == 0 { + return; + } + + // --- MR list --- + render_mr_list(frame, result, state, x, y, width, list_height); + + // --- Hint bar --- + render_hint_bar(frame, x, hint_y, max_x); +} + +// --------------------------------------------------------------------------- +// Components +// --------------------------------------------------------------------------- + +fn render_path_input(frame: &mut Frame<'_>, state: &FileHistoryState, x: u16, y: u16, width: u16) { + let max_x = x + width; + let label = "Path: "; + let label_style = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + let after_label = frame.print_text_clipped(x, y, label, label_style, max_x); + + // Input text. + let input_style = Cell { + fg: if state.path_focused { TEXT } else { TEXT_MUTED }, + ..Cell::default() + }; + let display_text = if state.path_input.is_empty() && !state.path_focused { + "type a file path..." 
+    } else {
+        &state.path_input
+    };
+    frame.print_text_clipped(after_label, y, display_text, input_style, max_x);
+
+    // Cursor indicator.
+    if state.path_focused {
+        // `path_cursor` is a byte offset into `path_input`; using it directly
+        // as a column overshoots for multi-byte UTF-8 input. Snap back to a
+        // char boundary, then count chars to get the on-screen cell offset.
+        let mut byte_idx = state.path_cursor.min(state.path_input.len());
+        while byte_idx > 0 && !state.path_input.is_char_boundary(byte_idx) {
+            byte_idx -= 1;
+        }
+        let cursor_cols = state.path_input[..byte_idx].chars().count() as u16;
+        let cursor_x = after_label.saturating_add(cursor_cols);
+        if cursor_x < max_x {
+            let cursor_cell = Cell {
+                fg: PackedRgba::rgb(0x10, 0x0F, 0x0F), // dark bg
+                bg: TEXT,
+                ..Cell::default()
+            };
+            // Char under the cursor; a space when the cursor sits at the end.
+            let ch = state
+                .path_input
+                .get(byte_idx..)
+                .and_then(|s| s.chars().next())
+                .unwrap_or(' ');
+            frame.print_text_clipped(cursor_x, y, &ch.to_string(), cursor_cell, max_x);
+        }
+    }
+}
+
+fn render_toggle_indicators(
+    frame: &mut Frame<'_>,
+    state: &FileHistoryState,
+    x: u16,
+    y: u16,
+    width: u16,
+) {
+    let max_x = x + width;
+
+    let on_style = Cell {
+        fg: GREEN,
+        ..Cell::default()
+    };
+    let off_style = Cell {
+        fg: TEXT_MUTED,
+        ..Cell::default()
+    };
+
+    let renames_tag = if state.follow_renames {
+        "[renames:on]"
+    } else {
+        "[renames:off]"
+    };
+    let merged_tag = if state.merged_only {
+        "[merged:on]"
+    } else {
+        "[merged:off]"
+    };
+    let disc_tag = if state.show_discussions {
+        "[disc:on]"
+    } else {
+        "[disc:off]"
+    };
+
+    let renames_style = if state.follow_renames {
+        on_style
+    } else {
+        off_style
+    };
+    let merged_style = if state.merged_only {
+        on_style
+    } else {
+        off_style
+    };
+    let disc_style = if state.show_discussions {
+        on_style
+    } else {
+        off_style
+    };
+
+    let after_r = frame.print_text_clipped(x + 1, y, renames_tag, renames_style, max_x);
+    let after_m = frame.print_text_clipped(after_r + 1, y, merged_tag, merged_style, max_x);
+    frame.print_text_clipped(after_m + 1, y, disc_tag, disc_style, max_x);
+}
+
+fn render_rename_chain(frame: &mut Frame<'_>, chain: &[String], x: u16, y: u16, max_x: u16) {
+    let label_style = Cell {
+        fg: TEXT_MUTED,
+        ..Cell::default()
+    };
+    let chain_style = Cell {
+        fg: CYAN,
+        ..Cell::default()
+    };
+
+    let after_label = frame.print_text_clipped(x + 1, y, "Renames: ", label_style, max_x);
+    let chain_str = chain.join(" -> ");
+
frame.print_text_clipped(after_label, y, &chain_str, chain_style, max_x); +} + +fn render_summary(frame: &mut Frame<'_>, result: &FileHistoryResult, x: u16, y: u16, max_x: u16) { + let summary = if result.paths_searched > 1 { + format!( + "{} merge request{} across {} paths", + result.total_mrs, + if result.total_mrs == 1 { "" } else { "s" }, + result.paths_searched, + ) + } else { + format!( + "{} merge request{}", + result.total_mrs, + if result.total_mrs == 1 { "" } else { "s" }, + ) + }; + + let style = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + frame.print_text_clipped(x + 1, y, &summary, style, max_x); +} + +fn render_mr_list( + frame: &mut Frame<'_>, + result: &FileHistoryResult, + state: &FileHistoryState, + x: u16, + start_y: u16, + width: u16, + height: usize, +) { + let max_x = x + width; + let offset = state.scroll_offset as usize; + + for (i, mr) in result + .merge_requests + .iter() + .skip(offset) + .enumerate() + .take(height) + { + let y = start_y + i as u16; + let row_idx = offset + i; + let selected = row_idx == state.selected_mr_index; + + // Selection background. + if selected { + let bg_cell = Cell { + bg: SELECTION_BG, + ..Cell::default() + }; + for col in x..max_x { + frame.buffer.set(col, y, bg_cell); + } + } + + // State icon. 
+ let (icon, icon_color) = match mr.state.as_str() { + "merged" => ("M", GREEN), + "opened" => ("O", YELLOW), + "closed" => ("C", RED), + _ => ("?", TEXT_MUTED), + }; + let prefix = if selected { "> " } else { " " }; + let sel_bg = if selected { SELECTION_BG } else { BG_SURFACE }; + + let prefix_style = Cell { + fg: ACCENT, + bg: sel_bg, + ..Cell::default() + }; + let after_prefix = frame.print_text_clipped(x, y, prefix, prefix_style, max_x); + + let icon_style = Cell { + fg: icon_color, + bg: sel_bg, + ..Cell::default() + }; + let after_icon = frame.print_text_clipped(after_prefix, y, icon, icon_style, max_x); + + // !iid + let iid_str = format!(" !{}", mr.iid); + let ref_style = Cell { + fg: ACCENT, + bg: sel_bg, + ..Cell::default() + }; + let after_iid = frame.print_text_clipped(after_icon, y, &iid_str, ref_style, max_x); + + // Title (truncated). + let title = truncate_str(&mr.title, 35); + let title_style = Cell { + fg: TEXT, + bg: sel_bg, + ..Cell::default() + }; + let after_title = frame.print_text_clipped(after_iid + 1, y, &title, title_style, max_x); + + // @author + change_type + let meta = format!( + "@{} {}", + truncate_str(&mr.author_username, 12), + mr.change_type + ); + let meta_style = Cell { + fg: TEXT_MUTED, + bg: sel_bg, + ..Cell::default() + }; + frame.print_text_clipped(after_title + 1, y, &meta, meta_style, max_x); + } + + // Inline discussion snippets (rendered beneath MRs when toggled on). + // For simplicity, discussions are shown as a separate block after the MR list + // in this initial implementation. Full inline rendering (grouped by MR) is + // a follow-up enhancement. 
+ if state.show_discussions && !result.discussions.is_empty() { + let disc_start_y = start_y + result.merge_requests.len().min(height) as u16; + let remaining = height.saturating_sub(result.merge_requests.len().min(height)); + render_discussions(frame, result, x, disc_start_y, max_x, remaining); + } +} + +fn render_discussions( + frame: &mut Frame<'_>, + result: &FileHistoryResult, + x: u16, + start_y: u16, + max_x: u16, + max_rows: usize, +) { + if max_rows == 0 { + return; + } + + let sep_style = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + frame.print_text_clipped(x + 1, start_y, "-- discussions --", sep_style, max_x); + + let disc_style = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + let author_style = Cell { + fg: CYAN, + ..Cell::default() + }; + + for (i, disc) in result + .discussions + .iter() + .enumerate() + .take(max_rows.saturating_sub(1)) + { + let y = start_y + 1 + i as u16; + let after_author = frame.print_text_clipped( + x + 2, + y, + &format!("@{}: ", disc.author_username), + author_style, + max_x, + ); + let snippet = truncate_str(&disc.body_snippet, 60); + frame.print_text_clipped(after_author, y, &snippet, disc_style, max_x); + } +} + +fn render_loading(frame: &mut Frame<'_>, x: u16, y: u16, max_x: u16) { + let style = Cell { + fg: ACCENT, + ..Cell::default() + }; + frame.print_text_clipped(x + 1, y, "Loading file history...", style, max_x); +} + +fn render_empty_state(frame: &mut Frame<'_>, x: u16, y: u16, max_x: u16) { + let style = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + frame.print_text_clipped( + x + 1, + y, + "Enter a file path and press Enter to search.", + style, + max_x, + ); +} + +fn render_no_results(frame: &mut Frame<'_>, x: u16, y: u16, max_x: u16) { + let style = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + frame.print_text_clipped(x + 1, y, "No MRs found for this file.", style, max_x); + frame.print_text_clipped( + x + 1, + y + 1, + "Hint: Ensure 'lore sync' has fetched MR file changes.", + style, + 
max_x,
+    );
+}
+
+fn render_hint_bar(frame: &mut Frame<'_>, x: u16, y: u16, max_x: u16) {
+    let style = Cell {
+        fg: TEXT_MUTED,
+        bg: BG_SURFACE,
+        ..Cell::default()
+    };
+
+    // Fill background.
+    for col in x..max_x {
+        frame.buffer.set(col, y, style);
+    }
+
+    let hints = "/:path r:renames m:merged d:discussions Enter:open MR q:back";
+    frame.print_text_clipped(x + 1, y, hints, style, max_x);
+}
+
+/// Truncate a string to at most `max_chars` display characters.
+///
+/// When truncation occurs, the last kept char is replaced by `…` so the
+/// result never exceeds `max_chars`. A budget of 0 yields the empty string
+/// (previously this returned a bare `…`, exceeding the budget).
+fn truncate_str(s: &str, max_chars: usize) -> String {
+    if s.chars().count() <= max_chars {
+        s.to_string()
+    } else if max_chars == 0 {
+        String::new()
+    } else {
+        let truncated: String = s.chars().take(max_chars - 1).collect();
+        format!("{truncated}…")
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Tests
+// ---------------------------------------------------------------------------
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::state::file_history::{FileHistoryMr, FileHistoryResult, FileHistoryState};
+    use ftui::render::grapheme_pool::GraphemePool;
+
+    macro_rules!
with_frame { + ($width:expr, $height:expr, |$frame:ident| $body:block) => {{ + let mut pool = GraphemePool::new(); + let mut $frame = Frame::new($width, $height, &mut pool); + $body + }}; + } + + fn test_area(w: u16, h: u16) -> ftui::core::geometry::Rect { + ftui::core::geometry::Rect { + x: 0, + y: 0, + width: w, + height: h, + } + } + + #[test] + fn test_render_empty_no_panic() { + with_frame!(80, 24, |frame| { + let state = FileHistoryState::default(); + render_file_history(&mut frame, &state, test_area(80, 24)); + }); + } + + #[test] + fn test_render_tiny_terminal_noop() { + with_frame!(5, 2, |frame| { + let state = FileHistoryState::default(); + render_file_history(&mut frame, &state, test_area(5, 2)); + }); + } + + #[test] + fn test_render_loading() { + with_frame!(80, 24, |frame| { + let state = FileHistoryState { + loading: true, + ..FileHistoryState::default() + }; + render_file_history(&mut frame, &state, test_area(80, 24)); + }); + } + + #[test] + fn test_render_with_results() { + with_frame!(100, 30, |frame| { + let state = FileHistoryState { + result: Some(FileHistoryResult { + path: "src/lib.rs".into(), + rename_chain: vec!["src/lib.rs".into()], + renames_followed: false, + merge_requests: vec![ + FileHistoryMr { + iid: 42, + title: "Fix authentication flow".into(), + state: "merged".into(), + author_username: "alice".into(), + change_type: "modified".into(), + merged_at_ms: Some(1_700_000_000_000), + updated_at_ms: 1_700_000_000_000, + merge_commit_sha: Some("abc123".into()), + }, + FileHistoryMr { + iid: 39, + title: "Refactor module structure".into(), + state: "opened".into(), + author_username: "bob".into(), + change_type: "renamed".into(), + merged_at_ms: None, + updated_at_ms: 1_699_000_000_000, + merge_commit_sha: None, + }, + ], + discussions: vec![], + total_mrs: 2, + paths_searched: 1, + }), + ..FileHistoryState::default() + }; + render_file_history(&mut frame, &state, test_area(100, 30)); + }); + } + + #[test] + fn 
test_render_with_rename_chain() { + with_frame!(80, 24, |frame| { + let state = FileHistoryState { + result: Some(FileHistoryResult { + path: "src/old.rs".into(), + rename_chain: vec!["src/old.rs".into(), "src/new.rs".into()], + renames_followed: true, + merge_requests: vec![], + discussions: vec![], + total_mrs: 0, + paths_searched: 2, + }), + ..FileHistoryState::default() + }; + render_file_history(&mut frame, &state, test_area(80, 24)); + }); + } + + #[test] + fn test_truncate_str() { + assert_eq!(truncate_str("hello", 10), "hello"); + assert_eq!(truncate_str("hello world", 5), "hell…"); + assert_eq!(truncate_str("", 5), ""); + } +} diff --git a/crates/lore-tui/src/view/issue_detail.rs b/crates/lore-tui/src/view/issue_detail.rs index 959e542..0de71ee 100644 --- a/crates/lore-tui/src/view/issue_detail.rs +++ b/crates/lore-tui/src/view/issue_detail.rs @@ -295,7 +295,7 @@ fn render_metadata_row( if !meta.labels.is_empty() { cx = frame.print_text_clipped(cx, y, " | ", muted_style, max_x); let labels_text = meta.labels.join(", "); - let _ = frame.print_text_clipped(cx, y, &labels_text, muted_style, max_x); + cx = frame.print_text_clipped(cx, y, &labels_text, muted_style, max_x); } if !meta.assignees.is_empty() { diff --git a/crates/lore-tui/src/view/mod.rs b/crates/lore-tui/src/view/mod.rs index 75c3075..83a852a 100644 --- a/crates/lore-tui/src/view/mod.rs +++ b/crates/lore-tui/src/view/mod.rs @@ -6,28 +6,43 @@ //! It composes the layout: breadcrumb bar, screen content area, status //! bar, and optional overlays (help, error toast). 
+pub mod bootstrap; +pub mod command_palette; pub mod common; pub mod dashboard; +pub mod file_history; pub mod issue_detail; pub mod issue_list; pub mod mr_detail; pub mod mr_list; +pub mod search; +pub mod timeline; +pub mod trace; +pub mod who; use ftui::layout::{Constraint, Flex}; -use ftui::render::cell::PackedRgba; +use ftui::render::cell::{Cell, PackedRgba}; +use ftui::render::drawing::Draw; use ftui::render::frame::Frame; use crate::app::LoreApp; use crate::message::Screen; +use bootstrap::render_bootstrap; +use command_palette::render_command_palette; use common::{ render_breadcrumb, render_error_toast, render_help_overlay, render_loading, render_status_bar, }; use dashboard::render_dashboard; +use file_history::render_file_history; use issue_detail::render_issue_detail; use issue_list::render_issue_list; use mr_detail::render_mr_detail; use mr_list::render_mr_list; +use search::render_search; +use timeline::render_timeline; +use trace::render_trace; +use who::render_who; // --------------------------------------------------------------------------- // Colors (hardcoded Flexoki palette — will use Theme in Phase 2) @@ -41,6 +56,41 @@ const ERROR_BG: PackedRgba = PackedRgba::rgb(0xAF, 0x3A, 0x29); // red const ERROR_FG: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3); // tx const BORDER: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2 +fn render_sync_placeholder(frame: &mut Frame<'_>, area: ftui::core::geometry::Rect) { + if area.width < 10 || area.height < 5 { + return; + } + + let max_x = area.right(); + let center_y = area.y + area.height / 2; + + let title = "Sync"; + let title_x = area.x + area.width.saturating_sub(title.len() as u16) / 2; + frame.print_text_clipped( + title_x, + center_y.saturating_sub(1), + title, + Cell { + fg: ACCENT, + ..Cell::default() + }, + max_x, + ); + + let body = "Run `lore sync` in another terminal."; + let body_x = area.x + area.width.saturating_sub(body.len() as u16) / 2; + frame.print_text_clipped( + body_x, + 
center_y + 1, + body, + Cell { + fg: TEXT_MUTED, + ..Cell::default() + }, + max_x, + ); +} + // --------------------------------------------------------------------------- // render_screen // --------------------------------------------------------------------------- @@ -91,7 +141,11 @@ pub fn render_screen(frame: &mut Frame<'_>, app: &LoreApp) { render_loading(frame, content_area, load_state, TEXT, TEXT_MUTED, 0); // Per-screen content dispatch (other screens wired in later phases). - if screen == &Screen::Dashboard { + if screen == &Screen::Bootstrap { + render_bootstrap(frame, &app.state.bootstrap, content_area); + } else if screen == &Screen::Sync { + render_sync_placeholder(frame, content_area); + } else if screen == &Screen::Dashboard { render_dashboard(frame, &app.state.dashboard, content_area); } else if screen == &Screen::IssueList { render_issue_list(frame, &app.state.issue_list, content_area); @@ -101,6 +155,16 @@ pub fn render_screen(frame: &mut Frame<'_>, app: &LoreApp) { render_issue_detail(frame, &app.state.issue_detail, content_area, &*app.clock); } else if matches!(screen, Screen::MrDetail(_)) { render_mr_detail(frame, &app.state.mr_detail, content_area, &*app.clock); + } else if screen == &Screen::Search { + render_search(frame, &app.state.search, content_area); + } else if screen == &Screen::Timeline { + render_timeline(frame, &app.state.timeline, content_area, &*app.clock); + } else if screen == &Screen::Who { + render_who(frame, &app.state.who, content_area); + } else if screen == &Screen::FileHistory { + render_file_history(frame, &app.state.file_history, content_area); + } else if screen == &Screen::Trace { + render_trace(frame, &app.state.trace, content_area); } // --- Status bar --- @@ -122,6 +186,9 @@ pub fn render_screen(frame: &mut Frame<'_>, app: &LoreApp) { render_error_toast(frame, bounds, error_msg, ERROR_BG, ERROR_FG); } + // Command palette overlay. 
+ render_command_palette(frame, &app.state.command_palette, bounds); + // Help overlay. if app.state.show_help { render_help_overlay( @@ -199,4 +266,21 @@ mod tests { render_screen(&mut frame, &app); }); } + + #[test] + fn test_render_screen_sync_has_content() { + with_frame!(80, 24, |frame| { + let mut app = LoreApp::new(); + app.navigation.push(Screen::Sync); + render_screen(&mut frame, &app); + + let has_content = (20..60u16).any(|x| { + (8..16u16).any(|y| frame.buffer.get(x, y).is_some_and(|cell| !cell.is_empty())) + }); + assert!( + has_content, + "Expected sync placeholder content in center area" + ); + }); + } } diff --git a/crates/lore-tui/src/view/mr_detail.rs b/crates/lore-tui/src/view/mr_detail.rs index 7d6e425..64c8b0d 100644 --- a/crates/lore-tui/src/view/mr_detail.rs +++ b/crates/lore-tui/src/view/mr_detail.rs @@ -203,16 +203,20 @@ fn render_metadata_row( /// Render tab bar: `[Overview] [Files (3)] [Discussions (2)]`. fn render_tab_bar(frame: &mut Frame<'_>, state: &MrDetailState, x: u16, y: u16, max_x: u16) -> u16 { + // Use metadata counts before async data loads to avoid showing 0. + let disc_count = if state.discussions_loaded { + state.discussions.len() + } else { + state.metadata.as_ref().map_or(0, |m| m.discussion_count) + }; + let tabs = [ (MrTab::Overview, "Overview".to_string()), ( MrTab::Files, format!("Files ({})", state.file_changes.len()), ), - ( - MrTab::Discussions, - format!("Discussions ({})", state.discussions.len()), - ), + (MrTab::Discussions, format!("Discussions ({disc_count})")), ]; let mut cx = x; diff --git a/crates/lore-tui/src/view/search.rs b/crates/lore-tui/src/view/search.rs new file mode 100644 index 0000000..848c222 --- /dev/null +++ b/crates/lore-tui/src/view/search.rs @@ -0,0 +1,492 @@ +#![allow(dead_code)] // Phase 3: consumed by view/mod.rs screen dispatch + +//! Search screen view — query bar, mode indicator, and results list. +//! +//! Layout: +//! ```text +//! +--[ FTS ]--- Search ──────────────────────+ +//! 
| > query text here_                       |
//! +───────────────────────────────────────────+
//! | #42 Fix login bug            group/proj   |
//! | !99 Add retry logic          group/proj   |
//! | #10 Update docs              other/repo   |
//! +───────────────────────────────────────────+
//! | Tab: mode  /: focus  j/k: nav  Enter: go  |
//! +───────────────────────────────────────────+
//! ```

use ftui::core::geometry::Rect;
use ftui::render::cell::Cell;
use ftui::render::drawing::Draw;
use ftui::render::frame::Frame;

use crate::message::EntityKind;
use crate::state::search::SearchState;

use super::{ACCENT, BG_SURFACE, BORDER, TEXT, TEXT_MUTED};

/// Number of terminal columns a string occupies, approximated as its
/// `char` count (wide graphemes get no special treatment), saturated
/// to fit in a `u16`.
fn text_cell_width(text: &str) -> u16 {
    let cols = text.chars().count();
    cols.min(u16::MAX as usize) as u16
}

/// Translate a byte-offset cursor position into a display-column offset.
///
/// The offset is first clamped to the query length, then snapped backwards
/// onto the nearest `char` boundary (index 0 is always a boundary, so the
/// loop terminates) so the slice below can never panic on multi-byte text.
fn cursor_cell_offset(query: &str, cursor: usize) -> u16 {
    let mut at = cursor.min(query.len());
    while !query.is_char_boundary(at) {
        at -= 1;
    }
    text_cell_width(&query[..at])
}

// ---------------------------------------------------------------------------
// render_search
// ---------------------------------------------------------------------------

/// Render the search screen.
///
/// Composes: mode indicator + query bar (row 0), separator (row 1),
/// results list or empty-state/warning text (fill), and a keybinding
/// hint bar on the bottom row.
+pub fn render_search(frame: &mut Frame<'_>, state: &SearchState, area: Rect) { + if area.height < 4 || area.width < 20 { + return; + } + + let mut y = area.y; + let max_x = area.right(); + + // -- Mode indicator + query bar ------------------------------------------ + y = render_query_bar(frame, state, area.x, y, area.width, max_x); + + // -- Separator ----------------------------------------------------------- + if y >= area.bottom() { + return; + } + let sep_cell = Cell { + fg: BORDER, + bg: BG_SURFACE, + ..Cell::default() + }; + let sep_line = "─".repeat(area.width as usize); + frame.print_text_clipped(area.x, y, &sep_line, sep_cell, max_x); + y += 1; + + // -- No-index warning ---------------------------------------------------- + if !state.capabilities.has_any_index() { + if y >= area.bottom() { + return; + } + let warn_cell = Cell { + fg: ACCENT, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped(area.x + 1, y, "No search indexes found.", warn_cell, max_x); + y += 1; + if y < area.bottom() { + let hint_cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped( + area.x + 1, + y, + "Run: lore generate-docs && lore embed", + hint_cell, + max_x, + ); + } + return; + } + + // -- Results list -------------------------------------------------------- + let bottom_hint_row = area.bottom().saturating_sub(1); + let list_bottom = bottom_hint_row; + let list_height = list_bottom.saturating_sub(y) as usize; + + if list_height == 0 { + return; + } + + if state.results.is_empty() { + render_empty_state(frame, state, area.x + 1, y, max_x); + } else { + render_result_list(frame, state, area.x, y, area.width, list_height); + } + + // -- Bottom hint bar ----------------------------------------------------- + if bottom_hint_row < area.bottom() { + render_hint_bar(frame, state, area.x, bottom_hint_row, max_x); + } +} + +// --------------------------------------------------------------------------- +// Query bar +// 
--------------------------------------------------------------------------- + +/// Render the mode badge and query input. Returns the next y position. +fn render_query_bar( + frame: &mut Frame<'_>, + state: &SearchState, + x: u16, + y: u16, + width: u16, + max_x: u16, +) -> u16 { + // Mode badge: [ FTS ] or [ Hybrid ] or [ Vec ] + let mode_label = format!("[ {} ]", state.mode.label()); + let mode_cell = Cell { + fg: ACCENT, + bg: BG_SURFACE, + ..Cell::default() + }; + let after_mode = frame.print_text_clipped(x, y, &mode_label, mode_cell, max_x); + + // Space separator. + let after_sep = frame.print_text_clipped( + after_mode, + y, + " ", + Cell { + bg: BG_SURFACE, + ..Cell::default() + }, + max_x, + ); + + // Prompt. + let prompt = "> "; + let prompt_cell = Cell { + fg: ACCENT, + bg: BG_SURFACE, + ..Cell::default() + }; + let after_prompt = frame.print_text_clipped(after_sep, y, prompt, prompt_cell, max_x); + + // Query text (or placeholder). + let (display_text, text_fg) = if state.query.is_empty() { + ("Type to search...", TEXT_MUTED) + } else { + (state.query.as_str(), TEXT) + }; + let text_cell = Cell { + fg: text_fg, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped(after_prompt, y, display_text, text_cell, max_x); + + // Cursor (only when focused and has query text). + if state.query_focused && !state.query.is_empty() { + let cursor_x = after_prompt + cursor_cell_offset(&state.query, state.cursor); + if cursor_x < max_x { + let cursor_cell = Cell { + fg: BG_SURFACE, + bg: TEXT, + ..Cell::default() + }; + let cursor_char = state + .query + .get(state.cursor..) + .and_then(|s| s.chars().next()) + .unwrap_or(' '); + frame.print_text_clipped(cursor_x, y, &cursor_char.to_string(), cursor_cell, max_x); + } + } + + // Loading indicator (right-aligned). + if state.loading { + let loading_text = " searching... 
"; + let loading_x = (x + width).saturating_sub(loading_text.len() as u16); + let loading_cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped(loading_x, y, loading_text, loading_cell, max_x); + } + + y + 1 +} + +// --------------------------------------------------------------------------- +// Empty state +// --------------------------------------------------------------------------- + +/// Show a message when there are no results. +fn render_empty_state(frame: &mut Frame<'_>, state: &SearchState, x: u16, y: u16, max_x: u16) { + let msg = if state.query.is_empty() { + "Enter a search query above" + } else { + "No results found" + }; + let cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped(x, y, msg, cell, max_x); +} + +// --------------------------------------------------------------------------- +// Result list +// --------------------------------------------------------------------------- + +/// Render the scrollable list of search results. +fn render_result_list( + frame: &mut Frame<'_>, + state: &SearchState, + x: u16, + start_y: u16, + width: u16, + list_height: usize, +) { + let max_x = x + width; + + // Scroll so selected item is always visible. 
+ let scroll_offset = if state.selected_index >= list_height { + state.selected_index - list_height + 1 + } else { + 0 + }; + + let normal = Cell { + fg: TEXT, + bg: BG_SURFACE, + ..Cell::default() + }; + let selected = Cell { + fg: BG_SURFACE, + bg: ACCENT, + ..Cell::default() + }; + let muted = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + let muted_selected = Cell { + fg: BG_SURFACE, + bg: ACCENT, + ..Cell::default() + }; + + for (i, result) in state + .results + .iter() + .skip(scroll_offset) + .enumerate() + .take(list_height) + { + let y = start_y + i as u16; + let is_selected = i + scroll_offset == state.selected_index; + + let (label_style, detail_style) = if is_selected { + (selected, muted_selected) + } else { + (normal, muted) + }; + + // Fill row background for selected item. + if is_selected { + for col in x..max_x { + frame.buffer.set(col, y, selected); + } + } + + // Entity prefix: # for issues, ! for MRs. + let prefix = match result.key.kind { + EntityKind::Issue => "#", + EntityKind::MergeRequest => "!", + }; + let iid_str = format!("{}{}", prefix, result.key.iid); + let after_iid = frame.print_text_clipped(x + 1, y, &iid_str, label_style, max_x); + + // Title. + let after_title = + frame.print_text_clipped(after_iid + 1, y, &result.title, label_style, max_x); + + // Project path (right-aligned). + let path_width = result.project_path.len() as u16 + 2; + let path_x = max_x.saturating_sub(path_width); + if path_x > after_title + 1 { + frame.print_text_clipped(path_x, y, &result.project_path, detail_style, max_x); + } + } + + // Scroll indicator (overlaid on last visible row when results overflow). 
+ if state.results.len() > list_height && list_height > 0 { + let indicator = format!( + " {}/{} ", + (scroll_offset + list_height).min(state.results.len()), + state.results.len() + ); + let ind_x = max_x.saturating_sub(indicator.len() as u16); + let ind_y = start_y + list_height.saturating_sub(1) as u16; + let ind_cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped(ind_x, ind_y, &indicator, ind_cell, max_x); + } +} + +// --------------------------------------------------------------------------- +// Hint bar +// --------------------------------------------------------------------------- + +/// Render keybinding hints at the bottom of the search screen. +fn render_hint_bar(frame: &mut Frame<'_>, state: &SearchState, x: u16, y: u16, max_x: u16) { + let hint_cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + + let hints = if state.query_focused { + "Tab: mode Esc: blur Enter: search" + } else { + "Tab: mode /: focus j/k: nav Enter: open" + }; + + frame.print_text_clipped(x + 1, y, hints, hint_cell, max_x); +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::message::{EntityKey, SearchResult}; + use crate::state::search::{SearchCapabilities, SearchState}; + use ftui::render::grapheme_pool::GraphemePool; + + macro_rules! 
with_frame { + ($width:expr, $height:expr, |$frame:ident| $body:block) => {{ + let mut pool = GraphemePool::new(); + let mut $frame = Frame::new($width, $height, &mut pool); + $body + }}; + } + + fn fts_caps() -> SearchCapabilities { + SearchCapabilities { + has_fts: true, + has_embeddings: false, + embedding_coverage_pct: 0.0, + } + } + + fn sample_results(count: usize) -> Vec { + (0..count) + .map(|i| SearchResult { + key: EntityKey::issue(1, (i + 1) as i64), + title: format!("Result {}", i + 1), + score: 1.0 - (i as f64 * 0.1), + snippet: "matched text".into(), + project_path: "group/project".into(), + }) + .collect() + } + + #[test] + fn test_render_search_empty_no_panic() { + with_frame!(80, 24, |frame| { + let state = SearchState::default(); + render_search(&mut frame, &state, Rect::new(0, 0, 80, 24)); + }); + } + + #[test] + fn test_render_search_with_capabilities_no_panic() { + with_frame!(80, 24, |frame| { + let mut state = SearchState::default(); + state.enter(fts_caps()); + render_search(&mut frame, &state, Rect::new(0, 0, 80, 24)); + }); + } + + #[test] + fn test_render_search_with_results_no_panic() { + with_frame!(100, 30, |frame| { + let mut state = SearchState::default(); + state.enter(fts_caps()); + state.results = sample_results(5); + render_search(&mut frame, &state, Rect::new(0, 0, 100, 30)); + }); + } + + #[test] + fn test_render_search_with_query_no_panic() { + with_frame!(80, 24, |frame| { + let mut state = SearchState::default(); + state.enter(fts_caps()); + state.insert_char('h'); + state.insert_char('e'); + state.insert_char('l'); + state.insert_char('l'); + state.insert_char('o'); + state.results = sample_results(3); + render_search(&mut frame, &state, Rect::new(0, 0, 80, 24)); + }); + } + + #[test] + fn test_render_search_with_selection_no_panic() { + with_frame!(80, 24, |frame| { + let mut state = SearchState::default(); + state.enter(fts_caps()); + state.results = sample_results(10); + state.select_next(); + state.select_next(); + 
render_search(&mut frame, &state, Rect::new(0, 0, 80, 24)); + }); + } + + #[test] + fn test_render_search_tiny_terminal_noop() { + with_frame!(15, 3, |frame| { + let mut state = SearchState::default(); + state.enter(fts_caps()); + render_search(&mut frame, &state, Rect::new(0, 0, 15, 3)); + }); + } + + #[test] + fn test_render_search_no_indexes_warning() { + with_frame!(80, 24, |frame| { + let state = SearchState::default(); + // capabilities are default (no indexes) + render_search(&mut frame, &state, Rect::new(0, 0, 80, 24)); + // Should show "No search indexes found" without panicking. + }); + } + + #[test] + fn test_render_search_loading_indicator() { + with_frame!(80, 24, |frame| { + let mut state = SearchState::default(); + state.enter(fts_caps()); + state.loading = true; + render_search(&mut frame, &state, Rect::new(0, 0, 80, 24)); + }); + } + + #[test] + fn test_render_search_scrollable_results() { + with_frame!(80, 10, |frame| { + let mut state = SearchState::default(); + state.enter(fts_caps()); + state.results = sample_results(20); + // Select item near the bottom to trigger scrolling. + for _ in 0..15 { + state.select_next(); + } + render_search(&mut frame, &state, Rect::new(0, 0, 80, 10)); + }); + } +} diff --git a/crates/lore-tui/src/view/timeline.rs b/crates/lore-tui/src/view/timeline.rs new file mode 100644 index 0000000..5ec0a5c --- /dev/null +++ b/crates/lore-tui/src/view/timeline.rs @@ -0,0 +1,449 @@ +#![allow(dead_code)] // Phase 3: consumed by view/mod.rs screen dispatch + +//! Timeline screen view — chronological event stream with color-coded types. +//! +//! Layout: +//! ```text +//! +─── Timeline ──────────────────────────────+ +//! | 3h ago #42 Created: Fix login bug | +//! | 2h ago #42 State changed to closed | +//! | 1h ago !99 Label added: backend | +//! | 30m ago !99 Merged | +//! +───────────────────────────────────────────+ +//! | j/k: nav Enter: open q: back | +//! +───────────────────────────────────────────+ +//! 
``` + +use ftui::core::geometry::Rect; +use ftui::render::cell::{Cell, PackedRgba}; +use ftui::render::drawing::Draw; +use ftui::render::frame::Frame; + +use crate::clock::Clock; +use crate::message::TimelineEventKind; +use crate::state::timeline::TimelineState; +use crate::view::common::discussion_tree::format_relative_time; + +use super::{ACCENT, BG_SURFACE, BORDER, TEXT, TEXT_MUTED}; + +// --------------------------------------------------------------------------- +// Colors for event kinds (Flexoki palette) +// --------------------------------------------------------------------------- + +const GREEN: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39); // Created +const YELLOW: PackedRgba = PackedRgba::rgb(0xD0, 0xA2, 0x15); // StateChanged +const RED: PackedRgba = PackedRgba::rgb(0xAF, 0x3A, 0x29); // Closed (via StateChanged) +const PURPLE: PackedRgba = PackedRgba::rgb(0x8B, 0x7E, 0xC8); // Merged +const CYAN: PackedRgba = PackedRgba::rgb(0x3A, 0xA9, 0x9F); // Label +const SELECTED_FG: PackedRgba = PackedRgba::rgb(0x10, 0x0F, 0x0F); // bg (dark) + +/// Map event kind to its display color. +fn event_color(kind: TimelineEventKind, detail: Option<&str>) -> PackedRgba { + match kind { + TimelineEventKind::Created => GREEN, + TimelineEventKind::StateChanged => { + if detail == Some("closed") { + RED + } else { + YELLOW + } + } + TimelineEventKind::LabelAdded | TimelineEventKind::LabelRemoved => CYAN, + TimelineEventKind::MilestoneSet | TimelineEventKind::MilestoneRemoved => ACCENT, + TimelineEventKind::Merged => PURPLE, + } +} + +// --------------------------------------------------------------------------- +// render_timeline +// --------------------------------------------------------------------------- + +/// Render the timeline screen. +/// +/// Composes: scope header (row 0), separator (row 1), +/// event list (fill), and a hint bar at the bottom. 
pub fn render_timeline(
    frame: &mut Frame<'_>,
    state: &TimelineState,
    area: Rect,
    clock: &dyn Clock,
) {
    // Terminals smaller than 20x4 are left untouched.
    if area.height < 4 || area.width < 20 {
        return;
    }

    use crate::message::EntityKind;
    use crate::state::timeline::TimelineScope;

    let left = area.x;
    let max_x = area.right();
    let mut row = area.y;

    // Describe the active scope for the header line.
    let scope_label = match &state.scope {
        TimelineScope::All => "All events".to_string(),
        TimelineScope::Entity(key) => {
            let sigil = match key.kind {
                EntityKind::Issue => "#",
                EntityKind::MergeRequest => "!",
            };
            format!("Entity {sigil}{}", key.iid)
        }
        TimelineScope::Author(name) => format!("Author: {name}"),
    };

    let header_cell = Cell {
        fg: ACCENT,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    let header = format!("Timeline: {scope_label}");
    frame.print_text_clipped(left, row, &header, header_cell, max_x);
    row += 1;

    if row >= area.bottom() {
        return;
    }

    // Horizontal rule under the header.
    let sep_cell = Cell {
        fg: BORDER,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    let rule: String = "─".repeat(area.width as usize);
    frame.print_text_clipped(left, row, &rule, sep_cell, max_x);
    row += 1;

    // The hint bar claims the final row; the event list gets the rest.
    let hint_row = area.bottom().saturating_sub(1);
    let list_height = hint_row.saturating_sub(row) as usize;
    if list_height == 0 {
        return;
    }

    if state.events.is_empty() {
        render_empty_state(frame, state, left + 1, row, max_x);
    } else {
        render_event_list(frame, state, left, row, area.width, list_height, clock);
    }

    if hint_row < area.bottom() {
        render_hint_bar(frame, left, hint_row, max_x);
    }
}

// ---------------------------------------------------------------------------
// Empty state
// ---------------------------------------------------------------------------

fn render_empty_state(frame: &mut Frame<'_>, state: &TimelineState, x: u16, y: u16, max_x: u16) {
    let msg = if state.loading {
        "Loading timeline..."
+ } else { + "No timeline events found" + }; + let cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped(x, y, msg, cell, max_x); +} + +// --------------------------------------------------------------------------- +// Event list +// --------------------------------------------------------------------------- + +/// Render the scrollable list of timeline events. +fn render_event_list( + frame: &mut Frame<'_>, + state: &TimelineState, + x: u16, + start_y: u16, + width: u16, + list_height: usize, + clock: &dyn Clock, +) { + let max_x = x + width; + + // Scroll so selected item is always visible. + let scroll_offset = if state.selected_index >= list_height { + state.selected_index - list_height + 1 + } else { + 0 + }; + + let selected_cell = Cell { + fg: SELECTED_FG, + bg: ACCENT, + ..Cell::default() + }; + + for (i, event) in state + .events + .iter() + .skip(scroll_offset) + .enumerate() + .take(list_height) + { + let y = start_y + i as u16; + let is_selected = i + scroll_offset == state.selected_index; + + let kind_color = event_color(event.event_kind, event.detail.as_deref()); + + // Fill row background for selected item. + if is_selected { + for col in x..max_x { + frame.buffer.set(col, y, selected_cell); + } + } + + let mut cx = x + 1; + + // Timestamp gutter (right-aligned in ~10 chars). 
+ let time_str = format_relative_time(event.timestamp_ms, clock); + let time_width = 10u16; + let time_x = cx + time_width.saturating_sub(time_str.len() as u16); + let time_cell = if is_selected { + selected_cell + } else { + Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + } + }; + frame.print_text_clipped(time_x, y, &time_str, time_cell, cx + time_width); + cx += time_width + 1; + + // Entity prefix: #42 or !99 + let prefix = match event.entity_key.kind { + crate::message::EntityKind::Issue => "#", + crate::message::EntityKind::MergeRequest => "!", + }; + let entity_str = format!("{prefix}{}", event.entity_key.iid); + let entity_cell = if is_selected { + selected_cell + } else { + Cell { + fg: kind_color, + bg: BG_SURFACE, + ..Cell::default() + } + }; + let after_entity = frame.print_text_clipped(cx, y, &entity_str, entity_cell, max_x); + cx = after_entity + 1; + + // Event kind badge. + let badge = event.event_kind.label(); + let badge_cell = if is_selected { + selected_cell + } else { + Cell { + fg: kind_color, + bg: BG_SURFACE, + ..Cell::default() + } + }; + let after_badge = frame.print_text_clipped(cx, y, badge, badge_cell, max_x); + cx = after_badge + 1; + + // Summary text. + let summary_cell = if is_selected { + selected_cell + } else { + Cell { + fg: TEXT, + bg: BG_SURFACE, + ..Cell::default() + } + }; + frame.print_text_clipped(cx, y, &event.summary, summary_cell, max_x); + + // Actor (right-aligned) if there's room. + if let Some(ref actor) = event.actor { + let actor_str = format!(" {actor} "); + let actor_width = actor_str.len() as u16; + let actor_x = max_x.saturating_sub(actor_width); + if actor_x > cx + 5 { + let actor_cell = if is_selected { + selected_cell + } else { + Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + } + }; + frame.print_text_clipped(actor_x, y, &actor_str, actor_cell, max_x); + } + } + } + + // Scroll indicator (overlaid on last visible row when events overflow). 
+ if state.events.len() > list_height && list_height > 0 { + let indicator = format!( + " {}/{} ", + (scroll_offset + list_height).min(state.events.len()), + state.events.len() + ); + let ind_x = max_x.saturating_sub(indicator.len() as u16); + let ind_y = start_y + list_height.saturating_sub(1) as u16; + let ind_cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped(ind_x, ind_y, &indicator, ind_cell, max_x); + } +} + +// --------------------------------------------------------------------------- +// Hint bar +// --------------------------------------------------------------------------- + +fn render_hint_bar(frame: &mut Frame<'_>, x: u16, y: u16, max_x: u16) { + let hint_cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + let hints = "j/k: nav Enter: open q: back"; + frame.print_text_clipped(x + 1, y, hints, hint_cell, max_x); +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::clock::FakeClock; + use crate::message::{EntityKey, TimelineEvent, TimelineEventKind}; + use crate::state::timeline::TimelineState; + use ftui::render::grapheme_pool::GraphemePool; + + macro_rules! 
with_frame {
        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
            let mut pool = GraphemePool::new();
            let mut $frame = Frame::new($width, $height, &mut pool);
            $body
        }};
    }

    // Build a minimal issue-keyed timeline event for render tests.
    fn sample_event(timestamp_ms: i64, iid: i64, kind: TimelineEventKind) -> TimelineEvent {
        TimelineEvent {
            timestamp_ms,
            entity_key: EntityKey::issue(1, iid),
            event_kind: kind,
            summary: format!("Event for #{iid}"),
            detail: None,
            actor: Some("alice".into()),
            project_path: "group/project".into(),
        }
    }

    // Fixed "now" so relative-time rendering is deterministic in tests.
    fn test_clock() -> FakeClock {
        FakeClock::from_ms(1_700_000_100_000)
    }

    #[test]
    fn test_render_timeline_empty_no_panic() {
        with_frame!(80, 24, |frame| {
            let state = TimelineState::default();
            let clock = test_clock();
            render_timeline(&mut frame, &state, Rect::new(0, 0, 80, 24), &clock);
        });
    }

    #[test]
    fn test_render_timeline_with_events_no_panic() {
        with_frame!(100, 30, |frame| {
            let state = TimelineState {
                events: vec![
                    sample_event(1_700_000_000_000, 1, TimelineEventKind::Created),
                    sample_event(1_700_000_050_000, 2, TimelineEventKind::StateChanged),
                    sample_event(1_700_000_080_000, 3, TimelineEventKind::Merged),
                ],
                ..TimelineState::default()
            };
            let clock = test_clock();
            render_timeline(&mut frame, &state, Rect::new(0, 0, 100, 30), &clock);
        });
    }

    #[test]
    fn test_render_timeline_with_selection_no_panic() {
        with_frame!(80, 24, |frame| {
            let state = TimelineState {
                events: vec![
                    sample_event(1_700_000_000_000, 1, TimelineEventKind::Created),
                    sample_event(1_700_000_050_000, 2, TimelineEventKind::LabelAdded),
                ],
                selected_index: 1,
                ..TimelineState::default()
            };
            let clock = test_clock();
            render_timeline(&mut frame, &state, Rect::new(0, 0, 80, 24), &clock);
        });
    }

    #[test]
    fn test_render_timeline_tiny_terminal_noop() {
        with_frame!(15, 3, |frame| {
            let state = TimelineState::default();
            let clock = test_clock();
            render_timeline(&mut frame, &state, Rect::new(0, 0, 15, 3), &clock);
        });
    }

    #[test]
    fn test_render_timeline_loading_state() {
        with_frame!(80, 24, |frame| {
            let state = TimelineState {
                loading: true,
                ..TimelineState::default()
            };
            let clock = test_clock();
            render_timeline(&mut frame, &state, Rect::new(0, 0, 80, 24), &clock);
        });
    }

    #[test]
    fn test_render_timeline_scrollable_events() {
        with_frame!(80, 10, |frame| {
            let state = TimelineState {
                events: (0..20)
                    .map(|i| {
                        sample_event(
                            1_700_000_000_000 + i * 10_000,
                            i + 1,
                            TimelineEventKind::Created,
                        )
                    })
                    .collect(),
                selected_index: 15,
                ..TimelineState::default()
            };
            let clock = test_clock();
            render_timeline(&mut frame, &state, Rect::new(0, 0, 80, 10), &clock);
        });
    }

    #[test]
    fn test_event_color_created_is_green() {
        assert_eq!(event_color(TimelineEventKind::Created, None), GREEN);
    }

    #[test]
    fn test_event_color_closed_is_red() {
        assert_eq!(
            event_color(TimelineEventKind::StateChanged, Some("closed")),
            RED
        );
    }

    #[test]
    fn test_event_color_merged_is_purple() {
        assert_eq!(event_color(TimelineEventKind::Merged, None), PURPLE);
    }
}
diff --git a/crates/lore-tui/src/view/trace.rs b/crates/lore-tui/src/view/trace.rs
new file mode 100644
index 0000000..ba82820
--- /dev/null
+++ b/crates/lore-tui/src/view/trace.rs
@@ -0,0 +1,627 @@
#![allow(dead_code)]

//! Trace view — file → MR → issue chain drill-down.
//!
//! Layout:
//! ```text
//! +-----------------------------------+
//! | Path: [src/main.rs_] [R] [D] | <- path input + toggles
//! | Renames: old.rs -> new.rs | <- shown when renames followed
//! | 3 trace chains | <- summary
//! +-----------------------------------+
//! | > M !42 Fix auth @alice modified | <- collapsed chain (selected)
//! | O !39 Refactor @bob renamed | <- collapsed chain
//! | M !35 Init @carol added | <- expanded chain header
//! | #12 Bug: login broken (close) | <- linked issue
//! | @dave: "This path needs..." | <- discussion snippet
//! +-----------------------------------+
//! | Enter:expand r:renames d:disc | <- hint bar
//! +-----------------------------------+
//! ```

use ftui::render::cell::{Cell, PackedRgba};
use ftui::render::drawing::Draw;
use ftui::render::frame::Frame;

use crate::state::trace::TraceState;
use lore::core::trace::TraceResult;

// ---------------------------------------------------------------------------
// Colors (Flexoki palette)
// ---------------------------------------------------------------------------

const TEXT: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3); // tx
const TEXT_MUTED: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2
const BG_SURFACE: PackedRgba = PackedRgba::rgb(0x28, 0x28, 0x24); // bg-2
const ACCENT: PackedRgba = PackedRgba::rgb(0xDA, 0x70, 0x2C); // orange
const GREEN: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39); // green
const CYAN: PackedRgba = PackedRgba::rgb(0x3A, 0xA9, 0x9F); // cyan
const YELLOW: PackedRgba = PackedRgba::rgb(0xD0, 0xA2, 0x15); // yellow
const RED: PackedRgba = PackedRgba::rgb(0xAF, 0x3A, 0x29); // red
const PURPLE: PackedRgba = PackedRgba::rgb(0x8B, 0x7E, 0xC8); // purple
const SELECTION_BG: PackedRgba = PackedRgba::rgb(0x34, 0x34, 0x31); // bg-3

// ---------------------------------------------------------------------------
// Public entry point
// ---------------------------------------------------------------------------

/// Render the Trace screen.
pub fn render_trace(frame: &mut Frame<'_>, state: &TraceState, area: ftui::core::geometry::Rect) {
    // Bail on terminals too small to show even the input row.
    if area.width < 10 || area.height < 3 {
        return;
    }

    let x = area.x;
    let max_x = area.right();
    let width = area.width;
    let mut y = area.y;

    // --- Path input ---
    render_path_input(frame, state, x, y, width);
    y += 1;

    // Below 5 rows only the path input fits.
    if area.height < 5 {
        return;
    }

    // --- Toggle indicators ---
    render_toggle_indicators(frame, state, x, y, width);
    y += 1;

    // --- Loading ---
    if state.loading {
        render_loading(frame, x, y, max_x);
        return;
    }

    // No result yet: show the call-to-action and stop.
    let Some(result) = &state.result else {
        render_empty_state(frame, x, y, max_x);
        return;
    };

    // --- Rename chain ---
    // Only shown when renames were followed AND the path actually changed.
    if result.renames_followed && result.resolved_paths.len() > 1 {
        render_rename_chain(frame, &result.resolved_paths, x, y, max_x);
        y += 1;
    }

    // --- Summary ---
    render_summary(frame, result, x, y, max_x);
    y += 1;

    if result.trace_chains.is_empty() {
        render_no_results(frame, x, y, max_x);
        return;
    }

    // Reserve hint bar.
    let hint_y = area.bottom().saturating_sub(1);
    let list_height = hint_y.saturating_sub(y) as usize;

    if list_height == 0 {
        return;
    }

    // --- Chain list ---
    render_chain_list(frame, result, state, x, y, width, list_height);

    // --- Hint bar ---
    render_hint_bar(frame, x, hint_y, max_x);
}

// ---------------------------------------------------------------------------
// Components
// ---------------------------------------------------------------------------

/// Render the path input row: a muted label, the typed path (or a
/// placeholder when empty and unfocused), and — further below — the cursor.
fn render_path_input(frame: &mut Frame<'_>, state: &TraceState, x: u16, y: u16, width: u16) {
    let max_x = x + width;
    let label = "Path: ";
    let label_style = Cell {
        fg: TEXT_MUTED,
        ..Cell::default()
    };
    let after_label = frame.print_text_clipped(x, y, label, label_style, max_x);

    let input_style = Cell {
        fg: if state.path_focused { TEXT } else { TEXT_MUTED },
        ..Cell::default()
    };
    let display_text = if state.path_input.is_empty() && !state.path_focused {
        "type a file path..."
    } else {
        &state.path_input
    };
    frame.print_text_clipped(after_label, y, display_text, input_style, max_x);

    // Cursor.
    if state.path_focused {
        // NOTE(review): cursor column assumes one terminal column per char and
        // that `path_cursor` is a char index — confirm for wide graphemes.
        let cursor_x = after_label + state.path_cursor as u16;
        if cursor_x < max_x {
            let cursor_cell = Cell {
                fg: PackedRgba::rgb(0x10, 0x0F, 0x0F),
                bg: TEXT,
                ..Cell::default()
            };
            let ch = state
                .path_input
                .chars()
                .nth(state.path_cursor)
                .unwrap_or(' ');
            frame.print_text_clipped(cursor_x, y, &ch.to_string(), cursor_cell, max_x);
        }
    }
}

/// Render the `[renames:on/off]` and `[disc:on/off]` toggle tags; active
/// toggles are green, inactive ones muted.
fn render_toggle_indicators(frame: &mut Frame<'_>, state: &TraceState, x: u16, y: u16, width: u16) {
    let max_x = x + width;

    let on_style = Cell {
        fg: GREEN,
        ..Cell::default()
    };
    let off_style = Cell {
        fg: TEXT_MUTED,
        ..Cell::default()
    };

    let renames_tag = if state.follow_renames {
        "[renames:on]"
    } else {
        "[renames:off]"
    };
    let disc_tag = if state.include_discussions {
        "[disc:on]"
    } else {
        "[disc:off]"
    };

    let renames_style = if state.follow_renames {
        on_style
    } else {
        off_style
    };
    let disc_style = if state.include_discussions {
        on_style
    } else {
        off_style
    };

    let after_r = frame.print_text_clipped(x + 1, y, renames_tag, renames_style, max_x);
    frame.print_text_clipped(after_r + 1, y, disc_tag, disc_style, max_x);
}

/// Render the followed-rename chain as `a -> b -> c`, eliding the middle of
/// chains longer than five entries.
fn render_rename_chain(frame: &mut Frame<'_>, paths: &[String], x: u16, y: u16, max_x: u16) {
    let label_style = Cell {
        fg: TEXT_MUTED,
        ..Cell::default()
    };
    let chain_style = Cell {
        fg: CYAN,
        ..Cell::default()
    };

    let after_label = frame.print_text_clipped(x + 1, y, "Renames: ", label_style, max_x);

    // For long chains, show first 2 + "..." + last.
    let chain_str = if paths.len() > 5 {
        let first_two = paths[..2].join(" -> ");
        let last = &paths[paths.len() - 1];
        format!("{first_two} -> ... ({} more) -> {last}", paths.len() - 3)
    } else {
        paths.join(" -> ")
    };
    frame.print_text_clipped(after_label, y, &chain_str, chain_style, max_x);
}

/// Render the "N trace chain(s)" summary line.
fn render_summary(frame: &mut Frame<'_>, result: &TraceResult, x: u16, y: u16, max_x: u16) {
    let summary = format!(
        "{} trace chain{}",
        result.total_chains,
        if result.total_chains == 1 { "" } else { "s" },
    );

    let style = Cell {
        fg: TEXT_MUTED,
        ..Cell::default()
    };
    frame.print_text_clipped(x + 1, y, &summary, style, max_x);
}

/// Render the scrollable chain list: one row per chain, plus indented issue
/// and discussion rows for chains in `state.expanded_chains`.
fn render_chain_list(
    frame: &mut Frame<'_>,
    result: &TraceResult,
    state: &TraceState,
    x: u16,
    start_y: u16,
    width: u16,
    height: usize,
) {
    let max_x = x + width;
    // `row` counts emitted rows (chain headers AND expanded sub-rows).
    let mut row = 0;

    for (chain_idx, chain) in result.trace_chains.iter().enumerate() {
        if row >= height {
            break;
        }

        let y = start_y + row as u16;
        let selected = chain_idx == state.selected_chain_index;
        let expanded = state.expanded_chains.contains(&chain_idx);

        // Selection background.
        if selected {
            let bg_cell = Cell {
                bg: SELECTION_BG,
                ..Cell::default()
            };
            for col in x..max_x {
                frame.buffer.set(col, y, bg_cell);
            }
        }

        let sel_bg = if selected { SELECTION_BG } else { BG_SURFACE };

        // Expand indicator.
        let expand_icon = if expanded { "v " } else { "> " };
        // NOTE(review): unselected placeholder appears narrower than the
        // two-char expand icon — confirm " " vs "  " for column alignment.
        let prefix = if selected { expand_icon } else { " " };
        let prefix_style = Cell {
            fg: ACCENT,
            bg: sel_bg,
            ..Cell::default()
        };
        let after_prefix = frame.print_text_clipped(x, y, prefix, prefix_style, max_x);

        // State icon.
+ let (icon, icon_color) = match chain.mr_state.as_str() { + "merged" => ("M", PURPLE), + "opened" => ("O", GREEN), + "closed" => ("C", RED), + _ => ("?", TEXT_MUTED), + }; + let icon_style = Cell { + fg: icon_color, + bg: sel_bg, + ..Cell::default() + }; + let after_icon = frame.print_text_clipped(after_prefix, y, icon, icon_style, max_x); + + // !iid + let iid_str = format!(" !{}", chain.mr_iid); + let ref_style = Cell { + fg: ACCENT, + bg: sel_bg, + ..Cell::default() + }; + let after_iid = frame.print_text_clipped(after_icon, y, &iid_str, ref_style, max_x); + + // Title. + let title = truncate_str(&chain.mr_title, 30); + let title_style = Cell { + fg: TEXT, + bg: sel_bg, + ..Cell::default() + }; + let after_title = frame.print_text_clipped(after_iid + 1, y, &title, title_style, max_x); + + // @author + change_type + let meta = format!( + "@{} {}", + truncate_str(&chain.mr_author, 12), + chain.change_type + ); + let meta_style = Cell { + fg: TEXT_MUTED, + bg: sel_bg, + ..Cell::default() + }; + frame.print_text_clipped(after_title + 1, y, &meta, meta_style, max_x); + + row += 1; + + // Expanded content: linked issues + discussions. + if expanded { + // Issues. 
+ for issue in &chain.issues { + if row >= height { + break; + } + let iy = start_y + row as u16; + + let issue_icon = match issue.state.as_str() { + "opened" => "O", + "closed" => "C", + _ => "?", + }; + let issue_icon_color = match issue.state.as_str() { + "opened" => GREEN, + "closed" => RED, + _ => TEXT_MUTED, + }; + + let indent_style = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + let after_indent = frame.print_text_clipped( + x + 4, + iy, + issue_icon, + Cell { + fg: issue_icon_color, + ..Cell::default() + }, + max_x, + ); + + let issue_ref = format!(" #{} ", issue.iid); + let issue_ref_style = Cell { + fg: YELLOW, + ..Cell::default() + }; + let after_ref = + frame.print_text_clipped(after_indent, iy, &issue_ref, issue_ref_style, max_x); + + let issue_title = truncate_str(&issue.title, 40); + let _ = indent_style; // suppress unused + frame.print_text_clipped( + after_ref, + iy, + &issue_title, + Cell { + fg: TEXT, + ..Cell::default() + }, + max_x, + ); + + row += 1; + } + + // Discussions. 
+ for disc in &chain.discussions { + if row >= height { + break; + } + let dy = start_y + row as u16; + + let author = format!("@{}: ", truncate_str(&disc.author_username, 12)); + let author_style = Cell { + fg: CYAN, + ..Cell::default() + }; + let after_author = + frame.print_text_clipped(x + 4, dy, &author, author_style, max_x); + + let snippet = truncate_str(&disc.body, 60); + let snippet_style = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + frame.print_text_clipped(after_author, dy, &snippet, snippet_style, max_x); + + row += 1; + } + } + } +} + +fn render_loading(frame: &mut Frame<'_>, x: u16, y: u16, max_x: u16) { + let style = Cell { + fg: ACCENT, + ..Cell::default() + }; + frame.print_text_clipped(x + 1, y, "Tracing file provenance...", style, max_x); +} + +fn render_empty_state(frame: &mut Frame<'_>, x: u16, y: u16, max_x: u16) { + let style = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + frame.print_text_clipped( + x + 1, + y, + "Enter a file path and press Enter to trace.", + style, + max_x, + ); +} + +fn render_no_results(frame: &mut Frame<'_>, x: u16, y: u16, max_x: u16) { + let style = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + frame.print_text_clipped(x + 1, y, "No trace chains found.", style, max_x); + frame.print_text_clipped( + x + 1, + y + 1, + "Hint: Run 'lore sync' to fetch MR file changes.", + style, + max_x, + ); +} + +fn render_hint_bar(frame: &mut Frame<'_>, x: u16, y: u16, max_x: u16) { + let style = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + + for col in x..max_x { + frame.buffer.set(col, y, style); + } + + let hints = "/:path Enter:expand r:renames d:discussions q:back"; + frame.print_text_clipped(x + 1, y, hints, style, max_x); +} + +/// Truncate a string to at most `max_chars` display characters. 
+fn truncate_str(s: &str, max_chars: usize) -> String { + if s.chars().count() <= max_chars { + s.to_string() + } else { + let truncated: String = s.chars().take(max_chars.saturating_sub(1)).collect(); + format!("{truncated}…") + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use std::collections::HashSet; + + use super::*; + use crate::state::trace::TraceState; + use ftui::render::grapheme_pool::GraphemePool; + use lore::core::trace::{TraceChain, TraceResult}; + + macro_rules! with_frame { + ($width:expr, $height:expr, |$frame:ident| $body:block) => {{ + let mut pool = GraphemePool::new(); + let mut $frame = Frame::new($width, $height, &mut pool); + $body + }}; + } + + fn test_area(w: u16, h: u16) -> ftui::core::geometry::Rect { + ftui::core::geometry::Rect { + x: 0, + y: 0, + width: w, + height: h, + } + } + + fn sample_chain(iid: i64, title: &str, state: &str) -> TraceChain { + TraceChain { + mr_iid: iid, + mr_title: title.into(), + mr_state: state.into(), + mr_author: "alice".into(), + change_type: "modified".into(), + merged_at_iso: None, + updated_at_iso: "2024-01-01".into(), + web_url: None, + issues: vec![], + discussions: vec![], + } + } + + #[test] + fn test_render_empty_no_panic() { + with_frame!(80, 24, |frame| { + let state = TraceState::default(); + render_trace(&mut frame, &state, test_area(80, 24)); + }); + } + + #[test] + fn test_render_tiny_terminal_noop() { + with_frame!(5, 2, |frame| { + let state = TraceState::default(); + render_trace(&mut frame, &state, test_area(5, 2)); + }); + } + + #[test] + fn test_render_loading() { + with_frame!(80, 24, |frame| { + let state = TraceState { + loading: true, + ..TraceState::default() + }; + render_trace(&mut frame, &state, test_area(80, 24)); + }); + } + + #[test] + fn test_render_with_chains() { + with_frame!(100, 30, |frame| { + let state = 
TraceState { + result: Some(TraceResult { + path: "src/main.rs".into(), + resolved_paths: vec!["src/main.rs".into()], + renames_followed: false, + trace_chains: vec![ + sample_chain(42, "Fix auth flow", "merged"), + sample_chain(39, "Refactor modules", "opened"), + ], + total_chains: 2, + }), + ..TraceState::default() + }; + render_trace(&mut frame, &state, test_area(100, 30)); + }); + } + + #[test] + fn test_render_expanded_chain() { + with_frame!(100, 30, |frame| { + let state = TraceState { + expanded_chains: HashSet::from([0]), + result: Some(TraceResult { + path: "src/main.rs".into(), + resolved_paths: vec!["src/main.rs".into()], + renames_followed: false, + trace_chains: vec![TraceChain { + mr_iid: 42, + mr_title: "Fix auth".into(), + mr_state: "merged".into(), + mr_author: "alice".into(), + change_type: "modified".into(), + merged_at_iso: None, + updated_at_iso: "2024-01-01".into(), + web_url: None, + issues: vec![lore::core::trace::TraceIssue { + iid: 12, + title: "Login broken".into(), + state: "closed".into(), + reference_type: "closes".into(), + web_url: None, + }], + discussions: vec![lore::core::trace::TraceDiscussion { + discussion_id: "abc".into(), + mr_iid: 42, + author_username: "bob".into(), + body: "This path needs review".into(), + path: "src/main.rs".into(), + created_at_iso: "2024-01-01".into(), + }], + }], + total_chains: 1, + }), + ..TraceState::default() + }; + render_trace(&mut frame, &state, test_area(100, 30)); + }); + } + + #[test] + fn test_render_with_rename_chain() { + with_frame!(80, 24, |frame| { + let state = TraceState { + result: Some(TraceResult { + path: "src/old.rs".into(), + resolved_paths: vec!["src/old.rs".into(), "src/new.rs".into()], + renames_followed: true, + trace_chains: vec![], + total_chains: 0, + }), + ..TraceState::default() + }; + render_trace(&mut frame, &state, test_area(80, 24)); + }); + } + + #[test] + fn test_truncate_str() { + assert_eq!(truncate_str("hello", 10), "hello"); + assert_eq!(truncate_str("hello 
world", 5), "hell…"); + assert_eq!(truncate_str("", 5), ""); + } +} diff --git a/crates/lore-tui/src/view/who.rs b/crates/lore-tui/src/view/who.rs new file mode 100644 index 0000000..9eaad01 --- /dev/null +++ b/crates/lore-tui/src/view/who.rs @@ -0,0 +1,1049 @@ +#![allow(dead_code)] // Phase 3: consumed by view/mod.rs screen dispatch + +//! Who (people intelligence) screen view. +//! +//! Layout: +//! ```text +//! +--[ Expert ]--[ Workload ]--[ Reviews ]--[ Active ]--[ Overlap ]--+ +//! | > path or username input_ | +//! +──────────────────────────────────────────────────────────────────+ +//! | alice score: 142 3A 2R group/project | +//! | bob score: 89 1A 4R group/project | +//! +──────────────────────────────────────────────────────────────────+ +//! | 1-5: mode /: focus c: closed j/k: nav Enter: drill-down | +//! +──────────────────────────────────────────────────────────────────+ +//! ``` + +use ftui::core::geometry::Rect; +use ftui::render::cell::{Cell, PackedRgba}; +use ftui::render::drawing::Draw; +use ftui::render::frame::Frame; + +use lore::core::who_types::{ + ActiveResult, ExpertResult, OverlapResult, ReviewsResult, WhoResult, WorkloadResult, +}; + +use crate::state::who::{WhoMode, WhoState}; + +use super::{ACCENT, BG_SURFACE, BORDER, TEXT, TEXT_MUTED}; + +/// Muted accent for inactive mode tabs. +const TAB_INACTIVE: PackedRgba = PackedRgba::rgb(0x6F, 0x6E, 0x69); // ui-3 + +/// Green for bar chart / positive indicators. +const GREEN: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39); // green + +/// Cyan for secondary highlights. +const CYAN: PackedRgba = PackedRgba::rgb(0x3A, 0xA9, 0x9F); // cyan + +// --------------------------------------------------------------------------- +// render_who (top-level) +// --------------------------------------------------------------------------- + +/// Render the who screen. 
pub fn render_who(frame: &mut Frame<'_>, state: &WhoState, area: Rect) {
    // Too small to show tabs + input + content + hints.
    if area.height < 5 || area.width < 30 {
        return;
    }

    let mut y = area.y;
    let max_x = area.right();

    // -- Mode tabs -----------------------------------------------------------
    y = render_mode_tabs(frame, state.mode, area.x, y, area.width, max_x);

    // -- Input bar -----------------------------------------------------------
    // Only modes that take a path or username query get an input row.
    if state.mode.needs_path() || state.mode.needs_username() {
        y = render_input_bar(frame, state, area.x, y, area.width, max_x);
    }

    // -- Separator -----------------------------------------------------------
    if y >= area.bottom() {
        return;
    }
    let sep_cell = Cell {
        fg: BORDER,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    let sep_line = "\u{2500}".repeat(area.width as usize);
    frame.print_text_clipped(area.x, y, &sep_line, sep_cell, max_x);
    y += 1;

    // -- Content area --------------------------------------------------------
    // Last row is reserved for the hint bar.
    let bottom_hint_row = area.bottom().saturating_sub(1);
    let content_height = bottom_hint_row.saturating_sub(y) as usize;

    if content_height == 0 {
        return;
    }

    if state.loading {
        render_loading_indicator(frame, area.x + 1, y, max_x);
    } else if let Some(ref result) = state.result {
        render_result(frame, result, state, area.x, y, area.width, content_height);
    } else {
        render_empty_state(frame, state, area.x + 1, y, max_x);
    }

    // -- Include-closed indicator --------------------------------------------
    // Drawn right-aligned on the top (tab) row.
    if state.include_closed && y < area.bottom() {
        let indicator = " [closed: on] ";
        let ind_x = max_x.saturating_sub(indicator.len() as u16);
        let ind_cell = Cell {
            fg: CYAN,
            bg: BG_SURFACE,
            ..Cell::default()
        };
        frame.print_text_clipped(ind_x, area.y, indicator, ind_cell, max_x);
    }

    // -- Bottom hint bar -----------------------------------------------------
    if bottom_hint_row < area.bottom() {
        render_hint_bar(frame, state, area.x, bottom_hint_row, max_x);
    }
}

// ---------------------------------------------------------------------------
// Mode tabs
// ---------------------------------------------------------------------------

/// Render the row of mode tabs; the active mode is bracketed and accented.
/// Returns the y of the next row.
fn render_mode_tabs(
    frame: &mut Frame<'_>,
    current: WhoMode,
    x: u16,
    y: u16,
    _width: u16,
    max_x: u16,
) -> u16 {
    let mut cursor_x = x;

    for mode in WhoMode::ALL {
        let is_active = mode == current;
        let label = if is_active {
            format!("[ {} ]", mode.label())
        } else {
            format!(" {} ", mode.label())
        };

        let cell = Cell {
            fg: if is_active { ACCENT } else { TAB_INACTIVE },
            bg: BG_SURFACE,
            ..Cell::default()
        };

        cursor_x = frame.print_text_clipped(cursor_x, y, &label, cell, max_x);

        if cursor_x >= max_x {
            break;
        }
    }

    y + 1
}

// ---------------------------------------------------------------------------
// Input bar
// ---------------------------------------------------------------------------

/// Render the query input row (path or username, per the active mode),
/// showing a muted placeholder when empty. Returns the y of the next row.
fn render_input_bar(
    frame: &mut Frame<'_>,
    state: &WhoState,
    x: u16,
    y: u16,
    _width: u16,
    max_x: u16,
) -> u16 {
    let prompt_cell = Cell {
        fg: ACCENT,
        bg: BG_SURFACE,
        ..Cell::default()
    };

    // Path-taking modes win when a mode needs both (none currently do —
    // presumably; verify against WhoMode definitions).
    let (prompt, text, placeholder, focused) = if state.mode.needs_path() {
        (
            "path> ",
            &state.path,
            "src/features/auth/",
            state.path_focused,
        )
    } else {
        (
            "user> ",
            &state.username,
            "@username",
            state.username_focused,
        )
    };

    let after_prompt = frame.print_text_clipped(x, y, prompt, prompt_cell, max_x);

    let (display_text, fg) = if text.is_empty() {
        (placeholder, TEXT_MUTED)
    } else {
        (text.as_str(), TEXT)
    };

    let text_cell = Cell {
        fg,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    frame.print_text_clipped(after_prompt, y, display_text, text_cell, max_x);

    // Cursor rendering when focused.
    if focused && !text.is_empty() {
        let cursor_pos = if state.mode.needs_path() {
            state.path_cursor
        } else {
            state.username_cursor
        };
        // Byte offset -> display column via char count up to the cursor.
        let cursor_col = text[..cursor_pos.min(text.len())]
            .chars()
            .count()
            .min(u16::MAX as usize) as u16;
        let cursor_x = after_prompt + cursor_col;
        if cursor_x < max_x {
            let cursor_cell = Cell {
                fg: BG_SURFACE,
                bg: TEXT,
                ..Cell::default()
            };
            // Character under the cursor, or a space at end-of-text.
            let cursor_char = text
                .get(cursor_pos..)
                .and_then(|s| s.chars().next())
                .unwrap_or(' ');
            frame.print_text_clipped(cursor_x, y, &cursor_char.to_string(), cursor_cell, max_x);
        }
    }

    y + 1
}

// ---------------------------------------------------------------------------
// Result dispatch
// ---------------------------------------------------------------------------

/// Dispatch to the per-mode renderer for the loaded result.
fn render_result(
    frame: &mut Frame<'_>,
    result: &WhoResult,
    state: &WhoState,
    x: u16,
    y: u16,
    width: u16,
    height: usize,
) {
    match result {
        WhoResult::Expert(r) => render_expert(frame, r, state, x, y, width, height),
        WhoResult::Workload(r) => render_workload(frame, r, state, x, y, width, height),
        WhoResult::Reviews(r) => render_reviews(frame, r, state, x, y, width, height),
        WhoResult::Active(r) => render_active(frame, r, state, x, y, width, height),
        WhoResult::Overlap(r) => render_overlap(frame, r, state, x, y, width, height),
    }
}

// ---------------------------------------------------------------------------
// Expert mode
// ---------------------------------------------------------------------------

/// Render the expert list: username, score, a proportional bar, and
/// author/review MR counts per row, with scroll + selection handling.
fn render_expert(
    frame: &mut Frame<'_>,
    result: &ExpertResult,
    state: &WhoState,
    x: u16,
    start_y: u16,
    width: u16,
    height: usize,
) {
    let max_x = x + width;

    if result.experts.is_empty() {
        let cell = Cell {
            fg: TEXT_MUTED,
            bg: BG_SURFACE,
            ..Cell::default()
        };
        frame.print_text_clipped(
            x + 1,
            start_y,
            "No experts found for this path.",
            cell,
            max_x,
        );
        frame.print_text_clipped(
            x + 1,
            start_y + 1,
            "Ensure diff notes are synced (lore sync).",
            cell,
            max_x,
        );
        return;
    }

    // .max(1) guards the bar-scale division against a zero top score.
    let max_score = result
        .experts
        .iter()
        .map(|e| e.score)
        .max()
        .unwrap_or(1)
        .max(1);

    let normal = Cell {
        fg: TEXT,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    let selected_cell = Cell {
        fg: BG_SURFACE,
        bg: ACCENT,
        ..Cell::default()
    };
    let muted = Cell {
        fg: TEXT_MUTED,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    let bar_cell = Cell {
        fg: GREEN,
        bg: BG_SURFACE,
        ..Cell::default()
    };

    for (i, expert) in result
        .experts
        .iter()
        .skip(state.scroll_offset)
        .enumerate()
        .take(height)
    {
        let y = start_y + i as u16;
        // i is the window-relative index; add the offset for absolute compare.
        let is_selected = i + state.scroll_offset == state.selected_index;

        let (label_style, detail_style) = if is_selected {
            (selected_cell, selected_cell)
        } else {
            (normal, muted)
        };

        // Fill background for selected row.
        if is_selected {
            for col in x..max_x {
                frame.buffer.set(col, y, selected_cell);
            }
        }

        // Username (left-aligned, 16 chars).
        let name = truncate_str(&expert.username, 16);
        let after_name = frame.print_text_clipped(x + 1, y, &name, label_style, max_x);

        // Score.
        let score_str = format!("{:>5}", expert.score);
        let after_score =
            frame.print_text_clipped(after_name + 1, y, &score_str, label_style, max_x);

        // Bar chart (proportional to max score).
        let bar_width = 12u16;
        let filled = ((expert.score as f64 / max_score as f64) * bar_width as f64) as u16;
        let bar_str: String = "\u{2588}"
            .repeat(filled as usize)
            .chars()
            .chain(std::iter::repeat_n(
                '\u{2591}',
                (bar_width - filled) as usize,
            ))
            .collect();
        let bar_style = if is_selected { selected_cell } else { bar_cell };
        let after_bar = frame.print_text_clipped(after_score + 1, y, &bar_str, bar_style, max_x);

        // MR counts.
        let counts = format!("{}A {}R", expert.author_mr_count, expert.review_mr_count);
        frame.print_text_clipped(after_bar + 1, y, &counts, detail_style, max_x);
    }

    // Truncation footer.
    render_truncation_footer(
        frame,
        result.truncated,
        result.experts.len(),
        x,
        start_y,
        width,
        height,
    );
}

// ---------------------------------------------------------------------------
// Workload mode
// ---------------------------------------------------------------------------

/// Render the workload view: four stacked sections (assigned issues,
/// authored MRs, reviewing MRs, unresolved discussions), each with a count
/// header ("+" marks server-side truncation) and a muted "None" when empty.
fn render_workload(
    frame: &mut Frame<'_>,
    result: &WorkloadResult,
    _state: &WhoState,
    x: u16,
    start_y: u16,
    width: u16,
    height: usize,
) {
    let max_x = x + width;
    let mut y = start_y;
    let end_y = start_y + height as u16;

    let header_cell = Cell {
        fg: ACCENT,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    let normal = Cell {
        fg: TEXT,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    let muted = Cell {
        fg: TEXT_MUTED,
        bg: BG_SURFACE,
        ..Cell::default()
    };

    // Section: Assigned Issues
    if y < end_y {
        let label = format!(
            "Assigned Issues ({}{})",
            result.assigned_issues.len(),
            if result.assigned_issues_truncated {
                "+"
            } else {
                ""
            }
        );
        frame.print_text_clipped(x + 1, y, &label, header_cell, max_x);
        y += 1;
    }
    for issue in &result.assigned_issues {
        if y >= end_y {
            break;
        }
        let line = format!(" {} {}", issue.ref_, truncate_str(&issue.title, 50));
        frame.print_text_clipped(x, y, &line, normal, max_x);
        y += 1;
    }
    if result.assigned_issues.is_empty() && y < end_y {
        frame.print_text_clipped(x + 2, y, "None", muted, max_x);
        y += 1;
    }

    y += 1; // Blank separator.

    // Section: Authored MRs
    if y < end_y {
        let label = format!(
            "Authored MRs ({}{})",
            result.authored_mrs.len(),
            if result.authored_mrs_truncated {
                "+"
            } else {
                ""
            }
        );
        frame.print_text_clipped(x + 1, y, &label, header_cell, max_x);
        y += 1;
    }
    for mr in &result.authored_mrs {
        if y >= end_y {
            break;
        }
        let draft = if mr.draft { " [DRAFT]" } else { "" };
        let line = format!(" {} {}{}", mr.ref_, truncate_str(&mr.title, 45), draft);
        frame.print_text_clipped(x, y, &line, normal, max_x);
        y += 1;
    }
    if result.authored_mrs.is_empty() && y < end_y {
        frame.print_text_clipped(x + 2, y, "None", muted, max_x);
        y += 1;
    }

    y += 1;

    // Section: Reviewing MRs
    if y < end_y {
        let label = format!(
            "Reviewing MRs ({}{})",
            result.reviewing_mrs.len(),
            if result.reviewing_mrs_truncated {
                "+"
            } else {
                ""
            }
        );
        frame.print_text_clipped(x + 1, y, &label, header_cell, max_x);
        y += 1;
    }
    for mr in &result.reviewing_mrs {
        if y >= end_y {
            break;
        }
        let line = format!(" {} {}", mr.ref_, truncate_str(&mr.title, 50));
        frame.print_text_clipped(x, y, &line, normal, max_x);
        y += 1;
    }
    if result.reviewing_mrs.is_empty() && y < end_y {
        frame.print_text_clipped(x + 2, y, "None", muted, max_x);
        y += 1;
    }

    y += 1;

    // Section: Unresolved Discussions
    if y < end_y {
        let label = format!(
            "Unresolved Discussions ({}{})",
            result.unresolved_discussions.len(),
            if result.unresolved_discussions_truncated {
                "+"
            } else {
                ""
            }
        );
        frame.print_text_clipped(x + 1, y, &label, header_cell, max_x);
        y += 1;
    }
    for disc in &result.unresolved_discussions {
        if y >= end_y {
            break;
        }
        let line = format!(" {} {}", disc.ref_, truncate_str(&disc.entity_title, 50));
        frame.print_text_clipped(x, y, &line, normal, max_x);
        y += 1;
    }
    if result.unresolved_discussions.is_empty() && y < end_y {
        frame.print_text_clipped(x + 2, y, "None", muted, max_x);
    }
}

// 
--------------------------------------------------------------------------- +// Reviews mode +// --------------------------------------------------------------------------- + +fn render_reviews( + frame: &mut Frame<'_>, + result: &ReviewsResult, + state: &WhoState, + x: u16, + start_y: u16, + width: u16, + height: usize, +) { + let max_x = x + width; + + // Summary header. + let header_cell = Cell { + fg: ACCENT, + bg: BG_SURFACE, + ..Cell::default() + }; + let summary = format!( + "{}: {} DiffNotes across {} MRs", + result.username, result.total_diffnotes, result.mrs_reviewed + ); + frame.print_text_clipped(x + 1, start_y, &summary, header_cell, max_x); + + if result.categories.is_empty() { + let muted = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped( + x + 1, + start_y + 1, + "No review categories found.", + muted, + max_x, + ); + return; + } + + let normal = Cell { + fg: TEXT, + bg: BG_SURFACE, + ..Cell::default() + }; + let selected_cell = Cell { + fg: BG_SURFACE, + bg: ACCENT, + ..Cell::default() + }; + let bar_cell = Cell { + fg: CYAN, + bg: BG_SURFACE, + ..Cell::default() + }; + + let remaining_height = height.saturating_sub(1); + + for (i, cat) in result + .categories + .iter() + .skip(state.scroll_offset) + .enumerate() + .take(remaining_height) + { + let y = start_y + 1 + i as u16; + let is_selected = i + state.scroll_offset == state.selected_index; + + let style = if is_selected { + // Fill background. + for col in x..max_x { + frame.buffer.set(col, y, selected_cell); + } + selected_cell + } else { + normal + }; + + // Category name (20 chars). + let name = format!("{:<20}", truncate_str(&cat.name, 20)); + let after_name = frame.print_text_clipped(x + 1, y, &name, style, max_x); + + // Count. + let count_str = format!("{:>4}", cat.count); + let after_count = frame.print_text_clipped(after_name + 1, y, &count_str, style, max_x); + + // Percentage bar. 
+ let bar_width = 20u16; + let filled = (cat.percentage / 100.0 * bar_width as f64) as u16; + let bar: String = "\u{2588}" + .repeat(filled as usize) + .chars() + .chain(std::iter::repeat_n( + '\u{2591}', + (bar_width - filled) as usize, + )) + .collect(); + let bstyle = if is_selected { selected_cell } else { bar_cell }; + let after_bar = frame.print_text_clipped(after_count + 1, y, &bar, bstyle, max_x); + + // Percentage text. + let pct_str = format!("{:>5.1}%", cat.percentage); + frame.print_text_clipped(after_bar + 1, y, &pct_str, style, max_x); + } +} + +// --------------------------------------------------------------------------- +// Active mode +// --------------------------------------------------------------------------- + +fn render_active( + frame: &mut Frame<'_>, + result: &ActiveResult, + state: &WhoState, + x: u16, + start_y: u16, + width: u16, + height: usize, +) { + let max_x = x + width; + + if result.discussions.is_empty() { + let muted = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped( + x + 1, + start_y, + "No unresolved discussions in the last 7 days.", + muted, + max_x, + ); + return; + } + + // Summary. 
    let header_cell = Cell {
        fg: ACCENT,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    let summary = format!(
        "{} unresolved discussion{}",
        result.total_unresolved_in_window,
        if result.total_unresolved_in_window == 1 {
            ""
        } else {
            "s"
        }
    );
    frame.print_text_clipped(x + 1, start_y, &summary, header_cell, max_x);

    let normal = Cell {
        fg: TEXT,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    let selected_cell = Cell {
        fg: BG_SURFACE,
        bg: ACCENT,
        ..Cell::default()
    };
    let muted = Cell {
        fg: TEXT_MUTED,
        bg: BG_SURFACE,
        ..Cell::default()
    };

    // One row consumed by the summary line.
    let remaining_height = height.saturating_sub(1);

    for (i, disc) in result
        .discussions
        .iter()
        .skip(state.scroll_offset)
        .enumerate()
        .take(remaining_height)
    {
        let y = start_y + 1 + i as u16;
        let is_selected = i + state.scroll_offset == state.selected_index;

        let (label_style, detail_style) = if is_selected {
            for col in x..max_x {
                frame.buffer.set(col, y, selected_cell);
            }
            (selected_cell, selected_cell)
        } else {
            (normal, muted)
        };

        // Entity ref: "#" for issues, "!" for MRs.
        let prefix = if disc.entity_type == "Issue" {
            "#"
        } else {
            "!"
        };
        let ref_str = format!("{}{}", prefix, disc.entity_iid);
        let after_ref = frame.print_text_clipped(x + 1, y, &ref_str, label_style, max_x);

        // Title.
        let title = truncate_str(&disc.entity_title, 40);
        let after_title = frame.print_text_clipped(after_ref + 1, y, &title, label_style, max_x);

        // Note count + participants.
        let participants = disc.participants.join(", ");
        let meta = format!("{}n {}", disc.note_count, truncate_str(&participants, 20));
        frame.print_text_clipped(after_title + 1, y, &meta, detail_style, max_x);
    }

    // Truncation.
    render_truncation_footer(
        frame,
        result.truncated,
        result.discussions.len(),
        x,
        start_y,
        width,
        height,
    );
}

// ---------------------------------------------------------------------------
// Overlap mode
// ---------------------------------------------------------------------------

/// Render the overlap view: a fixed-column table of contributors with
/// author-touch, review-touch, and total-touch counts.
fn render_overlap(
    frame: &mut Frame<'_>,
    result: &OverlapResult,
    state: &WhoState,
    x: u16,
    start_y: u16,
    width: u16,
    height: usize,
) {
    let max_x = x + width;

    if result.users.is_empty() {
        let muted = Cell {
            fg: TEXT_MUTED,
            bg: BG_SURFACE,
            ..Cell::default()
        };
        frame.print_text_clipped(
            x + 1,
            start_y,
            "No contributors found for this path.",
            muted,
            max_x,
        );
        return;
    }

    // Header row.
    // NOTE(review): header spacing should line up with the
    // {:<18}{:>6}{:>8}{:>7} data columns below — confirm the literal widths.
    let header_cell = Cell {
        fg: ACCENT,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    frame.print_text_clipped(
        x + 1,
        start_y,
        "Username Author Review Total",
        header_cell,
        max_x,
    );

    let normal = Cell {
        fg: TEXT,
        bg: BG_SURFACE,
        ..Cell::default()
    };
    let selected_cell = Cell {
        fg: BG_SURFACE,
        bg: ACCENT,
        ..Cell::default()
    };

    // One row consumed by the header.
    let remaining_height = height.saturating_sub(1);

    for (i, user) in result
        .users
        .iter()
        .skip(state.scroll_offset)
        .enumerate()
        .take(remaining_height)
    {
        let y = start_y + 1 + i as u16;
        let is_selected = i + state.scroll_offset == state.selected_index;

        let style = if is_selected {
            for col in x..max_x {
                frame.buffer.set(col, y, selected_cell);
            }
            selected_cell
        } else {
            normal
        };

        let line = format!(
            "{:<18}{:>6}{:>8}{:>7}",
            truncate_str(&user.username, 16),
            user.author_touch_count,
            user.review_touch_count,
            user.touch_count,
        );
        frame.print_text_clipped(x + 1, y, &line, style, max_x);
    }

    // Truncation.
+ render_truncation_footer( + frame, + result.truncated, + result.users.len(), + x, + start_y, + width, + height, + ); +} + +// --------------------------------------------------------------------------- +// Common helpers +// --------------------------------------------------------------------------- + +fn render_empty_state(frame: &mut Frame<'_>, state: &WhoState, x: u16, y: u16, max_x: u16) { + let cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + + let msg = match state.mode { + WhoMode::Expert => "Enter a file path and press Enter to find experts.", + WhoMode::Workload => "Enter a @username and press Enter to see their workload.", + WhoMode::Reviews => "Enter a @username and press Enter to see review activity.", + WhoMode::Active => "Press Enter to find recent unresolved discussions.", + WhoMode::Overlap => "Enter a file path and press Enter to find contributors.", + }; + + frame.print_text_clipped(x, y, msg, cell, max_x); +} + +fn render_loading_indicator(frame: &mut Frame<'_>, x: u16, y: u16, max_x: u16) { + let cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped(x, y, "Loading...", cell, max_x); +} + +fn render_hint_bar(frame: &mut Frame<'_>, state: &WhoState, x: u16, y: u16, max_x: u16) { + let cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + + let hints = if state.has_text_focus() { + "Esc: blur Enter: query" + } else { + "1-5: mode /: focus c: closed j/k: nav Enter: open q: back" + }; + + frame.print_text_clipped(x + 1, y, hints, cell, max_x); +} + +fn render_truncation_footer( + frame: &mut Frame<'_>, + truncated: bool, + visible_count: usize, + x: u16, + start_y: u16, + width: u16, + height: usize, +) { + if !truncated || height == 0 { + return; + } + let max_x = x + width; + let footer_y = start_y + height.saturating_sub(1) as u16; + let footer = format!(" showing {} of more ", visible_count); + let footer_x = max_x.saturating_sub(footer.len() as 
u16); + let cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped(footer_x, footer_y, &footer, cell, max_x); +} + +/// Truncate a string to at most `max_chars` display characters. +fn truncate_str(s: &str, max_chars: usize) -> String { + let chars: Vec = s.chars().collect(); + if chars.len() <= max_chars { + s.to_string() + } else if max_chars <= 3 { + chars[..max_chars].iter().collect() + } else { + let mut result: String = chars[..max_chars - 3].iter().collect(); + result.push_str("..."); + result + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::state::who::WhoState; + use ftui::render::grapheme_pool::GraphemePool; + + macro_rules! with_frame { + ($width:expr, $height:expr, |$frame:ident| $body:block) => {{ + let mut pool = GraphemePool::new(); + let mut $frame = Frame::new($width, $height, &mut pool); + $body + }}; + } + + #[test] + fn test_render_who_empty_no_panic() { + with_frame!(80, 24, |frame| { + let state = WhoState::default(); + render_who(&mut frame, &state, Rect::new(0, 0, 80, 24)); + }); + } + + #[test] + fn test_render_who_tiny_terminal_noop() { + with_frame!(20, 4, |frame| { + let state = WhoState::default(); + render_who(&mut frame, &state, Rect::new(0, 0, 20, 4)); + }); + } + + #[test] + fn test_render_who_loading() { + with_frame!(80, 24, |frame| { + let state = WhoState { + loading: true, + ..WhoState::default() + }; + render_who(&mut frame, &state, Rect::new(0, 0, 80, 24)); + }); + } + + #[test] + fn test_render_who_with_expert_result() { + with_frame!(100, 30, |frame| { + let state = WhoState { + result: Some(WhoResult::Expert(lore::core::who_types::ExpertResult { + path_query: "src/".into(), + path_match: "prefix".into(), + experts: vec![lore::core::who_types::Expert { + username: "alice".into(), + score: 142, 
+ score_raw: None, + components: None, + review_mr_count: 3, + review_note_count: 5, + author_mr_count: 2, + last_seen_ms: 1_700_000_000_000, + mr_refs: vec!["group/project!42".into()], + mr_refs_total: 1, + mr_refs_truncated: false, + details: None, + }], + truncated: false, + })), + ..WhoState::default() + }; + render_who(&mut frame, &state, Rect::new(0, 0, 100, 30)); + }); + } + + #[test] + fn test_render_who_active_mode_no_input_bar() { + with_frame!(80, 24, |frame| { + let state = WhoState { + mode: WhoMode::Active, + ..WhoState::default() + }; + render_who(&mut frame, &state, Rect::new(0, 0, 80, 24)); + // Active mode should not render input bar (no path/username needed). + }); + } + + #[test] + fn test_render_who_with_include_closed() { + with_frame!(80, 24, |frame| { + let state = WhoState { + include_closed: true, + ..WhoState::default() + }; + render_who(&mut frame, &state, Rect::new(0, 0, 80, 24)); + }); + } + + #[test] + fn test_truncate_str() { + assert_eq!(truncate_str("hello", 10), "hello"); + assert_eq!(truncate_str("hello world", 8), "hello..."); + assert_eq!(truncate_str("hi", 2), "hi"); + assert_eq!(truncate_str("abc", 3), "abc"); + } + + #[test] + fn test_render_who_all_modes_no_panic() { + for mode in WhoMode::ALL { + with_frame!(80, 24, |frame| { + let state = WhoState { + mode, + ..WhoState::default() + }; + render_who(&mut frame, &state, Rect::new(0, 0, 80, 24)); + }); + } + } +} diff --git a/crates/lore-tui/tests/vertical_slice.rs b/crates/lore-tui/tests/vertical_slice.rs new file mode 100644 index 0000000..12f8085 --- /dev/null +++ b/crates/lore-tui/tests/vertical_slice.rs @@ -0,0 +1,636 @@ +//! Vertical slice integration tests for TUI Phase 2. +//! +//! Validates that core screens work together end-to-end with synthetic +//! data flows, navigation preserves state, stale results are dropped, +//! and input mode is always recoverable. 
use ftui::render::frame::Frame;
use ftui::render::grapheme_pool::GraphemePool;
// `Model` brings `update`/`view` into scope for `LoreApp`.
use ftui::{Cmd, Event, KeyCode, KeyEvent, Model, Modifiers};

use lore_tui::app::LoreApp;
use lore_tui::clock::FakeClock;
use lore_tui::message::{EntityKey, InputMode, Msg, Screen};
use lore_tui::state::dashboard::{DashboardData, EntityCounts, LastSyncInfo};
use lore_tui::state::issue_detail::{IssueDetailData, IssueMetadata};
use lore_tui::state::issue_list::{IssueListPage, IssueListRow};
use lore_tui::state::mr_detail::MrDetailData;
use lore_tui::state::mr_list::{MrListPage, MrListRow};
use lore_tui::task_supervisor::TaskKey;

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

/// Build an app under test with a deterministic fake clock so time-relative
/// rendering does not depend on wall-clock drift during the test run.
fn test_app() -> LoreApp {
    let mut app = LoreApp::new();
    app.clock = Box::new(FakeClock::new(chrono::Utc::now()));
    app
}

/// Fixed dashboard payload (counts + one successful sync record).
fn synthetic_dashboard_data() -> DashboardData {
    DashboardData {
        counts: EntityCounts {
            issues_total: 10,
            issues_open: 5,
            mrs_total: 8,
            mrs_open: 3,
            discussions: 15,
            notes_total: 50,
            notes_system_pct: 20,
            documents: 20,
            embeddings: 100,
        },
        projects: vec![],
        recent: vec![],
        last_sync: Some(LastSyncInfo {
            status: "succeeded".into(),
            finished_at: Some(1_700_000_000_000),
            command: "sync".into(),
            error: None,
        }),
    }
}

/// Two-row issue list page (one opened, one closed), no further pages.
fn synthetic_issue_list_page() -> IssueListPage {
    IssueListPage {
        rows: vec![
            IssueListRow {
                project_path: "group/project".into(),
                iid: 1,
                title: "First issue".into(),
                state: "opened".into(),
                author: "alice".into(),
                labels: vec!["backend".into()],
                updated_at: 1_700_000_000_000,
            },
            IssueListRow {
                project_path: "group/project".into(),
                iid: 2,
                title: "Second issue".into(),
                state: "closed".into(),
                author: "bob".into(),
                labels: vec![],
                updated_at: 1_700_000_010_000,
            },
        ],
        next_cursor: None,
        total_count: 2,
    }
}

/// Detail payload matching issue #1 from `synthetic_issue_list_page`.
fn synthetic_issue_detail() -> IssueDetailData {
    IssueDetailData {
        metadata: IssueMetadata {
            iid: 1,
            project_path: "group/project".into(),
            title: "First issue".into(),
            description: "Test description".into(),
            state: "opened".into(),
            author: "alice".into(),
            assignees: vec!["bob".into()],
            labels: vec!["backend".into()],
            milestone: None,
            due_date: None,
            created_at: 1_700_000_000_000,
            updated_at: 1_700_000_060_000,
            web_url: "https://gitlab.com/group/project/-/issues/1".into(),
            discussion_count: 2,
        },
        cross_refs: vec![],
    }
}

/// Single-row MR list page, no further pages.
fn synthetic_mr_list_page() -> MrListPage {
    MrListPage {
        rows: vec![MrListRow {
            project_path: "group/project".into(),
            iid: 10,
            title: "Fix auth".into(),
            state: "opened".into(),
            author: "alice".into(),
            labels: vec![],
            updated_at: 1_700_000_000_000,
            draft: false,
            target_branch: "main".into(),
        }],
        next_cursor: None,
        total_count: 1,
    }
}

/// Detail payload matching MR !10 from `synthetic_mr_list_page`.
fn synthetic_mr_detail() -> MrDetailData {
    MrDetailData {
        metadata: lore_tui::state::mr_detail::MrMetadata {
            iid: 10,
            project_path: "group/project".into(),
            title: "Fix auth".into(),
            description: "MR description".into(),
            state: "opened".into(),
            draft: false,
            author: "alice".into(),
            assignees: vec!["bob".into()],
            reviewers: vec!["carol".into()],
            labels: vec![],
            source_branch: "fix-auth".into(),
            target_branch: "main".into(),
            merge_status: "mergeable".into(),
            created_at: 1_700_000_000_000,
            updated_at: 1_700_000_060_000,
            merged_at: None,
            web_url: "https://gitlab.com/group/project/-/merge_requests/10".into(),
            discussion_count: 1,
            file_change_count: 2,
        },
        cross_refs: vec![],
        file_changes: vec![],
    }
}

/// Inject dashboard data with matching generation, simulating the async
/// load completing for the currently-registered task.
fn load_dashboard(app: &mut LoreApp) {
    let generation = app
        .supervisor
        .submit(TaskKey::LoadScreen(Screen::Dashboard))
        .generation;
    app.update(Msg::DashboardLoaded {
        generation,
        data: Box::new(synthetic_dashboard_data()),
    });
}

/// Navigate to issue list and inject data.
fn navigate_and_load_issue_list(app: &mut LoreApp) {
    app.update(Msg::NavigateTo(Screen::IssueList));
    let generation = app
        .supervisor
        .submit(TaskKey::LoadScreen(Screen::IssueList))
        .generation;
    app.update(Msg::IssueListLoaded {
        generation,
        page: synthetic_issue_list_page(),
    });
}

/// Navigate to issue detail and inject data.
fn navigate_and_load_issue_detail(app: &mut LoreApp, key: EntityKey) {
    let screen = Screen::IssueDetail(key.clone());
    app.update(Msg::NavigateTo(screen.clone()));
    let generation = app
        .supervisor
        .submit(TaskKey::LoadScreen(screen))
        .generation;
    app.update(Msg::IssueDetailLoaded {
        generation,
        key,
        data: Box::new(synthetic_issue_detail()),
    });
}

// ---------------------------------------------------------------------------
// Nav flow tests
// ---------------------------------------------------------------------------

/// TDD anchor: Dashboard -> IssueList -> IssueDetail -> Esc -> IssueList,
/// verifies cursor position is preserved on back-navigation.
#[test]
fn test_dashboard_to_issue_detail_roundtrip() {
    let mut app = test_app();
    assert!(app.navigation.is_at(&Screen::Dashboard));

    // Navigate to IssueList and load data.
    navigate_and_load_issue_list(&mut app);
    assert!(app.navigation.is_at(&Screen::IssueList));
    assert_eq!(app.state.issue_list.rows.len(), 2);

    // Navigate to IssueDetail for issue #1.
    let issue_key = EntityKey::issue(1, 1);
    navigate_and_load_issue_detail(&mut app, issue_key);
    assert!(matches!(app.navigation.current(), Screen::IssueDetail(_)));

    // Go back — should return to IssueList with data preserved.
    app.update(Msg::GoBack);
    assert!(app.navigation.is_at(&Screen::IssueList));
    // Data should still be there (state preserved on navigation).
    assert_eq!(app.state.issue_list.rows.len(), 2);
}

/// Navigate Dashboard -> IssueList -> MrList -> MrDetail -> Home.
#[test]
fn test_full_nav_flow_home() {
    let mut app = test_app();

    // Issue list.
    navigate_and_load_issue_list(&mut app);
    assert!(app.navigation.is_at(&Screen::IssueList));

    // MR list.
    app.update(Msg::NavigateTo(Screen::MrList));
    let generation = app
        .supervisor
        .submit(TaskKey::LoadScreen(Screen::MrList))
        .generation;
    app.update(Msg::MrListLoaded {
        generation,
        page: synthetic_mr_list_page(),
    });
    assert!(app.navigation.is_at(&Screen::MrList));

    // MR detail.
    let mr_key = EntityKey::mr(1, 10);
    let mr_screen = Screen::MrDetail(mr_key.clone());
    app.update(Msg::NavigateTo(mr_screen.clone()));
    let generation = app
        .supervisor
        .submit(TaskKey::LoadScreen(mr_screen))
        .generation;
    app.update(Msg::MrDetailLoaded {
        generation,
        key: mr_key,
        data: Box::new(synthetic_mr_detail()),
    });
    assert!(matches!(app.navigation.current(), Screen::MrDetail(_)));

    // Go home — should land directly on Dashboard regardless of stack depth.
    app.update(Msg::GoHome);
    assert!(app.navigation.is_at(&Screen::Dashboard));
}

/// Verify back-navigation preserves issue list data and MR list data.
#[test]
fn test_state_preserved_on_back_navigation() {
    let mut app = test_app();

    // Load issue list.
    navigate_and_load_issue_list(&mut app);
    assert_eq!(app.state.issue_list.rows.len(), 2);

    // Navigate to MR list.
    app.update(Msg::NavigateTo(Screen::MrList));
    let generation = app
        .supervisor
        .submit(TaskKey::LoadScreen(Screen::MrList))
        .generation;
    app.update(Msg::MrListLoaded {
        generation,
        page: synthetic_mr_list_page(),
    });

    // Both states should be populated.
    assert_eq!(app.state.issue_list.rows.len(), 2);
    assert_eq!(app.state.mr_list.rows.len(), 1);

    // Go back to issue list — data should still be there.
    app.update(Msg::GoBack);
    assert!(app.navigation.is_at(&Screen::IssueList));
    assert_eq!(app.state.issue_list.rows.len(), 2);
}

// ---------------------------------------------------------------------------
// Stale result guard
// ---------------------------------------------------------------------------

/// Rapidly navigate between screens, injecting out-of-order results.
/// Stale results should be silently dropped.
#[test]
fn test_stale_result_guard_rapid_navigation() {
    let mut app = test_app();

    // Navigate to IssueList, capturing generation.
    app.update(Msg::NavigateTo(Screen::IssueList));
    let generation1 = app
        .supervisor
        .submit(TaskKey::LoadScreen(Screen::IssueList))
        .generation;

    // Quickly navigate away and back — new generation.
    app.update(Msg::GoBack);
    app.update(Msg::NavigateTo(Screen::IssueList));
    let generation2 = app
        .supervisor
        .submit(TaskKey::LoadScreen(Screen::IssueList))
        .generation;

    // Late arrival of generation1 — should be dropped.
    app.update(Msg::IssueListLoaded {
        generation: generation1,
        page: IssueListPage {
            rows: vec![IssueListRow {
                project_path: "g/p".into(),
                iid: 999,
                title: "stale".into(),
                state: "opened".into(),
                author: "x".into(),
                labels: vec![],
                updated_at: 0,
            }],
            next_cursor: None,
            total_count: 1,
        },
    });
    assert!(
        app.state.issue_list.rows.is_empty(),
        "stale result should be dropped"
    );

    // generation2 should be accepted.
    app.update(Msg::IssueListLoaded {
        generation: generation2,
        page: synthetic_issue_list_page(),
    });
    assert_eq!(app.state.issue_list.rows.len(), 2);
    assert_eq!(app.state.issue_list.rows[0].title, "First issue");
}

// ---------------------------------------------------------------------------
// Input mode fuzz (stuck-input check)
// ---------------------------------------------------------------------------

/// Fuzz 1000 random key sequences and verify:
/// 1. No panics
/// 2. InputMode is always recoverable via Esc + Ctrl+C
/// 3. Final state is consistent
#[test]
fn test_input_mode_fuzz_no_stuck_state() {
    use std::collections::hash_map::DefaultHasher;
    use std::hash::{Hash, Hasher};

    let mut app = test_app();

    // Deterministic pseudo-random key generation.
    let keys = [
        KeyCode::Char('g'),
        KeyCode::Char('i'),
        KeyCode::Char('m'),
        KeyCode::Char('h'),
        KeyCode::Char('s'),
        KeyCode::Char('q'),
        KeyCode::Char('p'),
        KeyCode::Char('/'),
        KeyCode::Char('?'),
        KeyCode::Tab,
        KeyCode::BackTab,
        KeyCode::Escape,
        KeyCode::Enter,
        KeyCode::Up,
        KeyCode::Down,
        KeyCode::Left,
        KeyCode::Right,
        KeyCode::Home,
        KeyCode::End,
    ];

    // NONE appears three times so unmodified keys dominate the distribution.
    let modifiers_set = [
        Modifiers::NONE,
        Modifiers::SHIFT,
        Modifiers::CTRL,
        Modifiers::NONE,
        Modifiers::NONE,
    ];

    // Run 1000 random key events.
    for i in 0..1000_u64 {
        // Simple deterministic hash to pick key + modifier.
        let mut hasher = DefaultHasher::new();
        i.hash(&mut hasher);
        let h = hasher.finish();

        let key_code = keys[(h as usize) % keys.len()];
        let mods = modifiers_set[((h >> 16) as usize) % modifiers_set.len()];

        // Skip Ctrl+C (would quit) and 'q' in normal mode (would quit).
        // NOTE(review): 'c' is not in `keys`, so this first guard can never
        // fire — kept as a safety net in case the key set grows.
        if key_code == KeyCode::Char('c') && mods.contains(Modifiers::CTRL) {
            continue;
        }
        if key_code == KeyCode::Char('q') && mods == Modifiers::NONE {
            // Only skip if in Normal mode to avoid quitting the test.
            if matches!(app.input_mode, InputMode::Normal) {
                continue;
            }
        }

        let key_event = if mods == Modifiers::NONE {
            KeyEvent::new(key_code)
        } else {
            KeyEvent::new(key_code).with_modifiers(mods)
        };

        let cmd = app.update(Msg::RawEvent(Event::Key(key_event)));
        // Should never produce Quit from our filtered set (we skip q and Ctrl+C).
        if matches!(cmd, Cmd::Quit) {
            // This can happen from 'q' in non-Normal modes where we didn't filter.
            // Recreate app to continue fuzzing.
            app = test_app();
        }
    }

    // Recovery check: Esc should always bring us back to Normal mode.
    app.update(Msg::RawEvent(Event::Key(KeyEvent::new(KeyCode::Escape))));
    // After Esc, we should be in Normal mode (or if already Normal, stay there).
    // GoPrefix → Normal, Text → Normal, Palette → Normal.
    assert!(
        matches!(app.input_mode, InputMode::Normal),
        "Esc should always recover to Normal mode, got: {:?}",
        app.input_mode
    );

    // Ctrl+C should always quit.
    let ctrl_c = KeyEvent::new(KeyCode::Char('c')).with_modifiers(Modifiers::CTRL);
    let cmd = app.update(Msg::RawEvent(Event::Key(ctrl_c)));
    assert!(matches!(cmd, Cmd::Quit));
}

// ---------------------------------------------------------------------------
// Bootstrap → Dashboard transition
// ---------------------------------------------------------------------------

/// Bootstrap screen should auto-transition to Dashboard when sync completes.
#[test]
fn test_bootstrap_to_dashboard_after_sync() {
    let mut app = test_app();

    // Start on Bootstrap screen.
    app.update(Msg::NavigateTo(Screen::Bootstrap));
    assert!(app.navigation.is_at(&Screen::Bootstrap));
    assert!(!app.state.bootstrap.sync_started);

    // User starts sync via key path (g then s).
    app.update(Msg::RawEvent(Event::Key(KeyEvent::new(KeyCode::Char('g')))));
    app.update(Msg::RawEvent(Event::Key(KeyEvent::new(KeyCode::Char('s')))));
    assert!(app.state.bootstrap.sync_started);

    // Sync completes — should auto-transition to Dashboard.
    app.update(Msg::SyncCompleted { elapsed_ms: 5000 });
    assert!(
        app.navigation.is_at(&Screen::Dashboard),
        "Should auto-transition to Dashboard after sync completes on Bootstrap"
    );
}

/// SyncCompleted on non-Bootstrap screen should NOT navigate.
#[test]
fn test_sync_completed_does_not_navigate_from_other_screens() {
    let mut app = test_app();

    // Navigate to IssueList.
    app.update(Msg::NavigateTo(Screen::IssueList));
    assert!(app.navigation.is_at(&Screen::IssueList));

    // SyncCompleted should be a no-op.
    app.update(Msg::SyncCompleted { elapsed_ms: 3000 });
    assert!(
        app.navigation.is_at(&Screen::IssueList),
        "SyncCompleted should not navigate when not on Bootstrap"
    );
}

// ---------------------------------------------------------------------------
// Render all screens (no-panic check)
// ---------------------------------------------------------------------------

/// Render every screen variant to verify no panics with synthetic data.
#[test]
fn test_render_all_screens_no_panic() {
    let mut pool = GraphemePool::new();

    // Load data for all screens.
    let mut app = test_app();
    load_dashboard(&mut app);
    navigate_and_load_issue_list(&mut app);
    app.update(Msg::GoBack);

    // Load MR list.
    app.update(Msg::NavigateTo(Screen::MrList));
    let generation = app
        .supervisor
        .submit(TaskKey::LoadScreen(Screen::MrList))
        .generation;
    app.update(Msg::MrListLoaded {
        generation,
        page: synthetic_mr_list_page(),
    });
    app.update(Msg::GoBack);

    // Render at each non-detail screen.
    let screens = [
        Screen::Dashboard,
        Screen::IssueList,
        Screen::MrList,
        Screen::Bootstrap,
    ];

    for screen in &screens {
        app.update(Msg::NavigateTo(screen.clone()));
        let mut frame = Frame::new(80, 24, &mut pool);
        app.view(&mut frame);
    }

    // Render detail screens.
    let issue_key = EntityKey::issue(1, 1);
    navigate_and_load_issue_detail(&mut app, issue_key);
    {
        let mut frame = Frame::new(80, 24, &mut pool);
        app.view(&mut frame);
    }

    app.update(Msg::GoBack);

    let mr_key = EntityKey::mr(1, 10);
    let mr_screen = Screen::MrDetail(mr_key.clone());
    app.update(Msg::NavigateTo(mr_screen.clone()));
    let generation = app
        .supervisor
        .submit(TaskKey::LoadScreen(mr_screen))
        .generation;
    app.update(Msg::MrDetailLoaded {
        generation,
        key: mr_key,
        data: Box::new(synthetic_mr_detail()),
    });
    {
        let mut frame = Frame::new(80, 24, &mut pool);
        app.view(&mut frame);
    }
}

/// Render at various terminal sizes to catch layout panics.
#[test]
fn test_render_various_sizes_no_panic() {
    let mut pool = GraphemePool::new();
    let app = test_app();

    let sizes: [(u16, u16); 5] = [
        (80, 24),  // Standard
        (120, 40), // Large
        (40, 12),  // Small
        (20, 5),   // Very small
        (3, 3),    // Minimum
    ];

    for (w, h) in &sizes {
        let mut frame = Frame::new(*w, *h, &mut pool);
        app.view(&mut frame);
    }
}

// ---------------------------------------------------------------------------
// Navigation depth stress
// ---------------------------------------------------------------------------

/// Navigate deep and verify back-navigation works correctly.
#[test]
fn test_deep_navigation_and_unwind() {
    let mut app = test_app();

    // Push 10 screens (5 iterations × IssueList + IssueDetail pairs).
    for i in 0..5 {
        app.update(Msg::NavigateTo(Screen::IssueList));
        let issue_key = EntityKey::issue(1, i + 1);
        app.update(Msg::NavigateTo(Screen::IssueDetail(issue_key)));
    }

    // Should be at IssueDetail depth.
    assert!(matches!(app.navigation.current(), Screen::IssueDetail(_)));

    // Unwind all the way back to Dashboard.
    for _ in 0..20 {
        app.update(Msg::GoBack);
        if app.navigation.is_at(&Screen::Dashboard) {
            break;
        }
    }

    assert!(
        app.navigation.is_at(&Screen::Dashboard),
        "Should eventually reach Dashboard"
    );
}

// ---------------------------------------------------------------------------
// Performance (smoke test — real benchmarks need criterion)
// ---------------------------------------------------------------------------

/// Verify that 100 update() + view() cycles complete quickly.
/// This is a smoke test, not a precise benchmark.
#[test]
fn test_update_view_cycle_performance_smoke() {
    let mut pool = GraphemePool::new();
    let mut app = test_app();
    load_dashboard(&mut app);

    let start = std::time::Instant::now();
    for _ in 0..100 {
        app.update(Msg::Tick);
        let mut frame = Frame::new(80, 24, &mut pool);
        app.view(&mut frame);
    }
    let elapsed = start.elapsed();

    // 100 cycles should complete in well under 1 second.
    // On a typical machine this takes < 10ms.
    assert!(
        elapsed.as_millis() < 1000,
        "100 update+view cycles took {}ms — too slow",
        elapsed.as_millis()
    );
}