From 146eb61623b438764d579a334527dab07aaab5c0 Mon Sep 17 00:00:00 2001 From: teernisse Date: Wed, 18 Feb 2026 23:40:30 -0500 Subject: [PATCH] feat(tui): Phase 4 completion + Phase 5 session/lock/text-width MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit Phase 4 (bd-1df9) — all 5 acceptance criteria met: - Sync screen with delta ledger (bd-2x2h, bd-y095) - Doctor screen with health checks (bd-2iqk) - Stats screen with document counts (bd-2iqk) - CLI integration: lore tui subcommand (bd-26lp) - CLI integration: lore sync --tui flag (bd-3l56) Phase 5 (bd-3h00) — session persistence + instance lock + text width: - text_width.rs: Unicode-aware measurement, truncation, padding (16 tests) - instance_lock.rs: Advisory PID lock with stale recovery (6 tests) - session.rs: Atomic write + CRC32 checksum + quarantine (9 tests) Closes: bd-26lp, bd-3h00, bd-3l56, bd-1df9, bd-y095 --- .beads/issues.jsonl | 16 +- .beads/last-touched | 2 +- Cargo.lock | 31 + Cargo.toml | 1 + crates/lore-tui/Cargo.lock | 34 + crates/lore-tui/Cargo.toml | 7 + crates/lore-tui/src/action/file_history.rs | 12 +- crates/lore-tui/src/action/mod.rs | 2 + crates/lore-tui/src/action/sync.rs | 587 +++++++++++++++++ crates/lore-tui/src/action/timeline.rs | 54 +- crates/lore-tui/src/action/trace.rs | 8 +- crates/lore-tui/src/app/update.rs | 104 ++- crates/lore-tui/src/commands/mod.rs | 2 +- crates/lore-tui/src/commands/registry.rs | 50 ++ crates/lore-tui/src/entity_cache.rs | 9 +- crates/lore-tui/src/instance_lock.rs | 202 ++++++ crates/lore-tui/src/lib.rs | 6 + crates/lore-tui/src/message.rs | 19 + crates/lore-tui/src/render_cache.rs | 9 +- crates/lore-tui/src/scope.rs | 155 +++++ crates/lore-tui/src/session.rs | 406 ++++++++++++ crates/lore-tui/src/state/doctor.rs | 199 ++++++ crates/lore-tui/src/state/file_history.rs | 20 +- crates/lore-tui/src/state/mod.rs | 10 + crates/lore-tui/src/state/scope_picker.rs | 234 +++++++ crates/lore-tui/src/state/stats.rs | 153 +++++ 
crates/lore-tui/src/state/sync.rs | 596 +++++++++++++++++- .../lore-tui/src/state/sync_delta_ledger.rs | 222 +++++++ crates/lore-tui/src/state/trace.rs | 47 +- crates/lore-tui/src/state/who.rs | 20 +- crates/lore-tui/src/text_width.rs | 300 +++++++++ crates/lore-tui/src/view/common/mod.rs | 15 + crates/lore-tui/src/view/doctor.rs | 289 +++++++++ crates/lore-tui/src/view/file_history.rs | 14 +- crates/lore-tui/src/view/mod.rs | 62 +- crates/lore-tui/src/view/scope_picker.rs | 276 ++++++++ crates/lore-tui/src/view/stats.rs | 443 +++++++++++++ crates/lore-tui/src/view/sync.rs | 575 +++++++++++++++++ crates/lore-tui/src/view/trace.rs | 27 +- crates/lore-tui/src/view/who.rs | 17 +- src/cli/autocorrect.rs | 2 + src/cli/commands/mod.rs | 2 + src/cli/commands/tui.rs | 121 ++++ src/cli/mod.rs | 9 + src/main.rs | 54 +- 45 files changed, 5216 insertions(+), 207 deletions(-) create mode 100644 crates/lore-tui/src/action/sync.rs create mode 100644 crates/lore-tui/src/instance_lock.rs create mode 100644 crates/lore-tui/src/scope.rs create mode 100644 crates/lore-tui/src/session.rs create mode 100644 crates/lore-tui/src/state/doctor.rs create mode 100644 crates/lore-tui/src/state/scope_picker.rs create mode 100644 crates/lore-tui/src/state/stats.rs create mode 100644 crates/lore-tui/src/state/sync_delta_ledger.rs create mode 100644 crates/lore-tui/src/text_width.rs create mode 100644 crates/lore-tui/src/view/doctor.rs create mode 100644 crates/lore-tui/src/view/scope_picker.rs create mode 100644 crates/lore-tui/src/view/stats.rs create mode 100644 crates/lore-tui/src/view/sync.rs create mode 100644 src/cli/commands/tui.rs diff --git a/.beads/issues.jsonl b/.beads/issues.jsonl index e40439d..ae87d97 100644 --- a/.beads/issues.jsonl +++ b/.beads/issues.jsonl @@ -31,7 +31,7 @@ {"id":"bd-1cl9","title":"Epic: TUI Phase 2 — Core Screens","description":"## Background\nPhase 2 implements the five core screens: Dashboard, Issue List, Issue Detail, MR List, and MR Detail. 
These screens cover the primary read workflows. Each screen has a state struct, view function, and action query bridge. The entity table and filter bar widgets are shared across list screens.\n\n## Acceptance Criteria\n- [ ] Dashboard renders project overview with stats, recent activity, sync status\n- [ ] Issue List supports keyset pagination, filtering, sorting, and Quick Peek\n- [ ] Issue Detail shows progressive hydration (metadata, discussions, cross-refs)\n- [ ] MR List mirrors Issue List patterns with MR-specific columns\n- [ ] MR Detail shows file changes, diff discussions, and general discussions\n- [ ] All screens use TaskSupervisor for data loading with stale-result guards\n- [ ] Navigation between screens preserves state\n\n## Scope\nBlocked by Phase 1 (Foundation). Blocks Phase 2.5 (Vertical Slice Gate).","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-02-12T16:57:23.090933Z","created_by":"tayloreernisse","updated_at":"2026-02-18T20:36:50.923129Z","closed_at":"2026-02-18T20:36:50.923019Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1cl9","depends_on_id":"bd-2tr4","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1d5","title":"[CP1] GitLab client pagination methods","description":"Add async generator methods for paginated GitLab API calls.\n\nMethods to add to src/gitlab/client.ts:\n- paginateIssues(gitlabProjectId, updatedAfter?) 
→ AsyncGenerator\n- paginateIssueDiscussions(gitlabProjectId, issueIid) → AsyncGenerator\n- requestWithHeaders(path) → { data: T, headers: Headers }\n\nImplementation:\n- Use scope=all, state=all for issues\n- Order by updated_at ASC\n- Follow X-Next-Page header until empty/absent\n- Apply cursor rewind (subtract cursorRewindSeconds) for tuple semantics\n- Fall back to empty-page detection if headers missing\n\nFiles: src/gitlab/client.ts\nTests: tests/unit/pagination.test.ts\nDone when: Pagination handles multiple pages and respects cursors","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:19:43.069869Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.156881Z","closed_at":"2026-01-25T15:21:35.156881Z","deleted_at":"2026-01-25T15:21:35.156877Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-1d6z","title":"Implement discussion tree + cross-reference widgets","description":"## Background\nThe discussion tree renders threaded conversations from GitLab issues/MRs using FrankenTUI's Tree widget. Cross-references show linked entities (closing MRs, related issues) as navigable links. 
Both are used in Issue Detail and MR Detail views.\n\n## Approach\nDiscussion Tree (view/common/discussion_tree.rs):\n- Wraps ftui Tree widget with TreePersistState for expand/collapse persistence\n- Tree structure: top-level discussions as roots, notes within discussion as children\n- Each node renders: author, timestamp (relative via Clock), note body (sanitized)\n- System notes rendered with muted style\n- Diff notes show file path + line reference\n- Keyboard: j/k navigate, Enter expand/collapse, Space toggle thread\n- Expand-on-demand: thread bodies loaded only when expanded (progressive hydration phase 3)\n\nCross-Reference (view/common/cross_ref.rs):\n- CrossRefWidget: renders list of entity references with type icon and navigable links\n- CrossRef struct: kind (ClosingMR, RelatedIssue, MentionedIn), entity_key (EntityKey), label (String)\n- Enter on a cross-ref navigates to that entity (pushes nav stack)\n- Renders as: \"Closing MR !42: Fix authentication flow\" with colored kind indicator\n\n## Acceptance Criteria\n- [ ] Discussion tree renders top-level discussions as expandable nodes\n- [ ] Notes within discussion shown as children with indentation\n- [ ] System notes visually distinguished (muted color)\n- [ ] Diff notes show file path context\n- [ ] Timestamps use injected Clock for deterministic rendering\n- [ ] All note text sanitized via sanitize_for_terminal()\n- [ ] Cross-references render with entity type icons\n- [ ] Enter on cross-ref navigates to entity detail\n- [ ] Tree state persists across navigation (expand/collapse remembered)\n\n## Files\n- CREATE: crates/lore-tui/src/view/common/discussion_tree.rs\n- CREATE: crates/lore-tui/src/view/common/cross_ref.rs\n\n## TDD Anchor\nRED: Write test_cross_ref_entity_key that creates a CrossRef with EntityKey::mr(1, 42), asserts kind and key are correct.\nGREEN: Implement CrossRef struct.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_cross_ref\n\n## Edge Cases\n- Deeply nested 
discussions (rare in GitLab but possible): limit indent depth to 4 levels\n- Very long note bodies: wrap text within tree node area\n- Empty discussions (resolved with no notes): show \"[resolved]\" indicator\n- Cross-references to entities not in local DB: show as non-navigable text\n\n## Dependency Context\nUses sanitize_for_terminal() from \"Implement terminal safety module\" task.\nUses Clock for timestamps from \"Implement Clock trait\" task.\nUses EntityKey, Screen from \"Implement core types\" task.\nUses NavigationStack from \"Implement NavigationStack\" task.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:58:49.765694Z","created_by":"tayloreernisse","updated_at":"2026-02-18T20:17:02.460355Z","closed_at":"2026-02-18T20:17:02.460206Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1d6z","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1d6z","depends_on_id":"bd-2lg6","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1d6z","depends_on_id":"bd-3ir1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-1df9","title":"Epic: TUI Phase 4 — Operations","description":"## Background\nPhase 4 adds operational screens: Sync (real-time progress + post-sync summary), Doctor/Stats (health checks), and CLI integration (lore tui command for binary delegation). 
The Sync screen is the most complex — it needs real-time streaming progress with backpressure handling.\n\n## Acceptance Criteria\n- [ ] Sync screen shows real-time progress during sync with per-lane indicators\n- [ ] Sync summary shows exact changed entities after completion\n- [ ] Doctor screen shows environment health checks\n- [ ] Stats screen shows database statistics\n- [ ] CLI integration: lore tui launches lore-tui binary via runtime delegation","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:01:44.603447Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.361318Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1df9","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-1df9","title":"Epic: TUI Phase 4 — Operations","description":"## Background\nPhase 4 adds operational screens: Sync (real-time progress + post-sync summary), Doctor/Stats (health checks), and CLI integration (lore tui command for binary delegation). 
The Sync screen is the most complex — it needs real-time streaming progress with backpressure handling.\n\n## Acceptance Criteria\n- [ ] Sync screen shows real-time progress during sync with per-lane indicators\n- [ ] Sync summary shows exact changed entities after completion\n- [ ] Doctor screen shows environment health checks\n- [ ] Stats screen shows database statistics\n- [ ] CLI integration: lore tui launches lore-tui binary via runtime delegation","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:01:44.603447Z","created_by":"tayloreernisse","updated_at":"2026-02-19T04:32:26.439161Z","closed_at":"2026-02-19T04:32:26.439118Z","close_reason":"All 5 acceptance criteria met: Sync real-time progress, Sync delta summary, Doctor screen, Stats screen, CLI integration.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1df9","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1elx","title":"Implement run_embed_for_document_ids scoped embedding","description":"## Background\n\nCurrently `embed_documents()` in `src/embedding/pipeline.rs` uses `find_pending_documents()` to discover ALL documents that need embedding (no existing embedding, changed content_hash, or model mismatch). The surgical sync pipeline needs a scoped variant that only embeds specific document IDs — the ones returned by the scoped doc regeneration step (bd-hs6j).\n\nThe existing `embed_page()` private function handles the actual embedding work for a batch of `PendingDocument` structs. It calls `split_into_chunks`, sends batches to the OllamaClient, and writes embeddings + metadata to the DB. 
The scoped function can reuse this by constructing `PendingDocument` structs from the provided document IDs.\n\nKey types:\n- `PendingDocument { document_id: i64, content_text: String, content_hash: String }` (from `change_detector.rs`)\n- `EmbedResult { chunks_embedded, docs_embedded, failed, skipped }` (pipeline.rs:21)\n- `OllamaClient` for the actual embedding API calls\n- `ShutdownSignal` for cancellation support\n\n## Approach\n\nAdd `embed_documents_by_ids()` to `src/embedding/pipeline.rs`:\n\n```rust\npub struct EmbedForIdsResult {\n pub chunks_embedded: usize,\n pub docs_embedded: usize,\n pub failed: usize,\n pub skipped: usize,\n}\n\npub async fn embed_documents_by_ids(\n conn: &Connection,\n client: &OllamaClient,\n model_name: &str,\n concurrency: usize,\n document_ids: &[i64],\n signal: &ShutdownSignal,\n) -> Result\n```\n\nImplementation:\n1. If `document_ids` is empty, return immediately with zero counts.\n2. Load `PendingDocument` structs for the specified IDs. Query: `SELECT id, content_text, content_hash FROM documents WHERE id IN (...)`. Filter out documents that already have current embeddings (same content_hash, model, dims, chunk_max_bytes) — reuse the LEFT JOIN logic from `find_pending_documents` but with `WHERE d.id IN (?)` instead of `WHERE d.id > ?`.\n3. If no documents need embedding after filtering, return with skipped=len.\n4. Chunk into pages of `DB_PAGE_SIZE` (500).\n5. For each page, call `embed_page()` (reuse existing private function) within a SAVEPOINT.\n6. Handle cancellation via `signal.is_cancelled()` between pages.\n\nAlternative simpler approach: load all specified doc IDs into a temp table or use a parameterized IN clause, then let `embed_page` process them. 
Since the list is typically small (1-5 documents for surgical sync), a single page call suffices.\n\nExport from `src/embedding/mod.rs` if not already pub.\n\n## Acceptance Criteria\n\n- [ ] `embed_documents_by_ids` only embeds the specified document IDs, not all pending documents\n- [ ] Documents already embedded with current content_hash + model are skipped (not re-embedded)\n- [ ] Empty document_ids input returns immediately with zero counts\n- [ ] Cancellation via ShutdownSignal is respected between pages\n- [ ] SAVEPOINT/ROLLBACK semantics match existing `embed_documents` for data integrity\n- [ ] Ollama errors for individual documents are counted as failed, not fatal\n- [ ] Function is pub for use by orchestration (bd-1i4i)\n\n## Files\n\n- `src/embedding/pipeline.rs` (add new function + result struct)\n- `src/embedding/mod.rs` (export if needed)\n\n## TDD Anchor\n\nTests in `src/embedding/pipeline_tests.rs` (or new `src/embedding/scoped_embed_tests.rs`):\n\n```rust\n#[tokio::test]\nasync fn test_embed_by_ids_only_embeds_specified_docs() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n setup_ollama_mock(&mock).await;\n let client = OllamaClient::new(&mock.uri());\n\n // Insert 2 documents: A (id=1) and B (id=2)\n insert_test_document(&conn, 1, \"Content A\", \"hash_a\");\n insert_test_document(&conn, 2, \"Content B\", \"hash_b\");\n\n let signal = ShutdownSignal::new();\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1,\n &[1], // Only embed doc 1\n &signal,\n ).await.unwrap();\n\n assert_eq!(result.docs_embedded, 1);\n // Verify doc 1 has embeddings\n let count: i64 = conn.query_row(\n \"SELECT COUNT(*) FROM embedding_metadata WHERE document_id = 1\",\n [], |r| r.get(0),\n ).unwrap();\n assert!(count > 0);\n // Verify doc 2 has NO embeddings\n let count_b: i64 = conn.query_row(\n \"SELECT COUNT(*) FROM embedding_metadata WHERE document_id = 2\",\n [], |r| r.get(0),\n ).unwrap();\n 
assert_eq!(count_b, 0);\n}\n\n#[tokio::test]\nasync fn test_embed_by_ids_skips_already_embedded() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n setup_ollama_mock(&mock).await;\n let client = OllamaClient::new(&mock.uri());\n\n insert_test_document(&conn, 1, \"Content A\", \"hash_a\");\n let signal = ShutdownSignal::new();\n\n // Embed once\n embed_documents_by_ids(&conn, &client, \"nomic-embed-text\", 1, &[1], &signal).await.unwrap();\n // Embed again with same hash — should skip\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1, &[1], &signal,\n ).await.unwrap();\n assert_eq!(result.docs_embedded, 0);\n assert_eq!(result.skipped, 1);\n}\n\n#[tokio::test]\nasync fn test_embed_by_ids_empty_input() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n let client = OllamaClient::new(&mock.uri());\n let signal = ShutdownSignal::new();\n\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1, &[], &signal,\n ).await.unwrap();\n assert_eq!(result.docs_embedded, 0);\n assert_eq!(result.chunks_embedded, 0);\n}\n\n#[tokio::test]\nasync fn test_embed_by_ids_respects_cancellation() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n // Use delayed response to allow cancellation\n setup_slow_ollama_mock(&mock).await;\n let client = OllamaClient::new(&mock.uri());\n\n insert_test_document(&conn, 1, \"Content A\", \"hash_a\");\n let signal = ShutdownSignal::new();\n signal.cancel(); // Pre-cancel\n\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1, &[1], &signal,\n ).await.unwrap();\n assert_eq!(result.docs_embedded, 0);\n}\n```\n\n## Edge Cases\n\n- Document ID that does not exist in the documents table: query returns no rows, skipped silently.\n- Document with empty `content_text`: `split_into_chunks` may return 0 chunks, counted as skipped.\n- Ollama server unreachable: 
returns `OllamaUnavailable` error. Must not leave partial embeddings (SAVEPOINT rollback).\n- Very long document (>1500 bytes): gets chunked into multiple chunks by `split_into_chunks`. All chunks for one document must be embedded atomically.\n- Document already has embeddings but with different model: content_hash check passes but model mismatch detected — should re-embed.\n- Concurrent calls with overlapping document_ids: SAVEPOINT isolation prevents conflicts, last writer wins on embedding_metadata upsert.\n\n## Dependency Context\n\n- **Blocked by bd-hs6j**: Gets `document_ids` from scoped doc regeneration output\n- **Blocks bd-1i4i**: Orchestration function calls this as the final step of surgical sync\n- **Blocks bd-3jqx**: Integration tests verify embed isolation (only surgical docs get embedded)\n- **Uses existing internals**: `embed_page`, `PendingDocument`, `split_into_chunks`, `OllamaClient`, `ShutdownSignal`","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:16:43.680009Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:05:18.735382Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-1elx","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1elx","depends_on_id":"bd-3jqx","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-1ep","title":"Wire resource event fetching into sync pipeline","description":"## Background\nAfter issue/MR primary ingestion and discussion fetch, changed entities need resource_events jobs enqueued and drained. 
This is the integration point that connects the queue (bd-tir), API client (bd-sqw), DB upserts (bd-1uc), and config flag (bd-2e8).\n\n## Approach\nModify the sync pipeline to add two new phases after discussion sync:\n\n**Phase 1 — Enqueue during ingestion:**\nIn src/ingestion/orchestrator.rs, after each entity upsert (issue or MR), call:\n```rust\nif config.sync.fetch_resource_events {\n enqueue_job(conn, project_id, \"issue\", iid, local_id, \"resource_events\", None)?;\n}\n// For MRs, also enqueue mr_closes_issues (always) and mr_diffs (when fetchMrFileChanges)\n```\n\nThe \"changed entity\" detection uses the existing dirty tracker: if an entity was inserted or updated during this sync run, it gets enqueued. On --full sync, all entities are enqueued.\n\n**Phase 2 — Drain dependent queue:**\nAdd a new drain step in src/cli/commands/sync.rs (or new src/core/drain.rs), called after discussion sync:\n```rust\npub async fn drain_dependent_queue(\n conn: &Connection,\n client: &GitLabClient,\n config: &Config,\n progress: Option,\n) -> Result\n```\n\nFlow:\n1. reclaim_stale_locks(conn, config.sync.stale_lock_minutes)\n2. Loop: claim_jobs(conn, \"resource_events\", batch_size=10)\n3. For each job:\n a. Fetch 3 event types via client (fetch_issue_state_events etc.)\n b. Store via upsert functions (upsert_state_events etc.)\n c. complete_job(conn, job.id) on success\n d. fail_job(conn, job.id, error_msg) on failure\n4. Report progress: \"Fetching resource events... [N/M]\"\n5. 
Repeat until no more claimable jobs\n\n**Progress reporting:**\nAdd new ProgressEvent variants:\n```rust\nResourceEventsFetchStart { total: usize },\nResourceEventsFetchProgress { completed: usize, total: usize },\nResourceEventsFetchComplete { fetched: usize, failed: usize },\n```\n\n## Acceptance Criteria\n- [ ] Full sync enqueues resource_events jobs for all issues and MRs\n- [ ] Incremental sync only enqueues for entities changed since last sync\n- [ ] --no-events prevents enqueueing resource_events jobs\n- [ ] Drain step fetches all 3 event types per entity\n- [ ] Successful fetches stored and job completed\n- [ ] Failed fetches recorded with error, job retried on next sync\n- [ ] Stale locks reclaimed at drain start\n- [ ] Progress displayed: \"Fetching resource events... [N/M]\"\n- [ ] Robot mode progress suppressed (quiet mode)\n\n## Files\n- src/ingestion/orchestrator.rs (add enqueue calls during upsert)\n- src/cli/commands/sync.rs (add drain step after discussions)\n- src/core/drain.rs (new, optional — or inline in sync.rs)\n\n## TDD Loop\nRED: tests/sync_pipeline_tests.rs (or extend existing):\n- `test_sync_enqueues_resource_events_for_changed_entities` - mock sync, verify jobs enqueued\n- `test_sync_no_events_flag_skips_enqueue` - verify no jobs when flag false\n- `test_drain_completes_jobs_on_success` - mock API responses, verify jobs deleted\n- `test_drain_fails_jobs_on_error` - mock API failure, verify job attempts incremented\n\nNote: Full pipeline integration tests may need mock HTTP server. 
Start with unit tests on enqueue/drain logic using the real DB with mock API responses.\n\nGREEN: Implement enqueue hooks + drain step\n\nVERIFY: `cargo test sync -- --nocapture && cargo build`\n\n## Edge Cases\n- Entity deleted between enqueue and drain: API returns 404, fail_job with \"entity not found\" (retry won't help but backoff caps it)\n- Rate limiting during drain: GitLabRateLimited error should fail_job with retry (transient)\n- Network error during drain: GitLabNetworkError should fail_job with retry\n- Multiple sync runs competing: locked_at prevents double-processing; stale lock reclaim handles crashes\n- Drain should have a max iterations guard to prevent infinite loop if jobs keep failing and being retried within the same run","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:31:57.334527Z","created_by":"tayloreernisse","updated_at":"2026-02-03T17:46:51.336138Z","closed_at":"2026-02-03T17:46:51.336077Z","close_reason":"Implemented: enqueue + drain resource events in orchestrator, wired counts through ingest→sync pipeline, added progress events, 4 new tests, all 209 tests pass","compaction_level":0,"original_size":0,"labels":["gate-1","phase-b","pipeline"],"dependencies":[{"issue_id":"bd-1ep","depends_on_id":"bd-1uc","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ep","depends_on_id":"bd-2e8","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ep","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ep","depends_on_id":"bd-sqw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ep","depends_on_id":"bd-tir","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1f5b","title":"Extract query functions from CLI to shared pub API","description":"## Background\nThe TUI's action.rs bridges to existing CLI 
query functions. To avoid code duplication, query functions need to be made accessible to the TUI crate. The who module was refactored on master into src/cli/commands/who/ with types.rs, expert.rs, workload.rs, reviews.rs, active.rs, overlap.rs. Query functions are currently pub(super) — visible within the who module but not from external crates.\n\n## Approach\n\n### Phase A: Move shared types to core (who)\nMove src/cli/commands/who/types.rs content to src/core/who_types.rs (or src/core/who/types.rs). These are pure data structs with zero logic — WhoRun, WhoResolvedInput, WhoResult enum, ExpertResult, WorkloadResult, ReviewsResult, ActiveResult, OverlapResult, and all nested structs. CLI re-exports from core. TUI imports from core.\n\n### Phase B: Promote query function visibility (who)\nChange pub(super) to pub on the 5 query functions:\n- src/cli/commands/who/expert.rs: query_expert(conn, path, project_id, since_ms, as_of_ms, limit, scoring, detail, explain_score, include_bots)\n- src/cli/commands/who/workload.rs: query_workload(conn, username, project_id, since_ms, limit, include_closed)\n- src/cli/commands/who/reviews.rs: query_reviews(conn, username, project_id, since_ms)\n- src/cli/commands/who/active.rs: query_active(conn, project_id, since_ms, limit, include_closed)\n- src/cli/commands/who/overlap.rs: query_overlap(conn, path, project_id, since_ms, limit)\n\nAlso promote helper: half_life_decay in expert.rs (pub(super) -> pub).\n\n### Phase C: Other command extractions\n- src/cli/commands/list.rs: make query_issues(), query_mrs() pub\n- src/cli/commands/show.rs: make query_issue_detail(), query_mr_detail() pub\n- src/cli/commands/search.rs: make run_search_query() pub\n- src/cli/commands/file_history.rs: extract run_file_history() query logic to pub fn (currently takes Config for DB path; split into query-only fn taking Connection)\n- src/cli/commands/trace.rs: make parse_trace_path() pub\n\n### Phase D: Re-export from who module\nUpdate 
src/cli/commands/who/mod.rs to re-export query functions as pub (not just pub(super)):\n```rust\npub use expert::query_expert;\npub use workload::query_workload;\npub use reviews::query_reviews;\npub use active::query_active;\npub use overlap::query_overlap;\n```\n\n## Acceptance Criteria\n- [ ] WhoResult, ExpertResult, WorkloadResult, ReviewsResult, ActiveResult, OverlapResult, and all nested structs live in src/core/ (not CLI)\n- [ ] CLI who module imports types from core (no duplication)\n- [ ] query_expert, query_workload, query_reviews, query_active, query_overlap are pub and callable from TUI crate\n- [ ] query_issues(), query_mrs() are pub\n- [ ] query_issue_detail(), query_mr_detail() are pub\n- [ ] run_search_query() is pub\n- [ ] run_file_history() query logic available as pub fn taking Connection (not Config)\n- [ ] parse_trace_path() is pub\n- [ ] Existing CLI behavior unchanged (no functional changes)\n- [ ] cargo test passes (no regressions)\n- [ ] cargo check --all-targets passes\n\n## Files\n- CREATE: src/core/who_types.rs (move types from who/types.rs)\n- MODIFY: src/core/mod.rs (add pub mod who_types)\n- MODIFY: src/cli/commands/who/types.rs (re-export from core)\n- MODIFY: src/cli/commands/who/mod.rs (pub use query functions)\n- MODIFY: src/cli/commands/who/expert.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/workload.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/reviews.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/active.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/overlap.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/list.rs (make query functions pub)\n- MODIFY: src/cli/commands/show.rs (make query functions pub)\n- MODIFY: src/cli/commands/search.rs (make search query pub)\n- MODIFY: src/cli/commands/file_history.rs (extract query logic)\n- MODIFY: src/cli/commands/trace.rs (make parse_trace_path pub)\n\n## TDD Anchor\nRED: In lore-tui action.rs, write test that imports 
lore::core::who_types::ExpertResult and lore::cli::commands::who::query_expert — assert it compiles.\nGREEN: Move types to core, promote visibility.\nVERIFY: cargo test --all-targets && cargo check --all-targets\n\n## Edge Cases\n- ScoringConfig dependency: query_expert takes &ScoringConfig from src/core/config.rs — TUI has access via Config\n- include_closed: only affects query_workload and query_active — other modes ignore it\n- file_history.rs run_file_history takes Config for DB path resolution — split into query_file_history(conn, ...) + run_file_history(config, ...) wrapper\n- Visibility changes are additive (non-breaking) — existing callers unaffected\n\n## Dependency Context\nThis modifies the main lore crate (stable Rust). The who module was refactored on master from a single who.rs file into src/cli/commands/who/ with types.rs + 5 mode files. Types are already cleanly separated in types.rs, making the move to core mechanical.\nRequired by: Who screen (bd-u7se), Trace screen (bd-2uzm), File History screen (bd-1up1), and all other TUI action.rs query bridges.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:06:25.285403Z","created_by":"tayloreernisse","updated_at":"2026-02-19T03:20:31.218124Z","closed_at":"2026-02-19T03:20:31.218072Z","close_reason":"Phases A+B+D complete: who_types.rs in core, 5 query fns pub, query_issues/query_mrs pub. 
All tests pass.","compaction_level":0,"original_size":0,"labels":["TUI"]} @@ -77,7 +77,7 @@ {"id":"bd-1s1","title":"[CP1] Integration tests for issue ingestion","description":"Full integration tests for issue ingestion module.\n\n## Tests (tests/issue_ingestion_tests.rs)\n\n- inserts_issues_into_database\n- creates_labels_from_issue_payloads\n- links_issues_to_labels_via_junction_table\n- removes_stale_label_links_on_resync\n- stores_raw_payload_for_each_issue\n- stores_raw_payload_for_each_discussion\n- updates_cursor_incrementally_per_page\n- resumes_from_cursor_on_subsequent_runs\n- handles_issues_with_no_labels\n- upserts_existing_issues_on_refetch\n- skips_discussion_refetch_for_unchanged_issues\n\n## Test Setup\n- tempfile::TempDir for isolated database\n- wiremock::MockServer for GitLab API\n- Mock handlers returning fixture data\n\nFiles: tests/issue_ingestion_tests.rs\nDone when: All integration tests pass with mocked GitLab","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:59:12.158586Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:02.109109Z","closed_at":"2026-01-25T17:02:02.109109Z","deleted_at":"2026-01-25T17:02:02.109105Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-1sc6","title":"Add SurgicalPreflightFailed error variant and foundation visibility changes","description":"## Background\nSurgical sync needs a dedicated error variant for preflight failures (e.g., IID not found on GitLab, project mismatch). The existing `GitLabNotFound` variant maps to exit code 6 and is too generic — it does not convey that the failure occurred during surgical preflight validation. 
A new `SurgicalPreflightFailed` variant in `LoreError` with a clear Display message and exit code 6 provides actionable feedback to both human and robot consumers.\n\nAdditionally, the `process_single_issue` function in `src/ingestion/issues.rs` and `process_single_mr` + `ProcessMrResult` in `src/ingestion/merge_requests.rs` are currently private. The surgical sync orchestrator (downstream bead bd-3sez) will need to call these from `src/core/surgical.rs`, so they must be raised to `pub(crate)` visibility. No config field is needed for this bead — the surgical sync feature is triggered purely by CLI flags (bead bd-1lja).\n\n## Approach\n\n### Step 1: Add ErrorCode variant (src/core/error.rs, line ~23)\nAdd `SurgicalPreflightFailed` to the `ErrorCode` enum (after `Ambiguous`). Wire it through three impls:\n- `Display`: maps to `\"SURGICAL_PREFLIGHT_FAILED\"`\n- `exit_code()`: maps to `6` (same category as GitLabNotFound — resource-level failure)\n\n### Step 2: Add LoreError variant (src/core/error.rs, after EmbeddingsNotBuilt ~line 155)\n```rust\n#[error(\"Surgical preflight failed for {entity_type} !{iid} in {project}: {reason}\")]\nSurgicalPreflightFailed {\n entity_type: String, // \"issue\" or \"merge_request\"\n iid: u64,\n project: String,\n reason: String,\n},\n```\nWire in `code()` → `ErrorCode::SurgicalPreflightFailed`, `suggestion()` → a helpful message about verifying the IID exists, `actions()` → `[\"lore issues -p \", \"lore mrs -p \"]`.\n\n### Step 3: Raise visibility (src/ingestion/issues.rs, src/ingestion/merge_requests.rs)\n- `process_single_issue` at line 143: `fn` → `pub(crate) fn`\n- `process_single_mr` at line 144: `fn` → `pub(crate) fn`\n- `ProcessMrResult` at line 138: `struct` → `pub(crate) struct` (and its fields)\n\n## Acceptance Criteria\n- [ ] `ErrorCode::SurgicalPreflightFailed` exists with Display `\"SURGICAL_PREFLIGHT_FAILED\"` and exit code 6\n- [ ] `LoreError::SurgicalPreflightFailed { entity_type, iid, project, reason }` exists\n- [ 
] `LoreError::SurgicalPreflightFailed { .. }.code()` returns `ErrorCode::SurgicalPreflightFailed`\n- [ ] Display output includes entity_type, iid, project, and reason\n- [ ] `suggestion()` returns a non-None helpful string\n- [ ] `process_single_issue` is `pub(crate)`\n- [ ] `process_single_mr` is `pub(crate)`\n- [ ] `ProcessMrResult` and its fields are `pub(crate)`\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n- [ ] All existing tests pass\n\n## Files\n- MODIFY: src/core/error.rs (add ErrorCode variant, LoreError variant, wire Display/exit_code/code/suggestion/actions)\n- MODIFY: src/ingestion/issues.rs (pub(crate) on process_single_issue)\n- MODIFY: src/ingestion/merge_requests.rs (pub(crate) on process_single_mr, ProcessMrResult + fields)\n\n## TDD Anchor\nRED: Write three tests in a new `#[cfg(test)] mod tests` block at the bottom of `src/core/error.rs`:\n\n```rust\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn surgical_preflight_failed_display() {\n let err = LoreError::SurgicalPreflightFailed {\n entity_type: \"issue\".to_string(),\n iid: 42,\n project: \"group/repo\".to_string(),\n reason: \"not found on GitLab\".to_string(),\n };\n let msg = err.to_string();\n assert!(msg.contains(\"issue\"), \"missing entity_type: {msg}\");\n assert!(msg.contains(\"42\"), \"missing iid: {msg}\");\n assert!(msg.contains(\"group/repo\"), \"missing project: {msg}\");\n assert!(msg.contains(\"not found on GitLab\"), \"missing reason: {msg}\");\n }\n\n #[test]\n fn surgical_preflight_failed_error_code() {\n let code = ErrorCode::SurgicalPreflightFailed;\n assert_eq!(code.exit_code(), 6);\n }\n\n #[test]\n fn surgical_preflight_failed_code_mapping() {\n let err = LoreError::SurgicalPreflightFailed {\n entity_type: \"merge_request\".to_string(),\n iid: 99,\n project: \"ns/proj\".to_string(),\n reason: \"404\".to_string(),\n };\n assert_eq!(err.code(), ErrorCode::SurgicalPreflightFailed);\n }\n}\n```\n\nGREEN: Add 
the variant and wire all impls.\nVERIFY: `cargo test surgical_preflight_failed`\n\n## Edge Cases\n- Exit code 6 is shared with `GitLabNotFound` — this is intentional (same semantic category: resource not found). The `ErrorCode` Display string distinguishes them for robot consumers.\n- The `entity_type` field uses strings (\"issue\", \"merge_request\") rather than an enum to avoid over-abstraction for two values.\n- Visibility changes are `pub(crate)`, not `pub` — these are internal implementation details, not public API.\n\n## Dependency Context\nThis is a leaf/foundation bead with no upstream dependencies. Downstream beads bd-1i4i (orchestrator) and bd-3sez (surgical.rs module) depend on this for both the error variant and the pub(crate) visibility of ingestion functions.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:11:41.476902Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:01:18.103312Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-1sc6","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1sc6","depends_on_id":"bd-3sez","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-1se","title":"Epic: Gate 2 - Cross-Reference Extraction","description":"## Background\nGate 2 builds the entity relationship graph that connects issues, MRs, and discussions. Without cross-references, temporal queries can only show events for individually-matched entities. With them, \"lore timeline auth migration\" can discover that MR !567 closed issue #234, which spawned follow-up issue #299 — even if #299 does not contain the words \"auth migration.\"\n\nThree data sources feed entity_references:\n1. **Structured API (reliable):** GET /projects/:id/merge_requests/:iid/closes_issues\n2. **State events (reliable):** resource_state_events.source_merge_request_id\n3. 
**System note parsing (best-effort):** \"mentioned in !456\", \"closed by !789\" patterns\n\n## Architecture\n- **entity_references table:** Already created in migration 011 (bd-hu3/bd-czk). Stores source→target relationships with reference_type (closes/mentioned/related) and source_method provenance.\n- **Directionality convention:** source = entity where reference was observed, target = entity being referenced. Consistent across all source_methods.\n- **Unresolved references:** Cross-project refs stored with target_entity_id=NULL, target_project_path populated. Still valuable for timeline narratives.\n- **closes_issues fetch:** Uses generic dependent fetch queue (job_type = mr_closes_issues). One API call per MR.\n- **System note parsing:** Local post-processing after all dependent fetches complete. No API calls. English-only, best-effort.\n\n## Children (Execution Order)\n1. **bd-czk** [CLOSED] — entity_references schema (folded into migration 011)\n2. **bd-8t4** [OPEN] — Extract cross-references from resource_state_events (source_merge_request_id)\n3. **bd-3ia** [OPEN] — Fetch closes_issues API and populate entity_references\n4. 
**bd-1ji** [OPEN] — Parse system notes for cross-reference patterns\n\n## Gate Completion Criteria\n- [ ] entity_references populated from closes_issues API for all synced MRs\n- [ ] entity_references populated from state events where source_merge_request_id present\n- [ ] System notes parsed for cross-reference patterns (English instances)\n- [ ] Cross-project references stored as unresolved (target_entity_id=NULL)\n- [ ] source_method tracks provenance of each reference\n- [ ] Deduplication: same relationship from multiple sources stored once (UNIQUE constraint)\n- [ ] Timeline JSON includes expansion provenance (via) for expanded entities\n- [ ] Integration test: sync with all three extraction methods, verify entity_references populated\n\n## Dependencies\n- Depends on: Gate 1 (bd-2zl) — event tables and dependent fetch queue\n- Downstream: Gate 3 (bd-ike) depends on entity_references for BFS expansion","status":"closed","priority":1,"issue_type":"feature","created_at":"2026-02-02T21:31:00.981132Z","created_by":"tayloreernisse","updated_at":"2026-02-05T16:08:26.965177Z","closed_at":"2026-02-05T16:08:26.964997Z","close_reason":"All child beads completed: bd-8t4 (state event extraction), bd-3ia (closes_issues API), bd-1ji (system note parsing)","compaction_level":0,"original_size":0,"labels":["epic","gate-2","phase-b"],"dependencies":[{"issue_id":"bd-1se","depends_on_id":"bd-2zl","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-1ser","title":"Implement scope context (global project filter)","description":"## Background\nThe scope context provides a global project filter that flows through all query bridge functions. Users can pin to a specific project set or view all projects. The P keybinding opens a scope picker overlay. 
Scope is persisted in session state.\n\n## Approach\nCreate crates/lore-tui/src/scope.rs:\n- ScopeContext enum: AllProjects, Pinned(Vec)\n- ProjectInfo: id (i64), path (String)\n- scope_filter_sql(scope: &ScopeContext) -> String: generates WHERE clause fragment\n- All action.rs query functions accept &ScopeContext parameter\n- Scope picker overlay: list of projects with checkbox selection\n- P keybinding toggles scope picker from any screen\n\n## Acceptance Criteria\n- [ ] AllProjects scope returns unfiltered results\n- [ ] Pinned scope filters to specific project IDs\n- [ ] All query functions respect global scope\n- [ ] P keybinding opens scope picker\n- [ ] Scope persisted in session state\n- [ ] Scope change triggers re-query of current screen\n\n## Files\n- CREATE: crates/lore-tui/src/scope.rs\n- MODIFY: crates/lore-tui/src/action.rs (add scope parameter to all queries)\n\n## TDD Anchor\nRED: Write test_scope_filter_sql that creates Pinned scope with 2 projects, asserts generated SQL contains IN (1, 2).\nGREEN: Implement scope_filter_sql.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_scope_filter\n\n## Edge Cases\n- Single-project datasets: scope picker not needed, but should still work\n- Very many projects (>50): scope picker should be scrollable\n- Scope change mid-pagination: reset cursor to first page\n\n## Dependency Context\nUses AppState from \"Implement AppState composition\" task.\nUses session persistence from \"Implement session persistence\" task.","status":"in_progress","priority":3,"issue_type":"task","created_at":"2026-02-12T17:03:37.555484Z","created_by":"tayloreernisse","updated_at":"2026-02-19T03:57:38.458188Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1ser","depends_on_id":"bd-26lp","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-1ser","title":"Implement scope context (global project filter)","description":"## Background\nThe scope 
context provides a global project filter that flows through all query bridge functions. Users can pin to a specific project set or view all projects. The P keybinding opens a scope picker overlay. Scope is persisted in session state.\n\n## Approach\nCreate crates/lore-tui/src/scope.rs:\n- ScopeContext enum: AllProjects, Pinned(Vec)\n- ProjectInfo: id (i64), path (String)\n- scope_filter_sql(scope: &ScopeContext) -> String: generates WHERE clause fragment\n- All action.rs query functions accept &ScopeContext parameter\n- Scope picker overlay: list of projects with checkbox selection\n- P keybinding toggles scope picker from any screen\n\n## Acceptance Criteria\n- [ ] AllProjects scope returns unfiltered results\n- [ ] Pinned scope filters to specific project IDs\n- [ ] All query functions respect global scope\n- [ ] P keybinding opens scope picker\n- [ ] Scope persisted in session state\n- [ ] Scope change triggers re-query of current screen\n\n## Files\n- CREATE: crates/lore-tui/src/scope.rs\n- MODIFY: crates/lore-tui/src/action.rs (add scope parameter to all queries)\n\n## TDD Anchor\nRED: Write test_scope_filter_sql that creates Pinned scope with 2 projects, asserts generated SQL contains IN (1, 2).\nGREEN: Implement scope_filter_sql.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_scope_filter\n\n## Edge Cases\n- Single-project datasets: scope picker not needed, but should still work\n- Very many projects (>50): scope picker should be scrollable\n- Scope change mid-pagination: reset cursor to first page\n\n## Dependency Context\nUses AppState from \"Implement AppState composition\" task.\nUses session persistence from \"Implement session persistence\" task.","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T17:03:37.555484Z","created_by":"tayloreernisse","updated_at":"2026-02-19T04:05:23.627190Z","closed_at":"2026-02-19T04:05:23.627135Z","close_reason":"Implemented scope context: scope.rs (SQL helper + project 
fetcher), state/scope_picker.rs, view/scope_picker.rs. P keybinding + toggle_scope. 629 tests pass.","compaction_level":0,"original_size":0,"labels":["TUI"]} {"id":"bd-1soz","title":"Add half_life_decay() pure function","description":"## Background\nThe decay function is the mathematical core of the scoring model. It must be correct, tested first (TDD RED), and verified independently of any DB or SQL changes.\n\n## Approach\nAdd to who.rs as a private function near the top of the module (before query_expert):\n\n```rust\n/// Exponential half-life decay: R = 2^(-t/h)\n/// Returns 1.0 at elapsed=0, 0.5 at elapsed=half_life, 0.0 if half_life=0.\nfn half_life_decay(elapsed_ms: i64, half_life_days: u32) -> f64 {\n let days = (elapsed_ms as f64 / 86_400_000.0).max(0.0);\n let hl = f64::from(half_life_days);\n if hl <= 0.0 { return 0.0; }\n 2.0_f64.powf(-days / hl)\n}\n```\n\n## TDD Loop\n\n### RED (write first):\n```rust\n#[test]\nfn test_half_life_decay_math() {\n let hl_180 = 180;\n // At t=0, full retention\n assert!((half_life_decay(0, hl_180) - 1.0).abs() < f64::EPSILON);\n // At t=half_life, exactly 0.5\n let one_hl_ms = 180 * 86_400_000_i64;\n assert!((half_life_decay(one_hl_ms, hl_180) - 0.5).abs() < 1e-10);\n // At t=2*half_life, exactly 0.25\n assert!((half_life_decay(2 * one_hl_ms, hl_180) - 0.25).abs() < 1e-10);\n // Negative elapsed clamped to 0 -> 1.0\n assert!((half_life_decay(-1000, hl_180) - 1.0).abs() < f64::EPSILON);\n // Zero half-life -> 0.0 (div-by-zero guard)\n assert!((half_life_decay(86_400_000, 0)).abs() < f64::EPSILON);\n}\n\n#[test]\nfn test_score_monotonicity_by_age() {\n // For any half-life, older timestamps must never produce higher decay than newer ones.\n // Use deterministic LCG PRNG (no rand dependency).\n let mut seed: u64 = 42;\n let hl = 90_u32;\n for _ in 0..50 {\n seed = seed.wrapping_mul(6364136223846793005).wrapping_add(1);\n let newer_ms = (seed % 100_000_000) as i64; // 0-100M ms (~1.15 days max)\n seed = 
seed.wrapping_mul(6364136223846793005).wrapping_add(1);\n let older_ms = newer_ms + (seed % 500_000_000) as i64; // always >= newer\n assert!(\n half_life_decay(older_ms, hl) <= half_life_decay(newer_ms, hl),\n \"Monotonicity violated: decay({older_ms}) > decay({newer_ms})\"\n );\n }\n}\n```\n\n### GREEN: Add the half_life_decay function (3 lines of math).\n### VERIFY: `cargo test -p lore -- test_half_life_decay_math test_score_monotonicity`\n\n## Acceptance Criteria\n- [ ] test_half_life_decay_math passes (4 boundary cases + div-by-zero guard)\n- [ ] test_score_monotonicity_by_age passes (50 random pairs, deterministic seed)\n- [ ] Function is `fn` not `pub fn` (module-private)\n- [ ] No DB dependency — pure function\n\n## Files\n- src/cli/commands/who.rs (function near top, tests in test module)\n\n## Edge Cases\n- Negative elapsed_ms: clamped to 0 via .max(0.0) -> returns 1.0\n- half_life_days = 0: returns 0.0, not NaN/Inf\n- Very large elapsed (10 years): returns very small positive f64, never negative","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T16:59:22.913281Z","created_by":"tayloreernisse","updated_at":"2026-02-12T21:07:16.929095Z","closed_at":"2026-02-12T21:07:16.928983Z","close_reason":"Completed: added half_life_decay(elapsed_ms, half_life_days) -> f64 pure function with div-by-zero guard, negative elapsed clamping, and 2 tests (boundary math + monotonicity property). All 585 tests pass.","compaction_level":0,"original_size":0,"labels":["scoring"]} {"id":"bd-1t4","title":"Epic: CP2 Gate C - Dependent Discussion Sync","description":"## Background\nGate C validates the dependent discussion sync with DiffNote position capture. 
This is critical for code review context preservation - without DiffNote positions, we lose the file/line context for review comments.\n\n## Acceptance Criteria (Pass/Fail)\n- [ ] Discussions fetched for MRs with updated_at > discussions_synced_for_updated_at\n- [ ] `SELECT COUNT(*) FROM discussions WHERE merge_request_id IS NOT NULL` > 0\n- [ ] DiffNotes have `position_new_path` populated (file path)\n- [ ] DiffNotes have `position_new_line` populated (line number)\n- [ ] DiffNotes have `position_type` populated (text/image/file)\n- [ ] DiffNotes have SHA triplet: `position_base_sha`, `position_start_sha`, `position_head_sha`\n- [ ] Multi-line DiffNotes have `position_line_range_start` and `position_line_range_end`\n- [ ] Unchanged MRs skip discussion refetch (watermark comparison works)\n- [ ] Watermark NOT advanced on HTTP error mid-pagination\n- [ ] Watermark NOT advanced on note timestamp parse failure\n- [ ] `gi show mr ` displays DiffNote with file context `[path:line]`\n\n## Validation Script\n```bash\n#!/bin/bash\nset -e\n\nDB_PATH=\"${XDG_DATA_HOME:-$HOME/.local/share}/gitlab-inbox/db.sqlite3\"\n\necho \"=== Gate C: Dependent Discussion Sync ===\"\n\n# 1. Check discussion count for MRs\necho \"Step 1: Check MR discussion count...\"\nMR_DISC_COUNT=$(sqlite3 \"$DB_PATH\" \"SELECT COUNT(*) FROM discussions WHERE merge_request_id IS NOT NULL;\")\necho \" MR discussions: $MR_DISC_COUNT\"\n[ \"$MR_DISC_COUNT\" -gt 0 ] || { echo \"FAIL: No MR discussions found\"; exit 1; }\n\n# 2. Check note count\necho \"Step 2: Check note count...\"\nNOTE_COUNT=$(sqlite3 \"$DB_PATH\" \"\n SELECT COUNT(*) FROM notes n\n JOIN discussions d ON d.id = n.discussion_id\n WHERE d.merge_request_id IS NOT NULL;\n\")\necho \" MR notes: $NOTE_COUNT\"\n\n# 3. 
Check DiffNote position data\necho \"Step 3: Check DiffNote positions...\"\nDIFFNOTE_COUNT=$(sqlite3 \"$DB_PATH\" \"SELECT COUNT(*) FROM notes WHERE position_new_path IS NOT NULL;\")\necho \" DiffNotes with position: $DIFFNOTE_COUNT\"\n\n# 4. Sample DiffNote data\necho \"Step 4: Sample DiffNote data...\"\nsqlite3 \"$DB_PATH\" \"\n SELECT \n n.gitlab_id,\n n.position_new_path,\n n.position_new_line,\n n.position_type,\n SUBSTR(n.position_head_sha, 1, 7) as head_sha\n FROM notes n\n WHERE n.position_new_path IS NOT NULL\n LIMIT 5;\n\"\n\n# 5. Check multi-line DiffNotes\necho \"Step 5: Check multi-line DiffNotes...\"\nMULTILINE_COUNT=$(sqlite3 \"$DB_PATH\" \"\n SELECT COUNT(*) FROM notes \n WHERE position_line_range_start IS NOT NULL \n AND position_line_range_end IS NOT NULL\n AND position_line_range_start != position_line_range_end;\n\")\necho \" Multi-line DiffNotes: $MULTILINE_COUNT\"\n\n# 6. Check watermarks set\necho \"Step 6: Check watermarks...\"\nWATERMARKED=$(sqlite3 \"$DB_PATH\" \"\n SELECT COUNT(*) FROM merge_requests \n WHERE discussions_synced_for_updated_at IS NOT NULL;\n\")\necho \" MRs with watermark set: $WATERMARKED\"\n\n# 7. Check last_seen_at for sweep pattern\necho \"Step 7: Check last_seen_at (sweep pattern)...\"\nsqlite3 \"$DB_PATH\" \"\n SELECT \n MIN(last_seen_at) as oldest,\n MAX(last_seen_at) as newest\n FROM discussions \n WHERE merge_request_id IS NOT NULL;\n\"\n\n# 8. Test show command with DiffNote\necho \"Step 8: Find MR with DiffNotes for show test...\"\nMR_IID=$(sqlite3 \"$DB_PATH\" \"\n SELECT DISTINCT m.iid\n FROM merge_requests m\n JOIN discussions d ON d.merge_request_id = m.id\n JOIN notes n ON n.discussion_id = d.id\n WHERE n.position_new_path IS NOT NULL\n LIMIT 1;\n\")\nif [ -n \"$MR_IID\" ]; then\n echo \" Testing: gi show mr $MR_IID\"\n gi show mr \"$MR_IID\" | head -50\nfi\n\n# 9. 
Re-run and verify skip count\necho \"Step 9: Re-run ingest (should skip unchanged MRs)...\"\ngi ingest --type=merge_requests\n# Should report \"Skipped discussion sync for N unchanged MRs\"\n\necho \"\"\necho \"=== Gate C: PASSED ===\"\n```\n\n## Atomicity Test (Manual - Kill Test)\n```bash\n# This tests that partial failure preserves data\n\n# 1. Get an MR with discussions\nMR_ID=$(sqlite3 \"$DB_PATH\" \"\n SELECT m.id FROM merge_requests m\n JOIN discussions d ON d.merge_request_id = m.id\n LIMIT 1;\n\")\n\n# 2. Note current note count\nBEFORE=$(sqlite3 \"$DB_PATH\" \"\n SELECT COUNT(*) FROM notes n\n JOIN discussions d ON d.id = n.discussion_id\n WHERE d.merge_request_id = $MR_ID;\n\")\necho \"Notes before: $BEFORE\"\n\n# 3. Note watermark\nWATERMARK_BEFORE=$(sqlite3 \"$DB_PATH\" \"\n SELECT discussions_synced_for_updated_at FROM merge_requests WHERE id = $MR_ID;\n\")\necho \"Watermark before: $WATERMARK_BEFORE\"\n\n# 4. Force full sync and kill mid-run\ngi ingest --type=merge_requests --full &\nPID=$!\nsleep 3 && kill -9 $PID 2>/dev/null || true\nwait $PID 2>/dev/null || true\n\n# 5. Verify notes preserved (should be same or more, never less)\nAFTER=$(sqlite3 \"$DB_PATH\" \"\n SELECT COUNT(*) FROM notes n\n JOIN discussions d ON d.id = n.discussion_id\n WHERE d.merge_request_id = $MR_ID;\n\")\necho \"Notes after kill: $AFTER\"\n[ \"$AFTER\" -ge \"$BEFORE\" ] || echo \"WARNING: Notes decreased - atomicity may be broken\"\n\n# 6. 
Note watermark should NOT have advanced if killed mid-pagination\nWATERMARK_AFTER=$(sqlite3 \"$DB_PATH\" \"\n SELECT discussions_synced_for_updated_at FROM merge_requests WHERE id = $MR_ID;\n\")\necho \"Watermark after: $WATERMARK_AFTER\"\n```\n\n## Test Commands (Quick Verification)\n```bash\n# Check DiffNote data:\nsqlite3 ~/.local/share/gitlab-inbox/db.sqlite3 \"\n SELECT \n (SELECT COUNT(*) FROM discussions WHERE merge_request_id IS NOT NULL) as mr_discussions,\n (SELECT COUNT(*) FROM notes WHERE position_new_path IS NOT NULL) as diffnotes,\n (SELECT COUNT(*) FROM merge_requests WHERE discussions_synced_for_updated_at IS NOT NULL) as watermarked;\n\"\n\n# Find MR with DiffNotes and show it:\ngi show mr $(sqlite3 ~/.local/share/gitlab-inbox/db.sqlite3 \"\n SELECT DISTINCT m.iid FROM merge_requests m\n JOIN discussions d ON d.merge_request_id = m.id\n JOIN notes n ON n.discussion_id = d.id\n WHERE n.position_new_path IS NOT NULL LIMIT 1;\n\")\n```\n\n## Dependencies\nThis gate requires:\n- bd-3j6 (Discussion transformer with DiffNote position extraction)\n- bd-20h (MR discussion ingestion with atomicity guarantees)\n- bd-iba (Client pagination for MR discussions)\n- Gates A and B must pass first\n\n## Edge Cases\n- MRs without discussions: should sync successfully, just with 0 discussions\n- Discussions without DiffNotes: regular comments have NULL position fields\n- Deleted discussions in GitLab: sweep pattern should remove them locally\n- Invalid note timestamps: should NOT advance watermark, should log warning","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-26T22:06:01.769694Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:48:21.060017Z","closed_at":"2026-01-27T00:48:21.059974Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1t4","depends_on_id":"bd-20h","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1ta","title":"[CP1] Integration tests 
for pagination","description":"Integration tests for GitLab pagination with wiremock.\n\n## Tests (tests/pagination_tests.rs)\n\n### Page Navigation\n- fetches_all_pages_when_multiple_exist\n- respects_per_page_parameter\n- follows_x_next_page_header_until_empty\n- falls_back_to_empty_page_stop_if_headers_missing\n\n### Cursor Behavior\n- applies_cursor_rewind_for_tuple_semantics\n- clamps_negative_rewind_to_zero\n\n## Test Setup\n- Use wiremock::MockServer\n- Set up handlers for /api/v4/projects/:id/issues\n- Return x-next-page headers\n- Verify request params (updated_after, per_page)\n\nFiles: tests/pagination_tests.rs\nDone when: All pagination tests pass with mocked server","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:59:07.806593Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:02.038945Z","closed_at":"2026-01-25T17:02:02.038945Z","deleted_at":"2026-01-25T17:02:02.038939Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} @@ -115,7 +115,7 @@ {"id":"bd-25hb","title":"NOTE-1C: Human and robot output formatting for notes","description":"## Background\nImplement the 4 output formatters for the notes command: human table, robot JSON, JSONL streaming, and CSV export.\n\n## Approach\nAdd to src/cli/commands/list.rs (after the query_notes function from NOTE-1A):\n\n1. 
pub fn print_list_notes(result: &NoteListResult) — human table:\n Use comfy-table (already in Cargo.toml) following the pattern of print_list_issues/print_list_mrs.\n Columns: ID | Author | Type | Body (truncated to 60 chars + \"...\") | Path:Line | Parent | Created\n ID: colored_cell with Cyan for gitlab_id\n Author: @username with Magenta\n Type: \"Diff\" for DiffNote, \"Disc\" for DiscussionNote, \"-\" for others\n Path: position_new_path:line (or \"-\" if no path)\n Parent: \"Issue #N\" or \"MR !N\" from noteable_type + parent_iid\n Created: format_relative_time (existing helper in list.rs)\n\n2. pub fn print_list_notes_json(result: &NoteListResult, elapsed_ms: u64, fields: Option<&[String]>) — robot JSON:\n Standard envelope: {\"ok\":true,\"data\":{\"notes\":[...],\"total_count\":N,\"showing\":M},\"meta\":{\"elapsed_ms\":U64}}\n Supports --fields via filter_fields() from crate::cli::robot\n Same pattern as print_list_issues_json.\n\n3. pub fn print_list_notes_jsonl(result: &NoteListResult) — one JSON object per line:\n Each line is one NoteListRowJson serialized. No envelope. Ideal for jq/notebook pipelines.\n Use serde_json::to_string for each row, println! each line.\n\n4. pub fn print_list_notes_csv(result: &NoteListResult) — CSV output:\n Check if csv crate is already used in the project. 
If not, use manual CSV with proper escaping:\n - Header row with field names matching NoteListRowJson\n - Quote fields containing commas, quotes, or newlines\n - Escape internal quotes by doubling them\n Alternatively, if adding csv crate (add csv = \"1\" to Cargo.toml [dependencies]), use csv::WriterBuilder for RFC 4180 compliance.\n\nHelper: Add a truncate_body(body: &str, max_len: usize) -> String function for the human table truncation.\n\n## Files\n- MODIFY: src/cli/commands/list.rs (4 print functions + truncate_body helper)\n- POSSIBLY MODIFY: Cargo.toml (add csv = \"1\" if using csv crate for CSV output)\n\n## TDD Anchor\nRED: test_truncate_note_body — assert 200-char body truncated to 60 + \"...\"\nGREEN: Implement truncate_body helper.\nVERIFY: cargo test truncate_note_body -- --nocapture\nTests: test_csv_output_basic (CSV output has correct header + escaped fields), test_jsonl_output_one_per_line (each line parses as valid JSON)\n\n## Acceptance Criteria\n- [ ] Human table renders with colored columns, truncated body, relative time\n- [ ] Robot JSON follows standard envelope with timing metadata\n- [ ] --fields filtering works on JSON output (via filter_fields)\n- [ ] JSONL outputs one valid JSON object per line\n- [ ] CSV properly escapes commas, quotes, and newlines in body text\n- [ ] Multi-byte chars handled correctly in CSV and truncation\n- [ ] All 3 tests pass\n\n## Dependency Context\n- Depends on NOTE-1A (bd-20p9): uses NoteListRow, NoteListRowJson, NoteListResult structs\n\n## Edge Cases\n- Empty body in table: show \"-\" or empty cell\n- Very long body with multi-byte chars: truncation must respect char boundaries (use .chars().take(n) not byte slicing)\n- JSONL with body containing newlines: serde_json::to_string escapes \\n correctly\n- CSV with body containing quotes: must double them per RFC 
4180","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:00:53.482055Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:24.304235Z","closed_at":"2026-02-12T18:13:24.304188Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["cli","per-note","search"],"dependencies":[{"issue_id":"bd-25hb","depends_on_id":"bd-1oyf","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-25s","title":"robot-docs: Add Ollama dependency discovery to manifest","description":"## Background\n\nAdd Ollama dependency discovery to robot-docs so agents know which commands need Ollama and which work without it. Currently robot-docs lists commands, exit codes, workflows, and aliases — but has no dependency information.\n\n## Codebase Context\n\n- handle_robot_docs() in src/main.rs (line ~1646) returns RobotDocsData JSON\n- RobotDocsData struct has fields: commands, exit_codes, workflows, aliases, clap_error_codes\n- Currently 18 documented commands in the manifest\n- Ollama required for: embed, search --mode=semantic, search --mode=hybrid\n- Not required for: all Phase B temporal commands (timeline, file-history, trace), lexical search, count, ingest, stats, sync, doctor, health, who, show, issues, mrs, etc.\n- No dependencies field exists yet in RobotDocsData\n\n## Approach\n\n### 1. Add dependencies field to RobotDocsData (src/main.rs):\n\n```rust\n#[derive(Serialize)]\nstruct RobotDocsData {\n // ... existing fields ...\n dependencies: DependencyInfo,\n}\n\n#[derive(Serialize)]\nstruct DependencyInfo {\n ollama: OllamaDependency,\n}\n\n#[derive(Serialize)]\nstruct OllamaDependency {\n required_by: Vec,\n not_required_by: Vec,\n install: HashMap, // {\"macos\": \"brew install ollama\", \"linux\": \"curl ...\"}\n setup: String, // \"ollama pull nomic-embed-text\"\n note: String,\n}\n```\n\n### 2. 
Populate in handle_robot_docs():\n\n```json\n{\n \"ollama\": {\n \"required_by\": [\"embed\", \"search --mode=semantic\", \"search --mode=hybrid\"],\n \"not_required_by\": [\"issues\", \"mrs\", \"search --mode=lexical\", \"timeline\", \"file-history\", \"count\", \"ingest\", \"stats\", \"sync\", \"doctor\", \"health\", \"who\", \"show\", \"status\"],\n \"install\": {\"macos\": \"brew install ollama\", \"linux\": \"curl -fsSL https://ollama.ai/install.sh | sh\"},\n \"setup\": \"ollama pull nomic-embed-text\",\n \"note\": \"Lexical search and all temporal features work without Ollama.\"\n }\n}\n```\n\n## Acceptance Criteria\n\n- [ ] `lore robot-docs | jq '.data.dependencies.ollama'` returns structured info\n- [ ] required_by lists embed and semantic/hybrid search modes\n- [ ] not_required_by lists all commands that work without Ollama (including Phase B if they exist)\n- [ ] Install instructions for macos and linux\n- [ ] setup field includes \"ollama pull nomic-embed-text\"\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n- [ ] `cargo fmt --check` passes\n\n## Files\n\n- MODIFY: src/main.rs (add DependencyInfo/OllamaDependency structs, update RobotDocsData, populate in handle_robot_docs)\n\n## TDD Anchor\n\nNo unit test needed — this is static metadata. 
Verify with:\n\n```bash\ncargo check --all-targets\ncargo run --release -- robot-docs | jq '.data.dependencies.ollama.required_by'\ncargo run --release -- robot-docs | jq '.data.dependencies.ollama.not_required_by'\n```\n\n## Edge Cases\n\n- Keep not_required_by up to date as new commands are added — consider a comment in the code listing which commands to check\n- Phase B commands (timeline, file-history, trace) must be in not_required_by once they exist\n- If a command conditionally needs Ollama (like search with --mode flag), list the specific flag combination in required_by\n\n## Dependency Context\n\n- **RobotDocsData** (src/main.rs ~line 1646): the existing struct that this bead extends. Currently has commands (Vec), exit_codes (Vec), workflows (Vec), aliases (Vec), clap_error_codes (Vec). Adding a dependencies field is additive — no breaking changes.\n- **handle_robot_docs()**: the function that constructs and returns the JSON. All data is hardcoded in the function — no runtime introspection needed.","status":"open","priority":4,"issue_type":"feature","created_at":"2026-01-30T20:26:43.169688Z","created_by":"tayloreernisse","updated_at":"2026-02-17T16:53:20.425853Z","compaction_level":0,"original_size":0,"labels":["enhancement","robot-mode"]} {"id":"bd-26f2","title":"Implement common widgets (status bar, breadcrumb, loading, error toast, help overlay)","description":"## Background\nCommon widgets appear across all screens: the status bar shows context-sensitive key hints and sync status, the breadcrumb shows navigation depth, the loading spinner indicates background work, the error toast shows transient errors with auto-dismiss, and the help overlay (?) 
shows available keybindings.\n\n## Approach\nCreate crates/lore-tui/src/view/common/mod.rs and individual widget files:\n\nview/common/mod.rs:\n- render_breadcrumb(frame, area, nav: &NavigationStack, theme: &Theme): renders \"Dashboard > Issues > #42\" trail\n- render_status_bar(frame, area, registry: &CommandRegistry, screen: &Screen, mode: &InputMode, theme: &Theme): renders bottom bar with key hints and sync indicator\n- render_loading(frame, area, load_state: &LoadState, theme: &Theme): renders centered spinner for LoadingInitial, or subtle refresh indicator for Refreshing\n- render_error_toast(frame, area, msg: &str, theme: &Theme): renders floating toast at bottom-right with error message\n- render_help_overlay(frame, area, registry: &CommandRegistry, screen: &Screen, theme: &Theme): renders centered modal with keybinding list from registry\n\nCreate crates/lore-tui/src/view/mod.rs:\n- render_screen(frame, app: &LoreApp): top-level dispatch — renders breadcrumb + screen content + status bar + optional overlays (help, error toast, command palette)\n\n## Acceptance Criteria\n- [ ] Breadcrumb renders all stack entries with \" > \" separator\n- [ ] Status bar shows contextual hints from CommandRegistry\n- [ ] Loading spinner animates via tick subscription\n- [ ] Error toast auto-positions at bottom-right of screen\n- [ ] Help overlay shows all commands for current screen from registry\n- [ ] render_screen routes to correct per-screen view function\n- [ ] Overlays (help, error, palette) render on top of screen content\n\n## Files\n- CREATE: crates/lore-tui/src/view/mod.rs\n- CREATE: crates/lore-tui/src/view/common/mod.rs\n\n## TDD Anchor\nRED: Write test_breadcrumbs_format that creates a NavigationStack with Dashboard > IssueList, calls breadcrumbs(), asserts [\"Dashboard\", \"Issues\"].\nGREEN: Implement breadcrumbs() in NavigationStack (already in nav task) and render_breadcrumb.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml 
test_breadcrumbs\n\n## Edge Cases\n- Breadcrumb must truncate from the left if stack is too deep for terminal width\n- Status bar must handle narrow terminals (<60 cols) gracefully — show abbreviated hints\n- Error toast must handle very long messages with truncation\n- Help overlay must scroll if there are more commands than terminal height\n\n## Dependency Context\nUses NavigationStack from \"Implement NavigationStack\" task.\nUses CommandRegistry from \"Implement CommandRegistry\" task.\nUses LoadState from \"Implement AppState composition\" task.\nUses Theme from \"Implement theme configuration\" task.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:57:13.520393Z","created_by":"tayloreernisse","updated_at":"2026-02-12T21:10:58.182249Z","closed_at":"2026-02-12T21:10:58.181707Z","close_reason":"Completed: 5 common widgets + render_screen dispatch + 27 tests + clippy clean","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-26f2","depends_on_id":"bd-1qpp","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-26f2","depends_on_id":"bd-1v9m","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-26f2","depends_on_id":"bd-38lb","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-26f2","depends_on_id":"bd-5ofk","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-26lp","title":"Implement CLI integration (lore tui command + binary delegation)","description":"## Background\nThe lore CLI binary needs a tui subcommand that launches the lore-tui binary. This is runtime binary delegation — lore finds lore-tui via PATH lookup and execs it, passing through relevant flags. Zero compile-time dependency from lore to lore-tui. 
The TUI is the human interface; the CLI is the robot/script interface.\n\n## Approach\nAdd a tui subcommand to the lore CLI:\n\n**CLI side** (`src/cli/tui.rs`):\n- Add `Tui` variant to the main CLI enum with flags: --config, --sync, --fresh, --render-mode, --ascii, --no-alt-screen\n- Implementation: resolve lore-tui binary via PATH lookup (std::process::Command with \"lore-tui\")\n- Pass through all flags as CLI arguments\n- If lore-tui not found in PATH, print helpful error: \"lore-tui binary not found. Install with: cargo install --path crates/lore-tui\"\n- Exec (not spawn+wait) using std::os::unix::process::CommandExt::exec() for clean process replacement on Unix\n\n**Binary naming**: The binary is `lore-tui` (hyphenated), matching the crate name.\n\n## Acceptance Criteria\n- [ ] lore tui launches lore-tui binary from PATH\n- [ ] All flags (--config, --sync, --fresh, --render-mode, --ascii, --no-alt-screen) are passed through\n- [ ] Missing binary produces helpful error with install instructions\n- [ ] Uses exec() on Unix for clean process replacement (no zombie parent)\n- [ ] Robot mode: lore --robot tui returns JSON error if binary not found\n- [ ] lore tui --help shows TUI-specific flags\n\n## Files\n- CREATE: src/cli/tui.rs\n- MODIFY: src/cli/mod.rs (add tui subcommand to CLI enum)\n- MODIFY: src/main.rs (add match arm for Tui variant)\n\n## TDD Anchor\nRED: Write `test_tui_binary_not_found_error` that asserts the error message includes install instructions when lore-tui is not in PATH.\nGREEN: Implement the binary lookup and error handling.\nVERIFY: cargo test tui_binary -- --nocapture\n\nAdditional tests:\n- test_tui_flag_passthrough (verify all flags are forwarded)\n- test_tui_robot_mode_json_error (structured error when binary missing)\n\n## Edge Cases\n- lore-tui binary exists but is not executable — should produce clear error\n- PATH contains multiple lore-tui versions — uses first match (standard PATH behavior)\n- Windows: exec() not available — fall 
back to spawn+wait+exit with same code\n- User runs lore tui in robot mode — should fail with structured JSON error (TUI is human-only)\n\n## Dependency Context\nDepends on bd-2iqk (Doctor + Stats screens) for phase ordering. The CLI integration is one of the last Phase 4 tasks because it requires lore-tui to be substantially complete for the delegation to be useful.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:39.602970Z","created_by":"tayloreernisse","updated_at":"2026-02-19T03:57:33.709750Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-26lp","depends_on_id":"bd-2iqk","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-26lp","title":"Implement CLI integration (lore tui command + binary delegation)","description":"## Background\nThe lore CLI binary needs a tui subcommand that launches the lore-tui binary. This is runtime binary delegation — lore finds lore-tui via PATH lookup and execs it, passing through relevant flags. Zero compile-time dependency from lore to lore-tui. The TUI is the human interface; the CLI is the robot/script interface.\n\n## Approach\nAdd a tui subcommand to the lore CLI:\n\n**CLI side** (`src/cli/tui.rs`):\n- Add `Tui` variant to the main CLI enum with flags: --config, --sync, --fresh, --render-mode, --ascii, --no-alt-screen\n- Implementation: resolve lore-tui binary via PATH lookup (std::process::Command with \"lore-tui\")\n- Pass through all flags as CLI arguments\n- If lore-tui not found in PATH, print helpful error: \"lore-tui binary not found. 
Install with: cargo install --path crates/lore-tui\"\n- Exec (not spawn+wait) using std::os::unix::process::CommandExt::exec() for clean process replacement on Unix\n\n**Binary naming**: The binary is `lore-tui` (hyphenated), matching the crate name.\n\n## Acceptance Criteria\n- [ ] lore tui launches lore-tui binary from PATH\n- [ ] All flags (--config, --sync, --fresh, --render-mode, --ascii, --no-alt-screen) are passed through\n- [ ] Missing binary produces helpful error with install instructions\n- [ ] Uses exec() on Unix for clean process replacement (no zombie parent)\n- [ ] Robot mode: lore --robot tui returns JSON error if binary not found\n- [ ] lore tui --help shows TUI-specific flags\n\n## Files\n- CREATE: src/cli/tui.rs\n- MODIFY: src/cli/mod.rs (add tui subcommand to CLI enum)\n- MODIFY: src/main.rs (add match arm for Tui variant)\n\n## TDD Anchor\nRED: Write `test_tui_binary_not_found_error` that asserts the error message includes install instructions when lore-tui is not in PATH.\nGREEN: Implement the binary lookup and error handling.\nVERIFY: cargo test tui_binary -- --nocapture\n\nAdditional tests:\n- test_tui_flag_passthrough (verify all flags are forwarded)\n- test_tui_robot_mode_json_error (structured error when binary missing)\n\n## Edge Cases\n- lore-tui binary exists but is not executable — should produce clear error\n- PATH contains multiple lore-tui versions — uses first match (standard PATH behavior)\n- Windows: exec() not available — fall back to spawn+wait+exit with same code\n- User runs lore tui in robot mode — should fail with structured JSON error (TUI is human-only)\n\n## Dependency Context\nDepends on bd-2iqk (Doctor + Stats screens) for phase ordering. 
The CLI integration is one of the last Phase 4 tasks because it requires lore-tui to be substantially complete for the delegation to be useful.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:39.602970Z","created_by":"tayloreernisse","updated_at":"2026-02-19T04:32:03.723595Z","closed_at":"2026-02-19T04:32:03.723543Z","close_reason":"CLI integration: lore tui subcommand delegates to lore-tui via PATH exec(). Robot mode returns structured JSON error. 5 tests.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-26lp","depends_on_id":"bd-2iqk","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2711","title":"WHO: Reviews mode query (query_reviews)","description":"## Background\n\nReviews mode answers \"What review patterns does person X have?\" by analyzing the **prefix** convention in DiffNote bodies (e.g., **suggestion**: ..., **question**: ..., **nit**: ...). Only counts DiffNotes on MRs the user did NOT author (m.author_username != ?1).\n\n## Approach\n\n### Three queries:\n1. **Total DiffNotes**: COUNT(*) of DiffNotes by user on others' MRs\n2. **Distinct MRs reviewed**: COUNT(DISTINCT m.id) \n3. 
**Category extraction**: SQL-level prefix parsing + Rust normalization\n\n### Category extraction SQL:\n```sql\nSELECT\n SUBSTR(ltrim(n.body), 3, INSTR(SUBSTR(ltrim(n.body), 3), '**') - 1) AS raw_prefix,\n COUNT(*) AS cnt\nFROM notes n\nJOIN discussions d ON n.discussion_id = d.id\nJOIN merge_requests m ON d.merge_request_id = m.id\nWHERE n.author_username = ?1\n AND n.note_type = 'DiffNote' AND n.is_system = 0\n AND m.author_username != ?1\n AND ltrim(n.body) LIKE '**%**%' -- only bodies with **prefix** pattern\n AND n.created_at >= ?2\n AND (?3 IS NULL OR n.project_id = ?3)\nGROUP BY raw_prefix ORDER BY cnt DESC\n```\n\nKey: `ltrim(n.body)` tolerates leading whitespace before **prefix** (common in practice).\n\n### normalize_review_prefix() in Rust:\n```rust\nfn normalize_review_prefix(raw: &str) -> String {\n let s = raw.trim().trim_end_matches(':').trim().to_lowercase();\n // Strip parentheticals like \"(non-blocking)\"\n let s = if let Some(idx) = s.find('(') { s[..idx].trim().to_string() } else { s };\n // Merge nit/nitpick variants\n match s.as_str() {\n \"nitpick\" | \"nit\" => \"nit\".to_string(),\n other => other.to_string(),\n }\n}\n```\n\n### HashMap merge for normalized categories, then sort by count DESC\n\n### ReviewsResult struct:\n```rust\npub struct ReviewsResult {\n pub username: String,\n pub total_diffnotes: u32,\n pub categorized_count: u32,\n pub mrs_reviewed: u32,\n pub categories: Vec,\n}\npub struct ReviewCategory { pub name: String, pub count: u32, pub percentage: f64 }\n```\n\nNo LIMIT needed — categories are naturally bounded (few distinct prefixes).\n\n## Files\n\n- `src/cli/commands/who.rs`\n\n## TDD Loop\n\nRED:\n```\ntest_reviews_query — insert 3 DiffNotes (2 with **prefix**, 1 without); verify total=3, categorized=2, categories.len()=2\ntest_normalize_review_prefix — \"suggestion\" \"Suggestion:\" \"suggestion (non-blocking):\" \"Nitpick:\" \"nit (non-blocking):\" \"question\" \"TODO:\"\n```\n\nGREEN: Implement query_reviews + 
normalize_review_prefix\nVERIFY: `cargo test -- reviews`\n\n## Acceptance Criteria\n\n- [ ] test_reviews_query passes (total=3, categorized=2)\n- [ ] test_normalize_review_prefix passes (nit/nitpick merge, parenthetical strip)\n- [ ] Only counts DiffNotes on MRs user did NOT author\n- [ ] Default since window: 6m\n\n## Edge Cases\n\n- Self-authored MRs excluded (m.author_username != ?1) — user's notes on own MRs are not \"reviews\"\n- ltrim() handles leading whitespace before **prefix**\n- Empty raw_prefix after normalization filtered out (!normalized.is_empty())\n- Percentage calculated from categorized_count (not total_diffnotes)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:40:53.350210Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.599252Z","closed_at":"2026-02-08T04:10:29.599217Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2711","depends_on_id":"bd-2ldg","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2711","depends_on_id":"bd-34rr","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-296a","title":"NOTE-1E: Composite query index and author_id column (migration 022)","description":"## Background\nThe notes table needs composite covering indexes for the new query_notes() function, plus the author_id column for immutable identity (NOTE-0D). Combined in a single migration to avoid an extra migration step. Migration slot 022 is available (021 = work_item_status, 023 = issue_detail_fields already exists).\n\n## Approach\nCreate migrations/022_notes_query_index.sql with:\n\n1. 
Composite index for author-scoped queries (most common pattern):\n CREATE INDEX IF NOT EXISTS idx_notes_user_created\n ON notes(project_id, author_username COLLATE NOCASE, created_at DESC, id DESC)\n WHERE is_system = 0;\n\n2. Composite index for project-scoped date-range queries:\n CREATE INDEX IF NOT EXISTS idx_notes_project_created\n ON notes(project_id, created_at DESC, id DESC)\n WHERE is_system = 0;\n\n3. Discussion JOIN indexes (check if they already exist first):\n CREATE INDEX IF NOT EXISTS idx_discussions_issue_id ON discussions(issue_id);\n CREATE INDEX IF NOT EXISTS idx_discussions_mr_id ON discussions(merge_request_id);\n\n4. Immutable author identity column (for NOTE-0D):\n ALTER TABLE notes ADD COLUMN author_id INTEGER;\n CREATE INDEX IF NOT EXISTS idx_notes_author_id ON notes(author_id) WHERE author_id IS NOT NULL;\n\nRegister in src/core/db.rs MIGRATIONS array as (\"022\", include_str!(\"../../migrations/022_notes_query_index.sql\")). Insert BEFORE the existing (\"023\", ...) entry. 
LATEST_SCHEMA_VERSION auto-increments via MIGRATIONS.len().\n\n## Files\n- CREATE: migrations/022_notes_query_index.sql\n- MODIFY: src/core/db.rs (add (\"022\", include_str!(...)) to MIGRATIONS array, insert at position before \"023\" entry around line 73)\n\n## TDD Anchor\nRED: test_migration_022_indexes_exist — run_migrations on in-memory DB, verify 4 new indexes exist in sqlite_master.\nGREEN: Create migration file with all CREATE INDEX statements.\nVERIFY: cargo test migration_022 -- --nocapture\n\n## Acceptance Criteria\n- [ ] Migration 022 creates idx_notes_user_created partial index\n- [ ] Migration 022 creates idx_notes_project_created partial index\n- [ ] Migration 022 creates idx_discussions_issue_id (or is no-op if exists)\n- [ ] Migration 022 creates idx_discussions_mr_id (or is no-op if exists)\n- [ ] Migration 022 adds author_id INTEGER column to notes\n- [ ] Migration 022 creates idx_notes_author_id partial index\n- [ ] MIGRATIONS array in db.rs includes (\"022\", ...) before (\"023\", ...)\n- [ ] Existing tests still pass with new migration\n- [ ] Test verifying all indexes exist passes\n\n## Edge Cases\n- Partial indexes exclude system notes (is_system = 0) — filters 30-50% of notes\n- COLLATE NOCASE on author_username matches the query's case-insensitive comparison\n- author_id is nullable (existing notes won't have it until re-synced)\n- IF NOT EXISTS on all CREATE INDEX statements makes migration idempotent","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:18.127989Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.435624Z","closed_at":"2026-02-12T18:13:15.435576Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"],"dependencies":[{"issue_id":"bd-296a","depends_on_id":"bd-jbfw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-29qw","title":"Implement Timeline screen (state + action + 
view)","description":"## Background\nThe Timeline screen renders a chronological event stream from the 5-stage timeline pipeline (SEED -> HYDRATE -> EXPAND -> COLLECT -> RENDER). Events are color-coded by type and can be scoped to an entity, author, or time range.\n\n## Approach\nState (state/timeline.rs):\n- TimelineState: events (Vec), query (String), query_input (TextInput), query_focused (bool), selected_index (usize), scroll_offset (usize), scope (TimelineScope)\n- TimelineScope: All, Entity(EntityKey), Author(String), DateRange(DateTime, DateTime)\n\nAction (action.rs):\n- fetch_timeline(conn, scope, limit, clock) -> Vec: runs the timeline pipeline against DB\n\nView (view/timeline.rs):\n- Vertical event stream with timestamp gutter on the left\n- Color-coded event types: Created(green), Updated(yellow), Closed(red), Merged(purple), Commented(blue), Labeled(cyan), Milestoned(orange)\n- Each event: timestamp | entity ref | event description\n- Entity refs navigable via Enter\n- Query bar for filtering by text or entity\n- Keyboard: j/k scroll, Enter navigate to entity, / focus query, g+g top\n\n## Acceptance Criteria\n- [ ] Timeline renders chronological event stream\n- [ ] Events color-coded by type\n- [ ] Entity references navigable\n- [ ] Scope filters: all, per-entity, per-author, date range\n- [ ] Query bar filters events\n- [ ] Keyboard navigation works (j/k/Enter/Esc)\n- [ ] Timestamps use injected Clock\n\n## Files\n- MODIFY: crates/lore-tui/src/state/timeline.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_timeline)\n- CREATE: crates/lore-tui/src/view/timeline.rs\n\n## TDD Anchor\nRED: Write test_fetch_timeline_scoped that creates issues with events, calls fetch_timeline with Entity scope, asserts only that entity's events returned.\nGREEN: Implement fetch_timeline with scope filtering.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_timeline\n\n## Edge Cases\n- Timeline pipeline may not be fully 
implemented in core yet — degrade gracefully if SEED/HYDRATE/EXPAND stages are not available, fall back to raw events\n- Very long timelines: VirtualizedList or lazy loading for performance\n- Events with identical timestamps: stable sort by entity type, then iid\n\n## Dependency Context\nUses timeline pipeline types from src/core/timeline.rs if available.\nUses Clock for timestamp rendering from \"Implement Clock trait\" task.\nUses EntityKey navigation from \"Implement core types\" task.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:05.605968Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:46:49.941242Z","closed_at":"2026-02-18T21:46:49.941051Z","close_reason":"Timeline screen complete: TimelineState (scope/generation/selection/scroll), action functions (4 event source collectors querying resource event tables), view renderer (color-coded event stream with scrolling), all wired into screen dispatch. 518 tests pass, clippy clean, fmt clean.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-29qw","depends_on_id":"bd-1zow","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -144,7 +144,7 @@ {"id":"bd-2h0","title":"[CP1] gi list issues command","description":"List issues from the database.\n\n## Module\nsrc/cli/commands/list.rs\n\n## Clap Definition\nList {\n #[arg(value_parser = [\"issues\", \"mrs\"])]\n entity: String,\n \n #[arg(long, default_value = \"20\")]\n limit: usize,\n \n #[arg(long)]\n project: Option,\n \n #[arg(long, value_parser = [\"opened\", \"closed\", \"all\"])]\n state: Option,\n}\n\n## Output Format\nIssues (showing 20 of 3,801)\n\n #1234 Authentication redesign opened @johndoe 3 days ago\n #1233 Fix memory leak in cache closed @janedoe 5 days ago\n #1232 Add dark mode support opened @bobsmith 1 week ago\n ...\n\n## Implementation\n- Query issues table with filters\n- Join with projects table for display\n- Format updated_at as 
relative time (\"3 days ago\")\n- Truncate title if too long\n\nFiles: src/cli/commands/list.rs\nDone when: List displays issues with proper filtering and formatting","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:58:23.809829Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.898106Z","closed_at":"2026-01-25T17:02:01.898106Z","deleted_at":"2026-01-25T17:02:01.898102Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-2i10","title":"OBSERV: Add log file diagnostics to lore doctor","description":"## Background\nlore doctor is the diagnostic entry point. Adding log file info lets users verify logging is working and check disk usage. The existing DoctorChecks struct (src/cli/commands/doctor.rs:43-51) has checks for config, database, gitlab, projects, ollama.\n\n## Approach\nAdd a new LoggingCheck struct and field to DoctorChecks:\n\n```rust\n#[derive(Debug, Serialize)]\npub struct LoggingCheck {\n pub result: CheckResult,\n pub log_dir: String,\n pub file_count: usize,\n pub total_bytes: u64,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub oldest_file: Option,\n}\n```\n\nAdd to DoctorChecks (src/cli/commands/doctor.rs:43-51):\n```rust\npub logging: LoggingCheck,\n```\n\nImplement check_logging() function:\n```rust\nfn check_logging() -> LoggingCheck {\n let log_dir = get_log_dir(None); // TODO: accept config override\n let mut file_count = 0;\n let mut total_bytes = 0u64;\n let mut oldest: Option = None;\n\n if let Ok(entries) = std::fs::read_dir(&log_dir) {\n for entry in entries.flatten() {\n let name = entry.file_name().to_string_lossy().to_string();\n if name.starts_with(\"lore.\") && name.ends_with(\".log\") {\n file_count += 1;\n if let Ok(meta) = entry.metadata() {\n total_bytes += meta.len();\n }\n if oldest.as_ref().map_or(true, |o| name < *o) {\n oldest = Some(name);\n }\n }\n }\n }\n\n LoggingCheck 
{\n result: CheckResult { status: CheckStatus::Ok, message: None },\n log_dir: log_dir.display().to_string(),\n file_count,\n total_bytes,\n oldest_file: oldest,\n }\n}\n```\n\nCall from run_doctor() (src/cli/commands/doctor.rs:91-126) and add to DoctorChecks construction.\n\nFor interactive output in print_doctor_results(), add a section:\n```\nLogging\n Log directory: ~/.local/share/lore/logs/\n Log files: 7 (2.3 MB)\n Oldest: lore.2026-01-28.log\n```\n\n## Acceptance Criteria\n- [ ] lore doctor shows log directory path, file count, total size\n- [ ] lore --robot doctor JSON includes logging field with log_dir, file_count, total_bytes, oldest_file\n- [ ] When no log files exist: file_count=0, total_bytes=0, oldest_file=null\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/cli/commands/doctor.rs (add LoggingCheck struct, check_logging fn, wire into DoctorChecks)\n\n## TDD Loop\nRED: test_check_logging_with_files, test_check_logging_empty_dir\nGREEN: Implement LoggingCheck struct and check_logging function\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- Log directory doesn't exist yet (first run before any sync): report file_count=0, status Ok\n- Permission errors on read_dir: report status Warning with message","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:53:55.682986Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:15:04.520915Z","closed_at":"2026-02-04T17:15:04.520868Z","close_reason":"Added LoggingCheck to DoctorChecks with log_dir, file_count, total_bytes; shows in both interactive and robot output","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-2i10","depends_on_id":"bd-1k4","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2i10","depends_on_id":"bd-2nx","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} 
{"id":"bd-2iq","title":"[CP1] Database migration 002_issues.sql","description":"## Background\n\nThe 002_issues.sql migration creates tables for issues, labels, issue_labels, discussions, and notes. This is the data foundation for Checkpoint 1, enabling issue ingestion with cursor-based sync, label tracking, and discussion storage.\n\n## Approach\n\nCreate `migrations/002_issues.sql` with complete SQL statements.\n\n### Full Migration SQL\n\n```sql\n-- Migration 002: Issue Ingestion Tables\n-- Applies on top of 001_initial.sql\n\n-- Issues table\nCREATE TABLE issues (\n id INTEGER PRIMARY KEY,\n gitlab_id INTEGER UNIQUE NOT NULL,\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n iid INTEGER NOT NULL,\n title TEXT,\n description TEXT,\n state TEXT NOT NULL CHECK (state IN ('opened', 'closed')),\n author_username TEXT,\n created_at INTEGER NOT NULL, -- ms epoch UTC\n updated_at INTEGER NOT NULL, -- ms epoch UTC\n last_seen_at INTEGER NOT NULL, -- updated on every upsert\n discussions_synced_for_updated_at INTEGER, -- watermark for dependent sync\n web_url TEXT,\n raw_payload_id INTEGER REFERENCES raw_payloads(id)\n);\n\nCREATE INDEX idx_issues_project_updated ON issues(project_id, updated_at);\nCREATE INDEX idx_issues_author ON issues(author_username);\nCREATE UNIQUE INDEX uq_issues_project_iid ON issues(project_id, iid);\n\n-- Labels table (name-only for CP1)\nCREATE TABLE labels (\n id INTEGER PRIMARY KEY,\n gitlab_id INTEGER, -- optional, for future Labels API\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n name TEXT NOT NULL,\n color TEXT,\n description TEXT\n);\n\nCREATE UNIQUE INDEX uq_labels_project_name ON labels(project_id, name);\nCREATE INDEX idx_labels_name ON labels(name);\n\n-- Issue-label junction (DELETE before INSERT for stale removal)\nCREATE TABLE issue_labels (\n issue_id INTEGER NOT NULL REFERENCES issues(id) ON DELETE CASCADE,\n label_id INTEGER NOT NULL REFERENCES labels(id) ON DELETE 
CASCADE,\n PRIMARY KEY(issue_id, label_id)\n);\n\nCREATE INDEX idx_issue_labels_label ON issue_labels(label_id);\n\n-- Discussion threads for issues (MR discussions added in CP2)\nCREATE TABLE discussions (\n id INTEGER PRIMARY KEY,\n gitlab_discussion_id TEXT NOT NULL, -- GitLab string ID (e.g., \"6a9c1750b37d...\")\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n issue_id INTEGER REFERENCES issues(id) ON DELETE CASCADE,\n merge_request_id INTEGER, -- FK added in CP2 via ALTER TABLE\n noteable_type TEXT NOT NULL CHECK (noteable_type IN ('Issue', 'MergeRequest')),\n individual_note INTEGER NOT NULL DEFAULT 0, -- 0=threaded, 1=standalone\n first_note_at INTEGER, -- min(note.created_at) for ordering\n last_note_at INTEGER, -- max(note.created_at) for \"recently active\"\n last_seen_at INTEGER NOT NULL, -- updated on every upsert\n resolvable INTEGER NOT NULL DEFAULT 0, -- MR discussions can be resolved\n resolved INTEGER NOT NULL DEFAULT 0,\n CHECK (\n (noteable_type = 'Issue' AND issue_id IS NOT NULL AND merge_request_id IS NULL) OR\n (noteable_type = 'MergeRequest' AND merge_request_id IS NOT NULL AND issue_id IS NULL)\n )\n);\n\nCREATE UNIQUE INDEX uq_discussions_project_discussion_id ON discussions(project_id, gitlab_discussion_id);\nCREATE INDEX idx_discussions_issue ON discussions(issue_id);\nCREATE INDEX idx_discussions_mr ON discussions(merge_request_id);\nCREATE INDEX idx_discussions_last_note ON discussions(last_note_at);\n\n-- Notes belong to discussions\nCREATE TABLE notes (\n id INTEGER PRIMARY KEY,\n gitlab_id INTEGER UNIQUE NOT NULL,\n discussion_id INTEGER NOT NULL REFERENCES discussions(id) ON DELETE CASCADE,\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n note_type TEXT, -- 'DiscussionNote' | 'DiffNote' | null\n is_system INTEGER NOT NULL DEFAULT 0, -- 1 for system-generated notes\n author_username TEXT,\n body TEXT,\n created_at INTEGER NOT NULL, -- ms epoch\n updated_at INTEGER NOT NULL, -- ms 
epoch\n last_seen_at INTEGER NOT NULL, -- updated on every upsert\n position INTEGER, -- 0-indexed array order from API\n resolvable INTEGER NOT NULL DEFAULT 0,\n resolved INTEGER NOT NULL DEFAULT 0,\n resolved_by TEXT,\n resolved_at INTEGER,\n -- DiffNote position metadata (populated for MR DiffNotes in CP2)\n position_old_path TEXT,\n position_new_path TEXT,\n position_old_line INTEGER,\n position_new_line INTEGER,\n raw_payload_id INTEGER REFERENCES raw_payloads(id)\n);\n\nCREATE INDEX idx_notes_discussion ON notes(discussion_id);\nCREATE INDEX idx_notes_author ON notes(author_username);\nCREATE INDEX idx_notes_system ON notes(is_system);\n\n-- Update schema version\nINSERT INTO schema_version (version, applied_at, description)\nVALUES (2, strftime('%s', 'now') * 1000, 'Issue ingestion tables');\n```\n\n## Acceptance Criteria\n\n- [ ] Migration file exists at `migrations/002_issues.sql`\n- [ ] All tables created: issues, labels, issue_labels, discussions, notes\n- [ ] All indexes created as specified\n- [ ] CHECK constraints on state and noteable_type work correctly\n- [ ] CASCADE deletes work (project deletion cascades)\n- [ ] Migration applies cleanly on fresh DB after 001_initial.sql\n- [ ] schema_version updated to 2 after migration\n- [ ] `gi doctor` shows schema_version = 2\n\n## Files\n\n- migrations/002_issues.sql (create)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/migration_tests.rs\n#[test] fn migration_002_creates_issues_table()\n#[test] fn migration_002_creates_labels_table()\n#[test] fn migration_002_creates_discussions_table()\n#[test] fn migration_002_creates_notes_table()\n#[test] fn migration_002_enforces_state_check()\n#[test] fn migration_002_enforces_noteable_type_check()\n#[test] fn migration_002_cascades_on_project_delete()\n```\n\nGREEN: Create migration file with all SQL\n\nVERIFY:\n```bash\n# Apply migration to test DB\nsqlite3 :memory: < migrations/001_initial.sql\nsqlite3 :memory: < migrations/002_issues.sql\n\n# Verify 
schema_version\nsqlite3 test.db \"SELECT version FROM schema_version ORDER BY version DESC LIMIT 1\"\n# Expected: 2\n\ncargo test migration_002\n```\n\n## Edge Cases\n\n- Applying twice - should fail on UNIQUE constraint (idempotency via version check)\n- Missing 001 - foreign key to projects fails\n- Long label names - TEXT handles any length\n- NULL description - allowed by schema\n- Empty discussions_synced_for_updated_at - NULL means never synced","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.128594Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:25:10.309900Z","closed_at":"2026-01-25T22:25:10.309852Z","close_reason":"Created 002_issues.sql with issues/labels/issue_labels/discussions/notes tables, 8 passing tests verify schema, constraints, and cascades","compaction_level":0,"original_size":0} -{"id":"bd-2iqk","title":"Implement Doctor + Stats screens","description":"## Background\nDoctor shows environment health checks (config, auth, DB, Ollama). Stats shows database statistics (entity counts, index sizes, FTS coverage). 
Both are informational screens using ftui JsonView or simple table layouts.\n\n## Approach\nState:\n- DoctorState: checks (Vec), overall_status (Healthy|Warning|Error)\n- StatsState: entity_stats (EntityStats), index_stats (IndexStats), fts_stats (FtsStats)\n\nAction:\n- run_doctor(config, conn) -> Vec: reuses existing lore doctor logic\n- fetch_stats(conn) -> StatsData: reuses existing lore stats logic\n\nView:\n- Doctor: vertical list of health checks with pass/fail/warn indicators\n- Stats: table of entity counts, index sizes, FTS document count, embedding coverage\n\n## Acceptance Criteria\n- [ ] Doctor shows config, auth, DB, and Ollama health status\n- [ ] Stats shows entity counts matching lore --robot stats output\n- [ ] Both screens accessible via navigation (gd for Doctor)\n- [ ] Health check results color-coded: green pass, yellow warn, red fail\n\n## Files\n- CREATE: crates/lore-tui/src/state/doctor.rs\n- CREATE: crates/lore-tui/src/state/stats.rs\n- CREATE: crates/lore-tui/src/view/doctor.rs\n- CREATE: crates/lore-tui/src/view/stats.rs\n- MODIFY: crates/lore-tui/src/action.rs (add run_doctor, fetch_stats)\n\n## TDD Anchor\nRED: Write test_fetch_stats_counts that creates DB with known data, asserts fetch_stats returns correct counts.\nGREEN: Implement fetch_stats with COUNT queries.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_stats\n\n## Edge Cases\n- Ollama not running: Doctor shows warning, not error (optional dependency)\n- Very large databases: stats queries should be fast (use shadow tables for FTS count)\n\n## Dependency Context\nUses existing doctor and stats logic from lore CLI commands.\nUses DbManager from \"Implement DbManager\" 
task.","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-12T17:02:21.744226Z","created_by":"tayloreernisse","updated_at":"2026-02-19T03:57:33.616067Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2iqk","depends_on_id":"bd-2x2h","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-2iqk","title":"Implement Doctor + Stats screens","description":"## Background\nDoctor shows environment health checks (config, auth, DB, Ollama). Stats shows database statistics (entity counts, index sizes, FTS coverage). Both are informational screens using ftui JsonView or simple table layouts.\n\n## Approach\nState:\n- DoctorState: checks (Vec), overall_status (Healthy|Warning|Error)\n- StatsState: entity_stats (EntityStats), index_stats (IndexStats), fts_stats (FtsStats)\n\nAction:\n- run_doctor(config, conn) -> Vec: reuses existing lore doctor logic\n- fetch_stats(conn) -> StatsData: reuses existing lore stats logic\n\nView:\n- Doctor: vertical list of health checks with pass/fail/warn indicators\n- Stats: table of entity counts, index sizes, FTS document count, embedding coverage\n\n## Acceptance Criteria\n- [ ] Doctor shows config, auth, DB, and Ollama health status\n- [ ] Stats shows entity counts matching lore --robot stats output\n- [ ] Both screens accessible via navigation (gd for Doctor)\n- [ ] Health check results color-coded: green pass, yellow warn, red fail\n\n## Files\n- CREATE: crates/lore-tui/src/state/doctor.rs\n- CREATE: crates/lore-tui/src/state/stats.rs\n- CREATE: crates/lore-tui/src/view/doctor.rs\n- CREATE: crates/lore-tui/src/view/stats.rs\n- MODIFY: crates/lore-tui/src/action.rs (add run_doctor, fetch_stats)\n\n## TDD Anchor\nRED: Write test_fetch_stats_counts that creates DB with known data, asserts fetch_stats returns correct counts.\nGREEN: Implement fetch_stats with COUNT queries.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml 
test_fetch_stats\n\n## Edge Cases\n- Ollama not running: Doctor shows warning, not error (optional dependency)\n- Very large databases: stats queries should be fast (use shadow tables for FTS count)\n\n## Dependency Context\nUses existing doctor and stats logic from lore CLI commands.\nUses DbManager from \"Implement DbManager\" task.","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T17:02:21.744226Z","created_by":"tayloreernisse","updated_at":"2026-02-19T04:23:34.018712Z","closed_at":"2026-02-19T04:23:34.018663Z","close_reason":"Doctor + Stats screens complete: DoctorState (HealthCheck/HealthStatus), StatsState (StatsData with entity/FTS/embedding/queue stats), color-coded views, gd/gx navigation, message handlers","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2iqk","depends_on_id":"bd-2x2h","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2jzn","title":"Migration 021: Add status columns to issues table","description":"## Background\nGitLab issues have work item status (To do, In progress, Done, Won't do, Duplicate) only available via GraphQL. We need 5 nullable columns on the issues table to store this data after enrichment. The status_synced_at column tracks when enrichment last wrote/cleared each row (ms epoch UTC).\n\n## Approach\nCreate a new SQL migration file and register it in the MIGRATIONS array. SQLite ALTER TABLE ADD COLUMN is non-destructive — existing rows get NULL defaults. 
Add a compound index for --status filter performance.\n\n## Files\n- migrations/021_work_item_status.sql (NEW)\n- src/core/db.rs (add entry to MIGRATIONS array)\n\n## Implementation\n\nmigrations/021_work_item_status.sql:\n ALTER TABLE issues ADD COLUMN status_name TEXT;\n ALTER TABLE issues ADD COLUMN status_category TEXT;\n ALTER TABLE issues ADD COLUMN status_color TEXT;\n ALTER TABLE issues ADD COLUMN status_icon_name TEXT;\n ALTER TABLE issues ADD COLUMN status_synced_at INTEGER;\n CREATE INDEX IF NOT EXISTS idx_issues_project_status_name ON issues(project_id, status_name);\n\nIn src/core/db.rs, add as last entry in MIGRATIONS array:\n (\"021\", include_str!(\"../../migrations/021_work_item_status.sql\")),\nLATEST_SCHEMA_VERSION is computed as MIGRATIONS.len() as i32 — auto-becomes 21.\n\n## Acceptance Criteria\n- [ ] Migration file exists at migrations/021_work_item_status.sql\n- [ ] MIGRATIONS array has 21 entries ending with (\"021\", ...)\n- [ ] In-memory DB: PRAGMA table_info(issues) includes all 5 new columns\n- [ ] In-memory DB: PRAGMA index_list(issues) includes idx_issues_project_status_name\n- [ ] Existing rows have NULL for all 5 new columns\n- [ ] cargo check --all-targets passes\n\n## TDD Loop\nRED: test_migration_021_adds_columns, test_migration_021_adds_index\n Pattern: create_connection(Path::new(\":memory:\")) + run_migrations(&conn), then PRAGMA queries\nGREEN: Create SQL file + register in MIGRATIONS\nVERIFY: cargo test test_migration_021\n\n## Edge Cases\n- Migration has 5 columns (including status_synced_at INTEGER), not 4\n- Test project insert uses gitlab_project_id, path_with_namespace, web_url (no name/last_seen_at)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:41:40.806320Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.414434Z","closed_at":"2026-02-11T07:21:33.414387Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 
failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2jzn","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2kop","title":"Implement DbManager (read pool + dedicated writer)","description":"## Background\nThe TUI needs concurrent database access: multiple read queries can run in parallel (e.g., loading dashboard stats while prefetching issue list), but writes must be serialized. The DbManager provides a read pool (3 connections, round-robin) plus a dedicated writer connection, accessed via closures.\n\nThe database uses WAL mode with 5000ms busy_timeout (already configured in lore's create_connection). WAL allows concurrent readers + single writer. The TUI is self-contained — it does NOT detect or react to external CLI sync operations. If someone runs lore sync externally while the TUI is open, WAL prevents conflicts and the TUI's natural re-query on navigation handles stale data implicitly.\n\n## Approach\nCreate `crates/lore-tui/src/db.rs`:\n\n```rust\npub struct DbManager {\n readers: Vec, // 3 connections, WAL mode\n writer: Connection, // dedicated writer\n next_reader: AtomicUsize, // round-robin index\n}\n```\n\n- `DbManager::open(path: &Path) -> Result` — opens 4 connections (3 read + 1 write), all with WAL + busy_timeout via lore::core::db::create_connection\n- `with_reader(&self, f: F) -> Result where F: FnOnce(&Connection) -> Result` — closure-based read access, round-robin selection\n- `with_writer(&self, f: F) -> Result where F: FnOnce(&Connection) -> Result` — closure-based write access (serialized)\n- Reader connections set `PRAGMA query_only = ON` as a safety guard\n- All connections reuse lore's `create_connection()` which sets WAL + busy_timeout + foreign_keys\n\nThe DbManager is created once at app startup and shared (via Arc) across all screen states and action tasks.\n\n## Acceptance Criteria\n- [ ] DbManager opens 3 reader + 1 writer 
connection\n- [ ] Readers use round-robin selection via AtomicUsize\n- [ ] Reader connections have query_only = ON\n- [ ] Writer connection allows INSERT/UPDATE/DELETE\n- [ ] with_reader and with_writer use closure-based access (no connection leaking)\n- [ ] All connections use WAL mode and 5000ms busy_timeout\n- [ ] DbManager is Send + Sync (can be shared via Arc across async tasks)\n- [ ] Unit test: concurrent reads don't block each other\n- [ ] Unit test: write through reader connection fails (query_only guard)\n\n## Files\n- CREATE: crates/lore-tui/src/db.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add pub mod db)\n\n## TDD Anchor\nRED: Write `test_reader_is_query_only` that opens a DbManager on an in-memory DB, attempts an INSERT via with_reader, and asserts it fails.\nGREEN: Implement DbManager with query_only pragma on readers.\nVERIFY: cargo test -p lore-tui db -- --nocapture\n\nAdditional tests:\n- test_writer_allows_mutations\n- test_round_robin_rotates_readers\n- test_dbmanager_is_send_sync (compile-time assert)\n- test_concurrent_reads (spawn threads, all complete without blocking)\n\n## Edge Cases\n- Database file doesn't exist — create_connection handles this (creates new DB)\n- Database locked by external process — busy_timeout handles retry\n- Connection pool exhaustion — not possible with closure-based access (connection is borrowed, not taken)\n- AtomicUsize overflow — wraps around, which is fine for round-robin (modulo 3)\n\n## Dependency Context\nDepends on bd-3ddw (scaffold) for the crate to exist. Uses lore::core::db::create_connection for connection setup. All screen action modules depend on DbManager for data access.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:53:59.708214Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:59:21.852517Z","closed_at":"2026-02-12T19:59:21.852405Z","close_reason":"Implemented DbManager: 3 reader pool (query_only, round-robin) + 1 writer, Mutex-wrapped for Send+Sync. 
7 tests passing, clippy clean.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2kop","depends_on_id":"bd-3ddw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2kr0","title":"Implement MR List (state + action + view)","description":"## Background\nThe MR List mirrors the Issue List pattern with MR-specific columns (target branch, source branch, draft status, reviewer). Same keyset pagination, snapshot fence, and filter bar DSL.\n\n## Approach\nState (state/mr_list.rs):\n- MrListState: same structure as IssueListState but with MrFilter and MrListRow, plus snapshot_upper_updated_at, filter_hash, peek_visible, peek_content\n- MrFilter: state, author, reviewer, target_branch, source_branch, label, draft (bool), free_text, project_id\n- MrListRow: project_path, iid, title, state, author, reviewer, target_branch, labels, updated_at, draft\n- MrCursor: updated_at, iid for keyset pagination\n- handle_key(): j/k scroll, J/K page, Enter select, / focus filter, Tab sort, g+g top, G bottom, r refresh, Space toggle Quick Peek\n\n**Snapshot fence:** Same pattern as Issue List — store snapshot_upper_updated_at on first load and refresh, filter subsequent pages. Explicit refresh (r) resets.\n\n**filter_hash:** Same pattern as Issue List — filter change resets cursor to page 1.\n\n**Quick Peek (Space key):**\n- Space toggles right-side preview pane (40% width) showing selected MR detail\n- Preview loads asynchronously via TaskSupervisor\n- j/k updates preview for newly selected row\n- Narrow terminals (<100 cols): peek replaces list\n\nAction (action.rs):\n- fetch_mrs(conn, filter, cursor, page_size, clock, snapshot_fence) -> Result: keyset query against merge_requests table. 
Uses idx_mrs_list_default index.\n- fetch_mr_peek(conn, entity_key) -> Result: loads MR detail for Quick Peek preview\n\nView (view/mr_list.rs):\n- render_mr_list(frame, state, area, theme): FilterBar + EntityTable with MR columns\n- When peek_visible: split area horizontally — list (60%) | peek preview (40%)\n- Columns: IID, Title (flex), State, Author, Target, Labels, Updated, Draft indicator\n- Draft MRs shown with muted style and [WIP] tag\n\n## Acceptance Criteria\n- [ ] Keyset pagination works for MR list (same pattern as issues)\n- [ ] Browse snapshot fence prevents rows shifting during concurrent sync\n- [ ] Explicit refresh (r) resets snapshot fence\n- [ ] filter_hash resets cursor on filter change\n- [ ] MR-specific filter fields: draft, reviewer, target_branch, source_branch\n- [ ] Draft MRs visually distinguished with [WIP] indicator\n- [ ] State filter supports: opened, merged, closed, locked, all\n- [ ] Columns: IID, Title, State, Author, Target Branch, Labels, Updated\n- [ ] Enter navigates to MrDetail, Esc returns with state preserved\n- [ ] Space toggles Quick Peek right-side preview pane\n- [ ] Quick Peek loads MR detail asynchronously\n- [ ] j/k in peek mode updates preview for newly selected row\n- [ ] Narrow terminal (<100 cols): peek replaces list\n\n## Files\n- MODIFY: crates/lore-tui/src/state/mr_list.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_mrs, fetch_mr_peek)\n- CREATE: crates/lore-tui/src/view/mr_list.rs\n\n## TDD Anchor\nRED: Write test_fetch_mrs_draft_filter in action.rs that inserts 5 MRs (3 draft, 2 not), calls fetch_mrs with draft=true filter, asserts 3 results.\nGREEN: Implement fetch_mrs with draft filter.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_mrs\n\nAdditional tests:\n- test_mr_snapshot_fence: verify fence excludes newer rows\n- test_mr_filter_hash_reset: verify filter change resets cursor\n\n## Edge Cases\n- MR state \"locked\" is rare but must be handled in 
filter and display\n- Very long branch names: truncate with ellipsis\n- MRs with no reviewer: show \"-\" in reviewer column\n- Quick Peek on empty list: no-op\n- Rapid j/k with peek open: debounce peek loads\n\n## Dependency Context\nUses EntityTable and FilterBar from \"Implement entity table + filter bar widgets\" (bd-18qs).\nUses same keyset pagination pattern from \"Implement Issue List\" (bd-3ei1).\nUses MrListState from \"Implement AppState composition\" (bd-1v9m).\nUses TaskSupervisor for load management from \"Implement TaskSupervisor\" (bd-3le2).\nRequires idx_mrs_list_default index from \"Add required TUI indexes\" (bd-3pm2).","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:59:24.070743Z","created_by":"tayloreernisse","updated_at":"2026-02-18T20:36:57.718716Z","closed_at":"2026-02-18T20:36:57.718613Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2kr0","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2kr0","depends_on_id":"bd-3ei1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2kr0","depends_on_id":"bd-3pm2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -173,7 +173,7 @@ {"id":"bd-2uzm","title":"Implement Trace screen (file -> MR -> issue chain drill-down)","description":"## Background\nThe Trace screen answers \"Why was this code introduced?\" by building file -> MR -> issue -> discussion chains. It wraps run_trace() from src/core/trace.rs (added in v0.8.1) in an interactive TUI view where users can drill down into any linked entity. The CLI prints flat output; the TUI makes the chain navigable.\n\nThe core query accepts a file path (with optional :line suffix), resolves renames via BFS, finds MRs that touched the file, links issues via entity_references, and extracts DiffNote discussions. 
Each result is a TraceChain: MR metadata + linked issues + relevant discussions.\n\n## Data Shapes (from src/core/trace.rs)\n\n```rust\npub struct TraceResult {\n pub path: String,\n pub resolved_paths: Vec, // rename chain via BFS\n pub renames_followed: bool,\n pub trace_chains: Vec,\n pub total_chains: usize,\n}\n\npub struct TraceChain {\n pub mr_iid: i64,\n pub mr_title: String,\n pub mr_state: String, // merged/opened/closed\n pub mr_author: String,\n pub change_type: String, // added/modified/deleted/renamed\n pub merged_at_iso: Option,\n pub updated_at_iso: String,\n pub web_url: Option,\n pub issues: Vec, // linked via entity_references\n pub discussions: Vec, // DiffNote threads on this file\n}\n\npub struct TraceIssue {\n pub iid: i64, pub title: String, pub state: String,\n pub reference_type: String, pub web_url: Option,\n}\n\npub struct TraceDiscussion {\n pub discussion_id: String, pub mr_iid: i64,\n pub author_username: String, pub body: String,\n pub path: String, pub created_at_iso: String,\n}\n```\n\nrun_trace() signature (src/core/trace.rs):\n```rust\npub fn run_trace(\n conn: &Connection,\n project_id: Option,\n path: &str,\n follow_renames: bool,\n include_discussions: bool,\n limit: usize,\n) -> Result\n```\n\nparse_trace_path() (src/cli/commands/trace.rs, made pub by bd-1f5b):\n```rust\npub fn parse_trace_path(input: &str) -> (String, Option)\n```\n\n## Approach\n\n**Screen enum** (message.rs):\nAdd Screen::Trace variant (no parameters — path is entered on-screen). Label: \"Trace\". Breadcrumb: \"Trace\".\n\n**Path autocomplete**: Query DISTINCT new_path from mr_file_changes (scoped to project_id if set) for fuzzy matching as user types. Cache results on first focus. SQL:\n```sql\nSELECT DISTINCT new_path FROM mr_file_changes\nWHERE project_id = ?1 ORDER BY new_path\n```\nStore as Vec in TraceState. 
Filter client-side with case-insensitive substring match.\n\n**State** (state/trace.rs):\n```rust\n#[derive(Debug, Default)]\npub struct TraceState {\n pub path_input: String,\n pub path_focused: bool,\n pub line_filter: Option, // from :line suffix\n pub result: Option,\n pub selected_chain_index: usize,\n pub expanded_chains: HashSet, // multiple can be expanded\n pub follow_renames: bool, // default true\n pub include_discussions: bool, // default true\n pub scroll_offset: u16,\n pub known_paths: Vec, // autocomplete cache\n pub autocomplete_matches: Vec, // filtered suggestions\n pub autocomplete_index: usize,\n}\n```\n\n**Action** (action.rs):\n- fetch_trace(conn, project_id, path, follow_renames, include_discussions, limit) -> Result: calls run_trace() directly from src/core/trace.rs\n- fetch_known_paths(conn, project_id) -> Result, LoreError>: queries mr_file_changes for autocomplete\n\n**View** (view/trace.rs):\n- Top: path input with autocomplete dropdown + toggle indicators [renames: on] [discussions: on]\n- If renames followed: rename chain breadcrumb (old_path -> ... 
-> new_path) in dimmed text\n- Main area: scrollable list of TraceChain entries:\n - Collapsed: MR state icon + !iid + title + author + change_type + date (single line)\n - Expanded: indented sections for linked issues and discussion snippets\n - Issues: state icon + #iid + title + reference_type\n - Discussions: @author + date + body preview (first 2 lines, truncated at 120 chars)\n- Keyboard:\n - j/k: scroll chains\n - Enter: toggle expand/collapse on selected chain\n - Enter on highlighted issue: navigate to IssueDetail(EntityKey)\n - Enter on highlighted MR line: navigate to MrDetail(EntityKey)\n - /: focus path input\n - Tab: cycle autocomplete suggestions when path focused\n - r: toggle follow_renames (re-fetches)\n - d: toggle include_discussions (re-fetches)\n - q: back\n\n**Contextual entry points** (wired from other screens):\n- MR Detail: when cursor is on a file path in the file changes list, t opens Trace pre-filled with that path\n- Issue Detail: if discussion references a file path, t opens Trace for that path\n- Requires MrDetailState and IssueDetailState to expose selected_file_path() -> Option\n\n## Acceptance Criteria\n- [ ] Screen::Trace added to message.rs Screen enum with label \"Trace\" and breadcrumb\n- [ ] TraceState struct with all fields, Default impl\n- [ ] Path input with autocomplete dropdown from mr_file_changes (fuzzy substring match)\n- [ ] :line suffix parsing via parse_trace_path (line_filter stored but used for future highlighting)\n- [ ] Rename chain displayed as breadcrumb when renames_followed is true\n- [ ] TraceChain list with expand/collapse — multiple chains expandable simultaneously\n- [ ] MR state icons: merged (purple), opened (green), closed (red) — matching CLI theme\n- [ ] Enter on issue row navigates to IssueDetail(EntityKey::issue(project_id, iid))\n- [ ] Enter on MR header navigates to MrDetail(EntityKey::mr(project_id, iid))\n- [ ] r toggles follow_renames, d toggles include_discussions — both trigger re-fetch\n- 
[ ] Empty state: \"No trace chains found\" with hint \"Run 'lore sync' to fetch MR file changes\"\n- [ ] Contextual navigation: t on file path in MR Detail opens Trace pre-filled\n- [ ] Registered in command palette (label \"Trace file\", keywords [\"trace\", \"provenance\", \"why\"])\n- [ ] AppState.has_text_focus() updated to include trace.path_focused\n- [ ] AppState.blur_text_focus() updated to include trace.path_focused = false\n\n## Files\n- MODIFY: crates/lore-tui/src/message.rs (add Screen::Trace variant + label + is_detail_or_entity)\n- CREATE: crates/lore-tui/src/state/trace.rs (TraceState struct + Default)\n- MODIFY: crates/lore-tui/src/state/mod.rs (pub mod trace, pub use TraceState, add to AppState, update has_text_focus/blur_text_focus)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_trace, fetch_known_paths)\n- CREATE: crates/lore-tui/src/view/trace.rs (render_trace fn)\n- MODIFY: crates/lore-tui/src/view/mod.rs (add Screen::Trace dispatch arm in render_screen)\n- MODIFY: crates/lore-tui/src/view/mr_detail.rs (add t keybinding for contextual trace — deferred if mr_detail not yet implemented)\n\n## TDD Anchor\nRED: Write test_fetch_trace_returns_chain in action tests. Setup: in-memory DB, insert project, MR, mr_file_changes row (new_path=\"src/main.rs\"), entity_reference linking MR to issue. Call fetch_trace(conn, Some(project_id), \"src/main.rs\", true, true, 50). 
Assert: result.trace_chains.len() == 1, result.trace_chains[0].issues.len() == 1.\nGREEN: Implement fetch_trace calling run_trace from src/core/trace.rs.\nVERIFY: cargo test -p lore-tui trace -- --nocapture\n\nAdditional tests:\n- test_trace_empty_result: path \"nonexistent.rs\" returns total_chains=0\n- test_trace_rename_chain: insert rename chain A->B->C, query A, assert resolved_paths contains all 3\n- test_trace_discussion_toggle: include_discussions=false returns empty discussions vec per chain\n- test_parse_trace_path_with_line: \"src/main.rs:42\" -> (\"src/main.rs\", Some(42))\n- test_parse_trace_path_no_line: \"src/main.rs\" -> (\"src/main.rs\", None)\n- test_autocomplete_filters_paths: known_paths=[\"src/a.rs\",\"src/b.rs\",\"lib/c.rs\"], input=\"src/\" -> matches=[\"src/a.rs\",\"src/b.rs\"]\n\n## Edge Cases\n- File path not in any MR: empty state with sync hint\n- Very long rename chains (>5 paths): show first 2 + \"... N more\" + last path\n- Hundreds of trace chains: limit default 50, show \"showing 50 of N\" footer\n- Path with Windows drive letter (C:/foo.rs): parse_trace_path handles this correctly\n- Autocomplete with thousands of paths: substring filter is O(n) but fast enough for <100k paths\n- Project scope: if global_scope.project_id is set, pass it to run_trace and autocomplete query\n- Contextual entry from MR Detail: if MR Detail screen not yet implemented, defer the t keybinding to a follow-up\n\n## Dependency Context\n- bd-1f5b (blocks): Makes parse_trace_path() pub in src/cli/commands/trace.rs. Without this, TUI must reimplement the parser.\n- src/core/trace.rs: run_trace() is already pub — no changes needed. 
TUI calls it directly.\n- src/core/file_history.rs: resolve_rename_chain() used transitively by run_trace — TUI does not call it directly.\n- Navigation: uses NavigationStack.push(Screen::IssueDetail(key)) and Screen::MrDetail(key) from crates/lore-tui/src/navigation.rs.\n- AppState composition: TraceState added as field in AppState struct (state/mod.rs line ~154-174). has_text_focus and blur_text_focus at lines 194-207 must include trace.path_focused.\n- Contextual entry: requires MrDetailState to expose the currently selected file path. If MR Detail is not yet built, the contextual keybinding is deferred.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-18T18:13:47.076070Z","created_by":"tayloreernisse","updated_at":"2026-02-19T03:50:41.546948Z","closed_at":"2026-02-19T03:50:41.546751Z","close_reason":"Trace screen complete: view/trace.rs + wiring. 586 TUI tests pass.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2uzm","depends_on_id":"bd-1f5b","type":"blocks","created_at":"2026-02-18T18:14:33.294262Z","created_by":"tayloreernisse"},{"issue_id":"bd-2uzm","depends_on_id":"bd-nwux","type":"parent-child","created_at":"2026-02-18T18:13:47.079630Z","created_by":"tayloreernisse"}]} {"id":"bd-2w1p","title":"Add half-life fields and config validation to ScoringConfig","description":"## Background\nThe flat-weight ScoringConfig (config.rs:155-167) has only 3 fields: author_weight (25), reviewer_weight (10), note_bonus (1). Time-decay scoring needs half-life parameters, a reviewer split (participated vs assigned-only), closed MR discount, substantive-note threshold, and bot filtering.\n\n## Approach\nExtend the existing ScoringConfig struct at config.rs:155. Add new fields with #[serde(default)] and camelCase rename to match existing convention (authorWeight, reviewerWeight, noteBonus). Extend the Default impl at config.rs:169 with new defaults. 
Extend validate_scoring() at config.rs:274-291 (currently validates 3 weights >= 0).\n\n### New fields to add:\n```rust\n#[serde(rename = \"reviewerAssignmentWeight\")]\npub reviewer_assignment_weight: i64, // default: 3\n#[serde(rename = \"authorHalfLifeDays\")]\npub author_half_life_days: u32, // default: 180\n#[serde(rename = \"reviewerHalfLifeDays\")]\npub reviewer_half_life_days: u32, // default: 90\n#[serde(rename = \"reviewerAssignmentHalfLifeDays\")]\npub reviewer_assignment_half_life_days: u32, // default: 45\n#[serde(rename = \"noteHalfLifeDays\")]\npub note_half_life_days: u32, // default: 45\n#[serde(rename = \"closedMrMultiplier\")]\npub closed_mr_multiplier: f64, // default: 0.5\n#[serde(rename = \"reviewerMinNoteChars\")]\npub reviewer_min_note_chars: u32, // default: 20\n#[serde(rename = \"excludedUsernames\")]\npub excluded_usernames: Vec, // default: vec![]\n```\n\n### Validation additions to validate_scoring() (config.rs:274):\n- All *_half_life_days must be > 0 AND <= 3650\n- All *_weight / *_bonus must be >= 0\n- reviewer_assignment_weight must be >= 0\n- closed_mr_multiplier must be finite (not NaN/Inf) AND in (0.0, 1.0]\n- reviewer_min_note_chars must be >= 0 AND <= 4096\n- excluded_usernames entries must be non-empty strings\n- Return LoreError::ConfigInvalid with clear message on failure\n\n## TDD Loop\n\n### RED (write first):\n```rust\n#[test]\nfn test_config_validation_rejects_zero_half_life() {\n let mut cfg = ScoringConfig::default();\n assert!(validate_scoring(&cfg).is_ok());\n cfg.author_half_life_days = 0;\n assert!(validate_scoring(&cfg).is_err());\n cfg.author_half_life_days = 180;\n cfg.reviewer_half_life_days = 0;\n assert!(validate_scoring(&cfg).is_err());\n cfg.reviewer_half_life_days = 90;\n cfg.closed_mr_multiplier = 0.0;\n assert!(validate_scoring(&cfg).is_err());\n cfg.closed_mr_multiplier = 1.5;\n assert!(validate_scoring(&cfg).is_err());\n cfg.closed_mr_multiplier = 1.0;\n 
assert!(validate_scoring(&cfg).is_ok());\n}\n\n#[test]\nfn test_config_validation_rejects_absurd_half_life() {\n let mut cfg = ScoringConfig::default();\n cfg.author_half_life_days = 5000; // > 3650 cap\n assert!(validate_scoring(&cfg).is_err());\n cfg.author_half_life_days = 3650; // boundary: valid\n assert!(validate_scoring(&cfg).is_ok());\n cfg.reviewer_min_note_chars = 5000; // > 4096 cap\n assert!(validate_scoring(&cfg).is_err());\n cfg.reviewer_min_note_chars = 4096; // boundary: valid\n assert!(validate_scoring(&cfg).is_ok());\n}\n\n#[test]\nfn test_config_validation_rejects_nan_multiplier() {\n let mut cfg = ScoringConfig::default();\n cfg.closed_mr_multiplier = f64::NAN;\n assert!(validate_scoring(&cfg).is_err());\n cfg.closed_mr_multiplier = f64::INFINITY;\n assert!(validate_scoring(&cfg).is_err());\n cfg.closed_mr_multiplier = f64::NEG_INFINITY;\n assert!(validate_scoring(&cfg).is_err());\n}\n```\n\n### GREEN: Add fields to struct + Default impl + validation rules.\n### VERIFY: cargo test -p lore -- test_config_validation\n\n## Acceptance Criteria\n- [ ] test_config_validation_rejects_zero_half_life passes\n- [ ] test_config_validation_rejects_absurd_half_life passes\n- [ ] test_config_validation_rejects_nan_multiplier passes\n- [ ] ScoringConfig::default() returns correct values for all 11 fields\n- [ ] cargo check --all-targets passes\n- [ ] Existing config deserialization works (#[serde(default)] fills new fields)\n- [ ] validate_scoring() is pub(crate) or accessible from config.rs test module\n\n## Files\n- MODIFY: src/core/config.rs (struct at line 155, Default impl at line 169, validate_scoring at line 274)\n\n## Edge Cases\n- f64 comparison: use .is_finite() for NaN/Inf check, > 0.0 and <= 1.0 for range\n- Vec default: use Vec::new()\n- Upper bounds prevent silent misconfig (5000-day half-life effectively disables 
decay)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T16:59:14.654469Z","created_by":"tayloreernisse","updated_at":"2026-02-12T21:01:21.744442Z","closed_at":"2026-02-12T21:01:21.744205Z","close_reason":"Completed: added 8 new fields to ScoringConfig, extended Default impl, and added validation for half-life bounds, closed_mr_multiplier, reviewer_min_note_chars, and excluded_usernames. All 19 config tests pass.","compaction_level":0,"original_size":0,"labels":["scoring"]} {"id":"bd-2wpf","title":"Ship timeline CLI with human and robot renderers","description":"## Problem\nThe timeline pipeline (5-stage SEED->HYDRATE->EXPAND->COLLECT->RENDER) is implemented but not wired to the CLI. This is one of lore's most unique features — chronological narrative reconstruction from resource events, cross-references, and notes — and it is invisible to users and agents.\n\n## Current State\n- Types defined: src/core/timeline.rs (TimelineEvent, TimelineSeed, etc.)\n- Seed stage: src/core/timeline_seed.rs (FTS search -> seed entities)\n- Expand stage: src/core/timeline_expand.rs (cross-reference expansion)\n- Collect stage: src/core/timeline_collect.rs (event gathering from resource events + notes)\n- CLI command structure: src/cli/commands/timeline.rs (exists but incomplete)\n- Remaining beads: bd-1nf (CLI wiring), bd-2f2 (human renderer), bd-dty (robot renderer)\n\n## Acceptance Criteria\n1. lore timeline 'authentication refactor' works end-to-end:\n - Searches for matching entities (SEED)\n - Fetches raw data (HYDRATE)\n - Expands via cross-references (EXPAND with --depth flag, default 1)\n - Collects events chronologically (COLLECT)\n - Renders human-readable narrative (RENDER)\n2. 
Human renderer output:\n - Chronological event stream with timestamps\n - Color-coded by event type (state change, label change, note, reference)\n - Actor names with role context\n - Grouped by day/week for readability\n - Evidence snippets from notes (first 200 chars)\n3. Robot renderer output (--robot / -J):\n - JSON array of events with: timestamp, event_type, actor, entity_ref, body/snippet, metadata\n - Seed entities listed separately (what matched the query)\n - Expansion depth metadata (how far from seed)\n - Total event count and time range\n4. CLI flags:\n - --project (scope to project)\n - --since (time range)\n - --depth N (expansion depth, default 1, max 3)\n - --expand-mentions (follow mention references, not just closes/related)\n - -n LIMIT (max events)\n5. Performance: timeline for a single issue with 50 events renders in <200ms\n\n## Relationship to Existing Beads\nThis supersedes/unifies: bd-1nf (CLI wiring), bd-2f2 (human renderer), bd-dty (robot renderer). Those can be closed when this ships.\n\n## Files to Modify\n- src/cli/commands/timeline.rs (CLI wiring, flag parsing, output dispatch)\n- src/core/timeline.rs (may need RENDER stage types)\n- New: src/cli/render/timeline_human.rs or inline in timeline.rs\n- New: src/cli/render/timeline_robot.rs or inline in timeline.rs","status":"closed","priority":1,"issue_type":"feature","created_at":"2026-02-12T15:46:16.246889Z","created_by":"tayloreernisse","updated_at":"2026-02-12T15:50:43.885226Z","closed_at":"2026-02-12T15:50:43.885180Z","close_reason":"Already implemented: run_timeline(), print_timeline(), print_timeline_json_with_meta(), handle_timeline() all exist and are fully wired. 
Code audit 2026-02-12.","compaction_level":0,"original_size":0,"labels":["cli","cli-imp"],"dependencies":[{"issue_id":"bd-2wpf","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-2x2h","title":"Implement Sync screen (running + summary modes + progress coalescer)","description":"## Background\nThe Sync screen provides real-time progress visualization during data synchronization. The TUI drives sync directly via lore library calls (not subprocess) — this gives direct access to progress callbacks, proper error propagation, and cooperative cancellation via CancelToken. The TUI is the primary human interface; the CLI serves robots/scripts.\n\nAfter sync completes, the screen transitions to a summary view showing exact changed entity counts. A progress coalescer prevents render thrashing by batching rapid progress updates.\n\nDesign principle: the TUI is self-contained. It does NOT detect or react to external CLI sync operations. 
If someone runs lore sync externally, the TUI's natural re-query on navigation handles stale data implicitly.\n\n## Approach\nCreate state, action, and view modules for the Sync screen:\n\n**State** (crates/lore-tui/src/screen/sync/state.rs):\n- SyncScreenMode enum: FullScreen, Inline (for use from Bootstrap screen)\n- SyncState enum: Idle, Running(SyncProgress), Complete(SyncSummary), Error(String)\n- SyncProgress: per-lane progress (issues, MRs, discussions, notes, events, statuses) with counts and ETA\n- SyncSummary: changed entity counts (new, updated, deleted per type), duration, errors\n- ProgressCoalescer: buffers progress updates, emits at most every 100ms to prevent render thrash\n\n**sync_delta_ledger** (crates/lore-tui/src/screen/sync/delta_ledger.rs):\n- SyncDeltaLedger: in-memory per-run record of changed entity IDs\n- Fields: new_issue_iids (Vec), updated_issue_iids (Vec), new_mr_iids (Vec), updated_mr_iids (Vec)\n- record_change(entity_type, iid, change_kind) — called by sync progress callback\n- summary() -> SyncSummary — produces the final counts for the summary view\n- Purpose: after sync completes, the dashboard and list screens can use the ledger to highlight \"new since last sync\" items\n\n**Action** (crates/lore-tui/src/screen/sync/action.rs):\n- start_sync(db: &DbManager, config: &Config, cancel: CancelToken) -> Cmd\n- Calls lore library ingestion functions directly: ingest_issues, ingest_mrs, ingest_discussions, etc.\n- Progress callback sends Msg::SyncProgress(lane, count, total) via channel\n- On completion sends Msg::SyncComplete(SyncSummary)\n- On cancel sends Msg::SyncCancelled(partial_summary)\n\n**Per-project fault isolation:** If sync for one project fails, continue syncing other projects. Collect per-project errors and display in summary view. 
Don't abort entire sync on single project failure.\n\n**View** (crates/lore-tui/src/screen/sync/view.rs):\n- Running view: per-lane progress bars with counts/totals, overall ETA, cancel hint (Esc)\n- Stream stats footer: show items/sec throughput for active lanes\n- Summary view: table of entity types with new/updated/deleted columns, total duration, per-project error list\n- Error view: error message with retry option\n- Inline mode: compact single-line progress for embedding in Bootstrap screen\n\nThe Sync screen uses TaskSupervisor for the background sync task with cooperative cancellation.\n\n## Acceptance Criteria\n- [ ] Sync screen launches sync via lore library calls (NOT subprocess)\n- [ ] Per-lane progress bars update in real-time during sync\n- [ ] ProgressCoalescer batches updates to at most 10/second (100ms floor)\n- [ ] Esc cancels sync cooperatively via CancelToken, shows partial summary\n- [ ] Sync completion transitions to summary view with accurate change counts\n- [ ] Summary view shows new/updated/deleted counts per entity type\n- [ ] Error during sync shows error message with retry option\n- [ ] Sync task registered with TaskSupervisor (dedup by TaskKey::Sync)\n- [ ] Per-project fault isolation: single project failure doesn't abort entire sync\n- [ ] SyncDeltaLedger records changed entity IDs for post-sync highlighting\n- [ ] Stream stats footer shows items/sec throughput\n- [ ] ScreenMode::Inline renders compact single-line progress for Bootstrap embedding\n- [ ] Unit tests for ProgressCoalescer batching behavior\n- [ ] Unit tests for SyncDeltaLedger record/summary\n- [ ] Integration test: mock sync with FakeClock verifies progress -> summary transition\n\n## Files\n- CREATE: crates/lore-tui/src/screen/sync/state.rs\n- CREATE: crates/lore-tui/src/screen/sync/action.rs\n- CREATE: crates/lore-tui/src/screen/sync/view.rs\n- CREATE: crates/lore-tui/src/screen/sync/delta_ledger.rs\n- CREATE: crates/lore-tui/src/screen/sync/mod.rs\n- MODIFY: 
crates/lore-tui/src/screen/mod.rs (add pub mod sync)\n\n## TDD Anchor\nRED: Write test_progress_coalescer_batches_rapid_updates that sends 50 progress updates in 10ms and asserts coalescer emits at most 1.\nGREEN: Implement ProgressCoalescer with configurable floor interval.\nVERIFY: cargo test -p lore-tui sync -- --nocapture\n\nAdditional tests:\n- test_sync_cancel_produces_partial_summary\n- test_sync_complete_produces_full_summary\n- test_sync_error_shows_retry\n- test_sync_dedup_prevents_double_launch\n- test_delta_ledger_records_changes: record 5 new issues and 3 updated MRs, assert summary counts\n- test_per_project_fault_isolation: simulate one project failure, verify others complete\n\n## Edge Cases\n- Sync cancelled immediately after start — partial summary with zero counts is valid\n- Network timeout during sync — error state with last-known progress preserved\n- Very large sync (100k+ entities) — progress coalescer prevents render thrash\n- Sync started while another sync TaskKey::Sync exists — TaskSupervisor dedup rejects it\n- Inline mode from Bootstrap: compact rendering, no full progress bars\n\n## Dependency Context\nUses TaskSupervisor from bd-3le2 for dedup and cancellation. Uses DbManager from bd-2kop for database access. Uses lore library ingestion module directly for sync operations. 
Used by Bootstrap screen (bd-3ty8) in inline mode.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:09.481354Z","created_by":"tayloreernisse","updated_at":"2026-02-19T03:57:33.804713Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2x2h","depends_on_id":"bd-3le2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2x2h","depends_on_id":"bd-u7se","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-2x2h","title":"Implement Sync screen (running + summary modes + progress coalescer)","description":"## Background\nThe Sync screen provides real-time progress visualization during data synchronization. The TUI drives sync directly via lore library calls (not subprocess) — this gives direct access to progress callbacks, proper error propagation, and cooperative cancellation via CancelToken. The TUI is the primary human interface; the CLI serves robots/scripts.\n\nAfter sync completes, the screen transitions to a summary view showing exact changed entity counts. A progress coalescer prevents render thrashing by batching rapid progress updates.\n\nDesign principle: the TUI is self-contained. It does NOT detect or react to external CLI sync operations. 
If someone runs lore sync externally, the TUI's natural re-query on navigation handles stale data implicitly.\n\n## Approach\nCreate state, action, and view modules for the Sync screen:\n\n**State** (crates/lore-tui/src/screen/sync/state.rs):\n- SyncScreenMode enum: FullScreen, Inline (for use from Bootstrap screen)\n- SyncState enum: Idle, Running(SyncProgress), Complete(SyncSummary), Error(String)\n- SyncProgress: per-lane progress (issues, MRs, discussions, notes, events, statuses) with counts and ETA\n- SyncSummary: changed entity counts (new, updated, deleted per type), duration, errors\n- ProgressCoalescer: buffers progress updates, emits at most every 100ms to prevent render thrash\n\n**sync_delta_ledger** (crates/lore-tui/src/screen/sync/delta_ledger.rs):\n- SyncDeltaLedger: in-memory per-run record of changed entity IDs\n- Fields: new_issue_iids (Vec), updated_issue_iids (Vec), new_mr_iids (Vec), updated_mr_iids (Vec)\n- record_change(entity_type, iid, change_kind) — called by sync progress callback\n- summary() -> SyncSummary — produces the final counts for the summary view\n- Purpose: after sync completes, the dashboard and list screens can use the ledger to highlight \"new since last sync\" items\n\n**Action** (crates/lore-tui/src/screen/sync/action.rs):\n- start_sync(db: &DbManager, config: &Config, cancel: CancelToken) -> Cmd\n- Calls lore library ingestion functions directly: ingest_issues, ingest_mrs, ingest_discussions, etc.\n- Progress callback sends Msg::SyncProgress(lane, count, total) via channel\n- On completion sends Msg::SyncComplete(SyncSummary)\n- On cancel sends Msg::SyncCancelled(partial_summary)\n\n**Per-project fault isolation:** If sync for one project fails, continue syncing other projects. Collect per-project errors and display in summary view. 
Don't abort entire sync on single project failure.\n\n**View** (crates/lore-tui/src/screen/sync/view.rs):\n- Running view: per-lane progress bars with counts/totals, overall ETA, cancel hint (Esc)\n- Stream stats footer: show items/sec throughput for active lanes\n- Summary view: table of entity types with new/updated/deleted columns, total duration, per-project error list\n- Error view: error message with retry option\n- Inline mode: compact single-line progress for embedding in Bootstrap screen\n\nThe Sync screen uses TaskSupervisor for the background sync task with cooperative cancellation.\n\n## Acceptance Criteria\n- [ ] Sync screen launches sync via lore library calls (NOT subprocess)\n- [ ] Per-lane progress bars update in real-time during sync\n- [ ] ProgressCoalescer batches updates to at most 10/second (100ms floor)\n- [ ] Esc cancels sync cooperatively via CancelToken, shows partial summary\n- [ ] Sync completion transitions to summary view with accurate change counts\n- [ ] Summary view shows new/updated/deleted counts per entity type\n- [ ] Error during sync shows error message with retry option\n- [ ] Sync task registered with TaskSupervisor (dedup by TaskKey::Sync)\n- [ ] Per-project fault isolation: single project failure doesn't abort entire sync\n- [ ] SyncDeltaLedger records changed entity IDs for post-sync highlighting\n- [ ] Stream stats footer shows items/sec throughput\n- [ ] ScreenMode::Inline renders compact single-line progress for Bootstrap embedding\n- [ ] Unit tests for ProgressCoalescer batching behavior\n- [ ] Unit tests for SyncDeltaLedger record/summary\n- [ ] Integration test: mock sync with FakeClock verifies progress -> summary transition\n\n## Files\n- CREATE: crates/lore-tui/src/screen/sync/state.rs\n- CREATE: crates/lore-tui/src/screen/sync/action.rs\n- CREATE: crates/lore-tui/src/screen/sync/view.rs\n- CREATE: crates/lore-tui/src/screen/sync/delta_ledger.rs\n- CREATE: crates/lore-tui/src/screen/sync/mod.rs\n- MODIFY: 
crates/lore-tui/src/screen/mod.rs (add pub mod sync)\n\n## TDD Anchor\nRED: Write test_progress_coalescer_batches_rapid_updates that sends 50 progress updates in 10ms and asserts coalescer emits at most 1.\nGREEN: Implement ProgressCoalescer with configurable floor interval.\nVERIFY: cargo test -p lore-tui sync -- --nocapture\n\nAdditional tests:\n- test_sync_cancel_produces_partial_summary\n- test_sync_complete_produces_full_summary\n- test_sync_error_shows_retry\n- test_sync_dedup_prevents_double_launch\n- test_delta_ledger_records_changes: record 5 new issues and 3 updated MRs, assert summary counts\n- test_per_project_fault_isolation: simulate one project failure, verify others complete\n\n## Edge Cases\n- Sync cancelled immediately after start — partial summary with zero counts is valid\n- Network timeout during sync — error state with last-known progress preserved\n- Very large sync (100k+ entities) — progress coalescer prevents render thrash\n- Sync started while another sync TaskKey::Sync exists — TaskSupervisor dedup rejects it\n- Inline mode from Bootstrap: compact rendering, no full progress bars\n\n## Dependency Context\nUses TaskSupervisor from bd-3le2 for dedup and cancellation. Uses DbManager from bd-2kop for database access. Uses lore library ingestion module directly for sync operations. 
Used by Bootstrap screen (bd-3ty8) in inline mode.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:09.481354Z","created_by":"tayloreernisse","updated_at":"2026-02-19T04:40:45.881183Z","closed_at":"2026-02-19T04:40:45.881129Z","close_reason":"Completed in previous session: Sync screen view + all 9 message handlers wired.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2x2h","depends_on_id":"bd-3le2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2x2h","depends_on_id":"bd-u7se","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2y79","title":"Add work item status via GraphQL enrichment","description":"## Summary\n\nGitLab 18.2+ has native work item status (To do, In progress, Done, Won't do, Duplicate) available ONLY via GraphQL, not REST. This enriches synced issues with status information by making supplementary GraphQL calls after REST ingestion.\n\n**Plan document:** plans/work-item-status-graphql.md\n\n## Critical Findings (from API research)\n\n- **EE-only (Premium/Ultimate)** — Free tier won't have the widget at all\n- **GraphQL auth differs from REST** — must use `Authorization: Bearer `, NOT `PRIVATE-TOKEN`\n- **Must use `workItems` resolver, NOT `project.issues`** — legacy issues path doesn't expose status widgets\n- **5 categories:** TRIAGE, TO_DO, IN_PROGRESS, DONE, CANCELED (not 3 as originally assumed)\n- **Max 100 items per GraphQL page** (standard GitLab limit)\n- **Custom statuses possible on 18.5+** — can't assume only system-defined statuses\n\n## Migration\n\nUses migration **021** (001-020 already exist on disk).\nAdds `status_name TEXT` and `status_category TEXT` to `issues` table (both nullable).\n\n## Files\n\n- src/gitlab/graphql.rs (NEW — minimal GraphQL client + status fetcher)\n- src/gitlab/mod.rs (add pub mod graphql)\n- src/gitlab/types.rs (WorkItemStatus, WorkItemStatusCategory 
enum)\n- src/core/db.rs (migration 021 in MIGRATIONS array)\n- src/core/config.rs (fetch_work_item_status toggle in SyncConfig)\n- src/ingestion/orchestrator.rs (enrichment step after issue sync)\n- src/cli/commands/show.rs (display status)\n- src/cli/commands/list.rs (status in list output + --status filter)\n\n## Acceptance Criteria\n\n- [ ] GraphQL client POSTs queries with Bearer auth and handles errors\n- [ ] Status fetched via workItems resolver with pagination\n- [ ] Migration 021 adds status_name and status_category to issues\n- [ ] lore show issue displays status (when available)\n- [ ] lore --robot show issue includes status in JSON\n- [ ] lore list issues --status filter works\n- [ ] Graceful degradation: Free tier, old GitLab, disabled GraphQL all handled\n- [ ] Config toggle: fetch_work_item_status (default true)\n- [ ] cargo check + clippy + tests pass","status":"closed","priority":1,"issue_type":"feature","created_at":"2026-02-05T18:32:39.287957Z","created_by":"tayloreernisse","updated_at":"2026-02-17T15:08:29.499020Z","closed_at":"2026-02-17T15:08:29.498969Z","close_reason":"Already implemented: GraphQL status enrichment shipped in v0.8.x — migration 021, graphql.rs, --status filter, --no-status flag all complete","compaction_level":0,"original_size":0,"labels":["api","phase-b"]} {"id":"bd-2ygk","title":"Implement user flow integration tests (9 PRD flows)","description":"## Background\n\nThe PRD Section 6 defines 9 end-to-end user flows that exercise cross-screen navigation, state preservation, and data flow. The existing vertical slice test (bd-1mju) covers one flow (Dashboard -> Issue List -> Issue Detail -> Sync). These integration tests cover the remaining 8 flows plus re-test the vertical slice from a user-journey perspective. 
Each test simulates a realistic keystroke sequence using FrankenTUI's test harness and verifies that the correct screens are reached with the correct data visible.\n\n## Approach\n\nCreate a test module `tests/tui_user_flows.rs` with 9 test functions, each simulating a keystroke sequence against a FrankenTUI `TestHarness` with a pre-populated test database. Tests use `FakeClock` for deterministic timestamps.\n\n**Test database fixture**: A shared setup function creates an in-memory SQLite DB with ~20 issues, ~10 MRs, ~30 discussions, a few experts, and timeline events. This fixture is reused across all flow tests.\n\n**Flow tests**:\n\n1. **`test_flow_find_expert`** — Dashboard -> `w` -> type \"src/auth/\" -> verify Expert mode results appear -> `↓` select first person -> `Enter` -> verify navigation to Issue List filtered by that person\n2. **`test_flow_timeline_query`** — Dashboard -> `t` -> type \"auth timeout\" -> `Enter` -> verify Timeline shows seed events -> `Enter` on first event -> verify entity detail opens -> `Esc` -> back on Timeline\n3. **`test_flow_quick_search`** — Any screen -> `/` -> type query -> verify results appear -> `Tab` (switch mode) -> verify mode label changes -> `Enter` -> verify entity detail opens\n4. **`test_flow_sync_and_browse`** — Dashboard -> `s` -> `Enter` (start sync) -> wait for completion -> verify Summary shows deltas -> `i` -> verify Issue List filtered to new items\n5. **`test_flow_review_workload`** — Dashboard -> `w` -> `Tab` (Workload mode) -> type \"@bjones\" -> verify workload sections appear (assigned, authored, reviewing)\n6. **`test_flow_command_palette`** — Any screen -> `Ctrl+P` -> type \"mrs draft\" -> verify fuzzy match -> `Enter` -> verify MR List opened with draft filter\n7. **`test_flow_morning_triage`** — Dashboard -> `i` -> verify Issue List (opened, sorted by updated) -> `Enter` on first -> verify Issue Detail -> `Esc` -> verify cursor preserved on same row -> `j` -> verify cursor moved\n8. 
**`test_flow_direct_screen_jumps`** — Issue Detail -> `gt` -> verify Timeline -> `gw` -> verify Who -> `gi` -> verify Issue List -> `H` -> verify Dashboard (clean reset)\n9. **`test_flow_risk_sweep`** — Dashboard -> scroll to Insights -> `Enter` on first insight -> verify pre-filtered Issue List\n\nEach test follows the pattern:\n```rust\n#[test]\nfn test_flow_X() {\n let (harness, app) = setup_test_harness_with_fixture();\n // Send keystrokes\n harness.send_key(Key::Char('w'));\n // Assert screen state\n assert_eq!(app.current_screen(), Screen::Who);\n // Assert visible content\n let frame = harness.render();\n assert!(frame.contains(\"Expert\"));\n}\n```\n\n## Acceptance Criteria\n- [ ] All 9 flow tests exist and compile\n- [ ] Each test uses the shared DB fixture (no per-test DB setup)\n- [ ] Each test verifies screen transitions via `current_screen()` assertions\n- [ ] Each test verifies at least one content assertion (rendered text contains expected data)\n- [ ] test_flow_morning_triage verifies cursor preservation after Enter/Esc round-trip\n- [ ] test_flow_direct_screen_jumps verifies the g-prefix navigation chain\n- [ ] test_flow_sync_and_browse verifies delta-filtered navigation after sync\n- [ ] All tests use FakeClock for deterministic timestamps\n- [ ] Tests complete in <5 seconds each (no real I/O)\n\n## Files\n- CREATE: crates/lore-tui/tests/tui_user_flows.rs\n- MODIFY: (none — this is a new test file only)\n\n## TDD Anchor\nRED: Write `test_flow_morning_triage` first — it exercises the most common daily workflow (Dashboard -> Issue List -> Issue Detail -> back with cursor preservation). 
Start with just the Dashboard -> Issue List transition.\nGREEN: Requires all Phase 2 core screens to be working; the test itself is the GREEN verification.\nVERIFY: cargo test -p lore-tui test_flow_morning_triage\n\nAdditional tests: All 9 flows listed above.\n\n## Edge Cases\n- Flow tests must handle async data loading — use harness.tick() or harness.wait_for_idle() to let async tasks complete before asserting\n- g-prefix timeout (500ms) — tests must send the second key within the timeout; use harness clock control\n- Sync flow test needs a mock sync that completes quickly — use a pre-populated SyncDeltaLedger rather than running actual sync\n\n## Dependency Context\n- Depends on bd-1mju (vertical slice integration test) which establishes the test harness patterns and fixture setup.\n- Depends on bd-2nfs (snapshot test infrastructure) which provides the FakeClock and TestHarness setup.\n- Depends on all Phase 2 core screen beads (bd-35g5 Dashboard, bd-3ei1 Issue List, bd-8ab7 Issue Detail, bd-2kr0 MR List, bd-3t1b MR Detail) being implemented.\n- Depends on Phase 3 power feature beads (bd-1zow Search, bd-29qw Timeline, bd-u7se Who, bd-wzqi Command Palette) being implemented.\n- Depends on bd-2x2h (Sync screen) for the sync+browse flow 
test.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:41.060826Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:29:52.743563Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2ygk","depends_on_id":"bd-1mju","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-1zow","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-29qw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-2kr0","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-2nfs","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-2x2h","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-35g5","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-3ei1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-3t1b","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-8ab7","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-u7se","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-wzqi","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2yo","title":"Fetch MR diffs API and populate mr_file_changes","description":"## Background\n\nThis bead fetches MR diff metadata from the GitLab API and populates the mr_file_changes table created by migration 016. 
It extracts only file-level metadata (paths, change type) and discards actual diff content.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 4.3 (Ingestion).\n\n## Codebase Context\n\n- pending_dependent_fetches already has `job_type='mr_diffs'` in CHECK constraint (migration 011)\n- dependent_queue.rs has: enqueue_job(), claim_jobs(), complete_job(), fail_job() with exponential backoff\n- Orchestrator pattern: enqueue after entity ingestion, drain after primary ingestion completes\n- GitLab client uses fetch_all_pages() for pagination\n- Existing drain patterns in orchestrator.rs: drain_resource_events() and drain_mr_closes_issues() — follow same pattern\n- config.sync.fetch_mr_file_changes flag guards enqueue (see bd-jec)\n- mr_file_changes table created by migration 016 (bd-1oo) — NOT 015 (015 is commit SHAs)\n- merge_commit_sha and squash_commit_sha already captured during MR ingestion (src/ingestion/merge_requests.rs lines 184, 205-206, 230-231) — no work needed for those fields\n\n## Approach\n\n### 1. API Client — add to `src/gitlab/client.rs`:\n\n```rust\npub async fn fetch_mr_diffs(\n &self,\n project_id: i64,\n mr_iid: i64,\n) -> Result> {\n let path = format\\!(\"/projects/{project_id}/merge_requests/{mr_iid}/diffs\");\n self.fetch_all_pages(&path, &[(\"per_page\", \"100\")]).await\n .or_else(|e| coalesce_not_found(e, Vec::new()))\n}\n```\n\n### 2. Types — add to `src/gitlab/types.rs`:\n\n```rust\n#[derive(Debug, Clone, Deserialize, Serialize)]\npub struct GitLabMrDiff {\n pub old_path: String,\n pub new_path: String,\n pub new_file: bool,\n pub renamed_file: bool,\n pub deleted_file: bool,\n // Ignore: diff, a_mode, b_mode, generated_file (not stored)\n}\n```\n\nAdd `GitLabMrDiff` to `src/gitlab/mod.rs` re-exports.\n\n### 3. 
Change Type Derivation (in new file):\n\n```rust\nfn derive_change_type(diff: &GitLabMrDiff) -> &'static str {\n if diff.new_file { \"added\" }\n else if diff.renamed_file { \"renamed\" }\n else if diff.deleted_file { \"deleted\" }\n else { \"modified\" }\n}\n```\n\n### 4. DB Storage — new `src/ingestion/mr_diffs.rs`:\n\n```rust\npub fn upsert_mr_file_changes(\n conn: &Connection,\n mr_local_id: i64,\n project_id: i64,\n diffs: &[GitLabMrDiff],\n) -> Result {\n // DELETE FROM mr_file_changes WHERE merge_request_id = ?\n // INSERT each diff row with derived change_type\n // DELETE+INSERT is simpler than UPSERT for array replacement\n}\n```\n\nAdd `pub mod mr_diffs;` to `src/ingestion/mod.rs`.\n\n### 5. Queue Integration — in orchestrator.rs:\n\n```rust\n// After MR upsert, if config.sync.fetch_mr_file_changes:\nenqueue_job(conn, project_id, \"merge_request\", mr_iid, mr_local_id, \"mr_diffs\")?;\n```\n\nAdd `drain_mr_diffs()` following the drain_mr_closes_issues() pattern. Call it after drain_mr_closes_issues() in the sync pipeline.\n\n## Acceptance Criteria\n\n- [ ] `fetch_mr_diffs()` calls GET /projects/:id/merge_requests/:iid/diffs with pagination\n- [ ] GitLabMrDiff type added to src/gitlab/types.rs and re-exported from src/gitlab/mod.rs\n- [ ] Change type derived: new_file->added, renamed_file->renamed, deleted_file->deleted, else->modified\n- [ ] mr_file_changes rows have correct old_path, new_path, change_type\n- [ ] Old rows deleted before insert (clean replacement per MR)\n- [ ] Jobs only enqueued when config.sync.fetch_mr_file_changes is true\n- [ ] 404/403 API errors handled gracefully (empty result, not failure)\n- [ ] drain_mr_diffs() added to orchestrator.rs sync pipeline\n- [ ] `pub mod mr_diffs;` added to src/ingestion/mod.rs\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/gitlab/client.rs` (add fetch_mr_diffs method)\n- `src/gitlab/types.rs` (add GitLabMrDiff struct)\n- 
`src/gitlab/mod.rs` (re-export GitLabMrDiff)\n- `src/ingestion/mr_diffs.rs` (NEW — upsert_mr_file_changes + derive_change_type)\n- `src/ingestion/mod.rs` (add pub mod mr_diffs)\n- `src/ingestion/orchestrator.rs` (enqueue mr_diffs jobs + drain_mr_diffs)\n\n## TDD Loop\n\nRED:\n- `test_derive_change_type_added` - new_file=true -> \"added\"\n- `test_derive_change_type_renamed` - renamed_file=true -> \"renamed\"\n- `test_derive_change_type_deleted` - deleted_file=true -> \"deleted\"\n- `test_derive_change_type_modified` - all false -> \"modified\"\n- `test_upsert_replaces_existing` - second upsert replaces first\n\nGREEN: Implement API client, type derivation, DB ops, orchestrator wiring.\n\nVERIFY: `cargo test --lib -- mr_diffs`\n\n## Edge Cases\n\n- MR with 500+ files: paginate properly via fetch_all_pages\n- Binary files: handled as modified (renamed_file/new_file/deleted_file all false)\n- File renamed AND modified: renamed_file=true takes precedence\n- Draft MRs: still fetch diffs\n- Deleted MR: 404 -> empty vec via coalesce_not_found()\n- merge_commit_sha/squash_commit_sha: already handled in merge_requests.rs ingestion — NOT part of this bead\n","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:08.939514Z","created_by":"tayloreernisse","updated_at":"2026-02-08T18:27:05.993580Z","closed_at":"2026-02-08T18:27:05.993482Z","close_reason":"Implemented: GitLabMrDiff type, fetch_mr_diffs client method, upsert_mr_file_changes in new mr_diffs.rs module, enqueue_mr_diffs_jobs + drain_mr_diffs in orchestrator, migration 020 for diffs_synced_for_updated_at watermark, progress events, autocorrect registry. 
All 390 tests pass, clippy clean.","compaction_level":0,"original_size":0,"labels":["api","gate-4","phase-b"],"dependencies":[{"issue_id":"bd-2yo","depends_on_id":"bd-14q","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2yo","depends_on_id":"bd-1oo","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2yo","depends_on_id":"bd-jec","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2yo","depends_on_id":"bd-tir","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -218,7 +218,7 @@ {"id":"bd-3eu","title":"Implement hybrid search with adaptive recall","description":"## Background\nHybrid search is the top-level search orchestrator that combines FTS5 lexical results with sqlite-vec semantic results via RRF ranking. It supports three modes (Lexical, Semantic, Hybrid) and implements adaptive recall (wider initial fetch when filters are applied) and graceful degradation (falls back to FTS when Ollama is unavailable). 
All modes use RRF for consistent --explain output.\n\n## Approach\nCreate `src/search/hybrid.rs` per PRD Section 5.3.\n\n**Key types:**\n```rust\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum SearchMode {\n Hybrid, // Vector + FTS with RRF\n Lexical, // FTS only\n Semantic, // Vector only\n}\n\nimpl SearchMode {\n pub fn from_str(s: &str) -> Option {\n match s.to_lowercase().as_str() {\n \"hybrid\" => Some(Self::Hybrid),\n \"lexical\" | \"fts\" => Some(Self::Lexical),\n \"semantic\" | \"vector\" => Some(Self::Semantic),\n _ => None,\n }\n }\n\n pub fn as_str(&self) -> &'static str {\n match self {\n Self::Hybrid => \"hybrid\",\n Self::Lexical => \"lexical\",\n Self::Semantic => \"semantic\",\n }\n }\n}\n\npub struct HybridResult {\n pub document_id: i64,\n pub score: f64, // Normalized RRF score (0-1)\n pub vector_rank: Option,\n pub fts_rank: Option,\n pub rrf_score: f64, // Raw RRF score\n}\n```\n\n**Core function (ASYNC, PRD-exact signature):**\n```rust\npub async fn search_hybrid(\n conn: &Connection,\n client: Option<&OllamaClient>, // None if Ollama unavailable\n ollama_base_url: Option<&str>, // For actionable error messages\n query: &str,\n mode: SearchMode,\n filters: &SearchFilters,\n fts_mode: FtsQueryMode,\n) -> Result<(Vec, Vec)>\n```\n\n**IMPORTANT — client is `Option<&OllamaClient>`:** This enables graceful degradation. When Ollama is unavailable, the caller passes `None` and hybrid mode falls back to FTS-only with a warning. 
The `ollama_base_url` is separate so error messages can include it even when client is None.\n\n**Adaptive recall constants (PRD Section 5.3):**\n```rust\nconst BASE_RECALL_MIN: usize = 50;\nconst FILTERED_RECALL_MIN: usize = 200;\nconst RECALL_CAP: usize = 1500;\n```\n\n**Recall formula:**\n```rust\nlet requested = filters.clamp_limit();\nlet top_k = if filters.has_any_filter() {\n (requested * 50).max(FILTERED_RECALL_MIN).min(RECALL_CAP)\n} else {\n (requested * 10).max(BASE_RECALL_MIN).min(RECALL_CAP)\n};\n```\n\n**Mode behavior:**\n- **Lexical:** FTS only -> rank_rrf with empty vector list (single-list RRF)\n- **Semantic:** Vector only -> requires client (error if None) -> rank_rrf with empty FTS list\n- **Hybrid:** Both FTS + vector -> rank_rrf with both lists\n- **Hybrid with client=None:** Graceful degradation to Lexical with warning, NOT error\n\n**Graceful degradation logic:**\n```rust\nSearchMode::Hybrid => {\n let fts_results = search_fts(conn, query, top_k, fts_mode)?;\n let fts_tuples: Vec<_> = fts_results.iter().map(|r| (r.document_id, r.rank)).collect();\n\n match client {\n Some(client) => {\n let query_embedding = client.embed_batch(vec\\![query.to_string()]).await?;\n let embedding = query_embedding.into_iter().next().unwrap();\n let vec_results = search_vector(conn, &embedding, top_k)?;\n let vec_tuples: Vec<_> = vec_results.iter().map(|r| (r.document_id, r.distance)).collect();\n let ranked = rank_rrf(&vec_tuples, &fts_tuples);\n // ... map to HybridResult\n Ok((results, warnings))\n }\n None => {\n warnings.push(\"Ollama unavailable, falling back to lexical search\".into());\n let ranked = rank_rrf(&[], &fts_tuples);\n // ... 
map to HybridResult\n Ok((results, warnings))\n }\n }\n}\n```\n\n## Acceptance Criteria\n- [ ] Function is `async` (per PRD — Ollama client methods are async)\n- [ ] Signature takes `client: Option<&OllamaClient>` (not required)\n- [ ] Signature takes `ollama_base_url: Option<&str>` for actionable error messages\n- [ ] Returns `(Vec, Vec)` — results + warnings\n- [ ] Lexical mode: FTS-only results ranked via RRF (single list)\n- [ ] Semantic mode: vector-only results ranked via RRF; error if client is None\n- [ ] Hybrid mode: both FTS + vector results merged via RRF\n- [ ] Graceful degradation: client=None in Hybrid falls back to FTS with warning (not error)\n- [ ] Adaptive recall: unfiltered max(50, limit*10), filtered max(200, limit*50), capped 1500\n- [ ] All modes produce consistent --explain output (vector_rank, fts_rank, rrf_score)\n- [ ] SearchMode::from_str accepts aliases: \"fts\" for Lexical, \"vector\" for Semantic\n- [ ] `cargo build` succeeds\n\n## Files\n- `src/search/hybrid.rs` — new file\n- `src/search/mod.rs` — add `pub use hybrid::{search_hybrid, HybridResult, SearchMode};`\n\n## TDD Loop\nRED: Tests (some integration, some unit):\n- `test_lexical_mode` — FTS results only\n- `test_semantic_mode` — vector results only\n- `test_hybrid_mode` — both lists merged\n- `test_graceful_degradation` — None client falls back to FTS with warning in warnings vec\n- `test_adaptive_recall_unfiltered` — recall = max(50, limit*10)\n- `test_adaptive_recall_filtered` — recall = max(200, limit*50)\n- `test_recall_cap` — never exceeds 1500\n- `test_search_mode_from_str` — \"hybrid\", \"lexical\", \"fts\", \"semantic\", \"vector\", invalid\nGREEN: Implement search_hybrid\nVERIFY: `cargo test hybrid`\n\n## Edge Cases\n- Both FTS and vector return zero results: empty output (not error)\n- FTS returns results but vector returns empty: RRF still works (single-list)\n- Very high limit (100) with filters: recall = min(5000, 1500) = 1500\n- Semantic mode with client=None: 
error (OllamaUnavailable), not degradation\n- Semantic mode with 0% coverage: return LoreError::EmbeddingsNotBuilt","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:50.343002Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:56:16.631748Z","closed_at":"2026-01-30T17:56:16.631682Z","close_reason":"Implemented hybrid search with 3 modes (lexical/semantic/hybrid), graceful degradation when Ollama unavailable, adaptive recall (50-1500), RRF fusion. 6 tests pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3eu","depends_on_id":"bd-1k1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3eu","depends_on_id":"bd-335","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3eu","depends_on_id":"bd-3ez","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3eu","depends_on_id":"bd-bjo","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3ez","title":"Implement RRF ranking","description":"## Background\nReciprocal Rank Fusion (RRF) combines results from multiple retrieval systems (FTS5 lexical + sqlite-vec semantic) into a single ranked list without requiring score normalization. Documents appearing in both lists rank higher than single-list documents. 
This is the core ranking algorithm for hybrid search in Gate B.\n\n## Approach\nCreate \\`src/search/rrf.rs\\` per PRD Section 5.2.\n\n```rust\nuse std::collections::HashMap;\n\nconst RRF_K: f64 = 60.0;\n\npub struct RrfResult {\n pub document_id: i64,\n pub rrf_score: f64, // Raw RRF score\n pub normalized_score: f64, // Normalized to 0-1 (rrf_score / max)\n pub vector_rank: Option, // 1-indexed rank in vector list\n pub fts_rank: Option, // 1-indexed rank in FTS list\n}\n\n/// Input: tuples of (document_id, score/distance) — already sorted by retriever.\n/// Ranks are 1-indexed (first result = rank 1).\n/// Score = sum of 1/(k + rank) for each list containing the document.\npub fn rank_rrf(\n vector_results: &[(i64, f64)], // (doc_id, distance)\n fts_results: &[(i64, f64)], // (doc_id, bm25_score)\n) -> Vec\n```\n\n**Algorithm (per PRD):**\n1. Build HashMap\n2. For each vector result at position i: score += 1/(K + (i+1)), record vector_rank = i+1 (**1-indexed**)\n3. For each FTS result at position i: score += 1/(K + (i+1)), record fts_rank = i+1 (**1-indexed**)\n4. Sort descending by rrf_score\n5. 
Normalize: each result.normalized_score = result.rrf_score / max_score (best = 1.0)\n\n**Key PRD details:**\n- Ranks are **1-indexed** (rank 1 = best, not rank 0)\n- Input is \\`&[(i64, f64)]\\` tuples, NOT custom structs\n- Output has both \\`rrf_score\\` (raw) and \\`normalized_score\\` (0-1)\n\n## Acceptance Criteria\n- [ ] Documents in both lists score higher than single-list documents\n- [ ] Single-list documents are included (not dropped)\n- [ ] Ranks are 1-indexed (first element = rank 1)\n- [ ] Raw RRF score available in rrf_score field\n- [ ] Normalized score: best = 1.0, all in [0, 1]\n- [ ] Results sorted descending by rrf_score\n- [ ] vector_rank and fts_rank tracked per result for --explain\n- [ ] Empty input lists handled (return empty)\n- [ ] One empty list + one non-empty returns results from non-empty list\n\n## Files\n- \\`src/search/rrf.rs\\` — new file\n- \\`src/search/mod.rs\\` — add \\`mod rrf; pub use rrf::{rank_rrf, RrfResult};\\`\n\n## TDD Loop\nRED: Tests in \\`#[cfg(test)] mod tests\\`:\n- \\`test_dual_list_ranks_higher\\` — doc in both lists scores > doc in one list\n- \\`test_single_list_included\\` — FTS-only and vector-only docs appear\n- \\`test_normalization\\` — best score is 1.0, all in [0, 1]\n- \\`test_empty_inputs\\` — empty returns empty\n- \\`test_ranks_are_1_indexed\\` — verify vector_rank/fts_rank start at 1\n- \\`test_raw_and_normalized_scores\\` — both fields populated correctly\nGREEN: Implement rank_rrf()\nVERIFY: \\`cargo test rrf\\`\n\n## Edge Cases\n- Duplicate document_id within same list: shouldn't happen, use first occurrence\n- Single result in one list, zero in other: normalized_score = 1.0\n- Very large input lists: HashMap handles efficiently","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:50.309012Z","created_by":"tayloreernisse","updated_at":"2026-01-30T16:53:04.128560Z","closed_at":"2026-01-30T16:53:04.128498Z","close_reason":"Completed: RRF ranking with 1-indexed ranks, 
raw+normalized scores, vector_rank/fts_rank provenance, 7 tests pass","compaction_level":0,"original_size":0} {"id":"bd-3fjk","title":"Implement stale response + SQLITE_BUSY + cancel race tests","description":"## Background\nThese tests verify the TUI handles async race conditions correctly: stale responses from superseded tasks are dropped, SQLITE_BUSY errors trigger retry with backoff, and query cancellation doesn't bleed across tasks or leave stuck loading states.\n\n## Approach\nStale response tests:\n- Submit task A (generation 1), then submit task B (generation 2) with same key\n- Deliver task A's result: assert it's dropped (generation mismatch)\n- Deliver task B's result: assert it's applied\n\nSQLITE_BUSY retry tests:\n- Lock DB with a writer, attempt read query, assert retry with exponential backoff\n- Verify TUI shows \"Database busy\" toast, not a crash\n\nCancel race tests:\n- Submit task, cancel via CancelToken, immediately submit new task with same key\n- Assert old task's CancelToken is set, new task proceeds normally\n- Rapid cancel-then-resubmit: no stuck LoadingInitial state after sequence\n- Cross-task bleed: interrupt handle only cancels the owning task's connection\n\n## Acceptance Criteria\n- [ ] Stale response with old generation silently dropped\n- [ ] SQLITE_BUSY shows user-friendly error, retries automatically\n- [ ] Cancel-then-resubmit: no stuck loading state\n- [ ] InterruptHandle only cancels its owning task's query\n- [ ] Rapid sequence (5 cancel+submit in 100ms): final state is correct\n\n## Files\n- CREATE: crates/lore-tui/tests/race_condition_tests.rs\n\n## TDD Anchor\nRED: Write test_stale_response_dropped that submits two tasks, delivers first result, asserts state unchanged.\nGREEN: Ensure is_current() check in update() guards all *Loaded handlers.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_stale_response\n\n## Edge Cases\n- SQLITE_BUSY timeout must be configurable (default 5000ms)\n- Rapid navigation 
can create >10 pending tasks — all but latest should be cancelled\n- CancelToken check must be in hot loops, not just at task entry\n\n## Dependency Context\nUses TaskSupervisor from \"Implement TaskSupervisor\" task.\nUses DbManager from \"Implement DbManager\" task.\nUses LoreApp update() stale-result guards from \"Implement LoreApp Model\" task.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:04:20.574583Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:38.215724Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3fjk","depends_on_id":"bd-1b6k","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3fjk","depends_on_id":"bd-2nfs","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-3h00","title":"Implement session persistence + instance lock + text width","description":"## Background\nSession state persistence allows the TUI to resume where the user left off (current screen, filter state, scroll position). Instance locking prevents data corruption from accidental double-launch. 
Text width handling ensures correct rendering of CJK, emoji, and combining marks.\n\n## Approach\nSession (session.rs):\n- SessionState: versioned struct with current_screen, nav_history, per-screen filter/scroll state, global_scope\n- save(path): atomic write (tmp->fsync->rename) + CRC32 checksum + max-size guard (1MB)\n- load(path) -> Result: validate CRC32, reject corrupted (quarantine bad file), handle version migration\n- corruption quarantine: move bad files to .quarantine/ subdir\n\nInstance Lock (instance_lock.rs):\n- InstanceLock: advisory lock file (~/.local/share/lore/tui.lock) with PID written\n- acquire() -> Result: try lock, check for stale lock (PID no longer running), clear stale, create new\n- Drop impl: remove lock file\n- On collision: clear error message with running PID\n\nText Width (text_width.rs):\n- measure_display_width(s: &str) -> usize: terminal display width using unicode-width + unicode-segmentation\n- truncate_display_width(s: &str, max_width: usize) -> String: truncate at grapheme boundary, append ellipsis\n- pad_display_width(s: &str, width: usize) -> String: pad with spaces to target display width\n- Handles: CJK (2-cell), emoji ZWJ sequences, skin tone modifiers, flag sequences, combining marks\n\n## Acceptance Criteria\n- [ ] Session state saved on quit, restored on launch\n- [ ] Atomic write prevents partial session files\n- [ ] CRC32 checksum detects corruption\n- [ ] Corrupted sessions quarantined (not deleted)\n- [ ] Max 1MB session file size enforced\n- [ ] Instance lock prevents double-launch with clear error\n- [ ] Stale lock (dead PID) automatically recovered\n- [ ] Lock released on normal exit and on panic (Drop impl)\n- [ ] CJK characters measured as 2 cells wide\n- [ ] Emoji ZWJ sequences treated as single grapheme cluster\n- [ ] Truncation never splits a grapheme cluster\n\n## Files\n- CREATE: crates/lore-tui/src/session.rs\n- CREATE: crates/lore-tui/src/instance_lock.rs\n- CREATE: 
crates/lore-tui/src/text_width.rs\n\n## TDD Anchor\nRED: Write test_measure_cjk_width that asserts measure_display_width(\"Hello\") == 5 and measure_display_width(\"日本語\") == 6.\nGREEN: Implement measure_display_width using unicode-width.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_measure\n\nAdditional tests:\n- test_session_roundtrip: save and load, assert equal\n- test_session_corruption_detected: modify saved file, assert load returns error\n- test_instance_lock_stale_recovery: create lock with dead PID, assert acquire succeeds\n- test_truncate_emoji: truncate string with emoji, assert no split grapheme\n\n## Edge Cases\n- Lock file dir doesn't exist: create it\n- PID reuse: rare but possible — stale lock detection uses PID existence check only\n- Session version migration: old version file should be handled gracefully (reset to defaults)\n- text_width: some terminals render emoji incorrectly — we use standard wcwidth semantics","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:03:09.241016Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:44:25.212954Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3h00","depends_on_id":"bd-26lp","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-3h00","title":"Implement session persistence + instance lock + text width","description":"## Background\nSession state persistence allows the TUI to resume where the user left off (current screen, filter state, scroll position). Instance locking prevents data corruption from accidental double-launch. 
Text width handling ensures correct rendering of CJK, emoji, and combining marks.\n\n## Approach\nSession (session.rs):\n- SessionState: versioned struct with current_screen, nav_history, per-screen filter/scroll state, global_scope\n- save(path): atomic write (tmp->fsync->rename) + CRC32 checksum + max-size guard (1MB)\n- load(path) -> Result: validate CRC32, reject corrupted (quarantine bad file), handle version migration\n- corruption quarantine: move bad files to .quarantine/ subdir\n\nInstance Lock (instance_lock.rs):\n- InstanceLock: advisory lock file (~/.local/share/lore/tui.lock) with PID written\n- acquire() -> Result: try lock, check for stale lock (PID no longer running), clear stale, create new\n- Drop impl: remove lock file\n- On collision: clear error message with running PID\n\nText Width (text_width.rs):\n- measure_display_width(s: &str) -> usize: terminal display width using unicode-width + unicode-segmentation\n- truncate_display_width(s: &str, max_width: usize) -> String: truncate at grapheme boundary, append ellipsis\n- pad_display_width(s: &str, width: usize) -> String: pad with spaces to target display width\n- Handles: CJK (2-cell), emoji ZWJ sequences, skin tone modifiers, flag sequences, combining marks\n\n## Acceptance Criteria\n- [ ] Session state saved on quit, restored on launch\n- [ ] Atomic write prevents partial session files\n- [ ] CRC32 checksum detects corruption\n- [ ] Corrupted sessions quarantined (not deleted)\n- [ ] Max 1MB session file size enforced\n- [ ] Instance lock prevents double-launch with clear error\n- [ ] Stale lock (dead PID) automatically recovered\n- [ ] Lock released on normal exit and on panic (Drop impl)\n- [ ] CJK characters measured as 2 cells wide\n- [ ] Emoji ZWJ sequences treated as single grapheme cluster\n- [ ] Truncation never splits a grapheme cluster\n\n## Files\n- CREATE: crates/lore-tui/src/session.rs\n- CREATE: crates/lore-tui/src/instance_lock.rs\n- CREATE: 
crates/lore-tui/src/text_width.rs\n\n## TDD Anchor\nRED: Write test_measure_cjk_width that asserts measure_display_width(\"Hello\") == 5 and measure_display_width(\"日本語\") == 6.\nGREEN: Implement measure_display_width using unicode-width.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_measure\n\nAdditional tests:\n- test_session_roundtrip: save and load, assert equal\n- test_session_corruption_detected: modify saved file, assert load returns error\n- test_instance_lock_stale_recovery: create lock with dead PID, assert acquire succeeds\n- test_truncate_emoji: truncate string with emoji, assert no split grapheme\n\n## Edge Cases\n- Lock file dir doesn't exist: create it\n- PID reuse: rare but possible — stale lock detection uses PID existence check only\n- Session version migration: old version file should be handled gracefully (reset to defaults)\n- text_width: some terminals render emoji incorrectly — we use standard wcwidth semantics","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:03:09.241016Z","created_by":"tayloreernisse","updated_at":"2026-02-19T04:40:08.321589Z","closed_at":"2026-02-19T04:40:08.321538Z","close_reason":"All 3 components implemented with 31 tests: text_width.rs (16 tests), instance_lock.rs (6 tests), session.rs (9 tests). Atomic writes, CRC32, quarantine, stale PID recovery, Unicode width.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3h00","depends_on_id":"bd-26lp","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3hjh","title":"Quality gates: cargo check, clippy, fmt, test","description":"## Background\nFinal verification that all implementation beads integrate cleanly. Must pass all quality gates before the feature is considered complete.\n\n## Approach\nRun all 4 quality gate commands. Fix any issues discovered.\n\n## Commands (in order)\n1. cargo check --all-targets (zero errors)\n2. 
cargo clippy --all-targets -- -D warnings (pedantic + nursery clean)\n3. cargo fmt --check (formatted)\n4. cargo test (all green, including all 42 new tests)\n\n## Acceptance Criteria\n- [ ] cargo check --all-targets: exit 0\n- [ ] cargo clippy --all-targets -- -D warnings: exit 0\n- [ ] cargo fmt --check: exit 0\n- [ ] cargo test: all pass (0 failures)\n- [ ] All 42 new tests from the plan are present and green\n\n## Known Gotchas from Plan's Trial Run\n- clippy::items_after_test_module: ansi256_from_rgb must be BEFORE #[cfg(test)]\n- clippy::collapsible_if: use let-chain syntax (if x && let ...)\n- clippy::manual_range_contains: use (16..=231).contains(&blue)\n- r##\"...\"## needed for test JSON with hex colors","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-11T06:42:34.364266Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.423111Z","closed_at":"2026-02-11T07:21:33.423074Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3hjh","depends_on_id":"bd-1b91","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3hjh","depends_on_id":"bd-2sr2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3hjh","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3hjh","depends_on_id":"bd-3a4k","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3hy","title":"[CP1] Test fixtures for mocked GitLab responses","description":"Create mock response files for integration tests using wiremock.\n\n## Fixtures to Create\n\n### tests/fixtures/gitlab_issue.json\nSingle issue with labels:\n- id, iid, project_id, title, description, state\n- author object\n- labels array (string names)\n- timestamps\n- web_url\n\n### 
tests/fixtures/gitlab_issues_page.json\nArray of issues simulating paginated response:\n- 3-5 issues with varying states\n- Mix of labels\n\n### tests/fixtures/gitlab_discussion.json\nSingle discussion:\n- id (string)\n- individual_note: false\n- notes array with 2+ notes\n- Include one system note\n\n### tests/fixtures/gitlab_discussions_page.json\nArray of discussions:\n- Mix of individual_note true/false\n- Include resolvable/resolved examples\n\n## Edge Cases to Cover\n- Issue with no labels (empty array)\n- Issue with labels_details (ignored in CP1)\n- Discussion with individual_note=true (single note)\n- System notes with system=true\n- Resolvable notes\n\nFiles: tests/fixtures/gitlab_issue.json, gitlab_issues_page.json, gitlab_discussion.json, gitlab_discussions_page.json\nDone when: wiremock handlers can use fixtures for deterministic tests","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:59:01.206436Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.991367Z","closed_at":"2026-01-25T17:02:01.991367Z","deleted_at":"2026-01-25T17:02:01.991362Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-3ia","title":"Fetch closes_issues API and populate entity_references","description":"## Background\nGET /projects/:id/merge_requests/:iid/closes_issues returns issues that will close when MR merges. This is the most reliable source for MR→issue relationships. Uses the generic dependent fetch queue (job_type = 'mr_closes_issues').\n\n## Approach\n\n**1. 
Add API endpoint to GitLab client (src/gitlab/client.rs):**\n```rust\n/// Fetch issues that will be closed when this MR merges.\npub async fn fetch_mr_closes_issues(\n &self, \n project_id: i64, \n iid: i64\n) -> Result>\n```\n\nNew type in src/gitlab/types.rs:\n```rust\n#[derive(Debug, Clone, Deserialize)]\npub struct GitLabIssueRef {\n pub id: i64,\n pub iid: i64,\n pub project_id: i64,\n pub title: String,\n pub state: String,\n pub web_url: String,\n}\n```\n\nURL: `GET /api/v4/projects/{project_id}/merge_requests/{iid}/closes_issues?per_page=100`\n\n**2. Enqueue jobs during MR ingestion:**\nIn orchestrator.rs, after MR upsert:\n```rust\nenqueue_job(conn, project_id, \"merge_request\", iid, local_id, \"mr_closes_issues\", None)?;\n```\n\nThis is always enqueued (not gated by a config flag) because cross-reference data is fundamental to all temporal queries.\n\n**3. Process jobs in drain step:**\nIn the drain dispatcher (from bd-1ep), handle \"mr_closes_issues\" job_type:\n```rust\nlet closes_issues = client.fetch_mr_closes_issues(gitlab_project_id, job.entity_iid).await?;\nfor issue_ref in &closes_issues {\n let target_id = resolve_issue_local_id(conn, project_id, issue_ref.iid);\n insert_entity_reference(conn, EntityReference {\n source_entity_type: \"merge_request\",\n source_entity_id: job.entity_local_id,\n target_entity_type: \"issue\",\n target_entity_id: target_id, // Some(id) or None for cross-project\n target_project_path: if target_id.is_none() { Some(resolve_project_path(issue_ref.project_id)) } else { None },\n target_entity_iid: if target_id.is_none() { Some(issue_ref.iid) } else { None },\n reference_type: \"closes\",\n source_method: \"api_closes_issues\",\n created_at: None,\n })?;\n}\n```\n\n**4. 
Insert helper for entity_references:**\nAdd to src/core/references.rs:\n```rust\npub fn insert_entity_reference(conn: &Connection, ref_: &EntityReference) -> Result\n// INSERT OR IGNORE, returns true if inserted\n```\n\n## Acceptance Criteria\n- [ ] closes_issues API called for all MRs during sync\n- [ ] Entity references created with reference_type='closes', source_method='api_closes_issues'\n- [ ] Source = MR, target = issue (correct directionality)\n- [ ] Cross-project issues stored as unresolved (target_entity_id=NULL, target_project_path set)\n- [ ] Idempotent: re-sync doesn't create duplicate references\n- [ ] 404 on deleted MR handled gracefully (fail_job)\n\n## Files\n- src/gitlab/client.rs (add fetch_mr_closes_issues)\n- src/gitlab/types.rs (add GitLabIssueRef)\n- src/core/references.rs (add insert_entity_reference helper)\n- src/ingestion/orchestrator.rs (enqueue mr_closes_issues jobs)\n- src/core/drain.rs or sync.rs (handle mr_closes_issues in drain dispatcher)\n\n## TDD Loop\nRED: tests/references_tests.rs:\n- `test_closes_issues_creates_references` - mock closes_issues response, verify entity_references rows\n- `test_closes_issues_cross_project_unresolved` - issue from different project stored as unresolved\n- `test_closes_issues_idempotent` - process same job twice, verify no duplicates\n\ntests/gitlab_types_tests.rs:\n- `test_deserialize_issue_ref` - verify GitLabIssueRef deserialization\n\nGREEN: Implement API endpoint, enqueue hook, drain handler, insert helper\n\nVERIFY: `cargo test references -- --nocapture && cargo test gitlab_types -- --nocapture`\n\n## Edge Cases\n- closes_issues API returns issues from OTHER projects (cross-project closing) — must check if issue is in local DB\n- Empty response (MR doesn't close any issues) — no refs created, job still completed\n- MR may close the same issue via description (\"Closes #123\") and via commits — API deduplicates, but our INSERT OR IGNORE handles it too\n- The closes_issues API may return stale 
data for draft MRs (issues that *would* close but haven't yet)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:32:33.561956Z","created_by":"tayloreernisse","updated_at":"2026-02-04T20:15:54.763773Z","closed_at":"2026-02-04T20:15:54.763643Z","compaction_level":0,"original_size":0,"labels":["api","gate-2","phase-b"],"dependencies":[{"issue_id":"bd-3ia","depends_on_id":"bd-1se","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3ia","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3ia","depends_on_id":"bd-tir","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -229,7 +229,7 @@ {"id":"bd-3jqx","title":"Implement async integration tests: cancellation, timeout, embed isolation, payload integrity","description":"## Background\n\nThe surgical sync pipeline involves async operations, cancellation signals, timeouts, scoped embedding, and multi-entity coordination. Unit tests in individual beads cover their own logic, but integration tests are needed to verify the full pipeline under realistic conditions: cancellation at different stages, timeout behavior with continuation, embedding scope isolation (only affected documents get embedded), and payload integrity (project_id mismatches rejected). These tests use wiremock for HTTP mocking and tokio for async runtime.\n\n## Approach\n\nCreate `tests/surgical_integration.rs` as an integration test file (Rust convention: `tests/` directory for integration tests). Six test functions covering the critical behavioral properties of the surgical pipeline:\n\n1. **Cancellation before preflight**: Signal cancelled before any HTTP call. Verify: recorder marked failed, no GitLab requests made, result has zero updates.\n2. **Cancellation during dependent stage**: Signal cancelled after preflight succeeds but during discussion fetch. 
Verify: partial results recorded, recorder marked failed, entities processed before cancellation have outcomes.\n3. **Per-entity timeout with continuation**: One entity's GitLab endpoint is slow (wiremock delay). Verify: that entity gets `failed` outcome with timeout error, remaining entities continue and succeed.\n4. **Embed scope isolation**: Sync two issues. Verify: only documents generated from those two issues are embedded, not the entire corpus. Assert by checking document IDs passed to embed function.\n5. **Payload project_id mismatch rejection**: Preflight returns an issue with `project_id` different from the resolved project. Verify: that entity gets `failed` outcome with clear error, other entities unaffected.\n6. **Successful full pipeline**: Sync one issue end-to-end through all stages. Verify: SyncResult has correct counts, entity_results has `synced` outcome, documents regenerated, embeddings created.\n\nAll tests use in-memory SQLite (`create_connection(Path::new(\":memory:\"))` + `run_migrations`) and wiremock `MockServer`.\n\n## Acceptance Criteria\n\n1. All 6 tests compile and pass\n2. Tests are isolated (each creates its own DB and mock server)\n3. Cancellation tests verify recorder state (failed status in sync_runs table)\n4. Timeout test uses wiremock delay, not `tokio::time::sleep` on the test side\n5. Embed isolation test verifies document-level scoping, not just function call\n6. 
Tests run in CI without flakiness (no real network, no real Ollama)\n\n## Files\n\n- `tests/surgical_integration.rs` — all 6 integration tests\n\n## TDD Anchor\n\n```rust\n// tests/surgical_integration.rs\n\nuse lore::cli::commands::sync::{SyncOptions, SyncResult};\nuse lore::core::db::{create_connection, run_migrations};\nuse lore::core::shutdown::ShutdownSignal;\nuse lore::Config;\nuse std::path::Path;\nuse std::time::Duration;\nuse wiremock::{Mock, MockServer, ResponseTemplate};\nuse wiremock::matchers::{method, path_regex};\n\nfn test_config(mock_url: &str) -> Config {\n let mut config = Config::default();\n config.gitlab.url = mock_url.to_string();\n config.gitlab.token = \"test-token\".to_string();\n config\n}\n\nfn setup_db() -> rusqlite::Connection {\n let conn = create_connection(Path::new(\":memory:\")).unwrap();\n run_migrations(&conn).unwrap();\n conn.execute(\n \"INSERT INTO projects (gitlab_project_id, path_with_namespace, web_url)\n VALUES (1, 'group/project', 'https://gitlab.example.com/group/project')\",\n [],\n ).unwrap();\n conn\n}\n\nfn mock_issue_json(iid: u64) -> serde_json::Value {\n serde_json::json!({\n \"id\": 100 + iid, \"iid\": iid, \"project_id\": 1, \"title\": format!(\"Issue {}\", iid),\n \"state\": \"opened\", \"created_at\": \"2026-01-01T00:00:00Z\",\n \"updated_at\": \"2026-02-17T00:00:00Z\",\n \"author\": {\"id\": 1, \"username\": \"dev\", \"name\": \"Dev\"},\n \"web_url\": format!(\"https://gitlab.example.com/group/project/-/issues/{}\", iid)\n })\n}\n\n#[tokio::test]\nasync fn cancellation_before_preflight() {\n let server = MockServer::start().await;\n // No mocks mounted — if any request is made, wiremock will return 404\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issues: vec![7],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n signal.cancel(); // Cancel before anything starts\n\n let result = 
lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"cancel-pre\"), &signal,\n ).await.unwrap();\n\n assert_eq!(result.issues_updated, 0);\n assert_eq!(result.mrs_updated, 0);\n // Verify no HTTP requests were made\n assert_eq!(server.received_requests().await.unwrap().len(), 0);\n}\n\n#[tokio::test]\nasync fn cancellation_during_dependent_stage() {\n let server = MockServer::start().await;\n // Mock issue fetch (preflight succeeds)\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([mock_issue_json(7)])))\n .mount(&server).await;\n // Mock discussion fetch with delay (gives time to cancel)\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues/7/discussions\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([]))\n .set_body_delay(Duration::from_secs(2)))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issues: vec![7],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n\n // Cancel after a short delay (after preflight, during dependents)\n let signal_clone = signal.clone();\n tokio::spawn(async move {\n tokio::time::sleep(Duration::from_millis(200)).await;\n signal_clone.cancel();\n });\n\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"cancel-dep\"), &signal,\n ).await.unwrap();\n\n // Preflight should have run, but ingest may be partial\n assert!(result.surgical_mode == Some(true));\n}\n\n#[tokio::test]\nasync fn per_entity_timeout_with_continuation() {\n let server = MockServer::start().await;\n // Issue 7: slow response (simulates timeout)\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\\?.*iids\\[\\]=7\"))\n .respond_with(ResponseTemplate::new(200)\n 
.set_body_json(serde_json::json!([mock_issue_json(7)]))\n .set_body_delay(Duration::from_secs(30)))\n .mount(&server).await;\n // Issue 42: fast response\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\\?.*iids\\[\\]=42\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([mock_issue_json(42)])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issues: vec![7, 42],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n\n // With a per-entity timeout, issue 7 should fail, issue 42 should succeed\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"timeout-test\"), &signal,\n ).await.unwrap();\n\n let entities = result.entity_results.as_ref().unwrap();\n // One should be failed (timeout), one should be synced\n let failed = entities.iter().filter(|e| e.outcome == \"failed\").count();\n let synced = entities.iter().filter(|e| e.outcome == \"synced\").count();\n assert!(failed >= 1 || synced >= 1, \"Expected mixed outcomes\");\n}\n\n#[tokio::test]\nasync fn embed_scope_isolation() {\n let server = MockServer::start().await;\n // Mock two issues\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([\n mock_issue_json(7), mock_issue_json(42)\n ])))\n .mount(&server).await;\n // Mock empty discussions for both\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues/\\d+/discussions\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issues: vec![7, 42],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n no_embed: false,\n ..SyncOptions::default()\n };\n let 
signal = ShutdownSignal::new();\n\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"embed-iso\"), &signal,\n ).await.unwrap();\n\n // Embedding should only have processed documents from issues 7 and 42\n // Not the full corpus. Verify via document counts.\n assert!(result.documents_embedded <= 2,\n \"Expected at most 2 documents embedded (one per issue), got {}\",\n result.documents_embedded);\n}\n\n#[tokio::test]\nasync fn payload_project_id_mismatch_rejection() {\n let server = MockServer::start().await;\n // Return issue with project_id=999 (doesn't match resolved project_id=1)\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([{\n \"id\": 200, \"iid\": 7, \"project_id\": 999, \"title\": \"Wrong Project\",\n \"state\": \"opened\", \"created_at\": \"2026-01-01T00:00:00Z\",\n \"updated_at\": \"2026-02-17T00:00:00Z\",\n \"author\": {\"id\": 1, \"username\": \"dev\", \"name\": \"Dev\"},\n \"web_url\": \"https://gitlab.example.com/other/project/-/issues/7\"\n }])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issues: vec![7],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"mismatch\"), &signal,\n ).await.unwrap();\n\n let entities = result.entity_results.as_ref().unwrap();\n assert_eq!(entities.len(), 1);\n assert_eq!(entities[0].outcome, \"failed\");\n assert!(entities[0].error.as_ref().unwrap().contains(\"project_id\"));\n}\n\n#[tokio::test]\nasync fn successful_full_pipeline() {\n let server = MockServer::start().await;\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200)\n 
.set_body_json(serde_json::json!([mock_issue_json(7)])))\n .mount(&server).await;\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues/7/discussions\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([])))\n .mount(&server).await;\n // Mock any resource event endpoints\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues/7/resource_\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issues: vec![7],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n no_embed: true, // Skip embed to avoid Ollama dependency\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"full-pipe\"), &signal,\n ).await.unwrap();\n\n assert_eq!(result.surgical_mode, Some(true));\n assert_eq!(result.surgical_iids.as_ref().unwrap().issues, vec![7]);\n assert_eq!(result.preflight_only, Some(false));\n\n let entities = result.entity_results.as_ref().unwrap();\n assert_eq!(entities.len(), 1);\n assert_eq!(entities[0].entity_type, \"issue\");\n assert_eq!(entities[0].iid, 7);\n assert_eq!(entities[0].outcome, \"synced\");\n assert!(entities[0].error.is_none());\n\n assert!(result.issues_updated >= 1);\n assert!(result.documents_regenerated >= 1);\n}\n```\n\n## Edge Cases\n\n- **Wiremock delay vs tokio timeout**: Use `set_body_delay` on wiremock, not `tokio::time::sleep` in tests. The per-entity timeout in the orchestrator (bd-1i4i) should use `tokio::time::timeout` around the HTTP call.\n- **Embed isolation without Ollama**: Tests that verify embed scoping should either mock Ollama or use `no_embed: true` and verify the document ID list passed to the embed function. 
The `successful_full_pipeline` test uses `no_embed: true` to avoid requiring a running Ollama server in CI.\n- **Test isolation**: Each test creates its own `MockServer`, in-memory DB, and `ShutdownSignal`. No shared state between tests.\n- **Flakiness prevention**: Cancellation timing tests (test 2) use deterministic delays (cancel after 200ms, response delayed 2s). If flaky, increase the gap between cancel time and response delay.\n- **CI compatibility**: No real GitLab, no real Ollama, no real filesystem locks (in-memory DB means AppLock may need adaptation for tests — consider a test-only lock bypass or use a temp file DB for lock tests).\n\n## Dependency Context\n\n- **Depends on (upstream)**: bd-1i4i (the `run_sync_surgical` function under test), bd-wcja (SyncResult surgical fields to assert), bd-1lja (SyncOptions extensions), bd-3sez (surgical ingest for TOCTOU test), bd-arka (SyncRunRecorder for recorder state assertions), bd-1elx (scoped embed for isolation test), bd-kanh (per-entity helpers)\n- **No downstream dependents** — this is a terminal test-only bead.\n- These tests validate the behavioral contracts that all upstream beads promise. They are the acceptance gate for the surgical sync feature.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:18:46.182356Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:04:49.331351Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"]} {"id":"bd-3js","title":"Implement MR CLI commands (list, show, count)","description":"## Background\nCLI commands for viewing and filtering merge requests. Includes list, show, and count commands with MR-specific filters.\n\n## Approach\nUpdate existing CLI command files:\n1. `list.rs` - Add MR listing with filters\n2. `show.rs` - Add MR detail view with discussions\n3. 
`count.rs` - Add MR counting with state breakdown\n\n## Files\n- `src/cli/commands/list.rs` - Add MR subcommand\n- `src/cli/commands/show.rs` - Add MR detail view\n- `src/cli/commands/count.rs` - Add MR counting\n\n## Acceptance Criteria\n- [ ] `gi list mrs` shows MR table with iid, title, state, author, branches\n- [ ] `gi list mrs --state=merged` filters by state\n- [ ] `gi list mrs --state=locked` filters locally (not server-side)\n- [ ] `gi list mrs --draft` shows only draft MRs\n- [ ] `gi list mrs --no-draft` excludes draft MRs\n- [ ] `gi list mrs --reviewer=username` filters by reviewer\n- [ ] `gi list mrs --target-branch=main` filters by target branch\n- [ ] `gi list mrs --source-branch=feature/x` filters by source branch\n- [ ] Draft MRs show `[DRAFT]` prefix in title\n- [ ] `gi show mr ` displays full detail including discussions\n- [ ] DiffNote shows file context: `[src/file.ts:45]`\n- [ ] Multi-line DiffNote shows: `[src/file.ts:45-48]`\n- [ ] `gi show mr` shows `detailed_merge_status`\n- [ ] `gi count mrs` shows total with state breakdown\n- [ ] `gi sync-status` shows MR cursor positions\n- [ ] `cargo test cli_commands` passes\n\n## TDD Loop\nRED: `cargo test list_mrs` -> command not found\nGREEN: Add MR subcommand\nVERIFY: `gi list mrs --help`\n\n## gi list mrs Output\n```\nMerge Requests (showing 20 of 1,234)\n\n !847 Refactor auth to use JWT tokens merged @johndoe main <- feature/jwt 3 days ago\n !846 Fix memory leak in websocket handler opened @janedoe main <- fix/websocket 5 days ago\n !845 [DRAFT] Add dark mode CSS variables opened @bobsmith main <- ui/dark-mode 1 week ago\n```\n\n## SQL for MR Listing\n```sql\nSELECT \n m.iid, m.title, m.state, m.draft, m.author_username,\n m.target_branch, m.source_branch, m.updated_at\nFROM merge_requests m\nWHERE m.project_id = ?\n AND (? IS NULL OR m.state = ?) -- state filter\n AND (? IS NULL OR m.draft = ?) -- draft filter\n AND (? IS NULL OR m.author_username = ?) -- author filter\n AND (? 
IS NULL OR m.target_branch = ?) -- target-branch filter\n AND (? IS NULL OR m.source_branch = ?) -- source-branch filter\n AND (? IS NULL OR EXISTS ( -- reviewer filter\n SELECT 1 FROM mr_reviewers r \n WHERE r.merge_request_id = m.id AND r.username = ?\n ))\nORDER BY m.updated_at DESC\nLIMIT ?\n```\n\n## gi show mr Output\n```\nMerge Request !847: Refactor auth to use JWT tokens\n================================================================================\n\nProject: group/project-one\nState: merged\nDraft: No\nAuthor: @johndoe\nAssignees: @janedoe, @bobsmith\nReviewers: @alice, @charlie\nSource: feature/jwt\nTarget: main\nMerge Status: mergeable\nMerged By: @alice\nMerged At: 2024-03-20 14:30:00\nLabels: enhancement, auth, reviewed\n\nDescription:\n Moving away from session cookies to JWT-based authentication...\n\nDiscussions (8):\n\n @janedoe (2024-03-16) [src/auth/jwt.ts:45]:\n Should we use a separate signing key for refresh tokens?\n\n @johndoe (2024-03-16):\n Good point. I'll add a separate key with rotation support.\n\n @alice (2024-03-18) [RESOLVED]:\n Looks good! 
Just one nit about the token expiry constant.\n```\n\n## DiffNote File Context Display\n```rust\n// Build file context string\nlet file_context = match (note.position_new_path, note.position_new_line, note.position_line_range_end) {\n (Some(path), Some(line), Some(end_line)) if line != end_line => {\n format!(\"[{}:{}-{}]\", path, line, end_line)\n }\n (Some(path), Some(line), _) => {\n format!(\"[{}:{}]\", path, line)\n }\n _ => String::new(),\n};\n```\n\n## gi count mrs Output\n```\nMerge Requests: 1,234\n opened: 89\n merged: 1,045\n closed: 100\n```\n\n## Filter Arguments (clap)\n```rust\n#[derive(Parser)]\nstruct ListMrsArgs {\n #[arg(long)]\n state: Option, // opened|merged|closed|locked|all\n #[arg(long)]\n draft: bool,\n #[arg(long)]\n no_draft: bool,\n #[arg(long)]\n author: Option,\n #[arg(long)]\n assignee: Option,\n #[arg(long)]\n reviewer: Option,\n #[arg(long)]\n target_branch: Option,\n #[arg(long)]\n source_branch: Option,\n #[arg(long)]\n label: Vec,\n #[arg(long)]\n project: Option,\n #[arg(long, default_value = \"20\")]\n limit: u32,\n}\n```\n\n## Edge Cases\n- `--state=locked` must filter locally (GitLab API doesn't support it)\n- Ambiguous MR iid across projects: prompt for `--project`\n- Empty discussions: show \"No discussions\" message\n- Multi-line DiffNotes: show line range in context","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:43.354939Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:37:31.792569Z","closed_at":"2026-01-27T00:37:31.792504Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3js","depends_on_id":"bd-20h","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3js","depends_on_id":"bd-ser","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3kj","title":"[CP0] gi version, backup, reset, sync-status commands","description":"## Background\n\nThese are the remaining 
utility commands for CP0. version is trivial. backup creates safety copies before destructive operations. reset provides clean-slate capability. sync-status is a stub for CP0 that will be implemented in CP1.\n\nReference: docs/prd/checkpoint-0.md sections \"gi version\", \"gi backup\", \"gi reset\", \"gi sync-status\"\n\n## Approach\n\n**src/cli/commands/version.ts:**\n```typescript\nimport { Command } from 'commander';\nimport { version } from '../../../package.json' with { type: 'json' };\n\nexport const versionCommand = new Command('version')\n .description('Show version information')\n .action(() => {\n console.log(\\`gi version \\${version}\\`);\n });\n```\n\n**src/cli/commands/backup.ts:**\n```typescript\nimport { Command } from 'commander';\nimport { copyFileSync, mkdirSync } from 'node:fs';\nimport { loadConfig } from '../../core/config';\nimport { getDbPath, getBackupDir } from '../../core/paths';\n\nexport const backupCommand = new Command('backup')\n .description('Create timestamped database backup')\n .action(async (options, command) => {\n const globalOpts = command.optsWithGlobals();\n const config = loadConfig(globalOpts.config);\n \n const dbPath = getDbPath(config.storage?.dbPath);\n const backupDir = getBackupDir(config.storage?.backupDir);\n \n mkdirSync(backupDir, { recursive: true });\n \n // Format: data-2026-01-24T10-30-00.db (colons replaced for Windows compat)\n const timestamp = new Date().toISOString().replace(/:/g, '-').replace(/\\\\..*/, '');\n const backupPath = \\`\\${backupDir}/data-\\${timestamp}.db\\`;\n \n copyFileSync(dbPath, backupPath);\n console.log(\\`Created backup: \\${backupPath}\\`);\n });\n```\n\n**src/cli/commands/reset.ts:**\n```typescript\nimport { Command } from 'commander';\nimport { unlinkSync, existsSync } from 'node:fs';\nimport { createInterface } from 'node:readline';\nimport { loadConfig } from '../../core/config';\nimport { getDbPath } from '../../core/paths';\n\nexport const resetCommand = new 
Command('reset')\n .description('Delete database and reset all state')\n .option('--confirm', 'Skip confirmation prompt')\n .action(async (options, command) => {\n const globalOpts = command.optsWithGlobals();\n const config = loadConfig(globalOpts.config);\n const dbPath = getDbPath(config.storage?.dbPath);\n \n if (!existsSync(dbPath)) {\n console.log('No database to reset.');\n return;\n }\n \n if (!options.confirm) {\n console.log(\\`This will delete:\\n - Database: \\${dbPath}\\n - All sync cursors\\n - All cached data\\n\\`);\n // Prompt for 'yes' confirmation\n // If not 'yes', exit 2\n }\n \n unlinkSync(dbPath);\n // Also delete WAL and SHM files if they exist\n if (existsSync(\\`\\${dbPath}-wal\\`)) unlinkSync(\\`\\${dbPath}-wal\\`);\n if (existsSync(\\`\\${dbPath}-shm\\`)) unlinkSync(\\`\\${dbPath}-shm\\`);\n \n console.log(\"Database reset. Run 'gi sync' to repopulate.\");\n });\n```\n\n**src/cli/commands/sync-status.ts:**\n```typescript\n// CP0 stub - full implementation in CP1\nexport const syncStatusCommand = new Command('sync-status')\n .description('Show sync state')\n .action(() => {\n console.log(\"No sync runs yet. 
Run 'gi sync' to start.\");\n });\n```\n\n## Acceptance Criteria\n\n- [ ] `gi version` outputs \"gi version X.Y.Z\"\n- [ ] `gi backup` creates timestamped copy of database\n- [ ] Backup filename is Windows-compatible (no colons)\n- [ ] Backup directory created if missing\n- [ ] `gi reset` prompts for 'yes' confirmation\n- [ ] `gi reset --confirm` skips prompt\n- [ ] Reset deletes .db, .db-wal, and .db-shm files\n- [ ] Reset exits 2 if user doesn't type 'yes'\n- [ ] `gi sync-status` outputs stub message\n\n## Files\n\nCREATE:\n- src/cli/commands/version.ts\n- src/cli/commands/backup.ts\n- src/cli/commands/reset.ts\n- src/cli/commands/sync-status.ts\n\n## TDD Loop\n\nN/A - simple commands, verify manually:\n\n```bash\ngi version\ngi backup\nls ~/.local/share/gi/backups/\ngi reset # type 'no'\ngi reset --confirm\nls ~/.local/share/gi/data.db # should not exist\ngi sync-status\n```\n\n## Edge Cases\n\n- Backup when database doesn't exist - show clear error\n- Reset when database doesn't exist - show \"No database to reset\"\n- WAL/SHM files may not exist - check before unlinking\n- Timestamp with milliseconds could cause very long filename\n- readline prompt in non-interactive terminal - handle SIGINT","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:51.774210Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:31:46.227285Z","closed_at":"2026-01-25T03:31:46.227220Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3kj","depends_on_id":"bd-13b","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3kj","depends_on_id":"bd-3ng","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-3l56","title":"Add lore sync --tui convenience flag","description":"## Background\n\nThe PRD defines two CLI entry paths to the TUI: `lore tui` (full TUI) and `lore sync --tui` (convenience shortcut that launches the TUI directly on the 
Sync screen in inline mode). The `lore tui` command is covered by bd-26lp. This bead adds the `--tui` flag to the existing `SyncArgs` struct, which delegates to the `lore-tui` binary with `--sync` flag.\n\n## Approach\n\nTwo changes to the existing lore CLI crate (NOT the lore-tui crate):\n\n1. **Add `--tui` flag to `SyncArgs`** in `src/cli/mod.rs`:\n ```rust\n /// Show sync progress in interactive TUI (inline mode)\n #[arg(long)]\n pub tui: bool,\n ```\n\n2. **Handle the flag in sync command dispatch** in `src/main.rs` (or wherever Commands::Sync is matched):\n - If `args.tui` is true, call `resolve_tui_binary()` (from bd-26lp) and spawn it with `--sync` flag\n - Forward the config path if specified\n - Exit with the lore-tui process exit code\n - If lore-tui is not found, print a helpful error message\n\nThe `resolve_tui_binary()` function is implemented by bd-26lp (CLI integration). This bead simply adds the flag and the early-return delegation path in the sync command handler.\n\n## Acceptance Criteria\n- [ ] `lore sync --tui` is accepted by the CLI parser (no unknown flag error)\n- [ ] When `--tui` is set, the sync command delegates to `lore-tui --sync` binary\n- [ ] Config path is forwarded if `--config` was specified\n- [ ] If lore-tui binary is not found, prints error with install instructions and exits non-zero\n- [ ] `lore sync --tui --full` does NOT pass `--full` to lore-tui (TUI has its own sync controls)\n- [ ] `--tui` flag appears in `lore sync --help` output\n\n## Files\n- MODIFY: src/cli/mod.rs (add `tui: bool` field to `SyncArgs` struct at line ~776)\n- MODIFY: src/main.rs or src/cli/commands/sync.rs (add early-return delegation when `args.tui`)\n\n## TDD Anchor\nRED: Write `test_sync_tui_flag_accepted` that verifies `SyncArgs` can be parsed with `--tui` flag.\nGREEN: Add the `tui: bool` field to SyncArgs.\nVERIFY: cargo test sync_tui_flag\n\nAdditional tests:\n- test_sync_tui_flag_default_false (not set by default)\n\n## Edge Cases\n- `--tui` 
combined with `--dry-run` — the TUI handles dry-run internally, so `--dry-run` should be ignored when `--tui` is set (or warn)\n- `--tui` when lore-tui binary does not exist — clear error, not a panic\n- `--tui` in robot mode (`--robot`) — nonsensical combination, should error with \"cannot use --tui with --robot\"\n\n## Dependency Context\n- Depends on bd-26lp (CLI integration) which implements `resolve_tui_binary()` and `validate_tui_compat()` functions that this bead calls.\n- The SyncArgs struct is at src/cli/mod.rs:739. The existing fields are: full, no_full, force, no_force, no_embed, no_docs, no_events, no_file_changes, dry_run, no_dry_run.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:40.785182Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:29:49.341576Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3l56","depends_on_id":"bd-26lp","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-3l56","title":"Add lore sync --tui convenience flag","description":"## Background\n\nThe PRD defines two CLI entry paths to the TUI: `lore tui` (full TUI) and `lore sync --tui` (convenience shortcut that launches the TUI directly on the Sync screen in inline mode). The `lore tui` command is covered by bd-26lp. This bead adds the `--tui` flag to the existing `SyncArgs` struct, which delegates to the `lore-tui` binary with `--sync` flag.\n\n## Approach\n\nTwo changes to the existing lore CLI crate (NOT the lore-tui crate):\n\n1. **Add `--tui` flag to `SyncArgs`** in `src/cli/mod.rs`:\n ```rust\n /// Show sync progress in interactive TUI (inline mode)\n #[arg(long)]\n pub tui: bool,\n ```\n\n2. 
**Handle the flag in sync command dispatch** in `src/main.rs` (or wherever Commands::Sync is matched):\n - If `args.tui` is true, call `resolve_tui_binary()` (from bd-26lp) and spawn it with `--sync` flag\n - Forward the config path if specified\n - Exit with the lore-tui process exit code\n - If lore-tui is not found, print a helpful error message\n\nThe `resolve_tui_binary()` function is implemented by bd-26lp (CLI integration). This bead simply adds the flag and the early-return delegation path in the sync command handler.\n\n## Acceptance Criteria\n- [ ] `lore sync --tui` is accepted by the CLI parser (no unknown flag error)\n- [ ] When `--tui` is set, the sync command delegates to `lore-tui --sync` binary\n- [ ] Config path is forwarded if `--config` was specified\n- [ ] If lore-tui binary is not found, prints error with install instructions and exits non-zero\n- [ ] `lore sync --tui --full` does NOT pass `--full` to lore-tui (TUI has its own sync controls)\n- [ ] `--tui` flag appears in `lore sync --help` output\n\n## Files\n- MODIFY: src/cli/mod.rs (add `tui: bool` field to `SyncArgs` struct at line ~776)\n- MODIFY: src/main.rs or src/cli/commands/sync.rs (add early-return delegation when `args.tui`)\n\n## TDD Anchor\nRED: Write `test_sync_tui_flag_accepted` that verifies `SyncArgs` can be parsed with `--tui` flag.\nGREEN: Add the `tui: bool` field to SyncArgs.\nVERIFY: cargo test sync_tui_flag\n\nAdditional tests:\n- test_sync_tui_flag_default_false (not set by default)\n\n## Edge Cases\n- `--tui` combined with `--dry-run` — the TUI handles dry-run internally, so `--dry-run` should be ignored when `--tui` is set (or warn)\n- `--tui` when lore-tui binary does not exist — clear error, not a panic\n- `--tui` in robot mode (`--robot`) — nonsensical combination, should error with \"cannot use --tui with --robot\"\n\n## Dependency Context\n- Depends on bd-26lp (CLI integration) which implements `resolve_tui_binary()` and `validate_tui_compat()` functions that 
this bead calls.\n- The SyncArgs struct is at src/cli/mod.rs:739. The existing fields are: full, no_full, force, no_force, no_embed, no_docs, no_events, no_file_changes, dry_run, no_dry_run.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:40.785182Z","created_by":"tayloreernisse","updated_at":"2026-02-19T04:47:46.349240Z","closed_at":"2026-02-19T04:47:46.349151Z","close_reason":"Added --tui flag to SyncArgs with early-return delegation to lore-tui --sync. Robot mode check, config forwarding, autocorrect registry updated.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3l56","depends_on_id":"bd-26lp","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3lc","title":"Rename GiError to LoreError across codebase","description":"## Background\nThe codebase currently uses `GiError` as the primary error enum name (legacy from when the project was called \"gi\"). Checkpoint 3 introduces new modules (documents, search, embedding) that import error types. Renaming before Gate A work begins prevents every subsequent bead from needing to reference the old name and avoids merge conflicts across parallel work streams.\n\n## Approach\nMechanical find-and-replace using `ast-grep` or `sed`:\n1. Rename the enum declaration in `src/core/error.rs`: `pub enum GiError` -> `pub enum LoreError`\n2. Update the type alias: `pub type Result = std::result::Result;`\n3. Update re-exports in `src/core/mod.rs` and `src/lib.rs`\n4. Update all `use` statements across ~16 files that import `GiError`\n5. Update any `GiError::` variant construction sites\n6. Run `cargo build` to verify no references remain\n\n**Do NOT change:**\n- Error variant names (ConfigNotFound, etc.) 
— only the enum name\n- ErrorCode enum — it's already named correctly\n- RobotError — already named correctly\n\n## Acceptance Criteria\n- [ ] `cargo build` succeeds with zero warnings about GiError\n- [ ] `rg GiError src/` returns zero results\n- [ ] `rg LoreError src/core/error.rs` shows the enum declaration\n- [ ] `src/core/mod.rs` re-exports `LoreError` (not `GiError`)\n- [ ] `src/lib.rs` re-exports `LoreError`\n- [ ] All `use crate::core::error::LoreError` imports compile\n\n## Files\n- `src/core/error.rs` — enum rename + type alias\n- `src/core/mod.rs` — re-export update\n- `src/lib.rs` — re-export update\n- All files matching `rg 'GiError' src/` (~16 files: ingestion/*.rs, cli/commands/*.rs, gitlab/*.rs, main.rs)\n\n## TDD Loop\nRED: `cargo build` fails after renaming enum but before fixing imports\nGREEN: Fix all imports; `cargo build` succeeds\nVERIFY: `cargo build && rg GiError src/ && echo \"FAIL: GiError references remain\" || echo \"PASS: clean\"`\n\n## Edge Cases\n- Some files may use `GiError` in string literals (error messages) — do NOT rename those, only type references\n- `impl From for GiError` blocks must become `impl From for LoreError`\n- The `thiserror` derive macro on the enum does not reference the name, so no macro changes needed","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:25:25.694773Z","created_by":"tayloreernisse","updated_at":"2026-01-30T16:50:10.612340Z","closed_at":"2026-01-30T16:50:10.612278Z","close_reason":"Completed: renamed GiError to LoreError across all 16 files, cargo build + 164 tests pass","compaction_level":0,"original_size":0} {"id":"bd-3le2","title":"Implement TaskSupervisor (dedup + cancellation + generation IDs)","description":"## Background\nBackground tasks (DB queries, sync, search) are managed by a centralized TaskSupervisor that prevents redundant work, enables cooperative cancellation, and uses generation IDs for stale-result detection. 
This is the ONLY allowed path for background work — state handlers return ScreenIntent, not Cmd::task directly.\n\n## Approach\nCreate crates/lore-tui/src/task_supervisor.rs:\n- TaskKey enum: LoadScreen(Screen), Search, SyncStream, FilterRequery(Screen) — dedup keys, NOT generation-bearing\n- TaskPriority enum: Input(0), Navigation(1), Background(2)\n- CancelToken: AtomicBool wrapper with cancel(), is_cancelled()\n- TaskHandle struct: key (TaskKey), generation (u64), cancel (Arc), interrupt (Option)\n- TaskSupervisor struct: active (HashMap), generation (AtomicU64)\n- submit(key: TaskKey) -> TaskHandle: cancels existing task with same key (via CancelToken), increments generation, stores new handle, returns TaskHandle\n- is_current(key: &TaskKey, generation: u64) -> bool: checks if generation matches active handle\n- complete(key: &TaskKey, generation: u64): removes handle if generation matches\n- cancel_all(): cancels all active tasks (used on quit)\n\n## Acceptance Criteria\n- [ ] submit() with existing key cancels previous task's CancelToken\n- [ ] submit() returns handle with monotonically increasing generation\n- [ ] is_current() returns true only for the latest generation\n- [ ] complete() removes handle only if generation matches (prevents removing newer task)\n- [ ] CancelToken is Arc-wrapped and thread-safe (Send+Sync)\n- [ ] TaskHandle includes optional InterruptHandle for SQLite cancellation\n- [ ] Generation counter never wraps during reasonable use (AtomicU64)\n\n## Files\n- CREATE: crates/lore-tui/src/task_supervisor.rs\n\n## TDD Anchor\nRED: Write test_submit_cancels_previous that submits two tasks with same key, asserts first task's CancelToken is cancelled.\nGREEN: Implement submit() with cancel-on-supersede logic.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_submit_cancels\n\nAdditional tests:\n- test_is_current_after_supersede: old generation returns false, new returns true\n- test_complete_removes_handle: after complete, 
key is absent from active map\n- test_complete_ignores_stale: completing with old generation doesn't remove newer task\n- test_generation_monotonic: submit() always returns increasing generation values\n\n## Edge Cases\n- CancelToken uses Relaxed ordering — sufficient for cooperative cancellation polling\n- Generation u64 overflow is theoretical but worth noting (would require 2^64 submissions)\n- submit() must cancel old task BEFORE storing new handle to prevent race conditions\n- InterruptHandle is rusqlite-specific — only set for tasks that lease a reader connection","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:56:21.102488Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:34:00.626061Z","closed_at":"2026-02-12T20:34:00.626008Z","close_reason":"Implemented task_supervisor.rs: TaskSupervisor with dedup/cancel/generation IDs, CancelToken (AtomicBool), InterruptHandle (rusqlite), TaskHandle, TaskKey, TaskPriority. Also added Hash to Screen. 11 tests. Quality gate green (102 total).","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3le2","depends_on_id":"bd-c9gk","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3lu","title":"Implement lore search CLI command (lexical mode)","description":"## Background\nThe search CLI command is the user-facing entry point for Gate A lexical search. It orchestrates the search pipeline: query parsing -> FTS5 search -> filter application -> result hydration (single round-trip) -> display. Gate B extends this same command with --mode=hybrid and --mode=semantic. 
The hydration query is critical for performance — it fetches all display fields + labels + paths in one SQL query using json_each() + json_group_array().\n\n## Approach\nCreate `src/cli/commands/search.rs` per PRD Section 3.4.\n\n**Key types:**\n- `SearchResultDisplay` — display-ready result with all fields (dates as ISO via `ms_to_iso`)\n- `ExplainData` — ranking explanation for --explain flag (vector_rank, fts_rank, rrf_score)\n- `SearchResponse` — wrapper with query, mode, total_results, results, warnings\n\n**Core function:**\n```rust\npub fn run_search(\n config: &Config,\n query: &str,\n mode: SearchMode,\n filters: SearchFilters,\n explain: bool,\n) -> Result\n```\n\n**Pipeline:**\n1. Parse query + filters\n2. Execute search based on mode -> ranked doc_ids (+ explain ranks)\n3. Apply post-retrieval filters via apply_filters() preserving ranking order\n4. Hydrate results in single DB round-trip using json_each + json_group_array\n5. Attach snippets: prefer FTS snippet, fallback to `generate_fallback_snippet()` for semantic-only\n6. Convert timestamps via `ms_to_iso()` from `crate::core::time`\n7. Build SearchResponse\n\n**Hydration query (critical — single round-trip, replaces 60 queries with 1):**\n```sql\nSELECT d.id, d.source_type, d.title, d.url, d.author_username,\n d.created_at, d.updated_at, d.content_text,\n p.path_with_namespace AS project_path,\n (SELECT json_group_array(dl.label_name)\n FROM document_labels dl WHERE dl.document_id = d.id) AS labels,\n (SELECT json_group_array(dp.path)\n FROM document_paths dp WHERE dp.document_id = d.id) AS paths\nFROM json_each(?) 
AS j\nJOIN documents d ON d.id = j.value\nJOIN projects p ON p.id = d.project_id\nORDER BY j.key\n```\n\n**Human output uses `console::style` for terminal formatting:**\n```rust\nuse console::style;\n// Type prefix in cyan\nprintln!(\"[{}] {} - {} ({})\", i+1, style(type_prefix).cyan(), title, score);\n// URL in dim\nprintln!(\" {}\", style(url).dim());\n```\n\n**JSON robot mode includes elapsed_ms in meta (PRD Section 3.4):**\n```rust\npub fn print_search_results_json(response: &SearchResponse, elapsed_ms: u64) {\n let output = serde_json::json!({\n \"ok\": true,\n \"data\": response,\n \"meta\": { \"elapsed_ms\": elapsed_ms }\n });\n println!(\"{}\", serde_json::to_string_pretty(&output).unwrap());\n}\n```\n\n**CLI args in `src/cli/mod.rs` (PRD Section 3.4):**\n```rust\n#[derive(Args)]\npub struct SearchArgs {\n query: String,\n #[arg(long, default_value = \"hybrid\")]\n mode: String,\n #[arg(long, value_name = \"TYPE\")]\n r#type: Option,\n #[arg(long)]\n author: Option,\n #[arg(long)]\n project: Option,\n #[arg(long, action = clap::ArgAction::Append)]\n label: Vec,\n #[arg(long)]\n path: Option,\n #[arg(long)]\n after: Option,\n #[arg(long)]\n updated_after: Option,\n #[arg(long, default_value = \"20\")]\n limit: usize,\n #[arg(long)]\n explain: bool,\n #[arg(long, default_value = \"safe\")]\n fts_mode: String,\n}\n```\n\n**IMPORTANT: default_value = \"hybrid\"** — When Ollama is unavailable, hybrid mode gracefully degrades to FTS-only with a warning (not an error). 
`lore search` works without Ollama.\n\n## Acceptance Criteria\n- [ ] Default mode is \"hybrid\" (not \"lexical\") per PRD\n- [ ] Hybrid mode degrades gracefully to FTS-only when Ollama unavailable (warning, not error)\n- [ ] All filters work (type, author, project, label, path, after, updated_after, limit)\n- [ ] Label filter uses `clap::ArgAction::Append` for repeatable --label flags\n- [ ] Hydration in single query (not N+1) — uses json_each + json_group_array\n- [ ] Timestamps converted via `ms_to_iso()` for display (ISO format)\n- [ ] Human output uses `console::style` for colored type prefix (cyan) and dim URLs\n- [ ] JSON robot mode includes `elapsed_ms` in `meta` field\n- [ ] Semantic-only results get fallback snippets via `generate_fallback_snippet()`\n- [ ] Empty results show friendly message: \"No results found for 'query'\"\n- [ ] \"No data indexed\" message if documents table empty\n- [ ] --explain shows vector_rank, fts_rank, rrf_score per result\n- [ ] --fts-mode=safe preserves prefix `*` while escaping special chars\n- [ ] --fts-mode=raw passes FTS5 MATCH syntax through unchanged\n- [ ] --mode=semantic with 0% embedding coverage returns LoreError::EmbeddingsNotBuilt (not OllamaUnavailable)\n- [ ] SearchArgs registered in cli/mod.rs with Clap derive\n- [ ] `cargo build` succeeds\n\n## Files\n- `src/cli/commands/search.rs` — new file\n- `src/cli/commands/mod.rs` — add `pub mod search;`\n- `src/cli/mod.rs` — add SearchArgs struct, wire up search subcommand\n- `src/main.rs` — add search command handler\n\n## TDD Loop\nRED: Integration test requiring DB with documents\n- `test_lexical_search_returns_results` — FTS search returns hits\n- `test_hydration_single_query` — verify no N+1 (mock/inspect query count)\n- `test_json_output_includes_elapsed` — robot mode JSON has meta.elapsed_ms\n- `test_empty_results_message` — zero results shows friendly message\n- `test_fallback_snippet` — semantic-only result uses truncated content\nGREEN: Implement run_search + 
hydrate_results + print functions\nVERIFY: `cargo build && cargo test search`\n\n## Edge Cases\n- Zero results: display friendly empty message, JSON returns empty array\n- --mode=semantic with 0% embedding coverage: return LoreError::EmbeddingsNotBuilt\n- json_group_array returns \"[]\" for documents with no labels — parse as empty array\n- Very long snippets: truncated at display time\n- Hybrid default works without Ollama: degrades to FTS-only with warning\n- ms_to_iso with epoch 0: return valid ISO string (not crash)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:13.109876Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:52:24.320923Z","closed_at":"2026-01-30T17:52:24.320857Z","close_reason":"Implemented search CLI with FTS5 + RRF ranking, single-query hydration (json_each + json_group_array), adaptive recall, all filters, --explain, human + JSON output. Builds clean.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3lu","depends_on_id":"bd-1k1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3lu","depends_on_id":"bd-3q2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3lu","depends_on_id":"bd-3qs","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -326,7 +326,7 @@ {"id":"bd-x8oq","title":"Write surgical_tests.rs with TDD test suite","description":"## Background\n\nThe surgical sync module (`src/ingestion/surgical.rs` from bd-3sez) needs a comprehensive test suite. Tests use in-memory SQLite (no real GitLab or Ollama) and wiremock for HTTP mocks. 
The test file lives at `src/ingestion/surgical_tests.rs` and is included via `#[cfg(test)] #[path = \"surgical_tests.rs\"] mod tests;` in surgical.rs.\n\nKey testing constraints:\n- In-memory DB pattern: `create_connection(Path::new(\":memory:\"))` + `run_migrations(&conn)`\n- Test project insert: `INSERT INTO projects (gitlab_project_id, path_with_namespace, web_url)` (no `name`/`last_seen_at` columns)\n- `GitLabIssue` required fields: `id`, `iid`, `project_id`, `title`, `state`, `created_at`, `updated_at`, `author`, `web_url`\n- `GitLabMergeRequest` adds: `source_branch`, `target_branch`, `draft`, `merge_status`, `reviewers`\n- `updated_at` is `String` (ISO 8601) in GitLab types, e.g. `\"2026-02-17T12:00:00.000+00:00\"`\n- `SourceType` enum variants: `Issue`, `MergeRequest`, `Discussion`, `Note`\n- `dirty_sources` table: `(source_type TEXT, source_id INTEGER)` primary key\n\n## Approach\n\nCreate `src/ingestion/surgical_tests.rs` with:\n\n### Test Helpers\n- `setup_db() -> Connection` — in-memory DB with migrations + test project row\n- `make_test_issue(iid: i64, updated_at: &str) -> GitLabIssue` — minimal valid JSON fixture\n- `make_test_mr(iid: i64, updated_at: &str) -> GitLabMergeRequest` — minimal valid JSON fixture\n- `get_db_updated_at(conn, table, iid) -> Option` — helper to query DB updated_at for assertions\n- `get_dirty_keys(conn) -> Vec<(String, i64)>` — query dirty_sources for assertions\n\n### Sync Tests (13)\n1. `test_ingest_issue_by_iid_upserts_and_marks_dirty` — fresh issue ingest, verify DB row + dirty_sources entry\n2. `test_ingest_mr_by_iid_upserts_and_marks_dirty` — fresh MR ingest, verify DB row + dirty_sources entry\n3. `test_toctou_skips_stale_issue` — insert issue at T1, call ingest with payload at T1, assert skipped_stale=true and no dirty mark\n4. `test_toctou_skips_stale_mr` — same for MRs\n5. `test_toctou_allows_newer_issue` — DB has T1, payload has T2 (T2 > T1), assert upserted=true\n6. `test_toctou_allows_newer_mr` — same for MRs\n7. 
`test_is_stale_parses_iso8601` — unit test: `\"2026-02-17T12:00:00.000+00:00\"` parses to correct ms-epoch\n8. `test_is_stale_handles_none_db_value` — first ingest, no DB row, assert not stale\n9. `test_is_stale_with_z_suffix` — `\"2026-02-17T12:00:00Z\"` also parses correctly\n10. `test_ingest_issue_returns_dirty_source_keys` — verify `dirty_source_keys` contains `(SourceType::Issue, local_id)`\n11. `test_ingest_mr_returns_dirty_source_keys` — verify MR dirty source keys\n12. `test_ingest_issue_updates_existing` — ingest same IID twice with newer updated_at, verify update\n13. `test_ingest_mr_updates_existing` — same for MRs\n\n### Async Preflight Test (1, wiremock)\n14. `test_preflight_fetch_returns_issues_and_mrs` — wiremock GET `/projects/:id/issues?iids[]=42` returns 200 with fixture, verify PreflightResult.issues has 1 entry\n\n### Integration Stubs (4, for bd-3jqx)\n15. `test_surgical_cancellation_during_preflight` — stub: signal.cancel() before preflight, verify early return\n16. `test_surgical_timeout_during_fetch` — stub: wiremock delay exceeds timeout\n17. `test_surgical_embed_isolation` — stub: verify only surgical docs get embedded\n18. `test_surgical_payload_integrity` — stub: verify ingested data matches GitLab payload exactly\n\n## Acceptance Criteria\n\n- [ ] All 13 sync tests pass with in-memory SQLite\n- [ ] Async preflight test passes with wiremock\n- [ ] 4 integration stubs compile and are marked `#[ignore]` (implemented in bd-3jqx)\n- [ ] Test helpers produce valid GitLabIssue/GitLabMergeRequest fixtures that pass `transform_issue`/`transform_merge_request`\n- [ ] No flaky tests: deterministic timestamps, no real network calls\n- [ ] File wired into surgical.rs via `#[cfg(test)] #[path = \"surgical_tests.rs\"] mod tests;`\n\n## Files\n\n- `src/ingestion/surgical_tests.rs` (NEW)\n- `src/ingestion/surgical.rs` (add `#[cfg(test)]` module path — created in bd-3sez)\n\n## TDD Anchor\n\nThis bead IS the test suite. 
Tests are written first (TDD red phase), then bd-3sez implements the production code to make them pass (green phase). Specific test signatures:\n\n```rust\n#[test]\nfn test_ingest_issue_by_iid_upserts_and_marks_dirty() {\n let conn = setup_db();\n let issue = make_test_issue(42, \"2026-02-17T12:00:00.000+00:00\");\n let config = Config::default();\n let result = ingest_issue_by_iid(&conn, &config, /*project_id=*/1, &issue).unwrap();\n assert!(result.upserted);\n assert!(!result.skipped_stale);\n let dirty = get_dirty_keys(&conn);\n assert!(dirty.contains(&(\"issue\".to_string(), /*local_id from DB*/)));\n}\n\n#[test]\nfn test_toctou_skips_stale_issue() {\n let conn = setup_db();\n let issue = make_test_issue(42, \"2026-02-17T12:00:00.000+00:00\");\n ingest_issue_by_iid(&conn, &Config::default(), 1, &issue).unwrap();\n // Ingest same timestamp again\n let result = ingest_issue_by_iid(&conn, &Config::default(), 1, &issue).unwrap();\n assert!(result.skipped_stale);\n}\n\n#[tokio::test]\nasync fn test_preflight_fetch_returns_issues_and_mrs() {\n let mock = MockServer::start().await;\n // ... 
wiremock setup ...\n}\n```\n\n## Edge Cases\n\n- `make_test_issue` must produce all required fields (`id`, `iid`, `project_id`, `title`, `state`, `created_at`, `updated_at`, `author` with `username` and `id`, `web_url`) or `transform_issue` will fail\n- `make_test_mr` additionally needs `source_branch`, `target_branch`, `draft`, `merge_status`, `reviewers`\n- ISO 8601 fixtures must use `+00:00` suffix (GitLab format), not `Z`\n- Integration stubs must be `#[ignore]` so they do not fail CI before bd-3jqx implements them\n- Test DB needs `run_migrations` to create all tables including `dirty_sources`, `documents`, `issues`, `merge_requests`\n\n## Dependency Context\n\n- **Blocked by bd-3sez**: Cannot compile tests until surgical.rs module exists (circular co-dependency — develop together)\n- **Blocks bd-3jqx**: Integration test stubs are implemented in that bead\n- **No other blockers**: Uses only in-memory DB and wiremock, no external dependencies","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:15:05.498388Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:02:42.840151Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"]} {"id":"bd-xhz","title":"[CP1] GitLab client pagination methods","description":"## Background\n\nGitLab pagination methods enable fetching large result sets (issues, discussions) as async streams. 
The client uses `x-next-page` headers to determine continuation and applies cursor rewind for tuple-based incremental sync.\n\n## Approach\n\nAdd pagination methods to GitLabClient using `async-stream` crate:\n\n### Methods to Add\n\n```rust\nimpl GitLabClient {\n /// Paginate through issues for a project.\n pub fn paginate_issues(\n &self,\n gitlab_project_id: i64,\n updated_after: Option, // ms epoch cursor\n cursor_rewind_seconds: u32,\n ) -> Pin> + Send + '_>>\n\n /// Paginate through discussions for an issue.\n pub fn paginate_issue_discussions(\n &self,\n gitlab_project_id: i64,\n issue_iid: i64,\n ) -> Pin> + Send + '_>>\n\n /// Make request and return response with headers for pagination.\n async fn request_with_headers(\n &self,\n path: &str,\n params: &[(&str, String)],\n ) -> Result<(T, HeaderMap)>\n}\n```\n\n### Pagination Logic\n\n1. Start at page 1, per_page=100\n2. For issues: add scope=all, state=all, order_by=updated_at, sort=asc\n3. Apply cursor rewind: `updated_after = cursor - rewind_seconds` (clamped to 0)\n4. Yield each item from response\n5. Check `x-next-page` header for continuation\n6. 
Stop when header is empty/absent OR response is empty\n\n### Cursor Rewind\n\n```rust\nif let Some(ts) = updated_after {\n let rewind_ms = (cursor_rewind_seconds as i64) * 1000;\n let rewound = (ts - rewind_ms).max(0); // Clamp to avoid underflow\n // Convert to ISO 8601 for updated_after param\n}\n```\n\n## Acceptance Criteria\n\n- [ ] `paginate_issues` returns Stream of GitLabIssue\n- [ ] `paginate_issues` adds scope=all, state=all, order_by=updated_at, sort=asc\n- [ ] `paginate_issues` applies cursor rewind with max(0) clamping\n- [ ] `paginate_issue_discussions` returns Stream of GitLabDiscussion\n- [ ] Both methods follow x-next-page header until empty\n- [ ] Both methods stop on empty response (fallback)\n- [ ] `request_with_headers` returns (T, HeaderMap) tuple\n\n## Files\n\n- src/gitlab/client.rs (edit - add methods)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/pagination_tests.rs\n#[tokio::test] async fn fetches_all_pages_when_multiple_exist()\n#[tokio::test] async fn respects_per_page_parameter()\n#[tokio::test] async fn follows_x_next_page_header_until_empty()\n#[tokio::test] async fn falls_back_to_empty_page_stop_if_headers_missing()\n#[tokio::test] async fn applies_cursor_rewind_for_tuple_semantics()\n#[tokio::test] async fn clamps_negative_rewind_to_zero()\n```\n\nGREEN: Implement pagination methods with async-stream\n\nVERIFY: `cargo test pagination`\n\n## Edge Cases\n\n- cursor_updated_at near zero - rewind must not underflow (use max(0))\n- GitLab returns empty x-next-page - treat as end of pages\n- GitLab omits pagination headers entirely - use empty response as stop condition\n- DateTime conversion fails - omit updated_after and fetch all (safe fallback)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.222168Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:28:39.192876Z","closed_at":"2026-01-25T22:28:39.192815Z","close_reason":"Implemented paginate_issues and paginate_issue_discussions with 
async-stream, cursor rewind with max(0) clamping, x-next-page header following, 4 unit tests passing","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-xhz","depends_on_id":"bd-1np","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-xhz","depends_on_id":"bd-2ys","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-xsgw","title":"NOTE-TEST2: Another test bead","description":"type: task","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:58:53.392214Z","updated_at":"2026-02-12T16:59:02.051710Z","closed_at":"2026-02-12T16:59:02.051663Z","close_reason":"test","compaction_level":0,"original_size":0} -{"id":"bd-y095","title":"Implement SyncDeltaLedger for post-sync filtered navigation","description":"## Background\n\nAfter a sync completes, the Sync Summary screen shows delta counts (+12 new issues, +3 new MRs). Pressing `i` or `m` should navigate to Issue/MR List filtered to show ONLY the entities that changed in this sync run. The SyncDeltaLedger is an in-memory data structure (not persisted to DB) that records the exact IIDs of new/updated entities during a sync run. It lives for the duration of one TUI session and is cleared when a new sync starts. If the ledger is unavailable (e.g., after app restart), the Sync Summary falls back to a timestamp-based filter using `sync_status.last_completed_at`.\n\n## Approach\n\nCreate a `sync_delta.rs` module with:\n\n1. **`SyncDeltaLedger` struct**:\n ```rust\n pub struct SyncDeltaLedger {\n issues_new: Vec, // IIDs of newly created issues\n issues_updated: Vec, // IIDs of updated (not new) issues\n mrs_new: Vec, // IIDs of newly created MRs\n mrs_updated: Vec, // IIDs of updated MRs\n discussions_new: usize, // count only (too many to track individually)\n events_new: usize, // count only\n completed_at: Option, // timestamp when sync finished (fallback anchor)\n }\n ```\n2. 
**Builder pattern** — `SyncDeltaLedger::new()` starts empty, populated during sync via:\n - `record_issue(iid: i64, is_new: bool)`\n - `record_mr(iid: i64, is_new: bool)`\n - `record_discussions(count: usize)`\n - `record_events(count: usize)`\n - `finalize(completed_at: i64)` — marks ledger as complete\n3. **Query methods**:\n - `new_issue_iids() -> &[i64]` — for `i` key navigation in Summary mode\n - `new_mr_iids() -> &[i64]` — for `m` key navigation\n - `all_changed_issue_iids() -> Vec` — new + updated combined\n - `all_changed_mr_iids() -> Vec` — new + updated combined\n - `is_available() -> bool` — true if finalize() was called\n - `fallback_timestamp() -> Option` — completed_at for timestamp-based fallback\n4. **`clear()`** — resets all fields when a new sync starts\n\nThe ledger is owned by `SyncState` (part of `AppState`) and populated by the sync action handler when processing `SyncResult` from `run_sync()`. The existing `SyncResult` struct (src/cli/commands/sync.rs:30) already tracks `issues_updated` and `mrs_updated` counts but not individual IIDs — the TUI sync action will need to collect IIDs from the ingest callbacks.\n\n## Acceptance Criteria\n- [ ] `SyncDeltaLedger::new()` creates an empty ledger with `is_available() == false`\n- [ ] `record_issue(42, true)` adds 42 to `issues_new`; `record_issue(43, false)` adds to `issues_updated`\n- [ ] `new_issue_iids()` returns only new IIDs, `all_changed_issue_iids()` returns new + updated\n- [ ] `finalize(ts)` sets `is_available() == true` and stores the timestamp\n- [ ] `clear()` resets everything back to empty with `is_available() == false`\n- [ ] `fallback_timestamp()` returns None before finalize, Some(ts) after\n- [ ] Ledger handles >10,000 IIDs without issues (just Vec growth)\n\n## Files\n- CREATE: crates/lore-tui/src/sync_delta.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add `pub mod sync_delta;`)\n\n## TDD Anchor\nRED: Write `test_empty_ledger_not_available` that asserts 
`SyncDeltaLedger::new().is_available() == false` and `new_issue_iids().is_empty()`.\nGREEN: Implement the struct with new() and is_available().\nVERIFY: cargo test -p lore-tui sync_delta\n\nAdditional tests:\n- test_record_and_query_issues\n- test_record_and_query_mrs\n- test_finalize_makes_available\n- test_clear_resets_everything\n- test_all_changed_combines_new_and_updated\n- test_fallback_timestamp\n\n## Edge Cases\n- Recording the same IID twice (e.g., issue updated twice during sync) — should deduplicate or allow duplicates? Allow duplicates (Vec, not HashSet) for simplicity; consumers can deduplicate if needed.\n- Very large syncs with >50,000 entities — Vec is fine, no cap needed.\n- Calling query methods before finalize — returns data so far (is_available=false signals incompleteness).\n\n## Dependency Context\n- Depends on bd-2x2h (Sync screen) which owns SyncState and drives the sync lifecycle. The ledger is a field of SyncState.\n- Consumed by Sync Summary mode's `i`/`m` key handlers to produce filtered Issue/MR List navigation with exact IID sets.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:38.738460Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:29:48.475698Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-y095","depends_on_id":"bd-2x2h","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-y095","title":"Implement SyncDeltaLedger for post-sync filtered navigation","description":"## Background\n\nAfter a sync completes, the Sync Summary screen shows delta counts (+12 new issues, +3 new MRs). Pressing `i` or `m` should navigate to Issue/MR List filtered to show ONLY the entities that changed in this sync run. The SyncDeltaLedger is an in-memory data structure (not persisted to DB) that records the exact IIDs of new/updated entities during a sync run. 
It lives for the duration of one TUI session and is cleared when a new sync starts. If the ledger is unavailable (e.g., after app restart), the Sync Summary falls back to a timestamp-based filter using `sync_status.last_completed_at`.\n\n## Approach\n\nCreate a `sync_delta.rs` module with:\n\n1. **`SyncDeltaLedger` struct**:\n ```rust\n pub struct SyncDeltaLedger {\n issues_new: Vec, // IIDs of newly created issues\n issues_updated: Vec, // IIDs of updated (not new) issues\n mrs_new: Vec, // IIDs of newly created MRs\n mrs_updated: Vec, // IIDs of updated MRs\n discussions_new: usize, // count only (too many to track individually)\n events_new: usize, // count only\n completed_at: Option, // timestamp when sync finished (fallback anchor)\n }\n ```\n2. **Builder pattern** — `SyncDeltaLedger::new()` starts empty, populated during sync via:\n - `record_issue(iid: i64, is_new: bool)`\n - `record_mr(iid: i64, is_new: bool)`\n - `record_discussions(count: usize)`\n - `record_events(count: usize)`\n - `finalize(completed_at: i64)` — marks ledger as complete\n3. **Query methods**:\n - `new_issue_iids() -> &[i64]` — for `i` key navigation in Summary mode\n - `new_mr_iids() -> &[i64]` — for `m` key navigation\n - `all_changed_issue_iids() -> Vec` — new + updated combined\n - `all_changed_mr_iids() -> Vec` — new + updated combined\n - `is_available() -> bool` — true if finalize() was called\n - `fallback_timestamp() -> Option` — completed_at for timestamp-based fallback\n4. **`clear()`** — resets all fields when a new sync starts\n\nThe ledger is owned by `SyncState` (part of `AppState`) and populated by the sync action handler when processing `SyncResult` from `run_sync()`. 
The existing `SyncResult` struct (src/cli/commands/sync.rs:30) already tracks `issues_updated` and `mrs_updated` counts but not individual IIDs — the TUI sync action will need to collect IIDs from the ingest callbacks.\n\n## Acceptance Criteria\n- [ ] `SyncDeltaLedger::new()` creates an empty ledger with `is_available() == false`\n- [ ] `record_issue(42, true)` adds 42 to `issues_new`; `record_issue(43, false)` adds to `issues_updated`\n- [ ] `new_issue_iids()` returns only new IIDs, `all_changed_issue_iids()` returns new + updated\n- [ ] `finalize(ts)` sets `is_available() == true` and stores the timestamp\n- [ ] `clear()` resets everything back to empty with `is_available() == false`\n- [ ] `fallback_timestamp()` returns None before finalize, Some(ts) after\n- [ ] Ledger handles >10,000 IIDs without issues (just Vec growth)\n\n## Files\n- CREATE: crates/lore-tui/src/sync_delta.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add `pub mod sync_delta;`)\n\n## TDD Anchor\nRED: Write `test_empty_ledger_not_available` that asserts `SyncDeltaLedger::new().is_available() == false` and `new_issue_iids().is_empty()`.\nGREEN: Implement the struct with new() and is_available().\nVERIFY: cargo test -p lore-tui sync_delta\n\nAdditional tests:\n- test_record_and_query_issues\n- test_record_and_query_mrs\n- test_finalize_makes_available\n- test_clear_resets_everything\n- test_all_changed_combines_new_and_updated\n- test_fallback_timestamp\n\n## Edge Cases\n- Recording the same IID twice (e.g., issue updated twice during sync) — should deduplicate or allow duplicates? Allow duplicates (Vec, not HashSet) for simplicity; consumers can deduplicate if needed.\n- Very large syncs with >50,000 entities — Vec is fine, no cap needed.\n- Calling query methods before finalize — returns data so far (is_available=false signals incompleteness).\n\n## Dependency Context\n- Depends on bd-2x2h (Sync screen) which owns SyncState and drives the sync lifecycle. 
The ledger is a field of SyncState.\n- Consumed by Sync Summary mode's `i`/`m` key handlers to produce filtered Issue/MR List navigation with exact IID sets.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:38.738460Z","created_by":"tayloreernisse","updated_at":"2026-02-19T04:40:57.445633Z","closed_at":"2026-02-19T04:40:57.445577Z","close_reason":"SyncDeltaLedger implemented as part of bd-2x2h Sync screen.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-y095","depends_on_id":"bd-2x2h","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-ymd","title":"[CP1] Final validation - Gate A through D","description":"Run all tests and verify all internal gates pass.\n\n## Gate A: Issues Only (Must Pass First)\n- [ ] gi ingest --type=issues fetches all issues from configured projects\n- [ ] Issues stored with correct schema, including last_seen_at\n- [ ] Cursor-based sync is resumable (re-run fetches only new/updated)\n- [ ] Incremental cursor updates every 100 issues\n- [ ] Raw payloads stored for each issue\n- [ ] gi list issues and gi count issues work\n\n## Gate B: Labels Correct (Must Pass)\n- [ ] Labels extracted and stored (name-only)\n- [ ] Label links created correctly\n- [ ] Stale label links removed on re-sync (verified with test)\n- [ ] Label count per issue matches GitLab\n\n## Gate C: Dependent Discussion Sync (Must Pass)\n- [ ] Discussions fetched for issues with updated_at advancement\n- [ ] Notes stored with is_system flag correctly set\n- [ ] Raw payloads stored for discussions and notes\n- [ ] discussions_synced_for_updated_at watermark updated after sync\n- [ ] Unchanged issues skip discussion refetch (verified with test)\n- [ ] Bounded concurrency (dependent_concurrency respected)\n\n## Gate D: Resumability Proof (Must Pass)\n- [ ] Kill mid-run, rerun; bounded redo (cursor progress preserved)\n- [ ] No redundant discussion refetch after crash 
recovery\n- [ ] Single-flight lock prevents concurrent runs\n\n## Final Gate (Must Pass)\n- [ ] All unit tests pass (cargo test)\n- [ ] All integration tests pass (mocked with wiremock)\n- [ ] cargo clippy passes with no warnings\n- [ ] cargo fmt --check passes\n- [ ] Compiles with --release\n\n## Validation Commands\ncargo test\ncargo clippy -- -D warnings\ncargo fmt --check\ncargo build --release\n\nFiles: All CP1 files\nDone when: All gate criteria pass","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T16:59:26.795633Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:02.132613Z","closed_at":"2026-01-25T17:02:02.132613Z","deleted_at":"2026-01-25T17:02:02.132608Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-ypa","title":"Implement timeline expand phase: BFS cross-reference expansion","description":"## Background\n\nThe expand phase is step 3 of the timeline pipeline (spec Section 3.2). 
Starting from seed entities, it performs BFS over entity_references to discover related entities not matched by keywords.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 3.2 step 3, Section 3.5 (expanded_entities JSON).\n\n## Codebase Context\n\n- entity_references table exists (migration 011) with columns: source_entity_type, source_entity_id, target_entity_type, target_entity_id, target_project_path, target_entity_iid, reference_type, source_method, created_at\n- reference_type CHECK: `'closes' | 'mentioned' | 'related'`\n- source_method CHECK: `'api' | 'note_parse' | 'description_parse'` — use these values in provenance, NOT the spec's original values\n- Indexes: idx_entity_refs_source (source_entity_type, source_entity_id), idx_entity_refs_target (target_entity_id WHERE NOT NULL)\n\n## Approach\n\nCreate `src/core/timeline_expand.rs`:\n\n```rust\nuse std::collections::{HashSet, VecDeque};\nuse rusqlite::Connection;\nuse crate::core::timeline::{EntityRef, ExpandedEntityRef, UnresolvedRef};\n\npub struct ExpandResult {\n pub expanded_entities: Vec,\n pub unresolved_references: Vec,\n}\n\npub fn expand_timeline(\n conn: &Connection,\n seeds: &[EntityRef],\n depth: u32, // 0=no expansion, 1=default, 2+=deep\n include_mentions: bool, // --expand-mentions flag\n max_entities: usize, // cap at 100 to prevent explosion\n) -> Result { ... }\n```\n\n### BFS Algorithm\n\n```\nvisited: HashSet<(String, i64)> = seeds as set (entity_type, entity_id)\nqueue: VecDeque<(EntityRef, u32)> for multi-hop\n\nFor each seed:\n query_neighbors(conn, seed, edge_types) -> outgoing + incoming refs\n - Outgoing: SELECT target_* FROM entity_references WHERE source_entity_type=? AND source_entity_id=? AND reference_type IN (...)\n - Incoming: SELECT source_* FROM entity_references WHERE target_entity_type=? AND target_entity_id=? 
AND reference_type IN (...)\n - Unresolved (target_entity_id IS NULL): collect in UnresolvedRef, don't traverse\n - New resolved: add to expanded with provenance (via_from, via_reference_type, via_source_method)\n - If current_depth < depth: enqueue for further BFS\n```\n\n### Edge Type Filtering\n\n```rust\nfn edge_types(include_mentions: bool) -> Vec<&'static str> {\n if include_mentions {\n vec![\"closes\", \"related\", \"mentioned\"]\n } else {\n vec![\"closes\", \"related\"]\n }\n}\n```\n\n### Provenance (Critical for spec compliance)\n\nEach expanded entity needs via object per spec Section 3.5:\n- via_from: EntityRef of the entity that referenced this one\n- via_reference_type: from entity_references.reference_type column\n- via_source_method: from entity_references.source_method column (**codebase values: 'api', 'note_parse', 'description_parse'**)\n\nRegister in `src/core/mod.rs`: `pub mod timeline_expand;`\n\n## Acceptance Criteria\n\n- [ ] BFS traverses outgoing AND incoming edges in entity_references\n- [ ] Default: only \"closes\" and \"related\" edges (not \"mentioned\")\n- [ ] --expand-mentions: also traverses \"mentioned\" edges\n- [ ] depth=0: returns empty expanded list\n- [ ] max_entities cap prevents explosion (default 100)\n- [ ] Provenance: via_source_method uses codebase values (api/note_parse/description_parse), NOT spec values\n- [ ] Unresolved references (target_entity_id IS NULL) collected, not traversed\n- [ ] No duplicates: visited set by (entity_type, entity_id)\n- [ ] Self-references skipped\n- [ ] Module registered in src/core/mod.rs\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/core/timeline_expand.rs` (NEW)\n- `src/core/mod.rs` (add `pub mod timeline_expand;`)\n\n## TDD Loop\n\nRED: Tests in `src/core/timeline_expand.rs`:\n- `test_expand_depth_zero` - returns empty\n- `test_expand_finds_linked_entity` - seed issue -> closes -> linked MR\n- 
`test_expand_bidirectional` - starting from target also finds source\n- `test_expand_respects_max_entities`\n- `test_expand_skips_mentions_by_default`\n- `test_expand_includes_mentions_when_flagged`\n- `test_expand_collects_unresolved`\n- `test_expand_tracks_provenance` - verify via_source_method is 'api' not 'api_closes_issues'\n\nTests need in-memory DB with migrations 001-014 applied + entity_references test data.\n\nGREEN: Implement BFS.\n\nVERIFY: `cargo test --lib -- timeline_expand`\n\n## Edge Cases\n\n- Circular references: visited set prevents infinite loop\n- Entity referenced from multiple seeds: first-come provenance wins\n- Empty entity_references: returns empty, not error\n- Cross-project refs with NULL target_entity_id: add to unresolved","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:33:08.659381Z","created_by":"tayloreernisse","updated_at":"2026-02-05T21:49:46.868460Z","closed_at":"2026-02-05T21:49:46.868410Z","close_reason":"Completed: Created src/core/timeline_expand.rs with BFS cross-reference expansion. Bidirectional traversal, depth limiting, mention filtering, max entity cap, provenance tracking, unresolved reference collection. 10 tests pass. 
All quality gates pass.","compaction_level":0,"original_size":0,"labels":["gate-3","phase-b","query"],"dependencies":[{"issue_id":"bd-ypa","depends_on_id":"bd-32q","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-ypa","depends_on_id":"bd-3ia","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-ypa","depends_on_id":"bd-ike","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-z0s","title":"[CP1] Final validation - Gate A through D","description":"Run all tests and verify all internal gates pass.\n\n## Gate A: Issues Only (Must Pass First)\n- [ ] gi ingest --type=issues fetches all issues from configured projects\n- [ ] Issues stored with correct schema, including last_seen_at\n- [ ] Cursor-based sync is resumable (re-run fetches only new/updated)\n- [ ] Incremental cursor updates every 100 issues\n- [ ] Raw payloads stored for each issue\n- [ ] gi list issues and gi count issues work\n\n## Gate B: Labels Correct (Must Pass)\n- [ ] Labels extracted and stored (name-only)\n- [ ] Label links created correctly\n- [ ] **Stale label links removed on re-sync** (verified with test)\n- [ ] Label count per issue matches GitLab\n\n## Gate C: Dependent Discussion Sync (Must Pass)\n- [ ] Discussions fetched for issues with updated_at advancement\n- [ ] Notes stored with is_system flag correctly set\n- [ ] Raw payloads stored for discussions and notes\n- [ ] discussions_synced_for_updated_at watermark updated after sync\n- [ ] **Unchanged issues skip discussion refetch** (verified with test)\n- [ ] Bounded concurrency (dependent_concurrency respected)\n\n## Gate D: Resumability Proof (Must Pass)\n- [ ] Kill mid-run, rerun; bounded redo (cursor progress preserved)\n- [ ] No redundant discussion refetch after crash recovery\n- [ ] Single-flight lock prevents concurrent runs\n\n## Final Gate (Must Pass)\n- [ ] All unit tests pass (cargo test)\n- [ ] All 
integration tests pass (mocked with wiremock)\n- [ ] cargo clippy passes with no warnings\n- [ ] cargo fmt --check passes\n- [ ] Compiles with --release\n\n## Validation Commands\ncargo test\ncargo clippy -- -D warnings\ncargo fmt --check\ncargo build --release\n\n## Data Integrity Checks\n- SELECT COUNT(*) FROM issues matches GitLab issue count\n- Every issue has a raw_payloads row\n- Every discussion has a raw_payloads row\n- Labels in issue_labels junction all exist in labels table\n- Re-running gi ingest --type=issues fetches 0 new items\n- After removing a label in GitLab and re-syncing, the link is removed\n\nFiles: All CP1 files\nDone when: All gate criteria pass","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.459095Z","created_by":"tayloreernisse","updated_at":"2026-01-25T23:27:09.567537Z","closed_at":"2026-01-25T23:27:09.567478Z","close_reason":"All gates pass: 71 tests, clippy clean, fmt clean, release build successful","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-z0s","depends_on_id":"bd-17v","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-z0s","depends_on_id":"bd-2f0","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-z0s","depends_on_id":"bd-39w","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-z0s","depends_on_id":"bd-3n1","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-z0s","depends_on_id":"bd-o7b","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-z0s","depends_on_id":"bd-v6i","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} diff --git a/.beads/last-touched b/.beads/last-touched index c02d987..540aa5f 100644 --- a/.beads/last-touched +++ b/.beads/last-touched @@ -1 +1 @@ -bd-1ser +bd-3l56 diff --git a/Cargo.lock b/Cargo.lock index 96a115f..0782dec 100644 
--- a/Cargo.lock +++ b/Cargo.lock @@ -485,6 +485,12 @@ dependencies = [ "litrs", ] +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + [[package]] name = "encode_unicode" version = "1.0.0" @@ -500,6 +506,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "env_home" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7f84e12ccf0a7ddc17a6c41c93326024c42920d7ee630d04950e6926645c0fe" + [[package]] name = "equivalent" version = "1.0.2" @@ -1191,6 +1203,7 @@ dependencies = [ "url", "urlencoding", "uuid", + "which", "wiremock", ] @@ -2507,6 +2520,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "which" +version = "7.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d643ce3fd3e5b54854602a080f34fb10ab75e0b813ee32d00ca2b44fa74762" +dependencies = [ + "either", + "env_home", + "rustix", + "winsafe", +] + [[package]] name = "winapi" version = "0.3.9" @@ -2764,6 +2789,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "winsafe" +version = "0.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" + [[package]] name = "wiremock" version = "0.6.5" diff --git a/Cargo.toml b/Cargo.toml index b06a775..ad9ffbf 100644 --- a/Cargo.toml +++ b/Cargo.toml @@ -49,6 +49,7 @@ httpdate = "1" uuid = { version = "1", features = ["v4"] } regex = "1" strsim = "0.11" +which = "7" [target.'cfg(unix)'.dependencies] libc = "0.2" diff --git a/crates/lore-tui/Cargo.lock b/crates/lore-tui/Cargo.lock index c461984..0a99286 100644 --- a/crates/lore-tui/Cargo.lock +++ b/crates/lore-tui/Cargo.lock @@ -485,6 +485,12 @@ dependencies = [ "litrs", ] +[[package]] +name = "either" +version = "1.15.0" +source = "registry+https://github.com/rust-lang/crates.io-index" 
+checksum = "48c757948c5ede0e46177b7add2e67155f70e33c07fea8284df6576da70b3719" + [[package]] name = "encode_unicode" version = "1.0.0" @@ -500,6 +506,12 @@ dependencies = [ "cfg-if", ] +[[package]] +name = "env_home" +version = "0.1.0" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "c7f84e12ccf0a7ddc17a6c41c93326024c42920d7ee630d04950e6926645c0fe" + [[package]] name = "equivalent" version = "1.0.2" @@ -1336,6 +1348,7 @@ dependencies = [ "url", "urlencoding", "uuid", + "which", ] [[package]] @@ -1345,6 +1358,7 @@ dependencies = [ "anyhow", "chrono", "clap", + "crc32fast", "crossterm 0.28.1", "dirs", "ftui", @@ -1354,6 +1368,8 @@ dependencies = [ "serde", "serde_json", "tempfile", + "unicode-segmentation", + "unicode-width 0.2.2", ] [[package]] @@ -2782,6 +2798,18 @@ dependencies = [ "wasm-bindgen", ] +[[package]] +name = "which" +version = "7.0.3" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "24d643ce3fd3e5b54854602a080f34fb10ab75e0b813ee32d00ca2b44fa74762" +dependencies = [ + "either", + "env_home", + "rustix 1.1.3", + "winsafe", +] + [[package]] name = "winapi" version = "0.3.9" @@ -3048,6 +3076,12 @@ dependencies = [ "memchr", ] +[[package]] +name = "winsafe" +version = "0.0.19" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "d135d17ab770252ad95e9a872d365cf3090e3be864a34ab46f48555993efc904" + [[package]] name = "wit-bindgen" version = "0.51.0" diff --git a/crates/lore-tui/Cargo.toml b/crates/lore-tui/Cargo.toml index 9cbed7b..8059841 100644 --- a/crates/lore-tui/Cargo.toml +++ b/crates/lore-tui/Cargo.toml @@ -42,5 +42,12 @@ serde_json = "1" # Regex (used by safety module for PII/secret redaction) regex = "1" +# Unicode text measurement +unicode-width = "0.2" +unicode-segmentation = "1" + +# Session persistence (CRC32 checksum) +crc32fast = "1" + [dev-dependencies] tempfile = "3" diff --git a/crates/lore-tui/src/action/file_history.rs 
b/crates/lore-tui/src/action/file_history.rs index 44dd53a..ddb5f9f 100644 --- a/crates/lore-tui/src/action/file_history.rs +++ b/crates/lore-tui/src/action/file_history.rs @@ -96,8 +96,7 @@ pub fn fetch_file_history( merge_commit_sha: row.get(7)?, }) })? - .filter_map(std::result::Result::ok) - .collect(); + .collect::, _>>()?; let total_mrs = merge_requests.len(); @@ -170,8 +169,7 @@ fn fetch_file_discussions( created_at_ms: row.get(4)?, }) })? - .filter_map(std::result::Result::ok) - .collect(); + .collect::, _>>()?; Ok(discussions) } @@ -187,12 +185,10 @@ pub fn fetch_file_history_paths(conn: &Connection, project_id: Option) -> R let mut stmt = conn.prepare(sql)?; let paths: Vec = if let Some(pid) = project_id { stmt.query_map([pid], |row| row.get(0))? - .filter_map(std::result::Result::ok) - .collect() + .collect::, _>>()? } else { stmt.query_map([], |row| row.get(0))? - .filter_map(std::result::Result::ok) - .collect() + .collect::, _>>()? }; Ok(paths) diff --git a/crates/lore-tui/src/action/mod.rs b/crates/lore-tui/src/action/mod.rs index c0b18f1..27a2a77 100644 --- a/crates/lore-tui/src/action/mod.rs +++ b/crates/lore-tui/src/action/mod.rs @@ -12,6 +12,7 @@ mod issue_list; mod mr_detail; mod mr_list; mod search; +mod sync; mod timeline; mod trace; mod who; @@ -24,6 +25,7 @@ pub use issue_list::*; pub use mr_detail::*; pub use mr_list::*; pub use search::*; +pub use sync::*; pub use timeline::*; pub use trace::*; pub use who::*; diff --git a/crates/lore-tui/src/action/sync.rs b/crates/lore-tui/src/action/sync.rs new file mode 100644 index 0000000..3403f85 --- /dev/null +++ b/crates/lore-tui/src/action/sync.rs @@ -0,0 +1,587 @@ +#![allow(dead_code)] + +//! Sync screen actions — query sync run history and detect running syncs. +//! +//! With cron-driven syncs as the primary mechanism, the TUI's sync screen +//! acts as a status dashboard. These pure query functions read `sync_runs` +//! and `projects` to populate the screen. 
+ +use anyhow::{Context, Result}; +use rusqlite::Connection; + +use crate::clock::Clock; + +/// How many recent runs to display in the sync history. +const HISTORY_LIMIT: usize = 10; + +/// If a "running" sync hasn't heartbeated in this many milliseconds, +/// consider it stale (likely crashed). +const STALE_HEARTBEAT_MS: i64 = 120_000; // 2 minutes + +// --------------------------------------------------------------------------- +// Data types +// --------------------------------------------------------------------------- + +/// Overview data for the sync screen. +#[derive(Debug, Default)] +pub struct SyncOverview { + /// Info about a currently running sync, if any. + pub running: Option, + /// Most recent completed (succeeded or failed) run. + pub last_completed: Option, + /// Recent sync run history (newest first). + pub recent_runs: Vec, + /// Configured project paths. + pub projects: Vec, +} + +/// A sync that is currently in progress. +#[derive(Debug, Clone)] +pub struct RunningSyncInfo { + /// Row ID in sync_runs. + pub id: i64, + /// When this sync started (ms epoch). + pub started_at: i64, + /// Last heartbeat (ms epoch). + pub heartbeat_at: i64, + /// How long it's been running (ms). + pub elapsed_ms: u64, + /// Whether the heartbeat is stale (sync may have crashed). + pub stale: bool, + /// Items processed so far. + pub items_processed: u64, +} + +/// Summary of a single sync run. +#[derive(Debug, Clone)] +pub struct SyncRunInfo { + /// Row ID in sync_runs. + pub id: i64, + /// 'succeeded', 'failed', or 'running'. + pub status: String, + /// The command that was run (e.g., 'sync', 'ingest issues'). + pub command: String, + /// When this sync started (ms epoch). + pub started_at: i64, + /// When this sync finished (ms epoch), if completed. + pub finished_at: Option, + /// Duration in ms (computed from started_at/finished_at). + pub duration_ms: Option, + /// Total items processed. + pub items_processed: u64, + /// Total errors encountered. 
+ pub errors: u64, + /// Error message if the run failed. + pub error: Option, + /// Correlation ID for log matching. + pub run_id: Option, +} + +// --------------------------------------------------------------------------- +// Public API +// --------------------------------------------------------------------------- + +/// Fetch the complete sync overview for the sync screen. +/// +/// Combines running sync detection, last completed run, recent history, +/// and configured projects into a single struct. +pub fn fetch_sync_overview(conn: &Connection, clock: &dyn Clock) -> Result { + let running = detect_running_sync(conn, clock)?; + let recent_runs = fetch_recent_runs(conn, HISTORY_LIMIT)?; + let last_completed = recent_runs + .iter() + .find(|r| r.status == "succeeded" || r.status == "failed") + .cloned(); + let projects = fetch_configured_projects(conn)?; + + Ok(SyncOverview { + running, + last_completed, + recent_runs, + projects, + }) +} + +/// Detect a currently running sync from the `sync_runs` table. +/// +/// A sync is considered "running" if `status = 'running'`. It's marked +/// stale if the heartbeat is older than [`STALE_HEARTBEAT_MS`]. 
+pub fn detect_running_sync( + conn: &Connection, + clock: &dyn Clock, +) -> Result> { + let result = conn.query_row( + "SELECT id, started_at, heartbeat_at, total_items_processed + FROM sync_runs + WHERE status = 'running' + ORDER BY id DESC + LIMIT 1", + [], + |row| { + let id: i64 = row.get(0)?; + let started_at: i64 = row.get(1)?; + let heartbeat_at: i64 = row.get(2)?; + let items: Option = row.get(3)?; + Ok((id, started_at, heartbeat_at, items.unwrap_or(0))) + }, + ); + + match result { + Ok((id, started_at, heartbeat_at, items)) => { + let now = clock.now_ms(); + let elapsed_ms = now.saturating_sub(started_at); + let stale = (now - heartbeat_at) > STALE_HEARTBEAT_MS; + + #[allow(clippy::cast_sign_loss)] + Ok(Some(RunningSyncInfo { + id, + started_at, + heartbeat_at, + elapsed_ms: elapsed_ms as u64, + stale, + items_processed: items as u64, + })) + } + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(e) => Err(e).context("detecting running sync"), + } +} + +/// Fetch recent sync runs (newest first). 
+pub fn fetch_recent_runs(conn: &Connection, limit: usize) -> Result> { + let mut stmt = conn + .prepare( + "SELECT id, status, command, started_at, finished_at, + total_items_processed, total_errors, error, run_id + FROM sync_runs + ORDER BY id DESC + LIMIT ?1", + ) + .context("preparing sync runs query")?; + + let rows = stmt + .query_map([limit as i64], |row| { + let id: i64 = row.get(0)?; + let status: String = row.get(1)?; + let command: String = row.get(2)?; + let started_at: i64 = row.get(3)?; + let finished_at: Option = row.get(4)?; + let items: Option = row.get(5)?; + let errors: Option = row.get(6)?; + let error: Option = row.get(7)?; + let run_id: Option = row.get(8)?; + + Ok(( + id, status, command, started_at, finished_at, items, errors, error, run_id, + )) + }) + .context("querying sync runs")?; + + let mut result = Vec::new(); + for row in rows { + let (id, status, command, started_at, finished_at, items, errors, error, run_id) = + row.context("reading sync run row")?; + + #[allow(clippy::cast_sign_loss)] + let duration_ms = finished_at.map(|f| (f - started_at) as u64); + + #[allow(clippy::cast_sign_loss)] + result.push(SyncRunInfo { + id, + status, + command, + started_at, + finished_at, + duration_ms, + items_processed: items.unwrap_or(0) as u64, + errors: errors.unwrap_or(0) as u64, + error, + run_id, + }); + } + + Ok(result) +} + +/// Fetch configured project paths from the `projects` table. 
+pub fn fetch_configured_projects(conn: &Connection) -> Result> { + let mut stmt = conn + .prepare("SELECT path_with_namespace FROM projects ORDER BY path_with_namespace") + .context("preparing projects query")?; + + let rows = stmt + .query_map([], |row| row.get::<_, String>(0)) + .context("querying projects")?; + + let mut result = Vec::new(); + for row in rows { + result.push(row.context("reading project row")?); + } + Ok(result) +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::clock::FakeClock; + + /// Create the minimal schema needed for sync queries. + fn create_sync_schema(conn: &Connection) { + conn.execute_batch( + " + CREATE TABLE projects ( + id INTEGER PRIMARY KEY, + gitlab_project_id INTEGER UNIQUE NOT NULL, + path_with_namespace TEXT NOT NULL + ); + CREATE TABLE sync_runs ( + id INTEGER PRIMARY KEY, + started_at INTEGER NOT NULL, + heartbeat_at INTEGER NOT NULL, + finished_at INTEGER, + status TEXT NOT NULL, + command TEXT NOT NULL, + error TEXT, + metrics_json TEXT, + run_id TEXT, + total_items_processed INTEGER DEFAULT 0, + total_errors INTEGER DEFAULT 0 + ); + ", + ) + .expect("create sync schema"); + } + + fn insert_project(conn: &Connection, id: i64, path: &str) { + conn.execute( + "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) + VALUES (?1, ?2, ?3)", + rusqlite::params![id, id * 100, path], + ) + .expect("insert project"); + } + + fn insert_sync_run( + conn: &Connection, + started_at: i64, + finished_at: Option, + status: &str, + command: &str, + items: i64, + errors: i64, + error: Option<&str>, + ) -> i64 { + conn.execute( + "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command, + total_items_processed, total_errors, error) + VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)", + rusqlite::params![ + started_at, + 
finished_at.unwrap_or(started_at), + finished_at, + status, + command, + items, + errors, + error, + ], + ) + .expect("insert sync run"); + conn.last_insert_rowid() + } + + // ----------------------------------------------------------------------- + // detect_running_sync + // ----------------------------------------------------------------------- + + #[test] + fn test_detect_running_sync_none_when_empty() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + let clock = FakeClock::from_ms(1_700_000_000_000); + + let result = detect_running_sync(&conn, &clock).unwrap(); + assert!(result.is_none()); + } + + #[test] + fn test_detect_running_sync_none_when_all_completed() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + + let now = 1_700_000_000_000_i64; + insert_sync_run(&conn, now - 60_000, Some(now - 30_000), "succeeded", "sync", 100, 0, None); + insert_sync_run(&conn, now - 120_000, Some(now - 90_000), "failed", "sync", 50, 2, Some("timeout")); + + let clock = FakeClock::from_ms(now); + let result = detect_running_sync(&conn, &clock).unwrap(); + assert!(result.is_none()); + } + + #[test] + fn test_detect_running_sync_found() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + + let now = 1_700_000_000_000_i64; + let started = now - 30_000; // 30 seconds ago + // Heartbeat at started_at (fresh since we just set it) + conn.execute( + "INSERT INTO sync_runs (started_at, heartbeat_at, status, command, total_items_processed) + VALUES (?1, ?2, 'running', 'sync', 42)", + [started, now - 5_000], // heartbeat 5 seconds ago + ) + .unwrap(); + + let clock = FakeClock::from_ms(now); + let running = detect_running_sync(&conn, &clock).unwrap().unwrap(); + + assert_eq!(running.elapsed_ms, 30_000); + assert_eq!(running.items_processed, 42); + assert!(!running.stale); + } + + #[test] + fn test_detect_running_sync_stale_heartbeat() { + let conn = 
Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + + let now = 1_700_000_000_000_i64; + let started = now - 300_000; // 5 minutes ago + // Heartbeat 3 minutes ago — stale + conn.execute( + "INSERT INTO sync_runs (started_at, heartbeat_at, status, command) + VALUES (?1, ?2, 'running', 'sync')", + [started, now - 180_000], + ) + .unwrap(); + + let clock = FakeClock::from_ms(now); + let running = detect_running_sync(&conn, &clock).unwrap().unwrap(); + + assert!(running.stale); + assert_eq!(running.elapsed_ms, 300_000); + } + + // ----------------------------------------------------------------------- + // fetch_recent_runs + // ----------------------------------------------------------------------- + + #[test] + fn test_fetch_recent_runs_empty() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + + let runs = fetch_recent_runs(&conn, 10).unwrap(); + assert!(runs.is_empty()); + } + + #[test] + fn test_fetch_recent_runs_ordered_newest_first() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + + let now = 1_700_000_000_000_i64; + insert_sync_run(&conn, now - 120_000, Some(now - 90_000), "succeeded", "sync", 100, 0, None); + insert_sync_run(&conn, now - 60_000, Some(now - 30_000), "succeeded", "sync", 200, 0, None); + + let runs = fetch_recent_runs(&conn, 10).unwrap(); + assert_eq!(runs.len(), 2); + // Newest first (higher id) + assert_eq!(runs[0].items_processed, 200); + assert_eq!(runs[1].items_processed, 100); + } + + #[test] + fn test_fetch_recent_runs_respects_limit() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + + let now = 1_700_000_000_000_i64; + for i in 0..5 { + insert_sync_run( + &conn, + now - (5 - i) * 60_000, + Some(now - (5 - i) * 60_000 + 30_000), + "succeeded", + "sync", + i * 10, + 0, + None, + ); + } + + let runs = fetch_recent_runs(&conn, 3).unwrap(); + assert_eq!(runs.len(), 3); + } + + #[test] + fn 
test_fetch_recent_runs_duration_computed() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + + let now = 1_700_000_000_000_i64; + insert_sync_run(&conn, now - 60_000, Some(now - 15_000), "succeeded", "sync", 0, 0, None); + + let runs = fetch_recent_runs(&conn, 10).unwrap(); + assert_eq!(runs[0].duration_ms, Some(45_000)); + } + + #[test] + fn test_fetch_recent_runs_running_no_duration() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + + let now = 1_700_000_000_000_i64; + insert_sync_run(&conn, now - 60_000, None, "running", "sync", 0, 0, None); + + let runs = fetch_recent_runs(&conn, 10).unwrap(); + assert_eq!(runs[0].status, "running"); + assert!(runs[0].duration_ms.is_none()); + } + + #[test] + fn test_fetch_recent_runs_failed_with_error() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + + let now = 1_700_000_000_000_i64; + insert_sync_run( + &conn, + now - 60_000, + Some(now - 30_000), + "failed", + "sync", + 50, + 3, + Some("network timeout"), + ); + + let runs = fetch_recent_runs(&conn, 10).unwrap(); + assert_eq!(runs[0].status, "failed"); + assert_eq!(runs[0].errors, 3); + assert_eq!(runs[0].error.as_deref(), Some("network timeout")); + } + + // ----------------------------------------------------------------------- + // fetch_configured_projects + // ----------------------------------------------------------------------- + + #[test] + fn test_fetch_configured_projects_empty() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + + let projects = fetch_configured_projects(&conn).unwrap(); + assert!(projects.is_empty()); + } + + #[test] + fn test_fetch_configured_projects_sorted() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + + insert_project(&conn, 1, "group/beta"); + insert_project(&conn, 2, "group/alpha"); + insert_project(&conn, 3, "other/gamma"); + + let projects = 
fetch_configured_projects(&conn).unwrap(); + assert_eq!(projects, vec!["group/alpha", "group/beta", "other/gamma"]); + } + + // ----------------------------------------------------------------------- + // fetch_sync_overview (integration) + // ----------------------------------------------------------------------- + + #[test] + fn test_fetch_sync_overview_empty_db() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + let clock = FakeClock::from_ms(1_700_000_000_000); + + let overview = fetch_sync_overview(&conn, &clock).unwrap(); + assert!(overview.running.is_none()); + assert!(overview.last_completed.is_none()); + assert!(overview.recent_runs.is_empty()); + assert!(overview.projects.is_empty()); + } + + #[test] + fn test_fetch_sync_overview_with_history() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + + let now = 1_700_000_000_000_i64; + insert_project(&conn, 1, "group/repo"); + insert_sync_run(&conn, now - 120_000, Some(now - 90_000), "succeeded", "sync", 150, 0, None); + insert_sync_run(&conn, now - 60_000, Some(now - 30_000), "failed", "sync", 50, 2, Some("db locked")); + + let clock = FakeClock::from_ms(now); + let overview = fetch_sync_overview(&conn, &clock).unwrap(); + + assert!(overview.running.is_none()); + assert_eq!(overview.recent_runs.len(), 2); + assert_eq!(overview.projects, vec!["group/repo"]); + + // last_completed should be the newest completed run (failed, id=2) + let last = overview.last_completed.unwrap(); + assert_eq!(last.status, "failed"); + assert_eq!(last.errors, 2); + } + + #[test] + fn test_fetch_sync_overview_with_running_sync() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + + let now = 1_700_000_000_000_i64; + insert_project(&conn, 1, "group/repo"); + + // A completed run. + insert_sync_run(&conn, now - 600_000, Some(now - 570_000), "succeeded", "sync", 200, 0, None); + + // A currently running sync. 
+ conn.execute( + "INSERT INTO sync_runs (started_at, heartbeat_at, status, command, total_items_processed) + VALUES (?1, ?2, 'running', 'sync', 75)", + [now - 20_000, now - 2_000], + ) + .unwrap(); + + let clock = FakeClock::from_ms(now); + let overview = fetch_sync_overview(&conn, &clock).unwrap(); + + assert!(overview.running.is_some()); + let running = overview.running.unwrap(); + assert_eq!(running.elapsed_ms, 20_000); + assert_eq!(running.items_processed, 75); + assert!(!running.stale); + + // last_completed should find the succeeded run, not the running one. + let last = overview.last_completed.unwrap(); + assert_eq!(last.status, "succeeded"); + assert_eq!(last.items_processed, 200); + } + + #[test] + fn test_sync_run_info_with_run_id() { + let conn = Connection::open_in_memory().unwrap(); + create_sync_schema(&conn); + + let now = 1_700_000_000_000_i64; + conn.execute( + "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command, + total_items_processed, total_errors, run_id) + VALUES (?1, ?1, ?2, 'succeeded', 'sync', 100, 0, 'abc-123')", + [now - 60_000, now - 30_000], + ) + .unwrap(); + + let runs = fetch_recent_runs(&conn, 10).unwrap(); + assert_eq!(runs[0].run_id.as_deref(), Some("abc-123")); + } +} diff --git a/crates/lore-tui/src/action/timeline.rs b/crates/lore-tui/src/action/timeline.rs index dc11ff3..1391ba0 100644 --- a/crates/lore-tui/src/action/timeline.rs +++ b/crates/lore-tui/src/action/timeline.rs @@ -64,10 +64,12 @@ pub fn fetch_timeline_events( let filter = resolve_timeline_scope(conn, scope)?; let mut events = Vec::new(); - collect_tl_created_events(conn, &filter, &mut events)?; - collect_tl_state_events(conn, &filter, &mut events)?; - collect_tl_label_events(conn, &filter, &mut events)?; - collect_tl_milestone_events(conn, &filter, &mut events)?; + // Each collector is given the full limit. After merge-sorting, we truncate + // to `limit`. Worst case we hold 4*limit events in memory (bounded). 
+ collect_tl_created_events(conn, &filter, limit, &mut events)?; + collect_tl_state_events(conn, &filter, limit, &mut events)?; + collect_tl_label_events(conn, &filter, limit, &mut events)?; + collect_tl_milestone_events(conn, &filter, limit, &mut events)?; // Sort by timestamp descending (most recent first), with stable tiebreak. events.sort_by(|a, b| { @@ -85,11 +87,12 @@ pub fn fetch_timeline_events( fn collect_tl_created_events( conn: &Connection, filter: &TimelineFilter, + limit: usize, events: &mut Vec, ) -> Result<()> { // Issue created events. if !matches!(filter, TimelineFilter::MergeRequest(_)) { - let (where_clause, params) = match filter { + let (where_clause, mut params) = match filter { TimelineFilter::All => ( "1=1".to_string(), Vec::>::new(), @@ -105,12 +108,16 @@ fn collect_tl_created_events( TimelineFilter::MergeRequest(_) => unreachable!(), }; + let limit_param = params.len() + 1; let sql = format!( "SELECT i.created_at, i.iid, i.title, i.author_username, i.project_id, p.path_with_namespace FROM issues i JOIN projects p ON p.id = i.project_id - WHERE {where_clause}" + WHERE {where_clause} + ORDER BY i.created_at DESC + LIMIT ?{limit_param}" ); + params.push(Box::new(limit as i64)); let mut stmt = conn .prepare(&sql) @@ -148,7 +155,7 @@ fn collect_tl_created_events( // MR created events. 
if !matches!(filter, TimelineFilter::Issue(_)) { - let (where_clause, params) = match filter { + let (where_clause, mut params) = match filter { TimelineFilter::All => ( "1=1".to_string(), Vec::>::new(), @@ -164,12 +171,16 @@ fn collect_tl_created_events( TimelineFilter::Issue(_) => unreachable!(), }; + let limit_param = params.len() + 1; let sql = format!( "SELECT mr.created_at, mr.iid, mr.title, mr.author_username, mr.project_id, p.path_with_namespace FROM merge_requests mr JOIN projects p ON p.id = mr.project_id - WHERE {where_clause}" + WHERE {where_clause} + ORDER BY mr.created_at DESC + LIMIT ?{limit_param}" ); + params.push(Box::new(limit as i64)); let mut stmt = conn.prepare(&sql).context("preparing MR created query")?; let param_refs: Vec<&dyn rusqlite::types::ToSql> = @@ -252,9 +263,11 @@ fn resolve_event_entity( fn collect_tl_state_events( conn: &Connection, filter: &TimelineFilter, + limit: usize, events: &mut Vec, ) -> Result<()> { - let (where_clause, params) = resource_event_where(filter); + let (where_clause, mut params) = resource_event_where(filter); + let limit_param = params.len() + 1; let sql = format!( "SELECT e.created_at, e.state, e.actor_username, @@ -266,8 +279,11 @@ fn collect_tl_state_events( LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id LEFT JOIN projects pi ON pi.id = i.project_id LEFT JOIN projects pm ON pm.id = mr.project_id - WHERE {where_clause}" + WHERE {where_clause} + ORDER BY e.created_at DESC + LIMIT ?{limit_param}" ); + params.push(Box::new(limit as i64)); let mut stmt = conn.prepare(&sql).context("preparing state events query")?; let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(AsRef::as_ref).collect(); @@ -338,9 +354,11 @@ fn collect_tl_state_events( fn collect_tl_label_events( conn: &Connection, filter: &TimelineFilter, + limit: usize, events: &mut Vec, ) -> Result<()> { - let (where_clause, params) = resource_event_where(filter); + let (where_clause, mut params) = 
resource_event_where(filter); + let limit_param = params.len() + 1; let sql = format!( "SELECT e.created_at, e.action, e.label_name, e.actor_username, @@ -352,8 +370,11 @@ fn collect_tl_label_events( LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id LEFT JOIN projects pi ON pi.id = i.project_id LEFT JOIN projects pm ON pm.id = mr.project_id - WHERE {where_clause}" + WHERE {where_clause} + ORDER BY e.created_at DESC + LIMIT ?{limit_param}" ); + params.push(Box::new(limit as i64)); let mut stmt = conn.prepare(&sql).context("preparing label events query")?; let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(AsRef::as_ref).collect(); @@ -426,9 +447,11 @@ fn collect_tl_label_events( fn collect_tl_milestone_events( conn: &Connection, filter: &TimelineFilter, + limit: usize, events: &mut Vec, ) -> Result<()> { - let (where_clause, params) = resource_event_where(filter); + let (where_clause, mut params) = resource_event_where(filter); + let limit_param = params.len() + 1; let sql = format!( "SELECT e.created_at, e.action, e.milestone_title, e.actor_username, @@ -440,8 +463,11 @@ fn collect_tl_milestone_events( LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id LEFT JOIN projects pi ON pi.id = i.project_id LEFT JOIN projects pm ON pm.id = mr.project_id - WHERE {where_clause}" + WHERE {where_clause} + ORDER BY e.created_at DESC + LIMIT ?{limit_param}" ); + params.push(Box::new(limit as i64)); let mut stmt = conn .prepare(&sql) diff --git a/crates/lore-tui/src/action/trace.rs b/crates/lore-tui/src/action/trace.rs index 1782114..0d3533d 100644 --- a/crates/lore-tui/src/action/trace.rs +++ b/crates/lore-tui/src/action/trace.rs @@ -38,20 +38,18 @@ pub fn fetch_trace( /// Returns distinct `new_path` values scoped to the given project (or all /// projects if `None`), sorted alphabetically. 
pub fn fetch_known_paths(conn: &Connection, project_id: Option<i64>) -> Result<Vec<String>> { - let mut paths = if let Some(pid) = project_id { + let paths = if let Some(pid) = project_id { let mut stmt = conn.prepare( "SELECT DISTINCT new_path FROM mr_file_changes WHERE project_id = ?1 ORDER BY new_path", )?; let rows = stmt.query_map([pid], |row| row.get::<_, String>(0))?; - rows.filter_map(Result::ok).collect::<Vec<_>>() + rows.collect::<Result<Vec<_>, _>>()? } else { let mut stmt = conn.prepare("SELECT DISTINCT new_path FROM mr_file_changes ORDER BY new_path")?; let rows = stmt.query_map([], |row| row.get::<_, String>(0))?; - rows.filter_map(Result::ok).collect::<Vec<_>>() + rows.collect::<Result<Vec<_>, _>>()? }; - paths.sort(); - paths.dedup(); Ok(paths) } diff --git a/crates/lore-tui/src/app/update.rs index b492f7c..4f84851 100644 --- a/crates/lore-tui/src/app/update.rs +++ b/crates/lore-tui/src/app/update.rs @@ -219,6 +219,8 @@ impl LoreApp { "go_who" => self.navigate_to(Screen::Who), "go_file_history" => self.navigate_to(Screen::FileHistory), "go_trace" => self.navigate_to(Screen::Trace), + "go_doctor" => self.navigate_to(Screen::Doctor), + "go_stats" => self.navigate_to(Screen::Stats), "go_sync" => { if screen == &Screen::Bootstrap { self.state.bootstrap.sync_started = true; @@ -235,6 +237,19 @@ impl LoreApp { self.navigation.jump_forward(); Cmd::none() } + "toggle_scope" => { + if self.state.scope_picker.visible { + self.state.scope_picker.close(); + Cmd::none() + } else { + // Fetch projects and open picker asynchronously. + Cmd::task(move || { + // The actual DB query runs in the task; for now, open + // immediately with cached projects if available. + Msg::ScopeProjectsLoaded { projects: vec![] } + }) + } + } "move_down" | "move_up" | "select_item" | "focus_filter" | "scroll_to_top" => { // Screen-specific actions — delegated in future phases.
Cmd::none() @@ -431,14 +446,37 @@ impl LoreApp { Cmd::none() } - // --- Sync lifecycle (Bootstrap auto-transition) --- + // --- Sync lifecycle --- Msg::SyncStarted => { + self.state.sync.start(); if *self.navigation.current() == Screen::Bootstrap { self.state.bootstrap.sync_started = true; } Cmd::none() } - Msg::SyncCompleted { .. } => { + Msg::SyncProgress { + stage, + current, + total, + } => { + self.state.sync.update_progress(&stage, current, total); + Cmd::none() + } + Msg::SyncProgressBatch { stage, batch_size } => { + self.state.sync.update_batch(&stage, batch_size); + Cmd::none() + } + Msg::SyncLogLine(line) => { + self.state.sync.add_log_line(line); + Cmd::none() + } + Msg::SyncBackpressureDrop => { + // Silently drop — the coalescer already handles throttling. + Cmd::none() + } + Msg::SyncCompleted { elapsed_ms } => { + self.state.sync.complete(elapsed_ms); + // If we came from Bootstrap, replace nav history with Dashboard. if *self.navigation.current() == Screen::Bootstrap { self.state.bootstrap.sync_started = false; @@ -456,6 +494,18 @@ impl LoreApp { } Cmd::none() } + Msg::SyncCancelled => { + self.state.sync.cancel(); + Cmd::none() + } + Msg::SyncFailed(err) => { + self.state.sync.fail(err); + Cmd::none() + } + Msg::SyncStreamStats { bytes, items } => { + self.state.sync.update_stream_stats(bytes, items); + Cmd::none() + } // --- Who screen --- Msg::WhoResultLoaded { generation, result } => { @@ -511,6 +561,56 @@ impl LoreApp { Cmd::none() } + // --- Doctor --- + Msg::DoctorLoaded { checks } => { + self.state.doctor.apply_checks(checks); + self.state.set_loading(Screen::Doctor, LoadState::Idle); + Cmd::none() + } + + // --- Stats --- + Msg::StatsLoaded { data } => { + self.state.stats.apply_data(data); + self.state.set_loading(Screen::Stats, LoadState::Idle); + Cmd::none() + } + + // --- Timeline --- + Msg::TimelineLoaded { generation, events } => { + if self + .supervisor + .is_current(&TaskKey::LoadScreen(Screen::Timeline), generation) + { + 
self.state.timeline.apply_results(generation, events); + self.state.set_loading(Screen::Timeline, LoadState::Idle); + self.supervisor + .complete(&TaskKey::LoadScreen(Screen::Timeline), generation); + } + Cmd::none() + } + + // --- Search --- + Msg::SearchExecuted { generation, results } => { + if self + .supervisor + .is_current(&TaskKey::LoadScreen(Screen::Search), generation) + { + self.state.search.apply_results(generation, results); + self.state.set_loading(Screen::Search, LoadState::Idle); + self.supervisor + .complete(&TaskKey::LoadScreen(Screen::Search), generation); + } + Cmd::none() + } + + // --- Scope --- + Msg::ScopeProjectsLoaded { projects } => { + self.state + .scope_picker + .open(projects, &self.state.global_scope); + Cmd::none() + } + // All other message variants: no-op for now. // Future phases will fill these in as screens are implemented. _ => Cmd::none(), diff --git a/crates/lore-tui/src/commands/mod.rs b/crates/lore-tui/src/commands/mod.rs index 4216c36..485ad79 100644 --- a/crates/lore-tui/src/commands/mod.rs +++ b/crates/lore-tui/src/commands/mod.rs @@ -112,7 +112,7 @@ mod tests { let cmd = reg.complete_sequence( &KeyCode::Char('g'), &Modifiers::NONE, - &KeyCode::Char('x'), + &KeyCode::Char('z'), &Modifiers::NONE, &Screen::Dashboard, ); diff --git a/crates/lore-tui/src/commands/registry.rs b/crates/lore-tui/src/commands/registry.rs index c1e2556..e6aa37e 100644 --- a/crates/lore-tui/src/commands/registry.rs +++ b/crates/lore-tui/src/commands/registry.rs @@ -213,6 +213,16 @@ pub fn build_registry() -> CommandRegistry { available_in: ScreenFilter::Global, available_in_text_mode: false, }, + CommandDef { + id: "toggle_scope", + label: "Project Scope", + keybinding: Some(KeyCombo::key(KeyCode::Char('P'))), + cli_equivalent: None, + help_text: "Toggle project scope filter", + status_hint: "P:scope", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, // --- Navigation: g-prefix sequences --- CommandDef { id: "go_home", 
@@ -284,6 +294,46 @@ pub fn build_registry() -> CommandRegistry { available_in: ScreenFilter::Global, available_in_text_mode: false, }, + CommandDef { + id: "go_file_history", + label: "Go to File History", + keybinding: Some(KeyCombo::g_then('f')), + cli_equivalent: Some("lore file-history"), + help_text: "Jump to file history", + status_hint: "gf:files", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + CommandDef { + id: "go_trace", + label: "Go to Trace", + keybinding: Some(KeyCombo::g_then('r')), + cli_equivalent: Some("lore trace"), + help_text: "Jump to trace", + status_hint: "gr:trace", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + CommandDef { + id: "go_doctor", + label: "Go to Doctor", + keybinding: Some(KeyCombo::g_then('d')), + cli_equivalent: Some("lore doctor"), + help_text: "Jump to environment health checks", + status_hint: "gd:doctor", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + CommandDef { + id: "go_stats", + label: "Go to Stats", + keybinding: Some(KeyCombo::g_then('x')), + cli_equivalent: Some("lore stats"), + help_text: "Jump to database statistics", + status_hint: "gx:stats", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, // --- Vim-style jump list --- CommandDef { id: "jump_back", diff --git a/crates/lore-tui/src/entity_cache.rs b/crates/lore-tui/src/entity_cache.rs index b76f299..3cc7845 100644 --- a/crates/lore-tui/src/entity_cache.rs +++ b/crates/lore-tui/src/entity_cache.rs @@ -72,15 +72,14 @@ impl EntityCache { } // Evict LRU if at capacity. 
- if self.entries.len() >= self.capacity { - if let Some(lru_key) = self + if self.entries.len() >= self.capacity + && let Some(lru_key) = self .entries .iter() .min_by_key(|(_, (_, t))| *t) .map(|(k, _)| k.clone()) - { - self.entries.remove(&lru_key); - } + { + self.entries.remove(&lru_key); } self.entries.insert(key, (value, tick)); diff --git a/crates/lore-tui/src/instance_lock.rs b/crates/lore-tui/src/instance_lock.rs new file mode 100644 index 0000000..7b05b63 --- /dev/null +++ b/crates/lore-tui/src/instance_lock.rs @@ -0,0 +1,202 @@ +//! Single-instance advisory lock for the TUI. +//! +//! Prevents concurrent `lore-tui` launches from corrupting state. +//! Uses an advisory lock file with PID. Stale locks (dead PID) are +//! automatically recovered. + +use std::fs; +use std::io::Write; +use std::path::{Path, PathBuf}; + +/// Advisory lock preventing concurrent TUI launches. +/// +/// On `acquire()`, writes the current PID to the lock file. +/// On `Drop`, removes the lock file (best-effort). +#[derive(Debug)] +pub struct InstanceLock { + path: PathBuf, +} + +/// Error returned when another instance is already running. +#[derive(Debug)] +pub struct LockConflict { + pub pid: u32, + pub path: PathBuf, +} + +impl std::fmt::Display for LockConflict { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + write!( + f, + "Another lore-tui instance is running (PID {}). Lock file: {}", + self.pid, + self.path.display() + ) + } +} + +impl std::error::Error for LockConflict {} + +impl InstanceLock { + /// Try to acquire the instance lock. + /// + /// - If the lock file doesn't exist, creates it with our PID. + /// - If the lock file exists with a live PID, returns `LockConflict`. + /// - If the lock file exists with a dead PID, removes the stale lock and acquires. + pub fn acquire(lock_dir: &Path) -> Result> { + // Ensure lock directory exists. + fs::create_dir_all(lock_dir)?; + + let path = lock_dir.join("tui.lock"); + + // Check for existing lock. 
+ if path.exists() { + let contents = fs::read_to_string(&path).unwrap_or_default(); + if let Ok(pid) = contents.trim().parse::() + && is_process_alive(pid) + { + return Err(Box::new(LockConflict { + pid, + path: path.clone(), + })); + } + // Stale lock — PID is dead, or corrupt file. Remove and re-acquire. + fs::remove_file(&path)?; + } + + // Write our PID. + let mut file = fs::File::create(&path)?; + write!(file, "{}", std::process::id())?; + file.sync_all()?; + + Ok(Self { path }) + } + + /// Path to the lock file. + #[must_use] + pub fn path(&self) -> &Path { + &self.path + } +} + +impl Drop for InstanceLock { + fn drop(&mut self) { + // Best-effort cleanup. If it fails, the stale lock will be + // recovered on next launch via the dead-PID check. + let _ = fs::remove_file(&self.path); + } +} + +/// Check whether a process with the given PID is alive. +/// +/// Uses `kill -0 ` on Unix (exit 0 = alive, non-zero = dead). +/// On non-Unix, conservatively assumes alive. +#[cfg(unix)] +fn is_process_alive(pid: u32) -> bool { + std::process::Command::new("kill") + .args(["-0", &pid.to_string()]) + .stdout(std::process::Stdio::null()) + .stderr(std::process::Stdio::null()) + .status() + .is_ok_and(|s| s.success()) +} + +#[cfg(not(unix))] +fn is_process_alive(_pid: u32) -> bool { + // Conservative fallback: assume alive. + true +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_acquire_and_release() { + let dir = tempfile::tempdir().unwrap(); + let lock_path = dir.path().join("tui.lock"); + + { + let _lock = InstanceLock::acquire(dir.path()).unwrap(); + assert!(lock_path.exists()); + + // Lock file should contain our PID. 
+ let contents = fs::read_to_string(&lock_path).unwrap(); + assert_eq!(contents, format!("{}", std::process::id())); + } + // After drop, lock file should be removed. + assert!(!lock_path.exists()); + } + + #[test] + fn test_double_acquire_fails() { + let dir = tempfile::tempdir().unwrap(); + let _lock = InstanceLock::acquire(dir.path()).unwrap(); + + // Second acquire should fail because our PID is still alive. + let result = InstanceLock::acquire(dir.path()); + assert!(result.is_err()); + + let err = result.unwrap_err(); + let conflict = err.downcast_ref::().unwrap(); + assert_eq!(conflict.pid, std::process::id()); + } + + #[test] + fn test_stale_lock_recovery() { + let dir = tempfile::tempdir().unwrap(); + let lock_path = dir.path().join("tui.lock"); + + // Write a lock file with a dead PID (PID 1 is init, but PID 99999999 + // almost certainly doesn't exist). + let dead_pid = 99_999_999u32; + fs::write(&lock_path, dead_pid.to_string()).unwrap(); + + // Should succeed — stale lock is recovered. + let _lock = InstanceLock::acquire(dir.path()).unwrap(); + assert!(lock_path.exists()); + + // Lock file now contains our PID, not the dead one. + let contents = fs::read_to_string(&lock_path).unwrap(); + assert_eq!(contents, format!("{}", std::process::id())); + } + + #[test] + fn test_corrupt_lock_file_recovered() { + let dir = tempfile::tempdir().unwrap(); + let lock_path = dir.path().join("tui.lock"); + + // Write garbage to the lock file. + fs::write(&lock_path, "not-a-pid").unwrap(); + + // Should succeed — corrupt lock is treated as stale. 
+ let lock = InstanceLock::acquire(dir.path()).unwrap(); + let contents = fs::read_to_string(lock.path()).unwrap(); + assert_eq!(contents, format!("{}", std::process::id())); + } + + #[test] + fn test_creates_lock_directory() { + let dir = tempfile::tempdir().unwrap(); + let nested = dir.path().join("a").join("b").join("c"); + + let lock = InstanceLock::acquire(&nested).unwrap(); + assert!(nested.join("tui.lock").exists()); + drop(lock); + } + + #[test] + fn test_lock_conflict_display() { + let conflict = LockConflict { + pid: 12345, + path: PathBuf::from("/tmp/tui.lock"), + }; + let msg = format!("{conflict}"); + assert!(msg.contains("12345")); + assert!(msg.contains("/tmp/tui.lock")); + } +} diff --git a/crates/lore-tui/src/lib.rs b/crates/lore-tui/src/lib.rs index fe277e1..8dcf5eb 100644 --- a/crates/lore-tui/src/lib.rs +++ b/crates/lore-tui/src/lib.rs @@ -34,6 +34,12 @@ pub mod filter_dsl; // Filter DSL tokenizer for list screen filter bars (bd-18qs // Phase 4 modules. pub mod entity_cache; // Bounded LRU entity cache for detail view reopens (bd-2og9) pub mod render_cache; // Bounded render cache for expensive per-frame computations (bd-2og9) +pub mod scope; // Global scope context: SQL helpers + project listing (bd-1ser) + +// Phase 5 modules. +pub mod instance_lock; // Single-instance advisory lock for TUI (bd-3h00) +pub mod session; // Session state persistence: save/load/quarantine (bd-3h00) +pub mod text_width; // Unicode-aware text width measurement + truncation (bd-3h00) /// Options controlling how the TUI launches. #[derive(Debug, Clone)] diff --git a/crates/lore-tui/src/message.rs b/crates/lore-tui/src/message.rs index 833db5d..0052a24 100644 --- a/crates/lore-tui/src/message.rs +++ b/crates/lore-tui/src/message.rs @@ -307,6 +307,22 @@ pub enum Msg { paths: Vec, }, + // --- Scope --- + /// Projects loaded for the scope picker. 
+ ScopeProjectsLoaded { + projects: Vec, + }, + + // --- Doctor --- + DoctorLoaded { + checks: Vec, + }, + + // --- Stats --- + StatsLoaded { + data: crate::state::stats::StatsData, + }, + // --- Sync --- SyncStarted, SyncProgress { @@ -397,6 +413,9 @@ impl Msg { Self::TraceKnownPathsLoaded { .. } => "TraceKnownPathsLoaded", Self::FileHistoryLoaded { .. } => "FileHistoryLoaded", Self::FileHistoryKnownPathsLoaded { .. } => "FileHistoryKnownPathsLoaded", + Self::ScopeProjectsLoaded { .. } => "ScopeProjectsLoaded", + Self::DoctorLoaded { .. } => "DoctorLoaded", + Self::StatsLoaded { .. } => "StatsLoaded", Self::SyncStarted => "SyncStarted", Self::SyncProgress { .. } => "SyncProgress", Self::SyncProgressBatch { .. } => "SyncProgressBatch", diff --git a/crates/lore-tui/src/render_cache.rs b/crates/lore-tui/src/render_cache.rs index 1a505a4..68a9e96 100644 --- a/crates/lore-tui/src/render_cache.rs +++ b/crates/lore-tui/src/render_cache.rs @@ -87,15 +87,14 @@ impl RenderCache { return; } - if self.entries.len() >= self.capacity { - if let Some(oldest_key) = self + if self.entries.len() >= self.capacity + && let Some(oldest_key) = self .entries .iter() .min_by_key(|(_, (_, t))| *t) .map(|(k, _)| *k) - { - self.entries.remove(&oldest_key); - } + { + self.entries.remove(&oldest_key); } self.entries.insert(key, (value, tick)); diff --git a/crates/lore-tui/src/scope.rs b/crates/lore-tui/src/scope.rs new file mode 100644 index 0000000..a9fa05d --- /dev/null +++ b/crates/lore-tui/src/scope.rs @@ -0,0 +1,155 @@ +//! Global scope context helpers: SQL fragment generation and project listing. +//! +//! The [`ScopeContext`] struct lives in [`state::mod`] and holds the active +//! project filter. This module provides: +//! +//! - [`scope_filter_sql`] — generates a SQL WHERE clause fragment +//! - [`fetch_projects`] — lists available projects for the scope picker +//! +//! Action functions already accept `project_id: Option` — callers pass +//! `scope.project_id` directly. 
The helpers here are for screens that build +//! custom SQL or need the project list for UI. + +use anyhow::{Context, Result}; +use rusqlite::Connection; + +/// Project metadata for the scope picker overlay. +#[derive(Debug, Clone)] +pub struct ProjectInfo { + /// Internal database ID (projects.id). + pub id: i64, + /// GitLab path (e.g., "group/repo"). + pub path: String, +} + +/// Generate a SQL WHERE clause fragment that filters by project_id. +/// +/// Returns an empty string for `None` (all projects), or +/// `" AND {table_alias}.project_id = {id}"` for `Some(id)`. +/// +/// The leading `AND` makes it safe to append to an existing WHERE clause. +/// +/// # Examples +/// +/// ```ignore +/// let filter = scope_filter_sql(Some(42), "mr"); +/// assert_eq!(filter, " AND mr.project_id = 42"); +/// +/// let filter = scope_filter_sql(None, "mr"); +/// assert_eq!(filter, ""); +/// ``` +#[must_use] +pub fn scope_filter_sql(project_id: Option, table_alias: &str) -> String { + match project_id { + Some(id) => format!(" AND {table_alias}.project_id = {id}"), + None => String::new(), + } +} + +/// Fetch all projects from the database for the scope picker. +/// +/// Returns projects sorted by path. Used to populate the scope picker +/// overlay when the user presses `P`. +pub fn fetch_projects(conn: &Connection) -> Result> { + let mut stmt = conn + .prepare("SELECT id, path_with_namespace FROM projects ORDER BY path_with_namespace") + .context("preparing projects query")?; + + let projects = stmt + .query_map([], |row| { + Ok(ProjectInfo { + id: row.get(0)?, + path: row.get(1)?, + }) + }) + .context("querying projects")? 
+ .filter_map(std::result::Result::ok) + .collect(); + + Ok(projects) +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_scope_filter_sql_none_returns_empty() { + let sql = scope_filter_sql(None, "mr"); + assert_eq!(sql, ""); + } + + #[test] + fn test_scope_filter_sql_some_returns_and_clause() { + let sql = scope_filter_sql(Some(42), "mr"); + assert_eq!(sql, " AND mr.project_id = 42"); + } + + #[test] + fn test_scope_filter_sql_different_alias() { + let sql = scope_filter_sql(Some(7), "mfc"); + assert_eq!(sql, " AND mfc.project_id = 7"); + } + + #[test] + fn test_fetch_projects_empty_db() { + let conn = Connection::open_in_memory().unwrap(); + conn.execute_batch( + "CREATE TABLE projects ( + id INTEGER PRIMARY KEY, + gitlab_project_id INTEGER UNIQUE NOT NULL, + path_with_namespace TEXT NOT NULL + )", + ) + .unwrap(); + + let projects = fetch_projects(&conn).unwrap(); + assert!(projects.is_empty()); + } + + #[test] + fn test_fetch_projects_returns_sorted() { + let conn = Connection::open_in_memory().unwrap(); + conn.execute_batch( + "CREATE TABLE projects ( + id INTEGER PRIMARY KEY, + gitlab_project_id INTEGER UNIQUE NOT NULL, + path_with_namespace TEXT NOT NULL + )", + ) + .unwrap(); + + conn.execute( + "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'z-group/repo')", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (2, 200, 'a-group/repo')", + [], + ) + .unwrap(); + + let projects = fetch_projects(&conn).unwrap(); + assert_eq!(projects.len(), 2); + assert_eq!(projects[0].path, "a-group/repo"); + assert_eq!(projects[0].id, 2); + assert_eq!(projects[1].path, "z-group/repo"); + assert_eq!(projects[1].id, 1); + } + + #[test] + fn test_scope_filter_sql_composable_in_query() { + // 
Verify the fragment works when embedded in a full SQL statement. + let project_id = Some(5); + let filter = scope_filter_sql(project_id, "mr"); + let sql = format!( + "SELECT * FROM merge_requests mr WHERE mr.state = 'merged'{filter} ORDER BY mr.updated_at" + ); + assert!(sql.contains("AND mr.project_id = 5")); + } +} diff --git a/crates/lore-tui/src/session.rs b/crates/lore-tui/src/session.rs new file mode 100644 index 0000000..0d75183 --- /dev/null +++ b/crates/lore-tui/src/session.rs @@ -0,0 +1,406 @@ +//! Session state persistence — save on quit, restore on launch. +//! +//! Enables the TUI to resume where the user left off: current screen, +//! navigation history, filter state, scroll positions. +//! +//! ## File format +//! +//! `session.json` is a versioned JSON blob with a CRC32 checksum appended +//! as the last 8 hex characters. Writes are atomic (tmp → fsync → rename). +//! Corrupt files are quarantined, not deleted. + +use std::fs; +use std::io::Write; +use std::path::Path; + +use serde::{Deserialize, Serialize}; + +/// Maximum session file size (1 MB). Files larger than this are rejected. +const MAX_SESSION_SIZE: u64 = 1_024 * 1_024; + +/// Current session format version. Bump when the schema changes. +const SESSION_VERSION: u32 = 1; + +// --------------------------------------------------------------------------- +// Persisted screen (decoupled from message::Screen) +// --------------------------------------------------------------------------- + +/// Lightweight screen identifier for serialization. +/// +/// Decoupled from `message::Screen` so session persistence doesn't require +/// `Serialize`/`Deserialize` on core types. 
+#[derive(Debug, Clone, Serialize, Deserialize, PartialEq, Eq)] +#[serde(tag = "kind")] +pub enum PersistedScreen { + Dashboard, + IssueList, + IssueDetail { project_id: i64, iid: i64 }, + MrList, + MrDetail { project_id: i64, iid: i64 }, + Search, + Timeline, + Who, + Trace, + FileHistory, + Sync, + Stats, + Doctor, +} + +// --------------------------------------------------------------------------- +// Session state +// --------------------------------------------------------------------------- + +/// Versioned session state persisted to disk. +#[derive(Debug, Clone, Serialize, Deserialize, PartialEq)] +pub struct SessionState { + /// Format version for migration. + pub version: u32, + /// Screen to restore on launch. + pub current_screen: PersistedScreen, + /// Navigation history (back stack). + pub nav_history: Vec, + /// Per-screen filter text (screen name -> filter string). + pub filters: Vec<(String, String)>, + /// Per-screen scroll offset (screen name -> offset). + pub scroll_offsets: Vec<(String, u16)>, + /// Global scope project path filter (if set). + pub global_scope: Option, +} + +impl Default for SessionState { + fn default() -> Self { + Self { + version: SESSION_VERSION, + current_screen: PersistedScreen::Dashboard, + nav_history: Vec::new(), + filters: Vec::new(), + scroll_offsets: Vec::new(), + global_scope: None, + } + } +} + +// --------------------------------------------------------------------------- +// Save / Load +// --------------------------------------------------------------------------- + +/// Save session state atomically. +/// +/// Writes to a temp file, fsyncs, appends CRC32 checksum, then renames +/// over the target path. This prevents partial writes on crash. +pub fn save_session(state: &SessionState, path: &Path) -> Result<(), SessionError> { + // Ensure parent directory exists. 
+ if let Some(parent) = path.parent() { + fs::create_dir_all(parent).map_err(|e| SessionError::Io(e.to_string()))?; + } + + let json = serde_json::to_string_pretty(state) + .map_err(|e| SessionError::Serialize(e.to_string()))?; + + // Check size before writing. + if json.len() as u64 > MAX_SESSION_SIZE { + return Err(SessionError::TooLarge { + size: json.len() as u64, + max: MAX_SESSION_SIZE, + }); + } + + // Compute CRC32 over the JSON payload. + let checksum = crc32fast::hash(json.as_bytes()); + let payload = format!("{json}\n{checksum:08x}"); + + // Write to temp file, fsync, rename. + let tmp_path = path.with_extension("tmp"); + let mut file = + fs::File::create(&tmp_path).map_err(|e| SessionError::Io(e.to_string()))?; + file.write_all(payload.as_bytes()) + .map_err(|e| SessionError::Io(e.to_string()))?; + file.sync_all() + .map_err(|e| SessionError::Io(e.to_string()))?; + drop(file); + + fs::rename(&tmp_path, path).map_err(|e| SessionError::Io(e.to_string()))?; + + Ok(()) +} + +/// Load session state from disk. +/// +/// Validates CRC32 checksum. On corruption, quarantines the file and +/// returns `SessionError::Corrupt`. +pub fn load_session(path: &Path) -> Result { + if !path.exists() { + return Err(SessionError::NotFound); + } + + // Check file size before reading. + let metadata = fs::metadata(path).map_err(|e| SessionError::Io(e.to_string()))?; + if metadata.len() > MAX_SESSION_SIZE { + quarantine(path)?; + return Err(SessionError::TooLarge { + size: metadata.len(), + max: MAX_SESSION_SIZE, + }); + } + + let raw = fs::read_to_string(path).map_err(|e| SessionError::Io(e.to_string()))?; + + // Split: everything before the last newline is JSON, after is the checksum. + let (json, checksum_hex) = raw + .rsplit_once('\n') + .ok_or_else(|| SessionError::Corrupt("no checksum separator".into()))?; + + // Validate checksum. 
+ let expected = u32::from_str_radix(checksum_hex.trim(), 16) + .map_err(|_| SessionError::Corrupt("invalid checksum hex".into()))?; + let actual = crc32fast::hash(json.as_bytes()); + if actual != expected { + quarantine(path)?; + return Err(SessionError::Corrupt(format!( + "CRC32 mismatch: expected {expected:08x}, got {actual:08x}" + ))); + } + + // Deserialize. + let state: SessionState = serde_json::from_str(json) + .map_err(|e| SessionError::Corrupt(format!("JSON parse error: {e}")))?; + + // Version check — future-proof: reject newer versions, accept current. + if state.version > SESSION_VERSION { + return Err(SessionError::Corrupt(format!( + "session version {} is newer than supported ({})", + state.version, SESSION_VERSION + ))); + } + + Ok(state) + } + + /// Move a corrupt session file to `.quarantine/` instead of deleting it. + fn quarantine(path: &Path) -> Result<(), SessionError> { + let quarantine_dir = path + .parent() + .unwrap_or(Path::new(".")) + .join(".quarantine"); + fs::create_dir_all(&quarantine_dir).map_err(|e| SessionError::Io(e.to_string()))?; + + let filename = path + .file_name() + .unwrap_or_default() + .to_string_lossy() + .to_string(); + let ts = chrono::Utc::now().format("%Y%m%d_%H%M%S"); + let quarantine_path = quarantine_dir.join(format!("{filename}.{ts}")); + + fs::rename(path, &quarantine_path).map_err(|e| SessionError::Io(e.to_string()))?; + + Ok(()) + } + + // --------------------------------------------------------------------------- + // Errors + // --------------------------------------------------------------------------- + + /// Session persistence errors. + #[derive(Debug, Clone, PartialEq)] + pub enum SessionError { + /// Session file not found (first launch). + NotFound, + /// File is corrupt (bad checksum, invalid JSON, etc.). + Corrupt(String), + /// File exceeds size limit. + TooLarge { size: u64, max: u64 }, + /// I/O error. + Io(String), + /// Serialization error.
+ Serialize(String), +} + +impl std::fmt::Display for SessionError { + fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result { + match self { + Self::NotFound => write!(f, "session file not found"), + Self::Corrupt(msg) => write!(f, "corrupt session: {msg}"), + Self::TooLarge { size, max } => { + write!(f, "session file too large ({size} bytes, max {max})") + } + Self::Io(msg) => write!(f, "session I/O error: {msg}"), + Self::Serialize(msg) => write!(f, "session serialization error: {msg}"), + } + } +} + +impl std::error::Error for SessionError {} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + fn sample_state() -> SessionState { + SessionState { + version: SESSION_VERSION, + current_screen: PersistedScreen::IssueList, + nav_history: vec![PersistedScreen::Dashboard], + filters: vec![("IssueList".into(), "bug".into())], + scroll_offsets: vec![("IssueList".into(), 5)], + global_scope: Some("group/project".into()), + } + } + + #[test] + fn test_session_roundtrip() { + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("session.json"); + + let state = sample_state(); + save_session(&state, &path).unwrap(); + + let loaded = load_session(&path).unwrap(); + assert_eq!(state, loaded); + } + + #[test] + fn test_session_default_roundtrip() { + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("session.json"); + + let state = SessionState::default(); + save_session(&state, &path).unwrap(); + + let loaded = load_session(&path).unwrap(); + assert_eq!(state, loaded); + } + + #[test] + fn test_session_not_found() { + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("nonexistent.json"); + + let result = load_session(&path); + assert_eq!(result.unwrap_err(), SessionError::NotFound); + } + + #[test] + fn test_session_corruption_detected() { + 
let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("session.json"); + + let state = sample_state(); + save_session(&state, &path).unwrap(); + + // Tamper with the file — modify a byte in the JSON section. + let raw = fs::read_to_string(&path).unwrap(); + let tampered = raw.replacen("IssueList", "MrList___", 1); + fs::write(&path, tampered).unwrap(); + + let result = load_session(&path); + assert!(matches!(result, Err(SessionError::Corrupt(_)))); + } + + #[test] + fn test_session_corruption_quarantines_file() { + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("session.json"); + + let state = sample_state(); + save_session(&state, &path).unwrap(); + + // Tamper with the checksum line. + let raw = fs::read_to_string(&path).unwrap(); + let tampered = format!("{}\ndeadbeef", raw.rsplit_once('\n').unwrap().0); + fs::write(&path, tampered).unwrap(); + + let _ = load_session(&path); + + // Original file should be gone. + assert!(!path.exists()); + + // Quarantine directory should contain the file. 
+ let quarantine_dir = dir.path().join(".quarantine"); + assert!(quarantine_dir.exists()); + let entries: Vec<_> = fs::read_dir(&quarantine_dir).unwrap().collect(); + assert_eq!(entries.len(), 1); + } + + #[test] + fn test_session_creates_parent_directory() { + let dir = tempfile::tempdir().unwrap(); + let nested = dir.path().join("a").join("b").join("session.json"); + + let state = SessionState::default(); + save_session(&state, &nested).unwrap(); + assert!(nested.exists()); + } + + #[test] + fn test_session_persisted_screen_variants() { + let screens = vec![ + PersistedScreen::Dashboard, + PersistedScreen::IssueList, + PersistedScreen::IssueDetail { + project_id: 1, + iid: 42, + }, + PersistedScreen::MrList, + PersistedScreen::MrDetail { + project_id: 2, + iid: 99, + }, + PersistedScreen::Search, + PersistedScreen::Timeline, + PersistedScreen::Who, + PersistedScreen::Trace, + PersistedScreen::FileHistory, + PersistedScreen::Sync, + PersistedScreen::Stats, + PersistedScreen::Doctor, + ]; + + for screen in screens { + let state = SessionState { + current_screen: screen.clone(), + ..SessionState::default() + }; + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("session.json"); + save_session(&state, &path).unwrap(); + let loaded = load_session(&path).unwrap(); + assert_eq!(state.current_screen, loaded.current_screen); + } + } + + #[test] + fn test_session_max_size_enforced() { + let state = SessionState { + filters: (0..100_000) + .map(|i| (format!("key_{i}"), "x".repeat(100))) + .collect(), + ..SessionState::default() + }; + + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("session.json"); + + let result = save_session(&state, &path); + assert!(matches!(result, Err(SessionError::TooLarge { .. 
}))); + } + + #[test] + fn test_session_atomic_write_no_partial() { + let dir = tempfile::tempdir().unwrap(); + let path = dir.path().join("session.json"); + let tmp_path = path.with_extension("tmp"); + + let state = sample_state(); + save_session(&state, &path).unwrap(); + + // After save, no tmp file should remain. + assert!(!tmp_path.exists()); + assert!(path.exists()); + } +} diff --git a/crates/lore-tui/src/state/doctor.rs b/crates/lore-tui/src/state/doctor.rs new file mode 100644 index 0000000..eb720eb --- /dev/null +++ b/crates/lore-tui/src/state/doctor.rs @@ -0,0 +1,199 @@ +#![allow(dead_code)] + +//! Doctor screen state — health check results. +//! +//! Displays a list of environment health checks with pass/warn/fail +//! indicators. Checks are synchronous (config, DB, projects, FTS) — +//! network checks (GitLab auth, Ollama) are not run from the TUI. + +// --------------------------------------------------------------------------- +// HealthStatus +// --------------------------------------------------------------------------- + +/// Status of a single health check. +#[derive(Debug, Clone, Copy, PartialEq, Eq)] +pub enum HealthStatus { + Pass, + Warn, + Fail, +} + +impl HealthStatus { + /// Human-readable label for display. + #[must_use] + pub fn label(self) -> &'static str { + match self { + Self::Pass => "PASS", + Self::Warn => "WARN", + Self::Fail => "FAIL", + } + } +} + +// --------------------------------------------------------------------------- +// HealthCheck +// --------------------------------------------------------------------------- + +/// A single health check result for display. +#[derive(Debug, Clone)] +pub struct HealthCheck { + /// Check category name (e.g., "Config", "Database"). + pub name: String, + /// Pass/warn/fail status. + pub status: HealthStatus, + /// Human-readable detail (e.g., path, version, count). 
+ pub detail: String, +} + +// --------------------------------------------------------------------------- +// DoctorState +// --------------------------------------------------------------------------- + +/// State for the Doctor screen. +#[derive(Debug, Default)] +pub struct DoctorState { + /// Health check results (empty until loaded). + pub checks: Vec, + /// Whether checks have been loaded at least once. + pub loaded: bool, +} + +impl DoctorState { + /// Apply loaded health check results. + pub fn apply_checks(&mut self, checks: Vec) { + self.checks = checks; + self.loaded = true; + } + + /// Overall status — worst status across all checks. + #[must_use] + pub fn overall_status(&self) -> HealthStatus { + if self.checks.iter().any(|c| c.status == HealthStatus::Fail) { + HealthStatus::Fail + } else if self.checks.iter().any(|c| c.status == HealthStatus::Warn) { + HealthStatus::Warn + } else { + HealthStatus::Pass + } + } + + /// Count of checks by status. + #[must_use] + pub fn count_by_status(&self, status: HealthStatus) -> usize { + self.checks.iter().filter(|c| c.status == status).count() + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + fn sample_checks() -> Vec { + vec![ + HealthCheck { + name: "Config".into(), + status: HealthStatus::Pass, + detail: "/home/user/.config/lore/config.json".into(), + }, + HealthCheck { + name: "Database".into(), + status: HealthStatus::Pass, + detail: "schema v12".into(), + }, + HealthCheck { + name: "Projects".into(), + status: HealthStatus::Warn, + detail: "0 projects configured".into(), + }, + HealthCheck { + name: "FTS Index".into(), + status: HealthStatus::Fail, + detail: "No documents indexed".into(), + }, + ] + } + + #[test] + fn test_default_state() { + let state = DoctorState::default(); + assert!(state.checks.is_empty()); + 
assert!(!state.loaded); + } + + #[test] + fn test_apply_checks() { + let mut state = DoctorState::default(); + state.apply_checks(sample_checks()); + assert!(state.loaded); + assert_eq!(state.checks.len(), 4); + } + + #[test] + fn test_overall_status_fail_wins() { + let mut state = DoctorState::default(); + state.apply_checks(sample_checks()); + assert_eq!(state.overall_status(), HealthStatus::Fail); + } + + #[test] + fn test_overall_status_all_pass() { + let mut state = DoctorState::default(); + state.apply_checks(vec![ + HealthCheck { + name: "Config".into(), + status: HealthStatus::Pass, + detail: "ok".into(), + }, + HealthCheck { + name: "Database".into(), + status: HealthStatus::Pass, + detail: "ok".into(), + }, + ]); + assert_eq!(state.overall_status(), HealthStatus::Pass); + } + + #[test] + fn test_overall_status_warn_without_fail() { + let mut state = DoctorState::default(); + state.apply_checks(vec![ + HealthCheck { + name: "Config".into(), + status: HealthStatus::Pass, + detail: "ok".into(), + }, + HealthCheck { + name: "Ollama".into(), + status: HealthStatus::Warn, + detail: "not running".into(), + }, + ]); + assert_eq!(state.overall_status(), HealthStatus::Warn); + } + + #[test] + fn test_overall_status_empty_is_pass() { + let state = DoctorState::default(); + assert_eq!(state.overall_status(), HealthStatus::Pass); + } + + #[test] + fn test_count_by_status() { + let mut state = DoctorState::default(); + state.apply_checks(sample_checks()); + assert_eq!(state.count_by_status(HealthStatus::Pass), 2); + assert_eq!(state.count_by_status(HealthStatus::Warn), 1); + assert_eq!(state.count_by_status(HealthStatus::Fail), 1); + } + + #[test] + fn test_health_status_labels() { + assert_eq!(HealthStatus::Pass.label(), "PASS"); + assert_eq!(HealthStatus::Warn.label(), "WARN"); + assert_eq!(HealthStatus::Fail.label(), "FAIL"); + } +} diff --git a/crates/lore-tui/src/state/file_history.rs b/crates/lore-tui/src/state/file_history.rs index 4b46fa6..6536444 100644 --- 
a/crates/lore-tui/src/state/file_history.rs +++ b/crates/lore-tui/src/state/file_history.rs @@ -4,6 +4,8 @@ //! Users enter a file path, toggle options (follow renames, merged only, //! show discussions), and browse a chronological MR list. +use crate::text_width::{next_char_boundary, prev_char_boundary}; + // --------------------------------------------------------------------------- // FileHistoryState // --------------------------------------------------------------------------- @@ -225,24 +227,6 @@ impl FileHistoryState { } } -/// Find the byte offset of the previous char boundary. -fn prev_char_boundary(s: &str, pos: usize) -> usize { - let mut i = pos.saturating_sub(1); - while i > 0 && !s.is_char_boundary(i) { - i -= 1; - } - i -} - -/// Find the byte offset of the next char boundary. -fn next_char_boundary(s: &str, pos: usize) -> usize { - let mut i = pos + 1; - while i < s.len() && !s.is_char_boundary(i) { - i += 1; - } - i -} - // --------------------------------------------------------------------------- // Tests // --------------------------------------------------------------------------- diff --git a/crates/lore-tui/src/state/mod.rs b/crates/lore-tui/src/state/mod.rs index 8d5383f..c88a753 100644 --- a/crates/lore-tui/src/state/mod.rs +++ b/crates/lore-tui/src/state/mod.rs @@ -16,15 +16,19 @@ pub mod bootstrap; pub mod command_palette; pub mod dashboard; +pub mod doctor; pub mod file_history; pub mod issue_detail; pub mod issue_list; pub mod mr_detail; pub mod mr_list; pub mod search; +pub mod stats; pub mod sync; +pub mod sync_delta_ledger; pub mod timeline; pub mod trace; +pub mod scope_picker; pub mod who; use std::collections::{HashMap, HashSet}; @@ -35,15 +39,18 @@ use crate::message::Screen; pub use bootstrap::BootstrapState; pub use command_palette::CommandPaletteState; pub use dashboard::DashboardState; +pub use doctor::DoctorState; pub use file_history::FileHistoryState; pub use issue_detail::IssueDetailState; pub use 
issue_list::IssueListState; pub use mr_detail::MrDetailState; pub use mr_list::MrListState; pub use search::SearchState; +pub use stats::StatsState; pub use sync::SyncState; pub use timeline::TimelineState; pub use trace::TraceState; +pub use scope_picker::ScopePickerState; pub use who::WhoState; // --------------------------------------------------------------------------- @@ -171,17 +178,20 @@ pub struct AppState { // Per-screen states. pub bootstrap: BootstrapState, pub dashboard: DashboardState, + pub doctor: DoctorState, pub issue_list: IssueListState, pub issue_detail: IssueDetailState, pub mr_list: MrListState, pub mr_detail: MrDetailState, pub search: SearchState, + pub stats: StatsState, pub timeline: TimelineState, pub who: WhoState, pub trace: TraceState, pub file_history: FileHistoryState, pub sync: SyncState, pub command_palette: CommandPaletteState, + pub scope_picker: ScopePickerState, // Cross-cutting state. pub global_scope: ScopeContext, diff --git a/crates/lore-tui/src/state/scope_picker.rs b/crates/lore-tui/src/state/scope_picker.rs new file mode 100644 index 0000000..33af80d --- /dev/null +++ b/crates/lore-tui/src/state/scope_picker.rs @@ -0,0 +1,234 @@ +//! Scope picker overlay state. +//! +//! The scope picker lets users filter all screens to a specific project. +//! It appears as a modal overlay when the user presses `P`. + +use crate::scope::ProjectInfo; +use crate::state::ScopeContext; + +/// State for the scope picker overlay. +#[derive(Debug, Default)] +pub struct ScopePickerState { + /// Available projects (populated on open). + pub projects: Vec, + /// Currently highlighted index (0 = "All Projects", 1..N = specific projects). + pub selected_index: usize, + /// Whether the picker overlay is visible. + pub visible: bool, + /// Scroll offset for long project lists. + pub scroll_offset: usize, +} + +/// Max visible rows in the picker before scrolling kicks in. 
+const MAX_VISIBLE_ROWS: usize = 15; + +impl ScopePickerState { + /// Open the picker with the given project list. + /// + /// Pre-selects the row matching the current scope, or "All Projects" (index 0) + /// if no project filter is active. + pub fn open(&mut self, projects: Vec, current_scope: &ScopeContext) { + self.projects = projects; + self.visible = true; + self.scroll_offset = 0; + + // Pre-select the currently active scope. + self.selected_index = match current_scope.project_id { + None => 0, // "All Projects" row + Some(id) => self + .projects + .iter() + .position(|p| p.id == id) + .map_or(0, |i| i + 1), // +1 because index 0 is "All Projects" + }; + + self.ensure_visible(); + } + + /// Close the picker without changing scope. + pub fn close(&mut self) { + self.visible = false; + } + + /// Move selection up. + pub fn select_prev(&mut self) { + if self.selected_index > 0 { + self.selected_index -= 1; + self.ensure_visible(); + } + } + + /// Move selection down. + pub fn select_next(&mut self) { + let max_index = self.projects.len(); // 0="All" + N projects + if self.selected_index < max_index { + self.selected_index += 1; + self.ensure_visible(); + } + } + + /// Confirm the current selection and return the new scope. + #[must_use] + pub fn confirm(&self) -> ScopeContext { + if self.selected_index == 0 { + // "All Projects" + ScopeContext { + project_id: None, + project_name: None, + } + } else { + let project = &self.projects[self.selected_index - 1]; + ScopeContext { + project_id: Some(project.id), + project_name: Some(project.path.clone()), + } + } + } + + /// Total number of rows (1 for "All" + project count). + #[must_use] + pub fn row_count(&self) -> usize { + 1 + self.projects.len() + } + + /// Ensure the selected index is within the visible scroll window. 
+ fn ensure_visible(&mut self) { + if self.selected_index < self.scroll_offset { + self.scroll_offset = self.selected_index; + } else if self.selected_index >= self.scroll_offset + MAX_VISIBLE_ROWS { + self.scroll_offset = self.selected_index.saturating_sub(MAX_VISIBLE_ROWS - 1); + } + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + fn sample_projects() -> Vec { + vec![ + ProjectInfo { + id: 1, + path: "alpha/repo".into(), + }, + ProjectInfo { + id: 2, + path: "beta/repo".into(), + }, + ProjectInfo { + id: 3, + path: "gamma/repo".into(), + }, + ] + } + + #[test] + fn test_open_no_scope_selects_all() { + let mut picker = ScopePickerState::default(); + let scope = ScopeContext::default(); + picker.open(sample_projects(), &scope); + + assert!(picker.visible); + assert_eq!(picker.selected_index, 0); // "All Projects" + assert_eq!(picker.projects.len(), 3); + } + + #[test] + fn test_open_with_scope_preselects_project() { + let mut picker = ScopePickerState::default(); + let scope = ScopeContext { + project_id: Some(2), + project_name: Some("beta/repo".into()), + }; + picker.open(sample_projects(), &scope); + + assert_eq!(picker.selected_index, 2); // index 1 in projects = index 2 in picker + } + + #[test] + fn test_select_prev_and_next() { + let mut picker = ScopePickerState::default(); + picker.open(sample_projects(), &ScopeContext::default()); + + picker.select_next(); + assert_eq!(picker.selected_index, 1); + picker.select_next(); + assert_eq!(picker.selected_index, 2); + picker.select_prev(); + assert_eq!(picker.selected_index, 1); + } + + #[test] + fn test_select_prev_at_zero_stays() { + let mut picker = ScopePickerState::default(); + picker.open(sample_projects(), &ScopeContext::default()); + + picker.select_prev(); + assert_eq!(picker.selected_index, 0); + } + + #[test] + fn 
test_select_next_at_max_stays() { + let mut picker = ScopePickerState::default(); + picker.open(sample_projects(), &ScopeContext::default()); + + // 4 total rows (All + 3 projects), max index = 3 + for _ in 0..10 { + picker.select_next(); + } + assert_eq!(picker.selected_index, 3); + } + + #[test] + fn test_confirm_all_projects() { + let mut picker = ScopePickerState::default(); + picker.open(sample_projects(), &ScopeContext::default()); + + let scope = picker.confirm(); + assert!(scope.project_id.is_none()); + assert!(scope.project_name.is_none()); + } + + #[test] + fn test_confirm_specific_project() { + let mut picker = ScopePickerState::default(); + picker.open(sample_projects(), &ScopeContext::default()); + + picker.select_next(); // index 1 = first project (alpha/repo, id=1) + let scope = picker.confirm(); + assert_eq!(scope.project_id, Some(1)); + assert_eq!(scope.project_name.as_deref(), Some("alpha/repo")); + } + + #[test] + fn test_close_hides_picker() { + let mut picker = ScopePickerState::default(); + picker.open(sample_projects(), &ScopeContext::default()); + assert!(picker.visible); + + picker.close(); + assert!(!picker.visible); + } + + #[test] + fn test_row_count() { + let mut picker = ScopePickerState::default(); + picker.open(sample_projects(), &ScopeContext::default()); + assert_eq!(picker.row_count(), 4); // "All" + 3 projects + } + + #[test] + fn test_open_with_unknown_project_selects_all() { + let mut picker = ScopePickerState::default(); + let scope = ScopeContext { + project_id: Some(999), // Not in list + project_name: Some("unknown".into()), + }; + picker.open(sample_projects(), &scope); + assert_eq!(picker.selected_index, 0); // Falls back to "All" + } +} diff --git a/crates/lore-tui/src/state/stats.rs b/crates/lore-tui/src/state/stats.rs new file mode 100644 index 0000000..d5d0a9a --- /dev/null +++ b/crates/lore-tui/src/state/stats.rs @@ -0,0 +1,153 @@ +#![allow(dead_code)] + +//! Stats screen state — database and index statistics. +//! 
+//! Shows entity counts, FTS coverage, embedding coverage, and queue +//! health. Data is produced by synchronous DB queries. + +// --------------------------------------------------------------------------- +// StatsData +// --------------------------------------------------------------------------- + +/// Database statistics for TUI display. +#[derive(Debug, Clone, Default)] +pub struct StatsData { + /// Total documents in the database. + pub total_documents: i64, + /// Issues stored. + pub issues: i64, + /// Merge requests stored. + pub merge_requests: i64, + /// Discussions stored. + pub discussions: i64, + /// Notes stored. + pub notes: i64, + /// Documents indexed in FTS. + pub fts_indexed: i64, + /// Documents with embeddings. + pub embedded_documents: i64, + /// Total embedding chunks. + pub total_chunks: i64, + /// Embedding coverage percentage (0.0–100.0). + pub coverage_pct: f64, + /// Pending queue items (dirty sources). + pub queue_pending: i64, + /// Failed queue items. + pub queue_failed: i64, +} + +impl StatsData { + /// FTS coverage percentage relative to total documents. + #[must_use] + pub fn fts_coverage_pct(&self) -> f64 { + if self.total_documents == 0 { + 0.0 + } else { + (self.fts_indexed as f64 / self.total_documents as f64) * 100.0 + } + } + + /// Whether there are pending queue items that need processing. + #[must_use] + pub fn has_queue_work(&self) -> bool { + self.queue_pending > 0 || self.queue_failed > 0 + } +} + +// --------------------------------------------------------------------------- +// StatsState +// --------------------------------------------------------------------------- + +/// State for the Stats screen. +#[derive(Debug, Default)] +pub struct StatsState { + /// Statistics data (None until loaded). + pub data: Option, + /// Whether data has been loaded at least once. + pub loaded: bool, +} + +impl StatsState { + /// Apply loaded stats data. 
+ pub fn apply_data(&mut self, data: StatsData) { + self.data = Some(data); + self.loaded = true; + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + fn sample_stats() -> StatsData { + StatsData { + total_documents: 500, + issues: 200, + merge_requests: 150, + discussions: 100, + notes: 50, + fts_indexed: 450, + embedded_documents: 300, + total_chunks: 1200, + coverage_pct: 60.0, + queue_pending: 5, + queue_failed: 1, + } + } + + #[test] + fn test_default_state() { + let state = StatsState::default(); + assert!(state.data.is_none()); + assert!(!state.loaded); + } + + #[test] + fn test_apply_data() { + let mut state = StatsState::default(); + state.apply_data(sample_stats()); + assert!(state.loaded); + assert!(state.data.is_some()); + } + + #[test] + fn test_fts_coverage_pct() { + let stats = sample_stats(); + let pct = stats.fts_coverage_pct(); + assert!((pct - 90.0).abs() < 0.01); // 450/500 = 90% + } + + #[test] + fn test_fts_coverage_pct_zero_documents() { + let stats = StatsData::default(); + assert_eq!(stats.fts_coverage_pct(), 0.0); + } + + #[test] + fn test_has_queue_work() { + let stats = sample_stats(); + assert!(stats.has_queue_work()); + } + + #[test] + fn test_no_queue_work() { + let stats = StatsData { + queue_pending: 0, + queue_failed: 0, + ..sample_stats() + }; + assert!(!stats.has_queue_work()); + } + + #[test] + fn test_stats_data_default() { + let stats = StatsData::default(); + assert_eq!(stats.total_documents, 0); + assert_eq!(stats.issues, 0); + assert_eq!(stats.coverage_pct, 0.0); + } +} diff --git a/crates/lore-tui/src/state/sync.rs b/crates/lore-tui/src/state/sync.rs index e4c53b3..1f5b531 100644 --- a/crates/lore-tui/src/state/sync.rs +++ b/crates/lore-tui/src/state/sync.rs @@ -1,15 +1,597 @@ #![allow(dead_code)] -//! Sync screen state. +//! 
Sync screen state: progress tracking, coalescing, and summary. +//! +//! The sync screen shows real-time progress during data synchronization +//! and transitions to a summary view when complete. A progress coalescer +//! prevents render thrashing from rapid progress updates. + +use std::time::Instant; + +// --------------------------------------------------------------------------- +// Sync lanes (entity types being synced) +// --------------------------------------------------------------------------- + +/// Sync entity types that progress is tracked for. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Hash)] +pub enum SyncLane { + Issues, + MergeRequests, + Discussions, + Notes, + Events, + Statuses, +} + +impl SyncLane { + /// Human-readable label for this lane. + #[must_use] + pub fn label(self) -> &'static str { + match self { + Self::Issues => "Issues", + Self::MergeRequests => "MRs", + Self::Discussions => "Discussions", + Self::Notes => "Notes", + Self::Events => "Events", + Self::Statuses => "Statuses", + } + } + + /// All lanes in display order. + pub const ALL: &'static [SyncLane] = &[ + Self::Issues, + Self::MergeRequests, + Self::Discussions, + Self::Notes, + Self::Events, + Self::Statuses, + ]; +} + +// --------------------------------------------------------------------------- +// Per-lane progress +// --------------------------------------------------------------------------- + +/// Progress for a single sync lane. +#[derive(Debug, Clone, Default)] +pub struct LaneProgress { + /// Current items processed. + pub current: u64, + /// Total items expected (0 = unknown). + pub total: u64, + /// Whether this lane has completed. + pub done: bool, +} + +impl LaneProgress { + /// Fraction complete (0.0..=1.0). Returns 0.0 if total is unknown. 
+ #[must_use] + pub fn fraction(&self) -> f64 { + if self.total == 0 { + return 0.0; + } + (self.current as f64 / self.total as f64).clamp(0.0, 1.0) + } +} + +// --------------------------------------------------------------------------- +// Sync summary +// --------------------------------------------------------------------------- + +/// Per-entity-type change counts after sync completes. +#[derive(Debug, Clone, Default)] +pub struct EntityChangeCounts { + pub new: u64, + pub updated: u64, +} + +/// Summary of a completed sync run. +#[derive(Debug, Clone, Default)] +pub struct SyncSummary { + pub issues: EntityChangeCounts, + pub merge_requests: EntityChangeCounts, + pub discussions: EntityChangeCounts, + pub notes: EntityChangeCounts, + pub elapsed_ms: u64, + /// Per-project errors (project path -> error message). + pub project_errors: Vec<(String, String)>, +} + +impl SyncSummary { + /// Total number of changes across all entity types. + #[must_use] + pub fn total_changes(&self) -> u64 { + self.issues.new + + self.issues.updated + + self.merge_requests.new + + self.merge_requests.updated + + self.discussions.new + + self.discussions.updated + + self.notes.new + + self.notes.updated + } + + /// Whether any errors occurred during sync. + #[must_use] + pub fn has_errors(&self) -> bool { + !self.project_errors.is_empty() + } +} + +// --------------------------------------------------------------------------- +// Sync screen mode +// --------------------------------------------------------------------------- + +/// Display mode for the sync screen. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum SyncScreenMode { + /// Full-screen sync progress with per-lane bars. + #[default] + FullScreen, + /// Compact single-line progress for embedding in Bootstrap screen. 
+ Inline, +} + +// --------------------------------------------------------------------------- +// Sync phase +// --------------------------------------------------------------------------- + +/// Current phase of the sync operation. +#[derive(Debug, Clone, PartialEq, Eq, Default)] +pub enum SyncPhase { + /// Sync hasn't started yet. + #[default] + Idle, + /// Sync is running. + Running, + /// Sync completed successfully. + Complete, + /// Sync was cancelled by user. + Cancelled, + /// Sync failed with an error. + Failed(String), +} + +// --------------------------------------------------------------------------- +// Progress coalescer +// --------------------------------------------------------------------------- + +/// Batches rapid progress updates to prevent render thrashing. +/// +/// At most one update is emitted per `floor_ms`. Updates arriving faster +/// are coalesced — only the latest value survives. +#[derive(Debug)] +pub struct ProgressCoalescer { + /// Minimum interval between emitted updates. + floor_ms: u64, + /// Timestamp of the last emitted update. + last_emit: Option, + /// Number of updates coalesced (dropped) since last emit. + coalesced_count: u64, +} + +impl ProgressCoalescer { + /// Create a new coalescer with the given floor interval in milliseconds. + #[must_use] + pub fn new(floor_ms: u64) -> Self { + Self { + floor_ms, + last_emit: None, + coalesced_count: 0, + } + } + + /// Default coalescer with 100ms floor (10 updates/second max). + #[must_use] + pub fn default_floor() -> Self { + Self::new(100) + } + + /// Should this update be emitted? + /// + /// Returns `true` if enough time has elapsed since the last emit. + /// The caller should only render/process the update when this returns true. 
+ pub fn should_emit(&mut self) -> bool { + let now = Instant::now(); + match self.last_emit { + None => { + self.last_emit = Some(now); + self.coalesced_count = 0; + true + } + Some(last) => { + let elapsed_ms = now.duration_since(last).as_millis() as u64; + if elapsed_ms >= self.floor_ms { + self.last_emit = Some(now); + self.coalesced_count = 0; + true + } else { + self.coalesced_count += 1; + false + } + } + } + } + + /// Number of updates that have been coalesced since the last emit. + #[must_use] + pub fn coalesced_count(&self) -> u64 { + self.coalesced_count + } + + /// Reset the coalescer (e.g., when sync restarts). + pub fn reset(&mut self) { + self.last_emit = None; + self.coalesced_count = 0; + } +} + +// --------------------------------------------------------------------------- +// SyncState +// --------------------------------------------------------------------------- /// State for the sync progress/summary screen. -#[derive(Debug, Default)] +#[derive(Debug)] pub struct SyncState { + /// Current sync phase. + pub phase: SyncPhase, + /// Display mode (full screen vs inline). + pub mode: SyncScreenMode, + /// Per-lane progress (updated during Running phase). + pub lanes: [LaneProgress; 6], + /// Current stage label (e.g., "Fetching issues..."). pub stage: String, - pub current: u64, - pub total: u64, + /// Log lines from the sync process. pub log_lines: Vec, - pub completed: bool, - pub elapsed_ms: Option, - pub error: Option, + /// Stream throughput stats (items per second). + pub items_per_sec: f64, + /// Bytes synced. + pub bytes_synced: u64, + /// Total items synced. + pub items_synced: u64, + /// When the current sync run started (for throughput calculation). + pub started_at: Option, + /// Progress coalescer for render throttling. + pub coalescer: ProgressCoalescer, + /// Summary (populated after sync completes). + pub summary: Option, + /// Scroll offset for log lines view. 
+ pub log_scroll_offset: usize, +} + +impl Default for SyncState { + fn default() -> Self { + Self { + phase: SyncPhase::Idle, + mode: SyncScreenMode::FullScreen, + lanes: Default::default(), + stage: String::new(), + log_lines: Vec::new(), + items_per_sec: 0.0, + bytes_synced: 0, + items_synced: 0, + started_at: None, + coalescer: ProgressCoalescer::default_floor(), + summary: None, + log_scroll_offset: 0, + } + } +} + +impl SyncState { + /// Reset state for a new sync run. + pub fn start(&mut self) { + self.phase = SyncPhase::Running; + self.lanes = Default::default(); + self.stage.clear(); + self.log_lines.clear(); + self.items_per_sec = 0.0; + self.bytes_synced = 0; + self.items_synced = 0; + self.started_at = Some(Instant::now()); + self.coalescer.reset(); + self.summary = None; + self.log_scroll_offset = 0; + } + + /// Apply a progress update for a specific lane. + pub fn update_progress(&mut self, stage: &str, current: u64, total: u64) { + self.stage = stage.to_string(); + + // Map stage name to lane index. + if let Some(lane) = self.lane_for_stage(stage) { + lane.current = current; + lane.total = total; + } + } + + /// Apply a batch progress increment. + pub fn update_batch(&mut self, stage: &str, batch_size: u64) { + self.stage = stage.to_string(); + + if let Some(lane) = self.lane_for_stage(stage) { + lane.current += batch_size; + } + } + + /// Mark sync as completed with summary. + pub fn complete(&mut self, elapsed_ms: u64) { + self.phase = SyncPhase::Complete; + // Mark all lanes as done. + for lane in &mut self.lanes { + lane.done = true; + } + // Build summary from lane data if not already set. + if self.summary.is_none() { + self.summary = Some(SyncSummary { + elapsed_ms, + ..Default::default() + }); + } else if let Some(ref mut summary) = self.summary { + summary.elapsed_ms = elapsed_ms; + } + } + + /// Mark sync as cancelled. + pub fn cancel(&mut self) { + self.phase = SyncPhase::Cancelled; + } + + /// Mark sync as failed. 
+ pub fn fail(&mut self, error: String) { + self.phase = SyncPhase::Failed(error); + } + + /// Add a log line. + pub fn add_log_line(&mut self, line: String) { + self.log_lines.push(line); + // Auto-scroll to bottom. + if self.log_lines.len() > 1 { + self.log_scroll_offset = self.log_lines.len().saturating_sub(20); + } + } + + /// Update stream stats. + pub fn update_stream_stats(&mut self, bytes: u64, items: u64) { + self.bytes_synced = bytes; + self.items_synced = items; + // Compute actual throughput from elapsed time since sync start. + if items > 0 { + if let Some(started) = self.started_at { + let elapsed_secs = started.elapsed().as_secs_f64(); + if elapsed_secs > 0.0 { + self.items_per_sec = items as f64 / elapsed_secs; + } + } + } + } + + /// Whether sync is currently running. + #[must_use] + pub fn is_running(&self) -> bool { + self.phase == SyncPhase::Running + } + + /// Overall progress fraction (average of all lanes). + #[must_use] + pub fn overall_progress(&self) -> f64 { + let active_lanes: Vec<&LaneProgress> = + self.lanes.iter().filter(|l| l.total > 0).collect(); + if active_lanes.is_empty() { + return 0.0; + } + let sum: f64 = active_lanes.iter().map(|l| l.fraction()).sum(); + sum / active_lanes.len() as f64 + } + + /// Map a stage name to the corresponding lane. 
+ fn lane_for_stage(&mut self, stage: &str) -> Option<&mut LaneProgress> { + let lower = stage.to_lowercase(); + let idx = if lower.contains("issue") { + Some(0) + } else if lower.contains("merge") || lower.contains("mr") { + Some(1) + } else if lower.contains("discussion") { + Some(2) + } else if lower.contains("note") { + Some(3) + } else if lower.contains("event") { + Some(4) + } else if lower.contains("status") { + Some(5) + } else { + None + }; + idx.map(|i| &mut self.lanes[i]) + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use std::thread; + use std::time::Duration; + + #[test] + fn test_lane_progress_fraction() { + let lane = LaneProgress { + current: 50, + total: 100, + done: false, + }; + assert!((lane.fraction() - 0.5).abs() < f64::EPSILON); + } + + #[test] + fn test_lane_progress_fraction_zero_total() { + let lane = LaneProgress::default(); + assert!((lane.fraction()).abs() < f64::EPSILON); + } + + #[test] + fn test_sync_summary_total_changes() { + let summary = SyncSummary { + issues: EntityChangeCounts { new: 5, updated: 3 }, + merge_requests: EntityChangeCounts { new: 2, updated: 1 }, + ..Default::default() + }; + assert_eq!(summary.total_changes(), 11); + } + + #[test] + fn test_sync_summary_has_errors() { + let mut summary = SyncSummary::default(); + assert!(!summary.has_errors()); + + summary + .project_errors + .push(("grp/repo".into(), "timeout".into())); + assert!(summary.has_errors()); + } + + #[test] + fn test_sync_state_start_resets() { + let mut state = SyncState { + stage: "old".into(), + phase: SyncPhase::Complete, + ..SyncState::default() + }; + state.log_lines.push("old log".into()); + + state.start(); + + assert_eq!(state.phase, SyncPhase::Running); + assert!(state.stage.is_empty()); + assert!(state.log_lines.is_empty()); + } + + #[test] + fn 
test_sync_state_update_progress() { + let mut state = SyncState::default(); + state.start(); + + state.update_progress("Fetching issues", 10, 50); + assert_eq!(state.lanes[0].current, 10); + assert_eq!(state.lanes[0].total, 50); + assert_eq!(state.stage, "Fetching issues"); + } + + #[test] + fn test_sync_state_update_batch() { + let mut state = SyncState::default(); + state.start(); + + state.update_batch("MR processing", 5); + state.update_batch("MR processing", 3); + assert_eq!(state.lanes[1].current, 8); // MR lane + } + + #[test] + fn test_sync_state_complete() { + let mut state = SyncState::default(); + state.start(); + + state.complete(5000); + assert_eq!(state.phase, SyncPhase::Complete); + assert!(state.summary.is_some()); + assert_eq!(state.summary.as_ref().unwrap().elapsed_ms, 5000); + } + + #[test] + fn test_sync_state_overall_progress() { + let mut state = SyncState::default(); + state.start(); + + state.update_progress("issues", 50, 100); + state.update_progress("merge requests", 25, 100); + // Two active lanes: 0.5 and 0.25, average = 0.375 + assert!((state.overall_progress() - 0.375).abs() < 0.01); + } + + #[test] + fn test_sync_state_overall_progress_no_active_lanes() { + let state = SyncState::default(); + assert!((state.overall_progress()).abs() < f64::EPSILON); + } + + #[test] + fn test_progress_coalescer_first_always_emits() { + let mut coalescer = ProgressCoalescer::new(100); + assert!(coalescer.should_emit()); + } + + #[test] + fn test_progress_coalescer_rapid_updates_coalesced() { + let mut coalescer = ProgressCoalescer::new(100); + assert!(coalescer.should_emit()); // First always emits. + + // Rapid-fire updates within 100ms should be coalesced. + let mut emitted = 0; + for _ in 0..50 { + if coalescer.should_emit() { + emitted += 1; + } + } + // With ~0ms between calls, at most 0-1 additional emits expected. 
+ assert!( + emitted <= 1, + "Expected at most 1 emit, got {emitted}" + ); + } + + #[test] + fn test_progress_coalescer_emits_after_floor() { + let mut coalescer = ProgressCoalescer::new(50); + assert!(coalescer.should_emit()); + + // Wait longer than floor. + thread::sleep(Duration::from_millis(60)); + assert!(coalescer.should_emit()); + } + + #[test] + fn test_progress_coalescer_reset() { + let mut coalescer = ProgressCoalescer::new(100); + coalescer.should_emit(); + coalescer.should_emit(); // Coalesced. + + coalescer.reset(); + assert!(coalescer.should_emit()); // Fresh start. + } + + #[test] + fn test_sync_lane_labels() { + assert_eq!(SyncLane::Issues.label(), "Issues"); + assert_eq!(SyncLane::MergeRequests.label(), "MRs"); + assert_eq!(SyncLane::Notes.label(), "Notes"); + } + + #[test] + fn test_sync_state_add_log_line() { + let mut state = SyncState::default(); + state.add_log_line("line 1".into()); + state.add_log_line("line 2".into()); + assert_eq!(state.log_lines.len(), 2); + assert_eq!(state.log_lines[0], "line 1"); + } + + #[test] + fn test_sync_state_cancel() { + let mut state = SyncState::default(); + state.start(); + state.cancel(); + assert_eq!(state.phase, SyncPhase::Cancelled); + } + + #[test] + fn test_sync_state_fail() { + let mut state = SyncState::default(); + state.start(); + state.fail("network timeout".into()); + assert!(matches!(state.phase, SyncPhase::Failed(_))); + } } diff --git a/crates/lore-tui/src/state/sync_delta_ledger.rs b/crates/lore-tui/src/state/sync_delta_ledger.rs new file mode 100644 index 0000000..6d62d0c --- /dev/null +++ b/crates/lore-tui/src/state/sync_delta_ledger.rs @@ -0,0 +1,222 @@ +#![allow(dead_code)] + +//! Sync delta ledger — records entity changes during a sync run. +//! +//! After sync completes, the dashboard and list screens can query the +//! ledger to highlight "new since last sync" items. The ledger is +//! ephemeral (per-run, not persisted to disk). 
+
+use std::collections::HashSet;
+
+/// Kind of change that occurred to an entity during sync.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum ChangeKind {
+    New,
+    Updated,
+}
+
+/// Entity type for the ledger.
+#[derive(Debug, Clone, Copy, PartialEq, Eq)]
+pub enum LedgerEntityType {
+    Issue,
+    MergeRequest,
+    Discussion,
+    Note,
+}
+
+/// Per-run record of changed entity IDs during sync.
+///
+/// Used to highlight newly synced items in list/dashboard views.
+#[derive(Debug, Default)]
+pub struct SyncDeltaLedger {
+    pub new_issue_iids: HashSet<i64>,
+    pub updated_issue_iids: HashSet<i64>,
+    pub new_mr_iids: HashSet<i64>,
+    pub updated_mr_iids: HashSet<i64>,
+    pub new_discussion_count: u64,
+    pub updated_discussion_count: u64,
+    pub new_note_count: u64,
+}
+
+impl SyncDeltaLedger {
+    /// Record a change to an entity.
+    pub fn record_change(&mut self, entity_type: LedgerEntityType, iid: i64, kind: ChangeKind) {
+        match (entity_type, kind) {
+            (LedgerEntityType::Issue, ChangeKind::New) => {
+                self.new_issue_iids.insert(iid);
+            }
+            (LedgerEntityType::Issue, ChangeKind::Updated) => {
+                self.updated_issue_iids.insert(iid);
+            }
+            (LedgerEntityType::MergeRequest, ChangeKind::New) => {
+                self.new_mr_iids.insert(iid);
+            }
+            (LedgerEntityType::MergeRequest, ChangeKind::Updated) => {
+                self.updated_mr_iids.insert(iid);
+            }
+            (LedgerEntityType::Discussion, ChangeKind::New) => {
+                self.new_discussion_count += 1;
+            }
+            (LedgerEntityType::Discussion, ChangeKind::Updated) => {
+                self.updated_discussion_count += 1;
+            }
+            (LedgerEntityType::Note, ChangeKind::New) => {
+                self.new_note_count += 1;
+            }
+            (LedgerEntityType::Note, ChangeKind::Updated) => {
+                // Notes don't have a meaningful "updated" count.
+            }
+        }
+    }
+
+    /// Produce a summary of changes from this sync run.
+ #[must_use] + pub fn summary(&self) -> super::sync::SyncSummary { + use super::sync::{EntityChangeCounts, SyncSummary}; + SyncSummary { + issues: EntityChangeCounts { + new: self.new_issue_iids.len() as u64, + updated: self.updated_issue_iids.len() as u64, + }, + merge_requests: EntityChangeCounts { + new: self.new_mr_iids.len() as u64, + updated: self.updated_mr_iids.len() as u64, + }, + discussions: EntityChangeCounts { + new: self.new_discussion_count, + updated: self.updated_discussion_count, + }, + notes: EntityChangeCounts { + new: self.new_note_count, + updated: 0, + }, + ..Default::default() + } + } + + /// Whether any entity was an issue IID that was newly added in this sync. + #[must_use] + pub fn is_new_issue(&self, iid: i64) -> bool { + self.new_issue_iids.contains(&iid) + } + + /// Whether any entity was an MR IID that was newly added in this sync. + #[must_use] + pub fn is_new_mr(&self, iid: i64) -> bool { + self.new_mr_iids.contains(&iid) + } + + /// Total changes recorded. + #[must_use] + pub fn total_changes(&self) -> u64 { + self.new_issue_iids.len() as u64 + + self.updated_issue_iids.len() as u64 + + self.new_mr_iids.len() as u64 + + self.updated_mr_iids.len() as u64 + + self.new_discussion_count + + self.updated_discussion_count + + self.new_note_count + } + + /// Clear the ledger for a new sync run. 
+ pub fn clear(&mut self) { + self.new_issue_iids.clear(); + self.updated_issue_iids.clear(); + self.new_mr_iids.clear(); + self.updated_mr_iids.clear(); + self.new_discussion_count = 0; + self.updated_discussion_count = 0; + self.new_note_count = 0; + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_record_new_issues() { + let mut ledger = SyncDeltaLedger::default(); + ledger.record_change(LedgerEntityType::Issue, 1, ChangeKind::New); + ledger.record_change(LedgerEntityType::Issue, 2, ChangeKind::New); + ledger.record_change(LedgerEntityType::Issue, 3, ChangeKind::Updated); + + assert_eq!(ledger.new_issue_iids.len(), 2); + assert_eq!(ledger.updated_issue_iids.len(), 1); + assert!(ledger.is_new_issue(1)); + assert!(ledger.is_new_issue(2)); + assert!(!ledger.is_new_issue(3)); + } + + #[test] + fn test_record_new_mrs() { + let mut ledger = SyncDeltaLedger::default(); + ledger.record_change(LedgerEntityType::MergeRequest, 10, ChangeKind::New); + ledger.record_change(LedgerEntityType::MergeRequest, 20, ChangeKind::Updated); + + assert!(ledger.is_new_mr(10)); + assert!(!ledger.is_new_mr(20)); + } + + #[test] + fn test_summary_counts() { + let mut ledger = SyncDeltaLedger::default(); + ledger.record_change(LedgerEntityType::Issue, 1, ChangeKind::New); + ledger.record_change(LedgerEntityType::Issue, 2, ChangeKind::New); + ledger.record_change(LedgerEntityType::Issue, 3, ChangeKind::Updated); + ledger.record_change(LedgerEntityType::MergeRequest, 10, ChangeKind::New); + ledger.record_change(LedgerEntityType::Discussion, 0, ChangeKind::New); + ledger.record_change(LedgerEntityType::Note, 0, ChangeKind::New); + + let summary = ledger.summary(); + assert_eq!(summary.issues.new, 2); + assert_eq!(summary.issues.updated, 1); + assert_eq!(summary.merge_requests.new, 1); + 
assert_eq!(summary.discussions.new, 1); + assert_eq!(summary.notes.new, 1); + } + + #[test] + fn test_total_changes() { + let mut ledger = SyncDeltaLedger::default(); + ledger.record_change(LedgerEntityType::Issue, 1, ChangeKind::New); + ledger.record_change(LedgerEntityType::MergeRequest, 10, ChangeKind::Updated); + ledger.record_change(LedgerEntityType::Note, 0, ChangeKind::New); + + assert_eq!(ledger.total_changes(), 3); + } + + #[test] + fn test_dedup_same_iid() { + let mut ledger = SyncDeltaLedger::default(); + // Recording same IID twice should deduplicate. + ledger.record_change(LedgerEntityType::Issue, 1, ChangeKind::New); + ledger.record_change(LedgerEntityType::Issue, 1, ChangeKind::New); + + assert_eq!(ledger.new_issue_iids.len(), 1); + } + + #[test] + fn test_clear() { + let mut ledger = SyncDeltaLedger::default(); + ledger.record_change(LedgerEntityType::Issue, 1, ChangeKind::New); + ledger.record_change(LedgerEntityType::Note, 0, ChangeKind::New); + + ledger.clear(); + + assert_eq!(ledger.total_changes(), 0); + assert!(ledger.new_issue_iids.is_empty()); + } + + #[test] + fn test_empty_ledger_summary() { + let ledger = SyncDeltaLedger::default(); + let summary = ledger.summary(); + assert_eq!(summary.total_changes(), 0); + assert!(!summary.has_errors()); + } +} diff --git a/crates/lore-tui/src/state/trace.rs b/crates/lore-tui/src/state/trace.rs index 5cf9798..2c28189 100644 --- a/crates/lore-tui/src/state/trace.rs +++ b/crates/lore-tui/src/state/trace.rs @@ -9,6 +9,8 @@ use std::collections::HashSet; use lore::core::trace::TraceResult; +use crate::text_width::{next_char_boundary, prev_char_boundary}; + // --------------------------------------------------------------------------- // TraceState // --------------------------------------------------------------------------- @@ -18,7 +20,7 @@ use lore::core::trace::TraceResult; pub struct TraceState { /// User-entered file path (with optional :line suffix). 
pub path_input: String, - /// Cursor position within `path_input`. + /// Cursor position within `path_input` (byte offset). pub path_cursor: usize, /// Whether the path input field has keyboard focus. pub path_focused: bool, @@ -188,48 +190,35 @@ impl TraceState { // --- Text editing helpers --- - /// Insert a character at the cursor position. + /// Insert a character at the cursor position (byte offset). pub fn insert_char(&mut self, ch: char) { - let byte_pos = self - .path_input - .char_indices() - .nth(self.path_cursor) - .map_or(self.path_input.len(), |(i, _)| i); - self.path_input.insert(byte_pos, ch); - self.path_cursor += 1; + self.path_input.insert(self.path_cursor, ch); + self.path_cursor += ch.len_utf8(); self.update_autocomplete(); } - /// Delete the character before the cursor. + /// Delete the character before the cursor (byte offset). pub fn delete_char_before_cursor(&mut self) { if self.path_cursor == 0 { return; } - self.path_cursor -= 1; - let byte_pos = self - .path_input - .char_indices() - .nth(self.path_cursor) - .map_or(self.path_input.len(), |(i, _)| i); - let end = self - .path_input - .char_indices() - .nth(self.path_cursor + 1) - .map_or(self.path_input.len(), |(i, _)| i); - self.path_input.drain(byte_pos..end); + let prev = prev_char_boundary(&self.path_input, self.path_cursor); + self.path_input.drain(prev..self.path_cursor); + self.path_cursor = prev; self.update_autocomplete(); } - /// Move cursor left. + /// Move cursor left (byte offset). pub fn cursor_left(&mut self) { - self.path_cursor = self.path_cursor.saturating_sub(1); + if self.path_cursor > 0 { + self.path_cursor = prev_char_boundary(&self.path_input, self.path_cursor); + } } - /// Move cursor right. + /// Move cursor right (byte offset). 
pub fn cursor_right(&mut self) { - let max = self.path_input.chars().count(); - if self.path_cursor < max { - self.path_cursor += 1; + if self.path_cursor < self.path_input.len() { + self.path_cursor = next_char_boundary(&self.path_input, self.path_cursor); } } @@ -266,7 +255,7 @@ impl TraceState { pub fn accept_autocomplete(&mut self) { if let Some(match_) = self.autocomplete_matches.get(self.autocomplete_index) { self.path_input = match_.clone(); - self.path_cursor = self.path_input.chars().count(); + self.path_cursor = self.path_input.len(); self.autocomplete_matches.clear(); } } diff --git a/crates/lore-tui/src/state/who.rs b/crates/lore-tui/src/state/who.rs index 3e3f020..57b52d1 100644 --- a/crates/lore-tui/src/state/who.rs +++ b/crates/lore-tui/src/state/who.rs @@ -5,6 +5,8 @@ use lore::core::who_types::WhoResult; +use crate::text_width::{next_char_boundary, prev_char_boundary}; + // --------------------------------------------------------------------------- // WhoMode // --------------------------------------------------------------------------- @@ -291,24 +293,6 @@ impl WhoState { } } -/// Find the byte offset of the previous char boundary. -fn prev_char_boundary(s: &str, pos: usize) -> usize { - let mut i = pos.saturating_sub(1); - while i > 0 && !s.is_char_boundary(i) { - i -= 1; - } - i -} - -/// Find the byte offset of the next char boundary. -fn next_char_boundary(s: &str, pos: usize) -> usize { - let mut i = pos + 1; - while i < s.len() && !s.is_char_boundary(i) { - i += 1; - } - i -} - // --------------------------------------------------------------------------- // Tests // --------------------------------------------------------------------------- diff --git a/crates/lore-tui/src/text_width.rs b/crates/lore-tui/src/text_width.rs new file mode 100644 index 0000000..8367dfc --- /dev/null +++ b/crates/lore-tui/src/text_width.rs @@ -0,0 +1,300 @@ +//! Unicode-aware text width measurement and truncation. +//! +//! 
Terminal cells aren't 1:1 with bytes or even chars. CJK characters +//! occupy 2 cells, emoji ZWJ sequences are single grapheme clusters, +//! and combining marks have zero width. This module provides correct +//! measurement and truncation that never splits a grapheme cluster. + +use unicode_segmentation::UnicodeSegmentation; +use unicode_width::UnicodeWidthStr; + +/// Measure the display width of a string in terminal cells. +/// +/// - ASCII characters: 1 cell each +/// - CJK characters: 2 cells each +/// - Emoji: varies (ZWJ sequences treated as grapheme clusters) +/// - Combining marks: 0 cells +#[must_use] +pub fn measure_display_width(s: &str) -> usize { + UnicodeWidthStr::width(s) +} + +/// Truncate a string to fit within `max_width` terminal cells. +/// +/// Appends an ellipsis character if truncation occurs. Never splits +/// a grapheme cluster — if appending the next cluster would exceed +/// the limit, it stops before that cluster. +/// +/// The ellipsis itself occupies 1 cell of the budget. +#[must_use] +pub fn truncate_display_width(s: &str, max_width: usize) -> String { + let full_width = measure_display_width(s); + if full_width <= max_width { + return s.to_string(); + } + + if max_width == 0 { + return String::new(); + } + + // Reserve 1 cell for the ellipsis. + let budget = max_width.saturating_sub(1); + let mut result = String::new(); + let mut used = 0; + + for grapheme in s.graphemes(true) { + let gw = UnicodeWidthStr::width(grapheme); + if used + gw > budget { + break; + } + result.push_str(grapheme); + used += gw; + } + + result.push('\u{2026}'); // ellipsis + result +} + +/// Pad a string with trailing spaces to reach `width` terminal cells. +/// +/// If the string is already wider than `width`, returns it unchanged. 
+#[must_use]
+pub fn pad_display_width(s: &str, width: usize) -> String {
+    let current = measure_display_width(s);
+    if current >= width {
+        return s.to_string();
+    }
+    let padding = width - current;
+    let mut result = s.to_string();
+    for _ in 0..padding {
+        result.push(' ');
+    }
+    result
+}
+
+// ---------------------------------------------------------------------------
+// Cursor / char-boundary helpers
+// ---------------------------------------------------------------------------
+
+/// Find the byte offset of the previous char boundary before `pos`.
+///
+/// Walks backwards from `pos - 1` until a valid char boundary is found.
+/// Returns 0 if `pos` is 0 or 1.
+pub(crate) fn prev_char_boundary(s: &str, pos: usize) -> usize {
+    let mut i = pos.saturating_sub(1);
+    while i > 0 && !s.is_char_boundary(i) {
+        i -= 1;
+    }
+    i
+}
+
+/// Find the byte offset of the next char boundary after `pos`.
+///
+/// Walks forward from `pos + 1` until a valid char boundary is found.
+/// If `pos >= s.len()` this yields `pos + 1` (past the end); callers bound-check first.
+pub(crate) fn next_char_boundary(s: &str, pos: usize) -> usize {
+    let mut i = pos + 1;
+    while i < s.len() && !s.is_char_boundary(i) {
+        i += 1;
+    }
+    i
+}
+
+/// Convert a byte-offset cursor position to a display-column offset.
+///
+/// Snaps to the nearest char boundary at or before `cursor`, then counts
+/// the number of characters from the start of the string to that point.
+/// This gives the correct terminal column offset for cursor rendering.
+pub(crate) fn cursor_cell_offset(text: &str, cursor: usize) -> u16 { + let mut idx = cursor.min(text.len()); + while idx > 0 && !text.is_char_boundary(idx) { + idx -= 1; + } + text[..idx].chars().count().min(u16::MAX as usize) as u16 +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + // --- measure_display_width --- + + #[test] + fn test_measure_ascii() { + assert_eq!(measure_display_width("Hello"), 5); + } + + #[test] + fn test_measure_empty() { + assert_eq!(measure_display_width(""), 0); + } + + #[test] + fn test_measure_cjk_width() { + // TDD anchor from the bead spec + assert_eq!(measure_display_width("Hello"), 5); + assert_eq!(measure_display_width("\u{65E5}\u{672C}\u{8A9E}"), 6); // 日本語 = 3 chars * 2 cells + } + + #[test] + fn test_measure_mixed_ascii_cjk() { + // "Hi日本" = 2 + 2 + 2 = 6 + assert_eq!(measure_display_width("Hi\u{65E5}\u{672C}"), 6); + } + + #[test] + fn test_measure_combining_marks() { + // e + combining acute accent = 1 cell (combining mark is 0-width) + assert_eq!(measure_display_width("e\u{0301}"), 1); + } + + // --- truncate_display_width --- + + #[test] + fn test_truncate_no_truncation_needed() { + assert_eq!(truncate_display_width("Hello", 10), "Hello"); + } + + #[test] + fn test_truncate_exact_fit() { + assert_eq!(truncate_display_width("Hello", 5), "Hello"); + } + + #[test] + fn test_truncate_ascii() { + // "Hello World" is 11 cells. 
Truncate to 8: budget=7 for text + 1 for ellipsis + let result = truncate_display_width("Hello World", 8); + assert_eq!(measure_display_width(&result), 8); // 7 chars + ellipsis + assert!(result.ends_with('\u{2026}')); + } + + #[test] + fn test_truncate_cjk_no_split() { + // 日本語テスト = 6 chars * 2 cells = 12 cells + // Truncate to 5: budget=4 for text + 1 for ellipsis + // Can fit 2 CJK chars (4 cells), then ellipsis + let result = truncate_display_width("\u{65E5}\u{672C}\u{8A9E}\u{30C6}\u{30B9}\u{30C8}", 5); + assert!(result.ends_with('\u{2026}')); + assert!(measure_display_width(&result) <= 5); + } + + #[test] + fn test_truncate_zero_width() { + assert_eq!(truncate_display_width("Hello", 0), ""); + } + + #[test] + fn test_truncate_width_one() { + // Only room for the ellipsis itself + let result = truncate_display_width("Hello", 1); + assert_eq!(result, "\u{2026}"); + } + + #[test] + fn test_truncate_emoji() { + // Family emoji (ZWJ sequence) — should not be split + let family = "\u{1F468}\u{200D}\u{1F469}\u{200D}\u{1F467}"; // 👨‍👩‍👧 + let result = truncate_display_width(&format!("{family}Hello"), 3); + // The emoji grapheme cluster is > 1 cell; if it doesn't fit in budget, + // it should be skipped entirely, leaving just the ellipsis or less. 
+ assert!(measure_display_width(&result) <= 3); + } + + // --- pad_display_width --- + + #[test] + fn test_pad_basic() { + let result = pad_display_width("Hi", 5); + assert_eq!(result, "Hi "); + assert_eq!(measure_display_width(&result), 5); + } + + #[test] + fn test_pad_already_wide_enough() { + assert_eq!(pad_display_width("Hello", 3), "Hello"); + } + + #[test] + fn test_pad_exact_width() { + assert_eq!(pad_display_width("Hello", 5), "Hello"); + } + + #[test] + fn test_pad_cjk() { + // 日本 = 4 cells, pad to 6 = 2 spaces + let result = pad_display_width("\u{65E5}\u{672C}", 6); + assert_eq!(measure_display_width(&result), 6); + assert!(result.ends_with(" ")); + } + + // --- prev_char_boundary / next_char_boundary --- + + #[test] + fn test_prev_char_boundary_ascii() { + assert_eq!(prev_char_boundary("hello", 3), 2); + assert_eq!(prev_char_boundary("hello", 1), 0); + } + + #[test] + fn test_prev_char_boundary_at_zero() { + assert_eq!(prev_char_boundary("hello", 0), 0); + } + + #[test] + fn test_prev_char_boundary_multibyte() { + // "aé" = 'a' (1 byte) + 'é' (2 bytes) = 3 bytes total + let s = "a\u{00E9}b"; + // Position 3 = start of 'b', prev boundary = 1 (start of 'é') + assert_eq!(prev_char_boundary(s, 3), 1); + // Position 2 = mid-'é' byte, should snap to 1 + assert_eq!(prev_char_boundary(s, 2), 1); + } + + #[test] + fn test_next_char_boundary_ascii() { + assert_eq!(next_char_boundary("hello", 0), 1); + assert_eq!(next_char_boundary("hello", 3), 4); + } + + #[test] + fn test_next_char_boundary_multibyte() { + // "aé" = 'a' (1 byte) + 'é' (2 bytes) + let s = "a\u{00E9}b"; + // Position 1 = start of 'é', next boundary = 3 (start of 'b') + assert_eq!(next_char_boundary(s, 1), 3); + } + + #[test] + fn test_next_char_boundary_at_end() { + assert_eq!(next_char_boundary("hi", 2), 3); + } + + // --- cursor_cell_offset --- + + #[test] + fn test_cursor_cell_offset_ascii() { + assert_eq!(cursor_cell_offset("hello", 0), 0); + assert_eq!(cursor_cell_offset("hello", 3), 3); + 
assert_eq!(cursor_cell_offset("hello", 5), 5); + } + + #[test] + fn test_cursor_cell_offset_multibyte() { + // "aéb" = byte offsets: a=0, é=1..3, b=3 + let s = "a\u{00E9}b"; + assert_eq!(cursor_cell_offset(s, 0), 0); // before 'a' + assert_eq!(cursor_cell_offset(s, 1), 1); // after 'a', before 'é' + assert_eq!(cursor_cell_offset(s, 2), 1); // mid-'é', snaps back to 1 + assert_eq!(cursor_cell_offset(s, 3), 2); // after 'é', before 'b' + assert_eq!(cursor_cell_offset(s, 4), 3); // after 'b' + } + + #[test] + fn test_cursor_cell_offset_beyond_end() { + assert_eq!(cursor_cell_offset("hi", 99), 2); + } +} diff --git a/crates/lore-tui/src/view/common/mod.rs b/crates/lore-tui/src/view/common/mod.rs index 4939159..65b21f3 100644 --- a/crates/lore-tui/src/view/common/mod.rs +++ b/crates/lore-tui/src/view/common/mod.rs @@ -26,3 +26,18 @@ pub use filter_bar::{FilterBarColors, FilterBarState, render_filter_bar}; pub use help_overlay::render_help_overlay; pub use loading::render_loading; pub use status_bar::render_status_bar; + +/// Truncate a string to at most `max_chars` display characters. +/// +/// Uses Unicode ellipsis `…` for truncation. If `max_chars` is too small +/// for an ellipsis (<=1), just truncates without one. +pub fn truncate_str(s: &str, max_chars: usize) -> String { + if s.chars().count() <= max_chars { + s.to_string() + } else if max_chars <= 1 { + s.chars().take(max_chars).collect() + } else { + let truncated: String = s.chars().take(max_chars.saturating_sub(1)).collect(); + format!("{truncated}\u{2026}") + } +} diff --git a/crates/lore-tui/src/view/doctor.rs b/crates/lore-tui/src/view/doctor.rs new file mode 100644 index 0000000..a40fdc3 --- /dev/null +++ b/crates/lore-tui/src/view/doctor.rs @@ -0,0 +1,289 @@ +//! Doctor screen view — health check results. +//! +//! Renders a vertical list of health checks with colored status +//! indicators (green PASS, yellow WARN, red FAIL). 
+ +use ftui::core::geometry::Rect; +use ftui::render::cell::{Cell, PackedRgba}; +use ftui::render::drawing::Draw; +use ftui::render::frame::Frame; + +use crate::state::doctor::{DoctorState, HealthStatus}; + +use super::{TEXT, TEXT_MUTED}; + +/// Pass green. +const PASS_FG: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39); +/// Warning yellow. +const WARN_FG: PackedRgba = PackedRgba::rgb(0xD0, 0xA2, 0x15); +/// Fail red. +const FAIL_FG: PackedRgba = PackedRgba::rgb(0xD1, 0x4D, 0x41); + +// --------------------------------------------------------------------------- +// Public entry point +// --------------------------------------------------------------------------- + +/// Render the doctor screen. +pub fn render_doctor(frame: &mut Frame<'_>, state: &DoctorState, area: Rect) { + if area.width < 10 || area.height < 3 { + return; + } + + let max_x = area.right(); + + if !state.loaded { + // Not yet loaded — show centered prompt. + let msg = "Loading health checks..."; + let x = area.x + area.width.saturating_sub(msg.len() as u16) / 2; + let y = area.y + area.height / 2; + frame.print_text_clipped( + x, + y, + msg, + Cell { + fg: TEXT_MUTED, + ..Cell::default() + }, + max_x, + ); + return; + } + + // Title. + let overall = state.overall_status(); + let title_fg = status_color(overall); + let title = format!("Doctor — {}", overall.label()); + frame.print_text_clipped( + area.x + 2, + area.y + 1, + &title, + Cell { + fg: title_fg, + ..Cell::default() + }, + max_x, + ); + + // Summary line. + let pass_count = state.count_by_status(HealthStatus::Pass); + let warn_count = state.count_by_status(HealthStatus::Warn); + let fail_count = state.count_by_status(HealthStatus::Fail); + let summary = format!( + "{} passed, {} warnings, {} failed", + pass_count, warn_count, fail_count + ); + frame.print_text_clipped( + area.x + 2, + area.y + 2, + &summary, + Cell { + fg: TEXT_MUTED, + ..Cell::default() + }, + max_x, + ); + + // Health check rows. 
+    let rows_start_y = area.y + 4;
+    let name_width = 16u16;
+
+    for (i, check) in state.checks.iter().enumerate() {
+        let y = rows_start_y + i as u16;
+        if y >= area.bottom().saturating_sub(2) {
+            break;
+        }
+
+        // Status badge.
+        let badge = format!("[{}]", check.status.label());
+        let badge_fg = status_color(check.status);
+        frame.print_text_clipped(
+            area.x + 2,
+            y,
+            &badge,
+            Cell {
+                fg: badge_fg,
+                ..Cell::default()
+            },
+            max_x,
+        );
+
+        // Check name.
+        let name_x = area.x + 2 + 7; // "[PASS] " = 7 chars
+        let name = format!("{:<width$}", check.name, width = name_width as usize);
+        frame.print_text_clipped(
+            name_x,
+            y,
+            &name,
+            Cell {
+                fg: TEXT,
+                ..Cell::default()
+            },
+            max_x,
+        );
+
+        // Detail, truncated to the remaining width.
+        let detail_x = name_x + name_width + 1;
+        let max_detail = max_x.saturating_sub(detail_x) as usize;
+        let detail = if check.detail.len() > max_detail {
+            format!(
+                "{}...",
+                &check.detail[..check.detail.floor_char_boundary(max_detail.saturating_sub(3))]
+            )
+        } else {
+            check.detail.clone()
+        };
+        frame.print_text_clipped(
+            detail_x,
+            y,
+            &detail,
+            Cell {
+                fg: TEXT_MUTED,
+                ..Cell::default()
+            },
+            max_x,
+        );
+    }
+
+    // Hint at bottom.
+    let hint_y = area.bottom().saturating_sub(1);
+    frame.print_text_clipped(
+        area.x + 2,
+        hint_y,
+        "Esc: back | lore doctor (full check)",
+        Cell {
+            fg: TEXT_MUTED,
+            ..Cell::default()
+        },
+        max_x,
+    );
+}
+
+/// Map health status to a display color.
+fn status_color(status: HealthStatus) -> PackedRgba {
+    match status {
+        HealthStatus::Pass => PASS_FG,
+        HealthStatus::Warn => WARN_FG,
+        HealthStatus::Fail => FAIL_FG,
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Tests
+// ---------------------------------------------------------------------------
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use crate::state::doctor::HealthCheck;
+    use ftui::render::grapheme_pool::GraphemePool;
+
+    macro_rules! 
with_frame { + ($width:expr, $height:expr, |$frame:ident| $body:block) => {{ + let mut pool = GraphemePool::new(); + let mut $frame = Frame::new($width, $height, &mut pool); + $body + }}; + } + + fn sample_checks() -> Vec { + vec![ + HealthCheck { + name: "Config".into(), + status: HealthStatus::Pass, + detail: "/home/user/.config/lore/config.json".into(), + }, + HealthCheck { + name: "Database".into(), + status: HealthStatus::Pass, + detail: "schema v12".into(), + }, + HealthCheck { + name: "Projects".into(), + status: HealthStatus::Warn, + detail: "0 projects configured".into(), + }, + HealthCheck { + name: "FTS Index".into(), + status: HealthStatus::Fail, + detail: "No documents indexed".into(), + }, + ] + } + + #[test] + fn test_render_not_loaded() { + with_frame!(80, 24, |frame| { + let state = DoctorState::default(); + let area = frame.bounds(); + render_doctor(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_with_checks() { + with_frame!(80, 24, |frame| { + let mut state = DoctorState::default(); + state.apply_checks(sample_checks()); + let area = frame.bounds(); + render_doctor(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_all_pass() { + with_frame!(80, 24, |frame| { + let mut state = DoctorState::default(); + state.apply_checks(vec![HealthCheck { + name: "Config".into(), + status: HealthStatus::Pass, + detail: "ok".into(), + }]); + let area = frame.bounds(); + render_doctor(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_tiny_terminal() { + with_frame!(8, 2, |frame| { + let mut state = DoctorState::default(); + state.apply_checks(sample_checks()); + let area = frame.bounds(); + render_doctor(&mut frame, &state, area); + // Should not panic. 
+ }); + } + + #[test] + fn test_render_narrow_terminal_truncates() { + with_frame!(40, 20, |frame| { + let mut state = DoctorState::default(); + state.apply_checks(vec![HealthCheck { + name: "Database".into(), + status: HealthStatus::Pass, + detail: "This is a very long detail string that should be truncated".into(), + }]); + let area = frame.bounds(); + render_doctor(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_many_checks_clips() { + with_frame!(80, 10, |frame| { + let mut state = DoctorState::default(); + let mut checks = Vec::new(); + for i in 0..20 { + checks.push(HealthCheck { + name: format!("Check {i}"), + status: HealthStatus::Pass, + detail: "ok".into(), + }); + } + state.apply_checks(checks); + let area = frame.bounds(); + render_doctor(&mut frame, &state, area); + // Should clip without panicking. + }); + } +} diff --git a/crates/lore-tui/src/view/file_history.rs b/crates/lore-tui/src/view/file_history.rs index d43991a..92de8f8 100644 --- a/crates/lore-tui/src/view/file_history.rs +++ b/crates/lore-tui/src/view/file_history.rs @@ -22,6 +22,7 @@ use ftui::render::drawing::Draw; use ftui::render::frame::Frame; use crate::state::file_history::{FileHistoryResult, FileHistoryState}; +use super::common::truncate_str; // --------------------------------------------------------------------------- // Colors (Flexoki palette) @@ -136,7 +137,8 @@ fn render_path_input(frame: &mut Frame<'_>, state: &FileHistoryState, x: u16, y: // Cursor indicator. 
if state.path_focused { - let cursor_x = after_label + state.path_cursor as u16; + let cursor_col = state.path_input[..state.path_cursor].chars().count() as u16; + let cursor_x = after_label + cursor_col; if cursor_x < max_x { let cursor_cell = Cell { fg: PackedRgba::rgb(0x10, 0x0F, 0x0F), // dark bg @@ -446,16 +448,6 @@ fn render_hint_bar(frame: &mut Frame<'_>, x: u16, y: u16, max_x: u16) { frame.print_text_clipped(x + 1, y, hints, style, max_x); } -/// Truncate a string to at most `max_chars` display characters. -fn truncate_str(s: &str, max_chars: usize) -> String { - if s.chars().count() <= max_chars { - s.to_string() - } else { - let truncated: String = s.chars().take(max_chars.saturating_sub(1)).collect(); - format!("{truncated}…") - } -} - // --------------------------------------------------------------------------- // Tests // --------------------------------------------------------------------------- diff --git a/crates/lore-tui/src/view/mod.rs b/crates/lore-tui/src/view/mod.rs index 83a852a..0a876ec 100644 --- a/crates/lore-tui/src/view/mod.rs +++ b/crates/lore-tui/src/view/mod.rs @@ -10,6 +10,7 @@ pub mod bootstrap; pub mod command_palette; pub mod common; pub mod dashboard; +pub mod doctor; pub mod file_history; pub mod issue_detail; pub mod issue_list; @@ -18,11 +19,13 @@ pub mod mr_list; pub mod search; pub mod timeline; pub mod trace; +pub mod scope_picker; +pub mod stats; +pub mod sync; pub mod who; use ftui::layout::{Constraint, Flex}; -use ftui::render::cell::{Cell, PackedRgba}; -use ftui::render::drawing::Draw; +use ftui::render::cell::PackedRgba; use ftui::render::frame::Frame; use crate::app::LoreApp; @@ -34,6 +37,7 @@ use common::{ render_breadcrumb, render_error_toast, render_help_overlay, render_loading, render_status_bar, }; use dashboard::render_dashboard; +use doctor::render_doctor; use file_history::render_file_history; use issue_detail::render_issue_detail; use issue_list::render_issue_list; @@ -42,6 +46,9 @@ use 
mr_list::render_mr_list; use search::render_search; use timeline::render_timeline; use trace::render_trace; +use scope_picker::render_scope_picker; +use stats::render_stats; +use sync::render_sync; use who::render_who; // --------------------------------------------------------------------------- @@ -56,41 +63,6 @@ const ERROR_BG: PackedRgba = PackedRgba::rgb(0xAF, 0x3A, 0x29); // red const ERROR_FG: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3); // tx const BORDER: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2 -fn render_sync_placeholder(frame: &mut Frame<'_>, area: ftui::core::geometry::Rect) { - if area.width < 10 || area.height < 5 { - return; - } - - let max_x = area.right(); - let center_y = area.y + area.height / 2; - - let title = "Sync"; - let title_x = area.x + area.width.saturating_sub(title.len() as u16) / 2; - frame.print_text_clipped( - title_x, - center_y.saturating_sub(1), - title, - Cell { - fg: ACCENT, - ..Cell::default() - }, - max_x, - ); - - let body = "Run `lore sync` in another terminal."; - let body_x = area.x + area.width.saturating_sub(body.len() as u16) / 2; - frame.print_text_clipped( - body_x, - center_y + 1, - body, - Cell { - fg: TEXT_MUTED, - ..Cell::default() - }, - max_x, - ); -} - // --------------------------------------------------------------------------- // render_screen // --------------------------------------------------------------------------- @@ -144,7 +116,7 @@ pub fn render_screen(frame: &mut Frame<'_>, app: &LoreApp) { if screen == &Screen::Bootstrap { render_bootstrap(frame, &app.state.bootstrap, content_area); } else if screen == &Screen::Sync { - render_sync_placeholder(frame, content_area); + render_sync(frame, &app.state.sync, content_area); } else if screen == &Screen::Dashboard { render_dashboard(frame, &app.state.dashboard, content_area); } else if screen == &Screen::IssueList { @@ -165,6 +137,10 @@ pub fn render_screen(frame: &mut Frame<'_>, app: &LoreApp) { render_file_history(frame, 
&app.state.file_history, content_area); } else if screen == &Screen::Trace { render_trace(frame, &app.state.trace, content_area); + } else if screen == &Screen::Doctor { + render_doctor(frame, &app.state.doctor, content_area); + } else if screen == &Screen::Stats { + render_stats(frame, &app.state.stats, content_area); } // --- Status bar --- @@ -189,6 +165,14 @@ pub fn render_screen(frame: &mut Frame<'_>, app: &LoreApp) { // Command palette overlay. render_command_palette(frame, &app.state.command_palette, bounds); + // Scope picker overlay. + render_scope_picker( + frame, + &app.state.scope_picker, + &app.state.global_scope, + bounds, + ); + // Help overlay. if app.state.show_help { render_help_overlay( @@ -279,7 +263,7 @@ mod tests { }); assert!( has_content, - "Expected sync placeholder content in center area" + "Expected sync idle content in center area" ); }); } diff --git a/crates/lore-tui/src/view/scope_picker.rs b/crates/lore-tui/src/view/scope_picker.rs new file mode 100644 index 0000000..bd1a7eb --- /dev/null +++ b/crates/lore-tui/src/view/scope_picker.rs @@ -0,0 +1,276 @@ +//! Scope picker overlay — modal project filter selector. +//! +//! Renders a centered modal listing all available projects. The user +//! selects "All Projects" or a specific project to filter all screens. + +use ftui::core::geometry::Rect; +use ftui::render::cell::{Cell, PackedRgba}; +use ftui::render::drawing::{BorderChars, Draw}; +use ftui::render::frame::Frame; + +use crate::state::scope_picker::ScopePickerState; +use crate::state::ScopeContext; + +use super::{ACCENT, BG_SURFACE, BORDER, TEXT, TEXT_MUTED}; + +/// Selection highlight background. +const SELECTION_BG: PackedRgba = PackedRgba::rgb(0x3A, 0x3A, 0x34); + +// --------------------------------------------------------------------------- +// render_scope_picker +// --------------------------------------------------------------------------- + +/// Render the scope picker overlay centered on the screen. 
+/// +/// Only renders if `state.visible`. The modal is 50% width, up to 40x20. +pub fn render_scope_picker( + frame: &mut Frame<'_>, + state: &ScopePickerState, + current_scope: &ScopeContext, + area: Rect, +) { + if !state.visible { + return; + } + if area.height < 5 || area.width < 20 { + return; + } + + // Modal dimensions. + let modal_width = (area.width / 2).clamp(25, 40); + let row_count = state.row_count(); + // +3 for border top, title gap, border bottom. + let modal_height = ((row_count + 3) as u16).clamp(5, 20).min(area.height - 2); + + let modal_x = area.x + (area.width.saturating_sub(modal_width)) / 2; + let modal_y = area.y + (area.height.saturating_sub(modal_height)) / 2; + let modal_rect = Rect::new(modal_x, modal_y, modal_width, modal_height); + + // Clear background. + let bg_cell = Cell { + fg: TEXT, + bg: BG_SURFACE, + ..Cell::default() + }; + for y in modal_rect.y..modal_rect.bottom() { + for x in modal_rect.x..modal_rect.right() { + frame.buffer.set(x, y, bg_cell); + } + } + + // Border. + let border_cell = Cell { + fg: BORDER, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.draw_border(modal_rect, BorderChars::ROUNDED, border_cell); + + // Title. + let title = " Project Scope "; + let title_x = modal_x + (modal_width.saturating_sub(title.len() as u16)) / 2; + let title_cell = Cell { + fg: ACCENT, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped(title_x, modal_y, title, title_cell, modal_rect.right()); + + // Content area (inside border). + let content_x = modal_x + 1; + let content_max_x = modal_rect.right().saturating_sub(1); + let content_width = content_max_x.saturating_sub(content_x); + let first_row_y = modal_y + 1; + let max_rows = (modal_height.saturating_sub(2)) as usize; // Inside borders. + + // Render rows. 
+ let visible_end = (state.scroll_offset + max_rows).min(row_count); + for vis_idx in 0..max_rows { + let row_idx = state.scroll_offset + vis_idx; + if row_idx >= row_count { + break; + } + + let y = first_row_y + vis_idx as u16; + let selected = row_idx == state.selected_index; + + let bg = if selected { SELECTION_BG } else { BG_SURFACE }; + + // Fill row background. + if selected { + let sel_cell = Cell { + fg: TEXT, + bg, + ..Cell::default() + }; + for x in content_x..content_max_x { + frame.buffer.set(x, y, sel_cell); + } + } + + // Row content. + let (label, is_active) = if row_idx == 0 { + let active = current_scope.project_id.is_none(); + ("All Projects".to_string(), active) + } else { + let project = &state.projects[row_idx - 1]; + let active = current_scope.project_id == Some(project.id); + (project.path.clone(), active) + }; + + // Active indicator. + let prefix = if is_active { "> " } else { " " }; + + let fg = if is_active { ACCENT } else { TEXT }; + let cell = Cell { + fg, + bg, + ..Cell::default() + }; + + // Truncate label to fit. + let max_label_len = content_width.saturating_sub(2) as usize; // 2 for prefix + let display = if label.len() > max_label_len { + format!("{prefix}{}...", &label[..label.floor_char_boundary(max_label_len.saturating_sub(3))]) + } else { + format!("{prefix}{label}") + }; + + frame.print_text_clipped(content_x, y, &display, cell, content_max_x); + } + + // Scroll indicators. 
+ if state.scroll_offset > 0 { + let arrow_cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + frame.print_text_clipped( + content_max_x.saturating_sub(1), + first_row_y, + "^", + arrow_cell, + modal_rect.right(), + ); + } + if visible_end < row_count { + let arrow_cell = Cell { + fg: TEXT_MUTED, + bg: BG_SURFACE, + ..Cell::default() + }; + let bottom_y = first_row_y + (max_rows as u16).saturating_sub(1); + frame.print_text_clipped( + content_max_x.saturating_sub(1), + bottom_y, + "v", + arrow_cell, + modal_rect.right(), + ); + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::scope::ProjectInfo; + use ftui::render::grapheme_pool::GraphemePool; + + macro_rules! with_frame { + ($width:expr, $height:expr, |$frame:ident| $body:block) => {{ + let mut pool = GraphemePool::new(); + let mut $frame = Frame::new($width, $height, &mut pool); + $body + }}; + } + + fn sample_projects() -> Vec { + vec![ + ProjectInfo { + id: 1, + path: "alpha/repo".into(), + }, + ProjectInfo { + id: 2, + path: "beta/repo".into(), + }, + ] + } + + #[test] + fn test_render_hidden_noop() { + with_frame!(80, 24, |frame| { + let state = ScopePickerState::default(); + let scope = ScopeContext::default(); + let area = frame.bounds(); + render_scope_picker(&mut frame, &state, &scope, area); + // Should not panic. 
+ }); + } + + #[test] + fn test_render_visible_no_panic() { + with_frame!(80, 24, |frame| { + let mut state = ScopePickerState::default(); + let scope = ScopeContext::default(); + state.open(sample_projects(), &scope); + let area = frame.bounds(); + render_scope_picker(&mut frame, &state, &scope, area); + }); + } + + #[test] + fn test_render_with_selection() { + with_frame!(80, 24, |frame| { + let mut state = ScopePickerState::default(); + let scope = ScopeContext::default(); + state.open(sample_projects(), &scope); + state.select_next(); // Move to first project + let area = frame.bounds(); + render_scope_picker(&mut frame, &state, &scope, area); + }); + } + + #[test] + fn test_render_tiny_terminal_noop() { + with_frame!(15, 4, |frame| { + let mut state = ScopePickerState::default(); + let scope = ScopeContext::default(); + state.open(sample_projects(), &scope); + let area = frame.bounds(); + render_scope_picker(&mut frame, &state, &scope, area); + // Should not panic on tiny terminals. + }); + } + + #[test] + fn test_render_active_scope_highlighted() { + with_frame!(80, 24, |frame| { + let mut state = ScopePickerState::default(); + let scope = ScopeContext { + project_id: Some(2), + project_name: Some("beta/repo".into()), + }; + state.open(sample_projects(), &scope); + let area = frame.bounds(); + render_scope_picker(&mut frame, &state, &scope, area); + }); + } + + #[test] + fn test_render_empty_project_list() { + with_frame!(80, 24, |frame| { + let mut state = ScopePickerState::default(); + let scope = ScopeContext::default(); + state.open(vec![], &scope); + let area = frame.bounds(); + render_scope_picker(&mut frame, &state, &scope, area); + // Only "All Projects" row, should not panic. + }); + } +} diff --git a/crates/lore-tui/src/view/stats.rs b/crates/lore-tui/src/view/stats.rs new file mode 100644 index 0000000..a2c1d37 --- /dev/null +++ b/crates/lore-tui/src/view/stats.rs @@ -0,0 +1,443 @@ +//! Stats screen view — database and index statistics. +//! +//! 
Renders entity counts, FTS/embedding coverage, and queue health +//! as a simple table layout. + +use ftui::core::geometry::Rect; +use ftui::render::cell::{Cell, PackedRgba}; +use ftui::render::drawing::Draw; +use ftui::render::frame::Frame; + +use crate::state::stats::StatsState; + +use super::{ACCENT, TEXT, TEXT_MUTED}; + +/// Success green (for good coverage). +const GOOD_FG: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39); +/// Warning yellow (for partial coverage). +const WARN_FG: PackedRgba = PackedRgba::rgb(0xD0, 0xA2, 0x15); + +// --------------------------------------------------------------------------- +// Public entry point +// --------------------------------------------------------------------------- + +/// Render the stats screen. +pub fn render_stats(frame: &mut Frame<'_>, state: &StatsState, area: Rect) { + if area.width < 10 || area.height < 3 { + return; + } + + let max_x = area.right(); + + if !state.loaded { + let msg = "Loading statistics..."; + let x = area.x + area.width.saturating_sub(msg.len() as u16) / 2; + let y = area.y + area.height / 2; + frame.print_text_clipped( + x, + y, + msg, + Cell { + fg: TEXT_MUTED, + ..Cell::default() + }, + max_x, + ); + return; + } + + let data = match &state.data { + Some(d) => d, + None => return, + }; + + // Title. 
+ frame.print_text_clipped( + area.x + 2, + area.y + 1, + "Database Statistics", + Cell { + fg: ACCENT, + ..Cell::default() + }, + max_x, + ); + + let mut y = area.y + 3; + let label_width = 22u16; + let value_x = area.x + 2 + label_width; + + // --- Entity Counts section --- + if y < area.bottom().saturating_sub(2) { + frame.print_text_clipped( + area.x + 2, + y, + "Entities", + Cell { + fg: TEXT, + ..Cell::default() + }, + max_x, + ); + y += 1; + } + + let entity_rows: [(&str, i64); 4] = [ + (" Issues", data.issues), + (" Merge Requests", data.merge_requests), + (" Discussions", data.discussions), + (" Notes", data.notes), + ]; + + for (label, count) in &entity_rows { + if y >= area.bottom().saturating_sub(2) { + break; + } + render_stat_row(frame, area.x + 2, y, label, &format_count(*count), label_width, max_x); + y += 1; + } + + // Total. + if y < area.bottom().saturating_sub(2) { + let total = data.issues + data.merge_requests + data.discussions + data.notes; + render_stat_row( + frame, + area.x + 2, + y, + " Total", + &format_count(total), + label_width, + max_x, + ); + y += 1; + } + + y += 1; // Blank line. + + // --- Index Coverage section --- + if y < area.bottom().saturating_sub(2) { + frame.print_text_clipped( + area.x + 2, + y, + "Index Coverage", + Cell { + fg: TEXT, + ..Cell::default() + }, + max_x, + ); + y += 1; + } + + // FTS. 
+ if y < area.bottom().saturating_sub(2) { + let fts_pct = data.fts_coverage_pct(); + let fts_text = format!("{} ({:.0}%)", format_count(data.fts_indexed), fts_pct); + let fg = coverage_color(fts_pct); + frame.print_text_clipped( + area.x + 2, + y, + &format!("{: 0 && y < area.bottom().saturating_sub(2) { + let failed_cell = Cell { + fg: WARN_FG, + ..Cell::default() + }; + frame.print_text_clipped( + area.x + 2, + y, + &format!("{:, + x: u16, + y: u16, + label: &str, + value: &str, + label_width: u16, + max_x: u16, +) { + let value_x = x + label_width; + frame.print_text_clipped( + x, + y, + &format!("{label: PackedRgba { + if pct >= 90.0 { + GOOD_FG + } else if pct >= 50.0 { + WARN_FG + } else { + TEXT + } +} + +/// Format a count with comma separators for readability. +fn format_count(n: i64) -> String { + if n < 1_000 { + return n.to_string(); + } + let s = n.to_string(); + let mut result = String::with_capacity(s.len() + s.len() / 3); + for (i, c) in s.chars().enumerate() { + if i > 0 && (s.len() - i).is_multiple_of(3) { + result.push(','); + } + result.push(c); + } + result +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::state::stats::StatsData; + use ftui::render::grapheme_pool::GraphemePool; + + macro_rules! 
with_frame { + ($width:expr, $height:expr, |$frame:ident| $body:block) => {{ + let mut pool = GraphemePool::new(); + let mut $frame = Frame::new($width, $height, &mut pool); + $body + }}; + } + + fn sample_data() -> StatsData { + StatsData { + total_documents: 500, + issues: 200, + merge_requests: 150, + discussions: 100, + notes: 50, + fts_indexed: 450, + embedded_documents: 300, + total_chunks: 1200, + coverage_pct: 60.0, + queue_pending: 5, + queue_failed: 1, + } + } + + #[test] + fn test_render_not_loaded() { + with_frame!(80, 24, |frame| { + let state = StatsState::default(); + let area = frame.bounds(); + render_stats(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_with_data() { + with_frame!(80, 24, |frame| { + let mut state = StatsState::default(); + state.apply_data(sample_data()); + let area = frame.bounds(); + render_stats(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_no_queue_work() { + with_frame!(80, 24, |frame| { + let mut state = StatsState::default(); + state.apply_data(StatsData { + queue_pending: 0, + queue_failed: 0, + ..sample_data() + }); + let area = frame.bounds(); + render_stats(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_tiny_terminal() { + with_frame!(8, 2, |frame| { + let mut state = StatsState::default(); + state.apply_data(sample_data()); + let area = frame.bounds(); + render_stats(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_short_terminal() { + with_frame!(80, 8, |frame| { + let mut state = StatsState::default(); + state.apply_data(sample_data()); + let area = frame.bounds(); + render_stats(&mut frame, &state, area); + // Should clip without panicking. 
+ }); + } + + #[test] + fn test_format_count_small() { + assert_eq!(format_count(0), "0"); + assert_eq!(format_count(42), "42"); + assert_eq!(format_count(999), "999"); + } + + #[test] + fn test_format_count_thousands() { + assert_eq!(format_count(1_000), "1,000"); + assert_eq!(format_count(12_345), "12,345"); + assert_eq!(format_count(1_234_567), "1,234,567"); + } + + #[test] + fn test_coverage_color_thresholds() { + assert_eq!(coverage_color(100.0), GOOD_FG); + assert_eq!(coverage_color(90.0), GOOD_FG); + assert_eq!(coverage_color(89.9), WARN_FG); + assert_eq!(coverage_color(50.0), WARN_FG); + assert_eq!(coverage_color(49.9), TEXT); + } +} diff --git a/crates/lore-tui/src/view/sync.rs b/crates/lore-tui/src/view/sync.rs new file mode 100644 index 0000000..db16f90 --- /dev/null +++ b/crates/lore-tui/src/view/sync.rs @@ -0,0 +1,575 @@ +//! Sync screen view — progress bars, summary table, and log. +//! +//! Renders the sync screen in different phases: +//! - **Idle**: prompt to start sync +//! - **Running**: per-lane progress bars with throughput stats +//! - **Complete**: summary table with change counts +//! - **Cancelled/Failed**: status message with retry hint + +use ftui::core::geometry::Rect; +use ftui::render::cell::{Cell, PackedRgba}; +use ftui::render::drawing::Draw; +use ftui::render::frame::Frame; + +use crate::state::sync::{SyncLane, SyncPhase, SyncState}; + +use super::{ACCENT, TEXT, TEXT_MUTED}; + +/// Progress bar fill color. +const PROGRESS_FG: PackedRgba = PackedRgba::rgb(0xDA, 0x70, 0x2C); // orange +/// Progress bar background. +const PROGRESS_BG: PackedRgba = PackedRgba::rgb(0x34, 0x34, 0x30); +/// Success green. +const SUCCESS_FG: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39); +/// Error red. 
+const ERROR_FG: PackedRgba = PackedRgba::rgb(0xD1, 0x4D, 0x41); + +// --------------------------------------------------------------------------- +// Public entry point +// --------------------------------------------------------------------------- + +/// Render the sync screen. +pub fn render_sync(frame: &mut Frame<'_>, state: &SyncState, area: Rect) { + if area.width < 10 || area.height < 3 { + return; + } + + match &state.phase { + SyncPhase::Idle => render_idle(frame, area), + SyncPhase::Running => render_running(frame, state, area), + SyncPhase::Complete => render_summary(frame, state, area), + SyncPhase::Cancelled => render_cancelled(frame, area), + SyncPhase::Failed(err) => render_failed(frame, area, err), + } +} + +// --------------------------------------------------------------------------- +// Idle view +// --------------------------------------------------------------------------- + +fn render_idle(frame: &mut Frame<'_>, area: Rect) { + let max_x = area.right(); + let center_y = area.y + area.height / 2; + + let title = "Sync"; + let title_x = area.x + area.width.saturating_sub(title.len() as u16) / 2; + frame.print_text_clipped( + title_x, + center_y.saturating_sub(1), + title, + Cell { + fg: ACCENT, + ..Cell::default() + }, + max_x, + ); + + let hint = "Press Enter to start sync, or run `lore sync` externally."; + let hint_x = area.x + area.width.saturating_sub(hint.len() as u16) / 2; + frame.print_text_clipped( + hint_x, + center_y + 1, + hint, + Cell { + fg: TEXT_MUTED, + ..Cell::default() + }, + max_x, + ); +} + +// --------------------------------------------------------------------------- +// Running view — per-lane progress bars +// --------------------------------------------------------------------------- + +fn render_running(frame: &mut Frame<'_>, state: &SyncState, area: Rect) { + let max_x = area.right(); + + // Title. 
+ let title = "Syncing..."; + let title_x = area.x + 2; + frame.print_text_clipped( + title_x, + area.y + 1, + title, + Cell { + fg: ACCENT, + ..Cell::default() + }, + max_x, + ); + + // Stage label. + if !state.stage.is_empty() { + let stage_cell = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + frame.print_text_clipped(title_x, area.y + 2, &state.stage, stage_cell, max_x); + } + + // Per-lane progress bars. + let bar_start_y = area.y + 4; + let label_width = 14u16; // "Discussions " is the longest + let bar_x = area.x + 2 + label_width; + let bar_width = area.width.saturating_sub(4 + label_width + 12); // 12 for count text + + for (i, lane) in SyncLane::ALL.iter().enumerate() { + let y = bar_start_y + i as u16; + if y >= area.bottom().saturating_sub(3) { + break; + } + + let lane_progress = &state.lanes[i]; + + // Lane label. + let label = format!("{:<12}", lane.label()); + frame.print_text_clipped( + area.x + 2, + y, + &label, + Cell { + fg: TEXT, + ..Cell::default() + }, + bar_x, + ); + + // Progress bar. + if bar_width > 2 { + render_progress_bar(frame, bar_x, y, bar_width, lane_progress.fraction()); + } + + // Count text (e.g., "50/100"). + let count_x = bar_x + bar_width + 1; + let count_text = if lane_progress.total > 0 { + format!("{}/{}", lane_progress.current, lane_progress.total) + } else if lane_progress.current > 0 { + format!("{}", lane_progress.current) + } else { + "--".to_string() + }; + frame.print_text_clipped( + count_x, + y, + &count_text, + Cell { + fg: TEXT_MUTED, + ..Cell::default() + }, + max_x, + ); + } + + // Throughput stats. + let stats_y = bar_start_y + SyncLane::ALL.len() as u16 + 1; + if stats_y < area.bottom().saturating_sub(2) && state.items_synced > 0 { + let stats = format!( + "{} items synced ({:.0} items/sec)", + state.items_synced, state.items_per_sec + ); + frame.print_text_clipped( + area.x + 2, + stats_y, + &stats, + Cell { + fg: TEXT_MUTED, + ..Cell::default() + }, + max_x, + ); + } + + // Cancel hint at bottom. 
+ let hint_y = area.bottom().saturating_sub(1); + frame.print_text_clipped( + area.x + 2, + hint_y, + "Esc: cancel sync", + Cell { + fg: TEXT_MUTED, + ..Cell::default() + }, + max_x, + ); +} + +/// Render a horizontal progress bar. +fn render_progress_bar(frame: &mut Frame<'_>, x: u16, y: u16, width: u16, fraction: f64) { + let filled = ((width as f64) * fraction).round() as u16; + let max_x = x + width; + + for col in x..max_x { + let is_filled = col < x + filled; + let cell = Cell { + fg: if is_filled { PROGRESS_FG } else { PROGRESS_BG }, + bg: if is_filled { PROGRESS_FG } else { PROGRESS_BG }, + ..Cell::default() + }; + frame.buffer.set(col, y, cell); + } +} + +// --------------------------------------------------------------------------- +// Summary view +// --------------------------------------------------------------------------- + +fn render_summary(frame: &mut Frame<'_>, state: &SyncState, area: Rect) { + let max_x = area.right(); + + // Title. + let title = "Sync Complete"; + let title_x = area.x + 2; + frame.print_text_clipped( + title_x, + area.y + 1, + title, + Cell { + fg: SUCCESS_FG, + ..Cell::default() + }, + max_x, + ); + + if let Some(ref summary) = state.summary { + // Duration. + let duration = format_duration(summary.elapsed_ms); + frame.print_text_clipped( + title_x, + area.y + 2, + &format!("Duration: {duration}"), + Cell { + fg: TEXT_MUTED, + ..Cell::default() + }, + max_x, + ); + + // Summary table header. + let table_y = area.y + 4; + let header = format!("{:<16} {:>6} {:>8}", "Entity", "New", "Updated"); + frame.print_text_clipped( + area.x + 2, + table_y, + &header, + Cell { + fg: TEXT_MUTED, + ..Cell::default() + }, + max_x, + ); + + // Summary rows. 
+ let rows = [ + ("Issues", summary.issues.new, summary.issues.updated), + ("MRs", summary.merge_requests.new, summary.merge_requests.updated), + ("Discussions", summary.discussions.new, summary.discussions.updated), + ("Notes", summary.notes.new, summary.notes.updated), + ]; + + for (i, (label, new, updated)) in rows.iter().enumerate() { + let row_y = table_y + 1 + i as u16; + if row_y >= area.bottom().saturating_sub(3) { + break; + } + + let row = format!("{label:<16} {new:>6} {updated:>8}"); + let fg = if *new > 0 || *updated > 0 { + TEXT + } else { + TEXT_MUTED + }; + frame.print_text_clipped( + area.x + 2, + row_y, + &row, + Cell { + fg, + ..Cell::default() + }, + max_x, + ); + } + + // Total. + let total_y = table_y + 1 + rows.len() as u16; + if total_y < area.bottom().saturating_sub(2) { + let total = format!("Total changes: {}", summary.total_changes()); + frame.print_text_clipped( + area.x + 2, + total_y, + &total, + Cell { + fg: ACCENT, + ..Cell::default() + }, + max_x, + ); + } + + // Per-project errors. + if summary.has_errors() { + let err_y = total_y + 2; + if err_y < area.bottom().saturating_sub(1) { + frame.print_text_clipped( + area.x + 2, + err_y, + "Errors:", + Cell { + fg: ERROR_FG, + ..Cell::default() + }, + max_x, + ); + for (i, (project, err)) in summary.project_errors.iter().enumerate() { + let y = err_y + 1 + i as u16; + if y >= area.bottom().saturating_sub(1) { + break; + } + let line = format!(" {project}: {err}"); + frame.print_text_clipped( + area.x + 2, + y, + &line, + Cell { + fg: ERROR_FG, + ..Cell::default() + }, + max_x, + ); + } + } + } + } + + // Navigation hint at bottom. 
+ let hint_y = area.bottom().saturating_sub(1); + frame.print_text_clipped( + area.x + 2, + hint_y, + "Esc: back | Enter: sync again", + Cell { + fg: TEXT_MUTED, + ..Cell::default() + }, + max_x, + ); +} + +// --------------------------------------------------------------------------- +// Cancelled / Failed views +// --------------------------------------------------------------------------- + +fn render_cancelled(frame: &mut Frame<'_>, area: Rect) { + let max_x = area.right(); + let center_y = area.y + area.height / 2; + + frame.print_text_clipped( + area.x + 2, + center_y.saturating_sub(1), + "Sync Cancelled", + Cell { + fg: ACCENT, + ..Cell::default() + }, + max_x, + ); + frame.print_text_clipped( + area.x + 2, + center_y + 1, + "Press Enter to retry, or Esc to go back.", + Cell { + fg: TEXT_MUTED, + ..Cell::default() + }, + max_x, + ); +} + +fn render_failed(frame: &mut Frame<'_>, area: Rect, error: &str) { + let max_x = area.right(); + let center_y = area.y + area.height / 2; + + frame.print_text_clipped( + area.x + 2, + center_y.saturating_sub(2), + "Sync Failed", + Cell { + fg: ERROR_FG, + ..Cell::default() + }, + max_x, + ); + + // Truncate error to fit screen. 
+ let max_len = area.width.saturating_sub(4) as usize; + let display_err = if error.len() > max_len { + format!("{}...", &error[..error.floor_char_boundary(max_len.saturating_sub(3))]) + } else { + error.to_string() + }; + frame.print_text_clipped( + area.x + 2, + center_y, + &display_err, + Cell { + fg: TEXT, + ..Cell::default() + }, + max_x, + ); + + frame.print_text_clipped( + area.x + 2, + center_y + 2, + "Press Enter to retry, or Esc to go back.", + Cell { + fg: TEXT_MUTED, + ..Cell::default() + }, + max_x, + ); +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +fn format_duration(ms: u64) -> String { + let secs = ms / 1000; + let mins = secs / 60; + let remaining_secs = secs % 60; + if mins > 0 { + format!("{mins}m {remaining_secs}s") + } else { + format!("{secs}s") + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::state::sync::{EntityChangeCounts, SyncSummary}; + use ftui::render::grapheme_pool::GraphemePool; + + macro_rules! 
with_frame { + ($width:expr, $height:expr, |$frame:ident| $body:block) => {{ + let mut pool = GraphemePool::new(); + let mut $frame = Frame::new($width, $height, &mut pool); + $body + }}; + } + + #[test] + fn test_render_idle_no_panic() { + with_frame!(80, 24, |frame| { + let state = SyncState::default(); + let area = frame.bounds(); + render_sync(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_running_no_panic() { + with_frame!(80, 24, |frame| { + let mut state = SyncState::default(); + state.start(); + state.update_progress("issues", 25, 100); + let area = frame.bounds(); + render_sync(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_complete_no_panic() { + with_frame!(80, 24, |frame| { + let mut state = SyncState::default(); + state.start(); + state.complete(5000); + state.summary = Some(SyncSummary { + issues: EntityChangeCounts { new: 5, updated: 3 }, + merge_requests: EntityChangeCounts { new: 2, updated: 1 }, + elapsed_ms: 5000, + ..Default::default() + }); + let area = frame.bounds(); + render_sync(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_cancelled_no_panic() { + with_frame!(80, 24, |frame| { + let mut state = SyncState::default(); + state.start(); + state.cancel(); + let area = frame.bounds(); + render_sync(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_failed_no_panic() { + with_frame!(80, 24, |frame| { + let mut state = SyncState::default(); + state.start(); + state.fail("network timeout".into()); + let area = frame.bounds(); + render_sync(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_tiny_terminal() { + with_frame!(8, 2, |frame| { + let state = SyncState::default(); + let area = frame.bounds(); + render_sync(&mut frame, &state, area); + // Should not panic. 
+ }); + } + + #[test] + fn test_render_complete_with_errors() { + with_frame!(80, 24, |frame| { + let mut state = SyncState::default(); + state.start(); + state.complete(3000); + state.summary = Some(SyncSummary { + elapsed_ms: 3000, + project_errors: vec![ + ("grp/repo".into(), "timeout".into()), + ], + ..Default::default() + }); + let area = frame.bounds(); + render_sync(&mut frame, &state, area); + }); + } + + #[test] + fn test_format_duration_seconds() { + assert_eq!(format_duration(3500), "3s"); + } + + #[test] + fn test_format_duration_minutes() { + assert_eq!(format_duration(125_000), "2m 5s"); + } + + #[test] + fn test_render_running_with_stats() { + with_frame!(80, 24, |frame| { + let mut state = SyncState::default(); + state.start(); + state.update_progress("issues", 50, 200); + state.update_stream_stats(1024, 50); + let area = frame.bounds(); + render_sync(&mut frame, &state, area); + }); + } +} diff --git a/crates/lore-tui/src/view/trace.rs b/crates/lore-tui/src/view/trace.rs index ba82820..95cfea4 100644 --- a/crates/lore-tui/src/view/trace.rs +++ b/crates/lore-tui/src/view/trace.rs @@ -24,16 +24,16 @@ use ftui::render::drawing::Draw; use ftui::render::frame::Frame; use crate::state::trace::TraceState; +use crate::text_width::cursor_cell_offset; use lore::core::trace::TraceResult; +use super::common::truncate_str; +use super::{ACCENT, BG_SURFACE, TEXT, TEXT_MUTED}; + // --------------------------------------------------------------------------- -// Colors (Flexoki palette) +// Colors (Flexoki palette — extras not in parent module) // --------------------------------------------------------------------------- -const TEXT: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3); // tx -const TEXT_MUTED: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2 -const BG_SURFACE: PackedRgba = PackedRgba::rgb(0x28, 0x28, 0x24); // bg-2 -const ACCENT: PackedRgba = PackedRgba::rgb(0xDA, 0x70, 0x2C); // orange const GREEN: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39); 
// green const CYAN: PackedRgba = PackedRgba::rgb(0x3A, 0xA9, 0x9F); // cyan const YELLOW: PackedRgba = PackedRgba::rgb(0xD0, 0xA2, 0x15); // yellow @@ -135,7 +135,8 @@ fn render_path_input(frame: &mut Frame<'_>, state: &TraceState, x: u16, y: u16, // Cursor. if state.path_focused { - let cursor_x = after_label + state.path_cursor as u16; + let cursor_col = state.path_input[..state.path_cursor].chars().count() as u16; + let cursor_x = after_label + cursor_col; if cursor_x < max_x { let cursor_cell = Cell { fg: PackedRgba::rgb(0x10, 0x0F, 0x0F), @@ -144,8 +145,8 @@ fn render_path_input(frame: &mut Frame<'_>, state: &TraceState, x: u16, y: u16, }; let ch = state .path_input - .chars() - .nth(state.path_cursor) + .get(state.path_cursor..) + .and_then(|s| s.chars().next()) .unwrap_or(' '); frame.print_text_clipped(cursor_x, y, &ch.to_string(), cursor_cell, max_x); } @@ -457,16 +458,6 @@ fn render_hint_bar(frame: &mut Frame<'_>, x: u16, y: u16, max_x: u16) { frame.print_text_clipped(x + 1, y, hints, style, max_x); } -/// Truncate a string to at most `max_chars` display characters. -fn truncate_str(s: &str, max_chars: usize) -> String { - if s.chars().count() <= max_chars { - s.to_string() - } else { - let truncated: String = s.chars().take(max_chars.saturating_sub(1)).collect(); - format!("{truncated}…") - } -} - // --------------------------------------------------------------------------- // Tests // --------------------------------------------------------------------------- diff --git a/crates/lore-tui/src/view/who.rs b/crates/lore-tui/src/view/who.rs index 9eaad01..d6e5bc6 100644 --- a/crates/lore-tui/src/view/who.rs +++ b/crates/lore-tui/src/view/who.rs @@ -25,6 +25,7 @@ use lore::core::who_types::{ use crate::state::who::{WhoMode, WhoState}; +use super::common::truncate_str; use super::{ACCENT, BG_SURFACE, BORDER, TEXT, TEXT_MUTED}; /// Muted accent for inactive mode tabs. 
@@ -915,20 +916,6 @@ fn render_truncation_footer( frame.print_text_clipped(footer_x, footer_y, &footer, cell, max_x); } -/// Truncate a string to at most `max_chars` display characters. -fn truncate_str(s: &str, max_chars: usize) -> String { - let chars: Vec<char> = s.chars().collect(); - if chars.len() <= max_chars { - s.to_string() - } else if max_chars <= 3 { - chars[..max_chars].iter().collect() - } else { - let mut result: String = chars[..max_chars - 3].iter().collect(); - result.push_str("..."); - result - } -} - // --------------------------------------------------------------------------- // Tests // --------------------------------------------------------------------------- @@ -1029,7 +1016,7 @@ mod tests { #[test] fn test_truncate_str() { assert_eq!(truncate_str("hello", 10), "hello"); - assert_eq!(truncate_str("hello world", 8), "hello..."); + assert_eq!(truncate_str("hello world", 8), "hello w\u{2026}"); assert_eq!(truncate_str("hi", 2), "hi"); assert_eq!(truncate_str("abc", 3), "abc"); } diff --git a/src/cli/autocorrect.rs b/src/cli/autocorrect.rs index e8dffd3..b851893 100644 --- a/src/cli/autocorrect.rs +++ b/src/cli/autocorrect.rs @@ -128,6 +128,7 @@ const COMMAND_FLAGS: &[(&str, &[&str])] = &[ "--dry-run", "--no-dry-run", "--timings", + "--tui", ], ), ( @@ -256,6 +257,7 @@ const COMMAND_FLAGS: &[(&str, &[&str])] = &[ ("generate-docs", &["--full", "--project"]), ("completions", &[]), ("robot-docs", &["--brief"]), + ("tui", &["--config"]), ( "list", &[ diff --git a/src/cli/commands/mod.rs b/src/cli/commands/mod.rs index f83a35c..7cfa18a 100644 --- a/src/cli/commands/mod.rs +++ b/src/cli/commands/mod.rs @@ -15,6 +15,7 @@ pub mod sync; pub mod sync_status; pub mod timeline; pub mod trace; +pub mod tui; pub mod who; pub use auth_test::run_auth_test; @@ -50,6 +51,7 @@ pub use sync::{SyncOptions, SyncResult, print_sync, print_sync_json, run_sync}; pub use sync_status::{print_sync_status, print_sync_status_json, run_sync_status}; pub use
timeline::{TimelineParams, print_timeline, print_timeline_json_with_meta, run_timeline}; pub use trace::{parse_trace_path, print_trace, print_trace_json}; +pub use tui::{TuiArgs, find_lore_tui, run_tui}; pub use who::{ WhoRun, half_life_decay, print_who_human, print_who_json, query_active, query_expert, query_overlap, query_reviews, query_workload, run_who, diff --git a/src/cli/commands/tui.rs b/src/cli/commands/tui.rs new file mode 100644 index 0000000..8444f80 --- /dev/null +++ b/src/cli/commands/tui.rs @@ -0,0 +1,121 @@ +//! `lore tui` subcommand — delegates to the `lore-tui` binary. +//! +//! Resolves `lore-tui` via PATH and execs it, replacing the current process. +//! In robot mode, returns a structured JSON error (TUI is human-only). + +use std::path::PathBuf; + +use clap::Parser; + +/// Launch the interactive TUI dashboard +#[derive(Parser, Debug)] +pub struct TuiArgs { + /// Path to config file (forwarded to lore-tui) + #[arg(long)] + pub config: Option<String>, +} + +/// Resolve the `lore-tui` binary via PATH lookup. +pub fn find_lore_tui() -> Option<PathBuf> { + which::which("lore-tui").ok() +} + +/// Run the TUI subcommand. +/// +/// In robot mode this returns an error (TUI requires a terminal). +/// Otherwise it execs `lore-tui`, replacing the current process. +pub fn run_tui(args: &TuiArgs, robot_mode: bool) -> Result<(), Box<dyn std::error::Error>> { + if robot_mode { + let err = serde_json::json!({ + "error": { + "code": "TUI_NOT_AVAILABLE", + "message": "The TUI requires an interactive terminal and cannot run in robot mode.", + "suggestion": "Use `lore --robot <command>` for programmatic access.", + "actions": [] + } + }); + eprintln!("{err}"); + std::process::exit(2); + } + + let binary = find_lore_tui().ok_or_else(|| { + "Could not find `lore-tui` on PATH.\n\n\ + Install it with:\n \ + cargo install --path crates/lore-tui\n\n\ + Or build the workspace:\n \ + cargo build --release -p lore-tui" + .to_string() + })?; + + // Build the command with explicit arguments (no shell interpolation).
+ let mut cmd = std::process::Command::new(&binary); + if let Some(ref config) = args.config { + cmd.arg("--config").arg(config); + } + + // On Unix, exec() replaces the current process entirely. + // This gives lore-tui direct terminal control (stdin/stdout/stderr). + #[cfg(unix)] + { + use std::os::unix::process::CommandExt; + let err = cmd.exec(); + // exec() only returns on error + Err(format!("Failed to exec lore-tui at {}: {err}", binary.display()).into()) + } + + // On non-Unix, spawn and wait. + #[cfg(not(unix))] + { + let status = cmd.status()?; + if status.success() { + Ok(()) + } else { + std::process::exit(status.code().unwrap_or(1)); + } + } +} + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_find_lore_tui_does_not_panic() { + // Just verify the lookup doesn't panic; it may or may not find the binary. + let _ = find_lore_tui(); + } + + #[test] + fn test_robot_mode_error_json_structure() { + let err = serde_json::json!({ + "error": { + "code": "TUI_NOT_AVAILABLE", + "message": "The TUI requires an interactive terminal and cannot run in robot mode.", + "suggestion": "Use `lore --robot <command>` for programmatic access.", + "actions": [] + } + }); + let parsed: serde_json::Value = serde_json::from_str(&err.to_string()).unwrap(); + assert_eq!(parsed["error"]["code"], "TUI_NOT_AVAILABLE"); + } + + #[test] + fn test_tui_args_default() { + let args = TuiArgs { config: None }; + assert!(args.config.is_none()); + } + + #[test] + fn test_tui_args_with_config() { + let args = TuiArgs { + config: Some("/tmp/test.json".into()), + }; + assert_eq!(args.config.as_deref(), Some("/tmp/test.json")); + } + + #[test] + fn test_binary_not_found_error_message() { + let msg = "Could not find `lore-tui` on PATH."; + assert!(msg.contains("lore-tui")); + } +} diff --git a/src/cli/mod.rs b/src/cli/mod.rs index 08d81a9..e51cb70 100644 --- a/src/cli/mod.rs +++ b/src/cli/mod.rs @@ -7,6 +7,8 @@ pub mod robot; use clap::{Parser, Subcommand}; use std::io::IsTerminal; +use
commands::tui::TuiArgs; + #[derive(Parser)] #[command(name = "lore")] #[command(version = env!("LORE_VERSION"), about = "Local GitLab data management with semantic search", long_about = None)] @@ -241,6 +243,9 @@ pub enum Commands { /// Trace why code was introduced: file -> MR -> issue -> discussion Trace(TraceArgs), + /// Launch the interactive TUI dashboard + Tui(TuiArgs), + /// Detect discussion divergence from original intent Drift { /// Entity type (currently only "issues" supported) @@ -805,6 +810,10 @@ pub struct SyncArgs { /// Show detailed timing breakdown for sync stages #[arg(short = 't', long = "timings")] pub timings: bool, + + /// Show sync progress in interactive TUI + #[arg(long)] + pub tui: bool, } #[derive(Parser)] diff --git a/src/main.rs b/src/main.rs index 3edf1e3..c4ffc72 100644 --- a/src/main.rs +++ b/src/main.rs @@ -22,9 +22,9 @@ use lore::cli::commands::{ print_sync, print_sync_json, print_sync_status, print_sync_status_json, print_timeline, print_timeline_json_with_meta, print_trace, print_trace_json, print_who_human, print_who_json, query_notes, run_auth_test, run_count, run_count_events, run_doctor, run_drift, run_embed, - run_file_history, run_generate_docs, run_ingest, run_ingest_dry_run, run_init, run_list_issues, - run_list_mrs, run_search, run_show_issue, run_show_mr, run_stats, run_sync, run_sync_status, - run_timeline, run_who, + find_lore_tui, run_file_history, run_generate_docs, run_ingest, run_ingest_dry_run, run_init, + run_list_issues, run_list_mrs, run_search, run_show_issue, run_show_mr, run_stats, run_sync, + run_sync_status, run_timeline, run_tui, run_who, }; use lore::cli::render::{ColorMode, GlyphMode, Icons, LoreRenderer, Theme}; use lore::cli::robot::{RobotMeta, strip_schemas}; @@ -203,6 +203,7 @@ async fn main() { handle_file_history(cli.config.as_deref(), args, robot_mode) } Some(Commands::Trace(args)) => handle_trace(cli.config.as_deref(), args, robot_mode), + Some(Commands::Tui(args)) => run_tui(&args, 
robot_mode), Some(Commands::Drift { entity_type, iid, @@ -2153,6 +2154,53 @@ async fn handle_sync_cmd( robot_mode: bool, metrics: &MetricsLayer, ) -> Result<(), Box<dyn std::error::Error>> { + // --tui: delegate to lore-tui binary with --sync flag. + // Uses explicit argument list (no shell interpolation) for safe process execution. + if args.tui { + if robot_mode { + let err = serde_json::json!({ + "error": { + "code": "TUI_NOT_AVAILABLE", + "message": "Cannot use --tui with --robot. The TUI requires an interactive terminal.", + "suggestion": "Remove the --tui flag for robot mode.", + "actions": [] + } + }); + eprintln!("{err}"); + std::process::exit(2); + } + + let binary = find_lore_tui().ok_or( + "Could not find `lore-tui` on PATH.\n\n\ + Install it with:\n \ + cargo install --path crates/lore-tui\n\n\ + Or build the workspace:\n \ + cargo build --release -p lore-tui", + )?; + + let mut cmd = std::process::Command::new(&binary); + cmd.arg("--sync"); + if let Some(config_path) = config_override { + cmd.arg("--config").arg(config_path); + } + + #[cfg(unix)] + { + use std::os::unix::process::CommandExt; + let err = cmd.exec(); + return Err(format!("Failed to exec lore-tui: {err}").into()); + } + + #[cfg(not(unix))] + { + let status = cmd.status()?; + if status.success() { + return Ok(()); + } + std::process::exit(status.code().unwrap_or(1)); + } + } + let dry_run = args.dry_run && !args.no_dry_run; let mut config = Config::load(config_override)?;