From 90c8b432673ce809fb20af6a679dd45ef04b188a Mon Sep 17 00:00:00 2001 From: teernisse Date: Wed, 18 Feb 2026 13:06:06 -0500 Subject: [PATCH] feat(tui): Phase 2 Issue List + MR List screens Implement state, action, and view layers for both list screens: - Issue List: keyset pagination, snapshot fence, filter DSL, label aggregation - MR List: mirrors Issue pattern with draft/reviewer/target branch filters - Migration 027: covering indexes for TUI list screen queries - Updated Msg types to use typed Page structs instead of raw Vec - 303 tests passing, clippy clean Beads: bd-3ei1, bd-2kr0, bd-3pm2 --- .beads/issues.jsonl | 80 +- .beads/last-touched | 2 +- crates/lore-tui/Cargo.lock | 109 +- crates/lore-tui/src/action.rs | 1628 +++++++++++++++++ crates/lore-tui/src/app/mod.rs | 73 + crates/lore-tui/src/app/tests.rs | 330 ++++ crates/lore-tui/src/{app.rs => app/update.rs} | 391 +--- crates/lore-tui/src/clock.rs | 14 + .../src/{commands.rs => commands.rs.bak} | 0 crates/lore-tui/src/commands/defs.rs | 180 ++ crates/lore-tui/src/commands/mod.rs | 227 +++ crates/lore-tui/src/commands/registry.rs | 418 +++++ crates/lore-tui/src/crash_context.rs | 7 + crates/lore-tui/src/filter_dsl.rs | 316 ++++ crates/lore-tui/src/layout.rs | 102 ++ crates/lore-tui/src/lib.rs | 5 + crates/lore-tui/src/message.rs | 31 +- crates/lore-tui/src/navigation.rs | 59 +- crates/lore-tui/src/state/dashboard.rs | 249 ++- crates/lore-tui/src/state/issue_list.rs | 372 +++- crates/lore-tui/src/state/mod.rs | 11 +- crates/lore-tui/src/state/mr_list.rs | 418 ++++- .../lore-tui/src/view/common/entity_table.rs | 676 +++++++ .../lore-tui/src/view/common/error_toast.rs | 10 +- crates/lore-tui/src/view/common/filter_bar.rs | 469 +++++ crates/lore-tui/src/view/common/mod.rs | 4 + crates/lore-tui/src/view/dashboard.rs | 554 ++++++ crates/lore-tui/src/view/issue_list.rs | 353 ++++ crates/lore-tui/src/view/mod.rs | 21 +- crates/lore-tui/src/view/mr_list.rs | 390 ++++ migrations/027_tui_list_indexes.sql | 41 + 
src/core/config.rs | 789 ++++++++ src/core/db.rs | 4 + 33 files changed, 7850 insertions(+), 483 deletions(-) create mode 100644 crates/lore-tui/src/action.rs create mode 100644 crates/lore-tui/src/app/mod.rs create mode 100644 crates/lore-tui/src/app/tests.rs rename crates/lore-tui/src/{app.rs => app/update.rs} (50%) rename crates/lore-tui/src/{commands.rs => commands.rs.bak} (100%) create mode 100644 crates/lore-tui/src/commands/defs.rs create mode 100644 crates/lore-tui/src/commands/mod.rs create mode 100644 crates/lore-tui/src/commands/registry.rs create mode 100644 crates/lore-tui/src/filter_dsl.rs create mode 100644 crates/lore-tui/src/layout.rs create mode 100644 crates/lore-tui/src/view/common/entity_table.rs create mode 100644 crates/lore-tui/src/view/common/filter_bar.rs create mode 100644 crates/lore-tui/src/view/dashboard.rs create mode 100644 crates/lore-tui/src/view/issue_list.rs create mode 100644 crates/lore-tui/src/view/mr_list.rs create mode 100644 migrations/027_tui_list_indexes.sql diff --git a/.beads/issues.jsonl b/.beads/issues.jsonl index a2ae9ca..eb53a18 100644 --- a/.beads/issues.jsonl +++ b/.beads/issues.jsonl @@ -1,26 +1,28 @@ {"id":"bd-10f","title":"Update orchestrator for MR ingestion","description":"## Background\nOrchestrator coordinates MR ingestion followed by dependent discussion sync. Discussion sync targets are queried from DB (not collected in-memory) to handle large projects without memory growth. This is critical for projects with 10k+ MRs where collecting sync targets in memory during ingestion would cause unbounded growth.\n\n## Approach\nUpdate `src/ingestion/orchestrator.rs` to:\n1. Support `merge_requests` resource type in `run_ingestion()` match arm\n2. Query DB for MRs needing discussion sync after MR ingestion completes\n3. 
Execute discussion sync with bounded concurrency using `futures::stream::buffer_unordered`\n\n## Files\n- `src/ingestion/orchestrator.rs` - Update existing orchestrator\n\n## Acceptance Criteria\n- [ ] `run_ingestion()` handles `resource_type == \"merge_requests\"`\n- [ ] After MR ingestion, queries DB for MRs where `updated_at > discussions_synced_for_updated_at`\n- [ ] Discussion sync uses `dependent_concurrency` from config (default 5)\n- [ ] Each MR's discussion sync is independent (partial failures don't block others)\n- [ ] Results aggregated from MR ingestion + all discussion ingestion results\n- [ ] `cargo test orchestrator` passes\n\n## TDD Loop\nRED: `cargo test orchestrator_mr` -> merge_requests not handled\nGREEN: Add MR branch to orchestrator\nVERIFY: `cargo test orchestrator`\n\n## Struct Definition\n```rust\n/// Lightweight struct for DB query results - only fields needed for discussion sync\nstruct MrForDiscussionSync {\n local_mr_id: i64,\n iid: i64,\n updated_at: i64,\n}\n```\n\n## DB Query for Discussion Sync Targets\n```sql\nSELECT id, iid, updated_at\nFROM merge_requests\nWHERE project_id = ?\n AND (discussions_synced_for_updated_at IS NULL\n OR updated_at > discussions_synced_for_updated_at)\nORDER BY updated_at ASC;\n```\n\n## Orchestrator Flow\n```rust\npub async fn run_ingestion(\n &self,\n resource_type: &str,\n full_sync: bool,\n) -> Result {\n match resource_type {\n \"issues\" => self.run_issue_ingestion(full_sync).await,\n \"merge_requests\" => self.run_mr_ingestion(full_sync).await,\n _ => Err(GiError::InvalidArgument {\n name: \"type\".to_string(),\n value: resource_type.to_string(),\n expected: \"issues or merge_requests\".to_string(),\n }),\n }\n}\n\nasync fn run_mr_ingestion(&self, full_sync: bool) -> Result {\n // 1. Ingest MRs (handles cursor reset if full_sync)\n let mr_result = ingest_merge_requests(\n &self.conn, &self.client, &self.config,\n self.project_id, self.gitlab_project_id, full_sync,\n ).await?;\n \n // 2. 
Query DB for MRs needing discussion sync\n // CRITICAL: Do this AFTER ingestion, not during, to avoid memory growth\n let mrs_needing_sync: Vec = {\n let mut stmt = self.conn.prepare(\n \"SELECT id, iid, updated_at FROM merge_requests\n WHERE project_id = ? AND (discussions_synced_for_updated_at IS NULL\n OR updated_at > discussions_synced_for_updated_at)\n ORDER BY updated_at ASC\"\n )?;\n stmt.query_map([self.project_id], |row| {\n Ok(MrForDiscussionSync {\n local_mr_id: row.get(0)?,\n iid: row.get(1)?,\n updated_at: row.get(2)?,\n })\n })?.collect::, _>>()?\n };\n \n let total_needing_sync = mrs_needing_sync.len();\n info!(\"Discussion sync needed for {} MRs\", total_needing_sync);\n \n // 3. Execute discussion sync with bounded concurrency\n let concurrency = self.config.sync.dependent_concurrency.unwrap_or(5);\n \n let discussion_results: Vec> = \n futures::stream::iter(mrs_needing_sync)\n .map(|mr| {\n let conn = &self.conn;\n let client = &self.client;\n let config = &self.config;\n let project_id = self.project_id;\n let gitlab_project_id = self.gitlab_project_id;\n async move {\n ingest_mr_discussions(\n conn, client, config,\n project_id, gitlab_project_id,\n mr.iid, mr.local_mr_id, mr.updated_at,\n ).await\n }\n })\n .buffer_unordered(concurrency)\n .collect()\n .await;\n \n // 4. 
Aggregate results\n let mut total_discussions = 0;\n let mut total_notes = 0;\n let mut total_diffnotes = 0;\n let mut failed_syncs = 0;\n \n for result in discussion_results {\n match result {\n Ok(r) => {\n total_discussions += r.discussions_upserted;\n total_notes += r.notes_upserted;\n total_diffnotes += r.diffnotes_count;\n }\n Err(e) => {\n warn!(\"Discussion sync failed: {}\", e);\n failed_syncs += 1;\n }\n }\n }\n \n Ok(IngestResult {\n mrs_fetched: mr_result.fetched,\n mrs_upserted: mr_result.upserted,\n labels_created: mr_result.labels_created,\n assignees_linked: mr_result.assignees_linked,\n reviewers_linked: mr_result.reviewers_linked,\n discussions_synced: total_discussions,\n notes_synced: total_notes,\n diffnotes_count: total_diffnotes,\n mrs_skipped_discussion_sync: (mr_result.fetched as usize).saturating_sub(total_needing_sync),\n failed_discussion_syncs: failed_syncs,\n })\n}\n```\n\n## Required Imports\n```rust\nuse futures::stream::StreamExt;\nuse crate::ingestion::merge_requests::ingest_merge_requests;\nuse crate::ingestion::mr_discussions::{ingest_mr_discussions, IngestMrDiscussionsResult};\n```\n\n## Config Reference\n```rust\n// In config.rs or similar\npub struct SyncConfig {\n pub dependent_concurrency: Option, // Default 5\n // ... 
other fields\n}\n```\n\n## Edge Cases\n- Large projects: 10k+ MRs may need discussion sync - DB-driven query avoids memory growth\n- Partial failures: Each MR's discussion sync is independent; failures logged but don't stop others\n- Concurrency: Too high (>10) may hit GitLab rate limits; default 5 balances throughput with safety\n- Empty result: If no MRs need sync, discussion phase completes immediately with zero counts","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:42.731140Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:25:13.472341Z","closed_at":"2026-01-27T00:25:13.472281Z","close_reason":"Updated orchestrator for MR ingestion:\n- Added IngestMrProjectResult struct with all MR-specific metrics\n- Added ingest_project_merge_requests() and ingest_project_merge_requests_with_progress()\n- Queries DB for MRs needing discussion sync AFTER ingestion (memory-safe for large projects)\n- Added MR-specific progress events (MrsFetchStarted, MrFetched, etc.)\n- Sequential discussion sync using dependent_concurrency config\n- All 164 tests passing","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-10f","depends_on_id":"bd-20h","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-10f","depends_on_id":"bd-ser","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-10i","title":"Epic: CP2 Gate D - Resumability Proof","description":"## Background\nGate D validates resumability and crash recovery. Proves that cursor and watermark mechanics prevent massive refetch after interruption. 
This is critical for large projects where a full refetch would take hours.\n\n## Acceptance Criteria (Pass/Fail)\n- [ ] Kill mid-run, rerun -> bounded redo (not full refetch from beginning)\n- [ ] Cursor saved at page boundary (not item boundary)\n- [ ] No redundant discussion refetch after crash recovery\n- [ ] No watermark advancement on partial pagination failure\n- [ ] Single-flight lock prevents concurrent ingest runs\n- [ ] `--full` flag resets MR cursor to NULL\n- [ ] `--full` flag resets ALL `discussions_synced_for_updated_at` to NULL\n- [ ] `--force` bypasses single-flight lock\n\n## Validation Script\n```bash\n#!/bin/bash\nset -e\n\nDB_PATH=\"${XDG_DATA_HOME:-$HOME/.local/share}/gitlab-inbox/db.sqlite3\"\n\necho \"=== Gate D: Resumability Proof ===\"\n\n# 1. Test single-flight lock\necho \"Step 1: Test single-flight lock...\"\ngi ingest --type=merge_requests &\nFIRST_PID=$!\nsleep 1\n\n# Try second ingest - should fail with lock error\nif gi ingest --type=merge_requests 2>&1 | grep -q \"lock\\|already running\"; then\n echo \" PASS: Second ingest blocked by lock\"\nelse\n echo \" FAIL: Lock not working\"\nfi\nwait $FIRST_PID 2>/dev/null || true\n\n# 2. Test --force bypasses lock\necho \"Step 2: Test --force flag...\"\ngi ingest --type=merge_requests &\nFIRST_PID=$!\nsleep 1\nif gi ingest --type=merge_requests --force 2>&1; then\n echo \" PASS: --force bypassed lock\"\nelse\n echo \" Note: --force test inconclusive\"\nfi\nwait $FIRST_PID 2>/dev/null || true\n\n# 3. Check cursor state\necho \"Step 3: Check cursor state...\"\nsqlite3 \"$DB_PATH\" \"\n SELECT resource_type, updated_at, gitlab_id\n FROM sync_cursors \n WHERE resource_type = 'merge_requests';\n\"\n\n# 4. 
Test crash recovery\necho \"Step 4: Test crash recovery...\"\n\n# Record current cursor\nCURSOR_BEFORE=$(sqlite3 \"$DB_PATH\" \"\n SELECT updated_at FROM sync_cursors WHERE resource_type = 'merge_requests';\n\")\necho \" Cursor before: $CURSOR_BEFORE\"\n\n# Force full sync and kill\necho \" Starting full sync then killing...\"\ngi ingest --type=merge_requests --full &\nPID=$!\nsleep 5 && kill -9 $PID 2>/dev/null || true\nwait $PID 2>/dev/null || true\n\n# Check cursor was saved (should be non-null if any page completed)\nCURSOR_AFTER=$(sqlite3 \"$DB_PATH\" \"\n SELECT updated_at FROM sync_cursors WHERE resource_type = 'merge_requests';\n\")\necho \" Cursor after kill: $CURSOR_AFTER\"\n\n# Re-run and verify bounded redo\necho \" Re-running (should resume from cursor)...\"\ntime gi ingest --type=merge_requests\n# Should be faster than first full sync\n\n# 5. Test --full reset\necho \"Step 5: Test --full resets watermarks...\"\n\n# Check watermarks before\nWATERMARKS_BEFORE=$(sqlite3 \"$DB_PATH\" \"\n SELECT COUNT(*) FROM merge_requests \n WHERE discussions_synced_for_updated_at IS NOT NULL;\n\")\necho \" Watermarks set before --full: $WATERMARKS_BEFORE\"\n\n# Record cursor before\nCURSOR_BEFORE_FULL=$(sqlite3 \"$DB_PATH\" \"\n SELECT updated_at, gitlab_id FROM sync_cursors WHERE resource_type = 'merge_requests';\n\")\necho \" Cursor before --full: $CURSOR_BEFORE_FULL\"\n\n# Run --full\ngi ingest --type=merge_requests --full\n\n# Check cursor was reset then rebuilt\nCURSOR_AFTER_FULL=$(sqlite3 \"$DB_PATH\" \"\n SELECT updated_at, gitlab_id FROM sync_cursors WHERE resource_type = 'merge_requests';\n\")\necho \" Cursor after --full: $CURSOR_AFTER_FULL\"\n\n# Watermarks should be set again (sync completed)\nWATERMARKS_AFTER=$(sqlite3 \"$DB_PATH\" \"\n SELECT COUNT(*) FROM merge_requests \n WHERE discussions_synced_for_updated_at IS NOT NULL;\n\")\necho \" Watermarks set after --full: $WATERMARKS_AFTER\"\n\necho \"\"\necho \"=== Gate D: PASSED ===\"\n```\n\n## Watermark 
Safety Test (Simulated Network Failure)\n```bash\n# This tests that watermark doesn't advance on partial failure\n# Requires ability to simulate network issues\n\n# 1. Get an MR that needs discussion sync\nMR_ID=$(sqlite3 \"$DB_PATH\" \"\n SELECT id FROM merge_requests \n WHERE discussions_synced_for_updated_at IS NULL \n OR updated_at > discussions_synced_for_updated_at\n LIMIT 1;\n\")\n\n# 2. Note current watermark\nWATERMARK_BEFORE=$(sqlite3 \"$DB_PATH\" \"\n SELECT discussions_synced_for_updated_at FROM merge_requests WHERE id = $MR_ID;\n\")\necho \"Watermark before: $WATERMARK_BEFORE\"\n\n# 3. Simulate network failure (requires network manipulation)\n# Option A: Block GitLab API temporarily\n# Option B: Run in a container with network limits\n# Option C: Use the automated test instead:\ncargo test does_not_advance_discussion_watermark_on_partial_failure\n\n# 4. Verify watermark unchanged after failure\nWATERMARK_AFTER=$(sqlite3 \"$DB_PATH\" \"\n SELECT discussions_synced_for_updated_at FROM merge_requests WHERE id = $MR_ID;\n\")\necho \"Watermark after failure: $WATERMARK_AFTER\"\n[ \"$WATERMARK_BEFORE\" = \"$WATERMARK_AFTER\" ] && echo \"PASS: Watermark preserved\"\n```\n\n## Test Commands (Quick Verification)\n```bash\n# Check cursor state:\nsqlite3 ~/.local/share/gitlab-inbox/db.sqlite3 \"\n SELECT * FROM sync_cursors WHERE resource_type = 'merge_requests';\n\"\n\n# Check watermark distribution:\nsqlite3 ~/.local/share/gitlab-inbox/db.sqlite3 \"\n SELECT \n SUM(CASE WHEN discussions_synced_for_updated_at IS NULL THEN 1 ELSE 0 END) as needs_sync,\n SUM(CASE WHEN discussions_synced_for_updated_at IS NOT NULL THEN 1 ELSE 0 END) as synced\n FROM merge_requests;\n\"\n\n# Test --full resets (check before/after):\nsqlite3 ~/.local/share/gitlab-inbox/db.sqlite3 \"SELECT COUNT(*) FROM merge_requests WHERE discussions_synced_for_updated_at IS NOT NULL;\"\ngi ingest --type=merge_requests --full\n# During full sync, watermarks should be NULL, then 
repopulated\n```\n\n## Critical Automated Tests\nThese tests MUST pass for Gate D:\n```bash\ncargo test does_not_advance_discussion_watermark_on_partial_failure\ncargo test full_sync_resets_discussion_watermarks\ncargo test cursor_saved_at_page_boundary\n```\n\n## Dependencies\nThis gate requires:\n- bd-mk3 (ingest command with --full and --force support)\n- bd-ser (MR ingestion with cursor mechanics)\n- bd-20h (MR discussion ingestion with watermark safety)\n- Gates A, B, C must pass first\n\n## Edge Cases\n- Very fast sync: May complete before kill signal reaches; retest with larger project\n- Lock file stale: If previous run crashed, lock file may exist; --force handles this\n- Clock skew: Cursor timestamps should use server time, not local time","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-26T22:06:02.124186Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:48:21.060596Z","closed_at":"2026-01-27T00:48:21.060555Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-10i","depends_on_id":"bd-mk3","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-11mg","title":"Add CLI flags: --as-of, --explain-score, --include-bots, --all-history","description":"## Background\nThe who command needs new CLI flags for reproducible scoring (--as-of), score transparency (--explain-score), bot inclusion (--include-bots), and full-history mode (--all-history). The default --since for expert mode changes from 6m to 24m. At this point query_expert() already accepts the new params (from bd-13q8).\n\n## Approach\nModify WhoArgs struct and run_who() (who.rs:276). 
The command uses clap derive macros.\n\n### New clap fields on WhoArgs:\n```rust\n#[arg(long, value_name = \"TIMESTAMP\")]\npub as_of: Option,\n\n#[arg(long, conflicts_with = \"detail\")]\npub explain_score: bool,\n\n#[arg(long)]\npub include_bots: bool,\n\n#[arg(long, conflicts_with = \"since\")]\npub all_history: bool,\n```\n\n### run_who() changes (who.rs:276):\n1. Default --since: \"6m\" -> \"24m\" for expert mode\n2. **Path canonicalization**: Call normalize_query_path() on raw path input at top of run_who(), before build_path_query(). Store both original and normalized for robot JSON.\n3. Parse --as-of: RFC3339 or YYYY-MM-DD (append T23:59:59.999Z for end-of-day UTC) -> i64 millis. Default: now_ms()\n4. Parse --all-history: set since_ms = 0\n5. Thread as_of_ms, explain_score, include_bots through to query_expert()\n6. Update the production query_expert() callsite (line ~311) from the default values bd-13q8 set to the actual parsed flag values\n\n### Robot JSON resolved_input additions:\n- scoring_model_version: 2\n- path_input_original: raw user input\n- path_input_normalized: after normalize_query_path()\n- as_of_ms/as_of_iso\n- window_start_iso/window_end_iso/window_end_exclusive: true\n- since_mode: \"all\" | \"24m\" | user value\n- excluded_usernames_applied: true|false\n\n### Robot JSON per-expert (explain_score): score_raw + components object\n### Human output (explain_score): parenthetical after score: `42 (author:28.5 review:10.0 notes:3.5)`\n\n## TDD Loop\n\n### RED (write these 8 tests first):\n- test_explain_score_components_sum_to_total: components sum == score_raw within tolerance\n- test_as_of_produces_deterministic_results: two runs with same as_of -> identical\n- test_as_of_excludes_future_events: event after as_of excluded entirely\n- test_as_of_exclusive_upper_bound: event at exactly as_of_ms excluded (strict <)\n- test_since_relative_to_as_of_clock: since window from as_of, not wall clock\n- test_explain_and_detail_are_mutually_exclusive: 
clap parse error\n- test_excluded_usernames_filters_bots: renovate-bot filtered, jsmith present\n- test_include_bots_flag_disables_filtering: both appear with --include-bots\n\n### GREEN: Add clap args, parse logic, robot JSON fields, human output format.\n### VERIFY: cargo test -p lore -- test_explain_score test_as_of test_since_relative test_excluded test_include_bots\n\n## Acceptance Criteria\n- [ ] All 8 tests pass green\n- [ ] --as-of parses RFC3339 and YYYY-MM-DD (end-of-day UTC)\n- [ ] --explain-score conflicts with --detail (clap error at parse time)\n- [ ] --all-history conflicts with --since (clap error at parse time)\n- [ ] Default --since is 24m for expert mode\n- [ ] Robot JSON includes scoring_model_version: 2\n- [ ] Robot JSON includes path_input_original AND path_input_normalized\n- [ ] Robot JSON includes score_raw + components when --explain-score\n- [ ] Human output appends component parenthetical when --explain-score\n\n## Files\n- MODIFY: src/cli/commands/who.rs (WhoArgs struct, run_who at line 276, robot/human output rendering)\n\n## Edge Cases\n- YYYY-MM-DD parsing: chrono NaiveDate then to end-of-day UTC (T23:59:59.999Z)\n- as_of in the past with --since: since window = as_of_ms - duration, not now - duration\n- since_mode in robot JSON: \"all\" for --all-history, \"24m\" default, user value otherwise\n- Path normalization runs BEFORE path resolution","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-09T17:00:11.115322Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:48:08.826661Z","compaction_level":0,"original_size":0,"labels":["cli","scoring"],"dependencies":[{"issue_id":"bd-11mg","depends_on_id":"bd-13q8","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"},{"issue_id":"bd-11mg","depends_on_id":"bd-18dn","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"}]} +{"id":"bd-11mg","title":"Add CLI flags: --as-of, --explain-score, --include-bots, 
--all-history","description":"## Background\nThe who command needs new CLI flags for reproducible scoring (--as-of), score transparency (--explain-score), bot inclusion (--include-bots), and full-history mode (--all-history). The default --since for expert mode changes from 6m to 24m. At this point query_expert() already accepts the new params (from bd-13q8).\n\n## Approach\nModify WhoArgs struct and run_who() (who.rs:276). The command uses clap derive macros.\n\n### New clap fields on WhoArgs:\n```rust\n#[arg(long, value_name = \"TIMESTAMP\")]\npub as_of: Option,\n\n#[arg(long, conflicts_with = \"detail\")]\npub explain_score: bool,\n\n#[arg(long)]\npub include_bots: bool,\n\n#[arg(long, conflicts_with = \"since\")]\npub all_history: bool,\n```\n\n### run_who() changes (who.rs:276):\n1. Default --since: \"6m\" -> \"24m\" for expert mode\n2. **Path canonicalization**: Call normalize_query_path() on raw path input at top of run_who(), before build_path_query(). Store both original and normalized for robot JSON.\n3. Parse --as-of: RFC3339 or YYYY-MM-DD (append T23:59:59.999Z for end-of-day UTC) -> i64 millis. Default: now_ms()\n4. Parse --all-history: set since_ms = 0\n5. Thread as_of_ms, explain_score, include_bots through to query_expert()\n6. 
Update the production query_expert() callsite (line ~311) from the default values bd-13q8 set to the actual parsed flag values\n\n### Robot JSON resolved_input additions:\n- scoring_model_version: 2\n- path_input_original: raw user input\n- path_input_normalized: after normalize_query_path()\n- as_of_ms/as_of_iso\n- window_start_iso/window_end_iso/window_end_exclusive: true\n- since_mode: \"all\" | \"24m\" | user value\n- excluded_usernames_applied: true|false\n\n### Robot JSON per-expert (explain_score): score_raw + components object\n### Human output (explain_score): parenthetical after score: `42 (author:28.5 review:10.0 notes:3.5)`\n\n## TDD Loop\n\n### RED (write these 8 tests first):\n- test_explain_score_components_sum_to_total: components sum == score_raw within tolerance\n- test_as_of_produces_deterministic_results: two runs with same as_of -> identical\n- test_as_of_excludes_future_events: event after as_of excluded entirely\n- test_as_of_exclusive_upper_bound: event at exactly as_of_ms excluded (strict <)\n- test_since_relative_to_as_of_clock: since window from as_of, not wall clock\n- test_explain_and_detail_are_mutually_exclusive: clap parse error\n- test_excluded_usernames_filters_bots: renovate-bot filtered, jsmith present\n- test_include_bots_flag_disables_filtering: both appear with --include-bots\n\n### GREEN: Add clap args, parse logic, robot JSON fields, human output format.\n### VERIFY: cargo test -p lore -- test_explain_score test_as_of test_since_relative test_excluded test_include_bots\n\n## Acceptance Criteria\n- [ ] All 8 tests pass green\n- [ ] --as-of parses RFC3339 and YYYY-MM-DD (end-of-day UTC)\n- [ ] --explain-score conflicts with --detail (clap error at parse time)\n- [ ] --all-history conflicts with --since (clap error at parse time)\n- [ ] Default --since is 24m for expert mode\n- [ ] Robot JSON includes scoring_model_version: 2\n- [ ] Robot JSON includes path_input_original AND path_input_normalized\n- [ ] Robot JSON includes 
score_raw + components when --explain-score\n- [ ] Human output appends component parenthetical when --explain-score\n\n## Files\n- MODIFY: src/cli/commands/who.rs (WhoArgs struct, run_who at line 276, robot/human output rendering)\n\n## Edge Cases\n- YYYY-MM-DD parsing: chrono NaiveDate then to end-of-day UTC (T23:59:59.999Z)\n- as_of in the past with --since: since window = as_of_ms - duration, not now - duration\n- since_mode in robot JSON: \"all\" for --all-history, \"24m\" default, user value otherwise\n- Path normalization runs BEFORE path resolution","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T17:00:11.115322Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:43:04.413786Z","closed_at":"2026-02-12T20:43:04.413729Z","close_reason":"Implemented by time-decay swarm: 3 agents, 12 tasks, 621 tests passing, all quality gates green","compaction_level":0,"original_size":0,"labels":["cli","scoring"],"dependencies":[{"issue_id":"bd-11mg","depends_on_id":"bd-13q8","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-11mg","depends_on_id":"bd-18dn","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-12ae","title":"OBSERV: Add structured tracing fields to rate-limit/retry handling","description":"## Background\nRate limit and retry events are currently logged at WARN with minimal context (src/gitlab/client.rs:~157). 
This enriches them with structured fields so MetricsLayer can count them and -v mode shows actionable retry information.\n\n## Approach\n### src/gitlab/client.rs - request() method (line ~119-171)\n\nCurrent 429 handling (~line 155-158):\n```rust\nif response.status() == StatusCode::TOO_MANY_REQUESTS && attempt < Self::MAX_RETRIES {\n let retry_after = Self::parse_retry_after(&response);\n tracing::warn!(retry_after_secs = retry_after, attempt, path, \"Rate limited by GitLab, retrying\");\n sleep(Duration::from_secs(retry_after)).await;\n continue;\n}\n```\n\nReplace with INFO-level structured log:\n```rust\nif response.status() == StatusCode::TOO_MANY_REQUESTS && attempt < Self::MAX_RETRIES {\n let retry_after = Self::parse_retry_after(&response);\n tracing::info!(\n path = %path,\n attempt = attempt,\n retry_after_secs = retry_after,\n status_code = 429u16,\n \"Rate limited, retrying\"\n );\n sleep(Duration::from_secs(retry_after)).await;\n continue;\n}\n```\n\nFor transient errors (network errors, 5xx responses), add similar structured logging:\n```rust\ntracing::info!(\n path = %path,\n attempt = attempt,\n error = %e,\n \"Retrying after transient error\"\n);\n```\n\nKey changes:\n- Level: WARN -> INFO (visible in -v mode, not alarming in default mode)\n- Added: status_code field for 429\n- Added: structured path, attempt fields for all retry events\n- These structured fields enable MetricsLayer (bd-3vqk) to count rate_limit_hits and retries\n\n## Acceptance Criteria\n- [ ] 429 responses log at INFO with fields: path, attempt, retry_after_secs, status_code=429\n- [ ] Transient error retries log at INFO with fields: path, attempt, error\n- [ ] lore -v sync shows retry activity on stderr (INFO is visible in -v mode)\n- [ ] Default mode (no -v) does NOT show retry lines on stderr (INFO filtered out)\n- [ ] File layer captures all retry events (always at DEBUG+)\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/gitlab/client.rs (modify 
request() method, lines ~119-171)\n\n## TDD Loop\nRED:\n - test_rate_limit_log_fields: mock 429 response, capture log output, parse JSON, assert fields\n - test_retry_log_fields: mock network error + retry, assert structured fields\nGREEN: Change log level and add structured fields\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- parse_retry_after returns 0 or very large values: the existing logic handles this\n- All retries exhausted: the final attempt returns the error normally. No special logging needed (the error propagates).\n- path may contain sensitive data (project IDs): project IDs are not sensitive in this context","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:55:02.448070Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:21:42.304259Z","closed_at":"2026-02-04T17:21:42.304213Z","close_reason":"Changed 429 rate-limit logging from WARN to INFO with structured fields: path, attempt, retry_after_secs, status_code=429 in both request() and request_with_headers()","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-12ae","depends_on_id":"bd-3pk","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-13b","title":"[CP0] CLI entry point with Commander.js","description":"## Background\n\nCommander.js provides the CLI framework. The main entry point sets up the program with all subcommands. 
Uses ESM with proper shebang for npx/global installation.\n\nReference: docs/prd/checkpoint-0.md section \"CLI Commands\"\n\n## Approach\n\n**src/cli/index.ts:**\n```typescript\n#!/usr/bin/env node\n\nimport { Command } from 'commander';\nimport { version } from '../../package.json' with { type: 'json' };\nimport { initCommand } from './commands/init';\nimport { authTestCommand } from './commands/auth-test';\nimport { doctorCommand } from './commands/doctor';\nimport { versionCommand } from './commands/version';\nimport { backupCommand } from './commands/backup';\nimport { resetCommand } from './commands/reset';\nimport { syncStatusCommand } from './commands/sync-status';\n\nconst program = new Command();\n\nprogram\n .name('gi')\n .description('GitLab Inbox - Unified notification management')\n .version(version);\n\n// Global --config flag available to all commands\nprogram.option('-c, --config ', 'Path to config file');\n\n// Register subcommands\nprogram.addCommand(initCommand);\nprogram.addCommand(authTestCommand);\nprogram.addCommand(doctorCommand);\nprogram.addCommand(versionCommand);\nprogram.addCommand(backupCommand);\nprogram.addCommand(resetCommand);\nprogram.addCommand(syncStatusCommand);\n\nprogram.parse();\n```\n\nEach command file exports a Command instance:\n```typescript\n// src/cli/commands/version.ts\nimport { Command } from 'commander';\n\nexport const versionCommand = new Command('version')\n .description('Show version information')\n .action(() => {\n console.log(`gi version ${version}`);\n });\n```\n\n## Acceptance Criteria\n\n- [ ] `gi --help` shows all commands and global options\n- [ ] `gi --version` shows version from package.json\n- [ ] `gi --help` shows command-specific help\n- [ ] `gi --config ./path` passes config path to commands\n- [ ] Unknown command shows error and suggests --help\n- [ ] Exit code 0 on success, non-zero on error\n- [ ] Shebang line works for npx execution\n\n## Files\n\nCREATE:\n- src/cli/index.ts (main entry 
point)\n- src/cli/commands/version.ts (simple command as template)\n\nMODIFY (later beads):\n- package.json (add \"bin\" field pointing to dist/cli/index.js)\n\n## TDD Loop\n\nN/A for CLI entry point - verify with manual testing:\n\n```bash\nnpm run build\nnode dist/cli/index.js --help\nnode dist/cli/index.js version\nnode dist/cli/index.js unknown-command # should error\n```\n\n## Edge Cases\n\n- package.json import requires Node 20+ with { type: 'json' } assertion\n- Alternative: read version from package.json with readFileSync\n- Command registration order affects help display - alphabetical preferred\n- Global options must be defined before subcommands","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:50.499023Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:10:49.224627Z","closed_at":"2026-01-25T03:10:49.224499Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-13b","depends_on_id":"bd-gg1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-13lp","title":"Epic: CLI Intelligence & Market Position (CLI-IMP)","description":"## Strategic Context\n\nAnalysis of glab (GitLab CLI) vs lore reveals a clean architectural split: lore = ALL reads (issues, MRs, search, who, timeline, intelligence); glab = ALL writes (create, update, approve, merge, CI/CD).\n\nLore is NOT duplicating glab. The overlap is minimal (both list issues/MRs), but lore output is curated, flatter, and richer (closing MRs, work-item status, discussions pre-joined). Agents reach for glab by default because they have been trained on it — a discovery problem, not a capability problem.\n\nThree layers: (1) Foundation — make lore the definitive read path; (2) Intelligence — ship half-built features (hybrid search, timeline, per-note search); (3) Alien Artifact — novel intelligence (explain, related, brief, drift).\n\n## Progress (as of 2026-02-12)\n\n### Shipped\n- Timeline CLI (bd-2wpf): CLOSED. 
5-stage pipeline with human and robot renderers working end-to-end.\n- `who` command: Expert, Workload, Reviews, Active, Overlap modes all functional.\n- Search infrastructure: hybrid.rs, vector.rs, rrf.rs all implemented and tested (not yet wired to CLI).\n\n### In Progress\n- Foundation: bd-kvij (skill rewrite), bd-91j1 (robot-docs), bd-2g50 (data gaps)\n- Intelligence: bd-1ksf (hybrid search wiring), bd-2l3s (per-note search)\n- Alien Artifact: bd-1n5q (brief), bd-8con (related), bd-9lbr (explain), bd-1cjx (drift)\n\n## Success Criteria\n- [x] Timeline CLI shipped with human and robot renderers (bd-2wpf CLOSED)\n- [ ] Zero agent skill files reference glab for read operations\n- [ ] robot-docs comprehensive enough for zero-training agent bootstrap\n- [ ] Hybrid search (FTS + vector + RRF) wired to CLI and default\n- [ ] Per-note search operational at note granularity\n- [ ] At least one Tier 3 alien artifact feature prototyped (brief, related, explain, or drift)\n\n## Architecture Notes\n- main.rs is 2579 lines with all subcommand handlers\n- CLI commands in src/cli/commands/ (16 modules: auth_test, count, doctor, embed, generate_docs, init, ingest, list, search, show, stats, sync, sync_status, timeline, who, plus mod.rs)\n- Database: 21 migrations wired (001-021), LATEST_SCHEMA_VERSION = 21\n- Raw payloads for issues store 15 fields: assignees, author, closed_at, created_at, description, due_date, id, iid, labels, milestone, project_id, state, title, updated_at, web_url\n- Missing from raw payloads: closed_by, confidential, upvotes, downvotes, weight, issue_type, time_stats, health_status (ingestion pipeline doesn't capture these)\n- robot-docs current output keys: name, version, description, activation, commands, aliases, exit_codes, clap_error_codes, error_format, 
workflows","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T15:44:23.993267Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:08:36.417919Z","compaction_level":0,"original_size":0,"labels":["cli-imp","epic"]} {"id":"bd-13pt","title":"Display closing MRs in lore issues output","description":"## Background\nThe `entity_references` table stores MR->Issue 'closes' relationships (from the closes_issues API), but this data is never displayed when viewing an issue. This is the 'Development' section in GitLab UI showing which MRs will close an issue when merged.\n\n**System fit**: Data already flows through `fetch_mr_closes_issues()` -> `store_closes_issues_refs()` -> `entity_references` table. We just need to query and display it.\n\n## Approach\n\nAll changes in `src/cli/commands/show.rs`:\n\n### 1. Add ClosingMrRef struct (after DiffNotePosition ~line 57)\n```rust\n#[derive(Debug, Clone, Serialize)]\npub struct ClosingMrRef {\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub web_url: Option,\n}\n```\n\n### 2. Update IssueDetail struct (line ~59)\n```rust\npub struct IssueDetail {\n // ... existing fields ...\n pub closing_merge_requests: Vec, // NEW - add after discussions\n}\n```\n\n### 3. Add ClosingMrRefJson struct (after NoteDetailJson ~line 797)\n```rust\n#[derive(Serialize)]\npub struct ClosingMrRefJson {\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub web_url: Option,\n}\n```\n\n### 4. Update IssueDetailJson struct (line ~770)\n```rust\npub struct IssueDetailJson {\n // ... existing fields ...\n pub closing_merge_requests: Vec, // NEW\n}\n```\n\n### 5. 
Add get_closing_mrs() function (after get_issue_discussions ~line 245)\n```rust\nfn get_closing_mrs(conn: &Connection, issue_id: i64) -> Result> {\n let mut stmt = conn.prepare(\n \"SELECT mr.iid, mr.title, mr.state, mr.web_url\n FROM entity_references er\n JOIN merge_requests mr ON mr.id = er.source_entity_id\n WHERE er.target_entity_type = 'issue'\n AND er.target_entity_id = ?\n AND er.source_entity_type = 'merge_request'\n AND er.reference_type = 'closes'\n ORDER BY mr.iid\"\n )?;\n \n let mrs = stmt\n .query_map([issue_id], |row| {\n Ok(ClosingMrRef {\n iid: row.get(0)?,\n title: row.get(1)?,\n state: row.get(2)?,\n web_url: row.get(3)?,\n })\n })?\n .collect::, _>>()?;\n \n Ok(mrs)\n}\n```\n\n### 6. Update run_show_issue() (line ~89)\n```rust\nlet closing_mrs = get_closing_mrs(&conn, issue.id)?;\n// In return struct:\nclosing_merge_requests: closing_mrs,\n```\n\n### 7. Update print_show_issue() (after Labels section ~line 556)\n```rust\nif !issue.closing_merge_requests.is_empty() {\n println!(\"Development:\");\n for mr in &issue.closing_merge_requests {\n let state_indicator = match mr.state.as_str() {\n \"merged\" => style(\"merged\").green(),\n \"opened\" => style(\"opened\").cyan(),\n \"closed\" => style(\"closed\").red(),\n _ => style(&mr.state).dim(),\n };\n println!(\" !{} {} ({})\", mr.iid, mr.title, state_indicator);\n }\n}\n```\n\n### 8. 
Update From<&IssueDetail> for IssueDetailJson (line ~799)\n```rust\nclosing_merge_requests: issue.closing_merge_requests.iter().map(|mr| ClosingMrRefJson {\n iid: mr.iid,\n title: mr.title.clone(),\n state: mr.state.clone(),\n web_url: mr.web_url.clone(),\n}).collect(),\n```\n\n## Acceptance Criteria\n- [ ] `cargo test test_get_closing_mrs` passes (4 tests)\n- [ ] `lore issues ` shows Development section when closing MRs exist\n- [ ] Development section shows MR iid, title, and state\n- [ ] State is color-coded (green=merged, cyan=opened, red=closed)\n- [ ] `lore -J issues ` includes closing_merge_requests array\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n- `src/cli/commands/show.rs` - ALL changes\n\n## TDD Loop\n\n**RED** - Add tests to `src/cli/commands/show.rs` `#[cfg(test)] mod tests`:\n\n```rust\nfn seed_issue_with_closing_mr(conn: &Connection) -> (i64, i64) {\n conn.execute(\n \"INSERT INTO projects (id, gitlab_project_id, path_with_namespace, web_url, created_at, updated_at)\n VALUES (1, 100, 'group/repo', 'https://gitlab.example.com', 1000, 2000)\", []\n ).unwrap();\n conn.execute(\n \"INSERT INTO issues (id, gitlab_id, iid, project_id, title, state, author_username,\n created_at, updated_at, last_seen_at) VALUES (1, 200, 10, 1, 'Bug fix', 'opened', 'dev', 1000, 2000, 2000)\", []\n ).unwrap();\n conn.execute(\n \"INSERT INTO merge_requests (id, gitlab_id, iid, project_id, title, state, author_username,\n source_branch, target_branch, created_at, updated_at, last_seen_at)\n VALUES (1, 300, 5, 1, 'Fix the bug', 'merged', 'dev', 'fix', 'main', 1000, 2000, 2000)\", []\n ).unwrap();\n conn.execute(\n \"INSERT INTO entity_references (project_id, source_entity_type, source_entity_id,\n target_entity_type, target_entity_id, reference_type, source_method, created_at)\n VALUES (1, 'merge_request', 1, 'issue', 1, 'closes', 'api', 3000)\", []\n ).unwrap();\n (1, 1) // (issue_id, mr_id)\n}\n\n#[test]\nfn test_get_closing_mrs_empty() {\n let 
conn = setup_test_db();\n // seed project + issue with no closing MRs\n conn.execute(\"INSERT INTO projects ...\", []).unwrap();\n conn.execute(\"INSERT INTO issues ...\", []).unwrap();\n let result = get_closing_mrs(&conn, 1).unwrap();\n assert!(result.is_empty());\n}\n\n#[test]\nfn test_get_closing_mrs_single() {\n let conn = setup_test_db();\n seed_issue_with_closing_mr(&conn);\n let result = get_closing_mrs(&conn, 1).unwrap();\n assert_eq!(result.len(), 1);\n assert_eq!(result[0].iid, 5);\n assert_eq!(result[0].title, \"Fix the bug\");\n assert_eq!(result[0].state, \"merged\");\n}\n\n#[test]\nfn test_get_closing_mrs_ignores_mentioned() {\n let conn = setup_test_db();\n seed_issue_with_closing_mr(&conn);\n // Add a 'mentioned' reference that should be ignored\n conn.execute(\n \"INSERT INTO merge_requests (id, gitlab_id, iid, project_id, title, state, author_username,\n source_branch, target_branch, created_at, updated_at, last_seen_at)\n VALUES (2, 301, 6, 1, 'Other MR', 'opened', 'dev', 'other', 'main', 1000, 2000, 2000)\", []\n ).unwrap();\n conn.execute(\n \"INSERT INTO entity_references (project_id, source_entity_type, source_entity_id,\n target_entity_type, target_entity_id, reference_type, source_method, created_at)\n VALUES (1, 'merge_request', 2, 'issue', 1, 'mentioned', 'note_parse', 3000)\", []\n ).unwrap();\n let result = get_closing_mrs(&conn, 1).unwrap();\n assert_eq!(result.len(), 1); // Only the 'closes' ref\n}\n\n#[test]\nfn test_get_closing_mrs_multiple_sorted() {\n let conn = setup_test_db();\n seed_issue_with_closing_mr(&conn);\n // Add second closing MR with higher iid\n conn.execute(\n \"INSERT INTO merge_requests (id, gitlab_id, iid, project_id, title, state, author_username,\n source_branch, target_branch, created_at, updated_at, last_seen_at)\n VALUES (2, 301, 8, 1, 'Another fix', 'opened', 'dev', 'fix2', 'main', 1000, 2000, 2000)\", []\n ).unwrap();\n conn.execute(\n \"INSERT INTO entity_references (project_id, source_entity_type, 
source_entity_id,\n target_entity_type, target_entity_id, reference_type, source_method, created_at)\n VALUES (1, 'merge_request', 2, 'issue', 1, 'closes', 'api', 3000)\", []\n ).unwrap();\n let result = get_closing_mrs(&conn, 1).unwrap();\n assert_eq!(result.len(), 2);\n assert_eq!(result[0].iid, 5); // Lower iid first\n assert_eq!(result[1].iid, 8);\n}\n```\n\n**GREEN** - Implement get_closing_mrs() and struct updates\n\n**VERIFY**: `cargo test test_get_closing_mrs && cargo clippy --all-targets -- -D warnings`\n\n## Edge Cases\n- Empty closing MRs -> don't print Development section\n- MR in different states -> color-coded appropriately \n- Cross-project closes (target_entity_id IS NULL) -> not displayed (unresolved refs)\n- Multiple MRs closing same issue -> all shown, ordered by iid","status":"closed","priority":1,"issue_type":"feature","created_at":"2026-02-05T15:15:37.598249Z","created_by":"tayloreernisse","updated_at":"2026-02-05T15:26:09.522557Z","closed_at":"2026-02-05T15:26:09.522506Z","close_reason":"Implemented: closing MRs (Development section) now display in lore issues . All 4 new tests pass.","compaction_level":0,"original_size":0,"labels":["ISSUE"]} -{"id":"bd-13q8","title":"Implement Rust-side decay aggregation with reviewer split","description":"## Background\nThe current accumulation (who.rs ~line 780-810) maps SQL rows directly to Expert structs with integer scores computed in SQL. The new model receives per-signal rows from build_expert_sql() (bd-1hoq) and needs Rust-side decay computation, reviewer split, closed MR multiplier, and deterministic f64 ordering. This bead wires the new SQL into query_expert() and replaces the accumulation logic.\n\n## Approach\nModify query_expert() (who.rs:641) to:\n1. Call build_expert_sql() instead of the inline SQL\n2. Bind 6 params: path, since_ms, project_id, as_of_ms, closed_mr_multiplier, reviewer_min_note_chars\n3. Execute and iterate rows: (username, signal, mr_id, qty, ts, state_mult)\n4. 
Accumulate into per-user UserAccum structs\n5. Compute decayed scores with deterministic ordering\n6. Build Expert structs from accumulators\n\n### Updated query_expert() signature:\n```rust\n#[allow(clippy::too_many_arguments)]\nfn query_expert(\n conn: &Connection,\n path: &str,\n project_id: Option,\n since_ms: i64,\n as_of_ms: i64,\n limit: usize,\n scoring: &ScoringConfig,\n detail: bool,\n explain_score: bool,\n include_bots: bool,\n) -> Result\n```\n\n### CRITICAL: Existing callsite updates\nChanging the signature from 7 to 10 params breaks ALL existing callers. There are 17 callsites that must be updated:\n\n**Production (1):**\n- run_who() at line ~311: Updated by bd-11mg (CLI flags bead), not this bead. To keep code compiling between bd-13q8 and bd-11mg, update this callsite with default values: `query_expert(conn, path, project_id, since_ms, now_ms(), limit, scoring, detail, false, false)`\n\n**Tests (16):**\nUpdate ALL test callsites to the new 10-param signature. The new params use defaults that preserve current behavior:\n- `as_of_ms` = `now_ms() + 1000` (slightly in future, ensures all test data is within window)\n- `explain_score` = `false`\n- `include_bots` = `false`\n\nLines to update (current line numbers):\n2879, 3127, 3208, 3214, 3226, 3252, 3291, 3325, 3345, 3398, 3563, 3572, 3588, 3625, 3651, 3658\n\nPattern: replace `query_expert(&conn, path, None, 0, limit, &scoring, detail)` with `query_expert(&conn, path, None, 0, now_ms() + 1000, limit, &scoring, detail, false, false)`\n\n### Per-user accumulator:\n```rust\nstruct UserAccum {\n author_mrs: HashMap, // mr_id -> (max_ts, state_mult)\n reviewer_participated: HashMap, // mr_id -> (max_ts, state_mult)\n reviewer_assigned: HashMap, // mr_id -> (max_ts, state_mult)\n notes_per_mr: HashMap, // mr_id -> (count, max_ts, state_mult)\n last_seen: i64,\n components: Option<[f64; 4]>, // when explain_score: [author, participated, assigned, notes]\n}\n```\n\n**Key**: state_mult is f64 from SQL 
(computed in mr_activity CTE), NOT computed from mr_state string in Rust.\n\n### Signal routing:\n- `diffnote_author` / `file_author` -> author_mrs (max ts + state_mult per mr_id)\n- `diffnote_reviewer` / `file_reviewer_participated` -> reviewer_participated\n- `file_reviewer_assigned` -> reviewer_assigned (skip if mr_id already in reviewer_participated)\n- `note_group` -> notes_per_mr (qty from SQL row, max ts + state_mult)\n\n### Deterministic score computation:\nSort each HashMap entries into a Vec sorted by mr_id ASC, then sum:\n```\nraw_score =\n sum(author_weight * state_mult * decay(as_of_ms - ts, author_hl) for (mr, ts, sm) in author_mrs sorted)\n + sum(reviewer_weight * state_mult * decay(as_of_ms - ts, reviewer_hl) for ... sorted)\n + sum(reviewer_assignment_weight * state_mult * decay(as_of_ms - ts, reviewer_assignment_hl) for ... sorted)\n + sum(note_bonus * state_mult * log2(1 + count) * decay(as_of_ms - ts, note_hl) for ... sorted)\n```\n\n### Expert struct additions (who.rs:141-154):\n```rust\npub score_raw: Option, // unrounded f64, only when explain_score\npub components: Option, // only when explain_score\n```\n\nAdd new struct:\n```rust\npub struct ScoreComponents {\n pub author: f64,\n pub reviewer_participated: f64,\n pub reviewer_assigned: f64,\n pub notes: f64,\n}\n```\n\n### Bot filtering:\nPost-query: if !include_bots, filter out usernames in scoring.excluded_usernames (case-insensitive via .to_lowercase() comparison).\n\n## TDD Loop\n\n### RED (write these 13 tests first):\n\n**Core decay integration:**\n- test_expert_scores_decay_with_time: recent (10d) vs old (360d), recent scores ~24, old ~6\n- test_expert_reviewer_decays_faster_than_author: same MR at 90d, author > reviewer\n- test_reviewer_participated_vs_assigned_only: participated ~10*decay vs assigned ~3*decay\n- test_note_diminishing_returns_per_mr: 20-note/1-note ratio ~4.4x not 20x\n- test_file_change_timestamp_uses_merged_at: merged MR uses merged_at not updated_at\n- 
test_open_mr_uses_updated_at: opened MR uses updated_at\n- test_old_path_match_credits_expertise: query old path -> author appears\n- test_closed_mr_multiplier: closed MR at 0.5x merged (state_mult from SQL)\n- test_trivial_note_does_not_count_as_participation: 4-char LGTM -> assigned-only\n- test_null_timestamp_fallback_to_created_at: merged with NULL merged_at\n- test_row_order_independence: different insert order -> identical rankings\n- test_reviewer_split_is_exhaustive: every reviewer in exactly one bucket\n- test_deterministic_accumulation_order: 100 runs, bit-identical f64\n\nAll tests use insert_mr_at/insert_diffnote_at from bd-2yu5 for timestamp control, and call the NEW query_expert() with 10 params.\n\n### GREEN: Wire build_expert_sql into query_expert, implement UserAccum + scoring loop, update all 17 existing callsites.\n### VERIFY: cargo test -p lore -- test_expert_scores test_reviewer_participated test_note_diminishing\n\n## Acceptance Criteria\n- [ ] All 13 new tests pass green\n- [ ] All 16 existing test callsites updated to 10-param signature\n- [ ] Production caller (run_who at ~line 311) updated with default values\n- [ ] Existing who tests pass unchanged (decay ~1.0 for now_ms() data)\n- [ ] state_mult comes from SQL f64 column, NOT from string matching on mr_state\n- [ ] reviewer_assigned excludes mr_ids already in reviewer_participated\n- [ ] Deterministic: 100 runs produce bit-identical f64 (sorted by mr_id)\n- [ ] Bot filtering applied when include_bots=false\n- [ ] cargo check --all-targets passes (no broken callers)\n\n## Files\n- MODIFY: src/cli/commands/who.rs (query_expert at line 641, Expert struct at line 141, all test callsites)\n\n## Edge Cases\n- log2(1.0 + 0) = 0.0 — zero notes contribute nothing\n- f64 NaN: half_life_decay guards hl=0\n- HashMap to sorted Vec for deterministic summing\n- as_of_ms: use passed value, not now_ms()\n- state_mult is always 1.0 or closed_mr_multiplier (from SQL) — no other values possible\n- 
Production caller uses now_ms() as as_of_ms default until bd-11mg adds --as-of flag","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-09T17:00:01.764110Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:45:28.951909Z","compaction_level":0,"original_size":0,"labels":["scoring"],"dependencies":[{"issue_id":"bd-13q8","depends_on_id":"bd-1hoq","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"},{"issue_id":"bd-13q8","depends_on_id":"bd-1soz","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"},{"issue_id":"bd-13q8","depends_on_id":"bd-2yu5","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"}]} +{"id":"bd-13q8","title":"Implement Rust-side decay aggregation with reviewer split","description":"## Background\nThe current accumulation (who.rs ~line 780-810) maps SQL rows directly to Expert structs with integer scores computed in SQL. The new model receives per-signal rows from build_expert_sql() (bd-1hoq) and needs Rust-side decay computation, reviewer split, closed MR multiplier, and deterministic f64 ordering. This bead wires the new SQL into query_expert() and replaces the accumulation logic.\n\n## Approach\nModify query_expert() (who.rs:641) to:\n1. Call build_expert_sql() instead of the inline SQL\n2. Bind 6 params: path, since_ms, project_id, as_of_ms, closed_mr_multiplier, reviewer_min_note_chars\n3. Execute and iterate rows: (username, signal, mr_id, qty, ts, state_mult)\n4. Accumulate into per-user UserAccum structs\n5. Compute decayed scores with deterministic ordering\n6. 
Build Expert structs from accumulators\n\n### Updated query_expert() signature:\n```rust\n#[allow(clippy::too_many_arguments)]\nfn query_expert(\n conn: &Connection,\n path: &str,\n project_id: Option,\n since_ms: i64,\n as_of_ms: i64,\n limit: usize,\n scoring: &ScoringConfig,\n detail: bool,\n explain_score: bool,\n include_bots: bool,\n) -> Result\n```\n\n### CRITICAL: Existing callsite updates\nChanging the signature from 7 to 10 params breaks ALL existing callers. There are 17 callsites that must be updated:\n\n**Production (1):**\n- run_who() at line ~311: Updated by bd-11mg (CLI flags bead), not this bead. To keep code compiling between bd-13q8 and bd-11mg, update this callsite with default values: `query_expert(conn, path, project_id, since_ms, now_ms(), limit, scoring, detail, false, false)`\n\n**Tests (16):**\nUpdate ALL test callsites to the new 10-param signature. The new params use defaults that preserve current behavior:\n- `as_of_ms` = `now_ms() + 1000` (slightly in future, ensures all test data is within window)\n- `explain_score` = `false`\n- `include_bots` = `false`\n\nLines to update (current line numbers):\n2879, 3127, 3208, 3214, 3226, 3252, 3291, 3325, 3345, 3398, 3563, 3572, 3588, 3625, 3651, 3658\n\nPattern: replace `query_expert(&conn, path, None, 0, limit, &scoring, detail)` with `query_expert(&conn, path, None, 0, now_ms() + 1000, limit, &scoring, detail, false, false)`\n\n### Per-user accumulator:\n```rust\nstruct UserAccum {\n author_mrs: HashMap, // mr_id -> (max_ts, state_mult)\n reviewer_participated: HashMap, // mr_id -> (max_ts, state_mult)\n reviewer_assigned: HashMap, // mr_id -> (max_ts, state_mult)\n notes_per_mr: HashMap, // mr_id -> (count, max_ts, state_mult)\n last_seen: i64,\n components: Option<[f64; 4]>, // when explain_score: [author, participated, assigned, notes]\n}\n```\n\n**Key**: state_mult is f64 from SQL (computed in mr_activity CTE), NOT computed from mr_state string in Rust.\n\n### Signal routing:\n- 
`diffnote_author` / `file_author` -> author_mrs (max ts + state_mult per mr_id)\n- `diffnote_reviewer` / `file_reviewer_participated` -> reviewer_participated\n- `file_reviewer_assigned` -> reviewer_assigned (skip if mr_id already in reviewer_participated)\n- `note_group` -> notes_per_mr (qty from SQL row, max ts + state_mult)\n\n### Deterministic score computation:\nSort each HashMap entries into a Vec sorted by mr_id ASC, then sum:\n```\nraw_score =\n sum(author_weight * state_mult * decay(as_of_ms - ts, author_hl) for (mr, ts, sm) in author_mrs sorted)\n + sum(reviewer_weight * state_mult * decay(as_of_ms - ts, reviewer_hl) for ... sorted)\n + sum(reviewer_assignment_weight * state_mult * decay(as_of_ms - ts, reviewer_assignment_hl) for ... sorted)\n + sum(note_bonus * state_mult * log2(1 + count) * decay(as_of_ms - ts, note_hl) for ... sorted)\n```\n\n### Expert struct additions (who.rs:141-154):\n```rust\npub score_raw: Option, // unrounded f64, only when explain_score\npub components: Option, // only when explain_score\n```\n\nAdd new struct:\n```rust\npub struct ScoreComponents {\n pub author: f64,\n pub reviewer_participated: f64,\n pub reviewer_assigned: f64,\n pub notes: f64,\n}\n```\n\n### Bot filtering:\nPost-query: if !include_bots, filter out usernames in scoring.excluded_usernames (case-insensitive via .to_lowercase() comparison).\n\n## TDD Loop\n\n### RED (write these 13 tests first):\n\n**Core decay integration:**\n- test_expert_scores_decay_with_time: recent (10d) vs old (360d), recent scores ~24, old ~6\n- test_expert_reviewer_decays_faster_than_author: same MR at 90d, author > reviewer\n- test_reviewer_participated_vs_assigned_only: participated ~10*decay vs assigned ~3*decay\n- test_note_diminishing_returns_per_mr: 20-note/1-note ratio ~4.4x not 20x\n- test_file_change_timestamp_uses_merged_at: merged MR uses merged_at not updated_at\n- test_open_mr_uses_updated_at: opened MR uses updated_at\n- test_old_path_match_credits_expertise: query old 
path -> author appears\n- test_closed_mr_multiplier: closed MR at 0.5x merged (state_mult from SQL)\n- test_trivial_note_does_not_count_as_participation: 4-char LGTM -> assigned-only\n- test_null_timestamp_fallback_to_created_at: merged with NULL merged_at\n- test_row_order_independence: different insert order -> identical rankings\n- test_reviewer_split_is_exhaustive: every reviewer in exactly one bucket\n- test_deterministic_accumulation_order: 100 runs, bit-identical f64\n\nAll tests use insert_mr_at/insert_diffnote_at from bd-2yu5 for timestamp control, and call the NEW query_expert() with 10 params.\n\n### GREEN: Wire build_expert_sql into query_expert, implement UserAccum + scoring loop, update all 17 existing callsites.\n### VERIFY: cargo test -p lore -- test_expert_scores test_reviewer_participated test_note_diminishing\n\n## Acceptance Criteria\n- [ ] All 13 new tests pass green\n- [ ] All 16 existing test callsites updated to 10-param signature\n- [ ] Production caller (run_who at ~line 311) updated with default values\n- [ ] Existing who tests pass unchanged (decay ~1.0 for now_ms() data)\n- [ ] state_mult comes from SQL f64 column, NOT from string matching on mr_state\n- [ ] reviewer_assigned excludes mr_ids already in reviewer_participated\n- [ ] Deterministic: 100 runs produce bit-identical f64 (sorted by mr_id)\n- [ ] Bot filtering applied when include_bots=false\n- [ ] cargo check --all-targets passes (no broken callers)\n\n## Files\n- MODIFY: src/cli/commands/who.rs (query_expert at line 641, Expert struct at line 141, all test callsites)\n\n## Edge Cases\n- log2(1.0 + 0) = 0.0 — zero notes contribute nothing\n- f64 NaN: half_life_decay guards hl=0\n- HashMap to sorted Vec for deterministic summing\n- as_of_ms: use passed value, not now_ms()\n- state_mult is always 1.0 or closed_mr_multiplier (from SQL) — no other values possible\n- Production caller uses now_ms() as as_of_ms default until bd-11mg adds --as-of 
flag","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T17:00:01.764110Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:43:04.412694Z","closed_at":"2026-02-12T20:43:04.412646Z","close_reason":"Implemented by time-decay swarm: 3 agents, 12 tasks, 621 tests passing, all quality gates green","compaction_level":0,"original_size":0,"labels":["scoring"],"dependencies":[{"issue_id":"bd-13q8","depends_on_id":"bd-1hoq","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-13q8","depends_on_id":"bd-1soz","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-13q8","depends_on_id":"bd-2yu5","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-140","title":"[CP1] Database migration 002_issues.sql","description":"Create migration file with tables for issues, labels, issue_labels, discussions, and notes.\n\nTables to create:\n- issues: gitlab_id, project_id, iid, title, description, state, author_username, timestamps, web_url, raw_payload_id\n- labels: gitlab_id, project_id, name, color, description (unique on project_id+name)\n- issue_labels: junction table\n- discussions: gitlab_discussion_id, project_id, issue_id, noteable_type, individual_note, timestamps, resolvable/resolved\n- notes: gitlab_id, discussion_id, project_id, type, is_system, author_username, body, timestamps, position, resolution fields, DiffNote position fields\n\nInclude appropriate indexes:\n- idx_issues_project_updated, idx_issues_author, uq_issues_project_iid\n- uq_labels_project_name, idx_labels_name\n- idx_issue_labels_label\n- uq_discussions_project_discussion_id, idx_discussions_issue/mr/last_note\n- idx_notes_discussion/author/system\n\nFiles: migrations/002_issues.sql\nDone when: Migration applies cleanly on top of 
001_initial.sql","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:18:53.954039Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.154936Z","closed_at":"2026-01-25T15:21:35.154936Z","deleted_at":"2026-01-25T15:21:35.154934Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-14hv","title":"Implement soak test + concurrent pagination/write race tests","description":"## Background\nThe 30-minute soak test verifies no panic, deadlock, or memory leak under sustained use. Concurrent pagination/write race tests prove browse snapshot fences prevent duplicate or skipped rows during sync writes.\n\n## Approach\nSoak test:\n- Automated script that drives the TUI for 30 minutes: random navigation, filter changes, sync starts/cancels, search queries\n- Monitors: no panic (exit code), no deadlock (watchdog timer), memory growth < 5% (RSS sampling)\n- Uses FakeClock with accelerated time for time-dependent features\n\nConcurrent pagination/write race:\n- Thread A: paginating through Issue List (fetching pages via keyset cursor)\n- Thread B: writing new issues to DB (simulating sync)\n- Assert: no duplicate rows across pages, no skipped rows within a browse snapshot fence\n- BrowseSnapshot token ensures stable ordering until explicit refresh\n\n## Acceptance Criteria\n- [ ] 30-min soak: no panic\n- [ ] 30-min soak: no deadlock (watchdog detects)\n- [ ] 30-min soak: memory growth < 5%\n- [ ] Concurrent pagination: no duplicate rows across pages\n- [ ] Concurrent pagination: no skipped rows within snapshot fence\n- [ ] BrowseSnapshot invalidated on manual refresh, not on background writes\n\n## Files\n- CREATE: crates/lore-tui/tests/soak_test.rs\n- CREATE: crates/lore-tui/tests/pagination_race_test.rs\n\n## TDD Anchor\nRED: Write test_pagination_no_duplicates that runs paginator and writer concurrently for 1000 iterations, collects all returned row IDs, 
asserts no duplicates.\nGREEN: Implement browse snapshot fence in keyset pagination.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_pagination_no_duplicates\n\n## Edge Cases\n- Soak test needs headless mode (no real terminal) — use ftui test harness\n- Memory sampling on macOS: use mach_task_info or /proc equivalent\n- Writer must use WAL mode to not block readers\n- Snapshot fence: deferred read transaction holds snapshot until page sequence completes\n\n## Dependency Context\nUses DbManager from \"Implement DbManager\" task.\nUses BrowseSnapshot from \"Implement NavigationStack\" task.\nUses keyset pagination from \"Implement Issue List\" task.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:05:28.130516Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:38.546708Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-14hv","depends_on_id":"bd-1b6k","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-14hv","depends_on_id":"bd-wnuo","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-14q","title":"Epic: Gate 4 - File Decision History (lore file-history)","description":"## Background\n\nGate 4 implements `lore file-history` — answers \"Which MRs touched this file, and why?\" by linking files to MRs via a new mr_file_changes table and resolving rename chains.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Gate 4 (Sections 4.1-4.7).\n\n## Prerequisites\n\n- Gates 1-2 COMPLETE: entity_references populated, resource events fetched\n- Migration 015 exists on disk (commit SHAs + closes watermark) — registered by bd-1oo\n- pending_dependent_fetches has job_type='mr_diffs' in CHECK constraint (migration 011)\n\n## Architecture\n\n- **New table:** mr_file_changes (migration 016) stores file paths per MR\n- **New config:** fetchMrFileChanges (default true) gates the API calls\n- **API 
source:** GET /projects/:id/merge_requests/:iid/diffs — extract paths only, discard diff content\n- **Rename resolution:** BFS both directions on mr_file_changes WHERE change_type='renamed', bounded at 10 hops\n- **Query:** Join mr_file_changes -> merge_requests, optionally enrich with entity_references and discussions\n\n## Children (Execution Order)\n\n1. **bd-1oo** — Register migration 015 + create migration 016 (mr_file_changes table)\n2. **bd-jec** — Add fetchMrFileChanges config flag\n3. **bd-2yo** — Fetch MR diffs API and populate mr_file_changes\n4. **bd-1yx** — Implement rename chain resolution (BFS algorithm)\n5. **bd-z94** — Implement lore file-history CLI command (human + robot output)\n\n## Gate Completion Criteria\n\n- [ ] mr_file_changes table populated from GitLab diffs API\n- [ ] merge_commit_sha and squash_commit_sha captured in merge_requests (already done in code, needs migration 015 registered)\n- [ ] `lore file-history ` returns MRs ordered by merge/creation date\n- [ ] Output includes: MR title, state, author, change type, discussion count\n- [ ] --discussions shows inline discussion snippets from DiffNotes on the file\n- [ ] Rename chains resolved with bounded hop count (default 10) and cycle detection\n- [ ] --no-follow-renames disables chain resolution\n- [ ] Robot mode JSON includes rename_chain when renames detected\n- [ ] -p required when path in multiple projects (exit 18 Ambiguous)\n","status":"open","priority":1,"issue_type":"feature","created_at":"2026-02-02T21:31:01.094024Z","created_by":"tayloreernisse","updated_at":"2026-02-05T20:56:53.434796Z","compaction_level":0,"original_size":0,"labels":["epic","gate-4","phase-b"],"dependencies":[{"issue_id":"bd-14q","depends_on_id":"bd-1se","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-14q","depends_on_id":"bd-2zl","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-14q8","title":"Split commands.rs into 
commands/ module (registry + defs)","description":"commands.rs is 807 lines. Split into crates/lore-tui/src/commands/mod.rs (re-exports), commands/registry.rs (CommandRegistry, lookup, status_hints, help_entries, palette_entries, build_registry), and commands/defs.rs (command definitions, KeyCombo, CommandDef struct). Keep public API identical via re-exports. All downstream imports should continue to work unchanged.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T21:24:11.259683Z","created_by":"tayloreernisse","updated_at":"2026-02-12T21:24:32.009880Z","compaction_level":0,"original_size":0,"labels":["TUI"]} +{"id":"bd-14q8","title":"Split commands.rs into commands/ module (registry + defs)","description":"commands.rs is 807 lines. Split into crates/lore-tui/src/commands/mod.rs (re-exports), commands/registry.rs (CommandRegistry, lookup, status_hints, help_entries, palette_entries, build_registry), and commands/defs.rs (command definitions, KeyCombo, CommandDef struct). Keep public API identical via re-exports. 
All downstream imports should continue to work unchanged.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T21:24:11.259683Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:48:18.915386Z","closed_at":"2026-02-18T18:48:18.915341Z","close_reason":"Split commands.rs into commands/ module (defs.rs + registry.rs + mod.rs)","compaction_level":0,"original_size":0,"labels":["TUI"]} {"id":"bd-157","title":"[CP1] Issue transformer with label extraction","description":"Transform GitLab issue payloads to normalized database schema.\n\n## Module\nsrc/gitlab/transformers/issue.rs\n\n## Structs\n\n### NormalizedIssue\n- gitlab_id: i64\n- project_id: i64 (local DB project ID)\n- iid: i64\n- title: String\n- description: Option\n- state: String\n- author_username: String\n- created_at, updated_at, last_seen_at: i64 (ms epoch)\n- web_url: String\n\n### NormalizedLabel (CP1: name-only)\n- project_id: i64\n- name: String\n\n## Functions\n\n### transform_issue(gitlab_issue: &GitLabIssue, local_project_id: i64) -> NormalizedIssue\n- Convert ISO timestamps to ms epoch using iso_to_ms()\n- Set last_seen_at to now_ms()\n- Clone string fields\n\n### extract_labels(gitlab_issue: &GitLabIssue, local_project_id: i64) -> Vec\n- Map labels vec to NormalizedLabel structs\n\nFiles: \n- src/gitlab/transformers/mod.rs\n- src/gitlab/transformers/issue.rs\nTests: tests/issue_transformer_tests.rs\nDone when: Unit tests pass for payload transformation and label extraction","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:42:47.719562Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.736142Z","closed_at":"2026-01-25T17:02:01.736142Z","deleted_at":"2026-01-25T17:02:01.736129Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} +{"id":"bd-159p","title":"Add get_issue_by_iid and get_mr_by_iid to GitLabClient with wiremock 
tests","description":"## Background\nSurgical sync needs to fetch a single issue or MR by its project-scoped IID from GitLab REST API during the preflight phase. The existing `GitLabClient` has `paginate_issues` and `paginate_merge_requests` for bulk streaming, but no single-entity fetch by IID. The GitLab v4 API provides `/api/v4/projects/:id/issues/:iid` and `/api/v4/projects/:id/merge_requests/:iid` endpoints that return exactly one entity or 404.\n\nThese methods are used by the surgical preflight (bd-3sez) to validate that requested IIDs actually exist on GitLab before committing to the ingest phase. They must return the full `GitLabIssue` / `GitLabMergeRequest` structs (same as the paginated endpoints return) so they can be passed directly to `process_single_issue` / `process_single_mr`.\n\n## Approach\n\n### Step 1: Add `get_issue_by_iid` method (src/gitlab/client.rs)\n\nAdd after the existing `get_version` method (~line 112):\n\n```rust\npub async fn get_issue_by_iid(\n &self,\n project_id: u64,\n iid: u64,\n) -> Result {\n self.request(&format!(\"/api/v4/projects/{project_id}/issues/{iid}\"))\n .await\n}\n```\n\nThis reuses the existing `request()` method which already handles:\n- Rate limiting (via `RateLimiter`)\n- Retry on 429 (up to `MAX_RETRIES`)\n- 404 → `LoreError::GitLabNotFound { resource }`\n- 401 → `LoreError::GitLabAuthFailed`\n- JSON deserialization into `GitLabIssue`\n\n### Step 2: Add `get_mr_by_iid` method (src/gitlab/client.rs)\n\n```rust\npub async fn get_mr_by_iid(\n &self,\n project_id: u64,\n iid: u64,\n) -> Result {\n self.request(&format!(\"/api/v4/projects/{project_id}/merge_requests/{iid}\"))\n .await\n}\n```\n\n### Step 3: Add wiremock tests (src/gitlab/client_tests.rs or inline #[cfg(test)])\n\nFour tests using the same wiremock pattern as `src/gitlab/graphql_tests.rs`:\n1. `get_issue_by_iid_success` — mock 200 with full GitLabIssue JSON, verify deserialized fields\n2. 
`get_issue_by_iid_not_found` — mock 404, verify `LoreError::GitLabNotFound`\n3. `get_mr_by_iid_success` — mock 200 with full GitLabMergeRequest JSON, verify deserialized fields\n4. `get_mr_by_iid_not_found` — mock 404, verify `LoreError::GitLabNotFound`\n\n## Acceptance Criteria\n- [ ] `GitLabClient::get_issue_by_iid(project_id, iid)` returns `Result`\n- [ ] `GitLabClient::get_mr_by_iid(project_id, iid)` returns `Result`\n- [ ] 404 response maps to `LoreError::GitLabNotFound`\n- [ ] 401 response maps to `LoreError::GitLabAuthFailed` (inherited from `handle_response`)\n- [ ] Successful responses deserialize into the correct struct types\n- [ ] All 4 wiremock tests pass\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n- MODIFY: src/gitlab/client.rs (add two pub async methods)\n- CREATE: src/gitlab/client_tests.rs (wiremock tests, referenced via `#[cfg(test)] #[path = \"client_tests.rs\"] mod tests;` at bottom of client.rs)\n\n## TDD Anchor\nRED: Write 4 wiremock tests in `src/gitlab/client_tests.rs`:\n\n```rust\nuse super::*;\nuse crate::core::error::LoreError;\nuse wiremock::matchers::{header, method, path};\nuse wiremock::{Mock, MockServer, ResponseTemplate};\n\n#[tokio::test]\nasync fn get_issue_by_iid_success() {\n let server = MockServer::start().await;\n let issue_json = serde_json::json!({\n \"id\": 1001,\n \"iid\": 42,\n \"project_id\": 5,\n \"title\": \"Fix login bug\",\n \"state\": \"opened\",\n \"created_at\": \"2026-01-15T10:00:00Z\",\n \"updated_at\": \"2026-02-01T14:30:00Z\",\n \"author\": { \"id\": 1, \"username\": \"dev1\", \"name\": \"Developer One\", \"avatar_url\": null, \"web_url\": \"https://gitlab.example.com/dev1\" },\n \"web_url\": \"https://gitlab.example.com/group/repo/-/issues/42\",\n \"labels\": [],\n \"milestone\": null,\n \"assignees\": [],\n \"closed_at\": null,\n \"closed_by\": null,\n \"description\": \"Login fails on mobile\"\n });\n\n Mock::given(method(\"GET\"))\n 
.and(path(\"/api/v4/projects/5/issues/42\"))\n .and(header(\"PRIVATE-TOKEN\", \"test-token\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(&issue_json))\n .mount(&server)\n .await;\n\n let client = GitLabClient::new(&server.uri(), \"test-token\", Some(100.0));\n let issue = client.get_issue_by_iid(5, 42).await.unwrap();\n assert_eq!(issue.iid, 42);\n assert_eq!(issue.title, \"Fix login bug\");\n}\n\n#[tokio::test]\nasync fn get_issue_by_iid_not_found() {\n let server = MockServer::start().await;\n\n Mock::given(method(\"GET\"))\n .and(path(\"/api/v4/projects/5/issues/999\"))\n .respond_with(ResponseTemplate::new(404).set_body_json(serde_json::json!({\"message\": \"404 Not Found\"})))\n .mount(&server)\n .await;\n\n let client = GitLabClient::new(&server.uri(), \"test-token\", Some(100.0));\n let err = client.get_issue_by_iid(5, 999).await.unwrap_err();\n assert!(matches!(err, LoreError::GitLabNotFound { .. }));\n}\n\n#[tokio::test]\nasync fn get_mr_by_iid_success() {\n let server = MockServer::start().await;\n let mr_json = serde_json::json!({\n \"id\": 2001,\n \"iid\": 101,\n \"project_id\": 5,\n \"title\": \"Add caching layer\",\n \"state\": \"merged\",\n \"created_at\": \"2026-01-20T09:00:00Z\",\n \"updated_at\": \"2026-02-10T16:00:00Z\",\n \"author\": { \"id\": 2, \"username\": \"dev2\", \"name\": \"Developer Two\", \"avatar_url\": null, \"web_url\": \"https://gitlab.example.com/dev2\" },\n \"web_url\": \"https://gitlab.example.com/group/repo/-/merge_requests/101\",\n \"source_branch\": \"feature/caching\",\n \"target_branch\": \"main\",\n \"draft\": false,\n \"merge_status\": \"can_be_merged\",\n \"labels\": [],\n \"milestone\": null,\n \"assignees\": [],\n \"reviewers\": [],\n \"merged_by\": null,\n \"merged_at\": null,\n \"closed_at\": null,\n \"closed_by\": null,\n \"description\": \"Adds Redis caching\"\n });\n\n Mock::given(method(\"GET\"))\n .and(path(\"/api/v4/projects/5/merge_requests/101\"))\n .and(header(\"PRIVATE-TOKEN\", 
\"test-token\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(&mr_json))\n .mount(&server)\n .await;\n\n let client = GitLabClient::new(&server.uri(), \"test-token\", Some(100.0));\n let mr = client.get_mr_by_iid(5, 101).await.unwrap();\n assert_eq!(mr.iid, 101);\n assert_eq!(mr.title, \"Add caching layer\");\n assert_eq!(mr.source_branch, \"feature/caching\");\n}\n\n#[tokio::test]\nasync fn get_mr_by_iid_not_found() {\n let server = MockServer::start().await;\n\n Mock::given(method(\"GET\"))\n .and(path(\"/api/v4/projects/5/merge_requests/999\"))\n .respond_with(ResponseTemplate::new(404).set_body_json(serde_json::json!({\"message\": \"404 Not Found\"})))\n .mount(&server)\n .await;\n\n let client = GitLabClient::new(&server.uri(), \"test-token\", Some(100.0));\n let err = client.get_mr_by_iid(5, 999).await.unwrap_err();\n assert!(matches!(err, LoreError::GitLabNotFound { .. }));\n}\n```\n\nGREEN: Add the two methods to `GitLabClient`.\nVERIFY: `cargo test get_issue_by_iid && cargo test get_mr_by_iid`\n\n## Edge Cases\n- The `request()` method already handles 429 retries, so no extra retry logic is needed in the new methods.\n- The GitLabIssue/GitLabMergeRequest fixture JSON must include all required (non-Option) fields. Check the struct definitions in `src/gitlab/types.rs` if deserialization fails — the test fixtures above include the minimum required fields based on the struct definitions.\n- The `project_id` parameter is the GitLab-side numeric project ID (not the local SQLite row ID). The caller must resolve this from the local `projects` table's `gitlab_project_id` column.\n\n## Dependency Context\nThis is a leaf/foundation bead with no upstream dependencies. 
Downstream bead bd-3sez (surgical.rs) calls these methods during preflight to fetch entities by IID before ingesting.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:12:14.447996Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:01:59.767219Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-159p","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-159p","depends_on_id":"bd-3sez","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-16m8","title":"OBSERV: Record item counts as span fields in sync stages","description":"## Background\nMetricsLayer (bd-34ek) captures span fields, but the stage functions must actually record item counts INTO their spans. This is the bridge between \"work happened\" and \"MetricsLayer knows about it.\"\n\n## Approach\nIn each stage function, after the work loop completes, record counts into the current span:\n\n### src/ingestion/orchestrator.rs - ingest_project_issues_with_progress() (~line 110)\nAfter issues are fetched and discussions synced:\n```rust\ntracing::Span::current().record(\"items_processed\", result.issues_upserted);\ntracing::Span::current().record(\"items_skipped\", result.issues_skipped);\ntracing::Span::current().record(\"errors\", result.errors);\n```\n\n### src/ingestion/orchestrator.rs - drain_resource_events() (~line 566)\nAfter the drain loop:\n```rust\ntracing::Span::current().record(\"items_processed\", result.fetched);\ntracing::Span::current().record(\"errors\", result.failed);\n```\n\n### src/documents/regenerator.rs - regenerate_dirty_documents() (~line 24)\nAfter the regeneration loop:\n```rust\ntracing::Span::current().record(\"items_processed\", result.regenerated);\ntracing::Span::current().record(\"items_skipped\", result.unchanged);\ntracing::Span::current().record(\"errors\", result.errored);\n```\n\n### 
src/embedding/pipeline.rs - embed_documents() (~line 36)\nAfter embedding completes:\n```rust\ntracing::Span::current().record(\"items_processed\", result.embedded);\ntracing::Span::current().record(\"items_skipped\", result.skipped);\ntracing::Span::current().record(\"errors\", result.failed);\n```\n\nIMPORTANT: These fields must be declared as tracing::field::Empty in the #[instrument] attribute (done in bd-24j1). You can only record() a field that was declared at span creation. Attempting to record an undeclared field silently does nothing.\n\n## Acceptance Criteria\n- [ ] MetricsLayer captures items_processed for each stage\n- [ ] MetricsLayer captures items_skipped and errors when non-zero\n- [ ] Fields match the span declarations from bd-24j1\n- [ ] extract_timings() returns correct counts in StageTiming\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/ingestion/orchestrator.rs (record counts in ingest + drain functions)\n- src/documents/regenerator.rs (record counts in regenerate)\n- src/embedding/pipeline.rs (record counts in embed)\n\n## TDD Loop\nRED: test_stage_fields_recorded (integration: run pipeline, extract timings, verify counts > 0)\nGREEN: Add Span::current().record() calls at end of each stage\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- Span::current() returns a disabled span if no subscriber is registered (e.g., in tests without subscriber setup). record() on disabled span is a no-op. Tests need a subscriber.\n- Field names must exactly match the declaration: \"items_processed\" not \"itemsProcessed\"\n- Recording must happen BEFORE the span closes (before function returns). 
Place at end of function but before Ok(result).","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:54:32.011236Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:27:38.620645Z","closed_at":"2026-02-04T17:27:38.620601Z","close_reason":"Added tracing::field::Empty declarations and Span::current().record() calls in 4 functions: ingest_project_issues, ingest_project_merge_requests, drain_resource_events, regenerate_dirty_documents, embed_documents","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-16m8","depends_on_id":"bd-24j1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-16m8","depends_on_id":"bd-34ek","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-16m8","depends_on_id":"bd-3er","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-17n","title":"OBSERV: Add LoggingConfig to Config struct","description":"## Background\nLoggingConfig centralizes log file settings so users can customize retention and disable file logging. It follows the same #[serde(default)] pattern as SyncConfig (src/core/config.rs:32-78) so existing config.json files continue working with zero changes.\n\n## Approach\nAdd to src/core/config.rs, after the EmbeddingConfig struct (around line 120):\n\n```rust\n#[derive(Debug, Clone, Deserialize)]\n#[serde(default)]\npub struct LoggingConfig {\n /// Directory for log files. Default: None (= XDG data dir + /logs/)\n pub log_dir: Option,\n\n /// Days to retain log files. Default: 30. Set to 0 to disable file logging.\n pub retention_days: u32,\n\n /// Enable JSON log files. 
Default: true.\n pub file_logging: bool,\n}\n\nimpl Default for LoggingConfig {\n fn default() -> Self {\n Self {\n log_dir: None,\n retention_days: 30,\n file_logging: true,\n }\n }\n}\n```\n\nAdd to the Config struct (src/core/config.rs:123-137), after the embedding field:\n\n```rust\n#[serde(default)]\npub logging: LoggingConfig,\n```\n\nNote: Using impl Default rather than default helper functions (default_retention_days, default_true) because #[serde(default)] on the struct applies Default::default() to the entire struct when the key is missing. This is the same pattern used by SyncConfig.\n\n## Acceptance Criteria\n- [ ] Deserializing {} as LoggingConfig yields retention_days=30, file_logging=true, log_dir=None\n- [ ] Deserializing {\"retention_days\": 7} preserves file_logging=true default\n- [ ] Existing config.json files (no \"logging\" key) deserialize without error\n- [ ] Config struct has .logging field accessible\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/core/config.rs (add LoggingConfig struct + Default impl, add field to Config)\n\n## TDD Loop\nRED: tests/config_tests.rs (or inline #[cfg(test)] mod):\n - test_logging_config_defaults\n - test_logging_config_partial\nGREEN: Add LoggingConfig struct, Default impl, field on Config\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- retention_days=0 means disable file logging entirely (not \"delete all files\") -- document this in the struct doc comment\n- log_dir with a relative path: should be resolved relative to CWD or treated as absolute? 
Decision: treat as absolute, document it\n- Missing \"logging\" key in JSON: #[serde(default)] handles this -- the entire LoggingConfig gets Default::default()","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:53:55.471193Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:10:22.751969Z","closed_at":"2026-02-04T17:10:22.751921Z","close_reason":"Added LoggingConfig struct with log_dir, retention_days, file_logging fields and serde defaults","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-17n","depends_on_id":"bd-2nx","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-17v","title":"[CP1] gi sync-status enhancement","description":"## Background\n\nThe `gi sync-status` command shows synchronization state: last successful sync time, cursor positions per project/resource, and overall health. This helps users understand when data was last refreshed and diagnose sync issues.\n\n## Approach\n\n### Module: src/cli/commands/sync_status.rs (enhance existing or create)\n\n### Handler Function\n\n```rust\npub async fn handle_sync_status(conn: &Connection) -> Result<()>\n```\n\n### Data to Display\n\n1. **Last sync run**: From `sync_runs` table\n - Started at, completed at, status\n - Issues fetched, discussions fetched\n\n2. **Cursor positions**: From `sync_cursors` table\n - Per (project, resource_type) pair\n - Show updated_at_cursor as human-readable date\n - Show tie_breaker_id (GitLab ID of last processed item)\n\n3. 
**Overall counts**: Quick summary\n - Total issues, discussions, notes in DB\n\n### Output Format\n\n```\nLast Sync\n─────────\nStatus: completed\nStarted: 2024-01-25 10:30:00\nCompleted: 2024-01-25 10:35:00\nDuration: 5m 23s\n\nCursor Positions\n────────────────\ngroup/project-one (issues):\n Last updated_at: 2024-01-25 10:30:00\n Last GitLab ID: 12345\n\nData Summary\n────────────\nIssues: 1,234\nDiscussions: 5,678\nNotes: 12,345 (excluding 2,000 system)\n```\n\n### Queries\n\n```sql\n-- Last sync run\nSELECT * FROM sync_runs ORDER BY started_at DESC LIMIT 1\n\n-- Cursor positions\nSELECT p.path, sc.resource_type, sc.updated_at_cursor, sc.tie_breaker_id\nFROM sync_cursors sc\nJOIN projects p ON sc.project_id = p.id\n\n-- Data summary\nSELECT COUNT(*) FROM issues\nSELECT COUNT(*) FROM discussions\nSELECT COUNT(*), SUM(is_system) FROM notes\n```\n\n## Acceptance Criteria\n\n- [ ] Shows last sync run with status and timing\n- [ ] Shows cursor position per project/resource\n- [ ] Shows total counts for issues, discussions, notes\n- [ ] Handles case where no sync has run yet\n- [ ] Formats timestamps as human-readable local time\n\n## Files\n\n- src/cli/commands/sync_status.rs (create or enhance)\n- src/cli/mod.rs (add SyncStatus variant if new)\n\n## TDD Loop\n\nRED:\n```rust\n#[tokio::test] async fn sync_status_shows_last_run()\n#[tokio::test] async fn sync_status_shows_cursor_positions()\n#[tokio::test] async fn sync_status_handles_no_sync_yet()\n```\n\nGREEN: Implement handler with queries and formatting\n\nVERIFY: `cargo test sync_status`\n\n## Edge Cases\n\n- No sync has ever run - show \"No sync runs recorded\"\n- Sync in progress - show \"Status: running\" with started_at\n- Cursor at epoch 0 - means fresh start, show \"Not started\"\n- Multiple projects - show cursor for 
each","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-25T17:02:38.409353Z","created_by":"tayloreernisse","updated_at":"2026-01-25T23:03:21.851557Z","closed_at":"2026-01-25T23:03:21.851496Z","close_reason":"Implemented gi sync-status showing last run, cursor positions, and data summary","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-17v","depends_on_id":"bd-208","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-18bf","title":"NOTE-0B: Immediate deletion propagation for swept notes","description":"## Background\nWhen sweep deletes stale notes, orphaned note documents remain in search results until generate-docs --full runs. This erodes dataset trust. Propagate deletion to documents immediately in the same transaction.\n\n## Approach\nUpdate both sweep functions (issue + MR) to use set-based SQL that deletes documents and dirty_sources entries for stale notes before deleting the note rows:\n\nStep 1: DELETE FROM documents WHERE source_type = 'note' AND source_id IN (SELECT id FROM notes WHERE discussion_id = ? AND last_seen_at < ? AND is_system = 0)\nStep 2: DELETE FROM dirty_sources WHERE source_type = 'note' AND source_id IN (same subquery)\nStep 3: DELETE FROM notes WHERE discussion_id = ? AND last_seen_at < ?\n\nDocument DELETE cascades to document_labels/document_paths via ON DELETE CASCADE (defined in migration 007_documents.sql). FTS trigger documents_ad auto-removes FTS entry (defined in migration 008_fts5.sql). Same pattern for mr_discussions.rs sweep.\n\nNote: MR sweep_stale_notes() at line 551 uses a different WHERE clause (project_id + discussion_id IN subquery + last_seen_at). 
Apply the same document propagation pattern with the matching subquery.\n\n## Files\n- MODIFY: src/ingestion/discussions.rs (update sweep_stale_issue_notes from NOTE-0A)\n- MODIFY: src/ingestion/mr_discussions.rs (update sweep_stale_notes at line 551)\n\n## TDD Anchor\nRED: test_issue_note_sweep_deletes_note_documents_immediately — setup 3 notes with documents, re-sync 2, sweep, assert stale doc deleted.\nGREEN: Add document/dirty_sources DELETE before note DELETE in sweep functions.\nVERIFY: cargo test sweep_deletes_note_documents -- --nocapture\nTests: test_mr_note_sweep_deletes_note_documents_immediately, test_sweep_deletion_handles_note_without_document, test_set_based_deletion_atomicity\n\n## Acceptance Criteria\n- [ ] Stale note sweep deletes corresponding documents in same transaction\n- [ ] Stale note sweep deletes corresponding dirty_sources entries\n- [ ] Non-system notes only — system notes never have documents (is_system = 0 filter)\n- [ ] Set-based SQL (not per-note loops) for performance\n- [ ] Works for both issue and MR discussion sweeps\n- [ ] No error when sweeping notes that have no documents (DELETE WHERE on absent rows = no-op)\n- [ ] All 4 tests pass\n\n## Dependency Context\n- Depends on NOTE-0A (bd-3bpk): uses sweep_stale_issue_notes/sweep_stale_notes functions created/modified in that bead\n- Depends on NOTE-2A (bd-1oi7): documents table must accept source_type='note' (migration 024 adds CHECK constraint)\n\n## Edge Cases\n- System notes: WHERE clause filters with is_system = 0 (system notes never get documents)\n- Notes without documents: DELETE WHERE on non-existent document is a no-op in SQLite\n- FTS consistency: documents_ad trigger (migration 008) handles FTS cleanup on document DELETE","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:59:33.412628Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.082355Z","closed_at":"2026-02-12T18:13:15.082307Z","close_reason":"Implemented by agent 
swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"]} {"id":"bd-18dn","title":"Add normalize_query_path() pure function for path canonicalization","description":"## Background\nPlan section 3a (iteration 6, feedback-6). User input paths like `./src//foo.rs` or whitespace-padded paths fail path resolution even when the file exists in the database. A syntactic normalization function runs before `build_path_query()` to reduce false negatives.\n\n## Approach\nAdd `normalize_query_path()` as a private pure function in who.rs (near `half_life_decay()`):\n\n```rust\nfn normalize_query_path(input: &str) -> String {\n let trimmed = input.trim();\n let stripped = trimmed.strip_prefix(\"./\").unwrap_or(trimmed);\n // Collapse repeated /\n let mut result = String::with_capacity(stripped.len());\n let mut prev_slash = false;\n for ch in stripped.chars() {\n if ch == '/' {\n if !prev_slash { result.push('/'); }\n prev_slash = true;\n } else {\n result.push(ch);\n prev_slash = false;\n }\n }\n result\n}\n```\n\nCalled once at top of `run_who()` before `build_path_query()`. 
Robot JSON `resolved_input` includes both `path_input_original` (raw) and `path_input_normalized` (after canonicalization).\n\nRules:\n- Strip leading `./`\n- Collapse repeated `/` (e.g., `src//foo.rs` -> `src/foo.rs`)\n- Trim leading/trailing whitespace\n- Preserve trailing `/` (signals explicit prefix intent)\n- Purely syntactic — no filesystem or DB lookups\n\n## TDD Loop\n\n### RED (write first):\n```rust\n#[test]\nfn test_path_normalization_handles_dot_and_double_slash() {\n assert_eq!(normalize_query_path(\"./src//foo.rs\"), \"src/foo.rs\");\n assert_eq!(normalize_query_path(\" src/bar.rs \"), \"src/bar.rs\");\n assert_eq!(normalize_query_path(\"src/foo.rs\"), \"src/foo.rs\"); // unchanged\n assert_eq!(normalize_query_path(\"\"), \"\"); // empty passthrough\n}\n\n#[test]\nfn test_path_normalization_preserves_prefix_semantics() {\n assert_eq!(normalize_query_path(\"./src/dir/\"), \"src/dir/\"); // trailing slash preserved\n assert_eq!(normalize_query_path(\"src/dir\"), \"src/dir\"); // no trailing slash = file\n}\n```\n\n### GREEN: Implement normalize_query_path (5-10 lines).\n### VERIFY: `cargo test -p lore -- test_path_normalization`\n\n## Acceptance Criteria\n- [ ] test_path_normalization_handles_dot_and_double_slash passes\n- [ ] test_path_normalization_preserves_prefix_semantics passes\n- [ ] Function is private (`fn` not `pub fn`)\n- [ ] No DB or filesystem dependency — pure string function\n- [ ] Called in run_who() before build_path_query()\n- [ ] Robot JSON resolved_input includes path_input_original and path_input_normalized\n\n## Files\n- src/cli/commands/who.rs (function near half_life_decay, call site in run_who)\n\n## Edge Cases\n- Empty string -> empty string\n- Only whitespace -> empty string\n- Multiple leading ./ (\"././src\") -> strip first \"./\" only per plan spec\n- Trailing slash preserved for prefix 
intent","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:27.954857Z","created_by":"tayloreernisse","updated_at":"2026-02-12T21:03:35.102542Z","closed_at":"2026-02-12T21:03:35.102484Z","close_reason":"Completed: implemented normalize_query_path() with TDD, wired into Expert/Overlap branches, added path_input_original/normalized to WhoResolvedInput and robot JSON","compaction_level":0,"original_size":0,"labels":["scoring"]} -{"id":"bd-18qs","title":"Implement entity table + filter bar widgets","description":"## Background\nThe entity table and filter bar are shared widgets used by Issue List, MR List, and potentially Search results. The entity table supports sortable columns with responsive width allocation. The filter bar provides a typed DSL for filtering with inline diagnostics.\n\n## Approach\nEntity Table (view/common/entity_table.rs):\n- EntityTable widget: generic over row type\n- TableRow trait: fn cells(&self) -> Vec, fn sort_key(&self, col: usize) -> Ordering\n- Column definitions: name, min_width, flex_weight, alignment, sort_field\n- Responsive column fitting: hide low-priority columns as terminal narrows\n- Keyboard: j/k scroll, J/K page scroll, Tab cycle sort column, Enter select, g+g top, G bottom\n- Visual: alternating row colors, selected row highlight, sort indicator arrow\n\nFilter Bar (view/common/filter_bar.rs):\n- FilterBar widget wrapping ftui TextInput\n- DSL parsing (crate filter_dsl.rs): quoted values (\"in progress\"), negation prefix (-closed), field:value syntax (author:taylor, state:opened, label:bug), free-text search\n- Inline diagnostics: unknown field names highlighted, cursor position for error\n- Applied filter chips shown as tags below the input\n\nFilter DSL (filter_dsl.rs):\n- parse_filter_tokens(input: &str) -> Vec\n- FilterToken enum: FieldValue{field, value}, Negation{field, value}, FreeText(String), QuotedValue(String)\n- Validation: known fields per entity type (issues: state, author, 
assignee, label, milestone, status; MRs: state, author, reviewer, target_branch, source_branch, label, draft)\n\n## Acceptance Criteria\n- [ ] EntityTable renders with responsive column widths\n- [ ] Columns hide gracefully when terminal is too narrow\n- [ ] j/k scrolls, Enter selects, Tab cycles sort column\n- [ ] Sort indicator (arrow) shows on active sort column\n- [ ] FilterBar captures text input and parses DSL tokens\n- [ ] Quoted values preserved as single token\n- [ ] Negation prefix (-closed) creates exclusion filter\n- [ ] field:value syntax maps to typed filter fields\n- [ ] Unknown field names highlighted as error\n- [ ] Filter chips rendered below input bar\n\n## Files\n- CREATE: crates/lore-tui/src/view/common/entity_table.rs\n- CREATE: crates/lore-tui/src/view/common/filter_bar.rs\n- CREATE: crates/lore-tui/src/filter_dsl.rs\n\n## TDD Anchor\nRED: Write test_parse_filter_basic in filter_dsl.rs that parses \"state:opened author:taylor\" and asserts two FieldValue tokens.\nGREEN: Implement parse_filter_tokens with field:value splitting.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_parse_filter\n\nAdditional tests:\n- test_parse_quoted_value: \"in progress\" -> single QuotedValue token\n- test_parse_negation: -closed -> Negation token\n- test_parse_mixed: state:opened \"bug fix\" -wontfix -> 3 tokens of correct types\n- test_column_hiding: EntityTable with 5 columns hides lowest priority at 60 cols\n\n## Edge Cases\n- Filter DSL must handle Unicode in values (CJK issue titles)\n- Empty filter string should show all results (no-op)\n- Very long filter strings must not overflow the input area\n- Tab cycling sort must wrap around (last column -> first)\n- Column widths must respect min_width even when terminal is very 
narrow","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T16:58:07.586225Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:28.085981Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-18qs","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-18qs","depends_on_id":"bd-6pmy","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-18qs","title":"Implement entity table + filter bar widgets","description":"## Background\nThe entity table and filter bar are shared widgets used by Issue List, MR List, and potentially Search results. The entity table supports sortable columns with responsive width allocation. The filter bar provides a typed DSL for filtering with inline diagnostics.\n\n## Approach\nEntity Table (view/common/entity_table.rs):\n- EntityTable widget: generic over row type\n- TableRow trait: fn cells(&self) -> Vec, fn sort_key(&self, col: usize) -> Ordering\n- Column definitions: name, min_width, flex_weight, alignment, sort_field\n- Responsive column fitting: hide low-priority columns as terminal narrows\n- Keyboard: j/k scroll, J/K page scroll, Tab cycle sort column, Enter select, g+g top, G bottom\n- Visual: alternating row colors, selected row highlight, sort indicator arrow\n\nFilter Bar (view/common/filter_bar.rs):\n- FilterBar widget wrapping ftui TextInput\n- DSL parsing (crate filter_dsl.rs): quoted values (\"in progress\"), negation prefix (-closed), field:value syntax (author:taylor, state:opened, label:bug), free-text search\n- Inline diagnostics: unknown field names highlighted, cursor position for error\n- Applied filter chips shown as tags below the input\n\nFilter DSL (filter_dsl.rs):\n- parse_filter_tokens(input: &str) -> Vec\n- FilterToken enum: FieldValue{field, value}, Negation{field, value}, FreeText(String), QuotedValue(String)\n- Validation: known fields per entity 
type (issues: state, author, assignee, label, milestone, status; MRs: state, author, reviewer, target_branch, source_branch, label, draft)\n\n## Acceptance Criteria\n- [ ] EntityTable renders with responsive column widths\n- [ ] Columns hide gracefully when terminal is too narrow\n- [ ] j/k scrolls, Enter selects, Tab cycles sort column\n- [ ] Sort indicator (arrow) shows on active sort column\n- [ ] FilterBar captures text input and parses DSL tokens\n- [ ] Quoted values preserved as single token\n- [ ] Negation prefix (-closed) creates exclusion filter\n- [ ] field:value syntax maps to typed filter fields\n- [ ] Unknown field names highlighted as error\n- [ ] Filter chips rendered below input bar\n\n## Files\n- CREATE: crates/lore-tui/src/view/common/entity_table.rs\n- CREATE: crates/lore-tui/src/view/common/filter_bar.rs\n- CREATE: crates/lore-tui/src/filter_dsl.rs\n\n## TDD Anchor\nRED: Write test_parse_filter_basic in filter_dsl.rs that parses \"state:opened author:taylor\" and asserts two FieldValue tokens.\nGREEN: Implement parse_filter_tokens with field:value splitting.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_parse_filter\n\nAdditional tests:\n- test_parse_quoted_value: \"in progress\" -> single QuotedValue token\n- test_parse_negation: -closed -> Negation token\n- test_parse_mixed: state:opened \"bug fix\" -wontfix -> 3 tokens of correct types\n- test_column_hiding: EntityTable with 5 columns hides lowest priority at 60 cols\n\n## Edge Cases\n- Filter DSL must handle Unicode in values (CJK issue titles)\n- Empty filter string should show all results (no-op)\n- Very long filter strings must not overflow the input area\n- Tab cycling sort must wrap around (last column -> first)\n- Column widths must respect min_width even when terminal is very 
narrow","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:58:07.586225Z","created_by":"tayloreernisse","updated_at":"2026-02-18T19:18:07.275204Z","closed_at":"2026-02-18T19:18:07.275087Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-18qs","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-18qs","depends_on_id":"bd-6pmy","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-18t","title":"Implement discussion truncation logic","description":"## Background\nDiscussion threads can contain dozens of notes spanning thousands of characters. The truncation module ensures discussion documents stay within a 32k character limit (suitable for embedding chunking) by dropping middle notes while preserving first and last notes for context. A separate hard safety cap of 2MB applies to ALL document types for pathological content (pasted logs, base64 blobs). Issue/MR documents are NOT truncated by the discussion logic — only the hard cap applies.\n\n## Approach\nCreate `src/documents/truncation.rs` per PRD Section 2.3:\n\n```rust\npub const MAX_DISCUSSION_CHARS: usize = 32_000;\npub const MAX_DOCUMENT_CHARS_HARD: usize = 2_000_000;\n\npub struct NoteContent {\n pub author: String,\n pub date: String,\n pub body: String,\n}\n\npub struct TruncationResult {\n pub content: String,\n pub is_truncated: bool,\n pub reason: Option,\n}\n\npub enum TruncationReason {\n TokenLimitMiddleDrop,\n SingleNoteOversized,\n FirstLastOversized,\n HardCapOversized,\n}\n```\n\n**Core functions:**\n- `truncate_discussion(notes: &[NoteContent], max_chars: usize) -> TruncationResult`\n- `truncate_utf8(s: &str, max_bytes: usize) -> &str` (shared with fts.rs)\n- `truncate_hard_cap(content: &str) -> TruncationResult` (for any doc type)\n\n**Algorithm for truncate_discussion:**\n1. Format all notes as `@author (date):\\nbody\\n\\n`\n2. 
If total <= max_chars: return as-is\n3. If single note: truncate at UTF-8 boundary, append `[truncated]`, reason = SingleNoteOversized\n4. Binary search: find max N where first N notes + last 1 note + marker fit within max_chars\n5. If first + last > max_chars: keep only first (truncated), reason = FirstLastOversized\n6. Otherwise: first N + marker + last M, reason = TokenLimitMiddleDrop\n\n**Marker format:** `\\n\\n[... N notes omitted for length ...]\\n\\n`\n\n## Acceptance Criteria\n- [ ] Discussion with total < 32k chars returns untruncated\n- [ ] Discussion > 32k chars: middle notes dropped, first + last preserved\n- [ ] Truncation marker shows correct count of omitted notes\n- [ ] Single note > 32k chars: truncated at UTF-8-safe boundary with `[truncated]` appended\n- [ ] First + last note > 32k: only first note kept (truncated if needed)\n- [ ] Hard cap (2MB) truncates any document type at UTF-8-safe boundary\n- [ ] `truncate_utf8` never panics on multi-byte codepoints (emoji, CJK, accented chars)\n- [ ] `TruncationReason::as_str()` returns DB-compatible strings matching CHECK constraint\n\n## Files\n- `src/documents/truncation.rs` — new file\n- `src/documents/mod.rs` — add `pub use truncation::{truncate_discussion, truncate_hard_cap, TruncationResult, NoteContent};`\n\n## TDD Loop\nRED: Tests in `#[cfg(test)] mod tests`:\n- `test_no_truncation_under_limit` — 3 short notes, all fit\n- `test_middle_notes_dropped` — 10 notes totaling > 32k, first+last preserved\n- `test_single_note_oversized` — one note of 50k chars, truncated safely\n- `test_first_last_oversized` — first=20k, last=20k, only first kept\n- `test_one_note_total` — single note under limit: no truncation\n- `test_utf8_boundary_safety` — content with emoji/CJK at truncation point\n- `test_hard_cap` — 3MB content truncated to 2MB\n- `test_marker_count_correct` — marker says \"[... 
5 notes omitted ...]\" when 5 dropped\nGREEN: Implement truncation logic\nVERIFY: `cargo test truncation`\n\n## Edge Cases\n- Empty notes list: return empty content, not truncated\n- All notes are empty strings: total = 0, no truncation\n- Note body contains only multi-byte characters: truncate_utf8 walks backward to find safe boundary\n- Note body with trailing newlines: formatted output should not have excessive blank lines","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-30T15:25:45.597167Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:21:32.256569Z","closed_at":"2026-01-30T17:21:32.256507Z","close_reason":"Completed: truncate_discussion, truncate_hard_cap, truncate_utf8, TruncationReason with as_str(), 12 tests pass","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-18t","depends_on_id":"bd-36p","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-18yh","title":"NOTE-2C: Note document extractor function","description":"## Background\nEach non-system note becomes a searchable document in the FTS/embedding pipeline. Follows the pattern of extract_issue_document() (line 85), extract_mr_document() (line 186), extract_discussion_document() (line 302) in src/documents/extractor.rs.\n\n## Approach\nAdd pub fn extract_note_document(conn: &Connection, note_id: i64) -> Result> to src/documents/extractor.rs:\n\n1. Fetch note with JOIN to discussions and projects:\n SELECT n.id, n.gitlab_id, n.author_username, n.body, n.note_type, n.is_system, n.created_at, n.updated_at, n.position_new_path, n.position_new_line, n.position_old_path, n.position_old_line, n.resolvable, n.resolved, n.resolved_by, d.noteable_type, d.issue_id, d.merge_request_id, p.path_with_namespace, p.id as project_id\n FROM notes n\n JOIN discussions d ON n.discussion_id = d.id\n JOIN projects p ON n.project_id = p.id\n WHERE n.id = ?\n\n2. 
Return None for: system notes (is_system = 1), not found, orphaned discussions (no parent issue/MR)\n\n3. Fetch parent entity (Issue or MR) — get iid, title, web_url, labels:\n For issues: SELECT iid, title, web_url FROM issues WHERE id = ?\n For MRs: SELECT iid, title, web_url FROM merge_requests WHERE id = ?\n Labels: SELECT label_name FROM issue_labels/mr_labels WHERE issue_id/mr_id = ?\n (Same pattern as extract_discussion_document lines 332-401)\n\n4. Build paths: BTreeSet from position_old_path + position_new_path (filter None values)\n\n5. Build URL: parent_web_url + \"#note_{gitlab_id}\"\n\n6. Format content with structured key-value header:\n [[Note]]\n source_type: note\n note_gitlab_id: {gitlab_id}\n project: {path_with_namespace}\n parent_type: {Issue|MergeRequest}\n parent_iid: {iid}\n parent_title: {title}\n note_type: {DiffNote|DiscussionNote|...}\n author: @{author}\n created_at: {iso8601}\n resolved: {true|false} (only if resolvable)\n path: {position_new_path}:{line} (only if DiffNote with path)\n labels: {comma-separated parent labels}\n url: {url}\n\n --- Body ---\n\n {body}\n\n7. Title: \"Note by @{author} on {Issue|MR} #{iid}: {parent_title}\"\n\n8. Compute hashes: content_hash via compute_content_hash() (line 66), labels_hash via compute_list_hash(), paths_hash via compute_list_hash(). Apply truncate_hard_cap() (imported from truncation.rs at line 9).\n\n9. 
Return DocumentData (struct defined at line 47) with: source_type: SourceType::Note, source_id: note_id, project_id, author_username, labels, paths (as Vec), labels_hash, paths_hash, created_at, updated_at, url, title, content_text (from hard_cap), content_hash, is_truncated, truncated_reason.\n\n## Files\n- MODIFY: src/documents/extractor.rs (add extract_note_document after extract_discussion_document, ~line 500)\n- MODIFY: src/documents/mod.rs (add extract_note_document to pub use exports, line 12 area)\n\n## TDD Anchor\nRED: test_note_document_basic_format — insert project, issue, discussion, note; extract; assert content contains [[Note]], author, parent reference.\nGREEN: Implement extract_note_document with SQL JOIN and content formatting.\nVERIFY: cargo test note_document_basic_format -- --nocapture\nTests: test_note_document_diffnote_with_path, test_note_document_inherits_parent_labels, test_note_document_mr_parent, test_note_document_system_note_returns_none, test_note_document_not_found, test_note_document_orphaned_discussion, test_note_document_hash_deterministic, test_note_document_empty_body, test_note_document_null_body\n\n## Acceptance Criteria\n- [ ] extract_note_document returns Some(DocumentData) for non-system notes\n- [ ] Returns None for system notes, not-found, orphaned discussions\n- [ ] Content includes structured [[Note]] header with all parent context fields\n- [ ] DiffNote includes file path and line info in content header\n- [ ] Labels inherited from parent issue/MR\n- [ ] URL format: parent_url#note_{gitlab_id}\n- [ ] Title format: \"Note by @{author} on {Issue|MR} #{iid}: {parent_title}\"\n- [ ] Hash is deterministic across calls (same input = same hash)\n- [ ] Empty/null body handled gracefully (use empty string)\n- [ ] truncate_hard_cap applied to content\n- [ ] All 10 tests pass\n\n## Dependency Context\n- Depends on NOTE-2B (bd-ef0u): SourceType::Note variant must exist to construct DocumentData\n\n## Edge Cases\n- NULL body: use 
empty string \"\" — not all notes have body text\n- Orphaned discussion: parent issue/MR deleted but discussion remains — return None\n- Very long note body: truncate_hard_cap handles this (2MB limit)\n- Note with no position data: skip path line in content header\n- Note on MR vs Issue: different label table (mr_labels vs issue_labels)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:01.802842Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:23.928224Z","closed_at":"2026-02-12T18:13:23.928173Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"],"dependencies":[{"issue_id":"bd-18yh","depends_on_id":"bd-2ezb","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-18yh","depends_on_id":"bd-3cjp","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-1au9","title":"Audit and improve test coverage across ingestion module","description":"During code reorganization, discovered that ingestion/issues.rs has only 4 tests covering passes_cursor_filter, while 10 production functions (~400 lines) are untested:\n\nUNTESTED FUNCTIONS in ingestion/issues.rs:\n- ingest_issues() - main async pipeline with cursor-based pagination, shutdown handling\n- process_single_issue() - transforms GitLab issue, wraps in transaction\n- process_issue_in_transaction() - DB upsert with ON CONFLICT, label/assignee/milestone association, dirty tracking\n- upsert_label_tx() - label upsert with INSERT OR IGNORE + created count tracking\n- link_issue_label_tx() - issue-label junction table insert\n- upsert_milestone_tx() - milestone upsert with RETURNING id\n- get_sync_cursor() - reads sync_cursors table for incremental sync\n- update_sync_cursor() - writes sync cursor with tie-breaker ID\n- get_issues_needing_discussion_sync() - identifies issues needing discussion refresh\n- parse_timestamp() - RFC3339 parsing with error 
wrapping\n\nLIKELY SIMILAR GAPS in sibling files:\n- ingestion/merge_requests.rs (479 lines) - parallel structure to issues.rs\n- ingestion/discussions.rs (469 lines prod code) - discussion upsert pipeline\n- ingestion/mr_discussions.rs (738 lines prod before tests) - MR discussion pipeline\n- ingestion/orchestrator.rs (1703 lines) - full pipeline orchestration\n\nThe ingestion module handles the most critical data path (GitLab API -> SQLite) yet relies primarily on integration-level orchestrator tests rather than unit tests for individual functions.\n\nPRIORITY AREAS:\n1. DB upsert logic with ON CONFLICT handling (data correctness)\n2. Cursor-based pagination (incremental sync correctness)\n3. Label/milestone/assignee association (relational integrity)\n4. Dirty tracker marking after upserts (document pipeline triggering)\n5. Discussion sync queue population (cascading sync correctness)\n6. Error handling paths (invalid timestamps, missing data)\n\nAPPROACH: Use in-memory SQLite (create_connection(Path::new(\":memory:\")) + run_migrations) for unit tests. See existing patterns in core/db_tests.rs and documents/regenerator_tests.rs.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-13T00:53:15.302370Z","created_by":"tayloreernisse","updated_at":"2026-02-13T00:53:15.305167Z","compaction_level":0,"original_size":0,"labels":["testing"]} {"id":"bd-1b0n","title":"OBSERV: Print human-readable timing summary after interactive sync","description":"## Background\nInteractive users want a quick timing summary after sync completes. This is the human-readable equivalent of meta.stages in robot JSON. 
Gated behind IngestDisplay::show_text so it doesn't appear in -q, robot, or progress_only modes.\n\n## Approach\nAdd a function to format and print the timing summary, called from run_sync() after the pipeline completes:\n\n```rust\nfn print_timing_summary(stages: &[StageTiming], total_elapsed: Duration) {\n eprintln!();\n eprintln!(\"Sync complete in {:.1}s\", total_elapsed.as_secs_f64());\n for stage in stages {\n let dots = \".\".repeat(20_usize.saturating_sub(stage.name.len()));\n eprintln!(\n \" {} {} {:.1}s ({} items{})\",\n stage.name,\n dots,\n stage.elapsed_ms as f64 / 1000.0,\n stage.items_processed,\n if stage.errors > 0 { format!(\", {} errors\", stage.errors) } else { String::new() },\n );\n }\n}\n```\n\nCall in run_sync() (src/cli/commands/sync.rs), after pipeline and before return:\n```rust\nif display.show_text {\n let stages = metrics_handle.extract_timings();\n print_timing_summary(&stages, start.elapsed());\n}\n```\n\nOutput format per PRD Section 4.6.4:\n```\nSync complete in 45.2s\n Ingest issues .... 12.3s (150 items, 42 discussions)\n Ingest MRs ....... 18.9s (85 items, 1 error)\n Generate docs .... 8.5s (235 documents)\n Embed ............ 
5.5s (1024 chunks)\n```\n\n## Acceptance Criteria\n- [ ] Interactive lore sync prints timing summary to stderr after completion\n- [ ] Summary shows total time and per-stage breakdown\n- [ ] lore -q sync does NOT print timing summary\n- [ ] Robot mode does NOT print timing summary (only JSON)\n- [ ] Error counts shown when non-zero\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/cli/commands/sync.rs (add print_timing_summary function, call after pipeline)\n\n## TDD Loop\nRED: test_timing_summary_format (capture stderr, verify format matches PRD example pattern)\nGREEN: Implement print_timing_summary, gate behind display.show_text\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- Empty stages (e.g., sync with no projects configured): print \"Sync complete in 0.0s\" with no stage lines\n- Very fast stages (<1ms): show \"0.0s\" not scientific notation\n- Stage names with varying lengths: dot padding keeps alignment readable","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:54:32.109882Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:32:52.558314Z","closed_at":"2026-02-04T17:32:52.558264Z","close_reason":"Added print_timing_summary with per-stage breakdown (name, elapsed, items, errors, rate limits), nested sub-stage support, gated behind metrics Option","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-1b0n","depends_on_id":"bd-1zj6","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1b0n","depends_on_id":"bd-3er","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-1b50","title":"Update existing tests for new ScoringConfig fields","description":"## Background\nThe existing test test_expert_scoring_weights_are_configurable (who.rs:3551-3574) constructs a ScoringConfig with only the original 3 fields. 
After bd-2w1p adds 8 new fields, this test will not compile without ..Default::default().\n\n## Approach\nFind the test at who.rs:3551-3574. The flipped config construction at line 3567:\n```rust\nlet flipped = ScoringConfig {\n author_weight: 5,\n reviewer_weight: 30,\n note_bonus: 1,\n};\n```\nChange to:\n```rust\nlet flipped = ScoringConfig {\n author_weight: 5,\n reviewer_weight: 30,\n note_bonus: 1,\n ..Default::default()\n};\n```\n\nAlso check default_scoring() helper at line 2451 — it calls ScoringConfig::default() which already works.\n\n### Important: Scope boundary\nThis bead ONLY handles ScoringConfig struct literal changes. The query_expert() function signature change (7 params -> 10 params) happens in bd-13q8 (Layer 3), which is responsible for updating all test callsites at that time.\n\n### Why existing assertions do not break:\nAll test data is inserted with now_ms(). With as_of_ms also at ~now_ms(), elapsed ~0ms, decay ~1.0. So integer-rounded scores are identical to the flat-weight model.\n\n## Acceptance Criteria\n- [ ] cargo test passes with zero assertion changes to existing test values\n- [ ] test_expert_scoring_weights_are_configurable compiles and passes\n- [ ] All other existing who tests pass unchanged\n- [ ] No new test code needed — only ..Default::default() additions\n- [ ] cargo check --all-targets clean\n\n## Files\n- MODIFY: src/cli/commands/who.rs (ScoringConfig literal at line 3567)\n\n## TDD Loop\nN/A — mechanical change, no new tests.\nVERIFY: cargo check --all-targets && cargo test -p lore -- test_expert_scoring_weights_are_configurable\n\n## Edge Cases\n- Search for ALL ScoringConfig { ... 
} literals in test module — there may be more than the one at line 3567\n- The default_scoring() helper at line 2451 uses ScoringConfig::default() — no change needed","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-09T17:00:45.084472Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:46:47.140946Z","compaction_level":0,"original_size":0,"labels":["scoring","test"],"dependencies":[{"issue_id":"bd-1b50","depends_on_id":"bd-2w1p","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"}]} +{"id":"bd-1b50","title":"Update existing tests for new ScoringConfig fields","description":"## Background\nThe existing test test_expert_scoring_weights_are_configurable (who.rs:3551-3574) constructs a ScoringConfig with only the original 3 fields. After bd-2w1p adds 8 new fields, this test will not compile without ..Default::default().\n\n## Approach\nFind the test at who.rs:3551-3574. The flipped config construction at line 3567:\n```rust\nlet flipped = ScoringConfig {\n author_weight: 5,\n reviewer_weight: 30,\n note_bonus: 1,\n};\n```\nChange to:\n```rust\nlet flipped = ScoringConfig {\n author_weight: 5,\n reviewer_weight: 30,\n note_bonus: 1,\n ..Default::default()\n};\n```\n\nAlso check default_scoring() helper at line 2451 — it calls ScoringConfig::default() which already works.\n\n### Important: Scope boundary\nThis bead ONLY handles ScoringConfig struct literal changes. The query_expert() function signature change (7 params -> 10 params) happens in bd-13q8 (Layer 3), which is responsible for updating all test callsites at that time.\n\n### Why existing assertions do not break:\nAll test data is inserted with now_ms(). With as_of_ms also at ~now_ms(), elapsed ~0ms, decay ~1.0. 
So integer-rounded scores are identical to the flat-weight model.\n\n## Acceptance Criteria\n- [ ] cargo test passes with zero assertion changes to existing test values\n- [ ] test_expert_scoring_weights_are_configurable compiles and passes\n- [ ] All other existing who tests pass unchanged\n- [ ] No new test code needed — only ..Default::default() additions\n- [ ] cargo check --all-targets clean\n\n## Files\n- MODIFY: src/cli/commands/who.rs (ScoringConfig literal at line 3567)\n\n## TDD Loop\nN/A — mechanical change, no new tests.\nVERIFY: cargo check --all-targets && cargo test -p lore -- test_expert_scoring_weights_are_configurable\n\n## Edge Cases\n- Search for ALL ScoringConfig { ... } literals in test module — there may be more than the one at line 3567\n- The default_scoring() helper at line 2451 uses ScoringConfig::default() — no change needed","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-09T17:00:45.084472Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:43:04.409277Z","closed_at":"2026-02-12T20:43:04.409239Z","close_reason":"Implemented by time-decay swarm: 3 agents, 12 tasks, 621 tests passing, all quality gates green","compaction_level":0,"original_size":0,"labels":["scoring","test"],"dependencies":[{"issue_id":"bd-1b50","depends_on_id":"bd-2w1p","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-1b6k","title":"Epic: TUI Phase 5.5 — Reliability Test Pack","description":"## Background\nPhase 5.5 is a comprehensive reliability test suite covering race conditions, stress tests, property-based testing, and deterministic clock verification. 
These tests ensure the TUI is robust under adverse conditions (rapid input, concurrent writes, resize storms, backpressure).\n\n## Acceptance Criteria\n- [ ] Stale response drop tests pass\n- [ ] Sync cancel/resume tests pass\n- [ ] SQLITE_BUSY retry tests pass\n- [ ] Resize storm + rapid keypress tests pass without panic\n- [ ] Property tests for navigation invariants pass\n- [ ] Performance benchmark fixtures (S/M/L tiers) pass SLOs\n- [ ] Event fuzz tests: 10k traces with zero invariant violations\n- [ ] Deterministic clock/render tests produce identical output\n- [ ] 30-minute soak test: no panic, no deadlock, memory growth < 5%\n- [ ] Concurrent pagination/write race tests: no duplicate/skipped rows\n- [ ] Query cancellation race tests: no cross-task bleed, no stuck loading","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:04:04.486702Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.508682Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1b6k","depends_on_id":"bd-3t6r","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1b91","title":"CLI: show issue status display (human + robot)","description":"## Background\nOnce status data is in the DB, lore show issue needs to display it. Human view shows colored status text; robot view includes all 5 fields as JSON.\n\n## Approach\nAdd 5 fields to the IssueRow/IssueDetail/IssueDetailJson structs. Extend both find_issue SQL queries. Add status display line after State in human view. 
New style_with_hex() helper converts hex color to ANSI 256.\n\n## Files\n- src/cli/commands/show.rs\n\n## Implementation\n\nAdd to IssueRow (private struct):\n status_name: Option, status_category: Option,\n status_color: Option, status_icon_name: Option,\n status_synced_at: Option\n\nUpdate BOTH find_issue SQL queries (with and without project filter) SELECT list — add after existing columns:\n i.status_name, i.status_category, i.status_color, i.status_icon_name, i.status_synced_at\nColumn indices: status_name=12, status_category=13, status_color=14, status_icon_name=15, status_synced_at=16\n\nRow mapping (after milestone_title: row.get(11)?):\n status_name: row.get(12)?, ..., status_synced_at: row.get(16)?\n\nAdd to IssueDetail (public struct) — same 5 fields\nAdd to IssueDetailJson — same 5 fields\nAdd to From<&IssueDetail> for IssueDetailJson — clone/copy fields\n\nHuman display in print_show_issue (after State line):\n if let Some(status) = &issue.status_name {\n let display = match &issue.status_category {\n Some(cat) => format!(\"{status} ({})\", cat.to_ascii_lowercase()),\n None => status.clone(),\n };\n println!(\"Status: {}\", style_with_hex(&display, issue.status_color.as_deref()));\n }\n\nNew helper:\n fn style_with_hex<'a>(text: &'a str, hex: Option<&str>) -> console::StyledObject<&'a str>\n Parses 6-char hex (strips #), converts via ansi256_from_rgb, falls back to unstyled\n\n## Acceptance Criteria\n- [ ] Human: \"Status: In progress (in_progress)\" shown after State line\n- [ ] Status colored by hex -> ANSI 256\n- [ ] Status line omitted when status_name IS NULL\n- [ ] Robot: all 5 fields present as null when no status\n- [ ] Robot: status_synced_at is integer (ms epoch) or null\n- [ ] Both SQL queries updated (with and without project filter)\n- [ ] cargo check --all-targets passes\n\n## TDD Loop\nRED: No new dedicated test file — verify via cargo test show (existing tests should still pass)\nGREEN: Add fields, SQL columns, display logic\nVERIFY: 
cargo test show && cargo check --all-targets\n\n## Edge Cases\n- Two separate SQL strings in find_issue — BOTH must be updated identically\n- Column indices are positional — count carefully from 0\n- style_with_hex: hex.len() == 6 check after trimming # prefix\n- Invalid hex -> fall back to unstyled (no panic)\n- NULL hex color -> fall back to unstyled\n- clippy: use let-chain for combined if conditions (if hex.len() == 6 && let (...) = ...)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:42:16.215984Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.420281Z","closed_at":"2026-02-11T07:21:33.420236Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1b91","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1b91","depends_on_id":"bd-3dum","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1cb","title":"[CP0] gi doctor command - health checks","description":"## Background\n\ndoctor is the primary diagnostic command. It checks all system components and reports their status. Supports JSON output for scripting and CI integration. Must degrade gracefully - warn about optional components (Ollama) without failing.\n\nReference: docs/prd/checkpoint-0.md section \"gi doctor\"\n\n## Approach\n\n**src/cli/commands/doctor.ts:**\n\nPerforms 5 checks:\n1. **Config**: Load and validate config file\n2. **Database**: Open DB, verify pragmas, check schema version\n3. **GitLab**: Auth with token, verify connectivity\n4. **Projects**: Count configured vs resolved in DB\n5. 
**Ollama**: Ping embedding endpoint (optional - warn if unavailable)\n\n**DoctorResult interface:**\n```typescript\ninterface DoctorResult {\n success: boolean; // All required checks passed\n checks: {\n config: { status: 'ok' | 'error'; path?: string; error?: string };\n database: { status: 'ok' | 'error'; path?: string; schemaVersion?: number; error?: string };\n gitlab: { status: 'ok' | 'error'; url?: string; username?: string; error?: string };\n projects: { status: 'ok' | 'error'; configured?: number; resolved?: number; error?: string };\n ollama: { status: 'ok' | 'warning' | 'error'; url?: string; model?: string; error?: string };\n };\n}\n```\n\n**Human-readable output (default):**\n```\ngi doctor\n\n Config ✓ Loaded from ~/.config/gi/config.json\n Database ✓ ~/.local/share/gi/data.db (schema v1)\n GitLab ✓ https://gitlab.example.com (authenticated as @johndoe)\n Projects ✓ 2 configured, 2 resolved\n Ollama ⚠ Not running (semantic search unavailable)\n\nStatus: Ready (lexical search available, semantic search requires Ollama)\n```\n\n**JSON output (--json flag):**\nOutputs DoctorResult as JSON to stdout\n\n## Acceptance Criteria\n\n- [ ] Config check: shows path and validation status\n- [ ] Database check: shows path, schema version, pragma verification\n- [ ] GitLab check: shows URL and authenticated username\n- [ ] Projects check: shows configured count and resolved count\n- [ ] Ollama check: warns if not running, doesn't fail overall\n- [ ] success=true only if config, database, gitlab, projects all ok\n- [ ] --json outputs valid JSON matching DoctorResult interface\n- [ ] Exit 0 if success=true, exit 1 if any required check fails\n- [ ] Colors and symbols in human output (✓, ⚠, ✗)\n\n## Files\n\nCREATE:\n- src/cli/commands/doctor.ts\n- src/types/doctor.ts (DoctorResult interface)\n\n## TDD Loop\n\nN/A - diagnostic command, verify with manual testing:\n\n```bash\n# All good\ngi doctor\n\n# JSON output\ngi doctor --json | jq .\n\n# With missing Ollama\n# 
(just don't run Ollama - should show warning)\n\n# With bad config\nmv ~/.config/gi/config.json ~/.config/gi/config.json.bak\ngi doctor # should show config error\n```\n\n## Edge Cases\n\n- Ollama timeout should be short (2s) - don't block on slow network\n- Ollama 404 (wrong model) vs connection refused (not running)\n- Database file exists but wrong schema version\n- Projects in config but not in database (init not run)\n- Token valid for user but project access revoked","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:51.435540Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:30:24.921206Z","closed_at":"2026-01-25T03:30:24.921041Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1cb","depends_on_id":"bd-13b","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1cb","depends_on_id":"bd-1l1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1cb","depends_on_id":"bd-3ng","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1cb","depends_on_id":"bd-epj","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -30,18 +32,20 @@ {"id":"bd-1d5","title":"[CP1] GitLab client pagination methods","description":"Add async generator methods for paginated GitLab API calls.\n\nMethods to add to src/gitlab/client.ts:\n- paginateIssues(gitlabProjectId, updatedAfter?) 
→ AsyncGenerator\n- paginateIssueDiscussions(gitlabProjectId, issueIid) → AsyncGenerator\n- requestWithHeaders(path) → { data: T, headers: Headers }\n\nImplementation:\n- Use scope=all, state=all for issues\n- Order by updated_at ASC\n- Follow X-Next-Page header until empty/absent\n- Apply cursor rewind (subtract cursorRewindSeconds) for tuple semantics\n- Fall back to empty-page detection if headers missing\n\nFiles: src/gitlab/client.ts\nTests: tests/unit/pagination.test.ts\nDone when: Pagination handles multiple pages and respects cursors","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:19:43.069869Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.156881Z","closed_at":"2026-01-25T15:21:35.156881Z","deleted_at":"2026-01-25T15:21:35.156877Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-1d6z","title":"Implement discussion tree + cross-reference widgets","description":"## Background\nThe discussion tree renders threaded conversations from GitLab issues/MRs using FrankenTUI's Tree widget. Cross-references show linked entities (closing MRs, related issues) as navigable links. 
Both are used in Issue Detail and MR Detail views.\n\n## Approach\nDiscussion Tree (view/common/discussion_tree.rs):\n- Wraps ftui Tree widget with TreePersistState for expand/collapse persistence\n- Tree structure: top-level discussions as roots, notes within discussion as children\n- Each node renders: author, timestamp (relative via Clock), note body (sanitized)\n- System notes rendered with muted style\n- Diff notes show file path + line reference\n- Keyboard: j/k navigate, Enter expand/collapse, Space toggle thread\n- Expand-on-demand: thread bodies loaded only when expanded (progressive hydration phase 3)\n\nCross-Reference (view/common/cross_ref.rs):\n- CrossRefWidget: renders list of entity references with type icon and navigable links\n- CrossRef struct: kind (ClosingMR, RelatedIssue, MentionedIn), entity_key (EntityKey), label (String)\n- Enter on a cross-ref navigates to that entity (pushes nav stack)\n- Renders as: \"Closing MR !42: Fix authentication flow\" with colored kind indicator\n\n## Acceptance Criteria\n- [ ] Discussion tree renders top-level discussions as expandable nodes\n- [ ] Notes within discussion shown as children with indentation\n- [ ] System notes visually distinguished (muted color)\n- [ ] Diff notes show file path context\n- [ ] Timestamps use injected Clock for deterministic rendering\n- [ ] All note text sanitized via sanitize_for_terminal()\n- [ ] Cross-references render with entity type icons\n- [ ] Enter on cross-ref navigates to entity detail\n- [ ] Tree state persists across navigation (expand/collapse remembered)\n\n## Files\n- CREATE: crates/lore-tui/src/view/common/discussion_tree.rs\n- CREATE: crates/lore-tui/src/view/common/cross_ref.rs\n\n## TDD Anchor\nRED: Write test_cross_ref_entity_key that creates a CrossRef with EntityKey::mr(1, 42), asserts kind and key are correct.\nGREEN: Implement CrossRef struct.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_cross_ref\n\n## Edge Cases\n- Deeply nested 
discussions (rare in GitLab but possible): limit indent depth to 4 levels\n- Very long note bodies: wrap text within tree node area\n- Empty discussions (resolved with no notes): show \"[resolved]\" indicator\n- Cross-references to entities not in local DB: show as non-navigable text\n\n## Dependency Context\nUses sanitize_for_terminal() from \"Implement terminal safety module\" task.\nUses Clock for timestamps from \"Implement Clock trait\" task.\nUses EntityKey, Screen from \"Implement core types\" task.\nUses NavigationStack from \"Implement NavigationStack\" task.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T16:58:49.765694Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:28.589883Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1d6z","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1d6z","depends_on_id":"bd-2lg6","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1d6z","depends_on_id":"bd-3ir1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1df9","title":"Epic: TUI Phase 4 — Operations","description":"## Background\nPhase 4 adds operational screens: Sync (real-time progress + post-sync summary), Doctor/Stats (health checks), and CLI integration (lore tui command for binary delegation). 
The Sync screen is the most complex — it needs real-time streaming progress with backpressure handling.\n\n## Acceptance Criteria\n- [ ] Sync screen shows real-time progress during sync with per-lane indicators\n- [ ] Sync summary shows exact changed entities after completion\n- [ ] Doctor screen shows environment health checks\n- [ ] Stats screen shows database statistics\n- [ ] CLI integration: lore tui launches lore-tui binary via runtime delegation","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:01:44.603447Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.361318Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1df9","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-1elx","title":"Implement run_embed_for_document_ids scoped embedding","description":"## Background\n\nCurrently `embed_documents()` in `src/embedding/pipeline.rs` uses `find_pending_documents()` to discover ALL documents that need embedding (no existing embedding, changed content_hash, or model mismatch). The surgical sync pipeline needs a scoped variant that only embeds specific document IDs — the ones returned by the scoped doc regeneration step (bd-hs6j).\n\nThe existing `embed_page()` private function handles the actual embedding work for a batch of `PendingDocument` structs. It calls `split_into_chunks`, sends batches to the OllamaClient, and writes embeddings + metadata to the DB. 
The scoped function can reuse this by constructing `PendingDocument` structs from the provided document IDs.\n\nKey types:\n- `PendingDocument { document_id: i64, content_text: String, content_hash: String }` (from `change_detector.rs`)\n- `EmbedResult { chunks_embedded, docs_embedded, failed, skipped }` (pipeline.rs:21)\n- `OllamaClient` for the actual embedding API calls\n- `ShutdownSignal` for cancellation support\n\n## Approach\n\nAdd `embed_documents_by_ids()` to `src/embedding/pipeline.rs`:\n\n```rust\npub struct EmbedForIdsResult {\n pub chunks_embedded: usize,\n pub docs_embedded: usize,\n pub failed: usize,\n pub skipped: usize,\n}\n\npub async fn embed_documents_by_ids(\n conn: &Connection,\n client: &OllamaClient,\n model_name: &str,\n concurrency: usize,\n document_ids: &[i64],\n signal: &ShutdownSignal,\n) -> Result\n```\n\nImplementation:\n1. If `document_ids` is empty, return immediately with zero counts.\n2. Load `PendingDocument` structs for the specified IDs. Query: `SELECT id, content_text, content_hash FROM documents WHERE id IN (...)`. Filter out documents that already have current embeddings (same content_hash, model, dims, chunk_max_bytes) — reuse the LEFT JOIN logic from `find_pending_documents` but with `WHERE d.id IN (?)` instead of `WHERE d.id > ?`.\n3. If no documents need embedding after filtering, return with skipped=len.\n4. Chunk into pages of `DB_PAGE_SIZE` (500).\n5. For each page, call `embed_page()` (reuse existing private function) within a SAVEPOINT.\n6. Handle cancellation via `signal.is_cancelled()` between pages.\n\nAlternative simpler approach: load all specified doc IDs into a temp table or use a parameterized IN clause, then let `embed_page` process them. 
Since the list is typically small (1-5 documents for surgical sync), a single page call suffices.\n\nExport from `src/embedding/mod.rs` if not already pub.\n\n## Acceptance Criteria\n\n- [ ] `embed_documents_by_ids` only embeds the specified document IDs, not all pending documents\n- [ ] Documents already embedded with current content_hash + model are skipped (not re-embedded)\n- [ ] Empty document_ids input returns immediately with zero counts\n- [ ] Cancellation via ShutdownSignal is respected between pages\n- [ ] SAVEPOINT/ROLLBACK semantics match existing `embed_documents` for data integrity\n- [ ] Ollama errors for individual documents are counted as failed, not fatal\n- [ ] Function is pub for use by orchestration (bd-1i4i)\n\n## Files\n\n- `src/embedding/pipeline.rs` (add new function + result struct)\n- `src/embedding/mod.rs` (export if needed)\n\n## TDD Anchor\n\nTests in `src/embedding/pipeline_tests.rs` (or new `src/embedding/scoped_embed_tests.rs`):\n\n```rust\n#[tokio::test]\nasync fn test_embed_by_ids_only_embeds_specified_docs() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n setup_ollama_mock(&mock).await;\n let client = OllamaClient::new(&mock.uri());\n\n // Insert 2 documents: A (id=1) and B (id=2)\n insert_test_document(&conn, 1, \"Content A\", \"hash_a\");\n insert_test_document(&conn, 2, \"Content B\", \"hash_b\");\n\n let signal = ShutdownSignal::new();\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1,\n &[1], // Only embed doc 1\n &signal,\n ).await.unwrap();\n\n assert_eq!(result.docs_embedded, 1);\n // Verify doc 1 has embeddings\n let count: i64 = conn.query_row(\n \"SELECT COUNT(*) FROM embedding_metadata WHERE document_id = 1\",\n [], |r| r.get(0),\n ).unwrap();\n assert!(count > 0);\n // Verify doc 2 has NO embeddings\n let count_b: i64 = conn.query_row(\n \"SELECT COUNT(*) FROM embedding_metadata WHERE document_id = 2\",\n [], |r| r.get(0),\n ).unwrap();\n 
assert_eq!(count_b, 0);\n}\n\n#[tokio::test]\nasync fn test_embed_by_ids_skips_already_embedded() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n setup_ollama_mock(&mock).await;\n let client = OllamaClient::new(&mock.uri());\n\n insert_test_document(&conn, 1, \"Content A\", \"hash_a\");\n let signal = ShutdownSignal::new();\n\n // Embed once\n embed_documents_by_ids(&conn, &client, \"nomic-embed-text\", 1, &[1], &signal).await.unwrap();\n // Embed again with same hash — should skip\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1, &[1], &signal,\n ).await.unwrap();\n assert_eq!(result.docs_embedded, 0);\n assert_eq!(result.skipped, 1);\n}\n\n#[tokio::test]\nasync fn test_embed_by_ids_empty_input() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n let client = OllamaClient::new(&mock.uri());\n let signal = ShutdownSignal::new();\n\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1, &[], &signal,\n ).await.unwrap();\n assert_eq!(result.docs_embedded, 0);\n assert_eq!(result.chunks_embedded, 0);\n}\n\n#[tokio::test]\nasync fn test_embed_by_ids_respects_cancellation() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n // Use delayed response to allow cancellation\n setup_slow_ollama_mock(&mock).await;\n let client = OllamaClient::new(&mock.uri());\n\n insert_test_document(&conn, 1, \"Content A\", \"hash_a\");\n let signal = ShutdownSignal::new();\n signal.cancel(); // Pre-cancel\n\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1, &[1], &signal,\n ).await.unwrap();\n assert_eq!(result.docs_embedded, 0);\n}\n```\n\n## Edge Cases\n\n- Document ID that does not exist in the documents table: query returns no rows, skipped silently.\n- Document with empty `content_text`: `split_into_chunks` may return 0 chunks, counted as skipped.\n- Ollama server unreachable: 
returns `OllamaUnavailable` error. Must not leave partial embeddings (SAVEPOINT rollback).\n- Very long document (>1500 bytes): gets chunked into multiple chunks by `split_into_chunks`. All chunks for one document must be embedded atomically.\n- Document already has embeddings but with different model: content_hash check passes but model mismatch detected — should re-embed.\n- Concurrent calls with overlapping document_ids: SAVEPOINT isolation prevents conflicts, last writer wins on embedding_metadata upsert.\n\n## Dependency Context\n\n- **Blocked by bd-hs6j**: Gets `document_ids` from scoped doc regeneration output\n- **Blocks bd-1i4i**: Orchestration function calls this as the final step of surgical sync\n- **Blocks bd-3jqx**: Integration tests verify embed isolation (only surgical docs get embedded)\n- **Uses existing internals**: `embed_page`, `PendingDocument`, `split_into_chunks`, `OllamaClient`, `ShutdownSignal`","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:16:43.680009Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:05:18.735382Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-1elx","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1elx","depends_on_id":"bd-3jqx","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-1ep","title":"Wire resource event fetching into sync pipeline","description":"## Background\nAfter issue/MR primary ingestion and discussion fetch, changed entities need resource_events jobs enqueued and drained. 
This is the integration point that connects the queue (bd-tir), API client (bd-sqw), DB upserts (bd-1uc), and config flag (bd-2e8).\n\n## Approach\nModify the sync pipeline to add two new phases after discussion sync:\n\n**Phase 1 — Enqueue during ingestion:**\nIn src/ingestion/orchestrator.rs, after each entity upsert (issue or MR), call:\n```rust\nif config.sync.fetch_resource_events {\n enqueue_job(conn, project_id, \"issue\", iid, local_id, \"resource_events\", None)?;\n}\n// For MRs, also enqueue mr_closes_issues (always) and mr_diffs (when fetchMrFileChanges)\n```\n\nThe \"changed entity\" detection uses the existing dirty tracker: if an entity was inserted or updated during this sync run, it gets enqueued. On --full sync, all entities are enqueued.\n\n**Phase 2 — Drain dependent queue:**\nAdd a new drain step in src/cli/commands/sync.rs (or new src/core/drain.rs), called after discussion sync:\n```rust\npub async fn drain_dependent_queue(\n conn: &Connection,\n client: &GitLabClient,\n config: &Config,\n progress: Option,\n) -> Result\n```\n\nFlow:\n1. reclaim_stale_locks(conn, config.sync.stale_lock_minutes)\n2. Loop: claim_jobs(conn, \"resource_events\", batch_size=10)\n3. For each job:\n a. Fetch 3 event types via client (fetch_issue_state_events etc.)\n b. Store via upsert functions (upsert_state_events etc.)\n c. complete_job(conn, job.id) on success\n d. fail_job(conn, job.id, error_msg) on failure\n4. Report progress: \"Fetching resource events... [N/M]\"\n5. 
Repeat until no more claimable jobs\n\n**Progress reporting:**\nAdd new ProgressEvent variants:\n```rust\nResourceEventsFetchStart { total: usize },\nResourceEventsFetchProgress { completed: usize, total: usize },\nResourceEventsFetchComplete { fetched: usize, failed: usize },\n```\n\n## Acceptance Criteria\n- [ ] Full sync enqueues resource_events jobs for all issues and MRs\n- [ ] Incremental sync only enqueues for entities changed since last sync\n- [ ] --no-events prevents enqueueing resource_events jobs\n- [ ] Drain step fetches all 3 event types per entity\n- [ ] Successful fetches stored and job completed\n- [ ] Failed fetches recorded with error, job retried on next sync\n- [ ] Stale locks reclaimed at drain start\n- [ ] Progress displayed: \"Fetching resource events... [N/M]\"\n- [ ] Robot mode progress suppressed (quiet mode)\n\n## Files\n- src/ingestion/orchestrator.rs (add enqueue calls during upsert)\n- src/cli/commands/sync.rs (add drain step after discussions)\n- src/core/drain.rs (new, optional — or inline in sync.rs)\n\n## TDD Loop\nRED: tests/sync_pipeline_tests.rs (or extend existing):\n- `test_sync_enqueues_resource_events_for_changed_entities` - mock sync, verify jobs enqueued\n- `test_sync_no_events_flag_skips_enqueue` - verify no jobs when flag false\n- `test_drain_completes_jobs_on_success` - mock API responses, verify jobs deleted\n- `test_drain_fails_jobs_on_error` - mock API failure, verify job attempts incremented\n\nNote: Full pipeline integration tests may need mock HTTP server. 
Start with unit tests on enqueue/drain logic using the real DB with mock API responses.\n\nGREEN: Implement enqueue hooks + drain step\n\nVERIFY: `cargo test sync -- --nocapture && cargo build`\n\n## Edge Cases\n- Entity deleted between enqueue and drain: API returns 404, fail_job with \"entity not found\" (retry won't help but backoff caps it)\n- Rate limiting during drain: GitLabRateLimited error should fail_job with retry (transient)\n- Network error during drain: GitLabNetworkError should fail_job with retry\n- Multiple sync runs competing: locked_at prevents double-processing; stale lock reclaim handles crashes\n- Drain should have a max iterations guard to prevent infinite loop if jobs keep failing and being retried within the same run","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:31:57.334527Z","created_by":"tayloreernisse","updated_at":"2026-02-03T17:46:51.336138Z","closed_at":"2026-02-03T17:46:51.336077Z","close_reason":"Implemented: enqueue + drain resource events in orchestrator, wired counts through ingest→sync pipeline, added progress events, 4 new tests, all 209 tests pass","compaction_level":0,"original_size":0,"labels":["gate-1","phase-b","pipeline"],"dependencies":[{"issue_id":"bd-1ep","depends_on_id":"bd-1uc","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ep","depends_on_id":"bd-2e8","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ep","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ep","depends_on_id":"bd-sqw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ep","depends_on_id":"bd-tir","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-1f5b","title":"Extract query functions from CLI to shared pub API","description":"## Background\nThe TUI's action.rs bridges to existing CLI 
query functions. To avoid code duplication, the existing query_* functions in cli/commands/*.rs need to be made pub so action.rs can call them. This is the minimal refactoring approach — no new domain query layer, just visibility changes.\n\n## Approach\nModify existing CLI command files to extract and expose query functions:\n- src/cli/commands/list.rs: make query_issues(), query_mrs() pub\n- src/cli/commands/show.rs: make query_issue_detail(), query_mr_detail() pub\n- src/cli/commands/who.rs: make query_experts(), query_workload(), query_reviews(), query_active(), query_overlap() pub\n- src/cli/commands/search.rs: make run_search_query() pub\n\nThese functions should take Connection + parameters and return Result. Any CLI-specific formatting logic stays in the CLI; only the pure query logic is extracted.\n\nIf a function mixes query + format logic, split it:\n1. query_X() -> Result, LoreError> (pure query, made pub)\n2. format_X(data: &[T]) -> String (CLI-only formatting, stays private)\n\n## Acceptance Criteria\n- [ ] query_issues() is pub and callable from outside cli module\n- [ ] query_mrs() is pub and callable\n- [ ] query_issue_detail() and query_mr_detail() are pub\n- [ ] query_experts() and other who functions are pub\n- [ ] run_search_query() is pub\n- [ ] Existing CLI behavior unchanged (no functional changes)\n- [ ] All extracted functions take Connection + params, return Result\n- [ ] cargo test passes (no regressions)\n\n## Files\n- MODIFY: src/cli/commands/list.rs (make query functions pub)\n- MODIFY: src/cli/commands/show.rs (make query functions pub)\n- MODIFY: src/cli/commands/who.rs (make query functions pub)\n- MODIFY: src/cli/commands/search.rs (make search query pub)\n\n## TDD Anchor\nRED: Write test in lore-tui action.rs that calls crate::cli::commands::list::query_issues() and asserts it compiles.\nGREEN: Make query_issues pub.\nVERIFY: cargo test --all-targets\n\n## Edge Cases\n- Some query functions may have Config dependencies — extract 
only the Connection-dependent parts\n- Visibility changes may expose functions that weren't designed for external use — review signatures\n- This is a non-breaking change (additive pub visibility)\n\n## Dependency Context\nThis modifies the main lore crate (stable Rust).\nRequired by all TUI action.rs query bridge functions.\nMust be completed before TUI can fetch real data.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:06:25.285403Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.713834Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1f5b","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-1f5b","title":"Extract query functions from CLI to shared pub API","description":"## Background\nThe TUI's action.rs bridges to existing CLI query functions. To avoid code duplication, query functions need to be made accessible to the TUI crate. The who module was refactored on master into src/cli/commands/who/ with types.rs, expert.rs, workload.rs, reviews.rs, active.rs, overlap.rs. Query functions are currently pub(super) — visible within the who module but not from external crates.\n\n## Approach\n\n### Phase A: Move shared types to core (who)\nMove src/cli/commands/who/types.rs content to src/core/who_types.rs (or src/core/who/types.rs). These are pure data structs with zero logic — WhoRun, WhoResolvedInput, WhoResult enum, ExpertResult, WorkloadResult, ReviewsResult, ActiveResult, OverlapResult, and all nested structs. CLI re-exports from core. 
TUI imports from core.\n\n### Phase B: Promote query function visibility (who)\nChange pub(super) to pub on the 5 query functions:\n- src/cli/commands/who/expert.rs: query_expert(conn, path, project_id, since_ms, as_of_ms, limit, scoring, detail, explain_score, include_bots)\n- src/cli/commands/who/workload.rs: query_workload(conn, username, project_id, since_ms, limit, include_closed)\n- src/cli/commands/who/reviews.rs: query_reviews(conn, username, project_id, since_ms)\n- src/cli/commands/who/active.rs: query_active(conn, project_id, since_ms, limit, include_closed)\n- src/cli/commands/who/overlap.rs: query_overlap(conn, path, project_id, since_ms, limit)\n\nAlso promote helper: half_life_decay in expert.rs (pub(super) -> pub).\n\n### Phase C: Other command extractions\n- src/cli/commands/list.rs: make query_issues(), query_mrs() pub\n- src/cli/commands/show.rs: make query_issue_detail(), query_mr_detail() pub\n- src/cli/commands/search.rs: make run_search_query() pub\n- src/cli/commands/file_history.rs: extract run_file_history() query logic to pub fn (currently takes Config for DB path; split into query-only fn taking Connection)\n- src/cli/commands/trace.rs: make parse_trace_path() pub\n\n### Phase D: Re-export from who module\nUpdate src/cli/commands/who/mod.rs to re-export query functions as pub (not just pub(super)):\n```rust\npub use expert::query_expert;\npub use workload::query_workload;\npub use reviews::query_reviews;\npub use active::query_active;\npub use overlap::query_overlap;\n```\n\n## Acceptance Criteria\n- [ ] WhoResult, ExpertResult, WorkloadResult, ReviewsResult, ActiveResult, OverlapResult, and all nested structs live in src/core/ (not CLI)\n- [ ] CLI who module imports types from core (no duplication)\n- [ ] query_expert, query_workload, query_reviews, query_active, query_overlap are pub and callable from TUI crate\n- [ ] query_issues(), query_mrs() are pub\n- [ ] query_issue_detail(), query_mr_detail() are pub\n- [ ] run_search_query() is 
pub\n- [ ] run_file_history() query logic available as pub fn taking Connection (not Config)\n- [ ] parse_trace_path() is pub\n- [ ] Existing CLI behavior unchanged (no functional changes)\n- [ ] cargo test passes (no regressions)\n- [ ] cargo check --all-targets passes\n\n## Files\n- CREATE: src/core/who_types.rs (move types from who/types.rs)\n- MODIFY: src/core/mod.rs (add pub mod who_types)\n- MODIFY: src/cli/commands/who/types.rs (re-export from core)\n- MODIFY: src/cli/commands/who/mod.rs (pub use query functions)\n- MODIFY: src/cli/commands/who/expert.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/workload.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/reviews.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/active.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/who/overlap.rs (pub(super) -> pub)\n- MODIFY: src/cli/commands/list.rs (make query functions pub)\n- MODIFY: src/cli/commands/show.rs (make query functions pub)\n- MODIFY: src/cli/commands/search.rs (make search query pub)\n- MODIFY: src/cli/commands/file_history.rs (extract query logic)\n- MODIFY: src/cli/commands/trace.rs (make parse_trace_path pub)\n\n## TDD Anchor\nRED: In lore-tui action.rs, write test that imports lore::core::who_types::ExpertResult and lore::cli::commands::who::query_expert — assert it compiles.\nGREEN: Move types to core, promote visibility.\nVERIFY: cargo test --all-targets && cargo check --all-targets\n\n## Edge Cases\n- ScoringConfig dependency: query_expert takes &ScoringConfig from src/core/config.rs — TUI has access via Config\n- include_closed: only affects query_workload and query_active — other modes ignore it\n- file_history.rs run_file_history takes Config for DB path resolution — split into query_file_history(conn, ...) + run_file_history(config, ...) wrapper\n- Visibility changes are additive (non-breaking) — existing callers unaffected\n\n## Dependency Context\nThis modifies the main lore crate (stable Rust). 
The who module was refactored on master from a single who.rs file into src/cli/commands/who/ with types.rs + 5 mode files. Types are already cleanly separated in types.rs, making the move to core mechanical.\nRequired by: Who screen (bd-u7se), Trace screen (bd-2uzm), File History screen (bd-1up1), and all other TUI action.rs query bridges.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:06:25.285403Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:31:43.615250Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1f5b","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1fn","title":"[CP1] Integration tests for discussion watermark","description":"Integration tests verifying discussion sync watermark behavior.\n\n## Tests (tests/discussion_watermark_tests.rs)\n\n- skips_discussion_fetch_when_updated_at_unchanged\n- fetches_discussions_when_updated_at_advanced\n- updates_watermark_after_successful_discussion_sync\n- does_not_update_watermark_on_discussion_sync_failure\n\n## Test Scenario\n1. Ingest issue with updated_at = T1\n2. Verify discussions_synced_for_updated_at = T1\n3. Re-run ingest with same issue (updated_at = T1)\n4. Verify NO discussion API calls made (watermark prevents)\n5. Simulate issue update (updated_at = T2)\n6. Re-run ingest\n7. Verify discussion API calls made for T2\n8. Verify watermark updated to T2\n\n## Why This Matters\nDiscussion API is expensive (1 call per issue). 
Watermark ensures\nwe only refetch when issue actually changed, even with cursor rewind.\n\nFiles: tests/discussion_watermark_tests.rs\nDone when: Watermark correctly prevents redundant discussion refetch","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:59:11.362495Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:02.086158Z","closed_at":"2026-01-25T17:02:02.086158Z","deleted_at":"2026-01-25T17:02:02.086154Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-1gu","title":"[CP0] gi auth-test command","description":"## Background\n\nauth-test is a quick diagnostic command to verify GitLab connectivity. Used for troubleshooting and CI pipelines. Simpler than doctor because it only checks auth, not full system health.\n\nReference: docs/prd/checkpoint-0.md section \"gi auth-test\"\n\n## Approach\n\n**src/cli/commands/auth-test.ts:**\n```typescript\nimport { Command } from 'commander';\nimport { loadConfig } from '../../core/config';\nimport { GitLabClient } from '../../gitlab/client';\nimport { TokenNotSetError } from '../../core/errors';\n\nexport const authTestCommand = new Command('auth-test')\n .description('Verify GitLab authentication')\n .action(async (options, command) => {\n const globalOpts = command.optsWithGlobals();\n \n // 1. Load config\n const config = loadConfig(globalOpts.config);\n \n // 2. Get token from environment\n const token = process.env[config.gitlab.tokenEnvVar];\n if (!token) {\n throw new TokenNotSetError(config.gitlab.tokenEnvVar);\n }\n \n // 3. Create client and test auth\n const client = new GitLabClient({\n baseUrl: config.gitlab.baseUrl,\n token,\n });\n \n // 4. Get current user\n const user = await client.getCurrentUser();\n \n // 5. 
Output success\n console.log(`Authenticated as @${user.username} (${user.name})`);\n console.log(`GitLab: ${config.gitlab.baseUrl}`);\n });\n```\n\n**Output format:**\n```\nAuthenticated as @johndoe (John Doe)\nGitLab: https://gitlab.example.com\n```\n\n## Acceptance Criteria\n\n- [ ] Loads config from default or --config path\n- [ ] Gets token from configured env var (default GITLAB_TOKEN)\n- [ ] Throws TokenNotSetError if env var not set\n- [ ] Calls GET /api/v4/user to verify auth\n- [ ] Prints username and display name on success\n- [ ] Exit 0 on success\n- [ ] Exit 1 on auth failure (GitLabAuthError)\n- [ ] Exit 1 if config not found (ConfigNotFoundError)\n\n## Files\n\nCREATE:\n- src/cli/commands/auth-test.ts\n\n## TDD Loop\n\nN/A - simple command, verify manually and with integration test in init.test.ts\n\n```bash\n# Manual verification\nexport GITLAB_TOKEN=\"valid-token\"\ngi auth-test\n\n# With invalid token\nexport GITLAB_TOKEN=\"invalid\"\ngi auth-test # should exit 1\n```\n\n## Edge Cases\n\n- Config exists but token env var not set - clear error message\n- Token exists but wrong scopes - GitLabAuthError (401)\n- Network unreachable - GitLabNetworkError\n- Token with extra whitespace - should trim","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:51.135580Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:28:16.369542Z","closed_at":"2026-01-25T03:28:16.369481Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1gu","depends_on_id":"bd-13b","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1gu","depends_on_id":"bd-1l1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1gvg","title":"Implement status fetcher with adaptive paging and pagination guard","description":"## Background\nWith the GraphQL client in place, we need a status-specific fetcher that paginates through all issues in a project, 
extracts status widgets via __typename matching, and handles edge cases like complexity errors and cursor stalls.\n\n## Approach\nAll code goes in src/gitlab/graphql.rs alongside GraphqlClient. The fetcher uses the workItems(types:[ISSUE]) resolver (NOT project.issues which returns the old Issue type without status widgets). Widget matching uses __typename == \"WorkItemWidgetStatus\" for deterministic identification.\n\n## Files\n- src/gitlab/graphql.rs (add to existing file created by bd-2dlt)\n\n## Implementation\n\nConstants:\n ISSUE_STATUS_QUERY: GraphQL query string with $projectPath, $after, $first variables\n PAGE_SIZES: &[u32] = &[100, 50, 25, 10]\n\nPrivate deserialization types:\n WorkItemsResponse { project: Option }\n ProjectNode { work_items: Option } (serde rename workItems)\n WorkItemConnection { nodes: Vec, page_info: PageInfo } (serde rename pageInfo)\n WorkItemNode { iid: String, widgets: Vec }\n PageInfo { end_cursor: Option, has_next_page: bool } (serde renames)\n StatusWidget { status: Option }\n\nPublic types:\n UnsupportedReason enum: GraphqlEndpointMissing, AuthForbidden (Debug, Clone)\n FetchStatusResult struct:\n statuses: HashMap\n all_fetched_iids: HashSet\n unsupported_reason: Option\n partial_error_count: usize\n first_partial_error: Option\n\nis_complexity_or_timeout_error(msg) -> bool: lowercase contains \"complexity\" or \"timeout\"\n\nfetch_issue_statuses(client, project_path) -> Result:\n Pagination loop:\n 1. Build variables with current page_size from PAGE_SIZES[page_size_idx]\n 2. Call client.query() — match errors:\n - GitLabNotFound -> Ok(empty + GraphqlEndpointMissing) + warn\n - GitLabAuthFailed -> Ok(empty + AuthForbidden) + warn \n - Other with complexity/timeout msg -> reduce page_size_idx, continue (retry same cursor)\n - Other with smallest page size exhausted -> return Err\n - Other -> return Err\n 3. Track partial errors from GraphqlQueryResult\n 4. Parse response into WorkItemsResponse\n 5. 
For each node: parse iid to i64, add to all_fetched_iids, check widgets for __typename == \"WorkItemWidgetStatus\", insert status into map\n 6. Reset page_size_idx to 0 after successful page\n 7. Pagination guard: if has_next_page but new cursor == old cursor or is None, warn + break\n 8. Update cursor, continue loop\n\n## Acceptance Criteria\n- [ ] Paginates: 2-page mock returns all statuses + all IIDs\n- [ ] No status widget: IID in all_fetched_iids but not in statuses\n- [ ] Status widget with null status: IID in all_fetched_iids but not in statuses\n- [ ] 404 -> Ok(empty, unsupported_reason: GraphqlEndpointMissing)\n- [ ] 403 -> Ok(empty, unsupported_reason: AuthForbidden)\n- [ ] Success -> unsupported_reason: None\n- [ ] __typename != \"WorkItemWidgetStatus\" -> ignored, no error\n- [ ] Cursor stall (same endCursor twice) -> aborts, returns partial result\n- [ ] Complexity error at first=100 -> retries at 50, succeeds\n- [ ] Timeout error -> reduces page size\n- [ ] All page sizes fail -> returns Err\n- [ ] After successful page, next page starts at first=100 again\n- [ ] Partial-data pages -> partial_error_count incremented, first_partial_error captured\n\n## TDD Loop\nRED: test_fetch_statuses_pagination, test_fetch_statuses_no_status_widget, test_fetch_statuses_404_graceful, test_fetch_statuses_403_graceful, test_typename_matching_ignores_non_status_widgets, test_fetch_statuses_cursor_stall_aborts, test_fetch_statuses_complexity_error_reduces_page_size, test_fetch_statuses_timeout_error_reduces_page_size, test_fetch_statuses_smallest_page_still_fails, test_fetch_statuses_page_size_resets_after_success, test_fetch_statuses_unsupported_reason_none_on_success, test_fetch_statuses_partial_errors_tracked\n Adaptive tests: mock must inspect $first variable in request body to return different responses per page size\nGREEN: Implement all types + fetch_issue_statuses function\nVERIFY: cargo test fetch_statuses && cargo test typename\n\n## Edge Cases\n- GraphQL 
returns iid as String — parse to i64\n- widgets is Vec — match __typename field, then deserialize matching widgets\n- let-chain syntax: if is_status_widget && let Ok(sw) = serde_json::from_value::(...)\n- Pagination guard: new_cursor.is_none() || new_cursor == cursor\n- Page size resets to 0 (index into PAGE_SIZES) after each successful page\n- FetchStatusResult is NOT Clone — test fields individually","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:42:00.388137Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.418490Z","closed_at":"2026-02-11T07:21:33.418451Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1gvg","depends_on_id":"bd-2dlt","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1gvg","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-1h3f","title":"Add rename awareness to path resolution probes","description":"## Background\nThe path resolution layer (build_path_query at who.rs:467 and suffix_probe at who.rs:596) only checks position_new_path and new_path. If a user queries an old filename (e.g., 'login.rs' after rename to 'auth.rs'), the probes return 'not found' and scoring never runs — even though the scoring SQL (bd-1hoq) now matches old_path.\n\n## Approach\n\n### build_path_query() changes (who.rs:467):\n\nProbe 1 (exact_exists):\n- Notes query: add OR position_old_path = ?1\n- File changes query: add OR old_path = ?1\n\nProbe 2 (prefix_exists):\n- Notes query: add OR position_old_path LIKE ?1 ESCAPE '\\\\'\n- File changes query: add OR old_path LIKE ?1 ESCAPE '\\\\'\n\nNote: These probes use simple OR (not UNION ALL) since they only check existence (SELECT 1 ... 
LIMIT 1) — no risk of planner degradation on single-row probes.\n\n### suffix_probe() changes (who.rs:596):\n\nAdd two UNION branches to the existing query:\n```sql\nUNION\nSELECT position_old_path AS full_path FROM notes\nWHERE note_type = 'DiffNote' AND is_system = 0\n AND position_old_path IS NOT NULL\n AND (position_old_path LIKE ?1 ESCAPE '\\\\' OR position_old_path = ?2)\n AND (?3 IS NULL OR project_id = ?3)\nUNION\nSELECT old_path AS full_path FROM mr_file_changes\nWHERE old_path IS NOT NULL\n AND (old_path LIKE ?1 ESCAPE '\\\\' OR old_path = ?2)\n AND (?3 IS NULL OR project_id = ?3)\n```\n\nUse UNION (not UNION ALL) — the existing query uses UNION for dedup.\n\n## TDD Loop\n\n### RED (write first):\n```rust\n#[test]\nfn test_old_path_probe_exact_and_prefix() {\n let conn = setup_test_db();\n insert_project(&conn, 1, \"team/backend\");\n insert_mr(&conn, 1, 1, 100, \"alice\", \"merged\");\n insert_file_change_with_old_path(&conn, 1, 1, \"src/new/foo.rs\", Some(\"src/old/foo.rs\"), \"renamed\");\n insert_discussion(&conn, 1, 1, Some(1), None, true, false);\n insert_diffnote_at(&conn, 1, 1, 1, \"alice\", \"src/new/foo.rs\", Some(\"src/old/foo.rs\"), \"review comment\", now_ms());\n\n // Exact probe by OLD path should resolve\n let pq = build_path_query(&conn, \"src/old/foo.rs\", None).unwrap();\n assert\\!(matches\\!(pq, PathQuery::Exact { .. } | PathQuery::Prefix { .. }));\n\n // Prefix probe by OLD directory should resolve\n let pq = build_path_query(&conn, \"src/old/\", None).unwrap();\n assert\\!(matches\\!(pq, PathQuery::Prefix { .. }));\n\n // New path still works\n let pq = build_path_query(&conn, \"src/new/foo.rs\", None).unwrap();\n assert\\!(matches\\!(pq, PathQuery::Exact { .. 
}));\n}\n\n#[test]\nfn test_suffix_probe_uses_old_path_sources() {\n let conn = setup_test_db();\n insert_project(&conn, 1, \"team/backend\");\n insert_mr(&conn, 1, 1, 100, \"alice\", \"merged\");\n insert_file_change_with_old_path(&conn, 1, 1, \"src/utils.rs\", Some(\"legacy/utils.rs\"), \"renamed\");\n\n let result = suffix_probe(&conn, \"utils.rs\", None).unwrap();\n match result {\n SuffixResult::Ambiguous(paths) => {\n assert\\!(paths.contains(&\"src/utils.rs\".to_string()));\n assert\\!(paths.contains(&\"legacy/utils.rs\".to_string()));\n }\n SuffixResult::Unique(p) => {\n assert\\!(p == \"src/utils.rs\" || p == \"legacy/utils.rs\");\n }\n other => panic\\!(\"Expected Ambiguous or Unique, got {other:?}\"),\n }\n}\n```\n\n### GREEN: Add OR old_path clauses to probes + UNION branches to suffix_probe.\n### VERIFY: cargo test -p lore -- test_old_path_probe test_suffix_probe_uses_old_path\n\n## Acceptance Criteria\n- [ ] test_old_path_probe_exact_and_prefix passes\n- [ ] test_suffix_probe_uses_old_path_sources passes\n- [ ] Existing path probe tests still pass\n- [ ] No changes to PathQuery or SuffixResult enums\n\n## Files\n- MODIFY: src/cli/commands/who.rs (build_path_query at line 467, suffix_probe at line 596)\n\n## Edge Cases\n- position_old_path can be NULL — OR clause handles naturally (NULL \\!= ?1)\n- Old path might match multiple new paths (copy+rename) — suffix_probe Ambiguous handles this\n- Requires insert_file_change_with_old_path and insert_diffnote_at helpers from 
bd-2yu5","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-09T16:59:51.706482Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:47:43.416027Z","compaction_level":0,"original_size":0,"labels":["scoring"],"dependencies":[{"issue_id":"bd-1h3f","depends_on_id":"bd-2ao4","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"},{"issue_id":"bd-1h3f","depends_on_id":"bd-2yu5","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"}]} +{"id":"bd-1h3f","title":"Add rename awareness to path resolution probes","description":"## Background\nThe path resolution layer (build_path_query at who.rs:467 and suffix_probe at who.rs:596) only checks position_new_path and new_path. If a user queries an old filename (e.g., 'login.rs' after rename to 'auth.rs'), the probes return 'not found' and scoring never runs — even though the scoring SQL (bd-1hoq) now matches old_path.\n\n## Approach\n\n### build_path_query() changes (who.rs:467):\n\nProbe 1 (exact_exists):\n- Notes query: add OR position_old_path = ?1\n- File changes query: add OR old_path = ?1\n\nProbe 2 (prefix_exists):\n- Notes query: add OR position_old_path LIKE ?1 ESCAPE '\\\\'\n- File changes query: add OR old_path LIKE ?1 ESCAPE '\\\\'\n\nNote: These probes use simple OR (not UNION ALL) since they only check existence (SELECT 1 ... 
LIMIT 1) — no risk of planner degradation on single-row probes.\n\n### suffix_probe() changes (who.rs:596):\n\nAdd two UNION branches to the existing query:\n```sql\nUNION\nSELECT position_old_path AS full_path FROM notes\nWHERE note_type = 'DiffNote' AND is_system = 0\n AND position_old_path IS NOT NULL\n AND (position_old_path LIKE ?1 ESCAPE '\\\\' OR position_old_path = ?2)\n AND (?3 IS NULL OR project_id = ?3)\nUNION\nSELECT old_path AS full_path FROM mr_file_changes\nWHERE old_path IS NOT NULL\n AND (old_path LIKE ?1 ESCAPE '\\\\' OR old_path = ?2)\n AND (?3 IS NULL OR project_id = ?3)\n```\n\nUse UNION (not UNION ALL) — the existing query uses UNION for dedup.\n\n## TDD Loop\n\n### RED (write first):\n```rust\n#[test]\nfn test_old_path_probe_exact_and_prefix() {\n let conn = setup_test_db();\n insert_project(&conn, 1, \"team/backend\");\n insert_mr(&conn, 1, 1, 100, \"alice\", \"merged\");\n insert_file_change_with_old_path(&conn, 1, 1, \"src/new/foo.rs\", Some(\"src/old/foo.rs\"), \"renamed\");\n insert_discussion(&conn, 1, 1, Some(1), None, true, false);\n insert_diffnote_at(&conn, 1, 1, 1, \"alice\", \"src/new/foo.rs\", Some(\"src/old/foo.rs\"), \"review comment\", now_ms());\n\n // Exact probe by OLD path should resolve\n let pq = build_path_query(&conn, \"src/old/foo.rs\", None).unwrap();\n assert\\!(matches\\!(pq, PathQuery::Exact { .. } | PathQuery::Prefix { .. }));\n\n // Prefix probe by OLD directory should resolve\n let pq = build_path_query(&conn, \"src/old/\", None).unwrap();\n assert\\!(matches\\!(pq, PathQuery::Prefix { .. }));\n\n // New path still works\n let pq = build_path_query(&conn, \"src/new/foo.rs\", None).unwrap();\n assert\\!(matches\\!(pq, PathQuery::Exact { .. 
}));\n}\n\n#[test]\nfn test_suffix_probe_uses_old_path_sources() {\n let conn = setup_test_db();\n insert_project(&conn, 1, \"team/backend\");\n insert_mr(&conn, 1, 1, 100, \"alice\", \"merged\");\n insert_file_change_with_old_path(&conn, 1, 1, \"src/utils.rs\", Some(\"legacy/utils.rs\"), \"renamed\");\n\n let result = suffix_probe(&conn, \"utils.rs\", None).unwrap();\n match result {\n SuffixResult::Ambiguous(paths) => {\n assert\\!(paths.contains(&\"src/utils.rs\".to_string()));\n assert\\!(paths.contains(&\"legacy/utils.rs\".to_string()));\n }\n SuffixResult::Unique(p) => {\n assert\\!(p == \"src/utils.rs\" || p == \"legacy/utils.rs\");\n }\n other => panic\\!(\"Expected Ambiguous or Unique, got {other:?}\"),\n }\n}\n```\n\n### GREEN: Add OR old_path clauses to probes + UNION branches to suffix_probe.\n### VERIFY: cargo test -p lore -- test_old_path_probe test_suffix_probe_uses_old_path\n\n## Acceptance Criteria\n- [ ] test_old_path_probe_exact_and_prefix passes\n- [ ] test_suffix_probe_uses_old_path_sources passes\n- [ ] Existing path probe tests still pass\n- [ ] No changes to PathQuery or SuffixResult enums\n\n## Files\n- MODIFY: src/cli/commands/who.rs (build_path_query at line 467, suffix_probe at line 596)\n\n## Edge Cases\n- position_old_path can be NULL — OR clause handles naturally (NULL \\!= ?1)\n- Old path might match multiple new paths (copy+rename) — suffix_probe Ambiguous handles this\n- Requires insert_file_change_with_old_path and insert_diffnote_at helpers from bd-2yu5","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T16:59:51.706482Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:43:04.411615Z","closed_at":"2026-02-12T20:43:04.411575Z","close_reason":"Implemented by time-decay swarm: 3 agents, 12 tasks, 621 tests passing, all quality gates 
green","compaction_level":0,"original_size":0,"labels":["scoring"],"dependencies":[{"issue_id":"bd-1h3f","depends_on_id":"bd-2ao4","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1h3f","depends_on_id":"bd-2yu5","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-1hj","title":"[CP1] Ingestion orchestrator","description":"Coordinate issue + dependent discussion sync with bounded concurrency.\n\n## Module\nsrc/ingestion/orchestrator.rs\n\n## Canonical Pattern (CP1)\n\nWhen gi ingest --type=issues runs:\n\n1. **Ingest issues** - cursor-based with incremental cursor updates per page\n2. **Collect touched issues** - record IssueForDiscussionSync for each issue passing cursor filter\n3. **Filter for discussion sync** - enqueue issues where:\n issue.updated_at > issues.discussions_synced_for_updated_at\n4. **Execute discussion sync** - with bounded concurrency (dependent_concurrency from config)\n5. **Update watermark** - after each issue's discussions successfully ingested\n\n## Concurrency Notes\n\nRuntime decision: Use single-threaded Tokio runtime (flavor = \"current_thread\")\n- rusqlite::Connection is !Send, conflicts with multi-threaded runtimes\n- Single-threaded avoids Send bounds entirely\n- Use tokio::task::spawn_local + LocalSet for concurrent discussion fetches\n- Keeps code simple; can upgrade to channel-based DB writer in CP2 if needed\n\n## Configuration Used\n- config.sync.dependent_concurrency - limits parallel discussion requests\n- config.sync.cursor_rewind_seconds - safety margin for cursor\n\n## Progress Reporting\n- Show total issues fetched\n- Show issues needing discussion sync\n- Show discussion/note counts per project\n\nFiles: src/ingestion/orchestrator.rs\nTests: Integration tests with mocked GitLab\nDone when: Full issue + discussion ingestion orchestrated 
correctly","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T16:57:57.325679Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.851047Z","closed_at":"2026-01-25T17:02:01.851047Z","deleted_at":"2026-01-25T17:02:01.851043Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} -{"id":"bd-1hoq","title":"Restructure expert SQL with CTE-based dual-path matching","description":"## Background\nThe current query_expert() at who.rs:641 uses a 4-signal UNION ALL that only matches position_new_path and new_path, with flat COUNT-based scoring computed entirely in SQL. The new model needs dual-path matching, 5 signal types, state-aware timestamps, and returns per-signal rows for Rust-side decay computation (bd-13q8).\n\n## Approach\n**Important**: This bead builds the new SQL as a separate function WITHOUT modifying query_expert() yet. bd-13q8 wires it into query_expert(). This keeps this bead independently testable.\n\nAdd a new function:\n```rust\n/// Build the CTE-based expert scoring SQL for a given path query mode.\n/// Returns SQL string. Params: ?1=path, ?2=since_ms, ?3=project_id, ?4=as_of_ms, ?5=closed_mr_multiplier, ?6=reviewer_min_note_chars\nfn build_expert_sql(path_op: &str) -> String {\n // ... format the SQL with {path_op} inlined, all config values as bound params\n}\n```\n\n### SQL structure (8 CTEs + final SELECT):\n1. **matched_notes_raw**: UNION ALL on position_new_path + position_old_path\n2. **matched_notes**: DISTINCT dedup by id\n3. **matched_file_changes_raw**: UNION ALL on new_path + old_path\n4. **matched_file_changes**: DISTINCT dedup by (merge_request_id, project_id)\n5. **mr_activity**: Centralized state-aware timestamps AND state_mult. Joins merge_requests via matched_file_changes. 
Computes:\n - activity_ts: CASE WHEN state='merged' THEN COALESCE(merged_at, created_at) WHEN state='closed' THEN COALESCE(closed_at, created_at) ELSE COALESCE(updated_at, created_at) END\n - state_mult: CASE WHEN state='closed' THEN ?5 ELSE 1.0 END\n6. **reviewer_participation**: substantive DiffNotes WHERE LENGTH(TRIM(body)) >= ?6\n7. **raw**: 5 signals (diffnote_reviewer, diffnote_author, file_author, file_reviewer_participated, file_reviewer_assigned). Signals 1-2 compute state_mult inline. Signals 3-4a-4b reference mr_activity.\n8. **aggregated**: MR-level GROUP BY + note_group with COUNT\n\n### Returns 6 columns: (username TEXT, signal TEXT, mr_id INTEGER, qty INTEGER, ts INTEGER, state_mult REAL)\n\nSee plans/time-decay-expert-scoring.md section 3 for the full SQL template.\n\n## TDD Loop\n\n### RED (write first):\n```rust\n#[test]\nfn test_expert_sql_returns_expected_signal_rows() {\n let conn = setup_test_db();\n insert_project(&conn, 1, \"team/backend\");\n insert_mr(&conn, 1, 1, 100, \"alice\", \"merged\");\n insert_file_change(&conn, 1, 1, \"src/app.rs\", \"modified\");\n insert_reviewer(&conn, 1, \"bob\");\n insert_reviewer(&conn, 1, \"carol\");\n insert_discussion(&conn, 1, 1, Some(1), None, true, false);\n insert_diffnote(&conn, 1, 1, 1, \"carol\", \"src/app.rs\", \"This needs error handling for the edge case\");\n\n let sql = build_expert_sql(\"= ?1\");\n let mut stmt = conn.prepare(&sql).unwrap();\n let rows: Vec<(String, String, i64, i64, i64, f64)> = stmt\n .query_map(\n rusqlite::params![\"src/app.rs\", 0_i64, Option::::None, now_ms() + 1000, 0.5_f64, 20_i64],\n |row| Ok((\n row.get(0).unwrap(), row.get(1).unwrap(), row.get(2).unwrap(),\n row.get(3).unwrap(), row.get(4).unwrap(), row.get(5).unwrap(),\n ))\n ).unwrap().filter_map(|r| r.ok()).collect();\n\n // alice: file_author\n assert!(rows.iter().any(|(u, s, ..)| u == \"alice\" && s == \"file_author\"));\n // carol: file_reviewer_participated (left substantive DiffNote)\n 
assert!(rows.iter().any(|(u, s, ..)| u == \"carol\" && s == \"file_reviewer_participated\"));\n // bob: file_reviewer_assigned (no DiffNotes)\n assert!(rows.iter().any(|(u, s, ..)| u == \"bob\" && s == \"file_reviewer_assigned\"));\n // carol: note_group\n assert!(rows.iter().any(|(u, s, ..)| u == \"carol\" && s == \"note_group\"));\n // alice: diffnote_author\n assert!(rows.iter().any(|(u, s, ..)| u == \"alice\" && s == \"diffnote_author\"));\n // All merged rows have state_mult = 1.0\n assert!(rows.iter().all(|(.., sm)| (sm - 1.0).abs() < f64::EPSILON));\n}\n```\n\n### GREEN: Implement build_expert_sql() with the 8 CTEs.\n### VERIFY: cargo test -p lore -- test_expert_sql_returns_expected_signal_rows\n\n## Acceptance Criteria\n- [ ] test_expert_sql_returns_expected_signal_rows passes (all 5 signal types correct)\n- [ ] SQL compiles against :memory: DB with indexes from bd-2ao4 (migration 026)\n- [ ] 6 columns returned: username, signal, mr_id, qty, ts, state_mult (REAL, not TEXT)\n- [ ] 6 SQL params: ?1=path, ?2=since_ms, ?3=project_id, ?4=as_of_ms, ?5=closed_mr_multiplier, ?6=reviewer_min_note_chars\n- [ ] mr_activity CTE centralizes timestamp + state_mult (not repeated)\n- [ ] reviewer_participation uses ?6 not inlined literal\n- [ ] Existing query_expert() and all existing tests UNTOUCHED\n- [ ] build_expert_sql() is a pure function (no Connection param)\n\n## Files\n- MODIFY: src/cli/commands/who.rs (new build_expert_sql function + test, placed near query_expert at line ~641)\n\n## Edge Cases\n- ?5 (closed_mr_multiplier) bound as f64 — rusqlite handles this\n- ?6 (reviewer_min_note_chars) bound as i64 — SQLite LENGTH returns integer\n- Signals 1-2 compute state_mult inline (join through discussions, not mr_activity)\n- COALESCE fallback to created_at for NULL merged_at/closed_at/updated_at\n- Dedup in matched_notes/matched_file_changes prevents 
double-counting","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-09T16:59:44.665314Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:47:18.871566Z","compaction_level":0,"original_size":0,"labels":["scoring"],"dependencies":[{"issue_id":"bd-1hoq","depends_on_id":"bd-1soz","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"},{"issue_id":"bd-1hoq","depends_on_id":"bd-2ao4","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"},{"issue_id":"bd-1hoq","depends_on_id":"bd-2w1p","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"}]} +{"id":"bd-1hoq","title":"Restructure expert SQL with CTE-based dual-path matching","description":"## Background\nThe current query_expert() at who.rs:641 uses a 4-signal UNION ALL that only matches position_new_path and new_path, with flat COUNT-based scoring computed entirely in SQL. The new model needs dual-path matching, 5 signal types, state-aware timestamps, and returns per-signal rows for Rust-side decay computation (bd-13q8).\n\n## Approach\n**Important**: This bead builds the new SQL as a separate function WITHOUT modifying query_expert() yet. bd-13q8 wires it into query_expert(). This keeps this bead independently testable.\n\nAdd a new function:\n```rust\n/// Build the CTE-based expert scoring SQL for a given path query mode.\n/// Returns SQL string. Params: ?1=path, ?2=since_ms, ?3=project_id, ?4=as_of_ms, ?5=closed_mr_multiplier, ?6=reviewer_min_note_chars\nfn build_expert_sql(path_op: &str) -> String {\n // ... format the SQL with {path_op} inlined, all config values as bound params\n}\n```\n\n### SQL structure (8 CTEs + final SELECT):\n1. **matched_notes_raw**: UNION ALL on position_new_path + position_old_path\n2. **matched_notes**: DISTINCT dedup by id\n3. **matched_file_changes_raw**: UNION ALL on new_path + old_path\n4. **matched_file_changes**: DISTINCT dedup by (merge_request_id, project_id)\n5. 
**mr_activity**: Centralized state-aware timestamps AND state_mult. Joins merge_requests via matched_file_changes. Computes:\n - activity_ts: CASE WHEN state='merged' THEN COALESCE(merged_at, created_at) WHEN state='closed' THEN COALESCE(closed_at, created_at) ELSE COALESCE(updated_at, created_at) END\n - state_mult: CASE WHEN state='closed' THEN ?5 ELSE 1.0 END\n6. **reviewer_participation**: substantive DiffNotes WHERE LENGTH(TRIM(body)) >= ?6\n7. **raw**: 5 signals (diffnote_reviewer, diffnote_author, file_author, file_reviewer_participated, file_reviewer_assigned). Signals 1-2 compute state_mult inline. Signals 3-4a-4b reference mr_activity.\n8. **aggregated**: MR-level GROUP BY + note_group with COUNT\n\n### Returns 6 columns: (username TEXT, signal TEXT, mr_id INTEGER, qty INTEGER, ts INTEGER, state_mult REAL)\n\nSee plans/time-decay-expert-scoring.md section 3 for the full SQL template.\n\n## TDD Loop\n\n### RED (write first):\n```rust\n#[test]\nfn test_expert_sql_returns_expected_signal_rows() {\n let conn = setup_test_db();\n insert_project(&conn, 1, \"team/backend\");\n insert_mr(&conn, 1, 1, 100, \"alice\", \"merged\");\n insert_file_change(&conn, 1, 1, \"src/app.rs\", \"modified\");\n insert_reviewer(&conn, 1, \"bob\");\n insert_reviewer(&conn, 1, \"carol\");\n insert_discussion(&conn, 1, 1, Some(1), None, true, false);\n insert_diffnote(&conn, 1, 1, 1, \"carol\", \"src/app.rs\", \"This needs error handling for the edge case\");\n\n let sql = build_expert_sql(\"= ?1\");\n let mut stmt = conn.prepare(&sql).unwrap();\n let rows: Vec<(String, String, i64, i64, i64, f64)> = stmt\n .query_map(\n rusqlite::params![\"src/app.rs\", 0_i64, Option::::None, now_ms() + 1000, 0.5_f64, 20_i64],\n |row| Ok((\n row.get(0).unwrap(), row.get(1).unwrap(), row.get(2).unwrap(),\n row.get(3).unwrap(), row.get(4).unwrap(), row.get(5).unwrap(),\n ))\n ).unwrap().filter_map(|r| r.ok()).collect();\n\n // alice: file_author\n assert!(rows.iter().any(|(u, s, ..)| u == \"alice\" && 
s == \"file_author\"));\n // carol: file_reviewer_participated (left substantive DiffNote)\n assert!(rows.iter().any(|(u, s, ..)| u == \"carol\" && s == \"file_reviewer_participated\"));\n // bob: file_reviewer_assigned (no DiffNotes)\n assert!(rows.iter().any(|(u, s, ..)| u == \"bob\" && s == \"file_reviewer_assigned\"));\n // carol: note_group\n assert!(rows.iter().any(|(u, s, ..)| u == \"carol\" && s == \"note_group\"));\n // alice: diffnote_author\n assert!(rows.iter().any(|(u, s, ..)| u == \"alice\" && s == \"diffnote_author\"));\n // All merged rows have state_mult = 1.0\n assert!(rows.iter().all(|(.., sm)| (sm - 1.0).abs() < f64::EPSILON));\n}\n```\n\n### GREEN: Implement build_expert_sql() with the 8 CTEs.\n### VERIFY: cargo test -p lore -- test_expert_sql_returns_expected_signal_rows\n\n## Acceptance Criteria\n- [ ] test_expert_sql_returns_expected_signal_rows passes (all 5 signal types correct)\n- [ ] SQL compiles against :memory: DB with indexes from bd-2ao4 (migration 026)\n- [ ] 6 columns returned: username, signal, mr_id, qty, ts, state_mult (REAL, not TEXT)\n- [ ] 6 SQL params: ?1=path, ?2=since_ms, ?3=project_id, ?4=as_of_ms, ?5=closed_mr_multiplier, ?6=reviewer_min_note_chars\n- [ ] mr_activity CTE centralizes timestamp + state_mult (not repeated)\n- [ ] reviewer_participation uses ?6 not inlined literal\n- [ ] Existing query_expert() and all existing tests UNTOUCHED\n- [ ] build_expert_sql() is a pure function (no Connection param)\n\n## Files\n- MODIFY: src/cli/commands/who.rs (new build_expert_sql function + test, placed near query_expert at line ~641)\n\n## Edge Cases\n- ?5 (closed_mr_multiplier) bound as f64 — rusqlite handles this\n- ?6 (reviewer_min_note_chars) bound as i64 — SQLite LENGTH returns integer\n- Signals 1-2 compute state_mult inline (join through discussions, not mr_activity)\n- COALESCE fallback to created_at for NULL merged_at/closed_at/updated_at\n- Dedup in matched_notes/matched_file_changes prevents 
double-counting","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T16:59:44.665314Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:43:04.410514Z","closed_at":"2026-02-12T20:43:04.410470Z","close_reason":"Implemented by time-decay swarm: 3 agents, 12 tasks, 621 tests passing, all quality gates green","compaction_level":0,"original_size":0,"labels":["scoring"],"dependencies":[{"issue_id":"bd-1hoq","depends_on_id":"bd-1soz","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1hoq","depends_on_id":"bd-2ao4","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1hoq","depends_on_id":"bd-2w1p","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-1ht","title":"Epic: Gate 5 - Code Trace (lore trace)","description":"## Background\n\nGate 5 implements 'lore trace' — answers 'Why was this code introduced?' by tracing from a file path through the MR that modified it, to the issue that motivated the MR, to the discussions with decision rationale. Capstone of Phase B.\n\nGate 5 ships Tier 1 only (API-only, no local git). Tier 2 (git blame via git2-rs) deferred to Phase C.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Gate 5 (Sections 5.1-5.7).\n\n## Prerequisites\n\n- Gates 1-2 COMPLETE: entity_references populated, resource events fetched\n- Gate 4 (bd-14q): provides mr_file_changes table + resolve_rename_chain algorithm\n- entity_references source_method: 'api' | 'note_parse' | 'description_parse'\n- discussions/notes tables for DiffNote content\n- merge_requests.merged_at exists (migration 006). 
Use COALESCE(merged_at, updated_at) for ordering.\n\n## Architecture\n\n- **No new tables.** Trace queries combine mr_file_changes, entity_references, discussions/notes\n- **Query flow:** file -> mr_file_changes -> MRs -> entity_references (closes/related) -> issues -> discussions with DiffNote context\n- **Tier 1:** File-level granularity only. Cannot trace a specific line to its introducing commit.\n- **Path parsing:** Supports 'src/foo.rs:45' syntax — line number parsed but deferred with Tier 2 warning.\n- **Rename aware:** Reuses file_history::resolve_rename_chain for multi-path matching.\n\n## Children (Execution Order)\n\n1. **bd-2n4** — Trace query logic: file -> MR -> issue -> discussion chain (src/core/trace.rs)\n2. **bd-9dd** — CLI command with human + robot output (src/cli/commands/trace.rs)\n\n## Gate Completion Criteria\n\n- [ ] `lore trace ` shows MRs with linked issues + discussion context\n- [ ] Output includes MR -> issue -> discussion chain\n- [ ] DiffNote snippets show content on the traced file\n- [ ] Cross-references from entity_references used for MR->issue linking\n- [ ] :line suffix parses and emits Tier 2 warning\n- [ ] Robot mode JSON with tier: 'api_only'\n- [ ] Graceful handling when no MR data found (suggest sync with fetchMrFileChanges)\n","status":"open","priority":1,"issue_type":"feature","created_at":"2026-02-02T21:31:01.141053Z","created_by":"tayloreernisse","updated_at":"2026-02-05T20:57:12.357740Z","compaction_level":0,"original_size":0,"labels":["epic","gate-5","phase-b"],"dependencies":[{"issue_id":"bd-1ht","depends_on_id":"bd-14q","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ht","depends_on_id":"bd-1se","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1i2","title":"Integrate mark_dirty_tx into ingestion modules","description":"## Background\nThis bead integrates dirty source tracking into the existing ingestion pipelines. 
Every entity upserted during ingestion must be marked dirty so the document regenerator knows to update the corresponding search document. The critical constraint: mark_dirty_tx() must be called INSIDE the same transaction that upserts the entity — not after commit.\n\n**Key PRD clarification:** Mark ALL upserted entities dirty (not just changed ones). The regenerator's hash comparison handles \"unchanged\" detection cheaply — this avoids needing change detection in ingestion.\n\n## Approach\nModify 4 existing ingestion files to add mark_dirty_tx() calls inside existing transaction blocks per PRD Section 6.1.\n\n**1. src/ingestion/issues.rs:**\nInside the issue upsert loop, after each successful INSERT/UPDATE:\n```rust\ndirty_tracker::mark_dirty_tx(&tx, SourceType::Issue, issue_row.id)?;\n```\n\n**2. src/ingestion/merge_requests.rs:**\nInside the MR upsert loop:\n```rust\ndirty_tracker::mark_dirty_tx(&tx, SourceType::MergeRequest, mr_row.id)?;\n```\n\n**3. src/ingestion/discussions.rs:**\nInside discussion insert (issue discussions, full-refresh transaction):\n```rust\ndirty_tracker::mark_dirty_tx(&tx, SourceType::Discussion, discussion_row.id)?;\n```\n\n**4. src/ingestion/mr_discussions.rs:**\nInside discussion upsert (write phase):\n```rust\ndirty_tracker::mark_dirty_tx(&tx, SourceType::Discussion, discussion_row.id)?;\n```\n\n**Discussion Sweep Cleanup (PRD Section 6.1 — CRITICAL):**\nWhen the MR discussion sweep deletes stale discussions (`last_seen_at < run_start_time`), **delete the corresponding document rows directly** — do NOT use the dirty queue for cleanup. 
The `ON DELETE CASCADE` on `document_labels`/`document_paths` and the `documents_embeddings_ad` trigger handle all downstream cleanup.\n\n**PRD-exact CTE pattern:**\n```sql\n-- In src/ingestion/mr_discussions.rs, during sweep phase.\n-- Uses a CTE to capture stale IDs atomically before cascading deletes.\n-- This is more defensive than two separate statements because the CTE\n-- guarantees the ID set is captured before any row is deleted.\nWITH stale AS (\n SELECT id FROM discussions\n WHERE merge_request_id = ? AND last_seen_at < ?\n)\n-- Step 1: delete orphaned documents (must happen while source_id still resolves)\nDELETE FROM documents\n WHERE source_type = 'discussion' AND source_id IN (SELECT id FROM stale);\n-- Step 2: delete the stale discussions themselves\nDELETE FROM discussions\n WHERE id IN (SELECT id FROM stale);\n```\n\n**NOTE:** If SQLite version doesn't support CTE-based multi-statement, execute as two sequential statements capturing IDs in Rust first:\n```rust\nlet stale_ids: Vec = conn.prepare(\n \"SELECT id FROM discussions WHERE merge_request_id = ? AND last_seen_at < ?\"\n)?.query_map(params![mr_id, run_start], |r| r.get(0))?\n .collect::, _>>()?;\n\nif !stale_ids.is_empty() {\n // Delete documents FIRST (while source_id still resolves)\n conn.execute(\n \"DELETE FROM documents WHERE source_type = 'discussion' AND source_id IN (...)\",\n ...\n )?;\n // Then delete the discussions\n conn.execute(\n \"DELETE FROM discussions WHERE id IN (...)\",\n ...\n )?;\n}\n```\n\n**IMPORTANT difference from dirty queue pattern:** The sweep deletes documents DIRECTLY (not via dirty_sources queue). This is because the source entity is being deleted — there's nothing for the regenerator to regenerate from. 
The cascade handles FTS, labels, paths, and embeddings cleanup.\n\n## Acceptance Criteria\n- [ ] Every upserted issue is marked dirty inside the same transaction\n- [ ] Every upserted MR is marked dirty inside the same transaction\n- [ ] Every upserted discussion (issue + MR) is marked dirty inside the same transaction\n- [ ] ALL upserted entities marked dirty (not just changed ones) — regenerator handles skip\n- [ ] mark_dirty_tx called with &Transaction (not &Connection)\n- [ ] mark_dirty_tx uses upsert with ON CONFLICT to reset backoff state (not INSERT OR IGNORE)\n- [ ] Discussion sweep deletes documents DIRECTLY (not via dirty queue)\n- [ ] Discussion sweep uses CTE (or Rust-side ID capture) to capture stale IDs before cascading deletes\n- [ ] Documents deleted BEFORE discussions (while source_id still resolves)\n- [ ] ON DELETE CASCADE handles document_labels, document_paths cleanup\n- [ ] documents_embeddings_ad trigger handles embedding cleanup\n- [ ] `cargo build` succeeds\n- [ ] Existing ingestion tests still pass\n\n## Files\n- `src/ingestion/issues.rs` — add mark_dirty_tx calls in upsert loop\n- `src/ingestion/merge_requests.rs` — add mark_dirty_tx calls in upsert loop\n- `src/ingestion/discussions.rs` — add mark_dirty_tx calls in insert loop\n- `src/ingestion/mr_discussions.rs` — add mark_dirty_tx calls + direct document deletion in sweep\n\n## TDD Loop\nRED: Existing tests should still pass (regression); new tests:\n- `test_issue_upsert_marks_dirty` — after issue ingest, dirty_sources has entry\n- `test_mr_upsert_marks_dirty` — after MR ingest, dirty_sources has entry\n- `test_discussion_upsert_marks_dirty` — after discussion ingest, dirty_sources has entry\n- `test_discussion_sweep_deletes_documents` — stale discussion documents deleted directly\n- `test_sweep_cascade_cleans_labels_paths` — ON DELETE CASCADE works\nGREEN: Add mark_dirty_tx calls in all 4 files, implement sweep with CTE\nVERIFY: `cargo test ingestion && cargo build`\n\n## Edge 
Cases\n- Upsert that doesn't change data: still marks dirty (regenerator hash check handles skip)\n- Transaction rollback: dirty mark also rolled back (atomic, inside same txn)\n- Discussion sweep with zero stale IDs: CTE returns empty, no DELETE executed\n- Large batch of upserts: each mark_dirty_tx is O(1) INSERT with ON CONFLICT\n- Sweep deletes document before discussion: order matters for source_id resolution","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:27:09.540279Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:39:17.241433Z","closed_at":"2026-01-30T17:39:17.241390Z","close_reason":"Added mark_dirty_tx calls in issues.rs, merge_requests.rs, discussions.rs, mr_discussions.rs (2 paths)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1i2","depends_on_id":"bd-38q","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-1j1","title":"Integration test: full Phase B sync pipeline","description":"## Background\n\nThis integration test proves the full Phase B sync pipeline works end-to-end. Since Gates 1 and 2 are already implemented and closed, this test validates that the complete pipeline — including Gate 4 mr_diffs draining — works together.\n\n## Codebase Context\n\n- **Gates 1-2 FULLY IMPLEMENTED (CLOSED):** resource events fetch, closes_issues API, system note parsing (note_parser.rs), entity_references extraction (references.rs)\n- **Gate 4 in progress:** migration 015 (mr_file_changes), fetch_mr_diffs, drain_mr_diffs — this test validates the full chain\n- Migrations 001-014 exist. Migration 015 (bd-1oo) adds mr_file_changes + commit SHAs.\n- Orchestrator has drain_resource_events() and drain_mr_closes_issues(). 
Gate 4 adds drain_mr_diffs().\n- wiremock crate used in existing tests (check dev-dependencies in Cargo.toml)\n- src/core/dependent_queue.rs: enqueue_job(), claim_jobs(), complete_job(), fail_job() with exponential backoff\n- IngestProjectResult and IngestMrProjectResult track counts for all drain phases\n\n## Approach\n\nCreate tests/phase_b_integration.rs:\n\n### Test Setup\n\n1. In-memory SQLite DB with all migrations (001-015)\n2. wiremock mock server with:\n - /api/v4/projects/:id/issues — 2 test issues\n - /api/v4/projects/:id/merge_requests — 1 test MR\n - /api/v4/projects/:id/issues/:iid/resource_state_events — state events\n - /api/v4/projects/:id/issues/:iid/resource_label_events — label events\n - /api/v4/projects/:id/merge_requests/:iid/resource_state_events — merge event with source_merge_request_iid\n - /api/v4/projects/:id/merge_requests/:iid/closes_issues — linked issues\n - /api/v4/projects/:id/merge_requests/:iid/diffs — file changes\n - /api/v4/projects/:id/issues/:iid/discussions — discussion with system note \"mentioned in !1\"\n3. Config with fetch_resource_events=true and fetch_mr_file_changes=true (bd-jec)\n4. Use dependent_concurrency=1 to avoid timing issues\n\n### Test Flow\n\n```rust\n#[tokio::test]\nasync fn test_full_phase_b_pipeline() {\n // 1. Set up mock server + DB with migrations 001-015\n // 2. Run ingest issues + MRs (orchestrator functions)\n // 3. Verify pending_dependent_fetches enqueued: resource_events, mr_closes_issues, mr_diffs\n // 4. Drain all dependent fetch queues\n // 5. Assert: resource_state_events populated (count > 0)\n // 6. Assert: resource_label_events populated (count > 0)\n // 7. Assert: entity_references has closes ref with source_method='api'\n // 8. Assert: entity_references has mentioned ref with source_method='note_parse'\n // 9. Assert: mr_file_changes populated from diffs API\n // 10. 
Assert: pending_dependent_fetches fully drained (no stuck locks)\n}\n```\n\n### Assertions (SQL)\n\n```sql\nSELECT COUNT(*) FROM resource_state_events -- > 0\nSELECT COUNT(*) FROM resource_label_events -- > 0\nSELECT COUNT(*) FROM entity_references WHERE reference_type = 'closes' AND source_method = 'api' -- >= 1\nSELECT COUNT(*) FROM entity_references WHERE source_method = 'note_parse' -- >= 1\nSELECT COUNT(*) FROM mr_file_changes -- > 0\nSELECT COUNT(*) FROM pending_dependent_fetches WHERE locked_at IS NOT NULL -- = 0\n```\n\n## Acceptance Criteria\n\n- [ ] Test creates DB with migrations 001-015, mocks, and runs full pipeline\n- [ ] resource_state_events and resource_label_events populated\n- [ ] entity_references has closes ref (source_method='api') and mentioned ref (source_method='note_parse')\n- [ ] mr_file_changes populated from diffs mock\n- [ ] pending_dependent_fetches fully drained (no stuck locks, no retryable jobs)\n- [ ] Test runs in < 10 seconds\n- [ ] `cargo test --test phase_b_integration` passes\n\n## Files\n\n- tests/phase_b_integration.rs (NEW)\n\n## TDD Loop\n\nRED: Write test with all assertions — may fail if Gate 4 draining not yet wired.\n\nGREEN: Fix pipeline wiring (drain_mr_diffs in orchestrator).\n\nVERIFY: cargo test --test phase_b_integration -- --nocapture\n\n## Edge Cases\n\n- Paginated mock responses: include Link header for multi-page responses\n- Empty pages: verify graceful handling\n- Use dependent_concurrency=1 to avoid timing issues in test environment\n- Stale lock reclaim: test that locks older than stale_lock_minutes are 
reclaimed","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-02T22:42:26.355071Z","created_by":"tayloreernisse","updated_at":"2026-02-05T20:16:55.266005Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1j1","depends_on_id":"bd-1ji","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1j1","depends_on_id":"bd-1se","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1j1","depends_on_id":"bd-3ia","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1j1","depends_on_id":"bd-8t4","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-1j5o","title":"Verification: quality gates, query plan check, real-world validation","description":"## Background\n\nPost-implementation verification checkpoint. Runs after all code beads complete to validate the full scoring model works correctly against real data, not just test fixtures.\n\n## Approach\n\nExecute 8 verification steps in order. 
Each step has a binary pass/fail outcome.\n\n### Step 1: Compiler check\n```bash\ncargo check --all-targets\n```\nPass: exit 0\n\n### Step 2: Clippy\n```bash\ncargo clippy --all-targets -- -D warnings\n```\nPass: exit 0\n\n### Step 3: Formatting\n```bash\ncargo fmt --check\n```\nPass: exit 0\n\n### Step 4: Test suite\n```bash\ncargo test -p lore\n```\nPass: all tests green, including 31 new decay/scoring tests\n\n### Step 5: UBS scan\n```bash\nubs src/cli/commands/who.rs src/core/config.rs src/core/db.rs\n```\nPass: exit 0\n\n### Step 6: Query plan verification (manual)\nRun against real database:\n```bash\ncargo run --release -- who --path MeasurementQualityDialog.tsx -vvv 2>&1 | grep -i \"query plan\"\n```\nOr use sqlite3 CLI with EXPLAIN QUERY PLAN on the expert SQL (both exact and prefix modes).\n\nPass criteria (6 checks):\n- matched_notes_raw branch 1 uses existing new_path index\n- matched_notes_raw branch 2 uses idx_notes_old_path_author\n- matched_file_changes_raw uses idx_mfc_new_path_project_mr and idx_mfc_old_path_project_mr\n- reviewer_participation uses idx_notes_diffnote_discussion_author\n- mr_activity CTE joins merge_requests via primary key from matched_file_changes\n- Path resolution probes (old_path leg) use idx_notes_old_path_project_created\nDocument observed plan as SQL comment near the CTE.\n\n### Step 7: Performance baseline (manual)\n```bash\ntime cargo run --release -- who --path MeasurementQualityDialog.tsx\ntime cargo run --release -- who --path src/\ntime cargo run --release -- who --path Dialog.tsx\n```\nPass criteria (soft SLOs):\n- Exact path: p95 < 200ms\n- Prefix: p95 < 300ms\n- Suffix: p95 < 500ms\nRecord timings as SQL comment for future regression reference.\n\n### Step 8: Real-world validation\n```bash\ncargo run --release -- who --path MeasurementQualityDialog.tsx\ncargo run --release -- who --path MeasurementQualityDialog.tsx --explain-score\ncargo run --release -- who --path MeasurementQualityDialog.tsx --as-of 
2025-06-01\ncargo run --release -- who --path MeasurementQualityDialog.tsx --all-history\n```\nPass criteria:\n- [ ] Recency discounting visible (recent authors rank above old reviewers)\n- [ ] --explain-score components sum to total (within f64 tolerance)\n- [ ] --as-of produces identical results on repeated runs\n- [ ] Assigned-only reviewers rank below participated reviewers on same MR\n- [ ] Known renamed file path resolves and credits old expertise\n- [ ] LGTM-only reviewers classified as assigned-only\n- [ ] Closed MRs at ~50% contribution visible via --explain-score\n\n## Acceptance Criteria\n- [ ] Steps 1-5 pass (exit 0)\n- [ ] Step 6: query plan documented with all 6 index usage points confirmed\n- [ ] Step 7: timing baselines recorded\n- [ ] Step 8: all 7 real-world checks pass\n\n## Files\n- All files modified by child beads (read-only verification)\n- Add SQL comments near CTE with observed EXPLAIN QUERY PLAN output\n\n## Edge Cases\n- SQLite planner may choose different plans across versions — document version\n- Timing varies by hardware — record machine specs alongside baselines\n- Real DB may have NULL merged_at on old MRs — state-aware fallback handles this","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-09T17:00:59.287720Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:46:09.896857Z","compaction_level":0,"original_size":0,"labels":["scoring"],"dependencies":[{"issue_id":"bd-1j5o","depends_on_id":"bd-1b50","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"},{"issue_id":"bd-1j5o","depends_on_id":"bd-1vti","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"}]} +{"id":"bd-1i4i","title":"Implement run_sync_surgical orchestration function","description":"## Background\n\nThe surgical sync pipeline needs a top-level orchestration function that coordinates the full pipeline for syncing specific IIDs. 
Unlike `run_sync` (lines 63-360 of `src/cli/commands/sync.rs`) which syncs all projects and all entities, `run_sync_surgical` targets specific issues/MRs by IID within a single project. The pipeline stages are: resolve project, record sync run, preflight fetch, check cancellation, acquire lock, ingest with TOCTOU guards, inline dependent enrichment (discussions, events, diffs), scoped doc regeneration, scoped embedding, finalize recorder, and build `SyncResult`.\n\n## Approach\n\nCreate `pub async fn run_sync_surgical()` in a new file `src/cli/commands/sync_surgical.rs`. Signature:\n\n```rust\npub async fn run_sync_surgical(\n config: &Config,\n options: SyncOptions,\n run_id: Option<&str>,\n signal: &ShutdownSignal,\n) -> Result\n```\n\nThe function reads `options.issues` and `options.merge_requests` (added by bd-1lja) to determine target IIDs. Pipeline:\n\n1. **Resolve project**: Call `resolve_project(conn, project_str)` from `src/core/project.rs` to get `gitlab_project_id`.\n2. **Start recorder**: `SyncRunRecorder::start(&recorder_conn, \"surgical-sync\", run_id)`. Note: `succeed()` and `fail()` consume `self`, so control flow must ensure exactly one terminal call.\n3. **Preflight fetch**: For each IID, call `get_issue_by_iid` / `get_mr_by_iid` (bd-159p) to confirm the entity exists on GitLab and capture `updated_at` for TOCTOU.\n4. **Check cancellation**: `if signal.is_cancelled() { recorder.fail(...); return Ok(result); }`\n5. **Acquire lock**: `AppLock::new(conn, LockOptions { name: \"surgical-sync\".into(), stale_lock_minutes: config.sync.stale_lock_minutes, heartbeat_interval_seconds: config.sync.heartbeat_interval_seconds })`. Lock must `acquire(force)` and `release()` on all exit paths.\n6. **Ingest with TOCTOU**: For each preflight entity, call surgical ingest (bd-3sez). Compare DB `updated_at` with preflight `updated_at`; skip if already current. Record outcome in `EntitySyncResult`.\n7. 
**Inline dependents**: For ingested entities, fetch discussions, resource events (if `config.sync.fetch_resource_events`), MR diffs (if `config.sync.fetch_mr_file_changes`). Use `config.sync.requests_per_second` for rate limiting.\n8. **Scoped docs**: Call `run_generate_docs_for_sources()` (bd-hs6j) with only the affected entity source IDs.\n9. **Scoped embed**: Call `run_embed_for_document_ids()` (bd-1elx) with only the regenerated document IDs.\n10. **Finalize**: `recorder.succeed(conn, &metrics, total_items, total_errors)`.\n11. **Build SyncResult**: Populate surgical fields (bd-wcja): `surgical_mode: Some(true)`, `surgical_iids`, `entity_results`, `preflight_only`.\n\nIf `options.preflight_only` is set, return after step 3 with the preflight data and skip steps 4-10.\n\nProgress output uses `stage_spinner_v2(icon, label, msg, robot_mode)` from `src/cli/progress.rs` line 18 during execution, and `format_stage_line(icon, label, summary, elapsed)` from `src/cli/progress.rs` line 67 for completion lines. Stage icons via `Icons::sync()` from `src/cli/render.rs` line 208. Error completion uses `color_icon(icon, has_errors)` from `src/cli/commands/sync.rs` line 55.\n\n## Acceptance Criteria\n\n1. `run_sync_surgical` compiles and runs the full pipeline for 1+ issue IIDs\n2. Preflight-only mode returns early with fetched entity data, no DB writes beyond recorder\n3. TOCTOU: entities whose DB `updated_at` matches preflight `updated_at` are skipped with `skipped_toctou` outcome\n4. Cancellation at any stage between preflight and ingest stops processing, calls `recorder.fail()`\n5. Lock is acquired before ingest and released on all exit paths (success, error, cancellation)\n6. `SyncResult` surgical fields are populated: `surgical_mode`, `surgical_iids`, `entity_results`\n7. Robot mode produces valid JSON with per-entity outcomes\n8. 
Human mode shows stage spinners and completion lines\n\n## Files\n\n- `src/cli/commands/sync_surgical.rs` — new file, main orchestration function\n- `src/cli/commands/mod.rs` — add `pub mod sync_surgical;`\n\n## TDD Anchor\n\nTests in `src/cli/commands/sync_surgical.rs` or a companion `sync_surgical_tests.rs`:\n\n```rust\n#[cfg(test)]\nmod tests {\n use super::*;\n use crate::core::db::{create_connection, run_migrations};\n use std::path::Path;\n use wiremock::{MockServer, Mock, ResponseTemplate};\n use wiremock::matchers::{method, path_regex};\n\n fn test_config(mock_url: &str) -> Config {\n let mut config = Config::default();\n config.gitlab.url = mock_url.to_string();\n config.gitlab.token = \"test-token\".to_string();\n config\n }\n\n fn setup_db() -> rusqlite::Connection {\n let conn = create_connection(Path::new(\":memory:\")).unwrap();\n run_migrations(&conn).unwrap();\n // Insert test project\n conn.execute(\n \"INSERT INTO projects (gitlab_project_id, path_with_namespace, web_url)\n VALUES (1, 'group/project', 'https://gitlab.example.com/group/project')\",\n [],\n ).unwrap();\n conn\n }\n\n #[tokio::test]\n async fn surgical_sync_single_issue_end_to_end() {\n let server = MockServer::start().await;\n // Mock: GET /projects/:id/issues?iids[]=7 returns one issue\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(\n serde_json::json!([{\n \"id\": 100, \"iid\": 7, \"project_id\": 1, \"title\": \"Test\",\n \"state\": \"opened\", \"created_at\": \"2026-01-01T00:00:00Z\",\n \"updated_at\": \"2026-02-17T00:00:00Z\",\n \"author\": {\"id\": 1, \"username\": \"dev\", \"name\": \"Dev\"},\n \"web_url\": \"https://gitlab.example.com/group/project/-/issues/7\"\n }])\n ))\n .mount(&server).await;\n // Mock discussions endpoint\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues/7/discussions\"))\n 
.respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n robot_mode: true,\n issues: vec![7],\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n let result = run_sync_surgical(&config, options, Some(\"test01\"), &signal).await.unwrap();\n\n assert_eq!(result.surgical_mode, Some(true));\n assert_eq!(result.surgical_iids.as_ref().unwrap().issues, vec![7]);\n let entities = result.entity_results.as_ref().unwrap();\n assert_eq!(entities.len(), 1);\n assert_eq!(entities[0].outcome, \"synced\");\n }\n\n #[tokio::test]\n async fn preflight_only_returns_early() {\n let server = MockServer::start().await;\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([{\n \"id\": 100, \"iid\": 7, \"project_id\": 1, \"title\": \"Test\",\n \"state\": \"opened\", \"created_at\": \"2026-01-01T00:00:00Z\",\n \"updated_at\": \"2026-02-17T00:00:00Z\",\n \"author\": {\"id\": 1, \"username\": \"dev\", \"name\": \"Dev\"},\n \"web_url\": \"https://gitlab.example.com/group/project/-/issues/7\"\n }])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n robot_mode: true,\n issues: vec![7],\n preflight_only: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n let result = run_sync_surgical(&config, options, Some(\"test02\"), &signal).await.unwrap();\n\n assert_eq!(result.preflight_only, Some(true));\n assert_eq!(result.issues_updated, 0); // No actual ingest happened\n }\n\n #[tokio::test]\n async fn cancellation_before_ingest_fails_recorder() {\n let server = MockServer::start().await;\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([{\n \"id\": 100, \"iid\": 7, 
\"project_id\": 1, \"title\": \"Test\",\n \"state\": \"opened\", \"created_at\": \"2026-01-01T00:00:00Z\",\n \"updated_at\": \"2026-02-17T00:00:00Z\",\n \"author\": {\"id\": 1, \"username\": \"dev\", \"name\": \"Dev\"},\n \"web_url\": \"https://gitlab.example.com/group/project/-/issues/7\"\n }])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n robot_mode: true,\n issues: vec![7],\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n signal.cancel(); // Cancel before we start\n let result = run_sync_surgical(&config, options, Some(\"test03\"), &signal).await.unwrap();\n\n // Result should indicate cancellation\n assert_eq!(result.issues_updated, 0);\n }\n}\n```\n\n## Edge Cases\n\n- **Entity not found on GitLab**: Preflight returns 404 for an IID. Record `EntitySyncResult { outcome: \"not_found\" }` and continue with remaining IIDs.\n- **All entities skipped by TOCTOU**: Every entity's `updated_at` matches DB. Result has `entity_results` with all `skipped_toctou`, zero actual sync work.\n- **Mixed success/failure**: Some IIDs succeed, some fail. All recorded in `entity_results`. Function returns `Ok` with partial results, not `Err`.\n- **SyncRunRecorder consume semantics**: `succeed()` and `fail()` take `self` by value. The orchestrator must ensure exactly one terminal call. Use an `Option` pattern: `let mut recorder = Some(recorder); ... 
recorder.take().unwrap().succeed(...)`.\n- **Lock contention**: If another sync holds the lock and `force` is false, fail with clear error before any ingest.\n- **Empty IID lists**: If both `options.issues` and `options.merge_requests` are empty, return immediately with default `SyncResult` (no surgical fields set).\n\n## Dependency Context\n\n- **Depends on (upstream)**: bd-wcja (SyncResult fields), bd-1lja (SyncOptions extensions), bd-159p (get_by_iid client methods), bd-3sez (surgical ingest/preflight/TOCTOU), bd-kanh (per-entity helpers), bd-arka (SyncRunRecorder surgical methods), bd-1elx (scoped embed), bd-hs6j (scoped docs), bd-tiux (migration 027)\n- **Blocks (downstream)**: bd-3bec (wiring into run_sync), bd-3jqx (integration tests)\n- This is the keystone bead — it consumes all upstream primitives and is consumed by the final wiring and integration test beads.","status":"open","priority":1,"issue_type":"task","created_at":"2026-02-17T19:17:24.197299Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:03:01.815253Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-1i4i","depends_on_id":"bd-3bec","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} +{"id":"bd-1j1","title":"Integration test: full Phase B sync pipeline","description":"## Background\n\nThis integration test proves the full Phase B sync pipeline works end-to-end. Since Gates 1 and 2 are already implemented and closed, this test validates that the complete pipeline — including Gate 4 mr_diffs draining — works together.\n\n## Codebase Context\n\n- **Gates 1-2 FULLY IMPLEMENTED (CLOSED):** resource events fetch, closes_issues API, system note parsing (note_parser.rs), entity_references extraction (references.rs)\n- **Gate 4 in progress:** migration 016 (mr_file_changes), fetch_mr_diffs, drain_mr_diffs — already wired in orchestrator (lines 708-726, 1514+)\n- **26 migrations exist** (001-026). 
LATEST_SCHEMA_VERSION = 26. In-memory DB must run all 26.\n- Orchestrator has drain_resource_events() (line 932), drain_mr_closes_issues() (line 1254), and drain_mr_diffs() (line 1514).\n- wiremock crate used in existing tests (check dev-dependencies in Cargo.toml)\n- src/core/dependent_queue.rs: enqueue_job(), claim_jobs(), complete_job(), fail_job() with exponential backoff\n- IngestProjectResult and IngestMrProjectResult track counts for all drain phases\n\n## Approach\n\nCreate tests/phase_b_integration.rs:\n\n### Test Setup\n\n1. In-memory SQLite DB with all 26 migrations (001-026)\n2. wiremock mock server with:\n - /api/v4/projects/:id/issues — 2 test issues\n - /api/v4/projects/:id/merge_requests — 1 test MR\n - /api/v4/projects/:id/issues/:iid/resource_state_events — state events\n - /api/v4/projects/:id/issues/:iid/resource_label_events — label events\n - /api/v4/projects/:id/merge_requests/:iid/resource_state_events — merge event with source_merge_request_iid\n - /api/v4/projects/:id/merge_requests/:iid/closes_issues — linked issues\n - /api/v4/projects/:id/merge_requests/:iid/diffs — file changes\n - /api/v4/projects/:id/issues/:iid/discussions — discussion with system note \"mentioned in !1\"\n3. Config with fetch_resource_events=true and fetch_mr_file_changes=true\n4. Use dependent_concurrency=1 to avoid timing issues\n\n### Test Flow\n\n```rust\n#[tokio::test]\nasync fn test_full_phase_b_pipeline() {\n // 1. Set up mock server + DB with all 26 migrations\n // 2. Run ingest issues + MRs (orchestrator functions)\n // 3. Verify pending_dependent_fetches enqueued: resource_events, mr_closes_issues, mr_diffs\n // 4. Drain all dependent fetch queues\n // 5. Assert: resource_state_events populated (count > 0)\n // 6. Assert: resource_label_events populated (count > 0)\n // 7. Assert: entity_references has closes ref with source_method='api'\n // 8. Assert: entity_references has mentioned ref with source_method='note_parse'\n // 9. 
Assert: mr_file_changes populated from diffs API\n // 10. Assert: pending_dependent_fetches fully drained (no stuck locks)\n}\n```\n\n### Assertions (SQL)\n\n```sql\nSELECT COUNT(*) FROM resource_state_events -- > 0\nSELECT COUNT(*) FROM resource_label_events -- > 0\nSELECT COUNT(*) FROM entity_references WHERE reference_type = 'closes' AND source_method = 'api' -- >= 1\nSELECT COUNT(*) FROM entity_references WHERE source_method = 'note_parse' -- >= 1\nSELECT COUNT(*) FROM mr_file_changes -- > 0\nSELECT COUNT(*) FROM pending_dependent_fetches WHERE locked_at IS NOT NULL -- = 0\n```\n\n## Acceptance Criteria\n\n- [ ] Test creates DB with all 26 migrations, mocks, and runs full pipeline\n- [ ] resource_state_events and resource_label_events populated\n- [ ] entity_references has closes ref (source_method='api') and mentioned ref (source_method='note_parse')\n- [ ] mr_file_changes populated from diffs mock\n- [ ] pending_dependent_fetches fully drained (no stuck locks, no retryable jobs)\n- [ ] Test runs in < 10 seconds\n- [ ] `cargo test --test phase_b_integration` passes\n\n## Files\n\n- CREATE: tests/phase_b_integration.rs\n\n## TDD Anchor\n\nRED: Write test with all assertions — should pass if all Gates are wired correctly.\n\nGREEN: If anything fails, it indicates a missing orchestrator connection — fix the wiring.\n\nVERIFY: cargo test --test phase_b_integration -- --nocapture\n\n## Edge Cases\n\n- Paginated mock responses: include Link header for multi-page responses\n- Empty pages: verify graceful handling\n- Use dependent_concurrency=1 to avoid timing issues in test environment\n- Stale lock reclaim: test that locks older than stale_lock_minutes are reclaimed\n- If Gate 4 drain_mr_diffs is not fully wired yet, the mr_file_changes assertion will fail — this is the intended RED signal\n\n## Dependency Context\n\n- **bd-8t4 (resource_state_events extraction)**: CLOSED. 
Provides drain_resource_events() which populates resource_state_events and resource_label_events tables.\n- **bd-3ia (closes_issues)**: CLOSED. Provides drain_mr_closes_issues() which populates entity_references with reference_type='closes', source_method='api'.\n- **bd-1ji (note parsing)**: CLOSED. Provides note_parser.rs which extracts \"mentioned in !N\" patterns and stores as entity_references with source_method='note_parse'.\n- **dependent_queue.rs**: Provides the claim/complete/fail lifecycle. All three drain functions use this.\n- **orchestrator.rs**: Contains all drain functions. drain_mr_diffs() at line 1514+ populates mr_file_changes.","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-02T22:42:26.355071Z","created_by":"tayloreernisse","updated_at":"2026-02-17T16:52:30.970742Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1j1","depends_on_id":"bd-1ji","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1j1","depends_on_id":"bd-1se","type":"parent-child","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1j1","depends_on_id":"bd-3ia","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1j1","depends_on_id":"bd-8t4","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} +{"id":"bd-1j5o","title":"Verification: quality gates, query plan check, real-world validation","description":"## Background\n\nPost-implementation verification checkpoint. Runs after all code beads complete to validate the full scoring model works correctly against real data, not just test fixtures.\n\n## Approach\n\nExecute 8 verification steps in order. 
Each step has a binary pass/fail outcome.\n\n### Step 1: Compiler check\n```bash\ncargo check --all-targets\n```\nPass: exit 0\n\n### Step 2: Clippy\n```bash\ncargo clippy --all-targets -- -D warnings\n```\nPass: exit 0\n\n### Step 3: Formatting\n```bash\ncargo fmt --check\n```\nPass: exit 0\n\n### Step 4: Test suite\n```bash\ncargo test -p lore\n```\nPass: all tests green, including 31 new decay/scoring tests\n\n### Step 5: UBS scan\n```bash\nubs src/cli/commands/who.rs src/core/config.rs src/core/db.rs\n```\nPass: exit 0\n\n### Step 6: Query plan verification (manual)\nRun against real database:\n```bash\ncargo run --release -- who --path MeasurementQualityDialog.tsx -vvv 2>&1 | grep -i \"query plan\"\n```\nOr use sqlite3 CLI with EXPLAIN QUERY PLAN on the expert SQL (both exact and prefix modes).\n\nPass criteria (6 checks):\n- matched_notes_raw branch 1 uses existing new_path index\n- matched_notes_raw branch 2 uses idx_notes_old_path_author\n- matched_file_changes_raw uses idx_mfc_new_path_project_mr and idx_mfc_old_path_project_mr\n- reviewer_participation uses idx_notes_diffnote_discussion_author\n- mr_activity CTE joins merge_requests via primary key from matched_file_changes\n- Path resolution probes (old_path leg) use idx_notes_old_path_project_created\nDocument observed plan as SQL comment near the CTE.\n\n### Step 7: Performance baseline (manual)\n```bash\ntime cargo run --release -- who --path MeasurementQualityDialog.tsx\ntime cargo run --release -- who --path src/\ntime cargo run --release -- who --path Dialog.tsx\n```\nPass criteria (soft SLOs):\n- Exact path: p95 < 200ms\n- Prefix: p95 < 300ms\n- Suffix: p95 < 500ms\nRecord timings as SQL comment for future regression reference.\n\n### Step 8: Real-world validation\n```bash\ncargo run --release -- who --path MeasurementQualityDialog.tsx\ncargo run --release -- who --path MeasurementQualityDialog.tsx --explain-score\ncargo run --release -- who --path MeasurementQualityDialog.tsx --as-of 
2025-06-01\ncargo run --release -- who --path MeasurementQualityDialog.tsx --all-history\n```\nPass criteria:\n- [ ] Recency discounting visible (recent authors rank above old reviewers)\n- [ ] --explain-score components sum to total (within f64 tolerance)\n- [ ] --as-of produces identical results on repeated runs\n- [ ] Assigned-only reviewers rank below participated reviewers on same MR\n- [ ] Known renamed file path resolves and credits old expertise\n- [ ] LGTM-only reviewers classified as assigned-only\n- [ ] Closed MRs at ~50% contribution visible via --explain-score\n\n## Acceptance Criteria\n- [ ] Steps 1-5 pass (exit 0)\n- [ ] Step 6: query plan documented with all 6 index usage points confirmed\n- [ ] Step 7: timing baselines recorded\n- [ ] Step 8: all 7 real-world checks pass\n\n## Files\n- All files modified by child beads (read-only verification)\n- Add SQL comments near CTE with observed EXPLAIN QUERY PLAN output\n\n## Edge Cases\n- SQLite planner may choose different plans across versions — document version\n- Timing varies by hardware — record machine specs alongside baselines\n- Real DB may have NULL merged_at on old MRs — state-aware fallback handles this","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-09T17:00:59.287720Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:43:04.415816Z","closed_at":"2026-02-12T20:43:04.415772Z","close_reason":"Implemented by time-decay swarm: 3 agents, 12 tasks, 621 tests passing, all quality gates green","compaction_level":0,"original_size":0,"labels":["scoring"],"dependencies":[{"issue_id":"bd-1j5o","depends_on_id":"bd-1b50","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1j5o","depends_on_id":"bd-1vti","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-1je","title":"Implement pending discussion queue","description":"## Background\nThe pending discussion queue tracks discussions that need to be 
fetched from GitLab. When an issue or MR is updated, its discussions may need re-fetching. This queue is separate from dirty_sources (which tracks entities needing document regeneration) — it tracks entities needing API calls to GitLab. The queue uses the same backoff pattern as dirty_sources for consistency.\n\n## Approach\nCreate `src/ingestion/discussion_queue.rs`:\n\n```rust\nuse crate::core::backoff::compute_next_attempt_at;\n\n/// Noteable type for discussion queue.\n#[derive(Debug, Clone, Copy)]\npub enum NoteableType {\n Issue,\n MergeRequest,\n}\n\nimpl NoteableType {\n pub fn as_str(&self) -> &'static str {\n match self {\n Self::Issue => \"Issue\",\n Self::MergeRequest => \"MergeRequest\",\n }\n }\n}\n\npub struct PendingFetch {\n pub project_id: i64,\n pub noteable_type: NoteableType,\n pub noteable_iid: i64,\n pub attempt_count: i32,\n}\n\n/// Queue a discussion fetch. ON CONFLICT DO UPDATE resets backoff (consistent with dirty_sources).\npub fn queue_discussion_fetch(\n conn: &Connection,\n project_id: i64,\n noteable_type: NoteableType,\n noteable_iid: i64,\n) -> Result<()>;\n\n/// Get next batch of pending fetches (WHERE next_attempt_at IS NULL OR <= now).\npub fn get_pending_fetches(conn: &Connection, limit: usize) -> Result>;\n\n/// Mark fetch complete (remove from queue).\npub fn complete_fetch(\n conn: &Connection,\n project_id: i64,\n noteable_type: NoteableType,\n noteable_iid: i64,\n) -> Result<()>;\n\n/// Record fetch error with backoff.\npub fn record_fetch_error(\n conn: &Connection,\n project_id: i64,\n noteable_type: NoteableType,\n noteable_iid: i64,\n error: &str,\n) -> Result<()>;\n```\n\n## Acceptance Criteria\n- [ ] queue_discussion_fetch uses ON CONFLICT DO UPDATE (consistent with dirty_sources pattern)\n- [ ] Re-queuing resets: attempt_count=0, next_attempt_at=NULL, last_error=NULL\n- [ ] get_pending_fetches respects next_attempt_at backoff\n- [ ] get_pending_fetches returns entries ordered by queued_at ASC\n- [ ] complete_fetch 
removes entry from queue\n- [ ] record_fetch_error increments attempt_count, computes next_attempt_at via shared backoff\n- [ ] NoteableType.as_str() returns \"Issue\" or \"MergeRequest\" (matches DB CHECK constraint)\n- [ ] `cargo test discussion_queue` passes\n\n## Files\n- `src/ingestion/discussion_queue.rs` — new file\n- `src/ingestion/mod.rs` — add `pub mod discussion_queue;`\n\n## TDD Loop\nRED: Tests in `#[cfg(test)] mod tests`:\n- `test_queue_and_get` — queue entry, get returns it\n- `test_requeue_resets_backoff` — queue, error, re-queue -> attempt_count=0\n- `test_backoff_respected` — entry with future next_attempt_at not returned\n- `test_complete_removes` — complete_fetch removes entry\n- `test_error_increments_attempts` — error -> attempt_count=1, next_attempt_at set\nGREEN: Implement all functions\nVERIFY: `cargo test discussion_queue`\n\n## Edge Cases\n- Queue same (project_id, noteable_type, noteable_iid) twice: ON CONFLICT resets state\n- NoteableType must match DB CHECK constraint exactly (\"Issue\", \"MergeRequest\" — capitalized)\n- Empty queue: get_pending_fetches returns empty Vec","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:27:09.505548Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:31:35.496454Z","closed_at":"2026-01-30T17:31:35.496405Z","close_reason":"Implemented discussion_queue with queue/get/complete/record_error + 6 tests","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1je","depends_on_id":"bd-hrs","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1je","depends_on_id":"bd-mem","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1ji","title":"Parse system notes for cross-reference patterns","description":"## Background\nSystem notes contain cross-reference patterns like 'mentioned in !{iid}', 'closed by !{iid}', etc. 
This is best-effort, English-only extraction that supplements the structured API data from bd-3ia and bd-8t4. Runs as a local post-processing step (no API calls).\n\n## Approach\nCreate src/core/note_parser.rs:\n\n```rust\nuse regex::Regex;\nuse lazy_static::lazy_static;\n\n/// A parsed cross-reference from a system note.\npub struct ParsedCrossRef {\n pub reference_type: String, // \"mentioned\" | \"closes\"\n pub target_entity_type: String, // \"issue\" | \"merge_request\" \n pub target_iid: i64,\n pub target_project_path: Option, // None = same project\n}\n\nlazy_static! {\n static ref MENTIONED_RE: Regex = Regex::new(\n r\"mentioned in (?:(?P[\\w\\-]+/[\\w\\-]+))?(?P[#!])(?P\\d+)\"\n ).unwrap();\n static ref CLOSED_BY_RE: Regex = Regex::new(\n r\"closed by (?:(?P[\\w\\-]+/[\\w\\-]+))?(?P[#!])(?P\\d+)\"\n ).unwrap();\n}\n\n/// Parse a system note body for cross-references.\npub fn parse_cross_refs(body: &str) -> Vec\n\n/// Extract cross-references from all system notes and insert into entity_references.\n/// Queries notes WHERE is_system = 1, parses body text, resolves to entity_references.\npub fn extract_refs_from_system_notes(\n conn: &Connection,\n project_id: i64,\n) -> Result\n\npub struct ExtractResult {\n pub inserted: usize,\n pub skipped_unresolvable: usize,\n pub parse_failures: usize, // logged at debug level\n}\n```\n\nSigil mapping: `#` = issue, `!` = merge_request\n\nResolution logic:\n1. If target_project_path is None (same project): look up entity by iid in local DB → set target_entity_id\n2. 
If target_project_path is Some: check if project is synced locally\n - If yes: resolve to local entity id\n - If no: store as unresolved (target_entity_id=NULL, target_project_path=path, target_entity_iid=iid)\n\nInsert with source_method='system_note_parse', INSERT OR IGNORE for dedup.\n\nCall after drain_dependent_queue and extract_refs_from_state_events in the sync pipeline.\n\n## Acceptance Criteria\n- [ ] 'mentioned in !123' → mentioned ref, target=MR iid 123\n- [ ] 'mentioned in #456' → mentioned ref, target=issue iid 456\n- [ ] 'mentioned in group/project!789' → cross-project mentioned ref\n- [ ] 'closed by !123' → closes ref\n- [ ] Cross-project refs stored as unresolved when target project not synced\n- [ ] source_method = 'system_note_parse'\n- [ ] Parse failures logged at debug level (not errors)\n- [ ] Idempotent (INSERT OR IGNORE)\n- [ ] Only processes is_system=1 notes\n\n## Files\n- src/core/note_parser.rs (new)\n- src/core/mod.rs (add `pub mod note_parser;`)\n- src/cli/commands/sync.rs (call after other ref extraction steps)\n\n## TDD Loop\nRED: tests/note_parser_tests.rs:\n- `test_parse_mentioned_in_mr` - \"mentioned in !567\" → ParsedCrossRef { mentioned, merge_request, 567 }\n- `test_parse_mentioned_in_issue` - \"mentioned in #234\" → ParsedCrossRef { mentioned, issue, 234 }\n- `test_parse_mentioned_cross_project` - \"mentioned in group/repo!789\" → with project path\n- `test_parse_closed_by_mr` - \"closed by !567\" → ParsedCrossRef { closes, merge_request, 567 }\n- `test_parse_multiple_refs` - note with two mentions → two refs\n- `test_parse_no_refs` - \"Updated the description\" → empty vec\n- `test_extract_refs_from_system_notes_integration` - seed DB with system notes, verify entity_references created\n\nGREEN: Implement regex patterns and extraction logic\n\nVERIFY: `cargo test note_parser -- --nocapture`\n\n## Edge Cases\n- Non-English GitLab instances: \"ajouté l'étiquette ~bug\" won't match — this is accepted limitation, logged at 
debug\n- Multi-level group paths: \"mentioned in top/sub/project#123\" — regex needs to handle arbitrary depth ([\\w\\-]+(?:/[\\w\\-]+)+)\n- Note body may contain markdown links that look like refs: \"[#123](url)\" — the regex should handle this correctly since the prefix \"mentioned in\" is required\n- Same ref mentioned multiple times in same note — dedup via INSERT OR IGNORE\n- Note may reference itself (e.g., system note on issue #123 says \"mentioned in #123\") — technically valid, store it","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-02T21:32:33.663304Z","created_by":"tayloreernisse","updated_at":"2026-02-04T20:13:33.398960Z","closed_at":"2026-02-04T20:13:33.398868Z","close_reason":"Completed: parse_cross_refs regex parser, extract_refs_from_system_notes DB function, wired into orchestrator. 17 tests passing.","compaction_level":0,"original_size":0,"labels":["gate-2","parsing","phase-b"],"dependencies":[{"issue_id":"bd-1ji","depends_on_id":"bd-1se","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ji","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1k1","title":"Implement FTS5 search function and query sanitization","description":"## Background\nFTS5 search is the core lexical retrieval engine. It wraps SQLite's FTS5 with safe query parsing that prevents user input from causing SQL syntax errors, while preserving useful features like prefix search for type-ahead. The search function returns ranked results with BM25 scores and contextual snippets. 
This module is the Gate A search backbone and also provides fallback search when Ollama is unavailable in Gate B.\n\n## Approach\nCreate `src/search/` module with `mod.rs` and `fts.rs` per PRD Section 3.1-3.2.\n\n**src/search/mod.rs:**\n```rust\nmod fts;\nmod filters;\n// Later beads add: mod vector; mod hybrid; mod rrf;\npub use fts::{search_fts, to_fts_query, FtsResult, FtsQueryMode, generate_fallback_snippet, get_result_snippet};\n```\n\n**src/search/fts.rs — key functions:**\n\n1. `to_fts_query(raw: &str, mode: FtsQueryMode) -> String`\n - Safe mode: wrap each token in quotes, escape internal quotes, preserve trailing * on alphanumeric tokens\n - Raw mode: pass through unchanged\n\n2. `search_fts(conn: &Connection, query: &str, limit: usize, mode: FtsQueryMode) -> Result>`\n - Uses `bm25(documents_fts)` for ranking\n - Uses `snippet(documents_fts, 1, '', '', '...', 64)` for context\n - Column index 1 = content_text (0=title)\n\n3. `generate_fallback_snippet(content_text: &str, max_chars: usize) -> String`\n - For semantic-only results without FTS snippets\n - Uses `truncate_utf8()` for safe byte boundaries\n\n4. `truncate_utf8(s: &str, max_bytes: usize) -> &str`\n - Walks backward from max_bytes to find nearest char boundary\n\n5. 
`get_result_snippet(fts_snippet: Option<&str>, content_text: &str) -> String`\n - Prefers FTS snippet, falls back to truncated content\n\nUpdate `src/lib.rs`: add `pub mod search;`\n\n## Acceptance Criteria\n- [ ] Porter stemming works: search \"searching\" matches document containing \"search\"\n- [ ] Prefix search works: `auth*` matches \"authentication\"\n- [ ] Empty query returns empty Vec (no error)\n- [ ] Special characters don't cause FTS5 errors: `-`, `\"`, `:`, `*`\n- [ ] Query `\"-DWITH_SSL\"` returns results (dash not treated as NOT operator)\n- [ ] Query `C++` returns results (special chars preserved in quotes)\n- [ ] Safe mode preserves trailing `*` on alphanumeric tokens: `auth*` -> `\"auth\"*`\n- [ ] Raw mode passes query unchanged\n- [ ] BM25 scores returned (lower = better match)\n- [ ] Snippets contain `` tags around matches\n- [ ] `generate_fallback_snippet` truncates at word boundary, appends \"...\"\n- [ ] `truncate_utf8` never panics on multi-byte codepoints\n- [ ] `cargo test fts` passes\n\n## Files\n- `src/search/mod.rs` — new file (module root)\n- `src/search/fts.rs` — new file (FTS5 search + query sanitization)\n- `src/lib.rs` — add `pub mod search;`\n\n## TDD Loop\nRED: Tests in `fts.rs` `#[cfg(test)] mod tests`:\n- `test_safe_query_basic` — \"auth error\" -> `\"auth\" \"error\"`\n- `test_safe_query_prefix` — \"auth*\" -> `\"auth\"*`\n- `test_safe_query_special_chars` — \"C++\" -> `\"C++\"`\n- `test_safe_query_dash` — \"-DWITH_SSL\" -> `\"-DWITH_SSL\"`\n- `test_safe_query_quotes` — `he said \"hello\"` -> escaped\n- `test_raw_mode_passthrough` — raw query unchanged\n- `test_empty_query` — returns empty vec\n- `test_truncate_utf8_emoji` — truncate mid-emoji walks back\n- `test_fallback_snippet_word_boundary` — truncates at space\nGREEN: Implement to_fts_query, search_fts, helpers\nVERIFY: `cargo test fts`\n\n## Edge Cases\n- Query with only whitespace: treated as empty, returns empty\n- Query with only special characters: quoted, may return 
no results (not an error)\n- Very long query (1000+ chars): works but may be slow (no explicit limit)\n- FTS5 snippet returns empty string: fallback to truncated content_text\n- Non-alphanumeric prefix: `C++*` — NOT treated as prefix (special chars present)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:13.005179Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:23:35.204290Z","closed_at":"2026-01-30T17:23:35.204106Z","close_reason":"Completed: to_fts_query (safe/raw modes), search_fts with BM25+snippets, generate_fallback_snippet, get_result_snippet, truncate_utf8 reuse, 13 tests pass","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1k1","depends_on_id":"bd-221","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -49,6 +53,7 @@ {"id":"bd-1kh","title":"[CP0] Raw payload handling - compression and deduplication","description":"## Background\n\nRaw payload storage allows replaying API responses for debugging and audit. Compression reduces storage for large payloads. SHA-256 deduplication prevents storing identical payloads multiple times (important for frequently polled resources that haven't changed).\n\nReference: docs/prd/checkpoint-0.md section \"Raw Payload Handling\"\n\n## Approach\n\n**src/core/payloads.ts:**\n```typescript\nimport { createHash } from 'node:crypto';\nimport { gzipSync, gunzipSync } from 'node:zlib';\nimport Database from 'better-sqlite3';\nimport { nowMs } from './time';\n\ninterface StorePayloadOptions {\n projectId: number | null;\n resourceType: string; // 'project' | 'issue' | 'mr' | 'note' | 'discussion'\n gitlabId: string; // TEXT because discussion IDs are strings\n payload: unknown; // JSON-serializable object\n compress: boolean; // from config.storage.compressRawPayloads\n}\n\nexport function storePayload(db: Database.Database, options: StorePayloadOptions): number | null {\n // 1. JSON.stringify the payload\n // 2. 
SHA-256 hash the JSON bytes\n // 3. Check for duplicate by (project_id, resource_type, gitlab_id, payload_hash)\n // 4. If duplicate, return existing ID\n // 5. If compress=true, gzip the JSON bytes\n // 6. INSERT with content_encoding='gzip' or 'identity'\n // 7. Return lastInsertRowid\n}\n\nexport function readPayload(db: Database.Database, id: number): unknown {\n // 1. SELECT content_encoding, payload FROM raw_payloads WHERE id = ?\n // 2. If gzip, decompress\n // 3. JSON.parse and return\n}\n```\n\n## Acceptance Criteria\n\n- [ ] storePayload() with compress=true stores gzip-encoded payload\n- [ ] storePayload() with compress=false stores identity-encoded payload\n- [ ] Duplicate payload (same hash) returns existing row ID, not new row\n- [ ] readPayload() correctly decompresses gzip payloads\n- [ ] readPayload() returns null for non-existent ID\n- [ ] SHA-256 hash computed from pre-compression JSON bytes\n- [ ] Large payloads (100KB+) compress to ~10-20% of original size\n\n## Files\n\nCREATE:\n- src/core/payloads.ts\n- tests/unit/payloads.test.ts\n\n## TDD Loop\n\nRED:\n```typescript\n// tests/unit/payloads.test.ts\ndescribe('Payload Storage', () => {\n describe('storePayload', () => {\n it('stores uncompressed payload with identity encoding')\n it('stores compressed payload with gzip encoding')\n it('deduplicates identical payloads by hash')\n it('stores different payloads for same gitlab_id')\n })\n\n describe('readPayload', () => {\n it('reads uncompressed payload')\n it('reads and decompresses gzip payload')\n it('returns null for non-existent id')\n })\n})\n```\n\nGREEN: Implement storePayload() and readPayload()\n\nVERIFY: `npm run test -- tests/unit/payloads.test.ts`\n\n## Edge Cases\n\n- gitlabId is TEXT not INTEGER - discussion IDs are UUIDs\n- Compression ratio varies - some JSON compresses better than others\n- null projectId valid for global resources (like user profile)\n- Hash collision extremely unlikely with SHA-256 but unique index 
enforces","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:50.189494Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:19:12.854771Z","closed_at":"2026-01-25T03:19:12.854372Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1kh","depends_on_id":"bd-3ng","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1ksf","title":"Wire up hybrid search: FTS5 + vector + RRF ranking","description":"## Problem\nlore search hardcodes lexical-only mode. The full hybrid/vector/RRF backend is ALREADY IMPLEMENTED and tested -- it just needs to be called from the CLI.\n\n## Current State (Verified 2026-02-12)\n\n### Backend: COMPLETE\n- `search_hybrid()` in src/search/hybrid.rs:47 — async fn, handles Lexical/Semantic/Hybrid modes with graceful degradation\n- `search_vector()` in src/search/vector.rs:43 — sqlite-vec KNN with chunk deduplication and adaptive k multiplier\n- `rank_rrf()` in src/search/rrf.rs:13 — reciprocal rank fusion with normalization (7 passing tests)\n- `SearchMode::parse()` — parses hybrid, lexical/fts, semantic/vector\n- `OllamaClient::embed_batch()` in src/embedding/ollama.rs:103 — batch embedding via Ollama /api/embed endpoint\n- All exported from src/search/mod.rs:7-14\n\n### CLI: BROKEN\n- src/cli/commands/search.rs:61 `run_search()` is SYNCHRONOUS (not async)\n- Line 76: `let actual_mode = \"lexical\";` — hardcoded\n- Lines 77-82: warns if user requests vector/hybrid, falls back to lexical\n- Line 161: calls `search_fts()` directly instead of `search_hybrid()`\n- Line 172: calls `rank_rrf(&[], &fts_tuples)` — empty vector list, FTS-only ranking\n- Lines 143-152: manually constructs `SearchFilters` (this code is reusable)\n- Lines 187-223: hydrates + maps to `SearchResultDisplay` (this can be adapted)\n\n### Entry Point\n- src/main.rs:1731 `async fn handle_search()` — IS async, but calls `run_search()` synchronously at line 1758\n- main.rs 
is 2579 lines total\n\n## Actual Work Required\n\n### Step 1: Make run_search async\nChange `pub fn run_search(...)` to `pub async fn run_search(...)` in search.rs:61.\nUpdate handle_search call site (main.rs:1758) to `.await`.\n\n### Step 2: Create OllamaClient when mode != lexical\nPattern from src/cli/commands/embed.rs — reuse `OllamaConfig` from config:\n```rust\nlet client = if actual_mode != SearchMode::Lexical {\n let ollama_cfg = &config.embedding;\n Some(OllamaClient::new(&ollama_cfg.ollama_url, &ollama_cfg.model))\n} else {\n None\n};\n```\n\n### Step 3: Replace manual FTS+filter+rank with search_hybrid call\nReplace lines 161-172 (search_fts + rank_rrf) with:\n```rust\nlet (hybrid_results, mut hybrid_warnings) = search_hybrid(\n &conn,\n client.as_ref(),\n query,\n actual_mode,\n &filters,\n fts_mode,\n).await?;\nwarnings.append(&mut hybrid_warnings);\n```\n\n### Step 4: Map HybridResult to SearchResultDisplay\nHybridResult (src/search/hybrid.rs:39-45) has these fields:\n```rust\npub struct HybridResult {\n pub document_id: i64,\n pub score: f64, // combined score\n pub vector_rank: Option,\n pub fts_rank: Option,\n pub rrf_score: f64,\n}\n```\nNOTE: HybridResult has NO `snippet` field and NO `normalized_score` field. `score` is the combined score. The `snippet` must still be obtained from the FTS results or from `get_result_snippet()`.\n\nSearchResultDisplay needs: document_id, source_type, title, url, author, etc. 
(from hydration).\nKeep the existing hydrate_results() call (line 187) and rrf_map construction (lines 189-190), but adapt to use HybridResult instead of RrfResult:\n```rust\n// Map hybrid results for lookup\nlet hybrid_map: HashMap =\n hybrid_results.iter().map(|r| (r.document_id, r)).collect();\n\n// For each hydrated row:\nlet hr = hybrid_map.get(&row.document_id);\nlet explain_data = if explain {\n hr.map(|r| ExplainData {\n vector_rank: r.vector_rank,\n fts_rank: r.fts_rank,\n rrf_score: r.rrf_score,\n })\n} else { None };\n// score: hr.map(|r| r.score).unwrap_or(0.0)\n```\n\nFor snippets: search_hybrid calls search_fts internally, but does NOT return snippets. You need to either:\n(a) Call search_fts separately just for snippets, or\n(b) Modify search_hybrid to also return a snippet_map — preferred if touching hybrid.rs is in scope.\nSimpler approach: keep the existing `search_fts()` call for snippets, use hybrid for ranking. The FTS call is fast (<50ms) and avoids modifying the already-complete hybrid.rs.\n\n### Step 5: Determine actual_mode from config + CLI flag\n```rust\nlet actual_mode = SearchMode::parse(requested_mode).unwrap_or(SearchMode::Hybrid);\n// search_hybrid handles graceful degradation internally\n```\n\n## Signatures for Reference\n\n```rust\n// src/search/hybrid.rs:47\npub async fn search_hybrid(\n conn: &Connection,\n client: Option<&OllamaClient>,\n query: &str,\n mode: SearchMode,\n filters: &SearchFilters,\n fts_mode: FtsQueryMode,\n) -> Result<(Vec, Vec)>\n\n// src/search/hybrid.rs:39\npub struct HybridResult {\n pub document_id: i64,\n pub score: f64,\n pub vector_rank: Option,\n pub fts_rank: Option,\n pub rrf_score: f64,\n}\n\n// src/search/mod.rs exports\npub use hybrid::{HybridResult, SearchMode, search_hybrid};\npub use rrf::{RrfResult, rank_rrf};\npub use vector::{VectorResult, search_vector};\n\n// src/embedding/ollama.rs:103\npub async fn embed_batch(&self, texts: &[&str]) -> Result>>\n```\n\n## TDD Loop\nRED: Add test in 
src/search/hybrid.rs:\n- test_hybrid_lexical_fallback_no_ollama: search_hybrid with mode=Hybrid, client=None returns FTS results + warning\n- test_hybrid_mode_detection: verify default mode is Hybrid when embeddings exist\n\nGREEN: Wire search.rs to call search_hybrid() as described above\n\nVERIFY:\n```bash\ncargo test search:: && cargo clippy --all-targets -- -D warnings\ncargo run --release -- -J search 'throw time' --mode hybrid --explain | jq '.data.mode'\n# Should return \"hybrid\" (or \"lexical\" with warning if Ollama is down)\n```\n\n## Edge Cases\n- Ollama running but model not found: clear error with suggestion to run `ollama pull nomic-embed-text`\n- No embeddings in DB (never ran lore embed): search_vector returns empty, RRF uses FTS only — search_hybrid handles this gracefully\n- Query embedding returns all zeros: should still return FTS results\n- Very long query string (>1500 bytes): chunk or truncate before embedding (CHUNK_MAX_BYTES=1500)\n- sqlite-vec table missing (old DB without migration 009): graceful error from search_vector\n- OllamaConfig missing from config: check `config.embedding` exists before constructing client\n- Snippet handling: HybridResult has no snippet field — must obtain snippets from a separate search_fts call or from get_result_snippet() with content_text fallback\n\n## Files to Modify\n- src/cli/commands/search.rs — make run_search async, replace manual FTS+RRF with search_hybrid call (~80 lines replaced with ~20)\n- src/main.rs:1758 — add .await to run_search call (already in async context)\n\n## Files NOT to Modify (already complete)\n- src/search/hybrid.rs\n- src/search/vector.rs\n- src/search/rrf.rs\n- 
src/embedding/ollama.rs","status":"closed","priority":1,"issue_type":"feature","created_at":"2026-02-12T15:45:56.305343Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:49:25.720332Z","closed_at":"2026-02-12T16:49:25.720209Z","compaction_level":0,"original_size":0,"labels":["cli-imp","search"],"dependencies":[{"issue_id":"bd-1ksf","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ksf","depends_on_id":"bd-2l3s","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1l1","title":"[CP0] GitLab API client with rate limiting","description":"## Background\n\nThe GitLab client handles all API communication with rate limiting to avoid 429 errors. Uses native fetch (Node 18+). Rate limiter adds jitter to prevent thundering herd. All errors are typed for clean error handling in CLI commands.\n\nReference: docs/prd/checkpoint-0.md section \"GitLab Client\"\n\n## Approach\n\n**src/gitlab/client.ts:**\n```typescript\nexport class GitLabClient {\n private baseUrl: string;\n private token: string;\n private rateLimiter: RateLimiter;\n\n constructor(options: { baseUrl: string; token: string; requestsPerSecond?: number }) {\n this.baseUrl = options.baseUrl.replace(/\\/$/, '');\n this.token = options.token;\n this.rateLimiter = new RateLimiter(options.requestsPerSecond ?? 
10);\n }\n\n async getCurrentUser(): Promise\n async getProject(pathWithNamespace: string): Promise\n private async request(path: string, options?: RequestInit): Promise\n}\n\nclass RateLimiter {\n private lastRequest = 0;\n private minInterval: number;\n\n constructor(requestsPerSecond: number) {\n this.minInterval = 1000 / requestsPerSecond;\n }\n\n async acquire(): Promise {\n // Wait if too soon since last request\n // Add 0-50ms jitter\n }\n}\n```\n\n**src/gitlab/types.ts:**\n```typescript\nexport interface GitLabUser {\n id: number;\n username: string;\n name: string;\n}\n\nexport interface GitLabProject {\n id: number;\n path_with_namespace: string;\n default_branch: string;\n web_url: string;\n created_at: string;\n updated_at: string;\n}\n```\n\n**Integration tests with MSW (Mock Service Worker):**\nSet up MSW handlers that mock GitLab API responses for /api/v4/user and /api/v4/projects/:path\n\n## Acceptance Criteria\n\n- [ ] getCurrentUser() returns GitLabUser with id, username, name\n- [ ] getProject(\"group/project\") URL-encodes path correctly\n- [ ] 401 response throws GitLabAuthError\n- [ ] 404 response throws GitLabNotFoundError\n- [ ] 429 response throws GitLabRateLimitError with retryAfter from header\n- [ ] Network failure throws GitLabNetworkError\n- [ ] Rate limiter enforces minimum interval between requests\n- [ ] Rate limiter adds random jitter (0-50ms)\n- [ ] tests/integration/gitlab-client.test.ts passes (6 tests)\n\n## Files\n\nCREATE:\n- src/gitlab/client.ts\n- src/gitlab/types.ts\n- tests/integration/gitlab-client.test.ts\n- tests/fixtures/mock-responses/gitlab-user.json\n- tests/fixtures/mock-responses/gitlab-project.json\n\n## TDD Loop\n\nRED:\n```typescript\n// tests/integration/gitlab-client.test.ts\ndescribe('GitLab Client', () => {\n it('authenticates with valid PAT')\n it('returns 401 for invalid PAT')\n it('fetches project by path')\n it('handles rate limiting (429) with Retry-After')\n it('respects rate limit (requests per 
second)')\n it('adds jitter to rate limiting')\n})\n```\n\nGREEN: Implement client.ts and types.ts\n\nVERIFY: `npm run test -- tests/integration/gitlab-client.test.ts`\n\n## Edge Cases\n\n- Path with special characters (spaces, slashes) must be URL-encoded\n- Retry-After header may be missing - default to 60s\n- Network timeout should be handled (use AbortController)\n- Rate limiter jitter prevents multiple clients syncing in lockstep\n- baseUrl trailing slash should be stripped","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:49.842981Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:06:39.520300Z","closed_at":"2026-01-25T03:06:39.520131Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1l1","depends_on_id":"bd-gg1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-1lja","title":"Add --issue, --mr, -p, --preflight-only CLI flags and SyncOptions extensions with validation","description":"## Background\nSurgical sync is invoked via `lore sync --issue 123 --mr 456 -p myproject`. This bead adds the CLI flags to `SyncArgs` (clap struct), extends `SyncOptions` with surgical fields, and wires them together in `handle_sync_cmd` with full validation. This is the user-facing entry point for the entire surgical sync feature.\n\nThe existing `SyncArgs` struct at lines 760-805 of `src/cli/mod.rs` defines all CLI flags for `lore sync`. `SyncOptions` at lines 20-29 of `src/cli/commands/sync.rs` is the runtime options struct passed to `run_sync`. 
`handle_sync_cmd` at lines 2070-2096 of `src/main.rs` bridges CLI args to SyncOptions and calls `run_sync`.\n\n## Approach\n\n### Step 1: Add flags to SyncArgs (src/cli/mod.rs, struct SyncArgs at line ~760)\n\nAdd after the existing `timings` field:\n\n```rust\n/// Surgically sync specific issues by IID (repeatable, must be positive)\n#[arg(long, value_parser = clap::value_parser!(u64).range(1..), action = clap::ArgAction::Append)]\npub issue: Vec,\n\n/// Surgically sync specific merge requests by IID (repeatable, must be positive)\n#[arg(long, value_parser = clap::value_parser!(u64).range(1..), action = clap::ArgAction::Append)]\npub mr: Vec,\n\n/// Scope to a single project (required when --issue or --mr is used, falls back to config.defaultProject)\n#[arg(short = 'p', long)]\npub project: Option,\n\n/// Validate remote entities exist without any DB content writes. Runs preflight network fetch only.\n#[arg(long, default_value_t = false)]\npub preflight_only: bool,\n```\n\n**Why u64 with range(1..)**: IIDs are always positive. Parse-time validation gives immediate, clear error messages from clap.\n\n### Step 2: Extend SyncOptions (src/cli/commands/sync.rs, struct SyncOptions at line ~20)\n\nAdd fields:\n\n```rust\npub issue_iids: Vec,\npub mr_iids: Vec,\npub project: Option,\npub preflight_only: bool,\n```\n\nAdd helper:\n\n```rust\nimpl SyncOptions {\n pub const MAX_SURGICAL_TARGETS: usize = 100;\n\n pub fn is_surgical(&self) -> bool {\n !self.issue_iids.is_empty() || !self.mr_iids.is_empty()\n }\n}\n```\n\n### Step 3: Wire in handle_sync_cmd (src/main.rs, function handle_sync_cmd at line ~2070)\n\nAfter existing SyncOptions construction (~line 2088):\n\n1. **Dedup IIDs** before constructing options:\n```rust\nlet mut issue_iids = args.issue;\nlet mut mr_iids = args.mr;\nissue_iids.sort_unstable();\nissue_iids.dedup();\nmr_iids.sort_unstable();\nmr_iids.dedup();\n```\n\n2. **Add new fields** to the SyncOptions construction.\n\n3. 
**Validation** (after options creation, before calling run_sync):\n- Hard cap: `issue_iids.len() + mr_iids.len() > MAX_SURGICAL_TARGETS` → error with count\n- Project required: if `is_surgical()`, use `config.effective_project(options.project.as_deref())`. If None → error saying `-p` or `defaultProject` is required\n- Incompatible flags: `--full` + surgical → error\n- Embed leakage guard: `--no-docs` without `--no-embed` in surgical mode → error (stale embeddings for regenerated docs)\n- `--preflight-only` requires surgical mode → error if not `is_surgical()`\n\n## Acceptance Criteria\n- [ ] `lore sync --issue 123` parses correctly (issue_iids = [123])\n- [ ] `lore sync --issue 123 --issue 456` produces deduplicated sorted vec\n- [ ] `lore sync --mr 789` parses correctly\n- [ ] `lore sync --issue 0` rejected at parse time by clap (range 1..)\n- [ ] `lore sync --issue -1` rejected at parse time by clap (u64 parse failure)\n- [ ] `lore sync -p myproject --issue 1` sets project = Some(\"myproject\")\n- [ ] `lore sync --preflight-only --issue 1 -p proj` sets preflight_only = true\n- [ ] `SyncOptions::is_surgical()` returns true when issue_iids or mr_iids is non-empty\n- [ ] `SyncOptions::is_surgical()` returns false when both vecs are empty\n- [ ] `SyncOptions::MAX_SURGICAL_TARGETS` is 100\n- [ ] Validation: `--issue 1` without `-p` and no defaultProject → error mentioning `-p`\n- [ ] Validation: `--issue 1` without `-p` but with defaultProject in config → uses defaultProject (no error)\n- [ ] Validation: `--full --issue 1 -p proj` → incompatibility error\n- [ ] Validation: `--no-docs --issue 1 -p proj` (without --no-embed) → embed leakage error\n- [ ] Validation: `--no-docs --no-embed --issue 1 -p proj` → accepted\n- [ ] Validation: `--preflight-only` without --issue/--mr → error\n- [ ] Validation: >100 combined targets → hard cap error\n- [ ] Normal `lore sync` (without --issue/--mr) still works identically\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo 
clippy --all-targets -- -D warnings` passes\n\n## Files\n- MODIFY: src/cli/mod.rs (add fields to SyncArgs, ~line 805)\n- MODIFY: src/cli/commands/sync.rs (extend SyncOptions + is_surgical + MAX_SURGICAL_TARGETS)\n- MODIFY: src/main.rs (wire fields + validation in handle_sync_cmd)\n\n## TDD Anchor\nRED: Write tests in `src/cli/commands/sync.rs` (in a `#[cfg(test)] mod tests` block):\n\n```rust\n#[cfg(test)]\nmod tests {\n use super::*;\n\n fn default_options() -> SyncOptions {\n SyncOptions {\n full: false,\n no_status: false,\n no_docs: false,\n no_embed: false,\n timings: false,\n issue_iids: vec![],\n mr_iids: vec![],\n project: None,\n preflight_only: false,\n }\n }\n\n #[test]\n fn is_surgical_with_issues() {\n let opts = SyncOptions { issue_iids: vec![1], ..default_options() };\n assert!(opts.is_surgical());\n }\n\n #[test]\n fn is_surgical_with_mrs() {\n let opts = SyncOptions { mr_iids: vec![10], ..default_options() };\n assert!(opts.is_surgical());\n }\n\n #[test]\n fn is_surgical_empty() {\n let opts = default_options();\n assert!(!opts.is_surgical());\n }\n\n #[test]\n fn max_surgical_targets_is_100() {\n assert_eq!(SyncOptions::MAX_SURGICAL_TARGETS, 100);\n }\n}\n```\n\nGREEN: Add the fields and `is_surgical()` method.\nVERIFY: `cargo test is_surgical && cargo test max_surgical_targets`\n\nAdditional validation tests (in integration or as unit tests on a `validate_surgical_options` helper if extracted):\n- `preflight_only_requires_surgical` — SyncOptions with preflight_only=true, empty iids → error\n- `surgical_no_docs_requires_no_embed` — SyncOptions with no_docs=true, no_embed=false, is_surgical=true → error\n- `surgical_incompatible_with_full` — SyncOptions with full=true, is_surgical=true → error\n\n## Edge Cases\n- Clap `ArgAction::Append` allows `--issue 1 --issue 2` but NOT `--issue 1,2` (no value_delimiter). 
This is intentional — comma-separated values are ambiguous and error-prone.\n- Duplicate IIDs like `--issue 123 --issue 123` are handled by dedup in handle_sync_cmd, not rejected.\n- The `effective_project` method on Config (line 309 of config.rs) already handles the `-p` / defaultProject fallback: `cli_project.or(self.default_project.as_deref())`.\n- The `-p` short flag does not conflict with any existing SyncArgs flags.\n\n## Dependency Context\nThis is a leaf dependency with no upstream blockers. Can be done in parallel with bd-1sc6, bd-159p, bd-tiux. Downstream bead bd-1i4i (orchestrator) reads these fields to dispatch surgical vs standard sync.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:12:43.921399Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:02:47.520632Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-1lja","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-1m8","title":"Extend 'lore stats --check' for event table integrity and queue health","description":"## Background\nThe existing stats --check command validates data integrity. Need to extend it for event tables (referential integrity) and dependent job queue health (stuck locks, retryable jobs). This provides operators and agents a way to detect data quality issues after sync.\n\n## Approach\nExtend src/cli/commands/stats.rs check mode:\n\n**New checks:**\n\n1. Event FK integrity:\n```sql\n-- Orphaned state events (issue_id points to non-existent issue)\nSELECT COUNT(*) FROM resource_state_events rse\nWHERE rse.issue_id IS NOT NULL\n AND NOT EXISTS (SELECT 1 FROM issues i WHERE i.id = rse.issue_id);\n-- (repeat for merge_request_id, and for label + milestone event tables)\n```\n\n2. 
Queue health:\n```sql\n-- Pending jobs by type\nSELECT job_type, COUNT(*) FROM pending_dependent_fetches GROUP BY job_type;\n-- Stuck locks (locked_at older than 5 minutes)\nSELECT COUNT(*) FROM pending_dependent_fetches WHERE locked_at IS NOT NULL AND locked_at < ?;\n-- Retryable jobs (attempts > 0, not locked)\nSELECT COUNT(*) FROM pending_dependent_fetches WHERE attempts > 0 AND locked_at IS NULL;\n-- Max attempts (jobs that may be permanently failing)\nSELECT job_type, MAX(attempts) FROM pending_dependent_fetches GROUP BY job_type;\n```\n\n3. Human output per check: PASS / WARN / FAIL with counts\n```\nEvent FK integrity: PASS (0 orphaned events)\nQueue health: WARN (3 stuck locks, 12 retryable jobs)\n```\n\n4. Robot JSON: structured health report\n```json\n{\n \"event_integrity\": {\n \"status\": \"pass\",\n \"orphaned_state_events\": 0,\n \"orphaned_label_events\": 0,\n \"orphaned_milestone_events\": 0\n },\n \"queue_health\": {\n \"status\": \"warn\",\n \"pending_by_type\": {\"resource_events\": 5, \"mr_closes_issues\": 2},\n \"stuck_locks\": 3,\n \"retryable_jobs\": 12,\n \"max_attempts_by_type\": {\"resource_events\": 5}\n }\n}\n```\n\n## Acceptance Criteria\n- [ ] Detects orphaned events (FK target missing)\n- [ ] Detects stuck locks (locked_at older than threshold)\n- [ ] Reports retryable job count and max attempts\n- [ ] Human output shows PASS/WARN/FAIL per check\n- [ ] Robot JSON matches structured schema\n- [ ] Graceful when event/queue tables don't exist\n\n## Files\n- src/cli/commands/stats.rs (extend check mode)\n\n## TDD Loop\nRED: tests/stats_check_tests.rs:\n- `test_stats_check_events_pass` - clean data, verify PASS\n- `test_stats_check_events_orphaned` - delete an issue with events remaining, verify FAIL count\n- `test_stats_check_queue_stuck_locks` - set old locked_at, verify WARN\n- `test_stats_check_queue_retryable` - fail some jobs, verify retryable count\n\nGREEN: Add the check queries and formatting\n\nVERIFY: `cargo test stats_check -- 
--nocapture`\n\n## Edge Cases\n- FK with CASCADE should prevent orphaned events in normal operation — but manual DB edits or bugs could cause them\n- Tables may not exist if migration 011 not applied — check table existence before querying\n- Empty queue is PASS (not WARN for \"no jobs found\")\n- Distinguish between \"0 stuck locks\" (good) and \"queue table doesn't exist\" (skip check)","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-02T21:31:57.422916Z","created_by":"tayloreernisse","updated_at":"2026-02-03T16:23:13.409909Z","closed_at":"2026-02-03T16:23:13.409717Z","close_reason":"Extended IntegrityResult with orphan_state/label/milestone_events and queue_stuck_locks/queue_max_attempts. Added FK integrity queries for all 3 event tables and queue health checks. Updated human output with PASS/WARN/FAIL indicators and robot JSON.","compaction_level":0,"original_size":0,"labels":["cli","gate-1","phase-b"],"dependencies":[{"issue_id":"bd-1m8","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1m8","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1m8","depends_on_id":"bd-tir","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1mf","title":"[CP1] gi sync-status enhancement","description":"Enhance sync-status from CP0 stub to show issue cursors.\n\nOutput:\n- Last run timestamp and duration\n- Cursor positions per project (issues resource_type)\n- Entity counts (issues, discussions, notes)\n\nFiles: src/cli/commands/sync-status.ts (update existing)\nDone when: Shows cursor positions and counts after 
ingestion","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T15:20:36.449088Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.157235Z","closed_at":"2026-01-25T15:21:35.157235Z","deleted_at":"2026-01-25T15:21:35.157232Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-1mju","title":"Vertical slice integration test + SLO verification","description":"## Background\nThe vertical slice gate validates that core screens work together end-to-end with real data flows and meet performance SLOs. This is a manual + automated verification pass.\n\n## Approach\nCreate integration tests in crates/lore-tui/tests/:\n- test_full_nav_flow: Dashboard -> press i -> IssueList loads -> press Enter -> IssueDetail loads -> press Esc -> back to IssueList with cursor preserved -> press H -> Dashboard\n- test_filter_requery: IssueList -> type filter -> verify re-query triggers and results update\n- test_stale_result_guard: rapidly navigate between screens, verify no stale data displayed\n- Performance benchmarks: run M-tier fixture, measure p95 nav latency, assert < 75ms\n- Stuck-input check: fuzz InputMode transitions, assert always recoverable via Esc or Ctrl+C\n- Cancel latency: start sync, cancel, measure time to acknowledgment, assert < 2s\n\n## Acceptance Criteria\n- [ ] Full nav flow test passes without panic\n- [ ] Filter re-query test shows updated results\n- [ ] No stale data displayed during rapid navigation\n- [ ] p95 nav latency < 75ms on M-tier fixtures\n- [ ] Zero stuck-input states across 1000 random key sequences\n- [ ] Sync cancel acknowledged p95 < 2s\n- [ ] All state preserved correctly on back-navigation\n\n## Files\n- CREATE: crates/lore-tui/tests/vertical_slice.rs\n\n## TDD Anchor\nRED: Write test_dashboard_to_issue_detail_roundtrip that navigates Dashboard -> IssueList -> IssueDetail -> Esc -> IssueList, asserts cursor position 
preserved.\nGREEN: Ensure all navigation and state preservation is wired up.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml vertical_slice\n\n## Edge Cases\n- Tests need FakeClock and synthetic DB fixtures (not real GitLab)\n- ftui test harness required for rendering tests without TTY\n- Performance benchmarks may vary by machine — use relative thresholds\n\n## Dependency Context\nRequires all Phase 2 screens: Dashboard, Issue List, Issue Detail, MR List, MR Detail.\nRequires NavigationStack, TaskSupervisor, DbManager from Phase 1.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:00:18.310264Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:33.796953Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1mju","depends_on_id":"bd-3pxe","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1mju","depends_on_id":"bd-3t1b","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1mju","depends_on_id":"bd-3ty8","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1mju","depends_on_id":"bd-8ab7","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -56,12 +61,13 @@ {"id":"bd-1n5q","title":"lore brief: situational awareness for topic/module/person","description":"## Background\nComposable capstone command. An agent says \"I am about to work on auth\" and gets everything in one call: open issues, active MRs, experts, recent activity, unresolved threads, related context. Replaces 5 separate lore calls with 1.\n\n## Input Modes\n1. Topic: `lore brief 'authentication'` — FTS search to find relevant entities, aggregate\n2. Path: `lore brief --path src/auth/` — who expert internals for path expertise\n3. Person: `lore brief --person teernisse` — who workload internals\n4. 
Entity: `lore brief issues 3864` — single entity focus with cross-references\n\n## Section Assembly Architecture\n\n### Reuse existing run_* functions (ship faster, recommended for v1)\nEach section calls existing CLI command functions and converts their output.\n\nIMPORTANT: All existing run_* functions take `&Config`, NOT `&Connection`. The Config contains the db_path and each function opens its own connection internally.\n\n```rust\n// In src/cli/commands/brief.rs\n\nuse crate::cli::commands::list::{run_list_issues, run_list_mrs, ListFilters, MrListFilters};\nuse crate::cli::commands::who::{run_who, WhoArgs, WhoMode};\nuse crate::core::config::Config;\n\npub async fn run_brief(config: &Config, args: BriefArgs) -> Result {\n let mut sections_computed = Vec::new();\n\n // 1. open_issues: reuse list.rs\n // Signature: pub fn run_list_issues(config: &Config, filters: ListFilters) -> Result\n // Located at src/cli/commands/list.rs:268\n let open_issues = run_list_issues(config, ListFilters {\n state: Some(\"opened\".into()),\n limit: Some(5),\n project: args.project.clone(),\n // ... scope by topic/path/person based on mode\n ..Default::default()\n })?;\n sections_computed.push(\"open_issues\");\n\n // 2. active_mrs: reuse list.rs\n // Signature: pub fn run_list_mrs(config: &Config, filters: MrListFilters) -> Result\n // Located at src/cli/commands/list.rs:476\n let active_mrs = run_list_mrs(config, MrListFilters {\n state: Some(\"opened\".into()),\n limit: Some(5),\n project: args.project.clone(),\n ..Default::default()\n })?;\n sections_computed.push(\"active_mrs\");\n\n // 3. experts: reuse who.rs\n // Signature: pub fn run_who(config: &Config, args: &WhoArgs) -> Result\n // Located at src/cli/commands/who.rs:276\n let experts = run_who(config, &WhoArgs {\n mode: WhoMode::Expert,\n path: args.path.clone(),\n limit: Some(3),\n ..Default::default()\n })?;\n sections_computed.push(\"experts\");\n\n // 4. 
recent_activity: reuse timeline internals\n // The timeline pipeline is 5-stage (SEED->HYDRATE->EXPAND->COLLECT->RENDER)\n // Types in src/core/timeline.rs, seed in src/core/timeline_seed.rs\n // ...etc\n}\n```\n\nNOTE: ListFilters and MrListFilters may not implement Default. Check before using `..Default::default()`. If they don't, derive it or construct all fields explicitly.\n\n### Concrete Function References (src/cli/commands/)\n| Module | Function | Signature | Line |\n|--------|----------|-----------|------|\n| list.rs | run_list_issues | `(config: &Config, filters: ListFilters) -> Result` | 268 |\n| list.rs | run_list_mrs | `(config: &Config, filters: MrListFilters) -> Result` | 476 |\n| who.rs | run_who | `(config: &Config, args: &WhoArgs) -> Result` | 276 |\n| search.rs | run_search | `(config: &Config, query: &str, cli_filters: SearchCliFilters, fts_mode: FtsQueryMode, requested_mode: &str, explain: bool) -> Result` | 61 |\n\nNOTE: run_search is currently synchronous (pub fn, not pub async fn). If bd-1ksf ships first, it becomes async. 
Brief should handle both cases — call `.await` if async, direct call if sync.\n\n### Section Details\n| Section | Source | Limit | Fallback |\n|---------|--------|-------|----------|\n| open_issues | list.rs with state=opened | 5 | empty array |\n| active_mrs | list.rs with state=opened | 5 | empty array |\n| experts | who.rs Expert mode | 3 | empty array (no path data) |\n| recent_activity | timeline pipeline | 10 events | empty array |\n| unresolved_threads | SQL: discussions WHERE resolved=false | 5 | empty array |\n| related | search_vector() via bd-8con | 5 | omit section (no embeddings) |\n| warnings | computed from dates/state | all | empty array |\n\n### Warning Generation\n```rust\nfn compute_warnings(issues: &[IssueRow]) -> Vec {\n let now = chrono::Utc::now();\n issues.iter().filter_map(|i| {\n let updated = parse_timestamp(i.updated_at)?;\n let days_stale = (now - updated).num_days();\n if days_stale > 30 {\n Some(format!(\"Issue #{} has no activity for {} days\", i.iid, days_stale))\n } else { None }\n }).chain(\n issues.iter().filter(|i| i.assignees.is_empty())\n .map(|i| format!(\"Issue #{} is unassigned\", i.iid))\n ).collect()\n}\n```\n\n## Robot Mode Output Schema\n```json\n{\n \"ok\": true,\n \"data\": {\n \"mode\": \"topic\",\n \"query\": \"authentication\",\n \"summary\": \"3 open issues, 2 active MRs, top expert: teernisse\",\n \"open_issues\": [{ \"iid\": 123, \"title\": \"...\", \"state\": \"opened\", \"assignees\": [...], \"updated_at\": \"...\", \"labels\": [...] 
}],\n \"active_mrs\": [{ \"iid\": 456, \"title\": \"...\", \"state\": \"opened\", \"author\": \"...\", \"draft\": false, \"updated_at\": \"...\" }],\n \"experts\": [{ \"username\": \"teernisse\", \"score\": 42, \"last_activity\": \"...\" }],\n \"recent_activity\": [{ \"timestamp\": \"...\", \"event_type\": \"state_change\", \"entity_ref\": \"issues#123\", \"summary\": \"...\", \"actor\": \"...\" }],\n \"unresolved_threads\": [{ \"discussion_id\": \"abc\", \"entity_ref\": \"issues#123\", \"started_by\": \"...\", \"note_count\": 5, \"last_note_at\": \"...\" }],\n \"related\": [{ \"iid\": 789, \"title\": \"...\", \"similarity_score\": 0.85 }],\n \"warnings\": [\"Issue #3800 has no activity for 45 days\"]\n },\n \"meta\": { \"elapsed_ms\": 1200, \"sections_computed\": [\"open_issues\", \"active_mrs\", \"experts\", \"recent_activity\"] }\n}\n```\n\n## Clap Registration\n```rust\n// In src/main.rs Commands enum, add:\nBrief {\n /// Free-text topic, entity type, or omit for project-wide brief\n query: Option,\n /// Focus on a file path (who expert mode)\n #[arg(long)]\n path: Option,\n /// Focus on a person (who workload mode)\n #[arg(long)]\n person: Option,\n /// Scope to project (fuzzy match)\n #[arg(short, long)]\n project: Option,\n /// Maximum items per section\n #[arg(long, default_value = \"5\")]\n section_limit: usize,\n},\n```\n\n## TDD Loop\nRED: Tests in src/cli/commands/brief.rs:\n- test_brief_topic_returns_all_sections: insert test data, search 'auth', assert all section keys present in response\n- test_brief_path_uses_who_expert: brief --path src/auth/, assert experts section populated\n- test_brief_person_uses_who_workload: brief --person user, assert open_issues filtered to user's assignments\n- test_brief_warnings_stale_issue: insert issue with updated_at > 30 days ago, assert warning generated\n- test_brief_token_budget: robot mode output for topic query is under 12000 bytes (~3000 tokens)\n- test_brief_no_embeddings_graceful: related section omitted 
(not errored) when no embeddings exist\n- test_brief_empty_topic: zero matches returns valid JSON with empty arrays + \"No data found\" summary\n\nGREEN: Implement brief with section assembly, calling existing run_* functions\n\nVERIFY:\n```bash\ncargo test brief:: && cargo clippy --all-targets -- -D warnings\ncargo run --release -- -J brief 'throw time' | jq '.data | keys'\ncargo run --release -- -J brief 'throw time' | wc -c # target <12000\n```\n\n## Acceptance Criteria\n- [ ] lore brief TOPIC returns all sections for free-text topic\n- [ ] lore brief --path PATH returns path-focused briefing with experts\n- [ ] lore brief --person USERNAME returns person-focused briefing\n- [ ] lore brief issues N returns entity-focused briefing\n- [ ] Robot mode output under 12000 bytes (~3000 tokens)\n- [ ] Each section degrades gracefully if its data source is unavailable\n- [ ] summary field is auto-generated one-liner from section counts\n- [ ] warnings detect: stale issues (>30d), unassigned, no due date\n- [ ] Performance: <2s total (acceptable since composing multiple queries)\n- [ ] Command registered in main.rs and robot-docs\n\n## Edge Cases\n- Topic with zero matches: return empty sections + \"No data found for this topic\" summary\n- Path that nobody has touched: experts empty, related may still have results\n- Person not found in DB: exit code 17 with suggestion\n- All sections empty: still return valid JSON with empty arrays\n- Very broad topic (\"the\"): may return too many results — each section respects its limit cap\n- ListFilters/MrListFilters may not derive Default — construct all fields explicitly if needed\n\n## Dependencies\n- Hybrid search (bd-1ksf) for topic relevance ranking\n- lore who (already shipped) for expertise\n- lore related (bd-8con) for semantic connections (BLOCKER — related section is core to the feature)\n- Timeline pipeline (already shipped) for recent activity\n\n## Dependency Context\n- **bd-1ksf (hybrid search)**: Provides 
`search_hybrid()` which brief uses for topic mode to find relevant entities. Without it, topic mode falls back to FTS-only via `search_fts()`.\n- **bd-8con (related)**: Provides `run_related()` which brief calls to populate the `related` section with semantically similar entities. This is a blocking dependency — the related section is a core differentiator.\n\n## Files to Create/Modify\n- NEW: src/cli/commands/brief.rs\n- src/cli/commands/mod.rs (add pub mod brief; re-export)\n- src/main.rs (register Brief subcommand in Commands enum, add handle_brief fn)\n- Reuse: list.rs, who.rs, timeline.rs, search.rs, show.rs internals","status":"open","priority":2,"issue_type":"feature","created_at":"2026-02-12T15:47:22.893231Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:31:33.752020Z","compaction_level":0,"original_size":0,"labels":["cli-imp","intelligence"],"dependencies":[{"issue_id":"bd-1n5q","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1n5q","depends_on_id":"bd-1ksf","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1n5q","depends_on_id":"bd-8con","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1nf","title":"Register 'lore timeline' command with all flags","description":"## Background\n\nThis bead wires the `lore timeline` command into the CLI — adding the subcommand to the Commands enum, defining all flags, registering in VALID_COMMANDS, and dispatching to the timeline handler. 
The actual query logic and rendering are in separate beads.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 3.1 (Command Design).\n\n## Codebase Context\n\n- Commands enum in `src/cli/mod.rs` (line ~86): uses #[derive(Subcommand)] with nested Args structs\n- VALID_COMMANDS in `src/main.rs` (line ~448): &[&str] array for fuzzy command matching\n- Handler dispatch in `src/main.rs` match on Commands:: variants\n- robot-docs manifest in `src/main.rs`: registers commands for `lore robot-docs` output\n- Existing pattern: `Sync(SyncArgs)`, `Search(SearchArgs)`, etc.\n- No timeline module exists yet — this bead creates the CLI entry point only\n\n## Approach\n\n### 1. TimelineArgs struct (`src/cli/mod.rs`):\n\n```rust\n/// Show a chronological timeline of events matching a query\n#[derive(Parser, Debug)]\npub struct TimelineArgs {\n /// Search query (keywords to find in issues, MRs, and discussions)\n pub query: String,\n\n /// Scope to a specific project (fuzzy match)\n #[arg(short = 'p', long)]\n pub project: Option,\n\n /// Only show events after this date (e.g. \"6m\", \"2w\", \"2024-01-01\")\n #[arg(long)]\n pub since: Option,\n\n /// Cross-reference expansion depth (0 = no expansion)\n #[arg(long, default_value = \"1\")]\n pub depth: usize,\n\n /// Also follow 'mentioned' edges during expansion (high fan-out)\n #[arg(long = \"expand-mentions\")]\n pub expand_mentions: bool,\n\n /// Maximum number of events to display\n #[arg(short = 'n', long = \"limit\", default_value = \"100\")]\n pub limit: usize,\n}\n```\n\n### 2. Commands enum variant:\n\n```rust\n/// Show a chronological timeline of events matching a query\n#[command(name = \"timeline\")]\nTimeline(TimelineArgs),\n```\n\n### 3. Handler in `src/main.rs`:\n\n```rust\nCommands::Timeline(args) => {\n // Placeholder: will be filled by bd-2f2 (human) and bd-dty (robot)\n // For now: resolve project, call timeline query, dispatch to renderer\n}\n```\n\n### 4. 
VALID_COMMANDS: add `\"timeline\"` to the array\n\n### 5. robot-docs: add timeline command description to manifest\n\n## Acceptance Criteria\n\n- [ ] `TimelineArgs` struct with all 6 flags: query, project, since, depth, expand-mentions, limit\n- [ ] Commands::Timeline variant registered in Commands enum\n- [ ] Handler stub in src/main.rs dispatches to timeline logic\n- [ ] `\"timeline\"` added to VALID_COMMANDS array\n- [ ] robot-docs manifest includes timeline command description\n- [ ] `lore timeline --help` shows correct help text\n- [ ] `lore timeline` without query shows error (query is required positional)\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/cli/mod.rs` (TimelineArgs struct + Commands::Timeline variant)\n- `src/main.rs` (handler dispatch + VALID_COMMANDS + robot-docs entry)\n\n## TDD Loop\n\nNo unit tests for CLI wiring. Verify with:\n\n```bash\ncargo check --all-targets\ncargo run -- timeline --help\n```\n\n## Edge Cases\n\n- --since parsing: reuse existing date parsing from ListFilters (src/cli/mod.rs handles \"7d\", \"2w\", \"YYYY-MM-DD\")\n- --depth 0: valid, means no cross-reference expansion\n- --expand-mentions: off by default because mentioned edges have high fan-out\n","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:33:28.422082Z","created_by":"tayloreernisse","updated_at":"2026-02-06T13:49:15.313047Z","closed_at":"2026-02-06T13:49:15.312993Z","close_reason":"Wired lore timeline command: TimelineArgs with 9 flags, Commands::Timeline variant, handle_timeline handler, VALID_COMMANDS entry, robot-docs manifest with temporal_intelligence 
workflow","compaction_level":0,"original_size":0,"labels":["cli","gate-3","phase-b"],"dependencies":[{"issue_id":"bd-1nf","depends_on_id":"bd-2f2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1nf","depends_on_id":"bd-dty","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1nf","depends_on_id":"bd-ike","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1np","title":"[CP1] GitLab types for issues, discussions, notes","description":"## Background\n\nGitLab types define the Rust structs for deserializing GitLab API responses. These types are the foundation for all ingestion work - issues, discussions, and notes must be correctly typed for serde to parse them.\n\n## Approach\n\nAdd types to `src/gitlab/types.rs` with serde derives:\n\n### GitLabIssue\n\n```rust\n#[derive(Debug, Clone, Deserialize)]\npub struct GitLabIssue {\n pub id: i64, // GitLab global ID\n pub iid: i64, // Project-scoped issue number\n pub project_id: i64,\n pub title: String,\n pub description: Option,\n pub state: String, // \"opened\" | \"closed\"\n pub created_at: String, // ISO 8601\n pub updated_at: String, // ISO 8601\n pub closed_at: Option,\n pub author: GitLabAuthor,\n pub labels: Vec, // Array of label names (CP1 canonical)\n pub web_url: String,\n}\n```\n\nNOTE: `labels_details` intentionally NOT modeled - varies across GitLab versions.\n\n### GitLabAuthor\n\n```rust\n#[derive(Debug, Clone, Deserialize)]\npub struct GitLabAuthor {\n pub id: i64,\n pub username: String,\n pub name: String,\n}\n```\n\n### GitLabDiscussion\n\n```rust\n#[derive(Debug, Clone, Deserialize)]\npub struct GitLabDiscussion {\n pub id: String, // String ID like \"6a9c1750b37d...\"\n pub individual_note: bool, // true = standalone comment\n pub notes: Vec,\n}\n```\n\n### GitLabNote\n\n```rust\n#[derive(Debug, Clone, Deserialize)]\npub struct GitLabNote {\n pub id: i64,\n #[serde(rename = 
\"type\")]\n pub note_type: Option, // \"DiscussionNote\" | \"DiffNote\" | null\n pub body: String,\n pub author: GitLabAuthor,\n pub created_at: String, // ISO 8601\n pub updated_at: String, // ISO 8601\n pub system: bool, // true for system-generated notes\n #[serde(default)]\n pub resolvable: bool,\n #[serde(default)]\n pub resolved: bool,\n pub resolved_by: Option,\n pub resolved_at: Option,\n pub position: Option,\n}\n```\n\n### GitLabNotePosition\n\n```rust\n#[derive(Debug, Clone, Deserialize)]\npub struct GitLabNotePosition {\n pub old_path: Option,\n pub new_path: Option,\n pub old_line: Option,\n pub new_line: Option,\n}\n```\n\n## Acceptance Criteria\n\n- [ ] GitLabIssue deserializes from API response JSON\n- [ ] GitLabAuthor embedded correctly in issue and note\n- [ ] GitLabDiscussion with notes array deserializes\n- [ ] GitLabNote handles null note_type (use Option)\n- [ ] GitLabNote uses #[serde(rename = \"type\")] for reserved keyword\n- [ ] resolvable/resolved default to false via #[serde(default)]\n- [ ] All timestamp fields are String (ISO 8601 parsed elsewhere)\n\n## Files\n\n- src/gitlab/types.rs (edit - add types)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/gitlab_types_tests.rs\n#[test] fn deserializes_gitlab_issue_from_json()\n#[test] fn deserializes_gitlab_discussion_from_json()\n#[test] fn handles_null_note_type()\n#[test] fn handles_missing_resolvable_field()\n#[test] fn deserializes_labels_as_string_array()\n```\n\nGREEN: Add type definitions with serde attributes\n\nVERIFY: `cargo test gitlab_types`\n\n## Edge Cases\n\n- note_type can be null, \"DiscussionNote\", or \"DiffNote\"\n- labels array can be empty\n- description can be null\n- resolved_by/resolved_at can be null\n- position is only present for 
DiffNotes","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.150472Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:17:08.842965Z","closed_at":"2026-01-25T22:17:08.842895Z","close_reason":"Implemented GitLabAuthor, GitLabIssue, GitLabDiscussion, GitLabNote, GitLabNotePosition types with 10 passing tests","compaction_level":0,"original_size":0} +{"id":"bd-1nsl","title":"Epic: Surgical Per-IID Sync","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-17T19:11:34.020453Z","created_by":"tayloreernisse","updated_at":"2026-02-17T19:11:34.023031Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"]} {"id":"bd-1o1","title":"OBSERV: Add -v/--verbose and --log-format CLI flags","description":"## Background\nUsers and agents need CLI-controlled verbosity without knowing RUST_LOG syntax. The -v flag convention (cargo, curl, ssh) is universally understood. --log-format json enables lore sync 2>&1 | jq workflows without reading log files.\n\n## Approach\nAdd two new global flags to the Cli struct in src/cli/mod.rs (insert after the quiet field at line ~37):\n\n```rust\n/// Increase log verbosity (-v, -vv, -vvv)\n#[arg(short = 'v', long = \"verbose\", action = clap::ArgAction::Count, global = true)]\npub verbose: u8,\n\n/// Log format for stderr output: text (default) or json\n#[arg(long = \"log-format\", global = true, value_parser = [\"text\", \"json\"], default_value = \"text\")]\npub log_format: String,\n```\n\nThe existing Cli struct (src/cli/mod.rs:13-42) has these global flags: config, robot, json, color, quiet. 
The new flags follow the same pattern.\n\nNote: clap::ArgAction::Count allows -v, -vv, -vvv as a single flag with increasing count (0, 1, 2, 3).\n\n## Acceptance Criteria\n- [ ] lore -v sync parses without error (verbose=1)\n- [ ] lore -vv sync parses (verbose=2)\n- [ ] lore -vvv sync parses (verbose=3)\n- [ ] lore --log-format json sync parses (log_format=\"json\")\n- [ ] lore --log-format text sync parses (default)\n- [ ] lore --log-format xml sync errors (invalid value)\n- [ ] Existing commands unaffected (verbose defaults to 0, log_format to \"text\")\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/cli/mod.rs (modify Cli struct, lines 13-42)\n\n## TDD Loop\nRED: Write test that parses Cli with -v flag and asserts verbose=1\nGREEN: Add the two fields to Cli struct\nVERIFY: cargo test -p lore && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- -v and -q together: both parse fine; conflict resolution happens in subscriber setup (bd-2rr), not here\n- -v flag must be global=true so it works before and after subcommands: lore -v sync AND lore sync -v\n- --log-format is a string, not enum, to keep Cli struct simple","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:53:55.421339Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:10:22.585947Z","closed_at":"2026-02-04T17:10:22.585905Z","close_reason":"Added -v/--verbose (count) and --log-format (text|json) global CLI flags","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-1o1","depends_on_id":"bd-2nx","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1o4h","title":"OBSERV: Define StageTiming struct in src/core/metrics.rs","description":"## Background\nStageTiming is the materialized view of span timing data. It's the data structure that flows through robot JSON output, sync_runs.metrics_json, and the human-readable timing summary. 
Defined in a new file because it's genuinely new functionality that doesn't fit existing modules.\n\n## Approach\nCreate src/core/metrics.rs:\n\n```rust\nuse serde::Serialize;\n\nfn is_zero(v: &usize) -> bool { *v == 0 }\n\n#[derive(Debug, Clone, Serialize)]\npub struct StageTiming {\n pub name: String,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub project: Option,\n pub elapsed_ms: u64,\n pub items_processed: usize,\n #[serde(skip_serializing_if = \"is_zero\")]\n pub items_skipped: usize,\n #[serde(skip_serializing_if = \"is_zero\")]\n pub errors: usize,\n #[serde(skip_serializing_if = \"Vec::is_empty\")]\n pub sub_stages: Vec,\n}\n```\n\nRegister module in src/core/mod.rs (line ~11, add):\n```rust\npub mod metrics;\n```\n\nThe is_zero helper is a private function used by serde's skip_serializing_if. It must take &usize (reference) and return bool.\n\n## Acceptance Criteria\n- [ ] StageTiming serializes to JSON matching PRD Section 4.6.2 example\n- [ ] items_skipped omitted when 0\n- [ ] errors omitted when 0\n- [ ] sub_stages omitted when empty vec\n- [ ] project omitted when None\n- [ ] name, elapsed_ms, items_processed always present\n- [ ] Struct is Debug + Clone + Serialize\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/core/metrics.rs (new file)\n- src/core/mod.rs (register module, add line after existing pub mod declarations)\n\n## TDD Loop\nRED:\n - test_stage_timing_serialization: create StageTiming with sub_stages, serialize, assert JSON structure\n - test_stage_timing_zero_fields_omitted: errors=0, items_skipped=0, assert no \"errors\" or \"items_skipped\" keys\n - test_stage_timing_empty_sub_stages: sub_stages=vec![], assert no \"sub_stages\" key\nGREEN: Create metrics.rs with StageTiming struct and is_zero helper\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- is_zero must be a function, not a closure (serde skip_serializing_if requires a function path)\n- Vec::is_empty is 
a method on Vec, and serde accepts \"Vec::is_empty\" as a path for skip_serializing_if\n- Recursive StageTiming (sub_stages contains StageTiming): serde handles this naturally, no special handling needed","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:54:31.907234Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:21:40.915842Z","closed_at":"2026-02-04T17:21:40.915794Z","close_reason":"Created src/core/metrics.rs with StageTiming struct, serde skip_serializing_if for zero/empty fields, 5 tests","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-1o4h","depends_on_id":"bd-3er","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1oi7","title":"NOTE-2A: Schema migration for note documents (migration 024)","description":"## Background\nThe documents and dirty_sources tables have CHECK constraints limiting source_type to ('issue', 'merge_request', 'discussion'). Need to add 'note' as valid source_type. SQLite doesn't support ALTER CONSTRAINT, so use the table-rebuild pattern. Uses migration slot 024 (022 = query indexes, 023 = issue_detail_fields already exists).\n\n## Approach\nCreate migrations/024_note_documents.sql:\n\n1. Rebuild dirty_sources: CREATE dirty_sources_new with CHECK adding 'note', INSERT SELECT, DROP old, RENAME.\n2. 
Rebuild documents (complex — must preserve FTS consistency):\n - Save junction table data (_doc_labels_backup, _doc_paths_backup)\n - Drop FTS triggers (documents_ai, documents_ad, documents_au — defined in migration 008_fts5.sql)\n - Drop junction tables (document_labels, document_paths — defined in migration 007_documents.sql)\n - Create documents_new with updated CHECK adding 'note'\n - INSERT INTO documents_new SELECT * FROM documents (preserves rowids for FTS)\n - Drop documents, rename new\n - Recreate all indexes (idx_documents_project_updated, idx_documents_author, idx_documents_source, idx_documents_content_hash — see migration 007_documents.sql for definitions)\n - Recreate junction tables + restore data from backups\n - Recreate FTS triggers (see migration 008_fts5.sql for trigger SQL)\n - INSERT INTO documents_fts(documents_fts) VALUES('rebuild')\n3. Defense-in-depth triggers:\n - notes_ad_cleanup: AFTER DELETE ON notes WHEN old.is_system = 0 → delete doc + dirty_sources for source_type='note', source_id=old.id\n - notes_au_system_cleanup: AFTER UPDATE OF is_system ON notes WHEN NEW.is_system = 1 AND OLD.is_system = 0 → delete doc + dirty_sources\n4. Drop temp backup tables\n\nRegister as (\"024\", include_str!(\"../../migrations/024_note_documents.sql\")) in MIGRATIONS array in src/core/db.rs. 
Position AFTER the \"023\" entry.\n\n## Files\n- CREATE: migrations/024_note_documents.sql\n- MODIFY: src/core/db.rs (add (\"024\", include_str!(...)) to MIGRATIONS array, after line 75)\n\n## TDD Anchor\nRED: test_migration_024_allows_note_source_type — INSERT with source_type='note' should succeed in both documents and dirty_sources.\nGREEN: Implement the table rebuild migration.\nVERIFY: cargo test migration_024 -- --nocapture\nTests: test_migration_024_preserves_existing_data, test_migration_024_fts_triggers_intact, test_migration_024_row_counts_preserved, test_migration_024_integrity_checks_pass, test_migration_024_fts_rebuild_consistent, test_migration_024_note_delete_trigger_cleans_document, test_migration_024_note_system_flip_trigger_cleans_document, test_migration_024_system_note_delete_trigger_does_not_fire\n\n## Acceptance Criteria\n- [ ] INSERT source_type='note' succeeds in documents and dirty_sources\n- [ ] All existing data preserved through table rebuild (row counts match before/after)\n- [ ] FTS triggers fire correctly after rebuild (insert a doc, verify FTS entry exists)\n- [ ] documents_fts row count == documents row count after rebuild\n- [ ] PRAGMA foreign_key_check returns no violations\n- [ ] notes_ad_cleanup trigger fires on note deletion (deletes document + dirty_sources)\n- [ ] notes_au_system_cleanup trigger fires when is_system flips 0→1\n- [ ] System note deletion does NOT trigger notes_ad_cleanup (is_system = 1 guard)\n- [ ] All 9 tests pass\n\n## Edge Cases\n- Rowid preservation: INSERT INTO documents_new SELECT * preserves id column = rowid for FTS consistency\n- CRITICAL: Must save/restore junction table data (ON DELETE CASCADE on document_labels/document_paths would delete them when documents table is dropped)\n- The FTS rebuild at end is a safety net for any rowid drift\n- Empty database: migration is a no-op (all SELECTs return 0 rows, tables rebuilt with new 
CHECK)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:35.164340Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:24.078558Z","closed_at":"2026-02-12T18:13:24.078512Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"],"dependencies":[{"issue_id":"bd-1oi7","depends_on_id":"bd-18bf","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1oi7","depends_on_id":"bd-22ai","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1oi7","depends_on_id":"bd-ef0u","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1oo","title":"Register migration 015 in db.rs and create migration 016 for mr_file_changes","description":"## Background\n\nThis bead creates the `mr_file_changes` table that stores which files each MR touched, enabling Gate 4 (file-history) and Gate 5 (trace). It maps MRs to the file paths they modify.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 4.1 (Schema).\n\n## Codebase Context — CRITICAL Migration Numbering\n\n- **LATEST_SCHEMA_VERSION = 14** (MIGRATIONS array in db.rs includes 001-014)\n- **Migration 015 exists on disk** (`migrations/015_commit_shas_and_closes_watermark.sql`) but is **NOT registered** in `src/core/db.rs` MIGRATIONS array\n- `merge_commit_sha` and `squash_commit_sha` are already on merge_requests (added by 015 SQL) and already used in `src/ingestion/merge_requests.rs`\n- `closes_issues_synced_for_updated_at` also added by 015 and used in orchestrator.rs\n- **This bead must FIRST register migration 015 in db.rs**, then create migration 016 for mr_file_changes\n- pending_dependent_fetches already has `job_type='mr_diffs'` in CHECK constraint (migration 011)\n- Schema version auto-computes: `LATEST_SCHEMA_VERSION = MIGRATIONS.len() as i32`\n\n## Approach\n\n### Step 1: Register existing migration 015 in 
db.rs\n\nAdd to MIGRATIONS array in `src/core/db.rs` (after the \"014\" entry):\n\n```rust\n(\n \"015\",\n include_str!(\"../../migrations/015_commit_shas_and_closes_watermark.sql\"),\n),\n```\n\nThis makes LATEST_SCHEMA_VERSION = 15.\n\n### Step 2: Create migration 016 for mr_file_changes\n\nCreate `migrations/016_mr_file_changes.sql`:\n\n```sql\n-- Migration 016: MR file changes table\n-- Powers file-history and trace commands (Gates 4-5)\n\nCREATE TABLE mr_file_changes (\n id INTEGER PRIMARY KEY,\n merge_request_id INTEGER NOT NULL REFERENCES merge_requests(id) ON DELETE CASCADE,\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n old_path TEXT,\n new_path TEXT NOT NULL,\n change_type TEXT NOT NULL CHECK (change_type IN ('added', 'modified', 'renamed', 'deleted')),\n UNIQUE(merge_request_id, new_path)\n);\n\nCREATE INDEX idx_mfc_project_path ON mr_file_changes(project_id, new_path);\nCREATE INDEX idx_mfc_project_old_path ON mr_file_changes(project_id, old_path) WHERE old_path IS NOT NULL;\nCREATE INDEX idx_mfc_mr ON mr_file_changes(merge_request_id);\nCREATE INDEX idx_mfc_renamed ON mr_file_changes(project_id, change_type) WHERE change_type = 'renamed';\n\nINSERT INTO schema_version (version, applied_at, description)\nVALUES (16, strftime('%s', 'now') * 1000, 'MR file changes table');\n```\n\n### Step 3: Register migration 016 in db.rs\n\n```rust\n(\n \"016\",\n include_str!(\"../../migrations/016_mr_file_changes.sql\"),\n),\n```\n\nLATEST_SCHEMA_VERSION will auto-compute to 16.\n\n## Acceptance Criteria\n\n- [ ] Migration 015 registered in MIGRATIONS array in src/core/db.rs\n- [ ] Migration file exists at `migrations/016_mr_file_changes.sql`\n- [ ] `mr_file_changes` table has columns: id, merge_request_id, project_id, old_path, new_path, change_type\n- [ ] UNIQUE constraint on (merge_request_id, new_path)\n- [ ] CHECK constraint on change_type: added, modified, renamed, deleted\n- [ ] 4 indexes: project+new_path, project+old_path 
(partial), mr_id, project+renamed (partial)\n- [ ] Migration 016 registered in MIGRATIONS array\n- [ ] LATEST_SCHEMA_VERSION auto-computes to 16\n- [ ] `lore migrate` applies both 015 and 016 successfully on a v14 database\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/core/db.rs` (register migrations 015 AND 016 in MIGRATIONS array)\n- `migrations/016_mr_file_changes.sql` (NEW)\n\n## TDD Loop\n\nRED: `lore migrate` on v14 database says \"already up to date\" (015 not registered)\n\nGREEN: Register 015 in db.rs, create 016 file, register 016 in db.rs. `lore migrate` applies both.\n\nVERIFY:\n```bash\ncargo check --all-targets\nlore --robot migrate\nsqlite3 ~/.local/share/lore/lore.db '.schema mr_file_changes'\nsqlite3 ~/.local/share/lore/lore.db \"SELECT version FROM schema_version ORDER BY version DESC LIMIT 1\"\n```\n\n## Edge Cases\n\n- Databases already at v15 via manual migration: 015 will be skipped, only 016 applied\n- old_path is NULL for added files, populated for renamed/deleted\n- No lines_added/lines_removed columns (spec does not require them; removed to match spec exactly)\n- Partial indexes only index relevant rows for rename chain BFS performance\n","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:08.837816Z","created_by":"tayloreernisse","updated_at":"2026-02-05T21:40:46.766136Z","closed_at":"2026-02-05T21:40:46.766074Z","close_reason":"Completed: registered migration 015 in db.rs MIGRATIONS array, created migration 016 (mr_file_changes table with 4 indexes, CHECK constraint, UNIQUE constraint), registered 016 in db.rs. LATEST_SCHEMA_VERSION auto-computes to 16. 
cargo check, clippy, and fmt all pass.","compaction_level":0,"original_size":0,"labels":["gate-4","phase-b","schema"],"dependencies":[{"issue_id":"bd-1oo","depends_on_id":"bd-14q","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1oo","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1oyf","title":"NOTE-1D: robot-docs integration for notes command","description":"## Background\nAdd the notes command to the robot-docs manifest so agents can discover it. Also forward-prep SearchArgs --type to accept \"note\"/\"notes\" (duplicates work in NOTE-2F but is safe to do early).\n\n## Approach\n1. Robot-docs manifest is in src/main.rs, function handle_robot_docs() starting at line 2087. The commands JSON is built at line 2090 with serde_json::json!. Add a \"notes\" entry following the pattern of \"issues\" (line 2107 area) and \"mrs\" entries:\n\n \"notes\": {\n \"description\": \"List notes from discussions with rich filtering\",\n \"flags\": [\"--limit/-n \", \"--author/-a \", \"--note-type \", \"--contains \", \"--for-issue \", \"--for-mr \", \"-p/--project \", \"--since \", \"--until \", \"--path \", \"--resolution \", \"--sort \", \"--asc\", \"--include-system\", \"--note-id \", \"--gitlab-note-id \", \"--discussion-id \", \"--format \", \"--fields \", \"--open\"],\n \"robot_flags\": [\"--format json\", \"--fields minimal\"],\n \"example\": \"lore --robot notes --author jdefting --since 1y --format json --fields minimal\",\n \"response_schema\": {\n \"ok\": \"bool\",\n \"data\": {\"notes\": \"[NoteListRowJson]\", \"total_count\": \"int\", \"showing\": \"int\"},\n \"meta\": {\"elapsed_ms\": \"int\"}\n }\n }\n\n2. Update SearchArgs.source_type value_parser in src/cli/mod.rs (line 560) to include \"note\":\n value_parser = [\"issue\", \"mr\", \"discussion\", \"note\"]\n (This is also done in NOTE-2F but is safe to do in either order — value_parser is additive)\n\n3. 
Add \"notes\" to the command list in handle_robot_docs (line 662 area where command names are listed).\n\n## Files\n- MODIFY: src/main.rs (add notes to robot-docs commands JSON at line 2090 area, add to command list at line 662)\n- MODIFY: src/cli/mod.rs (add \"note\" to SearchArgs source_type value_parser at line 560)\n\n## TDD Anchor\nSmoke test: cargo run -- --robot robot-docs | jq '.data.commands.notes' should return the notes command entry.\nVERIFY: cargo test -- --nocapture (no dedicated test needed — robot-docs is a static JSON generator)\n\n## Acceptance Criteria\n- [ ] lore robot-docs output includes notes command with all flags\n- [ ] notes command has response_schema, example, and robot_flags\n- [ ] SearchArgs accepts --type note\n- [ ] All existing tests still pass\n\n## Dependency Context\n- Depends on NOTE-1A (bd-20p9), NOTE-1B (bd-3iod), NOTE-1C (bd-25hb): command must be fully wired before documenting (the manifest should describe actual working behavior)\n\n## Edge Cases\n- robot-docs --brief mode: notes command should still appear in brief output\n- Value parser order doesn't matter — \"note\" can be added at any position in the array","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T17:01:04.191582Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.359505Z","closed_at":"2026-02-12T18:13:15.359457Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["cli","per-note","search"]} -{"id":"bd-1pzj","title":"Implement responsive layout system (LORE_BREAKPOINTS + Responsive)","description":"## Background\n\nEvery TUI view needs to adapt its layout to terminal width. The PRD defines a project-wide breakpoint system using FrankenTUI native `Responsive` with 5 tiers: Xs (<60), Sm (60-89), Md (90-119), Lg (120-159), Xl (160+). 
This is cross-cutting infrastructure — Dashboard uses it for 1/2/3-column layout, Issue/MR lists use it for column visibility, Search/Who use it for split-pane toggling. Without this, every view would reinvent breakpoint logic.\n\n## Approach\n\nDefine a single `layout.rs` module in the TUI crate with:\n\n1. **`LORE_BREAKPOINTS` constant** — `Breakpoints::new(60, 90, 120)` (Xl defaults to Lg + 40 = 160)\n2. **`classify_width(width: u16) -> Breakpoint`** — wrapper around `LORE_BREAKPOINTS.classify_width(area.width)`\n3. **Helper functions** using `Responsive`:\n - `dashboard_columns(bp: Breakpoint) -> u16` — 1 (Xs/Sm), 2 (Md), 3 (Lg/Xl)\n - `show_preview_pane(bp: Breakpoint) -> bool` — false (Xs/Sm), true (Md+)\n - `table_columns(bp: Breakpoint, screen: Screen) -> Vec` — returns visible column set per breakpoint per screen\n\nReference FrankenTUI types:\n```rust\nuse ftui::layout::{Breakpoints, Breakpoint, Responsive, Flex, Constraint};\npub const LORE_BREAKPOINTS: Breakpoints = Breakpoints::new(60, 90, 120);\n```\n\nThe `Responsive` wrapper provides breakpoint-aware values with inheritance — `.new(base)` sets Xs, `.at(bp, val)` sets overrides, `.resolve_cloned(bp)` walks downward.\n\n## Acceptance Criteria\n- [ ] `LORE_BREAKPOINTS` constant defined with sm=60, md=90, lg=120\n- [ ] `classify_width()` returns correct Breakpoint for widths: 40->Xs, 60->Sm, 90->Md, 120->Lg, 160->Xl\n- [ ] `dashboard_columns()` returns 1 for Xs/Sm, 2 for Md, 3 for Lg/Xl\n- [ ] `show_preview_pane()` returns false for Xs/Sm, true for Md+\n- [ ] All helpers use `Responsive` (not bare match), so inheritance is automatic\n- [ ] Module is `pub` and importable by all view modules\n\n## Files\n- CREATE: crates/lore-tui/src/layout.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add `pub mod layout;`)\n\n## TDD Anchor\nRED: Write `test_classify_width_boundaries` that asserts classify_width(59)=Xs, classify_width(60)=Sm, classify_width(89)=Sm, classify_width(90)=Md, classify_width(119)=Md, 
classify_width(120)=Lg, classify_width(159)=Lg, classify_width(160)=Xl.\nGREEN: Implement LORE_BREAKPOINTS and classify_width().\nVERIFY: cargo test -p lore-tui classify_width\n\nAdditional tests:\n- test_dashboard_columns_per_breakpoint\n- test_show_preview_pane_per_breakpoint\n- test_responsive_inheritance (Sm inherits from Xs when no Sm override set)\n\n## Edge Cases\n- Terminal width of 0 or 1 must not panic — classify to Xs\n- Very wide terminals (>300 cols) should still work — classify to Xl\n- All Responsive values must have an Xs base so resolve never fails\n\n## Dependency Context\n- Depends on bd-3ddw (crate scaffold) which creates the crates/lore-tui/ workspace\n- Consumed by all view beads (bd-35g5 Dashboard, bd-3ei1 Issue List, bd-2kr0 MR List, bd-1zow Search, bd-u7se Who, etc.) for layout decisions","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:34.077080Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:29:46.129189Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1pzj","depends_on_id":"bd-3ddw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-1pzj","title":"Implement responsive layout system (LORE_BREAKPOINTS + Responsive)","description":"## Background\n\nEvery TUI view needs to adapt its layout to terminal width. The PRD defines a project-wide breakpoint system using FrankenTUI native `Responsive` with 5 tiers: Xs (<60), Sm (60-89), Md (90-119), Lg (120-159), Xl (160+). This is cross-cutting infrastructure — Dashboard uses it for 1/2/3-column layout, Issue/MR lists use it for column visibility, Search/Who use it for split-pane toggling. Without this, every view would reinvent breakpoint logic.\n\n## Approach\n\nDefine a single `layout.rs` module in the TUI crate with:\n\n1. **`LORE_BREAKPOINTS` constant** — `Breakpoints::new(60, 90, 120)` (Xl defaults to Lg + 40 = 160)\n2. 
**`classify_width(width: u16) -> Breakpoint`** — wrapper around `LORE_BREAKPOINTS.classify_width(area.width)`\n3. **Helper functions** using `Responsive`:\n - `dashboard_columns(bp: Breakpoint) -> u16` — 1 (Xs/Sm), 2 (Md), 3 (Lg/Xl)\n - `show_preview_pane(bp: Breakpoint) -> bool` — false (Xs/Sm), true (Md+)\n - `table_columns(bp: Breakpoint, screen: Screen) -> Vec` — returns visible column set per breakpoint per screen\n\nReference FrankenTUI types:\n```rust\nuse ftui::layout::{Breakpoints, Breakpoint, Responsive, Flex, Constraint};\npub const LORE_BREAKPOINTS: Breakpoints = Breakpoints::new(60, 90, 120);\n```\n\nThe `Responsive` wrapper provides breakpoint-aware values with inheritance — `.new(base)` sets Xs, `.at(bp, val)` sets overrides, `.resolve_cloned(bp)` walks downward.\n\n## Acceptance Criteria\n- [ ] `LORE_BREAKPOINTS` constant defined with sm=60, md=90, lg=120\n- [ ] `classify_width()` returns correct Breakpoint for widths: 40->Xs, 60->Sm, 90->Md, 120->Lg, 160->Xl\n- [ ] `dashboard_columns()` returns 1 for Xs/Sm, 2 for Md, 3 for Lg/Xl\n- [ ] `show_preview_pane()` returns false for Xs/Sm, true for Md+\n- [ ] All helpers use `Responsive` (not bare match), so inheritance is automatic\n- [ ] Module is `pub` and importable by all view modules\n\n## Files\n- CREATE: crates/lore-tui/src/layout.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add `pub mod layout;`)\n\n## TDD Anchor\nRED: Write `test_classify_width_boundaries` that asserts classify_width(59)=Xs, classify_width(60)=Sm, classify_width(89)=Sm, classify_width(90)=Md, classify_width(119)=Md, classify_width(120)=Lg, classify_width(159)=Lg, classify_width(160)=Xl.\nGREEN: Implement LORE_BREAKPOINTS and classify_width().\nVERIFY: cargo test -p lore-tui classify_width\n\nAdditional tests:\n- test_dashboard_columns_per_breakpoint\n- test_show_preview_pane_per_breakpoint\n- test_responsive_inheritance (Sm inherits from Xs when no Sm override set)\n\n## Edge Cases\n- Terminal width of 0 or 1 must not panic — 
classify to Xs\n- Very wide terminals (>300 cols) should still work — classify to Xl\n- All Responsive values must have an Xs base so resolve never fails\n\n## Dependency Context\n- Depends on bd-3ddw (crate scaffold) which creates the crates/lore-tui/ workspace\n- Consumed by all view beads (bd-35g5 Dashboard, bd-3ei1 Issue List, bd-2kr0 MR List, bd-1zow Search, bd-u7se Who, etc.) for layout decisions","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:34.077080Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:47:56.777729Z","closed_at":"2026-02-18T18:47:56.777533Z","close_reason":"Implemented responsive layout system with LORE_BREAKPOINTS, classify_width, dashboard_columns, show_preview_pane. All backed by ftui::layout::Breakpoint/Breakpoints. 5 tests cover boundaries, per-breakpoint behavior, and edge cases. Build blocked by upstream lore crate errors (other agents WIP), not by layout.rs.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1pzj","depends_on_id":"bd-3ddw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1q8z","title":"WHO: Epic — People Intelligence Commands","description":"## Background\n\nThe current beads roadmap focuses on Gate 4/5 (file-history, code-trace) — archaeology queries requiring mr_file_changes data that does not exist yet. Meanwhile, the DB has rich people/activity data (280K notes, 210K discussions, 33K DiffNotes with file positions, 53 active participants) that can answer collaboration questions immediately with zero new tables or API calls.\n\n## Scope\n\nThis epic builds `lore who` — a pure SQL query layer answering 5 questions:\n1. **Expert**: \"Who should I talk to about this feature/file?\" (DiffNote path analysis)\n2. **Workload**: \"What is person X working on?\" (open issues, authored/reviewing MRs, unresolved discussions)\n3. 
**Reviews**: \"What review patterns does person X have?\" (DiffNote **prefix** category extraction)\n4. **Active**: \"What discussions are actively in progress?\" (unresolved resolvable discussions)\n5. **Overlap**: \"Who else has MRs/notes touching my files?\" (path-based activity overlap)\n\n## Plan Reference\n\nFull implementation plan with 8 iterations of review: `docs/who-command-design.md`\n\n## Children (Execution Order)\n\n1. **bd-34rr** — Migration 017: 5 composite indexes for query performance\n2. **bd-2rk9** — CLI skeleton: WhoArgs, Commands::Who, dispatch, stub file\n3. **bd-2ldg** — Mode resolution, path helpers, run_who entry point, all result types\n4. **bd-zqpf** — Expert mode query (CTE + MR-breadth scoring)\n5. **bd-s3rc** — Workload mode query (4 SELECT queries)\n6. **bd-m7k1** — Active mode query (CTE + global/scoped SQL variants)\n7. **bd-b51e** — Overlap mode query (dual role tracking + accumulator)\n8. **bd-2711** — Reviews mode query (prefix extraction + normalization)\n9. **bd-1rdi** — Human terminal output for all 5 modes\n10. **bd-3mj2** — Robot JSON output for all 5 modes\n11. **bd-tfh3** — Comprehensive test suite (20+ tests)\n12. **bd-zibc** — VALID_COMMANDS + robot-docs manifest\n13. **bd-g0d5** — Verification gate (check, clippy, fmt, EXPLAIN QUERY PLAN)\n\n## Design Principles (from plan)\n\n- All SQL fully static — no format!() for query text, LIMIT bound as ?N\n- prepare_cached() everywhere for statement caching\n- (?N IS NULL OR ...) 
nullable binding except Active mode (two SQL variants for index selection)\n- Self-review exclusion on all DiffNote-based branches\n- Deterministic output: sorted GROUP_CONCAT, sorted HashSet-derived vectors, stable tie-breakers\n- Truncation transparency: LIMIT+1 pattern with truncated bool\n- Bounded payloads: capped arrays with *_total + *_truncated metadata\n- Robot-first reproducibility: input + resolved_input with since_mode tri-state\n\n## Files\n\n| File | Action | Description |\n|---|---|---|\n| `src/cli/commands/who.rs` | CREATE | All 5 query modes + human/robot output |\n| `src/cli/commands/mod.rs` | MODIFY | Add `pub mod who` + re-exports |\n| `src/cli/mod.rs` | MODIFY | Add `WhoArgs` struct + `Commands::Who` variant |\n| `src/main.rs` | MODIFY | Add dispatch arm + `handle_who` fn + VALID_COMMANDS + robot-docs |\n| `src/core/db.rs` | MODIFY | Add migration 017: composite indexes for who query paths |\n\n## TDD Loop\n\nEach child bead has its own RED/GREEN/VERIFY cycle. The epic TDD strategy:\n- RED: Tests in bd-tfh3 (written alongside query beads)\n- GREEN: Query implementations in bd-zqpf, bd-s3rc, bd-m7k1, bd-b51e, bd-2711\n- VERIFY: bd-g0d5 runs `cargo test` + `cargo clippy` + EXPLAIN QUERY PLAN\n\n## Acceptance Criteria\n\n- [ ] `lore who src/path/` shows ranked experts with scores\n- [ ] `lore who @username` shows workload across all projects\n- [ ] `lore who @username --reviews` shows categorized review patterns\n- [ ] `lore who --active` shows unresolved discussions\n- [ ] `lore who --overlap src/path/` shows other contributors\n- [ ] `lore who --path README.md` handles root files\n- [ ] `lore -J who ...` produces valid JSON with input + resolved_input\n- [ ] All indexes verified via EXPLAIN QUERY PLAN\n- [ ] cargo check + clippy + fmt + test all pass\n\n## Edge Cases\n\n- This epic has zero new tables — all queries are pure SQL over existing schema + migration 017 indexes\n- Gate 4/5 beads are NOT dependencies — who command works independently 
with current data\n- If DB has <1000 notes, queries will work but results will be sparse — this is expected for fresh installations\n- format_relative_time() is duplicated from list.rs intentionally (private fn, small blast radius > refactoring shared module)\n- lookup_project_path() is local to who.rs — single invocation per run, does not warrant shared utility","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-02-08T02:39:39.538892Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:38.665143Z","closed_at":"2026-02-08T04:10:38.665094Z","close_reason":"All 13 child beads implemented: migration 017 (5 composite indexes), CLI skeleton with WhoArgs/dispatch/robot-docs, 5 query modes (expert/workload/active/overlap/reviews), human terminal + robot JSON output, 20 tests. All quality gates pass: cargo check, clippy (pedantic+nursery), fmt, test.","compaction_level":0,"original_size":0} {"id":"bd-1qf","title":"[CP1] Discussion and note transformers","description":"## Background\n\nDiscussion and note transformers convert GitLab API discussion responses into our normalized schema. They compute derived fields like `first_note_at`, `last_note_at`, resolvable/resolved status, and note positions. 
These are pure functions with no I/O.\n\n## Approach\n\nCreate transformer module with:\n\n### Structs\n\n```rust\n// src/gitlab/transformers/discussion.rs\n\npub struct NormalizedDiscussion {\n pub gitlab_discussion_id: String,\n pub project_id: i64,\n pub issue_id: i64,\n pub noteable_type: String, // \"Issue\"\n pub individual_note: bool,\n pub first_note_at: Option, // min(note.created_at) in ms epoch\n pub last_note_at: Option, // max(note.created_at) in ms epoch\n pub last_seen_at: i64,\n pub resolvable: bool, // any note is resolvable\n pub resolved: bool, // all resolvable notes are resolved\n}\n\npub struct NormalizedNote {\n pub gitlab_id: i64,\n pub project_id: i64,\n pub note_type: Option, // \"DiscussionNote\" | \"DiffNote\" | null\n pub is_system: bool, // from note.system\n pub author_username: String,\n pub body: String,\n pub created_at: i64, // ms epoch\n pub updated_at: i64, // ms epoch\n pub last_seen_at: i64,\n pub position: i32, // 0-indexed array position\n pub resolvable: bool,\n pub resolved: bool,\n pub resolved_by: Option,\n pub resolved_at: Option,\n}\n```\n\n### Functions\n\n```rust\npub fn transform_discussion(\n gitlab_discussion: &GitLabDiscussion,\n local_project_id: i64,\n local_issue_id: i64,\n) -> NormalizedDiscussion\n\npub fn transform_notes(\n gitlab_discussion: &GitLabDiscussion,\n local_project_id: i64,\n) -> Vec\n```\n\n## Acceptance Criteria\n\n- [ ] `NormalizedDiscussion` struct with all fields\n- [ ] `NormalizedNote` struct with all fields\n- [ ] `transform_discussion` computes first_note_at/last_note_at from notes array\n- [ ] `transform_discussion` computes resolvable (any note is resolvable)\n- [ ] `transform_discussion` computes resolved (all resolvable notes resolved)\n- [ ] `transform_notes` preserves array order via position field (0-indexed)\n- [ ] `transform_notes` maps system flag to is_system\n- [ ] Unit tests cover all computed fields\n\n## Files\n\n- src/gitlab/transformers/mod.rs (add `pub mod 
discussion;`)\n- src/gitlab/transformers/discussion.rs (create)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/discussion_transformer_tests.rs\n#[test] fn transforms_discussion_payload_to_normalized_schema()\n#[test] fn extracts_notes_array_from_discussion()\n#[test] fn sets_individual_note_flag_correctly()\n#[test] fn flags_system_notes_with_is_system_true()\n#[test] fn preserves_note_order_via_position_field()\n#[test] fn computes_first_note_at_and_last_note_at_correctly()\n#[test] fn computes_resolvable_and_resolved_status()\n```\n\nGREEN: Implement transform_discussion and transform_notes\n\nVERIFY: `cargo test discussion_transformer`\n\n## Edge Cases\n\n- Discussion with single note - first_note_at == last_note_at\n- All notes are system notes - still compute timestamps\n- No notes resolvable - resolvable=false, resolved=false\n- Mix of resolved/unresolved notes - resolved=false until all done","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.196079Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:27:11.485112Z","closed_at":"2026-01-25T22:27:11.485058Z","close_reason":"Implemented NormalizedDiscussion, NormalizedNote, transform_discussion, transform_notes with 9 passing unit tests","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1qf","depends_on_id":"bd-1np","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1qpp","title":"Implement NavigationStack (back/forward/jump list)","description":"## Background\nNavigation uses a stack with global shortcuts, supporting back/forward (browser-like) and jump list (vim-like Ctrl+O/Ctrl+I). State is preserved when navigating away — screens are never cleared on pop. 
The jump list only records \"significant\" hops (detail views, cross-references).\n\n## Approach\nCreate crates/lore-tui/src/navigation.rs:\n- NavigationStack struct: back_stack (Vec), current (Screen), forward_stack (Vec), jump_list (Vec), jump_index (usize), browse_snapshots (HashMap)\n- new() -> Self: initializes with Dashboard as current\n- current() -> &Screen\n- is_at(&Screen) -> bool\n- push(Screen): pushes current to back_stack, clears forward_stack, sets new current, records detail hops in jump_list\n- pop() -> Option: pops from back_stack, pushes current to forward_stack\n- go_forward() -> Option: pops from forward_stack, pushes current to back_stack\n- jump_back() -> Option<&Screen>: moves backward in jump list (Ctrl+O)\n- jump_forward() -> Option<&Screen>: moves forward in jump list (Ctrl+I)\n- reset_to(Screen): clears all stacks, sets new current (H=Home)\n- breadcrumbs() -> Vec<&str>: returns labels for breadcrumb display\n- depth() -> usize: back_stack.len() + 1\n- BrowseSnapshot struct: per-screen pagination cursor snapshot for stable ordering during concurrent writes\n\n## Acceptance Criteria\n- [ ] push() adds to back_stack and clears forward_stack\n- [ ] pop() moves current to forward_stack and restores previous\n- [ ] go_forward() restores from forward_stack\n- [ ] jump_back/forward navigates only through detail views\n- [ ] reset_to() clears all history\n- [ ] breadcrumbs() returns ordered screen labels\n- [ ] pop() returns None at root (can't pop past Dashboard)\n- [ ] push() only records is_detail_or_entity() screens in jump_list\n\n## Files\n- CREATE: crates/lore-tui/src/navigation.rs\n\n## TDD Anchor\nRED: Write test_push_pop_preserves_order that pushes Dashboard->IssueList->IssueDetail, pops twice, verifies correct order.\nGREEN: Implement push/pop with back_stack.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_push_pop\n\nAdditional tests:\n- test_forward_stack_cleared_on_new_push\n- 
test_jump_list_skips_list_screens\n- test_reset_clears_all_history\n- test_pop_at_root_returns_none\n- test_breadcrumbs_reflect_stack\n\n## Edge Cases\n- Stack depth has no explicit limit — deeply nested cross-reference chains are supported\n- Forward stack must be cleared on any new push (browser behavior)\n- Jump list must truncate forward entries when recording a new jump (vim behavior)\n\n## Dependency Context\nUses Screen enum and Screen::is_detail_or_entity() from \"Implement core types\" task.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:56:01.365386Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:26:46.938223Z","closed_at":"2026-02-12T20:26:46.938170Z","close_reason":"NavigationStack: push/pop/forward, vim jump list, breadcrumbs, reset. 13 tests, quality gate green.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1qpp","depends_on_id":"bd-c9gk","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -69,6 +75,7 @@ {"id":"bd-1rdi","title":"WHO: Human terminal output for all 5 modes","description":"## Background\n\nTerminal output for humans. Each mode gets a dedicated print function with consistent styling: bold headers, cyan usernames/refs, dim metadata, table alignment.\n\n## Approach\n\n### Dispatch:\n```rust\npub fn print_who_human(result: &WhoResult, project_path: Option<&str>) {\n match result {\n WhoResult::Expert(r) => print_expert_human(r, project_path),\n WhoResult::Workload(r) => print_workload_human(r),\n WhoResult::Reviews(r) => print_reviews_human(r),\n WhoResult::Active(r) => print_active_human(r, project_path),\n WhoResult::Overlap(r) => print_overlap_human(r, project_path),\n }\n}\n```\n\n### Shared helpers:\n- **print_scope_hint()**: dim \"(aggregated across all projects; use -p to scope)\" when project_path is None. 
Called by Expert, Active, Overlap.\n- **format_relative_time(ms_epoch)**: \"just now\" / \"N min ago\" / \"N hours ago\" / \"N days ago\" / \"N weeks ago\" / \"N months ago\" — DUPLICATE from list.rs (private there, keep blast radius small)\n- **truncate_str(s, max)**: Unicode-aware, appends \"...\" if truncated\n\n### Mode formats:\n- **Expert**: table with Username(16) / Score(6) / Reviewed(MRs)(12) / Notes(6) / Authored(MRs)(12) / Last Seen. Path match hint line. \"-\" for zero counts.\n- **Workload**: 4 sections (Assigned Issues, Authored MRs, Reviewing MRs, Unresolved Discussions). Canonical refs in cyan. Draft indicator. Per-section truncation.\n- **Reviews**: DiffNote summary line + category table (Category(16) / Count(6) / %(6)). Uncategorized count note.\n- **Active**: Discussion list with entity ref, note count, participants (comma-joined @usernames), project path. Discussion count in header.\n- **Overlap**: table with Username(16) / Role(6) / MRs(7) / Last Seen(12) / MR Refs (first 5, +N overflow). Path match hint.\n\n### All modes: truncation dim hints, empty-state messages, console::style formatting.\n\n## Files\n\n- `src/cli/commands/who.rs`\n\n## TDD Loop\n\nNo unit tests for print functions (they write to stdout). 
Verification is manual smoke test.\nVERIFY: `cargo check --all-targets` then manual: `cargo run --release -- who src/features/global-search/`\n\n## Acceptance Criteria\n\n- [ ] cargo check passes (all print functions compile)\n- [ ] Each mode produces readable, aligned terminal output\n- [ ] Scope hint shown when project not specified (Expert, Active, Overlap)\n- [ ] Truncation hints shown when results exceed limit\n- [ ] Empty-state messages for zero results\n\n## Edge Cases\n\n- format_relative_time handles negative diff (\"in the future\")\n- truncate_str is Unicode-aware (.chars().count(), not .len())\n- Workload shows empty message only when ALL 4 sections are empty","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:41:06.190608Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.599783Z","closed_at":"2026-02-08T04:10:29.599749Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. 
All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1rdi","depends_on_id":"bd-2711","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1rdi","depends_on_id":"bd-b51e","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1rdi","depends_on_id":"bd-m7k1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1rdi","depends_on_id":"bd-s3rc","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1rdi","depends_on_id":"bd-zqpf","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1re","title":"[CP1] gi show issue command","description":"Show issue details with discussions.\n\nFlags:\n- --project=PATH (required if iid is ambiguous across projects)\n\nOutput:\n- Title, project, state, author, dates, labels, URL\n- Description text\n- All discussions with notes (formatted thread view)\n\nHandle ambiguity: If multiple projects have same iid, prompt for --project or show error.\n\nFiles: src/cli/commands/show.ts\nDone when: Issue detail view displays all fields including threaded discussions","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T15:20:29.826786Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.153211Z","closed_at":"2026-01-25T15:21:35.153211Z","deleted_at":"2026-01-25T15:21:35.153208Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-1s1","title":"[CP1] Integration tests for issue ingestion","description":"Full integration tests for issue ingestion module.\n\n## Tests (tests/issue_ingestion_tests.rs)\n\n- inserts_issues_into_database\n- creates_labels_from_issue_payloads\n- links_issues_to_labels_via_junction_table\n- removes_stale_label_links_on_resync\n- stores_raw_payload_for_each_issue\n- 
stores_raw_payload_for_each_discussion\n- updates_cursor_incrementally_per_page\n- resumes_from_cursor_on_subsequent_runs\n- handles_issues_with_no_labels\n- upserts_existing_issues_on_refetch\n- skips_discussion_refetch_for_unchanged_issues\n\n## Test Setup\n- tempfile::TempDir for isolated database\n- wiremock::MockServer for GitLab API\n- Mock handlers returning fixture data\n\nFiles: tests/issue_ingestion_tests.rs\nDone when: All integration tests pass with mocked GitLab","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:59:12.158586Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:02.109109Z","closed_at":"2026-01-25T17:02:02.109109Z","deleted_at":"2026-01-25T17:02:02.109105Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} +{"id":"bd-1sc6","title":"Add SurgicalPreflightFailed error variant and foundation visibility changes","description":"## Background\nSurgical sync needs a dedicated error variant for preflight failures (e.g., IID not found on GitLab, project mismatch). The existing `GitLabNotFound` variant maps to exit code 6 and is too generic — it does not convey that the failure occurred during surgical preflight validation. A new `SurgicalPreflightFailed` variant in `LoreError` with a clear Display message and exit code 6 provides actionable feedback to both human and robot consumers.\n\nAdditionally, the `process_single_issue` function in `src/ingestion/issues.rs` and `process_single_mr` + `ProcessMrResult` in `src/ingestion/merge_requests.rs` are currently private. The surgical sync orchestrator (downstream bead bd-3sez) will need to call these from `src/core/surgical.rs`, so they must be raised to `pub(crate)` visibility. 
No config field is needed for this bead — the surgical sync feature is triggered purely by CLI flags (bead bd-1lja).\n\n## Approach\n\n### Step 1: Add ErrorCode variant (src/core/error.rs, line ~23)\nAdd `SurgicalPreflightFailed` to the `ErrorCode` enum (after `Ambiguous`). Wire it through three impls:\n- `Display`: maps to `\"SURGICAL_PREFLIGHT_FAILED\"`\n- `exit_code()`: maps to `6` (same category as GitLabNotFound — resource-level failure)\n\n### Step 2: Add LoreError variant (src/core/error.rs, after EmbeddingsNotBuilt ~line 155)\n```rust\n#[error(\"Surgical preflight failed for {entity_type} !{iid} in {project}: {reason}\")]\nSurgicalPreflightFailed {\n entity_type: String, // \"issue\" or \"merge_request\"\n iid: u64,\n project: String,\n reason: String,\n},\n```\nWire in `code()` → `ErrorCode::SurgicalPreflightFailed`, `suggestion()` → a helpful message about verifying the IID exists, `actions()` → `[\"lore issues -p \", \"lore mrs -p \"]`.\n\n### Step 3: Raise visibility (src/ingestion/issues.rs, src/ingestion/merge_requests.rs)\n- `process_single_issue` at line 143: `fn` → `pub(crate) fn`\n- `process_single_mr` at line 144: `fn` → `pub(crate) fn`\n- `ProcessMrResult` at line 138: `struct` → `pub(crate) struct` (and its fields)\n\n## Acceptance Criteria\n- [ ] `ErrorCode::SurgicalPreflightFailed` exists with Display `\"SURGICAL_PREFLIGHT_FAILED\"` and exit code 6\n- [ ] `LoreError::SurgicalPreflightFailed { entity_type, iid, project, reason }` exists\n- [ ] `LoreError::SurgicalPreflightFailed { .. 
}.code()` returns `ErrorCode::SurgicalPreflightFailed`\n- [ ] Display output includes entity_type, iid, project, and reason\n- [ ] `suggestion()` returns a non-None helpful string\n- [ ] `process_single_issue` is `pub(crate)`\n- [ ] `process_single_mr` is `pub(crate)`\n- [ ] `ProcessMrResult` and its fields are `pub(crate)`\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n- [ ] All existing tests pass\n\n## Files\n- MODIFY: src/core/error.rs (add ErrorCode variant, LoreError variant, wire Display/exit_code/code/suggestion/actions)\n- MODIFY: src/ingestion/issues.rs (pub(crate) on process_single_issue)\n- MODIFY: src/ingestion/merge_requests.rs (pub(crate) on process_single_mr, ProcessMrResult + fields)\n\n## TDD Anchor\nRED: Write three tests in a new `#[cfg(test)] mod tests` block at the bottom of `src/core/error.rs`:\n\n```rust\n#[cfg(test)]\nmod tests {\n use super::*;\n\n #[test]\n fn surgical_preflight_failed_display() {\n let err = LoreError::SurgicalPreflightFailed {\n entity_type: \"issue\".to_string(),\n iid: 42,\n project: \"group/repo\".to_string(),\n reason: \"not found on GitLab\".to_string(),\n };\n let msg = err.to_string();\n assert!(msg.contains(\"issue\"), \"missing entity_type: {msg}\");\n assert!(msg.contains(\"42\"), \"missing iid: {msg}\");\n assert!(msg.contains(\"group/repo\"), \"missing project: {msg}\");\n assert!(msg.contains(\"not found on GitLab\"), \"missing reason: {msg}\");\n }\n\n #[test]\n fn surgical_preflight_failed_error_code() {\n let code = ErrorCode::SurgicalPreflightFailed;\n assert_eq!(code.exit_code(), 6);\n }\n\n #[test]\n fn surgical_preflight_failed_code_mapping() {\n let err = LoreError::SurgicalPreflightFailed {\n entity_type: \"merge_request\".to_string(),\n iid: 99,\n project: \"ns/proj\".to_string(),\n reason: \"404\".to_string(),\n };\n assert_eq!(err.code(), ErrorCode::SurgicalPreflightFailed);\n }\n}\n```\n\nGREEN: Add the variant and wire all impls.\nVERIFY: 
`cargo test surgical_preflight_failed`\n\n## Edge Cases\n- Exit code 6 is shared with `GitLabNotFound` — this is intentional (same semantic category: resource not found). The `ErrorCode` Display string distinguishes them for robot consumers.\n- The `entity_type` field uses strings (\"issue\", \"merge_request\") rather than an enum to avoid over-abstraction for two values.\n- Visibility changes are `pub(crate)`, not `pub` — these are internal implementation details, not public API.\n\n## Dependency Context\nThis is a leaf/foundation bead with no upstream dependencies. Downstream beads bd-1i4i (orchestrator) and bd-3sez (surgical.rs module) depend on this for both the error variant and the pub(crate) visibility of ingestion functions.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:11:41.476902Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:01:18.103312Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-1sc6","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1sc6","depends_on_id":"bd-3sez","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-1se","title":"Epic: Gate 2 - Cross-Reference Extraction","description":"## Background\nGate 2 builds the entity relationship graph that connects issues, MRs, and discussions. Without cross-references, temporal queries can only show events for individually-matched entities. With them, \"lore timeline auth migration\" can discover that MR !567 closed issue #234, which spawned follow-up issue #299 — even if #299 does not contain the words \"auth migration.\"\n\nThree data sources feed entity_references:\n1. **Structured API (reliable):** GET /projects/:id/merge_requests/:iid/closes_issues\n2. **State events (reliable):** resource_state_events.source_merge_request_id\n3. 
**System note parsing (best-effort):** \"mentioned in !456\", \"closed by !789\" patterns\n\n## Architecture\n- **entity_references table:** Already created in migration 011 (bd-hu3/bd-czk). Stores source→target relationships with reference_type (closes/mentioned/related) and source_method provenance.\n- **Directionality convention:** source = entity where reference was observed, target = entity being referenced. Consistent across all source_methods.\n- **Unresolved references:** Cross-project refs stored with target_entity_id=NULL, target_project_path populated. Still valuable for timeline narratives.\n- **closes_issues fetch:** Uses generic dependent fetch queue (job_type = mr_closes_issues). One API call per MR.\n- **System note parsing:** Local post-processing after all dependent fetches complete. No API calls. English-only, best-effort.\n\n## Children (Execution Order)\n1. **bd-czk** [CLOSED] — entity_references schema (folded into migration 011)\n2. **bd-8t4** [OPEN] — Extract cross-references from resource_state_events (source_merge_request_id)\n3. **bd-3ia** [OPEN] — Fetch closes_issues API and populate entity_references\n4. 
**bd-1ji** [OPEN] — Parse system notes for cross-reference patterns\n\n## Gate Completion Criteria\n- [ ] entity_references populated from closes_issues API for all synced MRs\n- [ ] entity_references populated from state events where source_merge_request_id present\n- [ ] System notes parsed for cross-reference patterns (English instances)\n- [ ] Cross-project references stored as unresolved (target_entity_id=NULL)\n- [ ] source_method tracks provenance of each reference\n- [ ] Deduplication: same relationship from multiple sources stored once (UNIQUE constraint)\n- [ ] Timeline JSON includes expansion provenance (via) for expanded entities\n- [ ] Integration test: sync with all three extraction methods, verify entity_references populated\n\n## Dependencies\n- Depends on: Gate 1 (bd-2zl) — event tables and dependent fetch queue\n- Downstream: Gate 3 (bd-ike) depends on entity_references for BFS expansion","status":"closed","priority":1,"issue_type":"feature","created_at":"2026-02-02T21:31:00.981132Z","created_by":"tayloreernisse","updated_at":"2026-02-05T16:08:26.965177Z","closed_at":"2026-02-05T16:08:26.964997Z","close_reason":"All child beads completed: bd-8t4 (state event extraction), bd-3ia (closes_issues API), bd-1ji (system note parsing)","compaction_level":0,"original_size":0,"labels":["epic","gate-2","phase-b"],"dependencies":[{"issue_id":"bd-1se","depends_on_id":"bd-2zl","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1ser","title":"Implement scope context (global project filter)","description":"## Background\nThe scope context provides a global project filter that flows through all query bridge functions. Users can pin to a specific project set or view all projects. The P keybinding opens a scope picker overlay. 
Scope is persisted in session state.\n\n## Approach\nCreate crates/lore-tui/src/scope.rs:\n- ScopeContext enum: AllProjects, Pinned(Vec)\n- ProjectInfo: id (i64), path (String)\n- scope_filter_sql(scope: &ScopeContext) -> String: generates WHERE clause fragment\n- All action.rs query functions accept &ScopeContext parameter\n- Scope picker overlay: list of projects with checkbox selection\n- P keybinding toggles scope picker from any screen\n\n## Acceptance Criteria\n- [ ] AllProjects scope returns unfiltered results\n- [ ] Pinned scope filters to specific project IDs\n- [ ] All query functions respect global scope\n- [ ] P keybinding opens scope picker\n- [ ] Scope persisted in session state\n- [ ] Scope change triggers re-query of current screen\n\n## Files\n- CREATE: crates/lore-tui/src/scope.rs\n- MODIFY: crates/lore-tui/src/action.rs (add scope parameter to all queries)\n\n## TDD Anchor\nRED: Write test_scope_filter_sql that creates Pinned scope with 2 projects, asserts generated SQL contains IN (1, 2).\nGREEN: Implement scope_filter_sql.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_scope_filter\n\n## Edge Cases\n- Single-project datasets: scope picker not needed, but should still work\n- Very many projects (>50): scope picker should be scrollable\n- Scope change mid-pagination: reset cursor to first page\n\n## Dependency Context\nUses AppState from \"Implement AppState composition\" task.\nUses session persistence from \"Implement session persistence\" task.","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-12T17:03:37.555484Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.537405Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1ser","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ser","depends_on_id":"bd-26lp","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} 
{"id":"bd-1soz","title":"Add half_life_decay() pure function","description":"## Background\nThe decay function is the mathematical core of the scoring model. It must be correct, tested first (TDD RED), and verified independently of any DB or SQL changes.\n\n## Approach\nAdd to who.rs as a private function near the top of the module (before query_expert):\n\n```rust\n/// Exponential half-life decay: R = 2^(-t/h)\n/// Returns 1.0 at elapsed=0, 0.5 at elapsed=half_life, 0.0 if half_life=0.\nfn half_life_decay(elapsed_ms: i64, half_life_days: u32) -> f64 {\n let days = (elapsed_ms as f64 / 86_400_000.0).max(0.0);\n let hl = f64::from(half_life_days);\n if hl <= 0.0 { return 0.0; }\n 2.0_f64.powf(-days / hl)\n}\n```\n\n## TDD Loop\n\n### RED (write first):\n```rust\n#[test]\nfn test_half_life_decay_math() {\n let hl_180 = 180;\n // At t=0, full retention\n assert!((half_life_decay(0, hl_180) - 1.0).abs() < f64::EPSILON);\n // At t=half_life, exactly 0.5\n let one_hl_ms = 180 * 86_400_000_i64;\n assert!((half_life_decay(one_hl_ms, hl_180) - 0.5).abs() < 1e-10);\n // At t=2*half_life, exactly 0.25\n assert!((half_life_decay(2 * one_hl_ms, hl_180) - 0.25).abs() < 1e-10);\n // Negative elapsed clamped to 0 -> 1.0\n assert!((half_life_decay(-1000, hl_180) - 1.0).abs() < f64::EPSILON);\n // Zero half-life -> 0.0 (div-by-zero guard)\n assert!((half_life_decay(86_400_000, 0)).abs() < f64::EPSILON);\n}\n\n#[test]\nfn test_score_monotonicity_by_age() {\n // For any half-life, older timestamps must never produce higher decay than newer ones.\n // Use deterministic LCG PRNG (no rand dependency).\n let mut seed: u64 = 42;\n let hl = 90_u32;\n for _ in 0..50 {\n seed = seed.wrapping_mul(6364136223846793005).wrapping_add(1);\n let newer_ms = (seed % 100_000_000) as i64; // 0-100M ms (~1.15 days max)\n seed = seed.wrapping_mul(6364136223846793005).wrapping_add(1);\n let older_ms = newer_ms + (seed % 500_000_000) as i64; // always >= newer\n assert!(\n half_life_decay(older_ms, hl) <= 
half_life_decay(newer_ms, hl),\n \"Monotonicity violated: decay({older_ms}) > decay({newer_ms})\"\n );\n }\n}\n```\n\n### GREEN: Add the half_life_decay function (3 lines of math).\n### VERIFY: `cargo test -p lore -- test_half_life_decay_math test_score_monotonicity`\n\n## Acceptance Criteria\n- [ ] test_half_life_decay_math passes (4 boundary cases + div-by-zero guard)\n- [ ] test_score_monotonicity_by_age passes (50 random pairs, deterministic seed)\n- [ ] Function is `fn` not `pub fn` (module-private)\n- [ ] No DB dependency — pure function\n\n## Files\n- src/cli/commands/who.rs (function near top, tests in test module)\n\n## Edge Cases\n- Negative elapsed_ms: clamped to 0 via .max(0.0) -> returns 1.0\n- half_life_days = 0: returns 0.0, not NaN/Inf\n- Very large elapsed (10 years): returns very small positive f64, never negative","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T16:59:22.913281Z","created_by":"tayloreernisse","updated_at":"2026-02-12T21:07:16.929095Z","closed_at":"2026-02-12T21:07:16.928983Z","close_reason":"Completed: added half_life_decay(elapsed_ms, half_life_days) -> f64 pure function with div-by-zero guard, negative elapsed clamping, and 2 tests (boundary math + monotonicity property). 
All 585 tests pass.","compaction_level":0,"original_size":0,"labels":["scoring"]} @@ -76,17 +83,18 @@ {"id":"bd-1ta","title":"[CP1] Integration tests for pagination","description":"Integration tests for GitLab pagination with wiremock.\n\n## Tests (tests/pagination_tests.rs)\n\n### Page Navigation\n- fetches_all_pages_when_multiple_exist\n- respects_per_page_parameter\n- follows_x_next_page_header_until_empty\n- falls_back_to_empty_page_stop_if_headers_missing\n\n### Cursor Behavior\n- applies_cursor_rewind_for_tuple_semantics\n- clamps_negative_rewind_to_zero\n\n## Test Setup\n- Use wiremock::MockServer\n- Set up handlers for /api/v4/projects/:id/issues\n- Return x-next-page headers\n- Verify request params (updated_after, per_page)\n\nFiles: tests/pagination_tests.rs\nDone when: All pagination tests pass with mocked server","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:59:07.806593Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:02.038945Z","closed_at":"2026-01-25T17:02:02.038945Z","deleted_at":"2026-01-25T17:02:02.038939Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-1u1","title":"Implement document regenerator","description":"## Background\nThe document regenerator drains the dirty_sources queue, regenerating documents for each entry. It uses per-item transactions for crash safety, a triple-hash fast path to skip unchanged documents entirely (no writes at all), and a bounded batch loop that drains completely. Error recording includes backoff computation.\n\n## Approach\nCreate `src/documents/regenerator.rs` per PRD Section 6.3.\n\n**Core function:**\n```rust\npub fn regenerate_dirty_documents(conn: &Connection) -> Result\n```\n\n**RegenerateResult:** { regenerated, unchanged, errored }\n\n**Algorithm (per PRD):**\n1. Loop: get_dirty_sources(conn) -> Vec<(SourceType, i64)>\n2. 
If empty, break (queue fully drained)\n3. For each (source_type, source_id):\n a. Begin transaction\n b. Call regenerate_one_tx(&tx, source_type, source_id) -> Result\n c. If Ok(changed): clear_dirty_tx, commit, count regenerated or unchanged\n d. If Err: record_dirty_error_tx (with backoff), commit, count errored\n\n**regenerate_one_tx (per PRD):**\n1. Extract document via extract_{type}_document(conn, source_id)\n2. If None (deleted): delete_document, return Ok(true)\n3. If Some(doc): call get_existing_hash() to check current state\n4. **If ALL THREE hashes match: return Ok(false) — skip ALL writes** (fast path)\n5. Otherwise: upsert_document with conditional label/path relinking\n6. Return Ok(content changed)\n\n**Helper functions (PRD-exact):**\n\n`get_existing_hash` — uses `optional()` to distinguish missing rows from DB errors:\n```rust\nfn get_existing_hash(\n conn: &Connection,\n source_type: SourceType,\n source_id: i64,\n) -> Result> {\n use rusqlite::OptionalExtension;\n let hash: Option = stmt\n .query_row(params, |row| row.get(0))\n .optional()?; // IMPORTANT: Not .ok() — .ok() would hide real DB errors\n Ok(hash)\n}\n```\n\n`get_document_id` — resolve document ID after upsert:\n```rust\nfn get_document_id(conn: &Connection, source_type: SourceType, source_id: i64) -> Result\n```\n\n`upsert_document` — checks existing triple hash before writing:\n```rust\nfn upsert_document(conn: &Connection, doc: &DocumentData) -> Result<()> {\n // 1. Query existing (id, content_hash, labels_hash, paths_hash) via OptionalExtension\n // 2. Triple-hash fast path: all match -> return Ok(())\n // 3. Upsert document row (ON CONFLICT DO UPDATE)\n // 4. Get doc_id (from existing or query after insert)\n // 5. Only delete+reinsert labels if labels_hash changed\n // 6. 
Only delete+reinsert paths if paths_hash changed\n}\n```\n\n**Key PRD detail — triple-hash fast path:**\n```rust\nif old_content_hash == &doc.content_hash\n && old_labels_hash == &doc.labels_hash\n && old_paths_hash == &doc.paths_hash\n{ return Ok(()); } // Skip ALL writes — prevents WAL churn\n```\n\n**Error recording with backoff:**\nrecord_dirty_error_tx reads current attempt_count from DB, computes next_attempt_at via shared backoff utility:\n```rust\nlet next_attempt_at = crate::core::backoff::compute_next_attempt_at(now, attempt_count + 1);\n```\n\n**All internal functions use _tx suffix** (take &Transaction) for atomicity.\n\n## Acceptance Criteria\n- [ ] Queue fully drained (bounded batch loop until empty)\n- [ ] Per-item transactions (crash loses at most 1 doc)\n- [ ] Triple-hash fast path: ALL THREE hashes match -> skip ALL writes (return Ok(false))\n- [ ] Content change: upsert document, update labels/paths\n- [ ] Labels-only change: relabels but skips path writes (paths_hash unchanged)\n- [ ] Deleted entity: delete document (cascade handles FTS/labels/paths/embeddings)\n- [ ] get_existing_hash uses `.optional()` (not `.ok()`) to preserve DB errors\n- [ ] get_document_id resolves document ID after upsert\n- [ ] Error recording: increment attempt_count, compute next_attempt_at via backoff\n- [ ] FTS triggers fire on insert/update/delete (verified by trigger, not regenerator)\n- [ ] RegenerateResult counts accurate (regenerated, unchanged, errored)\n- [ ] Errors do not abort batch (log, increment, continue)\n- [ ] `cargo test regenerator` passes\n\n## Files\n- `src/documents/regenerator.rs` — new file\n- `src/documents/mod.rs` — add `pub use regenerator::regenerate_dirty_documents;`\n\n## TDD Loop\nRED: Tests requiring DB:\n- `test_creates_new_document` — dirty source -> document created\n- `test_skips_unchanged_triple_hash` — all 3 hashes match -> unchanged count incremented, no DB writes\n- `test_updates_changed_content` — content_hash mismatch -> 
updated\n- `test_updates_changed_labels_only` — content same but labels_hash different -> updated\n- `test_updates_changed_paths_only` — content same but paths_hash different -> updated\n- `test_deletes_missing_source` — source deleted -> document deleted\n- `test_drains_queue` — queue empty after regeneration\n- `test_error_records_backoff` — error -> attempt_count incremented, next_attempt_at set\n- `test_get_existing_hash_not_found` — returns Ok(None) for missing document\nGREEN: Implement regenerate_dirty_documents + all helpers\nVERIFY: `cargo test regenerator`\n\n## Edge Cases\n- Empty queue: return immediately with all-zero counts\n- Extractor error for one item: record_dirty_error_tx, commit, continue\n- Triple-hash prevents WAL churn on incremental syncs (most entities unchanged)\n- Labels change but content does not: labels_hash mismatch triggers upsert with label relinking\n- get_existing_hash on missing document: returns Ok(None) via .optional() (not DB error)\n- get_existing_hash on corrupt DB: propagates real DB error (not masked by .ok())","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:25:55.178825Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:41:29.942386Z","closed_at":"2026-01-30T17:41:29.942324Z","close_reason":"Implemented document regenerator with triple-hash fast path, queue draining, fail-soft error handling + 5 tests","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1u1","depends_on_id":"bd-1yz","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1u1","depends_on_id":"bd-247","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1u1","depends_on_id":"bd-2fp","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1uc","title":"Implement DB upsert functions for resource events","description":"## Background\nNeed to store fetched resource events into the three event tables 
created by migration 011. The existing DB pattern uses rusqlite prepared statements with named parameters. Timestamps from GitLab are ISO 8601 strings that need conversion to ms epoch UTC (matching the existing time.rs parse_datetime_to_ms function).\n\n## Approach\nCreate src/core/events_db.rs (new module) with three upsert functions:\n\n```rust\nuse rusqlite::Connection;\nuse super::error::Result;\n\n/// Upsert state events for an entity.\n/// Uses INSERT OR REPLACE keyed on UNIQUE(gitlab_id, project_id).\npub fn upsert_state_events(\n conn: &Connection,\n project_id: i64, // local DB project id\n entity_type: &str, // \"issue\" | \"merge_request\"\n entity_local_id: i64, // local DB id of the issue/MR\n events: &[GitLabStateEvent],\n) -> Result\n\n/// Upsert label events for an entity.\npub fn upsert_label_events(\n conn: &Connection,\n project_id: i64,\n entity_type: &str,\n entity_local_id: i64,\n events: &[GitLabLabelEvent],\n) -> Result\n\n/// Upsert milestone events for an entity.\npub fn upsert_milestone_events(\n conn: &Connection,\n project_id: i64,\n entity_type: &str,\n entity_local_id: i64,\n events: &[GitLabMilestoneEvent],\n) -> Result\n```\n\nEach function:\n1. Prepares INSERT OR REPLACE statement\n2. For each event, maps GitLab types to DB columns:\n - `actor_gitlab_id` = event.user.map(|u| u.id)\n - `actor_username` = event.user.map(|u| u.username.clone())\n - `created_at` = parse_datetime_to_ms(&event.created_at)?\n - Set issue_id or merge_request_id based on entity_type\n3. Returns count of upserted rows\n4. 
Wraps in a savepoint for atomicity per entity\n\nRegister module in src/core/mod.rs:\n```rust\npub mod events_db;\n```\n\n## Acceptance Criteria\n- [ ] All three upsert functions compile and handle all event fields\n- [ ] Upserts are idempotent (re-inserting same event doesn't duplicate)\n- [ ] Timestamps converted to ms epoch UTC via parse_datetime_to_ms\n- [ ] actor_gitlab_id and actor_username populated from event.user (handles None)\n- [ ] entity_type correctly maps to issue_id/merge_request_id (other is NULL)\n- [ ] source_merge_request_id populated for state events (iid from source_merge_request)\n- [ ] source_commit populated for state events\n- [ ] label_name populated for label events\n- [ ] milestone_title and milestone_id populated for milestone events\n- [ ] Returns upserted count\n\n## Files\n- src/core/events_db.rs (new)\n- src/core/mod.rs (add `pub mod events_db;`)\n\n## TDD Loop\nRED: tests/events_db_tests.rs (new):\n- `test_upsert_state_events_basic` - insert 3 events, verify count and data\n- `test_upsert_state_events_idempotent` - insert same events twice, verify no duplicates\n- `test_upsert_label_events_with_actor` - verify actor fields populated\n- `test_upsert_milestone_events_null_user` - verify user: null doesn't crash\n- `test_upsert_state_events_entity_exclusivity` - verify only one of issue_id/merge_request_id set\n\nSetup: create_test_db() helper that applies migrations 001-011, inserts a test project + issue + MR.\n\nGREEN: Implement the three functions\n\nVERIFY: `cargo test events_db -- --nocapture`\n\n## Edge Cases\n- parse_datetime_to_ms must handle GitLab's format: \"2024-03-15T10:30:00.000Z\" and \"2024-03-15T10:30:00.000+00:00\"\n- INSERT OR REPLACE will fire CASCADE deletes if there are FK references to these rows — currently no other table references event rows, so this is safe\n- entity_type must be validated (\"issue\" or \"merge_request\") — panic or error on invalid\n- source_merge_request field contains an MR ref object, 
not an ID — extract .iid for DB column","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:31:57.242549Z","created_by":"tayloreernisse","updated_at":"2026-02-03T16:19:14.169437Z","closed_at":"2026-02-03T16:19:14.169233Z","close_reason":"Implemented upsert_state_events, upsert_label_events, upsert_milestone_events, count_events in src/core/events_db.rs. Uses savepoints for atomicity, LoreError::Database via ? operator for clean error handling.","compaction_level":0,"original_size":0,"labels":["db","gate-1","phase-b"],"dependencies":[{"issue_id":"bd-1uc","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1uc","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-1up1","title":"Implement File History screen (per-file MR timeline with rename tracking)","description":"## Background\nThe File History screen shows which MRs touched a file over time, with rename-aware tracking and optional DiffNote discussion snippets. It wraps run_file_history() from src/cli/commands/file_history.rs (added in v0.8.1) in a TUI view. 
While Trace answers \"why was this code introduced?\", File History answers \"what happened to this file?\" — a chronological MR timeline.\n\nThe core query resolves rename chains via BFS (resolve_rename_chain from src/core/file_history.rs), finds all MRs with mr_file_changes entries matching any renamed path, and optionally fetches DiffNote discussions on those paths.\n\n## Data Shapes (from src/cli/commands/file_history.rs)\n\n```rust\npub struct FileHistoryResult {\n pub path: String,\n pub rename_chain: Vec, // resolved paths via BFS\n pub renames_followed: bool,\n pub merge_requests: Vec,\n pub discussions: Vec,\n pub total_mrs: usize,\n pub paths_searched: usize,\n}\n\npub struct FileHistoryMr {\n pub iid: i64,\n pub title: String,\n pub state: String, // merged/opened/closed\n pub author_username: String,\n pub change_type: String, // added/modified/deleted/renamed\n pub merged_at_iso: Option,\n pub updated_at_iso: String,\n pub merge_commit_sha: Option,\n pub web_url: Option,\n}\n\npub struct FileDiscussion {\n pub discussion_id: String,\n pub author_username: String,\n pub body_snippet: String,\n pub path: String,\n pub created_at_iso: String,\n}\n```\n\nrun_file_history() signature (src/cli/commands/file_history.rs):\n```rust\npub fn run_file_history(\n config: &Config, // used only for DB path — bd-1f5b will extract query-only version\n path: &str,\n project: Option<&str>,\n no_follow_renames: bool,\n merged_only: bool,\n include_discussions: bool,\n limit: usize,\n) -> Result\n```\n\nAfter bd-1f5b extracts the query logic, the TUI will call a Connection-based variant:\n```rust\npub fn query_file_history(\n conn: &Connection,\n project_id: Option,\n path: &str,\n follow_renames: bool,\n merged_only: bool,\n include_discussions: bool,\n limit: usize,\n) -> Result\n```\n\n## Approach\n\n**Screen enum** (message.rs):\nAdd Screen::FileHistory variant (no parameters). Label: \"File History\". 
Breadcrumb: \"File History\".\n\n**Path autocomplete**: Same mechanism as Trace screen — query DISTINCT new_path from mr_file_changes. Share the known_paths cache with Trace if both are loaded, or each screen maintains its own (simpler).\n\n**State** (state/file_history.rs):\n```rust\n#[derive(Debug, Default)]\npub struct FileHistoryState {\n pub path_input: String,\n pub path_focused: bool,\n pub result: Option,\n pub selected_mr_index: usize,\n pub follow_renames: bool, // default true\n pub merged_only: bool, // default false\n pub show_discussions: bool, // default false\n pub scroll_offset: u16,\n pub known_paths: Vec, // autocomplete cache\n pub autocomplete_matches: Vec,\n pub autocomplete_index: usize,\n}\n```\n\n**Action** (action.rs):\n- fetch_file_history(conn, project_id, path, follow_renames, merged_only, show_discussions, limit) -> Result: calls query_file_history from file_history module (after bd-1f5b extraction)\n- fetch_known_paths(conn, project_id): shared with Trace screen (same query)\n\n**View** (view/file_history.rs):\n- Top: path input with autocomplete dropdown + toggle indicators [renames: on] [merged: off] [discussions: off]\n- If renames followed: rename chain breadcrumb (path_a -> path_b -> path_c) in dimmed text\n- Summary line: \"N merge requests across M paths\"\n- Main area: chronological MR list (sorted by updated_at descending):\n - Each row: MR state icon + !iid + title + @author + change_type tag + date\n - If show_discussions: inline discussion snippets beneath relevant MRs (indented, dimmed, author + date + body_snippet)\n- Footer: \"showing N of M\" when total_mrs > limit\n- Keyboard:\n - j/k: scroll MR list\n - Enter: navigate to MrDetail(EntityKey::mr(project_id, iid))\n - /: focus path input\n - Tab: cycle autocomplete suggestions when path focused\n - r: toggle follow_renames (re-fetches)\n - m: toggle merged_only (re-fetches)\n - d: toggle show_discussions (re-fetches)\n - q: back\n\n**Contextual entry points** (wired 
from other screens):\n- MR Detail: h on a file path opens File History pre-filled with that path\n- Expert mode (Who screen): when viewing a file path's experts, h opens File History for that path\n- Requires other screens to expose selected_file_path() -> Option\n\n## Acceptance Criteria\n- [ ] Screen::FileHistory added to message.rs Screen enum with label and breadcrumb\n- [ ] FileHistoryState struct with all fields, Default impl\n- [ ] Path input with autocomplete dropdown from mr_file_changes (same mechanism as Trace)\n- [ ] Rename chain displayed as breadcrumb when renames_followed is true\n- [ ] Chronological MR list with state icons (merged/opened/closed) and change_type tags\n- [ ] Enter on MR navigates to MrDetail(EntityKey::mr(project_id, iid))\n- [ ] r toggles follow_renames, m toggles merged_only, d toggles show_discussions — all re-fetch\n- [ ] Discussion snippets shown inline beneath MRs when toggled on\n- [ ] Summary line showing \"N merge requests across M paths\"\n- [ ] Footer truncation indicator when total_mrs > display limit\n- [ ] Empty state: \"No MRs found for this file\" with hint \"Run 'lore sync --fetch-mr-file-changes' to populate\"\n- [ ] Contextual navigation: h on file path in MR Detail opens File History pre-filled\n- [ ] Registered in command palette (label \"File History\", keywords [\"history\", \"file\", \"changes\"])\n- [ ] AppState.has_text_focus() updated to include file_history.path_focused\n- [ ] AppState.blur_text_focus() updated to include file_history.path_focused = false\n\n## Files\n- MODIFY: crates/lore-tui/src/message.rs (add Screen::FileHistory variant + label)\n- CREATE: crates/lore-tui/src/state/file_history.rs (FileHistoryState struct + Default)\n- MODIFY: crates/lore-tui/src/state/mod.rs (pub mod file_history, pub use FileHistoryState, add to AppState, update has_text_focus/blur_text_focus)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_file_history, share fetch_known_paths with Trace)\n- CREATE: 
crates/lore-tui/src/view/file_history.rs (render_file_history fn)\n- MODIFY: crates/lore-tui/src/view/mod.rs (add Screen::FileHistory dispatch arm)\n\n## TDD Anchor\nRED: Write test_fetch_file_history_returns_mrs in action tests. Setup: in-memory DB, insert project, MR (state=\"merged\", merged_at set), mr_file_changes row (new_path=\"src/lib.rs\", change_type=\"modified\"). Call fetch_file_history(conn, Some(project_id), \"src/lib.rs\", true, false, false, 50). Assert: result.merge_requests.len() == 1, result.merge_requests[0].iid matches.\nGREEN: Implement fetch_file_history calling query_file_history.\nVERIFY: cargo test -p lore-tui file_history -- --nocapture\n\nAdditional tests:\n- test_file_history_empty: path \"nonexistent.rs\" returns empty merge_requests\n- test_file_history_rename_chain: insert rename A->B, query A, assert rename_chain=[\"A\",\"B\"] and MRs touching B are included\n- test_file_history_merged_only: merged_only=true excludes opened/closed MRs\n- test_file_history_discussions: show_discussions=true populates discussions vec with DiffNote snippets\n- test_file_history_limit: insert 10 MRs, limit=5, assert merge_requests.len()==5 and total_mrs==10\n- test_autocomplete: shared with Trace tests\n\n## Edge Cases\n- File never modified by any MR: empty state with helpful message and sync hint\n- Rename chain with cycles: BFS visited set in resolve_rename_chain prevents infinite loop\n- Very long file paths: truncate from left in list view (...path/to/file.rs)\n- Hundreds of MRs for a single file: default limit 50, footer shows total count\n- Discussion body_snippet may contain markdown/code — render as plain text, no parsing\n- No mr_file_changes data at all: hint that sync needs --fetch-mr-file-changes (config.sync.fetch_mr_file_changes)\n- Project scope: if global_scope.project_id is set, pass it to query and autocomplete\n\n## Dependency Context\n- bd-1f5b (blocks): Extracts query_file_history(conn, ...) from run_file_history(config, ...) 
in src/cli/commands/file_history.rs. The current function opens its own DB connection from Config — TUI needs a Connection-based variant since it manages its own connection.\n- src/core/file_history.rs: resolve_rename_chain() used by query_file_history internally. TUI does not call it directly.\n- FileHistoryResult, FileHistoryMr, FileDiscussion: currently defined in src/cli/commands/file_history.rs — bd-1f5b should move these to core or make them importable.\n- Navigation: uses NavigationStack.push(Screen::MrDetail(key)) from crates/lore-tui/src/navigation.rs.\n- AppState composition: FileHistoryState added as field in AppState (state/mod.rs ~line 154-174). has_text_focus/blur_text_focus at lines 194-207 must include file_history.path_focused.\n- Autocomplete: fetch_known_paths query identical to Trace screen — consider extracting to shared helper in action.rs.\n- Contextual entry: requires MrDetailState to expose selected file path. Deferred if MR Detail not yet built.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-18T18:14:13.179338Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:34:24.563746Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1up1","depends_on_id":"bd-1f5b","type":"blocks","created_at":"2026-02-18T18:14:33.412864Z","created_by":"tayloreernisse"},{"issue_id":"bd-1up1","depends_on_id":"bd-nwux","type":"parent-child","created_at":"2026-02-18T18:14:13.180816Z","created_by":"tayloreernisse"}]} {"id":"bd-1ut","title":"[CP0] Final validation - tests, lint, typecheck","description":"## Background\n\nFinal validation ensures everything works together before marking CP0 complete. This is the integration gate - all unit tests, integration tests, lint, and type checking must pass. 
Manual smoke tests verify the full user experience.\n\nReference: docs/prd/checkpoint-0.md sections \"Definition of Done\", \"Manual Smoke Tests\"\n\n## Approach\n\n**Automated checks:**\n```bash\n# All tests pass\nnpm run test\n\n# TypeScript strict mode\nnpm run build # or: npx tsc --noEmit\n\n# ESLint with no errors\nnpm run lint\n```\n\n**Manual smoke tests (from PRD table):**\n\n| Command | Expected | Pass Criteria |\n|---------|----------|---------------|\n| `gi --help` | Command list | Shows all commands |\n| `gi version` | Version number | Shows installed version |\n| `gi init` | Interactive prompts | Creates valid config |\n| `gi init` (config exists) | Confirmation prompt | Warns before overwriting |\n| `gi init --force` | No prompt | Overwrites without asking |\n| `gi auth-test` | `Authenticated as @username` | Shows GitLab username |\n| `GITLAB_TOKEN=invalid gi auth-test` | Error message | Non-zero exit, clear error |\n| `gi doctor` | Status table | All required checks pass |\n| `gi doctor --json` | JSON object | Valid JSON, `success: true` |\n| `gi backup` | Backup path | Creates timestamped backup |\n| `gi sync-status` | No runs message | Stub output works |\n\n**Definition of Done gate items:**\n- [ ] `gi init` writes config to XDG path and validates projects against GitLab\n- [ ] `gi auth-test` succeeds with real PAT\n- [ ] `gi doctor` reports DB ok + GitLab ok\n- [ ] DB migrations apply; WAL + FK enabled; busy_timeout + synchronous set\n- [ ] App lock mechanism works (concurrent runs blocked)\n- [ ] All unit tests pass\n- [ ] All integration tests pass (mocked)\n- [ ] ESLint passes with no errors\n- [ ] TypeScript compiles with strict mode\n\n## Acceptance Criteria\n\n- [ ] `npm run test` exits 0 (all tests pass)\n- [ ] `npm run build` exits 0 (TypeScript compiles)\n- [ ] `npm run lint` exits 0 (no ESLint errors)\n- [ ] All 11 manual smoke tests pass\n- [ ] All 9 Definition of Done gate items verified\n\n## Files\n\nNo new files created. 
This bead verifies existing work.\n\n## TDD Loop\n\nThis IS the final verification step:\n\n```bash\n# Automated\nnpm run test\nnpm run build\nnpm run lint\n\n# Manual (requires GITLAB_TOKEN set with valid token)\ngi --help\ngi version\ngi init # go through setup\ngi auth-test\ngi doctor\ngi doctor --json | jq .success # should output true\ngi backup\ngi sync-status\ngi reset --confirm\ngi init # re-setup\n```\n\n## Edge Cases\n\n- Test coverage should be reasonable (aim for 80%+ on core modules)\n- Integration tests may flake on CI - check MSW setup\n- Manual tests require real GitLab token - document in README\n- ESLint may warn vs error - only errors block\n- TypeScript noImplicitAny catches missed types","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:52.078907Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:37:51.858558Z","closed_at":"2026-01-25T03:37:51.858474Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1ut","depends_on_id":"bd-1cb","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ut","depends_on_id":"bd-1gu","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ut","depends_on_id":"bd-1kh","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ut","depends_on_id":"bd-38e","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1ut","depends_on_id":"bd-3kj","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1v8","title":"Update robot-docs manifest with Phase B commands","description":"## Background\n\nThe robot-docs manifest is the agent self-discovery mechanism. 
It must include all Phase B commands so agents can discover temporal intelligence features.\n\n## Codebase Context\n\n- handle_robot_docs() in src/main.rs (line ~1646) returns JSON with commands, exit_codes, workflows, aliases, clap_error_codes\n- Currently 18 commands documented in the manifest\n- VALID_COMMANDS array in src/main.rs (line ~448): [\"issues\", \"mrs\", \"search\", \"sync\", \"ingest\", \"count\", \"status\", \"auth\", \"doctor\", \"version\", \"init\", \"stats\", \"generate-docs\", \"embed\", \"migrate\", \"health\", \"robot-docs\", \"completions\"]\n- Phase B adds 3 new commands: timeline, file-history, trace\n- count gains new entity: \"references\" (bd-2ez)\n- Existing workflows: first_setup, daily_sync, search, pre_flight\n\n## Approach\n\n### 1. Add commands to handle_robot_docs() JSON:\n\n```json\n\"timeline\": {\n \"description\": \"Chronological timeline of events matching a keyword query\",\n \"flags\": [\"\", \"-p \", \"--since \", \"--depth \", \"--expand-mentions\", \"-n \"],\n \"example\": \"lore --robot timeline 'authentication' --since 30d\"\n},\n\"file-history\": {\n \"description\": \"Which MRs touched a file, with rename chain resolution\",\n \"flags\": [\"\", \"-p \", \"--discussions\", \"--no-follow-renames\", \"--merged\", \"-n \"],\n \"example\": \"lore --robot file-history src/auth/oauth.rs\"\n},\n\"trace\": {\n \"description\": \"Trace file -> MR -> issue -> discussions decision chain\",\n \"flags\": [\"\", \"-p \", \"--discussions\", \"--no-follow-renames\", \"-n \"],\n \"example\": \"lore --robot trace src/auth/oauth.rs\"\n}\n```\n\n### 2. Update count command to mention \"references\" entity\n\n### 3. 
Add temporal_intelligence workflow:\n```json\n\"temporal_intelligence\": {\n \"description\": \"Query temporal data about project history\",\n \"steps\": [\n \"lore sync (ensure events fetched with fetchResourceEvents=true)\",\n \"lore timeline '' for chronological event history\",\n \"lore file-history for file-level MR history\",\n \"lore trace for file -> MR -> issue -> discussion chain\"\n ]\n}\n```\n\n### 4. Add timeline, file-history, trace to VALID_COMMANDS array\n\n## Acceptance Criteria\n\n- [ ] robot-docs includes timeline, file-history, trace commands\n- [ ] count references documented\n- [ ] temporal_intelligence workflow present\n- [ ] VALID_COMMANDS includes all 3 new commands\n- [ ] Examples are valid, runnable commands\n- [ ] cargo check --all-targets passes\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n\n- src/main.rs (update handle_robot_docs + VALID_COMMANDS array)\n\n## TDD Loop\n\nVERIFY: lore robot-docs | jq '.data.commands.timeline'\nVERIFY: lore robot-docs | jq '.data.workflows.temporal_intelligence'","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-02T22:43:07.859092Z","created_by":"tayloreernisse","updated_at":"2026-02-05T20:17:38.827205Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1v8","depends_on_id":"bd-1ht","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1v8","depends_on_id":"bd-2ez","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1v8","depends_on_id":"bd-2n4","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1v8t","title":"Add WorkItemStatus type and SyncConfig toggle","description":"## Background\nThe GraphQL status response returns name, category, color, and iconName fields. We need a Rust struct that deserializes this directly. 
Category is stored as raw Option (not an enum) because GitLab 18.5+ supports custom statuses with arbitrary category values. We also need a config toggle so users can disable status enrichment.\n\n## Approach\nAdd WorkItemStatus to the existing types module. Add fetch_work_item_status to the existing SyncConfig with default_true() helper. Also add WorkItemStatus to pub use re-exports in src/gitlab/mod.rs.\n\n## Files\n- src/gitlab/types.rs (add struct after GitLabMergeRequest, before #[cfg(test)])\n- src/core/config.rs (add field to SyncConfig struct + Default impl)\n- src/gitlab/mod.rs (add WorkItemStatus to pub use)\n\n## Implementation\n\nIn src/gitlab/types.rs (needs Serialize, Deserialize derives already in scope):\n #[derive(Debug, Clone, Serialize, Deserialize)]\n pub struct WorkItemStatus {\n pub name: String,\n pub category: Option,\n pub color: Option,\n #[serde(rename = \"iconName\")]\n pub icon_name: Option,\n }\n\nIn src/core/config.rs SyncConfig struct (after fetch_mr_file_changes):\n #[serde(rename = \"fetchWorkItemStatus\", default = \"default_true\")]\n pub fetch_work_item_status: bool,\n\nIn impl Default for SyncConfig (after fetch_mr_file_changes: true):\n fetch_work_item_status: true,\n\n## Acceptance Criteria\n- [ ] WorkItemStatus deserializes: {\"name\":\"In progress\",\"category\":\"IN_PROGRESS\",\"color\":\"#1f75cb\",\"iconName\":\"status-in-progress\"}\n- [ ] Optional fields: {\"name\":\"To do\"} -> category/color/icon_name are None\n- [ ] Unknown category: {\"name\":\"Custom\",\"category\":\"SOME_FUTURE_VALUE\"} -> Ok\n- [ ] Null category: {\"name\":\"In progress\",\"category\":null} -> None\n- [ ] SyncConfig::default().fetch_work_item_status == true\n- [ ] JSON without fetchWorkItemStatus key -> defaults true\n- [ ] cargo check --all-targets passes\n\n## TDD Loop\nRED: test_work_item_status_deserialize, test_work_item_status_optional_fields, test_work_item_status_unknown_category, test_work_item_status_null_category, 
test_config_fetch_work_item_status_default_true, test_config_deserialize_without_key\nGREEN: Add struct + config field\nVERIFY: cargo test test_work_item_status && cargo test test_config\n\n## Edge Cases\n- serde rename \"iconName\" -> icon_name (camelCase in GraphQL)\n- Category is Option, NOT an enum\n- Config key is camelCase \"fetchWorkItemStatus\" matching existing convention","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:41:42.790001Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.416990Z","closed_at":"2026-02-11T07:21:33.416950Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1v8t","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1v9m","title":"Implement AppState composition + LoadState + ScreenIntent","description":"## Background\nAppState is the top-level state composition — each field corresponds to one screen. State is preserved when navigating away (never cleared on pop). LoadState enables stale-while-revalidate: screens show last data during refresh with a spinner. 
ScreenIntent is the pure return type from state handlers — they never launch async tasks directly.\n\n## Approach\nCreate crates/lore-tui/src/state/mod.rs:\n- AppState struct: dashboard (DashboardState), issue_list (IssueListState), issue_detail (IssueDetailState), mr_list (MrListState), mr_detail (MrDetailState), search (SearchState), timeline (TimelineState), who (WhoState), sync (SyncState), command_palette (CommandPaletteState), global_scope (ScopeContext), load_state (ScreenLoadStateMap), error_toast (Option), show_help (bool), terminal_size ((u16, u16))\n- LoadState enum: Idle, LoadingInitial, Refreshing, Error(String)\n- ScreenLoadStateMap: wraps HashMap, get()/set()/any_loading()\n- AppState methods: set_loading(), set_error(), clear_error(), has_text_focus(), blur_text_focus(), delegate_text_event(), interpret_screen_key(), handle_screen_msg()\n- ScreenIntent enum: None, Navigate(Screen), RequeryNeeded(Screen)\n- handle_screen_msg() matches Msg variants and returns ScreenIntent (NEVER Cmd::task)\n\nCreate stub per-screen state files (just Default-derivable structs):\n- state/dashboard.rs, issue_list.rs, issue_detail.rs, mr_list.rs, mr_detail.rs, search.rs, timeline.rs, who.rs, sync.rs, command_palette.rs\n\n## Acceptance Criteria\n- [ ] AppState derives Default and compiles with all screen state fields\n- [ ] LoadState has Idle, LoadingInitial, Refreshing, Error variants\n- [ ] ScreenLoadStateMap::get() returns Idle for untracked screens\n- [ ] ScreenLoadStateMap::any_loading() returns true when any screen is loading\n- [ ] has_text_focus() checks all filter/query focused flags\n- [ ] blur_text_focus() resets all focus flags\n- [ ] handle_screen_msg() returns ScreenIntent, never Cmd::task\n- [ ] ScreenIntent::RequeryNeeded signals that LoreApp should dispatch supervised query\n\n## Files\n- CREATE: crates/lore-tui/src/state/mod.rs\n- CREATE: crates/lore-tui/src/state/dashboard.rs (stub)\n- CREATE: crates/lore-tui/src/state/issue_list.rs (stub)\n- CREATE: 
crates/lore-tui/src/state/issue_detail.rs (stub)\n- CREATE: crates/lore-tui/src/state/mr_list.rs (stub)\n- CREATE: crates/lore-tui/src/state/mr_detail.rs (stub)\n- CREATE: crates/lore-tui/src/state/search.rs (stub)\n- CREATE: crates/lore-tui/src/state/timeline.rs (stub)\n- CREATE: crates/lore-tui/src/state/who.rs (stub)\n- CREATE: crates/lore-tui/src/state/sync.rs (stub)\n- CREATE: crates/lore-tui/src/state/command_palette.rs (stub)\n\n## TDD Anchor\nRED: Write test_load_state_default_idle that creates ScreenLoadStateMap, asserts get(&Screen::Dashboard) returns Idle.\nGREEN: Implement ScreenLoadStateMap with HashMap defaulting to Idle.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_load_state\n\n## Edge Cases\n- LoadState::set() removes Idle entries from the map to prevent unbounded growth\n- Screen::IssueDetail(key) comparison for HashMap: requires Screen to impl Hash+Eq or use ScreenKind discriminant\n- has_text_focus() must be kept in sync as new screens add text inputs","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:56:42.023482Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:35:46.811462Z","closed_at":"2026-02-12T20:35:46.811406Z","close_reason":"Implemented state/ module: AppState (11 screen fields + cross-cutting), LoadState (4 variants), ScreenLoadStateMap (auto-prune Idle), ScreenIntent (None/Navigate/RequeryNeeded), ScopeContext, 10 per-screen state stubs. 12 tests. Quality gate green (114 total).","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1v9m","depends_on_id":"bd-c9gk","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-1vti","title":"Write decay and scoring example-based tests (TDD)","description":"## Background\nAll implementation beads (bd-1soz through bd-11mg) include their own inline TDD tests. 
This bead is the integration verification: run the full test suite and confirm everything works together with no regressions.\n\n## Approach\nRun cargo test and verify:\n1. All NEW tests pass (31 tests across implementation beads)\n2. All EXISTING tests pass unchanged (existing who tests, config tests, etc.)\n3. No test interference (--test-threads=1 mode)\n4. All tests in who.rs test module compile and run cleanly\n\nTest count by bead:\n- bd-1soz: 2 (test_half_life_decay_math, test_score_monotonicity_by_age)\n- bd-2w1p: 3 (test_config_validation_rejects_zero_half_life, _absurd_half_life, _nan_multiplier)\n- bd-18dn: 2 (test_path_normalization_handles_dot_and_double_slash, _preserves_prefix_semantics)\n- bd-1hoq: 1 (test_expert_sql_returns_expected_signal_rows)\n- bd-1h3f: 2 (test_old_path_probe_exact_and_prefix, test_suffix_probe_uses_old_path_sources)\n- bd-13q8: 13 (decay integration + invariant tests)\n- bd-11mg: 8 (CLI flag tests: explain_score, as_of, excluded_usernames, etc.)\nTotal: 2+3+2+1+2+13+8 = 31 new tests\n\nThis is NOT a code-writing bead — it is a verification checkpoint.\n\n## Acceptance Criteria\n- [ ] cargo test -p lore passes (all tests green)\n- [ ] cargo test -p lore -- --test-threads=1 passes (no test interference)\n- [ ] No existing test assertions were changed (only callsite signatures updated in bd-13q8 and ScoringConfig literals in bd-1b50)\n- [ ] Total test count: existing + 31 new = all pass\n\n## TDD Loop\nN/A — this bead verifies, does not write code.\nVERIFY: cargo test -p lore\n\n## Files\nNone modified — read-only 
verification.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-09T17:00:29.453420Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:45:44.798773Z","compaction_level":0,"original_size":0,"labels":["scoring","test"],"dependencies":[{"issue_id":"bd-1vti","depends_on_id":"bd-11mg","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"},{"issue_id":"bd-1vti","depends_on_id":"bd-18dn","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"},{"issue_id":"bd-1vti","depends_on_id":"bd-1b50","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"},{"issue_id":"bd-1vti","depends_on_id":"bd-1h3f","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"},{"issue_id":"bd-1vti","depends_on_id":"bd-1soz","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"},{"issue_id":"bd-1vti","depends_on_id":"bd-2w1p","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"},{"issue_id":"bd-1vti","depends_on_id":"bd-2yu5","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"}]} +{"id":"bd-1vti","title":"Write decay and scoring example-based tests (TDD)","description":"## Background\nAll implementation beads (bd-1soz through bd-11mg) include their own inline TDD tests. This bead is the integration verification: run the full test suite and confirm everything works together with no regressions.\n\n## Approach\nRun cargo test and verify:\n1. All NEW tests pass (31 tests across implementation beads)\n2. All EXISTING tests pass unchanged (existing who tests, config tests, etc.)\n3. No test interference (--test-threads=1 mode)\n4. 
All tests in who.rs test module compile and run cleanly\n\nTest count by bead:\n- bd-1soz: 2 (test_half_life_decay_math, test_score_monotonicity_by_age)\n- bd-2w1p: 3 (test_config_validation_rejects_zero_half_life, _absurd_half_life, _nan_multiplier)\n- bd-18dn: 2 (test_path_normalization_handles_dot_and_double_slash, _preserves_prefix_semantics)\n- bd-1hoq: 1 (test_expert_sql_returns_expected_signal_rows)\n- bd-1h3f: 2 (test_old_path_probe_exact_and_prefix, test_suffix_probe_uses_old_path_sources)\n- bd-13q8: 13 (decay integration + invariant tests)\n- bd-11mg: 8 (CLI flag tests: explain_score, as_of, excluded_usernames, etc.)\nTotal: 2+3+2+1+2+13+8 = 31 new tests\n\nThis is NOT a code-writing bead — it is a verification checkpoint.\n\n## Acceptance Criteria\n- [ ] cargo test -p lore passes (all tests green)\n- [ ] cargo test -p lore -- --test-threads=1 passes (no test interference)\n- [ ] No existing test assertions were changed (only callsite signatures updated in bd-13q8 and ScoringConfig literals in bd-1b50)\n- [ ] Total test count: existing + 31 new = all pass\n\n## TDD Loop\nN/A — this bead verifies, does not write code.\nVERIFY: cargo test -p lore\n\n## Files\nNone modified — read-only verification.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T17:00:29.453420Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:43:04.414775Z","closed_at":"2026-02-12T20:43:04.414735Z","close_reason":"Implemented by time-decay swarm: 3 agents, 12 tasks, 621 tests passing, all quality gates 
green","compaction_level":0,"original_size":0,"labels":["scoring","test"],"dependencies":[{"issue_id":"bd-1vti","depends_on_id":"bd-11mg","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1vti","depends_on_id":"bd-18dn","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1vti","depends_on_id":"bd-1b50","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1vti","depends_on_id":"bd-1h3f","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1vti","depends_on_id":"bd-1soz","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1vti","depends_on_id":"bd-2w1p","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1vti","depends_on_id":"bd-2yu5","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-1wa2","title":"Design Actionable Insights panel (heuristic queries TBD)","description":"## Background\nThe PRD specifies an Actionable Insights panel on the Dashboard that surfaces heuristic signals: stale P1 issues, blocked MRs awaiting review, velocity spikes/dips, and assignee workload imbalance. This requires heuristic query functions that do NOT currently exist in the lore codebase.\n\nSince the TUI work is purely UI built on existing code, the Actionable Insights panel is deferred to a later phase when the heuristic queries are implemented. This bead tracks the design and eventual implementation.\n\n## Approach\nWhen ready to implement:\n1. Define InsightKind enum: StaleHighPriority, BlockedMR, VelocityAnomaly, WorkloadImbalance\n2. Define Insight struct: kind, severity (Info/Warning/Critical), title, description, entity_refs (Vec)\n3. Implement heuristic query functions in lore core (NOT in TUI crate)\n4. Wire insights into DashboardState as Optional>\n5. 
Render as a scrollable panel with severity-colored icons\n\n## Acceptance Criteria\n- [ ] InsightKind and Insight types defined\n- [ ] At least 2 heuristic queries implemented (stale P1, blocked MR)\n- [ ] Dashboard renders insights panel when data available\n- [ ] Insights panel is scrollable with j/k\n- [ ] Enter on insight navigates to related entity\n- [ ] Empty insights shows \"No insights\" or hides panel entirely\n\n## Status\nBLOCKED: Requires heuristic query functions that don't exist yet. This is NOT a TUI-only task — it requires backend query work first.\n\n## Files\n- CREATE: src/core/insights.rs (heuristic query functions — in main crate, not TUI)\n- MODIFY: crates/lore-tui/src/state/dashboard.rs (add insights field)\n- MODIFY: crates/lore-tui/src/view/dashboard.rs (add insights panel)\n\n## Edge Cases\n- Insights depend on data freshness: stale DB = stale insights. Show \"last updated\" timestamp.\n- Heuristic thresholds should be configurable (e.g., \"stale\" = P1 untouched for 7 days)\n- Large number of insights: cap at 20, show \"N more...\" link","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-12T18:08:15.172539Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:50.980246Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1wa2","depends_on_id":"bd-35g5","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1x6","title":"Implement lore sync CLI command","description":"## Background\nThe sync command is the unified orchestrator for the full pipeline: ingest -> generate-docs -> embed. It replaces the need to run three separate commands. It acquires a lock, runs each stage sequentially, and reports combined results. Individual stages can be skipped via flags (--no-embed, --no-docs). The command is designed for cron/scheduled execution. 
Individual commands (`lore generate-docs`, `lore embed`) still exist for manual recovery and debugging.\n\n## Approach\nCreate `src/cli/commands/sync.rs` per PRD Section 6.4.\n\n**IMPORTANT: run_sync is async** (embed_documents and search_hybrid are async).\n\n**Key types (PRD-exact):**\n```rust\n#[derive(Debug, Serialize)]\npub struct SyncResult {\n pub issues_updated: usize,\n pub mrs_updated: usize,\n pub discussions_fetched: usize,\n pub documents_regenerated: usize,\n pub documents_embedded: usize,\n}\n\n#[derive(Debug, Default)]\npub struct SyncOptions {\n pub full: bool, // Reset cursors, fetch everything\n pub force: bool, // Override stale lock\n pub no_embed: bool, // Skip embedding step\n pub no_docs: bool, // Skip document regeneration\n}\n```\n\n**Core function (async, PRD-exact):**\n```rust\npub async fn run_sync(config: &Config, options: SyncOptions) -> Result\n```\n\n**Pipeline (sequential steps per PRD):**\n1. Acquire app lock with heartbeat (via existing `src/core/lock.rs`)\n2. Ingest delta: fetch issues + MRs via cursor-based sync (calls existing ingestion orchestrator)\n - Each upserted entity marked dirty via `mark_dirty_tx(&tx)` inside ingestion transaction\n3. Process `pending_discussion_fetches` queue (bounded)\n - Discussion sweep uses CTE to capture stale IDs, then cascading deletes\n4. Regenerate documents from `dirty_sources` queue (unless --no-docs)\n5. Embed documents with changed content_hash (unless --no-embed; skipped gracefully if Ollama unavailable)\n6. Release lock, record sync_run\n\n**NOTE (PRD):** Rolling backfill window removed — the existing cursor + watermark design handles old issues with resumed activity. 
GitLab updates `updated_at` when new comments are added, so the cursor naturally picks up old issues that receive new activity.\n\n**CLI args (PRD-exact):**\n```rust\n#[derive(Args)]\npub struct SyncArgs {\n /// Reset cursors, fetch everything\n #[arg(long)]\n full: bool,\n /// Override stale lock\n #[arg(long)]\n force: bool,\n /// Skip embedding step\n #[arg(long)]\n no_embed: bool,\n /// Skip document regeneration\n #[arg(long)]\n no_docs: bool,\n}\n```\n\n**Human output:**\n```\nSync complete:\n Issues updated: 42\n MRs updated: 18\n Discussions fetched: 56\n Documents regenerated: 38\n Documents embedded: 38\n Elapsed: 2m 15s\n```\n\n**JSON output:**\n```json\n{\"ok\": true, \"data\": {...}, \"meta\": {\"elapsed_ms\": 135000}}\n```\n\n## Acceptance Criteria\n- [ ] Function is `async fn run_sync`\n- [ ] Takes `SyncOptions` struct (not separate params)\n- [ ] Returns `SyncResult` with flat fields (not nested sub-structs)\n- [ ] Full pipeline orchestrated: ingest -> discussion queue -> docs -> embed\n- [ ] --full resets cursors (passes through to ingest)\n- [ ] --force overrides stale sync lock\n- [ ] --no-embed skips embedding stage (Ollama not needed)\n- [ ] --no-docs skips document regeneration stage\n- [ ] Discussion queue processing bounded per run\n- [ ] Dirty sources marked inside ingestion transactions (via mark_dirty_tx)\n- [ ] Progress reporting: stage names + elapsed time\n- [ ] Lock acquired with heartbeat at start, released at end (even on error)\n- [ ] Embedding skipped gracefully if Ollama unavailable (warning, not error)\n- [ ] JSON summary in robot mode\n- [ ] Human-readable summary with elapsed time\n- [ ] `cargo build` succeeds\n\n## Files\n- `src/cli/commands/sync.rs` — new file\n- `src/cli/commands/mod.rs` — add `pub mod sync;`\n- `src/cli/mod.rs` — add SyncArgs, wire up sync subcommand\n- `src/main.rs` — add sync command handler (async dispatch)\n\n## TDD Loop\nRED: Integration test requiring full pipeline\nGREEN: Implement run_sync 
orchestration (async)\nVERIFY: `cargo build && cargo test sync`\n\n## Edge Cases\n- Ollama unavailable + --no-embed not set: sync should NOT fail — embed stage logs warning, returns 0 embedded\n- Lock already held: error unless --force (and lock is stale)\n- No dirty sources after ingest: regeneration stage returns 0 (not error)\n- --full with large dataset: keyset pagination prevents OFFSET degradation","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:27:09.577782Z","created_by":"tayloreernisse","updated_at":"2026-01-30T18:05:34.676100Z","closed_at":"2026-01-30T18:05:34.676035Z","close_reason":"Sync CLI: async run_sync orchestrator with 4-stage pipeline (ingest issues, ingest MRs, generate-docs, embed), SyncOptions/SyncResult, --full/--force/--no-embed/--no-docs flags, graceful Ollama degradation, human+JSON output, clean build, all tests pass","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1x6","depends_on_id":"bd-1i2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1x6","depends_on_id":"bd-1je","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1x6","depends_on_id":"bd-2sx","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1x6","depends_on_id":"bd-38q","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1x6","depends_on_id":"bd-3qs","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1y7q","title":"Write invariant tests for ranking system","description":"## Background\nInvariant tests catch subtle ranking regressions that example-based tests miss. These test properties that must hold for ANY input, not specific values.\n\n## Approach\n\n### test_score_monotonicity_by_age:\nGenerate 50 random (age_ms, half_life_days) pairs using a simple LCG PRNG (deterministic seed for reproducibility). 
Assert decay(older) <= decay(newer) for all pairs where older > newer. Tests the pure half_life_decay() function only.\n\n### test_row_order_independence:\nInsert the same 5 signals in two orderings (forward and reverse). Run query_expert on both -> assert identical username ordering and identical scores (f64 bit-equal). Use a deterministic dataset with varied timestamps.\n\n### test_reviewer_split_is_exhaustive:\nSet up 3 reviewers on the same MR:\n1. Reviewer with substantive DiffNotes (>= 20 chars) -> must appear in participated ONLY\n2. Reviewer with no DiffNotes -> must appear in assigned-only ONLY\n3. Reviewer with trivial note (< 20 chars) -> must appear in assigned-only ONLY\nUse --explain-score to verify each reviewer's components: participated reviewer has reviewer_participated > 0 and reviewer_assigned == 0; others have reviewer_assigned > 0 and reviewer_participated == 0.\n\n### test_deterministic_accumulation_order:\nInsert signals for one user with 15 MRs at varied timestamps. Run query_expert 100 times in a loop. 
Assert all 100 runs produce the exact same f64 score (use == not approx, to verify bit-identical results from sorted accumulation).\n\n## Acceptance Criteria\n- [ ] All 4 tests pass\n- [ ] No flakiness across 10 consecutive cargo test runs\n- [ ] test_score_monotonicity covers at least 50 random pairs\n- [ ] test_deterministic_accumulation runs at least 100 iterations\n\n## Files\n- src/cli/commands/who.rs (test module)\n\n## Edge Cases\n- LCG PRNG for monotonicity test: use fixed seed, not rand crate (avoid dependency)\n- Bit-identical f64: use assert_eq!(a, b) not approx — the deterministic ordering guarantees this\n- Row order test: must insert in genuinely different orders, not just shuffled within same transaction","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T17:00:35.774542Z","created_by":"tayloreernisse","updated_at":"2026-02-09T17:17:18.920235Z","closed_at":"2026-02-09T17:17:18.920188Z","close_reason":"Tests distributed to implementation beads: monotonicity->bd-1soz, row_order+split+deterministic->bd-13q8","compaction_level":0,"original_size":0,"labels":["scoring","test"]} {"id":"bd-1y8","title":"Implement chunk ID encoding module","description":"## Background\nsqlite-vec uses a single integer rowid for embeddings. To store multiple chunks per document, we encode (document_id, chunk_index) into a single rowid using a multiplier. This module is shared between the embedding pipeline (encode on write) and vector search (decode on read). 
The encoding scheme supports up to 1000 chunks per document.\n\n## Approach\nCreate `src/embedding/chunk_ids.rs`:\n\n```rust\n/// Multiplier for encoding (document_id, chunk_index) into a single rowid.\n/// Supports up to 1000 chunks per document (32M chars at 32k/chunk).\npub const CHUNK_ROWID_MULTIPLIER: i64 = 1000;\n\n/// Encode (document_id, chunk_index) into a sqlite-vec rowid.\n///\n/// rowid = document_id * CHUNK_ROWID_MULTIPLIER + chunk_index\npub fn encode_rowid(document_id: i64, chunk_index: i64) -> i64 {\n document_id * CHUNK_ROWID_MULTIPLIER + chunk_index\n}\n\n/// Decode a sqlite-vec rowid back into (document_id, chunk_index).\npub fn decode_rowid(rowid: i64) -> (i64, i64) {\n let document_id = rowid / CHUNK_ROWID_MULTIPLIER;\n let chunk_index = rowid % CHUNK_ROWID_MULTIPLIER;\n (document_id, chunk_index)\n}\n```\n\nAlso create the parent module `src/embedding/mod.rs`:\n```rust\npub mod chunk_ids;\n// Later beads add: pub mod ollama; pub mod pipeline;\n```\n\nUpdate `src/lib.rs`: add `pub mod embedding;`\n\n## Acceptance Criteria\n- [ ] `encode_rowid(42, 0)` == 42000\n- [ ] `encode_rowid(42, 5)` == 42005\n- [ ] `decode_rowid(42005)` == (42, 5)\n- [ ] Roundtrip: decode(encode(doc_id, chunk_idx)) == (doc_id, chunk_idx) for all valid inputs\n- [ ] CHUNK_ROWID_MULTIPLIER is 1000\n- [ ] `cargo test chunk_ids` passes\n\n## Files\n- `src/embedding/chunk_ids.rs` — new file\n- `src/embedding/mod.rs` — new file (module root)\n- `src/lib.rs` — add `pub mod embedding;`\n\n## TDD Loop\nRED: Tests in `#[cfg(test)] mod tests`:\n- `test_encode_single_chunk` — encode(1, 0) == 1000\n- `test_encode_multi_chunk` — encode(1, 5) == 1005\n- `test_decode_roundtrip` — property test over range of doc_ids and chunk_indices\n- `test_decode_zero_chunk` — decode(42000) == (42, 0)\n- `test_multiplier_value` — assert CHUNK_ROWID_MULTIPLIER == 1000\nGREEN: Implement encode_rowid, decode_rowid\nVERIFY: `cargo test chunk_ids`\n\n## Edge Cases\n- chunk_index >= 1000: not expected 
(documents that large would be pathological), but no runtime panic — just incorrect decode. The embedding pipeline caps chunks well below this.\n- document_id = 0: valid (encode returns chunk_index directly)","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-30T15:26:34.060769Z","created_by":"tayloreernisse","updated_at":"2026-01-30T16:51:59.048910Z","closed_at":"2026-01-30T16:51:59.048843Z","close_reason":"Completed: chunk_ids module with encode_rowid/decode_rowid, CHUNK_ROWID_MULTIPLIER=1000, 6 tests pass","compaction_level":0,"original_size":0} {"id":"bd-1yu","title":"[CP1] GitLab types for issues, discussions, notes","description":"Add TypeScript interfaces for GitLab API responses.\n\nTypes to add to src/gitlab/types.ts:\n- GitLabIssue: id, iid, project_id, title, description, state, timestamps, author, labels[], labels_details?, web_url\n- GitLabDiscussion: id (string), individual_note, notes[]\n- GitLabNote: id, type, body, author, timestamps, system, resolvable, resolved, resolved_by, resolved_at, position?\n\nFiles: src/gitlab/types.ts\nDone when: Types compile and match GitLab API documentation","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:19:00.558718Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.153996Z","closed_at":"2026-01-25T15:21:35.153996Z","deleted_at":"2026-01-25T15:21:35.153993Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} -{"id":"bd-1yx","title":"Implement rename chain resolution for file-history","description":"## Background\n\nRename chain resolution is the core algorithm for Gate 4. When querying history of src/auth.rs, it finds MRs that touched the file when it was previously named src/authentication.rs. 
This is reused by Gate 5 (trace) as well.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 4.6 (Rename Handling).\n\n## Codebase Context\n\n- mr_file_changes table (migration 016, bd-1oo): merge_request_id, project_id, old_path, new_path, change_type\n- change_type='renamed' rows have both old_path and new_path populated\n- Partial index `idx_mfc_renamed` on (project_id, change_type) WHERE change_type='renamed' optimizes BFS queries\n- Also `idx_mfc_project_path` on (project_id, new_path) and `idx_mfc_project_old_path` partial index\n- No timeline/trace/file_history modules exist yet in src/core/\n\n## Approach\n\nCreate `src/core/file_history.rs`:\n\n```rust\nuse std::collections::HashSet;\nuse std::collections::VecDeque;\nuse rusqlite::Connection;\nuse crate::core::error::Result;\n\n/// Resolves a file path through its rename history.\n/// Returns all equivalent paths (original + renames) for use in queries.\n/// BFS in both directions: forward (old_path -> new_path) and backward (new_path -> old_path).\npub fn resolve_rename_chain(\n conn: &Connection,\n project_id: i64,\n path: &str,\n max_hops: usize, // default 10 from CLI\n) -> Result> {\n let mut visited: HashSet = HashSet::new();\n let mut queue: VecDeque = VecDeque::new();\n\n visited.insert(path.to_string());\n queue.push_back(path.to_string());\n\n let forward_sql = \"SELECT mfc.new_path FROM mr_file_changes mfc \\\n WHERE mfc.project_id = ?1 AND mfc.old_path = ?2 AND mfc.change_type = 'renamed'\";\n let backward_sql = \"SELECT mfc.old_path FROM mr_file_changes mfc \\\n WHERE mfc.project_id = ?1 AND mfc.new_path = ?2 AND mfc.change_type = 'renamed'\";\n\n while let Some(current) = queue.pop_front() {\n if visited.len() > max_hops + 1 { break; }\n\n // Forward: current was the old name -> discover new names\n let mut stmt = conn.prepare(forward_sql)?;\n let forward: Vec = stmt.query_map(\n rusqlite::params\\![project_id, current],\n |row| row.get(0),\n )?.filter_map(|r| 
r.ok()).collect();\n\n // Backward: current was the new name -> discover old names\n let mut stmt = conn.prepare(backward_sql)?;\n let backward: Vec = stmt.query_map(\n rusqlite::params\\![project_id, current],\n |row| row.get(0),\n )?.filter_map(|r| r.ok()).collect();\n\n for discovered in forward.into_iter().chain(backward) {\n if visited.insert(discovered.clone()) {\n queue.push_back(discovered);\n }\n }\n }\n\n Ok(visited.into_iter().collect())\n}\n```\n\nRegister in `src/core/mod.rs`: add `pub mod file_history;`\n\n## Acceptance Criteria\n\n- [ ] `resolve_rename_chain()` follows renames in both directions (forward + backward)\n- [ ] Cycles detected via HashSet (same path never visited twice)\n- [ ] Bounded at max_hops (default 10)\n- [ ] No renames found: returns vec with just the original path\n- [ ] max_hops=0: returns just original path without querying DB\n- [ ] Module registered in src/core/mod.rs as `pub mod file_history;`\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/core/file_history.rs` (NEW)\n- `src/core/mod.rs` (add `pub mod file_history;`)\n\n## TDD Loop\n\nRED:\n- `test_rename_chain_no_renames` — returns just original path\n- `test_rename_chain_forward` — a.rs -> b.rs -> c.rs: starting from a.rs finds all three\n- `test_rename_chain_backward` — starting from c.rs finds a.rs and b.rs\n- `test_rename_chain_cycle_detection` — a->b->a terminates without infinite loop\n- `test_rename_chain_max_hops_zero` — returns just original path\n- `test_rename_chain_max_hops_bounded` — chain longer than max is truncated\n\nTests need in-memory DB with migrations applied through 016 + mr_file_changes test data with change_type='renamed'.\n\nGREEN: Implement BFS with visited set.\n\nVERIFY: `cargo test --lib -- file_history`\n\n## Edge Cases\n\n- File never renamed: single-element vec\n- Circular rename (a->b->a): visited set prevents infinite loop\n- max_hops=0: return just original path, 
no queries executed\n- Case sensitivity: paths are case-sensitive (Linux default, matches GitLab behavior)\n- Multiple renames from same old_path: BFS discovers all branches\n","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:08.985345Z","created_by":"tayloreernisse","updated_at":"2026-02-05T20:54:52.423441Z","compaction_level":0,"original_size":0,"labels":["gate-4","phase-b","query"],"dependencies":[{"issue_id":"bd-1yx","depends_on_id":"bd-14q","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1yx","depends_on_id":"bd-1oo","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-1yx","title":"Implement rename chain resolution for file-history","description":"## Background\n\nRename chain resolution is the core algorithm for Gate 4. When querying history of src/auth.rs, it finds MRs that touched the file when it was previously named src/authentication.rs. This is reused by Gate 5 (trace) as well.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 4.6 (Rename Handling).\n\n## Codebase Context\n\n- mr_file_changes table (migration 016, bd-1oo): merge_request_id, project_id, old_path, new_path, change_type\n- change_type='renamed' rows have both old_path and new_path populated\n- Partial index `idx_mfc_renamed` on (project_id, change_type) WHERE change_type='renamed' optimizes BFS queries\n- Also `idx_mfc_project_path` on (project_id, new_path) and `idx_mfc_project_old_path` partial index\n- No timeline/trace/file_history modules exist yet in src/core/\n\n## Approach\n\nCreate `src/core/file_history.rs`:\n\n```rust\nuse std::collections::HashSet;\nuse std::collections::VecDeque;\nuse rusqlite::Connection;\nuse crate::core::error::Result;\n\n/// Resolves a file path through its rename history.\n/// Returns all equivalent paths (original + renames) for use in queries.\n/// BFS in both directions: forward (old_path -> new_path) and backward 
(new_path -> old_path).\npub fn resolve_rename_chain(\n conn: &Connection,\n project_id: i64,\n path: &str,\n max_hops: usize, // default 10 from CLI\n) -> Result> {\n let mut visited: HashSet = HashSet::new();\n let mut queue: VecDeque = VecDeque::new();\n\n visited.insert(path.to_string());\n queue.push_back(path.to_string());\n\n let forward_sql = \"SELECT mfc.new_path FROM mr_file_changes mfc \\\n WHERE mfc.project_id = ?1 AND mfc.old_path = ?2 AND mfc.change_type = 'renamed'\";\n let backward_sql = \"SELECT mfc.old_path FROM mr_file_changes mfc \\\n WHERE mfc.project_id = ?1 AND mfc.new_path = ?2 AND mfc.change_type = 'renamed'\";\n\n while let Some(current) = queue.pop_front() {\n if visited.len() > max_hops + 1 { break; }\n\n // Forward: current was the old name -> discover new names\n let mut stmt = conn.prepare(forward_sql)?;\n let forward: Vec = stmt.query_map(\n rusqlite::params\\![project_id, current],\n |row| row.get(0),\n )?.filter_map(|r| r.ok()).collect();\n\n // Backward: current was the new name -> discover old names\n let mut stmt = conn.prepare(backward_sql)?;\n let backward: Vec = stmt.query_map(\n rusqlite::params\\![project_id, current],\n |row| row.get(0),\n )?.filter_map(|r| r.ok()).collect();\n\n for discovered in forward.into_iter().chain(backward) {\n if visited.insert(discovered.clone()) {\n queue.push_back(discovered);\n }\n }\n }\n\n Ok(visited.into_iter().collect())\n}\n```\n\nRegister in `src/core/mod.rs`: add `pub mod file_history;`\n\n## Acceptance Criteria\n\n- [ ] `resolve_rename_chain()` follows renames in both directions (forward + backward)\n- [ ] Cycles detected via HashSet (same path never visited twice)\n- [ ] Bounded at max_hops (default 10)\n- [ ] No renames found: returns vec with just the original path\n- [ ] max_hops=0: returns just original path without querying DB\n- [ ] Module registered in src/core/mod.rs as `pub mod file_history;`\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D 
warnings` passes\n\n## Files\n\n- `src/core/file_history.rs` (NEW)\n- `src/core/mod.rs` (add `pub mod file_history;`)\n\n## TDD Loop\n\nRED:\n- `test_rename_chain_no_renames` — returns just original path\n- `test_rename_chain_forward` — a.rs -> b.rs -> c.rs: starting from a.rs finds all three\n- `test_rename_chain_backward` — starting from c.rs finds a.rs and b.rs\n- `test_rename_chain_cycle_detection` — a->b->a terminates without infinite loop\n- `test_rename_chain_max_hops_zero` — returns just original path\n- `test_rename_chain_max_hops_bounded` — chain longer than max is truncated\n\nTests need in-memory DB with migrations applied through 016 + mr_file_changes test data with change_type='renamed'.\n\nGREEN: Implement BFS with visited set.\n\nVERIFY: `cargo test --lib -- file_history`\n\n## Edge Cases\n\n- File never renamed: single-element vec\n- Circular rename (a->b->a): visited set prevents infinite loop\n- max_hops=0: return just original path, no queries executed\n- Case sensitivity: paths are case-sensitive (Linux default, matches GitLab behavior)\n- Multiple renames from same old_path: BFS discovers all branches\n","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:08.985345Z","created_by":"tayloreernisse","updated_at":"2026-02-13T14:00:46.354253Z","closed_at":"2026-02-13T14:00:46.354201Z","close_reason":"Implemented resolve_rename_chain() BFS in src/core/file_history.rs with 8 tests covering: no renames, forward chain, backward chain, cycle detection, max_hops=0, max_hops bounded, branching renames, project isolation. 
All 765 tests pass, clippy+fmt clean.","compaction_level":0,"original_size":0,"labels":["gate-4","phase-b","query"],"dependencies":[{"issue_id":"bd-1yx","depends_on_id":"bd-14q","type":"parent-child","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-1yx","depends_on_id":"bd-1oo","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-1yz","title":"Implement MR document extraction","description":"## Background\nMR documents are similar to issue documents but include source/target branch information in the header. The extractor queries merge_requests and mr_labels tables. Like issue extraction, it produces a DocumentData struct for the regeneration pipeline.\n\n## Approach\nImplement `extract_mr_document()` in `src/documents/extractor.rs`:\n\n```rust\n/// Extract a searchable document from a merge request.\n/// Returns None if the MR has been deleted from the DB.\npub fn extract_mr_document(conn: &Connection, mr_id: i64) -> Result>\n```\n\n**SQL queries (from PRD Section 2.2):**\n```sql\n-- Main entity\nSELECT m.id, m.iid, m.title, m.description, m.state, m.author_username,\n m.source_branch, m.target_branch,\n m.created_at, m.updated_at, m.web_url,\n p.path_with_namespace, p.id AS project_id\nFROM merge_requests m\nJOIN projects p ON p.id = m.project_id\nWHERE m.id = ?\n\n-- Labels\nSELECT l.name FROM mr_labels ml\nJOIN labels l ON l.id = ml.label_id\nWHERE ml.merge_request_id = ?\nORDER BY l.name\n```\n\n**Document format:**\n```\n[[MergeRequest]] !456: Implement JWT authentication\nProject: group/project-one\nURL: https://gitlab.example.com/group/project-one/-/merge_requests/456\nLabels: [\"feature\", \"auth\"]\nState: opened\nAuthor: @johndoe\nSource: feature/jwt-auth -> main\n\n--- Description ---\n\nThis MR implements JWT-based authentication...\n```\n\n**Key difference from issues:** The `Source:` line with `source_branch -> target_branch`.\n\n## Acceptance Criteria\n- [ ] Deleted MR returns Ok(None)\n- [ 
] MR document has `[[MergeRequest]]` prefix with `!` before iid (not `#`)\n- [ ] Source line shows `source_branch -> target_branch`\n- [ ] Labels sorted alphabetically in JSON array\n- [ ] content_hash computed from full content_text\n- [ ] labels_hash computed from sorted labels\n- [ ] paths is empty (MR-level docs don't have DiffNote paths; those are on discussion docs)\n- [ ] `cargo test extract_mr` passes\n\n## Files\n- `src/documents/extractor.rs` — implement `extract_mr_document()`\n\n## TDD Loop\nRED: Tests in `#[cfg(test)] mod tests`:\n- `test_mr_document_format` — verify header matches PRD template with Source line\n- `test_mr_not_found` — returns Ok(None)\n- `test_mr_no_description` — header only\n- `test_mr_branch_info` — Source line correct\nGREEN: Implement extract_mr_document with SQL queries\nVERIFY: `cargo test extract_mr`\n\n## Edge Cases\n- MR with NULL description: skip \"--- Description ---\" section\n- MR with NULL source_branch or target_branch: omit Source line (shouldn't happen in practice)\n- Draft MRs: state field captures this, no special handling needed","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-30T15:25:45.521703Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:30:04.308781Z","closed_at":"2026-01-30T17:30:04.308598Z","close_reason":"Implemented extract_mr_document() with Source line, PRD format, and 5 tests","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1yz","depends_on_id":"bd-36p","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1yz","depends_on_id":"bd-hrs","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1zj6","title":"OBSERV: Enrich robot JSON meta with run_id and stages","description":"## Background\nRobot JSON currently has a flat meta.elapsed_ms. 
This enriches it with run_id and a stages array, making every lore --robot sync output a complete performance profile.\n\n## Approach\nThe robot JSON output is built in src/cli/commands/sync.rs. The current SyncResult (line 15-25) is serialized into the data field. The meta field is built alongside it.\n\n1. Find or create the SyncMeta struct (likely near SyncResult). Add fields:\n```rust\n#[derive(Debug, Serialize)]\nstruct SyncMeta {\n run_id: String,\n elapsed_ms: u64,\n stages: Vec,\n}\n```\n\n2. After run_sync() completes, extract timings from MetricsLayer:\n```rust\nlet stages = metrics_handle.extract_timings();\nlet meta = SyncMeta {\n run_id: run_id.to_string(),\n elapsed_ms: start.elapsed().as_millis() as u64,\n stages,\n};\n```\n\n3. Build the JSON envelope:\n```rust\nlet output = serde_json::json!({\n \"ok\": true,\n \"data\": result,\n \"meta\": meta,\n});\n```\n\nThe metrics_handle (Arc) must be passed from main.rs to the command handler. This requires adding a parameter to handle_sync_cmd() and run_sync(), or using a global. 
Prefer parameter passing.\n\nSame pattern for standalone ingest: add stages to IngestMeta.\n\n## Acceptance Criteria\n- [ ] lore --robot sync output includes meta.run_id (string, 8 hex chars)\n- [ ] lore --robot sync output includes meta.stages (array of StageTiming)\n- [ ] meta.elapsed_ms still present (total wall clock time)\n- [ ] Each stage has name, elapsed_ms, items_processed at minimum\n- [ ] Top-level stages have sub_stages when applicable\n- [ ] lore --robot ingest also includes run_id and stages\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/cli/commands/sync.rs (add SyncMeta struct, wire extract_timings)\n- src/cli/commands/ingest.rs (same for standalone ingest)\n- src/main.rs (pass metrics_handle to command handlers)\n\n## TDD Loop\nRED: test_sync_meta_includes_stages (run robot-mode sync, parse JSON, assert meta.stages is array)\nGREEN: Add SyncMeta, extract timings, include in JSON output\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- Empty stages: if sync runs with --no-docs --no-embed, some stages won't exist. stages array is shorter, not padded.\n- extract_timings() called before root span closes: returns incomplete tree. 
Must call AFTER run_sync returns (span is dropped on function exit).\n- metrics_handle clone: MetricsLayer uses Arc internally, clone is cheap (reference count increment).","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:54:32.062410Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:31:11.073580Z","closed_at":"2026-02-04T17:31:11.073534Z","close_reason":"Wired MetricsLayer into subscriber stack (all 4 branches), added run_id to SyncResult, enriched SyncMeta with run_id + stages Vec, updated print_sync_json to accept MetricsLayer and extract timings","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-1zj6","depends_on_id":"bd-34ek","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1zj6","depends_on_id":"bd-3er","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-1zow","title":"Implement Search screen (state + action + view)","description":"## Background\nThe Search screen provides full-text and semantic search across all indexed documents. 
It supports 3 modes (lexical FTS5, hybrid FTS+vector, semantic vector-only), a split-pane layout with results on the left and preview on the right, and capability-aware mode selection based on available indexes.\n\n## Approach\nState (state/search.rs):\n- SearchState: query (String), query_input (TextInput), query_focused (bool), mode (SearchMode), results (Vec), selected_index (usize), preview (Option), capabilities (SearchCapabilities), generation (u64)\n- SearchMode: Lexical, Hybrid, Semantic\n- SearchCapabilities: has_fts (bool), has_embeddings (bool), embedding_coverage_pct (f32)\n- SearchResult: doc_id, entity_type, entity_iid, project_path, title, snippet, score, mode_used\n- SearchPreview: full document text or entity detail\n\n**Capability detection** (on screen entry):\n- Probe documents_fts table: SELECT COUNT(*) FROM documents_fts_docsize (uses fast B-tree count, not FTS5 virtual table scan — see MEMORY.md perf audit)\n- Probe embeddings: SELECT COUNT(*) FROM embeddings / SELECT COUNT(*) FROM documents to compute coverage pct\n- If has_fts=false: disable Lexical and Hybrid modes, only Semantic available\n- If has_embeddings=false: disable Semantic and Hybrid modes, only Lexical available\n- If both false: show \"No search indexes found. 
Run lore generate-docs and lore embed first.\"\n\n**Score explanation (e key):**\n- Press e on a selected result to toggle a score breakdown panel\n- For Lexical: show FTS5 bm25 raw score\n- For Hybrid: show FTS score, vector score, and RRF combined score with weights\n- For Semantic: show cosine similarity score\n- Panel appears below the selected result row, Esc or e dismisses\n\n**Debounced input (200ms):**\n- Uses Msg::SearchDebounceArmed and Msg::SearchDebounced timer pattern\n- On keystroke in query input: arm debounce timer via Cmd::timer(200ms, Msg::SearchDebounced)\n- On SearchDebounced: execute search with current query text\n- This prevents flooding the search backend on rapid typing\n\nAction (action.rs):\n- fetch_search_capabilities(conn) -> SearchCapabilities: probe FTS and embedding tables\n- execute_search(conn, query, mode, limit) -> Vec: dispatches to correct search backend. Uses existing crate::search module functions.\n- fetch_search_preview(conn, result) -> SearchPreview: loads full entity detail for selected result\n\nView (view/search.rs):\n- Split pane: results list (60%) | preview (40%)\n- Query bar at top with mode indicator (L/H/S)\n- Mode switching: Tab cycles modes (only available modes based on capabilities)\n- Score column shows numeric score; e key expands explanation\n- Empty query shows recent entities instead of empty state\n- Narrow terminal (<100 cols): hide preview pane\n\n## Acceptance Criteria\n- [ ] 3 search modes: Lexical, Hybrid, Semantic\n- [ ] Mode switching via Tab, only available modes selectable based on capability detection\n- [ ] Capability detection probes FTS and embedding tables on screen entry\n- [ ] Graceful degradation: unavailable modes shown as greyed out with reason\n- [ ] \"No search indexes\" message when both FTS and embeddings are empty\n- [ ] 200ms debounce on search input (timer-driven via Msg::SearchDebounceArmed/Fired)\n- [ ] Split pane: results | preview\n- [ ] Enter on result navigates to entity 
detail\n- [ ] Score shown next to each result\n- [ ] e key toggles score explanation panel for selected result\n- [ ] Empty query shows recent entities instead of empty state\n- [ ] Narrow terminal (<100 cols): hide preview pane\n\n## Files\n- MODIFY: crates/lore-tui/src/state/search.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add search functions)\n- CREATE: crates/lore-tui/src/view/search.rs\n\n## TDD Anchor\nRED: Write test_search_capability_detection that creates DB with FTS but no embeddings, asserts has_fts=true, has_embeddings=false, Semantic mode disabled.\nGREEN: Implement fetch_search_capabilities.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_search_capability\n\nAdditional tests:\n- test_debounce_prevents_rapid_search: simulate 5 keystrokes in 100ms, assert only 1 search executed\n- test_score_explanation_lexical: verify bm25 score shown for Lexical mode result\n- test_empty_query_shows_recent: assert recent entities returned when query is empty\n\n## Edge Cases\n- Search query < 2 chars: don't execute search (debounce filter)\n- FTS5 special characters (*, \", -): escape or pass through based on mode\n- Hybrid mode: uses existing RRF implementation from crate::search module\n- Very large result sets: limit to 100 results, show \"more results available\" hint\n- Preview pane on narrow terminal (<100 cols): hide preview, full-width results only\n- FTS count performance: use documents_fts_docsize shadow table for COUNT (19x faster)\n\n## Dependency Context\nUses existing search infrastructure from lore core (crate::search::{FtsQueryMode, to_fts_query} — note private submodules, import via crate::search).\nUses SearchDebounceArmed/SearchDebounced Msg variants from \"Implement core types\" (bd-c9gk).\nUses TaskSupervisor debounce management from \"Implement TaskSupervisor\" (bd-3le2).\nUses AppState composition from \"Implement AppState composition\" 
(bd-1v9m).","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:00:48.862621Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:33.891935Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1zow","depends_on_id":"bd-1mju","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-1zow","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -96,7 +104,7 @@ {"id":"bd-20h","title":"Implement MR discussion ingestion module","description":"## Background\nMR discussion ingestion with critical atomicity guarantees. Parse notes BEFORE destructive DB operations to prevent data loss. Watermark ONLY advanced on full success.\n\n## Approach\nCreate `src/ingestion/mr_discussions.rs` with:\n1. `IngestMrDiscussionsResult` - Per-MR stats\n2. `ingest_mr_discussions()` - Main function with atomicity guarantees\n3. Upsert + sweep pattern for notes (not delete-all-then-insert)\n4. 
Sync health telemetry for debugging failures\n\n## Files\n- `src/ingestion/mr_discussions.rs` - New module\n- `tests/mr_discussion_ingestion_tests.rs` - Integration tests\n\n## Acceptance Criteria\n- [ ] `IngestMrDiscussionsResult` has: discussions_fetched, discussions_upserted, notes_upserted, notes_skipped_bad_timestamp, diffnotes_count, pagination_succeeded\n- [ ] `ingest_mr_discussions()` returns `Result`\n- [ ] CRITICAL: Notes parsed BEFORE any DELETE operations\n- [ ] CRITICAL: Watermark NOT advanced if `pagination_succeeded == false`\n- [ ] CRITICAL: Watermark NOT advanced if any note parse fails\n- [ ] Upsert + sweep pattern using `last_seen_at`\n- [ ] Stale discussions/notes removed only on full success\n- [ ] Selective raw payload storage (skip system notes without position)\n- [ ] Sync health telemetry recorded on failure\n- [ ] `does_not_advance_discussion_watermark_on_partial_failure` test passes\n- [ ] `atomic_note_replacement_preserves_data_on_parse_failure` test passes\n\n## TDD Loop\nRED: `cargo test does_not_advance_watermark` -> test fails\nGREEN: Add ingestion with atomicity guarantees\nVERIFY: `cargo test mr_discussion_ingestion`\n\n## Main Function\n```rust\npub async fn ingest_mr_discussions(\n conn: &Connection,\n client: &GitLabClient,\n config: &Config,\n project_id: i64,\n gitlab_project_id: i64,\n mr_iid: i64,\n local_mr_id: i64,\n mr_updated_at: i64,\n) -> Result\n```\n\n## CRITICAL: Atomic Note Replacement\n```rust\n// Record sync start time for sweep\nlet run_seen_at = now_ms();\n\nwhile let Some(discussion_result) = stream.next().await {\n let discussion = match discussion_result {\n Ok(d) => d,\n Err(e) => {\n result.pagination_succeeded = false;\n break; // Stop but don't advance watermark\n }\n };\n \n // CRITICAL: Parse BEFORE destructive operations\n let notes = match transform_notes_with_diff_position(&discussion, project_id) {\n Ok(notes) => notes,\n Err(e) => {\n warn!(\"Note transform failed; preserving existing notes\");\n 
result.notes_skipped_bad_timestamp += discussion.notes.len();\n result.pagination_succeeded = false;\n continue; // Skip this discussion, don't delete existing\n }\n };\n \n // Only NOW start transaction (after parse succeeded)\n let tx = conn.unchecked_transaction()?;\n \n // Upsert discussion with run_seen_at\n // Upsert notes with run_seen_at (not delete-all)\n \n tx.commit()?;\n}\n```\n\n## Stale Data Sweep (only on success)\n```rust\nif result.pagination_succeeded {\n // Sweep stale discussions\n conn.execute(\n \"DELETE FROM discussions\n WHERE project_id = ? AND merge_request_id = ?\n AND last_seen_at < ?\",\n params![project_id, local_mr_id, run_seen_at],\n )?;\n \n // Sweep stale notes\n conn.execute(\n \"DELETE FROM notes\n WHERE discussion_id IN (\n SELECT id FROM discussions\n WHERE project_id = ? AND merge_request_id = ?\n )\n AND last_seen_at < ?\",\n params![project_id, local_mr_id, run_seen_at],\n )?;\n}\n```\n\n## Watermark Update (ONLY on success)\n```rust\nif result.pagination_succeeded {\n mark_discussions_synced(conn, local_mr_id, mr_updated_at)?;\n clear_sync_health_error(conn, local_mr_id)?;\n} else {\n record_sync_health_error(conn, local_mr_id, \"Pagination incomplete or parse failure\")?;\n warn!(\"Watermark NOT advanced; will retry on next sync\");\n}\n```\n\n## Selective Payload Storage\n```rust\n// Only store payload for DiffNotes and non-system notes\nlet should_store_note_payload =\n !note.is_system() ||\n note.position_new_path().is_some() ||\n note.position_old_path().is_some();\n```\n\n## Integration Tests (CRITICAL)\n```rust\n#[tokio::test]\nasync fn does_not_advance_discussion_watermark_on_partial_failure() {\n // Setup: MR with updated_at > discussions_synced_for_updated_at\n // Mock: Page 1 returns OK, Page 2 returns 500\n // Assert: discussions_synced_for_updated_at unchanged\n}\n\n#[tokio::test]\nasync fn does_not_advance_discussion_watermark_on_note_parse_failure() {\n // Setup: Existing notes in DB\n // Mock: Discussion 
with note having invalid created_at\n // Assert: Original notes preserved, watermark unchanged\n}\n\n#[tokio::test]\nasync fn atomic_note_replacement_preserves_data_on_parse_failure() {\n // Setup: Discussion with 3 valid notes\n // Mock: Updated discussion where note 2 has bad timestamp\n // Assert: All 3 original notes still in DB\n}\n```\n\n## Edge Cases\n- HTTP error mid-pagination: preserve existing data, log error, no watermark advance\n- Invalid note timestamp: skip discussion, preserve existing notes\n- System notes without position: don't store raw payload (saves space)\n- Empty discussion: still upsert discussion record, no notes","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:42.335714Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:22:43.207057Z","closed_at":"2026-01-27T00:22:43.206996Z","close_reason":"Implemented MR discussion ingestion module with full atomicity guarantees:\n- IngestMrDiscussionsResult with all required fields\n- parse-before-destructive pattern (transform notes before DB ops)\n- Upsert + sweep pattern with last_seen_at timestamps\n- Watermark advanced ONLY on full pagination success\n- Selective payload storage (skip system notes without position)\n- Sync health telemetry for failure debugging\n- All 163 tests passing","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-20h","depends_on_id":"bd-3ir","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-20h","depends_on_id":"bd-3j6","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-20h","depends_on_id":"bd-iba","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-20p9","title":"NOTE-1A: Note query layer data types and filters","description":"## Background\nPhase 1 adds a lore notes command for direct SQL query over the notes table. 
This chunk implements the data structures, filter logic, and query function following existing patterns in src/cli/commands/list.rs. The existing file contains: IssueListRow/Json/Result (for issues), MrListRow/Json/Result (for MRs), ListFilters/MrListFilters, query_issues(), query_mrs().\n\n## Approach\nAdd to src/cli/commands/list.rs (after the existing MR query code):\n\nData structures:\n- NoteListRow: id, gitlab_id, author_username, body, note_type, is_system, created_at, updated_at, position_new_path, position_new_line, position_old_path, position_old_line, resolvable, resolved, resolved_by, noteable_type (from discussions.noteable_type), parent_iid (i64), parent_title, project_path\n- NoteListRowJson: ISO timestamp variants (created_at_iso, updated_at_iso using ms_to_iso from crate::core::time) + #[derive(Serialize)]\n- NoteListResult: notes: Vec, total_count: i64\n- NoteListResultJson: notes: Vec, total_count: i64, showing: usize\n- NoteListFilters: limit (usize), project (Option), author (Option), note_type (Option), include_system (bool), for_issue_iid (Option), for_mr_iid (Option), note_id (Option), gitlab_note_id (Option), discussion_id (Option), since (Option), until (Option), path (Option), contains (Option), resolution (Option), sort (String), order (String)\n\nQuery function pub fn query_notes(conn: &Connection, filters: &NoteListFilters, config: &Config) -> Result:\n- Time window: parse since/until relative to single anchored now_ms via parse_since (from crate::core::time). --until date = end-of-day (23:59:59.999). 
Validate since_ms <= until_ms.\n- Core SQL: SELECT from notes n JOIN discussions d ON n.discussion_id = d.id JOIN projects p ON n.project_id = p.id LEFT JOIN issues i ON d.issue_id = i.id LEFT JOIN merge_requests m ON d.merge_request_id = m.id WHERE {dynamic_filters} ORDER BY {sort} {order}, n.id {order} LIMIT ?\n- Filter mappings:\n - author: COLLATE NOCASE, strip leading @ (same pattern as existing list filters)\n - note_type: exact match\n - project: resolve_project(conn, project_str) from crate::core::project\n - since/until: n.created_at >= ?ms / n.created_at <= ?ms\n - path: trailing / = LIKE prefix match with escape_like (from crate::core::project), else exact match on position_new_path\n - contains: LIKE %term% COLLATE NOCASE on n.body with escape_like for %, _\n - resolution: \"unresolved\" → n.resolvable = 1 AND n.resolved = 0, \"resolved\" → n.resolvable = 1 AND n.resolved = 1, \"any\" → no filter\n - for_issue_iid/for_mr_iid: requires project_id context. Validation at query layer (return error if no project and no defaultProject), NOT as clap requires.\n - include_system: when false (default), add n.is_system = 0\n - note_id: exact match on n.id\n - gitlab_note_id: exact match on n.gitlab_id\n - discussion_id: exact match on d.gitlab_discussion_id\n- Use dynamic WHERE clause building with params vector (same pattern as query_issues/query_mrs)\n\n## Files\n- MODIFY: src/cli/commands/list.rs (add NoteListRow, NoteListRowJson, NoteListResult, NoteListResultJson, NoteListFilters, query_notes)\n\n## TDD Anchor\nRED: test_query_notes_empty_db — setup DB with no notes, call query_notes, assert total_count == 0.\nGREEN: Implement NoteListFilters + query_notes with basic SELECT.\nVERIFY: cargo test query_notes_empty_db -- --nocapture\n28 tests from PRD: test_query_notes_empty_db, test_query_notes_filter_author, test_query_notes_filter_author_strips_at, test_query_notes_filter_author_case_insensitive, test_query_notes_filter_note_type, 
test_query_notes_filter_project, test_query_notes_filter_since, test_query_notes_filter_until, test_query_notes_filter_since_and_until_combined, test_query_notes_invalid_time_window_rejected, test_query_notes_until_date_uses_end_of_day, test_query_notes_filter_contains, test_query_notes_filter_contains_case_insensitive, test_query_notes_filter_contains_escapes_like_wildcards, test_query_notes_filter_path, test_query_notes_filter_path_prefix, test_query_notes_filter_for_issue_requires_project, test_query_notes_filter_for_mr_requires_project, test_query_notes_filter_for_issue_uses_default_project, test_query_notes_filter_resolution_unresolved, test_query_notes_filter_resolution_resolved, test_query_notes_sort_created_desc, test_query_notes_sort_created_asc, test_query_notes_deterministic_tiebreak, test_query_notes_limit, test_query_notes_combined_filters, test_query_notes_filter_note_id_exact, test_query_notes_filter_gitlab_note_id_exact, test_query_notes_filter_discussion_id_exact, test_note_list_row_json_conversion\n\n## Acceptance Criteria\n- [ ] NoteListRow/Json/Result/Filters structs defined with all fields\n- [ ] query_notes returns notes matching all filter combinations\n- [ ] Author filter is case-insensitive and strips @ prefix\n- [ ] Time window validates since <= until with clear error message including swap suggestion\n- [ ] --until date uses end-of-day (23:59:59.999)\n- [ ] Path filter: trailing / = prefix match with LIKE escape, otherwise exact\n- [ ] Contains filter: case-insensitive body substring with LIKE escape for %, _\n- [ ] for_issue_iid/for_mr_iid require project context (error if no --project and no defaultProject)\n- [ ] Default: exclude system notes (is_system = 0). 
--include-system overrides.\n- [ ] ORDER BY includes n.id tiebreaker for deterministic results\n- [ ] All 28+ tests pass\n\n## Edge Cases\n- parse_until_with_anchor: YYYY-MM-DD --until returns end-of-day (not start-of-day)\n- Inverted time window: --since 30d --until 90d → error message suggesting swap\n- LIKE wildcards in --contains: % and _ escaped via escape_like (from crate::core::project)\n- IID without project: error at query layer (not clap) to support defaultProject\n- Discussion with NULL noteable_type: LEFT JOIN handles gracefully (parent_iid/title will be None)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:00:26.741853Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:24.378983Z","closed_at":"2026-02-12T18:13:24.378936Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["cli","per-note","search"],"dependencies":[{"issue_id":"bd-20p9","depends_on_id":"bd-1oyf","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-20p9","depends_on_id":"bd-25hb","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-20p9","depends_on_id":"bd-3iod","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-221","title":"Create migration 008_fts5.sql","description":"## Background\nFTS5 (Full-Text Search 5) provides the lexical search backbone for Gate A. The virtual table + triggers keep the FTS index in sync with the documents table automatically. This migration must be applied AFTER migration 007 (documents table exists). The trigger design handles NULL titles via COALESCE and only rebuilds the FTS entry when searchable text actually changes (not metadata-only updates).\n\n## Approach\nCreate `migrations/008_fts5.sql` with the exact SQL from PRD Section 1.2:\n\n1. 
**Virtual table:** `documents_fts` using FTS5 with porter stemmer, prefix indexes (2,3,4), external content backed by `documents` table\n2. **Insert trigger:** `documents_ai` — inserts into FTS on document insert, uses COALESCE(title, '') for NULL safety\n3. **Delete trigger:** `documents_ad` — removes from FTS on document delete using the FTS5 delete command syntax\n4. **Update trigger:** `documents_au` — only fires when `title` or `content_text` changes (WHEN clause), performs delete-then-insert to update FTS\n\nRegister migration 8 in `src/core/db.rs` MIGRATIONS array.\n\n**Critical detail:** The COALESCE is required because FTS5 external-content tables require exact value matching for delete operations. If NULL was inserted, the delete trigger couldn't match it (NULL != NULL in SQL).\n\n## Acceptance Criteria\n- [ ] `migrations/008_fts5.sql` file exists\n- [ ] `documents_fts` virtual table created with `tokenize='porter unicode61'` and `prefix='2 3 4'`\n- [ ] `content='documents'` and `content_rowid='id'` set (external content mode)\n- [ ] Insert trigger `documents_ai` fires on document insert with COALESCE(title, '')\n- [ ] Delete trigger `documents_ad` fires on document delete using FTS5 delete command\n- [ ] Update trigger `documents_au` only fires when `old.title IS NOT new.title OR old.content_text != new.content_text`\n- [ ] Prefix search works: query `auth*` matches \"authentication\"\n- [ ] After bulk insert of N documents, `SELECT count(*) FROM documents_fts` returns N\n- [ ] Schema version 8 recorded in schema_version table\n- [ ] `cargo test migration_tests` passes\n\n## Files\n- `migrations/008_fts5.sql` — new file (copy exact SQL from PRD Section 1.2)\n- `src/core/db.rs` — add migration 8 to MIGRATIONS array\n\n## TDD Loop\nRED: Register migration in db.rs, `cargo test migration_tests` fails (SQL file missing)\nGREEN: Create `008_fts5.sql` with all triggers\nVERIFY: `cargo test migration_tests && cargo build`\n\n## Edge Cases\n- Metadata-only 
updates (e.g., changing `updated_at` or `labels_hash`) must NOT trigger FTS rebuild — the WHEN clause prevents this\n- NULL titles must use COALESCE to empty string in both insert and delete triggers\n- The update trigger does delete+insert (not FTS5 'delete' + regular insert atomically) — this is the correct FTS5 pattern for content changes","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:25:25.763146Z","created_by":"tayloreernisse","updated_at":"2026-01-30T16:56:13.131830Z","closed_at":"2026-01-30T16:56:13.131771Z","close_reason":"Completed: migration 008_fts5.sql with FTS5 virtual table, 3 sync triggers (insert/delete/update with COALESCE NULL safety), prefix search, registered in db.rs, cargo build + tests pass","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-221","depends_on_id":"bd-hrs","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-226s","title":"Epic: Time-Decay Expert Scoring Model","description":"## Background\n\nReplace flat-weight expertise scoring with exponential half-life decay, split reviewer signals (participated vs assigned-only), dual-path rename awareness, and new CLI flags (--as-of, --explain-score, --include-bots, --all-history).\n\n**Plan document:** plans/time-decay-expert-scoring.md (iteration 6, target 8)\n**Beads revision:** 3 (updated line numbers to match v0.7.0 codebase, fixed migration 022->026, fixed test count 27->31, added explicit callsite update scope to bd-13q8)\n\n## Children (Execution Order)\n\n### Layer 0 — Foundation (no deps)\n- **bd-2w1p** — Add half-life fields and config validation to ScoringConfig\n- **bd-1soz** — Add half_life_decay() pure function\n- **bd-18dn** — Add normalize_query_path() pure function\n\n### Layer 1 — Schema + Helpers (depends on Layer 0)\n- **bd-2ao4** — Add migration 026 for dual-path and reviewer participation indexes (5 indexes)\n- **bd-2yu5** — Add timestamp-aware test helpers (insert_mr_at, 
insert_diffnote_at, insert_file_change_with_old_path)\n- **bd-1b50** — Update existing tests for new ScoringConfig fields (..Default::default())\n\n### Layer 2 — SQL + Path Probes (depends on Layer 1)\n- **bd-1hoq** — Restructure expert SQL with CTE-based dual-path matching (8 CTEs, mr_activity, parameterized ?5/?6)\n- **bd-1h3f** — Add rename awareness to path resolution probes (build_path_query + suffix_probe)\n\n### Layer 3 — Rust Aggregation (depends on Layer 2)\n- **bd-13q8** — Implement Rust-side decay aggregation with reviewer split + update all 17 existing query_expert() callsites\n\n### Layer 4 — CLI (depends on Layer 3)\n- **bd-11mg** — Add CLI flags: --as-of, --explain-score, --include-bots, --all-history, path normalization\n\n### Layer 5 — Verification (depends on Layer 4)\n- **bd-1vti** — Run full test suite: 31 new tests + all existing tests, no regressions\n- **bd-1j5o** — Quality gates, query plan check (6 index points), real-world validation\n\n## Revision 3 Delta (from revision 2)\n- **Migration number**: 022 -> 026 (latest existing is 025_note_dirty_backfill.sql)\n- **Test count**: 27 -> 31 (correct tally: 2+3+2+1+2+13+8=31)\n- **Line numbers**: All beads updated to match v0.7.0 codebase (ScoringConfig at 155, validate_scoring at 274, query_expert at 641, build_path_query at 467, suffix_probe at 596, run_who at 276, test helpers at 2469-2598, test_expert_scoring_weights at 3551)\n- **bd-13q8 scope**: Now explicitly documents updating all 17 existing query_expert() callsites (1 production + 16 test) when changing signature from 7 to 10 params\n- **bd-2yu5**: insert_file_change_with_old_path now has complete SQL implementation (was placeholder)\n\n## Files Modified\n- src/core/config.rs (ScoringConfig struct at line 155, validation at line 274)\n- src/cli/commands/who.rs (decay function, normalize_query_path, SQL, aggregation, CLI flags, tests)\n- src/core/db.rs (MIGRATIONS array — add (\"026\", ...) 
entry)\n- CREATE: migrations/026_scoring_indexes.sql (5 new indexes)\n\n## Acceptance Criteria\n- [ ] All 31 new tests pass (across all child beads)\n- [ ] All existing tests pass unchanged (decay ~1.0 at now_ms())\n- [ ] cargo check + clippy + fmt clean\n- [ ] ubs scan clean on modified files\n- [ ] EXPLAIN QUERY PLAN shows 6 index usage points (manual verification)\n- [ ] Real-world validation: who --path on known files shows recency discounting\n- [ ] who --explain-score component breakdown sums to total\n- [ ] who --as-of produces deterministic results across runs\n- [ ] Assigned-only reviewers rank below participated reviewers\n- [ ] Old file paths resolve and credit expertise after renames\n- [ ] Path normalization: ./src//foo.rs resolves identically to src/foo.rs\n\n## Edge Cases\n- f64 NaN guard in half_life_decay (hl=0 -> 0.0)\n- Deterministic f64 ordering via mr_id sort before summation\n- Closed MR multiplier applied via state_mult in SQL (not Rust string match)\n- Trivial notes (< reviewer_min_note_chars) classified as assigned-only\n- Exclusive upper bound on --as-of prevents future event leakage\n- Config upper bounds prevent absurd values (3650-day cap, 4096-char cap, NaN/Inf rejection)","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-09T16:58:58.007560Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:48:43.108823Z","compaction_level":0,"original_size":0} +{"id":"bd-226s","title":"Epic: Time-Decay Expert Scoring Model","description":"## Background\n\nReplace flat-weight expertise scoring with exponential half-life decay, split reviewer signals (participated vs assigned-only), dual-path rename awareness, and new CLI flags (--as-of, --explain-score, --include-bots, --all-history).\n\n**Plan document:** plans/time-decay-expert-scoring.md (iteration 6, target 8)\n**Beads revision:** 3 (updated line numbers to match v0.7.0 codebase, fixed migration 022->026, fixed test count 27->31, added explicit callsite update scope to 
bd-13q8)\n\n## Children (Execution Order)\n\n### Layer 0 — Foundation (no deps)\n- **bd-2w1p** — Add half-life fields and config validation to ScoringConfig\n- **bd-1soz** — Add half_life_decay() pure function\n- **bd-18dn** — Add normalize_query_path() pure function\n\n### Layer 1 — Schema + Helpers (depends on Layer 0)\n- **bd-2ao4** — Add migration 026 for dual-path and reviewer participation indexes (5 indexes)\n- **bd-2yu5** — Add timestamp-aware test helpers (insert_mr_at, insert_diffnote_at, insert_file_change_with_old_path)\n- **bd-1b50** — Update existing tests for new ScoringConfig fields (..Default::default())\n\n### Layer 2 — SQL + Path Probes (depends on Layer 1)\n- **bd-1hoq** — Restructure expert SQL with CTE-based dual-path matching (8 CTEs, mr_activity, parameterized ?5/?6)\n- **bd-1h3f** — Add rename awareness to path resolution probes (build_path_query + suffix_probe)\n\n### Layer 3 — Rust Aggregation (depends on Layer 2)\n- **bd-13q8** — Implement Rust-side decay aggregation with reviewer split + update all 17 existing query_expert() callsites\n\n### Layer 4 — CLI (depends on Layer 3)\n- **bd-11mg** — Add CLI flags: --as-of, --explain-score, --include-bots, --all-history, path normalization\n\n### Layer 5 — Verification (depends on Layer 4)\n- **bd-1vti** — Run full test suite: 31 new tests + all existing tests, no regressions\n- **bd-1j5o** — Quality gates, query plan check (6 index points), real-world validation\n\n## Revision 3 Delta (from revision 2)\n- **Migration number**: 022 -> 026 (latest existing is 025_note_dirty_backfill.sql)\n- **Test count**: 27 -> 31 (correct tally: 2+3+2+1+2+13+8=31)\n- **Line numbers**: All beads updated to match v0.7.0 codebase (ScoringConfig at 155, validate_scoring at 274, query_expert at 641, build_path_query at 467, suffix_probe at 596, run_who at 276, test helpers at 2469-2598, test_expert_scoring_weights at 3551)\n- **bd-13q8 scope**: Now explicitly documents updating all 17 existing query_expert() 
callsites (1 production + 16 test) when changing signature from 7 to 10 params\n- **bd-2yu5**: insert_file_change_with_old_path now has complete SQL implementation (was placeholder)\n\n## Files Modified\n- src/core/config.rs (ScoringConfig struct at line 155, validation at line 274)\n- src/cli/commands/who.rs (decay function, normalize_query_path, SQL, aggregation, CLI flags, tests)\n- src/core/db.rs (MIGRATIONS array — add (\"026\", ...) entry)\n- CREATE: migrations/026_scoring_indexes.sql (5 new indexes)\n\n## Acceptance Criteria\n- [ ] All 31 new tests pass (across all child beads)\n- [ ] All existing tests pass unchanged (decay ~1.0 at now_ms())\n- [ ] cargo check + clippy + fmt clean\n- [ ] ubs scan clean on modified files\n- [ ] EXPLAIN QUERY PLAN shows 6 index usage points (manual verification)\n- [ ] Real-world validation: who --path on known files shows recency discounting\n- [ ] who --explain-score component breakdown sums to total\n- [ ] who --as-of produces deterministic results across runs\n- [ ] Assigned-only reviewers rank below participated reviewers\n- [ ] Old file paths resolve and credit expertise after renames\n- [ ] Path normalization: ./src//foo.rs resolves identically to src/foo.rs\n\n## Edge Cases\n- f64 NaN guard in half_life_decay (hl=0 -> 0.0)\n- Deterministic f64 ordering via mr_id sort before summation\n- Closed MR multiplier applied via state_mult in SQL (not Rust string match)\n- Trivial notes (< reviewer_min_note_chars) classified as assigned-only\n- Exclusive upper bound on --as-of prevents future event leakage\n- Config upper bounds prevent absurd values (3650-day cap, 4096-char cap, NaN/Inf rejection)","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-02-09T16:58:58.007560Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:43:09.211271Z","closed_at":"2026-02-12T20:43:09.211222Z","close_reason":"Epic complete: time-decay expert scoring model implemented. 
3-agent swarm, 12 tasks, 621 tests, all quality gates green, real-world validation passed.","compaction_level":0,"original_size":0} {"id":"bd-227","title":"[CP1] gi count issues/discussions/notes commands","description":"Count entities in the database.\n\n## Module\nsrc/cli/commands/count.rs\n\n## Clap Definition\nCount {\n #[arg(value_parser = [\"issues\", \"mrs\", \"discussions\", \"notes\"])]\n entity: String,\n \n #[arg(long, value_parser = [\"issue\", \"mr\"])]\n r#type: Option,\n}\n\n## Commands\n- gi count issues → 'Issues: N'\n- gi count discussions → 'Discussions: N'\n- gi count discussions --type=issue → 'Issue Discussions: N'\n- gi count notes → 'Notes: N (excluding M system)'\n- gi count notes --type=issue → 'Issue Notes: N (excluding M system)'\n\n## Implementation\n- Simple COUNT(*) queries\n- For notes, also count WHERE is_system = 1 for system note count\n- Filter by noteable_type when --type specified\n\nFiles: src/cli/commands/count.rs\nDone when: Counts match expected values from GitLab","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:58:25.648805Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.920135Z","closed_at":"2026-01-25T17:02:01.920135Z","deleted_at":"2026-01-25T17:02:01.920129Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-22ai","title":"NOTE-2H: Backfill existing notes after upgrade (migration 025)","description":"## Background\nWhen a user upgrades to note document support, existing notes have no documents. Without backfill, only notes that change post-upgrade get documents. This migration seeds all existing non-system notes into dirty queue. 
Uses migration slot 025 (024 = note documents schema).\n\n## Approach\nCreate migrations/025_note_dirty_backfill.sql:\nINSERT INTO dirty_sources (source_type, source_id, queued_at)\nSELECT 'note', n.id, CAST(strftime('%s', 'now') AS INTEGER) * 1000\nFROM notes n\nLEFT JOIN documents d ON d.source_type = 'note' AND d.source_id = n.id\nWHERE n.is_system = 0 AND d.id IS NULL\nON CONFLICT(source_type, source_id) DO NOTHING;\n\nRegister as (\"025\", include_str!(\"../../migrations/025_note_dirty_backfill.sql\")) in MIGRATIONS array in src/core/db.rs.\nData-only migration — no schema changes. Safe on empty DBs (no notes = no-op).\n\n## Files\n- CREATE: migrations/025_note_dirty_backfill.sql\n- MODIFY: src/core/db.rs (add (\"025\", ...) to MIGRATIONS array)\n\n## TDD Anchor\nRED: test_migration_025_backfills_existing_notes — setup: run migrations through 024, insert 5 non-system + 2 system notes, run migration 025, assert 5 dirty entries with source_type='note'.\nGREEN: Create migration with the INSERT...SELECT...ON CONFLICT statement.\nVERIFY: cargo test migration_025 -- --nocapture\nTests: test_migration_025_idempotent_with_existing_documents (notes already having documents are skipped), test_migration_025_skips_notes_already_in_dirty_queue\n\n## Acceptance Criteria\n- [ ] Migration seeds non-system notes without documents into dirty queue\n- [ ] System notes excluded (is_system = 0 filter)\n- [ ] Notes already having documents excluded (LEFT JOIN + d.id IS NULL)\n- [ ] Idempotent: re-running doesn't create duplicates (ON CONFLICT DO NOTHING)\n- [ ] Notes already in dirty queue not duplicated\n- [ ] All 3 tests pass\n\n## Dependency Context\n- Depends on NOTE-2A (bd-1oi7): dirty_sources must accept source_type='note' (migration 024 adds CHECK constraint). 
Must run after migration 024.\n\n## Edge Cases\n- Empty database (fresh install): no notes exist, migration is a no-op\n- Database with only system notes: no entries queued\n- Concurrent sync running: ON CONFLICT DO NOTHING handles race safely","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:48.824398Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.797283Z","closed_at":"2026-02-12T18:13:15.797239Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"]} {"id":"bd-22li","title":"OBSERV: Implement SyncRunRecorder lifecycle helper","description":"## Background\nThe sync_runs table exists (migration 001) but NOTHING writes to it. SyncRunRecorder encapsulates the INSERT-on-start, UPDATE-on-finish lifecycle, fixing this bug and enabling sync history tracking.\n\n## Approach\nCreate src/core/sync_run.rs:\n\n```rust\nuse crate::core::metrics::StageTiming;\nuse crate::core::error::Result;\nuse rusqlite::Connection;\n\npub struct SyncRunRecorder {\n row_id: i64,\n}\n\nimpl SyncRunRecorder {\n /// Insert a new sync_runs row with status='running'.\n pub fn start(conn: &Connection, command: &str, run_id: &str) -> Result {\n let now_ms = crate::core::time::now_ms();\n conn.execute(\n \"INSERT INTO sync_runs (started_at, heartbeat_at, status, command, run_id)\n VALUES (?1, ?2, 'running', ?3, ?4)\",\n rusqlite::params![now_ms, now_ms, command, run_id],\n )?;\n let row_id = conn.last_insert_rowid();\n Ok(Self { row_id })\n }\n\n /// Mark run as succeeded with full metrics.\n pub fn succeed(\n self,\n conn: &Connection,\n metrics: &[StageTiming],\n total_items: usize,\n total_errors: usize,\n ) -> Result<()> {\n let now_ms = crate::core::time::now_ms();\n let metrics_json = serde_json::to_string(metrics)\n .unwrap_or_else(|_| \"[]\".to_string());\n conn.execute(\n \"UPDATE sync_runs\n SET finished_at = ?1, status = 'succeeded',\n metrics_json = ?2, total_items_processed = 
?3, total_errors = ?4\n WHERE id = ?5\",\n rusqlite::params![now_ms, metrics_json, total_items, total_errors, self.row_id],\n )?;\n Ok(())\n }\n\n /// Mark run as failed with error message and optional partial metrics.\n pub fn fail(\n self,\n conn: &Connection,\n error: &str,\n metrics: Option<&[StageTiming]>,\n ) -> Result<()> {\n let now_ms = crate::core::time::now_ms();\n let metrics_json = metrics\n .map(|m| serde_json::to_string(m).unwrap_or_else(|_| \"[]\".to_string()));\n conn.execute(\n \"UPDATE sync_runs\n SET finished_at = ?1, status = 'failed', error = ?2,\n metrics_json = ?3\n WHERE id = ?4\",\n rusqlite::params![now_ms, error, metrics_json, self.row_id],\n )?;\n Ok(())\n }\n}\n```\n\nRegister in src/core/mod.rs:\n```rust\npub mod sync_run;\n```\n\nNote: SyncRunRecorder takes self (not &self) in succeed/fail to enforce single-use lifecycle. You start a run, then either succeed or fail it -- never both.\n\nThe existing time::now_ms() helper (src/core/time.rs) returns milliseconds since epoch as i64. 
Used by the existing sync_runs schema (started_at, finished_at are INTEGER ms).\n\n## Acceptance Criteria\n- [ ] SyncRunRecorder::start() inserts row with status='running', started_at set\n- [ ] SyncRunRecorder::succeed() updates status='succeeded', finished_at set, metrics_json populated\n- [ ] SyncRunRecorder::fail() updates status='failed', error set, finished_at set\n- [ ] fail() with Some(metrics) stores partial metrics in metrics_json\n- [ ] fail() with None leaves metrics_json as NULL\n- [ ] succeed/fail consume self (single-use enforcement)\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/core/sync_run.rs (new file)\n- src/core/mod.rs (register module)\n\n## TDD Loop\nRED:\n - test_sync_run_recorder_start: in-memory DB, start(), query sync_runs, assert status='running'\n - test_sync_run_recorder_succeed: start() then succeed(), assert status='succeeded', metrics_json parseable\n - test_sync_run_recorder_fail: start() then fail(), assert status='failed', error set\n - test_sync_run_recorder_fail_with_partial_metrics: fail with Some(metrics), assert metrics_json has data\nGREEN: Implement SyncRunRecorder\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- Connection lifetime: SyncRunRecorder stores row_id, not Connection. The caller must ensure the same Connection is used for start/succeed/fail.\n- Panic during sync: if the program panics between start() and succeed()/fail(), the row stays as 'running'. 
The existing stale lock detection (stale_lock_minutes) handles this.\n- metrics_json encoding: serde_json::to_string on Vec produces a JSON array string.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:54:51.364617Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:38:04.903657Z","closed_at":"2026-02-04T17:38:04.903610Z","close_reason":"Implemented SyncRunRecorder with start/succeed/fail lifecycle, 4 passing tests","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-22li","depends_on_id":"bd-1o4h","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-22li","depends_on_id":"bd-3pz","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-22li","depends_on_id":"bd-apmo","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -105,16 +113,16 @@ {"id":"bd-247","title":"Implement issue document extraction","description":"## Background\nIssue documents are the simplest document type — a structured header + description text. The extractor queries the existing issues and issue_labels tables (populated by ingestion) and assembles a DocumentData struct. 
This is one of three entity-specific extractors (issue, MR, discussion) that feed the document regeneration pipeline.\n\n## Approach\nImplement `extract_issue_document()` in `src/documents/extractor.rs`:\n\n```rust\n/// Extract a searchable document from an issue.\n/// Returns None if the issue has been deleted from the DB.\npub fn extract_issue_document(conn: &Connection, issue_id: i64) -> Result>\n```\n\n**SQL queries (from PRD Section 2.2):**\n```sql\n-- Main entity\nSELECT i.id, i.iid, i.title, i.description, i.state, i.author_username,\n i.created_at, i.updated_at, i.web_url,\n p.path_with_namespace, p.id AS project_id\nFROM issues i\nJOIN projects p ON p.id = i.project_id\nWHERE i.id = ?\n\n-- Labels\nSELECT l.name FROM issue_labels il\nJOIN labels l ON l.id = il.label_id\nWHERE il.issue_id = ?\nORDER BY l.name\n```\n\n**Document format:**\n```\n[[Issue]] #234: Authentication redesign\nProject: group/project-one\nURL: https://gitlab.example.com/group/project-one/-/issues/234\nLabels: [\"bug\", \"auth\"]\nState: opened\nAuthor: @johndoe\n\n--- Description ---\n\nWe need to modernize our authentication system...\n```\n\n**Implementation steps:**\n1. Query issue row — if not found, return Ok(None)\n2. Query labels via junction table\n3. Format header with [[Issue]] prefix\n4. Compute content_hash via compute_content_hash()\n5. Compute labels_hash via compute_list_hash()\n6. paths is always empty for issues (paths are only for DiffNote discussions)\n7. 
Return DocumentData with all fields populated\n\n## Acceptance Criteria\n- [ ] Deleted issue (not in DB) returns Ok(None)\n- [ ] Issue with no description: content_text has header only (no \"--- Description ---\" section)\n- [ ] Issue with no labels: Labels line shows \"[]\"\n- [ ] Issue with labels: Labels line shows sorted JSON array\n- [ ] content_hash is SHA-256 of the full content_text\n- [ ] labels_hash is SHA-256 of sorted label names joined by newline\n- [ ] paths_hash is empty string hash (issues have no paths)\n- [ ] project_id comes from the JOIN with projects table\n- [ ] `cargo test extract_issue` passes\n\n## Files\n- `src/documents/extractor.rs` — implement `extract_issue_document()`\n\n## TDD Loop\nRED: Test in `#[cfg(test)] mod tests`:\n- `test_issue_document_format` — verify header format matches PRD template\n- `test_issue_not_found` — returns Ok(None) for nonexistent issue_id\n- `test_issue_no_description` — no description section when description is NULL\n- `test_issue_labels_sorted` — labels appear in alphabetical order\n- `test_issue_hash_deterministic` — same issue produces same content_hash\nGREEN: Implement extract_issue_document with SQL queries\nVERIFY: `cargo test extract_issue`\n\n## Edge Cases\n- Issue with NULL description: skip \"--- Description ---\" section entirely\n- Issue with empty string description: include section but with empty body\n- Issue with very long description: no truncation here (hard cap applied by caller)\n- Labels with special characters (quotes, commas): JSON array handles escaping","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-30T15:25:45.490145Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:28:13.974948Z","closed_at":"2026-01-30T17:28:13.974891Z","close_reason":"Implemented extract_issue_document() with SQL queries, PRD-compliant format, and 7 
tests","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-247","depends_on_id":"bd-36p","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-247","depends_on_id":"bd-hrs","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-24j1","title":"OBSERV: Add #[instrument] spans to ingestion stages","description":"## Background\nTracing spans on each sync stage create the hierarchy that (1) makes log lines filterable by stage, (2) Phase 3's MetricsLayer reads to build StageTiming trees, and (3) gives meaningful context in -vv stderr output.\n\n## Approach\nAdd #[instrument] attributes or manual spans to these functions:\n\n### src/ingestion/orchestrator.rs\n1. ingest_project_issues_with_progress() (line ~110):\n```rust\n#[instrument(skip_all, fields(stage = \"ingest_issues\", project = %project_path))]\npub async fn ingest_project_issues_with_progress(...) -> Result {\n```\n\n2. The MR equivalent (ingest_project_mrs_with_progress or similar):\n```rust\n#[instrument(skip_all, fields(stage = \"ingest_mrs\", project = %project_path))]\n```\n\n3. Inside the issue ingest function, add child spans for sub-stages:\n```rust\nlet _fetch_span = tracing::info_span!(\"fetch_pages\", project = %project_path).entered();\n// ... fetch logic\ndrop(_fetch_span);\n\nlet _disc_span = tracing::info_span!(\"sync_discussions\", project = %project_path).entered();\n// ... discussion sync logic\ndrop(_disc_span);\n```\n\n4. drain_resource_events() (line ~566):\n```rust\nlet _span = tracing::info_span!(\"fetch_resource_events\", project = %project_path).entered();\n```\n\n### src/documents/regenerator.rs\n5. regenerate_dirty_documents() (line ~24):\n```rust\n#[instrument(skip_all, fields(stage = \"generate_docs\"))]\npub fn regenerate_dirty_documents(conn: &Connection) -> Result {\n```\n\n### src/embedding/pipeline.rs\n6. 
embed_documents() (line ~36):\n```rust\n#[instrument(skip_all, fields(stage = \"embed\"))]\npub async fn embed_documents(...) -> Result {\n```\n\n### Important: field declarations for Phase 3\nThe #[instrument] fields should include empty recording fields that Phase 3 (bd-16m8) will populate:\n```rust\n#[instrument(skip_all, fields(\n stage = \"ingest_issues\",\n project = %project_path,\n items_processed = tracing::field::Empty,\n items_skipped = tracing::field::Empty,\n errors = tracing::field::Empty,\n))]\n```\n\nThis declares the fields on the span so MetricsLayer can capture them when span.record() is called later.\n\n## Acceptance Criteria\n- [ ] JSON log lines show nested span context: sync > ingest_issues > fetch_pages\n- [ ] Each stage span has a \"stage\" field with the stage name\n- [ ] Per-project spans include \"project\" field\n- [ ] Spans are visible in -vv stderr output as bracketed context\n- [ ] Empty recording fields declared for items_processed, items_skipped, errors\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/ingestion/orchestrator.rs (spans on ingest functions and sub-stages)\n- src/documents/regenerator.rs (span on regenerate_dirty_documents)\n- src/embedding/pipeline.rs (span on embed_documents)\n\n## TDD Loop\nRED:\n - test_span_context_in_json_logs: mock sync, capture JSON, verify span chain\n - test_nested_span_chain: verify parent-child: sync > ingest_issues > fetch_pages\n - test_span_elapsed_on_close: create span, sleep 10ms, verify elapsed >= 10\nGREEN: Add #[instrument] and manual spans to all stage functions\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- #[instrument] on async fn: uses tracing::Instrument trait automatically. 
Works with tokio.\n- skip_all is essential: without it, #[instrument] tries to Debug-format all parameters, which may not implement Debug or may be expensive.\n- Manual span drop: for sub-stages within a single function, use explicit drop(_span) to end the span before the next sub-stage starts. Otherwise spans overlap.\n- tracing::field::Empty: declares a field that can be recorded later. If never recorded, it appears as empty/missing in output (not zero).","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:54:07.821068Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:19:34.307672Z","closed_at":"2026-02-04T17:19:34.307624Z","close_reason":"Added #[instrument] spans to ingest_project_issues_with_progress, ingest_project_merge_requests_with_progress, drain_resource_events, regenerate_dirty_documents, embed_documents","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-24j1","depends_on_id":"bd-2ni","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-24j1","depends_on_id":"bd-2rr","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-25hb","title":"NOTE-1C: Human and robot output formatting for notes","description":"## Background\nImplement the 4 output formatters for the notes command: human table, robot JSON, JSONL streaming, and CSV export.\n\n## Approach\nAdd to src/cli/commands/list.rs (after the query_notes function from NOTE-1A):\n\n1. 
pub fn print_list_notes(result: &NoteListResult) — human table:\n Use comfy-table (already in Cargo.toml) following the pattern of print_list_issues/print_list_mrs.\n Columns: ID | Author | Type | Body (truncated to 60 chars + \"...\") | Path:Line | Parent | Created\n ID: colored_cell with Cyan for gitlab_id\n Author: @username with Magenta\n Type: \"Diff\" for DiffNote, \"Disc\" for DiscussionNote, \"-\" for others\n Path: position_new_path:line (or \"-\" if no path)\n Parent: \"Issue #N\" or \"MR !N\" from noteable_type + parent_iid\n Created: format_relative_time (existing helper in list.rs)\n\n2. pub fn print_list_notes_json(result: &NoteListResult, elapsed_ms: u64, fields: Option<&[String]>) — robot JSON:\n Standard envelope: {\"ok\":true,\"data\":{\"notes\":[...],\"total_count\":N,\"showing\":M},\"meta\":{\"elapsed_ms\":U64}}\n Supports --fields via filter_fields() from crate::cli::robot\n Same pattern as print_list_issues_json.\n\n3. pub fn print_list_notes_jsonl(result: &NoteListResult) — one JSON object per line:\n Each line is one NoteListRowJson serialized. No envelope. Ideal for jq/notebook pipelines.\n Use serde_json::to_string for each row, println! each line.\n\n4. pub fn print_list_notes_csv(result: &NoteListResult) — CSV output:\n Check if csv crate is already used in the project. 
If not, use manual CSV with proper escaping:\n - Header row with field names matching NoteListRowJson\n - Quote fields containing commas, quotes, or newlines\n - Escape internal quotes by doubling them\n Alternatively, if adding csv crate (add csv = \"1\" to Cargo.toml [dependencies]), use csv::WriterBuilder for RFC 4180 compliance.\n\nHelper: Add a truncate_body(body: &str, max_len: usize) -> String function for the human table truncation.\n\n## Files\n- MODIFY: src/cli/commands/list.rs (4 print functions + truncate_body helper)\n- POSSIBLY MODIFY: Cargo.toml (add csv = \"1\" if using csv crate for CSV output)\n\n## TDD Anchor\nRED: test_truncate_note_body — assert 200-char body truncated to 60 + \"...\"\nGREEN: Implement truncate_body helper.\nVERIFY: cargo test truncate_note_body -- --nocapture\nTests: test_csv_output_basic (CSV output has correct header + escaped fields), test_jsonl_output_one_per_line (each line parses as valid JSON)\n\n## Acceptance Criteria\n- [ ] Human table renders with colored columns, truncated body, relative time\n- [ ] Robot JSON follows standard envelope with timing metadata\n- [ ] --fields filtering works on JSON output (via filter_fields)\n- [ ] JSONL outputs one valid JSON object per line\n- [ ] CSV properly escapes commas, quotes, and newlines in body text\n- [ ] Multi-byte chars handled correctly in CSV and truncation\n- [ ] All 3 tests pass\n\n## Dependency Context\n- Depends on NOTE-1A (bd-20p9): uses NoteListRow, NoteListRowJson, NoteListResult structs\n\n## Edge Cases\n- Empty body in table: show \"-\" or empty cell\n- Very long body with multi-byte chars: truncation must respect char boundaries (use .chars().take(n) not byte slicing)\n- JSONL with body containing newlines: serde_json::to_string escapes \\n correctly\n- CSV with body containing quotes: must double them per RFC 
4180","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:00:53.482055Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:24.304235Z","closed_at":"2026-02-12T18:13:24.304188Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["cli","per-note","search"],"dependencies":[{"issue_id":"bd-25hb","depends_on_id":"bd-1oyf","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-25s","title":"robot-docs: Add Ollama dependency discovery to manifest","description":"## Background\n\nAdd Ollama dependency discovery to robot-docs so agents know which commands need Ollama and which work without it.\n\n## Codebase Context\n\n- handle_robot_docs() in src/main.rs (line ~1646) returns RobotDocsData JSON\n- RobotDocsData has fields: commands, exit_codes, workflows, aliases, clap_error_codes\n- Currently 18 documented commands in the manifest\n- Ollama required for: embed, search --mode=semantic, search --mode=hybrid\n- Not required for: all Phase B temporal commands (timeline, file-history, trace), lexical search, count, ingest, stats, etc.\n- No dependencies field exists yet in RobotDocsData\n\n## Approach\n\nAdd dependencies field to RobotDocsData struct and populate in handle_robot_docs():\n\n```json\n{\n \"ollama\": {\n \"required_by\": [\"embed\", \"search --mode=semantic\", \"search --mode=hybrid\"],\n \"not_required_by\": [\"issues\", \"mrs\", \"search --mode=lexical\", \"timeline\", \"file-history\", \"trace\", \"count\", \"ingest\", \"stats\", \"sync\", \"doctor\", \"health\"],\n \"install\": {\"macos\": \"brew install ollama\", \"linux\": \"curl -fsSL https://ollama.ai/install.sh | sh\"},\n \"setup\": \"ollama pull nomic-embed-text\",\n \"note\": \"Lexical search and all temporal features work without Ollama.\"\n }\n}\n```\n\n## Acceptance Criteria\n\n- [ ] `lore robot-docs | jq '.data.dependencies.ollama'` returns structured info\n- [ ] required_by and 
not_required_by lists are complete and accurate\n- [ ] Phase B commands listed in not_required_by\n- [ ] Install instructions for macos and linux\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- src/main.rs (update RobotDocsData struct + handle_robot_docs)\n\n## TDD Loop\n\nVERIFY: `lore robot-docs | jq '.data.dependencies.ollama.required_by'`\n\n## Edge Cases\n\n- Keep not_required_by up to date as new commands are added\n- Phase B commands (timeline, file-history, trace) must be in not_required_by once they exist","status":"open","priority":4,"issue_type":"feature","created_at":"2026-01-30T20:26:43.169688Z","created_by":"tayloreernisse","updated_at":"2026-02-05T20:17:09.991762Z","compaction_level":0,"original_size":0,"labels":["enhancement","robot-mode"]} +{"id":"bd-25s","title":"robot-docs: Add Ollama dependency discovery to manifest","description":"## Background\n\nAdd Ollama dependency discovery to robot-docs so agents know which commands need Ollama and which work without it. Currently robot-docs lists commands, exit codes, workflows, and aliases — but has no dependency information.\n\n## Codebase Context\n\n- handle_robot_docs() in src/main.rs (line ~1646) returns RobotDocsData JSON\n- RobotDocsData struct has fields: commands, exit_codes, workflows, aliases, clap_error_codes\n- Currently 18 documented commands in the manifest\n- Ollama required for: embed, search --mode=semantic, search --mode=hybrid\n- Not required for: all Phase B temporal commands (timeline, file-history, trace), lexical search, count, ingest, stats, sync, doctor, health, who, show, issues, mrs, etc.\n- No dependencies field exists yet in RobotDocsData\n\n## Approach\n\n### 1. Add dependencies field to RobotDocsData (src/main.rs):\n\n```rust\n#[derive(Serialize)]\nstruct RobotDocsData {\n // ... 
existing fields ...\n dependencies: DependencyInfo,\n}\n\n#[derive(Serialize)]\nstruct DependencyInfo {\n ollama: OllamaDependency,\n}\n\n#[derive(Serialize)]\nstruct OllamaDependency {\n required_by: Vec,\n not_required_by: Vec,\n install: HashMap, // {\"macos\": \"brew install ollama\", \"linux\": \"curl ...\"}\n setup: String, // \"ollama pull nomic-embed-text\"\n note: String,\n}\n```\n\n### 2. Populate in handle_robot_docs():\n\n```json\n{\n \"ollama\": {\n \"required_by\": [\"embed\", \"search --mode=semantic\", \"search --mode=hybrid\"],\n \"not_required_by\": [\"issues\", \"mrs\", \"search --mode=lexical\", \"timeline\", \"file-history\", \"count\", \"ingest\", \"stats\", \"sync\", \"doctor\", \"health\", \"who\", \"show\", \"status\"],\n \"install\": {\"macos\": \"brew install ollama\", \"linux\": \"curl -fsSL https://ollama.ai/install.sh | sh\"},\n \"setup\": \"ollama pull nomic-embed-text\",\n \"note\": \"Lexical search and all temporal features work without Ollama.\"\n }\n}\n```\n\n## Acceptance Criteria\n\n- [ ] `lore robot-docs | jq '.data.dependencies.ollama'` returns structured info\n- [ ] required_by lists embed and semantic/hybrid search modes\n- [ ] not_required_by lists all commands that work without Ollama (including Phase B if they exist)\n- [ ] Install instructions for macos and linux\n- [ ] setup field includes \"ollama pull nomic-embed-text\"\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n- [ ] `cargo fmt --check` passes\n\n## Files\n\n- MODIFY: src/main.rs (add DependencyInfo/OllamaDependency structs, update RobotDocsData, populate in handle_robot_docs)\n\n## TDD Anchor\n\nNo unit test needed — this is static metadata. 
Verify with:\n\n```bash\ncargo check --all-targets\ncargo run --release -- robot-docs | jq '.data.dependencies.ollama.required_by'\ncargo run --release -- robot-docs | jq '.data.dependencies.ollama.not_required_by'\n```\n\n## Edge Cases\n\n- Keep not_required_by up to date as new commands are added — consider a comment in the code listing which commands to check\n- Phase B commands (timeline, file-history, trace) must be in not_required_by once they exist\n- If a command conditionally needs Ollama (like search with --mode flag), list the specific flag combination in required_by\n\n## Dependency Context\n\n- **RobotDocsData** (src/main.rs ~line 1646): the existing struct that this bead extends. Currently has commands (Vec), exit_codes (Vec), workflows (Vec), aliases (Vec), clap_error_codes (Vec). Adding a dependencies field is additive — no breaking changes.\n- **handle_robot_docs()**: the function that constructs and returns the JSON. All data is hardcoded in the function — no runtime introspection needed.","status":"open","priority":4,"issue_type":"feature","created_at":"2026-01-30T20:26:43.169688Z","created_by":"tayloreernisse","updated_at":"2026-02-17T16:53:20.425853Z","compaction_level":0,"original_size":0,"labels":["enhancement","robot-mode"]} {"id":"bd-26f2","title":"Implement common widgets (status bar, breadcrumb, loading, error toast, help overlay)","description":"## Background\nCommon widgets appear across all screens: the status bar shows context-sensitive key hints and sync status, the breadcrumb shows navigation depth, the loading spinner indicates background work, the error toast shows transient errors with auto-dismiss, and the help overlay (?) 
shows available keybindings.\n\n## Approach\nCreate crates/lore-tui/src/view/common/mod.rs and individual widget files:\n\nview/common/mod.rs:\n- render_breadcrumb(frame, area, nav: &NavigationStack, theme: &Theme): renders \"Dashboard > Issues > #42\" trail\n- render_status_bar(frame, area, registry: &CommandRegistry, screen: &Screen, mode: &InputMode, theme: &Theme): renders bottom bar with key hints and sync indicator\n- render_loading(frame, area, load_state: &LoadState, theme: &Theme): renders centered spinner for LoadingInitial, or subtle refresh indicator for Refreshing\n- render_error_toast(frame, area, msg: &str, theme: &Theme): renders floating toast at bottom-right with error message\n- render_help_overlay(frame, area, registry: &CommandRegistry, screen: &Screen, theme: &Theme): renders centered modal with keybinding list from registry\n\nCreate crates/lore-tui/src/view/mod.rs:\n- render_screen(frame, app: &LoreApp): top-level dispatch — renders breadcrumb + screen content + status bar + optional overlays (help, error toast, command palette)\n\n## Acceptance Criteria\n- [ ] Breadcrumb renders all stack entries with \" > \" separator\n- [ ] Status bar shows contextual hints from CommandRegistry\n- [ ] Loading spinner animates via tick subscription\n- [ ] Error toast auto-positions at bottom-right of screen\n- [ ] Help overlay shows all commands for current screen from registry\n- [ ] render_screen routes to correct per-screen view function\n- [ ] Overlays (help, error, palette) render on top of screen content\n\n## Files\n- CREATE: crates/lore-tui/src/view/mod.rs\n- CREATE: crates/lore-tui/src/view/common/mod.rs\n\n## TDD Anchor\nRED: Write test_breadcrumbs_format that creates a NavigationStack with Dashboard > IssueList, calls breadcrumbs(), asserts [\"Dashboard\", \"Issues\"].\nGREEN: Implement breadcrumbs() in NavigationStack (already in nav task) and render_breadcrumb.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml 
test_breadcrumbs\n\n## Edge Cases\n- Breadcrumb must truncate from the left if stack is too deep for terminal width\n- Status bar must handle narrow terminals (<60 cols) gracefully — show abbreviated hints\n- Error toast must handle very long messages with truncation\n- Help overlay must scroll if there are more commands than terminal height\n\n## Dependency Context\nUses NavigationStack from \"Implement NavigationStack\" task.\nUses CommandRegistry from \"Implement CommandRegistry\" task.\nUses LoadState from \"Implement AppState composition\" task.\nUses Theme from \"Implement theme configuration\" task.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:57:13.520393Z","created_by":"tayloreernisse","updated_at":"2026-02-12T21:10:58.182249Z","closed_at":"2026-02-12T21:10:58.181707Z","close_reason":"Completed: 5 common widgets + render_screen dispatch + 27 tests + clippy clean","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-26f2","depends_on_id":"bd-1qpp","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-26f2","depends_on_id":"bd-1v9m","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-26f2","depends_on_id":"bd-38lb","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-26f2","depends_on_id":"bd-5ofk","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-26lp","title":"Implement CLI integration (lore tui command + binary delegation)","description":"## Background\nThe lore CLI binary needs a tui subcommand that launches the lore-tui binary. This is runtime binary delegation — lore finds lore-tui via PATH lookup and execs it, passing through relevant flags. Zero compile-time dependency from lore to lore-tui. 
The TUI is the human interface; the CLI is the robot/script interface.\n\n## Approach\nAdd a tui subcommand to the lore CLI:\n\n**CLI side** (`src/cli/tui.rs`):\n- Add `Tui` variant to the main CLI enum with flags: --config, --sync, --fresh, --render-mode, --ascii, --no-alt-screen\n- Implementation: resolve lore-tui binary via PATH lookup (std::process::Command with \"lore-tui\")\n- Pass through all flags as CLI arguments\n- If lore-tui not found in PATH, print helpful error: \"lore-tui binary not found. Install with: cargo install --path crates/lore-tui\"\n- Exec (not spawn+wait) using std::os::unix::process::CommandExt::exec() for clean process replacement on Unix\n\n**Binary naming**: The binary is `lore-tui` (hyphenated), matching the crate name.\n\n## Acceptance Criteria\n- [ ] lore tui launches lore-tui binary from PATH\n- [ ] All flags (--config, --sync, --fresh, --render-mode, --ascii, --no-alt-screen) are passed through\n- [ ] Missing binary produces helpful error with install instructions\n- [ ] Uses exec() on Unix for clean process replacement (no zombie parent)\n- [ ] Robot mode: lore --robot tui returns JSON error if binary not found\n- [ ] lore tui --help shows TUI-specific flags\n\n## Files\n- CREATE: src/cli/tui.rs\n- MODIFY: src/cli/mod.rs (add tui subcommand to CLI enum)\n- MODIFY: src/main.rs (add match arm for Tui variant)\n\n## TDD Anchor\nRED: Write `test_tui_binary_not_found_error` that asserts the error message includes install instructions when lore-tui is not in PATH.\nGREEN: Implement the binary lookup and error handling.\nVERIFY: cargo test tui_binary -- --nocapture\n\nAdditional tests:\n- test_tui_flag_passthrough (verify all flags are forwarded)\n- test_tui_robot_mode_json_error (structured error when binary missing)\n\n## Edge Cases\n- lore-tui binary exists but is not executable — should produce clear error\n- PATH contains multiple lore-tui versions — uses first match (standard PATH behavior)\n- Windows: exec() not available — fall 
back to spawn+wait+exit with same code\n- User runs lore tui in robot mode — should fail with structured JSON error (TUI is human-only)\n\n## Dependency Context\nDepends on bd-2iqk (Doctor + Stats screens) for phase ordering. The CLI integration is one of the last Phase 4 tasks because it requires lore-tui to be substantially complete for the delegation to be useful.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:39.602970Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.449333Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-26lp","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-26lp","depends_on_id":"bd-2iqk","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2711","title":"WHO: Reviews mode query (query_reviews)","description":"## Background\n\nReviews mode answers \"What review patterns does person X have?\" by analyzing the **prefix** convention in DiffNote bodies (e.g., **suggestion**: ..., **question**: ..., **nit**: ...). Only counts DiffNotes on MRs the user did NOT author (m.author_username != ?1).\n\n## Approach\n\n### Three queries:\n1. **Total DiffNotes**: COUNT(*) of DiffNotes by user on others' MRs\n2. **Distinct MRs reviewed**: COUNT(DISTINCT m.id) \n3. 
**Category extraction**: SQL-level prefix parsing + Rust normalization\n\n### Category extraction SQL:\n```sql\nSELECT\n SUBSTR(ltrim(n.body), 3, INSTR(SUBSTR(ltrim(n.body), 3), '**') - 1) AS raw_prefix,\n COUNT(*) AS cnt\nFROM notes n\nJOIN discussions d ON n.discussion_id = d.id\nJOIN merge_requests m ON d.merge_request_id = m.id\nWHERE n.author_username = ?1\n AND n.note_type = 'DiffNote' AND n.is_system = 0\n AND m.author_username != ?1\n AND ltrim(n.body) LIKE '**%**%' -- only bodies with **prefix** pattern\n AND n.created_at >= ?2\n AND (?3 IS NULL OR n.project_id = ?3)\nGROUP BY raw_prefix ORDER BY cnt DESC\n```\n\nKey: `ltrim(n.body)` tolerates leading whitespace before **prefix** (common in practice).\n\n### normalize_review_prefix() in Rust:\n```rust\nfn normalize_review_prefix(raw: &str) -> String {\n let s = raw.trim().trim_end_matches(':').trim().to_lowercase();\n // Strip parentheticals like \"(non-blocking)\"\n let s = if let Some(idx) = s.find('(') { s[..idx].trim().to_string() } else { s };\n // Merge nit/nitpick variants\n match s.as_str() {\n \"nitpick\" | \"nit\" => \"nit\".to_string(),\n other => other.to_string(),\n }\n}\n```\n\n### HashMap merge for normalized categories, then sort by count DESC\n\n### ReviewsResult struct:\n```rust\npub struct ReviewsResult {\n pub username: String,\n pub total_diffnotes: u32,\n pub categorized_count: u32,\n pub mrs_reviewed: u32,\n pub categories: Vec,\n}\npub struct ReviewCategory { pub name: String, pub count: u32, pub percentage: f64 }\n```\n\nNo LIMIT needed — categories are naturally bounded (few distinct prefixes).\n\n## Files\n\n- `src/cli/commands/who.rs`\n\n## TDD Loop\n\nRED:\n```\ntest_reviews_query — insert 3 DiffNotes (2 with **prefix**, 1 without); verify total=3, categorized=2, categories.len()=2\ntest_normalize_review_prefix — \"suggestion\" \"Suggestion:\" \"suggestion (non-blocking):\" \"Nitpick:\" \"nit (non-blocking):\" \"question\" \"TODO:\"\n```\n\nGREEN: Implement query_reviews + 
normalize_review_prefix\nVERIFY: `cargo test -- reviews`\n\n## Acceptance Criteria\n\n- [ ] test_reviews_query passes (total=3, categorized=2)\n- [ ] test_normalize_review_prefix passes (nit/nitpick merge, parenthetical strip)\n- [ ] Only counts DiffNotes on MRs user did NOT author\n- [ ] Default since window: 6m\n\n## Edge Cases\n\n- Self-authored MRs excluded (m.author_username != ?1) — user's notes on own MRs are not \"reviews\"\n- ltrim() handles leading whitespace before **prefix**\n- Empty raw_prefix after normalization filtered out (!normalized.is_empty())\n- Percentage calculated from categorized_count (not total_diffnotes)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:40:53.350210Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.599252Z","closed_at":"2026-02-08T04:10:29.599217Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2711","depends_on_id":"bd-2ldg","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2711","depends_on_id":"bd-34rr","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-296a","title":"NOTE-1E: Composite query index and author_id column (migration 022)","description":"## Background\nThe notes table needs composite covering indexes for the new query_notes() function, plus the author_id column for immutable identity (NOTE-0D). Combined in a single migration to avoid an extra migration step. Migration slot 022 is available (021 = work_item_status, 023 = issue_detail_fields already exists).\n\n## Approach\nCreate migrations/022_notes_query_index.sql with:\n\n1. 
Composite index for author-scoped queries (most common pattern):\n CREATE INDEX IF NOT EXISTS idx_notes_user_created\n ON notes(project_id, author_username COLLATE NOCASE, created_at DESC, id DESC)\n WHERE is_system = 0;\n\n2. Composite index for project-scoped date-range queries:\n CREATE INDEX IF NOT EXISTS idx_notes_project_created\n ON notes(project_id, created_at DESC, id DESC)\n WHERE is_system = 0;\n\n3. Discussion JOIN indexes (check if they already exist first):\n CREATE INDEX IF NOT EXISTS idx_discussions_issue_id ON discussions(issue_id);\n CREATE INDEX IF NOT EXISTS idx_discussions_mr_id ON discussions(merge_request_id);\n\n4. Immutable author identity column (for NOTE-0D):\n ALTER TABLE notes ADD COLUMN author_id INTEGER;\n CREATE INDEX IF NOT EXISTS idx_notes_author_id ON notes(author_id) WHERE author_id IS NOT NULL;\n\nRegister in src/core/db.rs MIGRATIONS array as (\"022\", include_str!(\"../../migrations/022_notes_query_index.sql\")). Insert BEFORE the existing (\"023\", ...) entry. 
LATEST_SCHEMA_VERSION auto-increments via MIGRATIONS.len().\n\n## Files\n- CREATE: migrations/022_notes_query_index.sql\n- MODIFY: src/core/db.rs (add (\"022\", include_str!(...)) to MIGRATIONS array, insert at position before \"023\" entry around line 73)\n\n## TDD Anchor\nRED: test_migration_022_indexes_exist — run_migrations on in-memory DB, verify 4 new indexes exist in sqlite_master.\nGREEN: Create migration file with all CREATE INDEX statements.\nVERIFY: cargo test migration_022 -- --nocapture\n\n## Acceptance Criteria\n- [ ] Migration 022 creates idx_notes_user_created partial index\n- [ ] Migration 022 creates idx_notes_project_created partial index\n- [ ] Migration 022 creates idx_discussions_issue_id (or is no-op if exists)\n- [ ] Migration 022 creates idx_discussions_mr_id (or is no-op if exists)\n- [ ] Migration 022 adds author_id INTEGER column to notes\n- [ ] Migration 022 creates idx_notes_author_id partial index\n- [ ] MIGRATIONS array in db.rs includes (\"022\", ...) before (\"023\", ...)\n- [ ] Existing tests still pass with new migration\n- [ ] Test verifying all indexes exist passes\n\n## Edge Cases\n- Partial indexes exclude system notes (is_system = 0) — filters 30-50% of notes\n- COLLATE NOCASE on author_username matches the query's case-insensitive comparison\n- author_id is nullable (existing notes won't have it until re-synced)\n- IF NOT EXISTS on all CREATE INDEX statements makes migration idempotent","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:18.127989Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.435624Z","closed_at":"2026-02-12T18:13:15.435576Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"],"dependencies":[{"issue_id":"bd-296a","depends_on_id":"bd-jbfw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-29qw","title":"Implement Timeline screen (state + action + 
view)","description":"## Background\nThe Timeline screen renders a chronological event stream from the 5-stage timeline pipeline (SEED -> HYDRATE -> EXPAND -> COLLECT -> RENDER). Events are color-coded by type and can be scoped to an entity, author, or time range.\n\n## Approach\nState (state/timeline.rs):\n- TimelineState: events (Vec), query (String), query_input (TextInput), query_focused (bool), selected_index (usize), scroll_offset (usize), scope (TimelineScope)\n- TimelineScope: All, Entity(EntityKey), Author(String), DateRange(DateTime, DateTime)\n\nAction (action.rs):\n- fetch_timeline(conn, scope, limit, clock) -> Vec: runs the timeline pipeline against DB\n\nView (view/timeline.rs):\n- Vertical event stream with timestamp gutter on the left\n- Color-coded event types: Created(green), Updated(yellow), Closed(red), Merged(purple), Commented(blue), Labeled(cyan), Milestoned(orange)\n- Each event: timestamp | entity ref | event description\n- Entity refs navigable via Enter\n- Query bar for filtering by text or entity\n- Keyboard: j/k scroll, Enter navigate to entity, / focus query, g+g top\n\n## Acceptance Criteria\n- [ ] Timeline renders chronological event stream\n- [ ] Events color-coded by type\n- [ ] Entity references navigable\n- [ ] Scope filters: all, per-entity, per-author, date range\n- [ ] Query bar filters events\n- [ ] Keyboard navigation works (j/k/Enter/Esc)\n- [ ] Timestamps use injected Clock\n\n## Files\n- MODIFY: crates/lore-tui/src/state/timeline.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_timeline)\n- CREATE: crates/lore-tui/src/view/timeline.rs\n\n## TDD Anchor\nRED: Write test_fetch_timeline_scoped that creates issues with events, calls fetch_timeline with Entity scope, asserts only that entity's events returned.\nGREEN: Implement fetch_timeline with scope filtering.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_timeline\n\n## Edge Cases\n- Timeline pipeline may not be fully 
implemented in core yet — degrade gracefully if SEED/HYDRATE/EXPAND stages are not available, fall back to raw events\n- Very long timelines: VirtualizedList or lazy loading for performance\n- Events with identical timestamps: stable sort by entity type, then iid\n\n## Dependency Context\nUses timeline pipeline types from src/core/timeline.rs if available.\nUses Clock for timestamp rendering from \"Implement Clock trait\" task.\nUses EntityKey navigation from \"Implement core types\" task.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:05.605968Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:33.993830Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-29qw","depends_on_id":"bd-1zow","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-29qw","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-29wn","title":"Split app.rs into app/ module (model + key dispatch)","description":"app.rs is 712 lines and will grow as screens are added. Split into crates/lore-tui/src/app/mod.rs (LoreApp struct, new(), init()), app/update.rs (update() method, key dispatch, message handling), app/view.rs (view() delegation). Key dispatch is the largest section and the primary growth point. Keep public API identical via re-exports.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T21:24:16.854321Z","created_by":"tayloreernisse","updated_at":"2026-02-12T21:24:38.918911Z","compaction_level":0,"original_size":0,"labels":["TUI"]} +{"id":"bd-29wn","title":"Split app.rs into app/ module (model + key dispatch)","description":"app.rs is 712 lines and will grow as screens are added. Split into crates/lore-tui/src/app/mod.rs (LoreApp struct, new(), init()), app/update.rs (update() method, key dispatch, message handling), app/view.rs (view() delegation). 
Key dispatch is the largest section and the primary growth point. Keep public API identical via re-exports.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T21:24:16.854321Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:53:10.448834Z","closed_at":"2026-02-18T18:53:10.448649Z","close_reason":"Split app.rs into app/mod.rs, app/update.rs, app/tests.rs. All 177 tests pass.","compaction_level":0,"original_size":0,"labels":["TUI"]} {"id":"bd-2ac","title":"Create migration 009_embeddings.sql","description":"## Background\nMigration 009 creates the embedding storage layer for Gate B. It introduces a sqlite-vec vec0 virtual table for vector search and an embedding_metadata table for tracking provenance per chunk. Unlike migrations 007-008, this migration REQUIRES sqlite-vec to be loaded before it can be applied. The migration runner in db.rs must load the sqlite-vec extension first.\n\n## Approach\nCreate `migrations/009_embeddings.sql` per PRD Section 1.3.\n\n**Tables:**\n1. `embeddings` — vec0 virtual table with `embedding float[768]`\n2. `embedding_metadata` — tracks per-chunk provenance with composite PK (document_id, chunk_index)\n3. Orphan cleanup trigger: `documents_embeddings_ad` — deletes ALL chunk embeddings when a document is deleted using range deletion `[doc_id * 1000, (doc_id + 1) * 1000)`\n\n**Critical: sqlite-vec loading:**\nThe migration runner in `src/core/db.rs` must load sqlite-vec BEFORE applying any migrations. This means adding extension loading to the `create_connection()` or `run_migrations()` function. 
sqlite-vec is loaded via:\n```rust\nconn.load_extension_enable()?;\nconn.load_extension(\"vec0\", None)?; // or platform-specific path\nconn.load_extension_disable()?;\n```\n\nRegister migration 9 in `src/core/db.rs` MIGRATIONS array.\n\n## Acceptance Criteria\n- [ ] `migrations/009_embeddings.sql` file exists\n- [ ] `embeddings` vec0 virtual table created with `embedding float[768]`\n- [ ] `embedding_metadata` table has composite PK (document_id, chunk_index)\n- [ ] `embedding_metadata.document_id` has FK to documents(id) ON DELETE CASCADE\n- [ ] Error tracking fields: last_error, attempt_count, last_attempt_at\n- [ ] Orphan cleanup trigger: deletes embeddings WHERE rowid in [doc_id*1000, (doc_id+1)*1000)\n- [ ] Index on embedding_metadata(last_error) WHERE last_error IS NOT NULL\n- [ ] Index on embedding_metadata(document_id)\n- [ ] Schema version 9 recorded\n- [ ] Migration runner loads sqlite-vec before applying migrations\n- [ ] `cargo build` succeeds\n\n## Files\n- `migrations/009_embeddings.sql` — new file (copy exact SQL from PRD Section 1.3)\n- `src/core/db.rs` — add migration 9 to MIGRATIONS array; add sqlite-vec extension loading\n\n## TDD Loop\nRED: Register migration in db.rs, `cargo test migration_tests` fails\nGREEN: Create SQL file + add extension loading\nVERIFY: `cargo test migration_tests && cargo build`\n\n## Edge Cases\n- sqlite-vec not installed: migration fails with clear error (not a silent skip)\n- Migration applied without sqlite-vec loaded: `CREATE VIRTUAL TABLE` fails with \"no such module: vec0\"\n- Documents deleted before embeddings: trigger fires but vec0 DELETE on empty range is safe\n- vec0 doesn't support FK cascades: that's why we need the explicit trigger","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:33.958178Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:22:26.478290Z","closed_at":"2026-01-30T17:22:26.478229Z","close_reason":"Completed: migration 009_embeddings.sql with vec0 
table, embedding_metadata with composite PK, orphan cleanup trigger, registered in db.rs","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2ac","depends_on_id":"bd-221","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2am8","title":"OBSERV: Enhance sync-status to show recent runs with metrics","description":"## Background\nsync_status currently queries sync_runs but always gets zero rows (nothing writes to the table). After bd-23a4 wires up SyncRunRecorder, rows will exist. This bead enhances the display to show recent runs with metrics.\n\n## Approach\n### src/cli/commands/sync_status.rs\n\n1. Change get_last_sync_run() (line ~66) to get_recent_sync_runs() returning last N:\n```rust\nfn get_recent_sync_runs(conn: &Connection, limit: usize) -> Result> {\n let mut stmt = conn.prepare(\n \"SELECT id, started_at, finished_at, status, command, error,\n run_id, total_items_processed, total_errors, metrics_json\n FROM sync_runs\n ORDER BY started_at DESC\n LIMIT ?1\",\n )?;\n // ... map rows to SyncRunInfo\n}\n```\n\n2. Extend SyncRunInfo to include new fields:\n```rust\npub struct SyncRunInfo {\n pub id: i64,\n pub started_at: i64,\n pub finished_at: Option,\n pub status: String,\n pub command: String,\n pub error: Option,\n pub run_id: Option, // NEW\n pub total_items_processed: i64, // NEW\n pub total_errors: i64, // NEW\n pub stages: Option>, // NEW: parsed from metrics_json\n}\n```\n\n3. Parse metrics_json into Vec:\n```rust\nlet stages: Option> = row.get::<_, Option>(9)?\n .and_then(|json| serde_json::from_str(&json).ok());\n```\n\n4. Interactive output (new format):\n```\nRecent sync runs:\n Run a1b2c3 | 2026-02-04 14:32 | 45.2s | 235 items | 1 error\n Run d4e5f6 | 2026-02-03 14:30 | 38.1s | 220 items | 0 errors\n Run g7h8i9 | 2026-02-02 14:29 | 42.7s | 228 items | 0 errors\n```\n\n5. 
Robot JSON output: runs array with stages parsed from metrics_json:\n```json\n{\n \"ok\": true,\n \"data\": {\n \"runs\": [{ \"run_id\": \"...\", \"stages\": [...] }],\n \"cursors\": [...],\n \"summary\": {...}\n }\n}\n```\n\n6. Add --run flag to sync-status subcommand for single-run detail view (shows full stage breakdown).\n\n## Acceptance Criteria\n- [ ] lore sync-status shows last 10 runs (not just 1) with run_id, duration, items, errors\n- [ ] lore --robot sync-status JSON includes runs array with stages parsed from metrics_json\n- [ ] lore sync-status --run a1b2c3 shows single run detail with full stage breakdown\n- [ ] When no runs exist, shows appropriate \"No sync runs recorded\" message\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/cli/commands/sync_status.rs (rewrite query, extend structs, update display)\n\n## TDD Loop\nRED:\n - test_sync_status_shows_runs: insert 3 sync_runs rows, call print function, assert all 3 shown\n - test_sync_status_json_includes_stages: insert row with metrics_json, verify robot JSON has stages\n - test_sync_status_empty: no rows, verify graceful message\nGREEN: Rewrite get_last_sync_run -> get_recent_sync_runs, extend SyncRunInfo, update output\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- metrics_json is NULL (old rows or failed runs): stages field is null/empty in output\n- metrics_json is malformed: serde_json::from_str fails silently (.ok()), stages is None\n- Duration calculation: finished_at - started_at in ms. 
If finished_at is NULL (running), show \"in progress\"","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:54:51.467705Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:43:07.306504Z","closed_at":"2026-02-04T17:43:07.306425Z","close_reason":"Enhanced sync-status: shows last 10 runs with run_id, duration, items, errors, parsed stages; JSON includes full stages array","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-2am8","depends_on_id":"bd-23a4","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2am8","depends_on_id":"bd-3pz","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-2ao4","title":"Add migration for dual-path and reviewer participation indexes","description":"## Background\nThe restructured expert SQL (bd-1hoq) uses UNION ALL + dedup to match both old_path and new_path columns. Without indexes on old_path columns, these branches would force table scans. The reviewer_participation CTE joins notes -> discussions and needs index coverage on discussion_id. Path resolution probes (build_path_query, suffix_probe) need their own old_path index optimized for existence checks.\n\n## Approach\nCreate migration file migrations/026_scoring_indexes.sql (latest is 025_note_dirty_backfill.sql). Add entry to MIGRATIONS array in db.rs. LATEST_SCHEMA_VERSION auto-increments via MIGRATIONS.len().\n\n### Migration SQL (5 indexes):\n```sql\n-- 1. Support old_path leg of matched_notes CTE (scoring queries)\nCREATE INDEX IF NOT EXISTS idx_notes_old_path_author\n ON notes(position_old_path, author_username, created_at)\n WHERE note_type = 'DiffNote' AND is_system = 0 AND position_old_path IS NOT NULL;\n\n-- 2. Support old_path leg of matched_file_changes CTE\nCREATE INDEX IF NOT EXISTS idx_mfc_old_path_project_mr\n ON mr_file_changes(old_path, project_id, merge_request_id)\n WHERE old_path IS NOT NULL;\n\n-- 3. 
Ensure new_path index parity for matched_file_changes CTE\nCREATE INDEX IF NOT EXISTS idx_mfc_new_path_project_mr\n ON mr_file_changes(new_path, project_id, merge_request_id);\n\n-- 4. Support reviewer_participation CTE: notes -> discussions join\nCREATE INDEX IF NOT EXISTS idx_notes_diffnote_discussion_author\n ON notes(discussion_id, author_username, created_at)\n WHERE note_type = 'DiffNote' AND is_system = 0;\n\n-- 5. Support path resolution probes on old_path (build_path_query + suffix_probe)\nCREATE INDEX IF NOT EXISTS idx_notes_old_path_project_created\n ON notes(position_old_path, project_id, created_at)\n WHERE note_type = 'DiffNote' AND is_system = 0 AND position_old_path IS NOT NULL;\n```\n\n### MIGRATIONS array addition (src/core/db.rs, after the (\"025\", ...) entry):\n```rust\n(\"026\", include_str!(\"../../migrations/026_scoring_indexes.sql\")),\n```\n\n## Acceptance Criteria\n- [ ] Migration file at migrations/026_scoring_indexes.sql with 5 CREATE INDEX statements\n- [ ] MIGRATIONS array has (\"026\", include_str!(\"../../migrations/026_scoring_indexes.sql\"))\n- [ ] LATEST_SCHEMA_VERSION auto-increments to 26 (MIGRATIONS.len())\n- [ ] cargo test (migration tests pass on both fresh and migrated :memory: DBs)\n- [ ] Existing indexes unaffected\n\n## Files\n- CREATE: migrations/026_scoring_indexes.sql\n- MODIFY: src/core/db.rs (MIGRATIONS array — add entry after (\"025\", ...) 
at end of array)\n\n## TDD Loop\nRED: cargo test should fail if migration not applied (schema version mismatch)\nGREEN: Add migration file + MIGRATIONS entry\nVERIFY: cargo test -p lore\n\n## Edge Cases\n- Use CREATE INDEX IF NOT EXISTS (idempotent)\n- Partial indexes with WHERE clauses keep size minimal\n- position_old_path and old_path can be NULL (handled by WHERE clause)\n- idx_notes_old_path_project_created vs idx_notes_old_path_author: former for probes (no author constraint), latter for scoring (author in covering index)","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-09T16:59:30.746899Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:44:15.476719Z","compaction_level":0,"original_size":0,"labels":["db","scoring"]} +{"id":"bd-2ao4","title":"Add migration for dual-path and reviewer participation indexes","description":"## Background\nThe restructured expert SQL (bd-1hoq) uses UNION ALL + dedup to match both old_path and new_path columns. Without indexes on old_path columns, these branches would force table scans. The reviewer_participation CTE joins notes -> discussions and needs index coverage on discussion_id. Path resolution probes (build_path_query, suffix_probe) need their own old_path index optimized for existence checks.\n\n## Approach\nCreate migration file migrations/026_scoring_indexes.sql (latest is 025_note_dirty_backfill.sql). Add entry to MIGRATIONS array in db.rs. LATEST_SCHEMA_VERSION auto-increments via MIGRATIONS.len().\n\n### Migration SQL (5 indexes):\n```sql\n-- 1. Support old_path leg of matched_notes CTE (scoring queries)\nCREATE INDEX IF NOT EXISTS idx_notes_old_path_author\n ON notes(position_old_path, author_username, created_at)\n WHERE note_type = 'DiffNote' AND is_system = 0 AND position_old_path IS NOT NULL;\n\n-- 2. 
Support old_path leg of matched_file_changes CTE\nCREATE INDEX IF NOT EXISTS idx_mfc_old_path_project_mr\n ON mr_file_changes(old_path, project_id, merge_request_id)\n WHERE old_path IS NOT NULL;\n\n-- 3. Ensure new_path index parity for matched_file_changes CTE\nCREATE INDEX IF NOT EXISTS idx_mfc_new_path_project_mr\n ON mr_file_changes(new_path, project_id, merge_request_id);\n\n-- 4. Support reviewer_participation CTE: notes -> discussions join\nCREATE INDEX IF NOT EXISTS idx_notes_diffnote_discussion_author\n ON notes(discussion_id, author_username, created_at)\n WHERE note_type = 'DiffNote' AND is_system = 0;\n\n-- 5. Support path resolution probes on old_path (build_path_query + suffix_probe)\nCREATE INDEX IF NOT EXISTS idx_notes_old_path_project_created\n ON notes(position_old_path, project_id, created_at)\n WHERE note_type = 'DiffNote' AND is_system = 0 AND position_old_path IS NOT NULL;\n```\n\n### MIGRATIONS array addition (src/core/db.rs, after the (\"025\", ...) entry):\n```rust\n(\"026\", include_str!(\"../../migrations/026_scoring_indexes.sql\")),\n```\n\n## Acceptance Criteria\n- [ ] Migration file at migrations/026_scoring_indexes.sql with 5 CREATE INDEX statements\n- [ ] MIGRATIONS array has (\"026\", include_str!(\"../../migrations/026_scoring_indexes.sql\"))\n- [ ] LATEST_SCHEMA_VERSION auto-increments to 26 (MIGRATIONS.len())\n- [ ] cargo test (migration tests pass on both fresh and migrated :memory: DBs)\n- [ ] Existing indexes unaffected\n\n## Files\n- CREATE: migrations/026_scoring_indexes.sql\n- MODIFY: src/core/db.rs (MIGRATIONS array — add entry after (\"025\", ...) 
at end of array)\n\n## TDD Loop\nRED: cargo test should fail if migration not applied (schema version mismatch)\nGREEN: Add migration file + MIGRATIONS entry\nVERIFY: cargo test -p lore\n\n## Edge Cases\n- Use CREATE INDEX IF NOT EXISTS (idempotent)\n- Partial indexes with WHERE clauses keep size minimal\n- position_old_path and old_path can be NULL (handled by WHERE clause)\n- idx_notes_old_path_project_created vs idx_notes_old_path_author: former for probes (no author constraint), latter for scoring (author in covering index)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T16:59:30.746899Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:43:04.407407Z","closed_at":"2026-02-12T20:43:04.407369Z","close_reason":"Implemented by time-decay swarm: 3 agents, 12 tasks, 621 tests passing, all quality gates green","compaction_level":0,"original_size":0,"labels":["db","scoring"]} {"id":"bd-2as","title":"[CP1] Epic: Issue Ingestion","description":"Ingest all issues, labels, and issue discussions from configured GitLab repositories with resumable cursor-based incremental sync. 
This establishes the core data ingestion pattern reused for MRs in CP2.\n\nSuccess Criteria:\n- gi ingest --type=issues fetches all issues (count matches GitLab UI)\n- Labels extracted from issue payloads\n- Issue discussions fetched per-issue\n- Cursor-based sync is resumable\n- Sync tracking records all runs\n- Single-flight lock prevents concurrent runs\n\nReference: docs/prd/checkpoint-1.md","status":"tombstone","priority":1,"issue_type":"task","created_at":"2026-01-25T15:18:44.062057Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.155746Z","closed_at":"2026-01-25T15:21:35.155746Z","deleted_at":"2026-01-25T15:21:35.155744Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-2b28","title":"NOTE-0C: Sweep safety guard for partial fetch protection","description":"## Background\nThe sweep pattern (delete notes where last_seen_at < run_seen_at) is correct only when a discussion's notes were fully fetched. If a page fails mid-fetch, the current logic would incorrectly delete valid notes that weren't seen during the incomplete fetch. Especially dangerous for long threads spanning multiple API pages.\n\n## Approach\nAdd a fetch_complete: bool parameter to discussion ingestion functions. Only run sweep when fetch completed successfully:\n\nif fetch_complete {\n sweep_stale_issue_notes(&tx, local_discussion_id, last_seen_at)?;\n} else {\n tracing::warn!(discussion_id = local_discussion_id, \"Skipping stale note sweep due to partial/incomplete fetch\");\n}\n\nDetermining fetch_complete: Look at the existing pagination_error pattern in src/ingestion/discussions.rs lines 148-154. When pagination_error is None (all pages fetched successfully), fetch_complete = true. When pagination_error is Some (network error, rate limit, interruption), fetch_complete = false. 
The MR path has a similar pattern in src/ingestion/mr_discussions.rs — search for where sweep_stale_discussions (line 539) and sweep_stale_notes (line 551) are called to find the equivalent guard.\n\nThe fetch_complete flag should be threaded from the outer discussion-fetch loop into the per-discussion upsert transaction, NOT as a parameter on sweep itself (sweep always sweeps — the caller decides whether to call it).\n\n## Files\n- MODIFY: src/ingestion/discussions.rs (guard sweep call with fetch_complete, lines 132-146)\n- MODIFY: src/ingestion/mr_discussions.rs (guard sweep call, near line 551 call site)\n\n## TDD Anchor\nRED: test_partial_fetch_does_not_sweep_notes — 5 notes in DB, partial fetch returns 2, assert all 5 still exist.\nGREEN: Add fetch_complete guard around sweep call.\nVERIFY: cargo test partial_fetch_does_not_sweep -- --nocapture\nTests: test_complete_fetch_runs_sweep_normally, test_partial_fetch_then_complete_fetch_cleans_up\n\n## Acceptance Criteria\n- [ ] Sweep only runs when fetch_complete = true\n- [ ] Partial fetch logs a warning (tracing::warn!) but preserves all notes\n- [ ] Second complete fetch correctly sweeps notes deleted on GitLab\n- [ ] Both issue and MR discussion paths support fetch_complete\n- [ ] All 3 tests pass\n\n## Dependency Context\n- Depends on NOTE-0A (bd-3bpk): modifies the sweep call site from NOTE-0A. 
The sweep functions must exist before this guard can wrap them.\n\n## Edge Cases\n- Rate limit mid-page: pagination_error triggers partial fetch — sweep must be skipped\n- Discussion with 1 page of notes: always fully fetched if no error, sweep runs normally\n- Empty discussion (0 notes returned): still counts as complete fetch — sweep is a no-op anyway","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:59:44.290790Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.172004Z","closed_at":"2026-02-12T18:13:15.171952Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"]} {"id":"bd-2bu","title":"[CP1] GitLab types for issues, discussions, notes","description":"Add Rust types to src/gitlab/types.rs for GitLab API responses.\n\n## Types to Add\n\n### GitLabIssue\n- id: i64 (GitLab global ID)\n- iid: i64 (project-scoped issue number)\n- project_id: i64\n- title: String\n- description: Option\n- state: String (\"opened\" | \"closed\")\n- created_at, updated_at: String (ISO 8601)\n- closed_at: Option\n- author: GitLabAuthor\n- labels: Vec (array of label names - CP1 canonical)\n- web_url: String\nNOTE: labels_details intentionally NOT modeled - varies across GitLab versions\n\n### GitLabAuthor\n- id: i64\n- username: String\n- name: String\n\n### GitLabDiscussion\n- id: String (like \"6a9c1750b37d...\")\n- individual_note: bool\n- notes: Vec\n\n### GitLabNote\n- id: i64\n- note_type: Option (\"DiscussionNote\" | \"DiffNote\" | null)\n- body: String\n- author: GitLabAuthor\n- created_at, updated_at: String (ISO 8601)\n- system: bool\n- resolvable: bool (default false)\n- resolved: bool (default false)\n- resolved_by: Option\n- resolved_at: Option\n- position: Option\n\n### GitLabNotePosition\n- old_path, new_path: Option\n- old_line, new_line: Option\n\nFiles: src/gitlab/types.rs\nTests: Test deserialization with fixtures\nDone when: Types compile and deserialize sample 
API responses correctly","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:42:46.922805Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.710057Z","closed_at":"2026-01-25T17:02:01.710057Z","deleted_at":"2026-01-25T17:02:01.710051Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} @@ -124,7 +132,7 @@ {"id":"bd-2dlt","title":"Implement GraphQL client with partial-error handling","description":"## Background\nGitLab's GraphQL endpoint (/api/graphql) uses different auth than REST (Bearer token, not PRIVATE-TOKEN). We need a minimal GraphQL client that handles the GitLab-specific error codes and partial-data responses per GraphQL spec. The client returns a GraphqlQueryResult struct that propagates partial-error metadata end-to-end.\n\n## Approach\nCreate a new file src/gitlab/graphql.rs with GraphqlClient (uses reqwest). Add httpdate crate for Retry-After HTTP-date parsing. Wire into the module tree. 
Factory on GitLabClient keeps token encapsulated.\n\n## Files\n- src/gitlab/graphql.rs (NEW) — GraphqlClient struct, GraphqlQueryResult, ansi256_from_rgb\n- src/gitlab/mod.rs (add pub mod graphql;)\n- src/gitlab/client.rs (add graphql_client() factory method)\n- Cargo.toml (add httpdate dependency)\n\n## Implementation\n\nGraphqlClient struct:\n Fields: http (reqwest::Client with 30s timeout), base_url (String), token (String)\n Constructor: new(base_url, token) — trims trailing slash from base_url\n \nquery() method:\n - POST to {base_url}/api/graphql\n - Headers: Authorization: Bearer {token}, Content-Type: application/json\n - Body: {\"query\": \"...\", \"variables\": {...}}\n - Returns Result\n\nGraphqlQueryResult struct (pub):\n data: serde_json::Value\n had_partial_errors: bool\n first_partial_error: Option\n\nHTTP status mapping:\n 401 | 403 -> LoreError::GitLabAuthFailed\n 404 -> LoreError::GitLabNotFound { resource: \"GraphQL endpoint\" }\n 429 -> LoreError::GitLabRateLimited { retry_after } (parse Retry-After: try u64 first, then httpdate::parse_http_date, fallback 60)\n Other non-success -> LoreError::Other\n\nGraphQL-level error handling:\n errors array present + data absent/null -> Err(LoreError::Other(\"GraphQL error: {first_msg}\"))\n errors array present + data present -> Ok(GraphqlQueryResult { data, had_partial_errors: true, first_partial_error: Some(first_msg) })\n No errors + data present -> Ok(GraphqlQueryResult { data, had_partial_errors: false, first_partial_error: None })\n No errors + no data -> Err(LoreError::Other(\"missing 'data' field\"))\n\nansi256_from_rgb(r, g, b) -> u8:\n Maps RGB to nearest ANSI 256-color index using 6x6x6 cube (indices 16-231).\n MUST be placed BEFORE #[cfg(test)] module (clippy::items_after_test_module).\n\nFactory in src/gitlab/client.rs:\n pub fn graphql_client(&self) -> crate::gitlab::graphql::GraphqlClient {\n crate::gitlab::graphql::GraphqlClient::new(&self.base_url, &self.token)\n }\n\n## Acceptance 
Criteria\n- [ ] query() sends POST with Bearer auth header\n- [ ] Success: returns GraphqlQueryResult { data, had_partial_errors: false }\n- [ ] Errors-only (no data): returns Err with first error message\n- [ ] Partial data + errors: returns Ok with had_partial_errors: true\n- [ ] 401 -> GitLabAuthFailed\n- [ ] 403 -> GitLabAuthFailed\n- [ ] 404 -> GitLabNotFound\n- [ ] 429 -> GitLabRateLimited (parses Retry-After delta-seconds and HTTP-date, fallback 60)\n- [ ] ansi256_from_rgb: (0,0,0)->16, (255,255,255)->231\n- [ ] cargo check --all-targets passes\n\n## TDD Loop\nRED: test_graphql_query_success, test_graphql_query_with_errors_no_data, test_graphql_auth_uses_bearer, test_graphql_401_maps_to_auth_failed, test_graphql_403_maps_to_auth_failed, test_graphql_404_maps_to_not_found, test_graphql_partial_data_with_errors_returns_data, test_retry_after_http_date_format, test_retry_after_invalid_falls_back_to_60, test_ansi256_from_rgb\n Tests use wiremock or similar mock HTTP server\nGREEN: Implement GraphqlClient, add httpdate to Cargo.toml\nVERIFY: cargo test graphql && cargo test ansi256\n\n## Edge Cases\n- Use r##\"...\"## in tests containing \"#1f75cb\" hex colors (# breaks r#\"...\"#)\n- LoreError::GitLabRateLimited uses u64 not Option — use .unwrap_or(60)\n- httpdate::parse_http_date returns SystemTime — compute duration_since(now) for delta\n- GraphqlQueryResult is NOT Clone — tests must check fields individually","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:41:52.833151Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.417835Z","closed_at":"2026-02-11T07:21:33.417793Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 
failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2dlt","depends_on_id":"bd-1v8t","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2dlt","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2e8","title":"Add fetchResourceEvents config flag to SyncConfig","description":"## Background\nEvent fetching should be opt-in (default true) so users who don't need temporal queries skip 3 extra API calls per entity. This follows the existing SyncConfig pattern with serde defaults and camelCase JSON aliases.\n\n## Approach\nAdd to SyncConfig in src/core/config.rs:\n```rust\n#[serde(rename = \"fetchResourceEvents\", default = \"default_true\")]\npub fetch_resource_events: bool,\n```\n\nAdd default function (if not already present):\n```rust\nfn default_true() -> bool { true }\n```\n\nUpdate Default impl for SyncConfig to include `fetch_resource_events: true`.\n\nAdd --no-events flag to sync command in src/cli/mod.rs (SyncArgs):\n```rust\n/// Skip resource event fetching (overrides config)\n#[arg(long = \"no-events\", help_heading = \"Sync Options\")]\npub no_events: bool,\n```\n\nIn the sync command handler (src/cli/commands/sync.rs), override config when flag is set:\n```rust\nif args.no_events {\n config.sync.fetch_resource_events = false;\n}\n```\n\n## Acceptance Criteria\n- [ ] SyncConfig deserializes `fetchResourceEvents: false` from JSON config\n- [ ] SyncConfig defaults to `fetch_resource_events: true` when field absent\n- [ ] `--no-events` flag parses correctly in CLI\n- [ ] `--no-events` overrides config to false\n- [ ] `cargo test` passes with no regressions\n\n## Files\n- src/core/config.rs (add field to SyncConfig + default fn + Default impl)\n- src/cli/mod.rs (add --no-events to SyncArgs)\n- src/cli/commands/sync.rs (override config when flag set)\n\n## TDD Loop\nRED: tests/config_tests.rs (or inline in config.rs):\n- 
`test_sync_config_fetch_resource_events_default_true` - omit field from JSON, verify default\n- `test_sync_config_fetch_resource_events_explicit_false` - set field false, verify parsed\n- `test_sync_config_no_events_flag` - verify CLI arg parsing\n\nGREEN: Add the field, default fn, Default impl update, CLI flag, and override logic\n\nVERIFY: `cargo test config -- --nocapture && cargo build`\n\n## Edge Cases\n- Ensure serde rename matches camelCase convention used by all other SyncConfig fields\n- The default_true fn may already exist for other fields — check before adding duplicate\n- The --no-events flag must NOT be confused with --no-X negation flags already in CLI (check mod.rs for conflicts)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:31:24.006037Z","created_by":"tayloreernisse","updated_at":"2026-02-03T16:10:20.311986Z","closed_at":"2026-02-03T16:10:20.311939Z","close_reason":"Completed: Added fetch_resource_events bool to SyncConfig with serde rename, default_true, --no-events CLI flag, and config override in sync handler","compaction_level":0,"original_size":0,"labels":["config","gate-1","phase-b"],"dependencies":[{"issue_id":"bd-2e8","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2emv","title":"FrankenTUI integration proof + terminal compat smoke test","description":"## Background\nThis is the critical validation that FrankenTUI works with our setup. A minimal Model trait implementation must compile, render a frame, and handle basic input. Terminal compatibility must be verified in iTerm2 and tmux. 
This proves the toolchain gate before investing in the full implementation.\n\n## Approach\nIn crates/lore-tui/src/app.rs, implement a minimal LoreApp that:\n- implements ftui_runtime::program::Model with type Message = Msg\n- init() returns Cmd::none()\n- update() handles Msg::Quit to return None (exit) and ignores everything else\n- view() renders a simple \"lore TUI\" text centered on screen\n- subscriptions() returns empty vec\n\nAdd a smoke test binary or integration test that:\n- Creates a TerminalSession with ftui test harness\n- Verifies Model::view() produces non-empty output\n- Verifies resize events are handled without panic\n- Tests render in both fullscreen and inline(12) modes\n\nTerminal compat: manually verify ftui demo-showcase renders correctly in iTerm2 and tmux (document results in test notes).\n\n## Acceptance Criteria\n- [ ] LoreApp implements Model trait with Msg as message type\n- [ ] App::fullscreen(lore_app).run() compiles (even if not runnable in CI without a TTY)\n- [ ] App::inline(lore_app, 12).run() compiles\n- [ ] Panic hook installed: terminal restored on crash (crossterm disable_raw_mode + LeaveAlternateScreen)\n- [ ] Crash report written to ~/.local/share/lore/crash-{timestamp}.log with redacted sensitive data\n- [ ] Crash file retention: max 20 files, oldest deleted\n- [ ] ftui demo-showcase renders correctly in iTerm2 (documented)\n- [ ] ftui demo-showcase renders correctly in tmux (documented)\n- [ ] Binary size increase < 5MB over current lore binary\n\n## Files\n- CREATE: crates/lore-tui/src/app.rs (minimal Model impl)\n- MODIFY: crates/lore-tui/src/lib.rs (add install_panic_hook_for_tui, crash report logic)\n- CREATE: crates/lore-tui/src/crash_context.rs (ring buffer stub for crash diagnostics)\n\n## TDD Anchor\nRED: Write test_app_model_compiles that creates LoreApp and calls init(), verifying it returns without error.\nGREEN: Implement minimal LoreApp struct with Model trait.\nVERIFY: cargo test --manifest-path 
crates/lore-tui/Cargo.toml test_app_model\n\n## Edge Cases\n- CI environments have no TTY — tests must use ftui test harness, not actual terminal\n- tmux may not support all ANSI features — FrankenTUI's BOCPD resize coalescing must be verified\n- Panic hook must handle double-panic gracefully (don't panic inside the panic hook)\n- Crash context ring buffer must be lock-free readable from panic hook (signal safety)\n\n## Dependency Context\nUses crate scaffold (Cargo.toml, rust-toolchain.toml) from \"Create lore-tui crate scaffold\" task.\nUses Msg enum and Screen type from \"Implement core types\" task.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:54:52.087021Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:06:53.032980Z","closed_at":"2026-02-12T20:06:53.032792Z","close_reason":"LoreApp Model trait impl: init/update/view + compile-time App::fullscreen/inline assertions. 4 tests, quality gate green.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2emv","depends_on_id":"bd-3ddw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2emv","depends_on_id":"bd-c9gk","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-2ez","title":"Add 'lore count references' command","description":"## Background\n\nThe count command currently supports issues, mrs, discussions, notes, and events. 
This adds 'references' as a new entity type, showing cross-reference totals and breakdowns by reference_type and source_method.\n\n## Codebase Context\n\n- entity_references table (migration 011) with:\n - reference_type CHECK: `'closes' | 'mentioned' | 'related'`\n - source_method CHECK: `'api' | 'note_parse' | 'description_parse'` (**codebase values, NOT spec values**)\n - target_entity_id: NULL for unresolved cross-project refs\n- Count command pattern in src/cli/commands/count.rs: run_count() returns CountResult, handle_count formats output\n- events count already implemented as a special case: run_count_events() in main.rs (line ~829)\n- count.rs has value_parser list for entity arg\n\n## Approach\n\n### 1. Add to CountArgs value_parser in `src/cli/mod.rs`:\n```rust\n#[arg(value_parser = [\"issues\", \"mrs\", \"discussions\", \"notes\", \"events\", \"references\"])]\npub entity: String,\n```\n\n### 2. Add types and query in `src/cli/commands/count.rs`:\n\n```rust\npub struct ReferenceCountResult {\n pub total: i64,\n pub by_type: HashMap, // closes, mentioned, related\n pub by_method: HashMap, // api, note_parse, description_parse\n pub unresolved: i64,\n}\n```\n\n### 3. SQL:\n```sql\nSELECT\n COUNT(*) as total,\n COALESCE(SUM(CASE WHEN reference_type = 'closes' THEN 1 ELSE 0 END), 0) as closes,\n COALESCE(SUM(CASE WHEN reference_type = 'mentioned' THEN 1 ELSE 0 END), 0) as mentioned,\n COALESCE(SUM(CASE WHEN reference_type = 'related' THEN 1 ELSE 0 END), 0) as related,\n COALESCE(SUM(CASE WHEN source_method = 'api' THEN 1 ELSE 0 END), 0) as api,\n COALESCE(SUM(CASE WHEN source_method = 'note_parse' THEN 1 ELSE 0 END), 0) as note_parse,\n COALESCE(SUM(CASE WHEN source_method = 'description_parse' THEN 1 ELSE 0 END), 0) as desc_parse,\n COALESCE(SUM(CASE WHEN target_entity_id IS NULL THEN 1 ELSE 0 END), 0) as unresolved\nFROM entity_references\n```\n\n### 4. 
Human output:\n```\nReferences: 1,234\n By type:\n closes: 456\n mentioned: 678\n related: 100\n By source:\n api: 234\n note_parse: 890\n description_parse: 110\n Unresolved: 45 (3.6%)\n```\n\n### 5. Robot JSON:\n```json\n{\n \"ok\": true,\n \"data\": {\n \"entity\": \"references\",\n \"total\": 1234,\n \"by_type\": { \"closes\": 456, \"mentioned\": 678, \"related\": 100 },\n \"by_method\": { \"api\": 234, \"note_parse\": 890, \"description_parse\": 110 },\n \"unresolved\": 45\n }\n}\n```\n\n### 6. Wire in main.rs handle_count:\nAdd \"references\" branch, similar to the existing \"events\" special case.\n\n## Acceptance Criteria\n\n- [ ] `lore count references` works with human output\n- [ ] `lore --robot count references` returns JSON\n- [ ] by_type uses codebase values: closes, mentioned, related\n- [ ] by_method uses codebase values: api, note_parse, description_parse (NOT spec values)\n- [ ] Unresolved = WHERE target_entity_id IS NULL\n- [ ] Zero references: all counts 0, not error\n- [ ] entity_references table missing (old schema): graceful error with migration suggestion\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/cli/mod.rs` (add \"references\" to value_parser)\n- `src/cli/commands/count.rs` (add count_references + ReferenceCountResult)\n- `src/main.rs` (add \"references\" branch in handle_count)\n\n## TDD Loop\n\nRED: `test_count_references_query` with in-memory DB + migration 011 data\n\nGREEN: Implement query, result type, output.\n\nVERIFY: `cargo test --lib -- count && cargo check --all-targets`\n\n## Edge Cases\n\n- entity_references table doesn't exist (pre-migration-011): catch SQL error, suggest `lore migrate`\n- All references unresolved: unresolved = total\n- New source_method values in future: consider logging unknown 
values","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-02T22:42:43.780303Z","created_by":"tayloreernisse","updated_at":"2026-02-05T19:42:55.459109Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2ez","depends_on_id":"bd-1se","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ez","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-2ez","title":"Add 'lore count references' command","description":"## Background\n\nThe count command currently supports issues, mrs, discussions, notes, and events. This adds 'references' as a new entity type, showing cross-reference totals and breakdowns by reference_type and source_method.\n\n## Codebase Context\n\n- entity_references table (migration 011) with:\n - reference_type CHECK: 'closes' | 'mentioned' | 'related'\n - source_method CHECK: 'api' | 'note_parse' | 'description_parse'\n - target_entity_id: NULL for unresolved cross-project refs\n- Count command pattern in src/cli/commands/count.rs: run_count() returns CountResult, handle_count formats output\n- events count already implemented as a special case: run_count_events() in main.rs (line ~829)\n- count.rs has value_parser list for entity arg\n- 26 migrations exist (001-026). entity_references was introduced in migration 011.\n\n## Approach\n\n### 1. Add to CountArgs value_parser in `src/cli/mod.rs`:\n```rust\n#[arg(value_parser = [\"issues\", \"mrs\", \"discussions\", \"notes\", \"events\", \"references\"])]\npub entity: String,\n```\n\n### 2. Add types and query in `src/cli/commands/count.rs`:\n\n```rust\npub struct ReferenceCountResult {\n pub total: i64,\n pub by_type: HashMap, // closes, mentioned, related\n pub by_method: HashMap, // api, note_parse, description_parse\n pub unresolved: i64,\n}\n```\n\n### 3. 
SQL (single conditional aggregate query — no N+1):\n```sql\nSELECT\n COUNT(*) as total,\n COALESCE(SUM(CASE WHEN reference_type = 'closes' THEN 1 ELSE 0 END), 0) as closes,\n COALESCE(SUM(CASE WHEN reference_type = 'mentioned' THEN 1 ELSE 0 END), 0) as mentioned,\n COALESCE(SUM(CASE WHEN reference_type = 'related' THEN 1 ELSE 0 END), 0) as related,\n COALESCE(SUM(CASE WHEN source_method = 'api' THEN 1 ELSE 0 END), 0) as api,\n COALESCE(SUM(CASE WHEN source_method = 'note_parse' THEN 1 ELSE 0 END), 0) as note_parse,\n COALESCE(SUM(CASE WHEN source_method = 'description_parse' THEN 1 ELSE 0 END), 0) as desc_parse,\n COALESCE(SUM(CASE WHEN target_entity_id IS NULL THEN 1 ELSE 0 END), 0) as unresolved\nFROM entity_references\n```\n\n### 4. Human output:\n```\nReferences: 1,234\n By type:\n closes: 456\n mentioned: 678\n related: 100\n By source:\n api: 234\n note_parse: 890\n description_parse: 110\n Unresolved: 45 (3.6%)\n```\n\n### 5. Robot JSON:\n```json\n{\n \"ok\": true,\n \"data\": {\n \"entity\": \"references\",\n \"total\": 1234,\n \"by_type\": { \"closes\": 456, \"mentioned\": 678, \"related\": 100 },\n \"by_method\": { \"api\": 234, \"note_parse\": 890, \"description_parse\": 110 },\n \"unresolved\": 45\n }\n}\n```\n\n### 6. 
Wire in main.rs handle_count:\nAdd \"references\" branch, similar to the existing \"events\" special case.\n\n## Acceptance Criteria\n\n- [ ] `lore count references` works with human output\n- [ ] `lore --robot count references` returns JSON with {ok, data, meta} envelope\n- [ ] by_type uses codebase values: closes, mentioned, related\n- [ ] by_method uses codebase values: api, note_parse, description_parse\n- [ ] Unresolved = COUNT WHERE target_entity_id IS NULL\n- [ ] Zero references: all counts 0, not error\n- [ ] entity_references table missing (pre-migration-011 schema): graceful error with migration suggestion\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n- [ ] `cargo fmt --check` passes\n\n## Files\n\n- MODIFY: src/cli/mod.rs (add \"references\" to value_parser list)\n- MODIFY: src/cli/commands/count.rs (add count_references() + ReferenceCountResult)\n- MODIFY: src/main.rs (add \"references\" branch in handle_count)\n\n## TDD Anchor\n\nRED: test_count_references_query — in-memory DB with migration 011+, insert 3 entity_references rows (one closes/api, one mentioned/note_parse, one related/api with target_entity_id=NULL), verify all counts.\n\nGREEN: Implement query, result type, output formatters.\n\nVERIFY: cargo test --lib -- count && cargo check --all-targets\n\n## Edge Cases\n\n- entity_references table doesn't exist (pre-migration-011): catch SQL error, return user-friendly message suggesting `lore sync`\n- All references unresolved: unresolved = total, percentage = 100%\n- Division by zero in percentage: guard with `if total > 0`\n- New reference_type/source_method values added in future: they won't appear in breakdown but will be in total — consider logging unknown values\n\n## Dependency Context\n\n- **bd-hu3 / migration 011**: provides the entity_references table with reference_type and source_method CHECK constraints. 
This bead reads from that table — no writes.\n- **count.rs pattern**: run_count() dispatches to entity-specific queries. events already has a special-case function run_count_events() — follow the same pattern for references.","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-02T22:42:43.780303Z","created_by":"tayloreernisse","updated_at":"2026-02-17T16:52:59.706810Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2ez","depends_on_id":"bd-1se","type":"parent-child","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-2ez","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-2ezb","title":"NOTE-2D: Regenerator and dirty tracking for note documents","description":"## Background\nWire note document extraction into the regenerator and add change-aware dirty marking in the ingestion pipeline. When a note's semantic content changes during upsert, it gets queued for document regeneration.\n\n## Approach\n1. Update regenerate_one() in src/documents/regenerator.rs (line 86-91):\n Add match arm: SourceType::Note => extract_note_document(conn, source_id)?\n Add import: use crate::documents::extract_note_document;\n This replaces the temporary unreachable!() from NOTE-2B.\n\n2. Add change-aware dirty marking in src/ingestion/discussions.rs (in upsert loop modified by NOTE-0A):\n After each upsert_note_for_issue call:\n if !note.is_system && outcome.changed_semantics {\n dirty_tracker::mark_dirty_tx(&tx, SourceType::Note, outcome.local_note_id)?;\n }\n Import: use crate::documents::SourceType;\n\n3. Same in src/ingestion/mr_discussions.rs for MR note upserts (after upsert_note call near line 470 area, once NOTE-0A modifies it to return NoteUpsertOutcome).\n\n4. Update test setup helpers:\n - src/documents/regenerator.rs tests: the setup_db() function creates test tables. Add notes + discussions tables so regenerate_one can be tested with SourceType::Note. 
Also update the dirty_sources CHECK constraint in test setup to include 'note'.\n - src/ingestion/dirty_tracker.rs tests: similar test setup_db() update for CHECK constraint.\n\n## Files\n- MODIFY: src/documents/regenerator.rs (add Note match arm at line 90, add import, update test setup_db)\n- MODIFY: src/ingestion/discussions.rs (add dirty marking after upsert loop)\n- MODIFY: src/ingestion/mr_discussions.rs (add dirty marking after upsert)\n- MODIFY: src/ingestion/dirty_tracker.rs (update test setup_db CHECK constraint if present)\n\n## TDD Anchor\nRED: test_regenerate_note_document — create project, issue, discussion, note, mark dirty, call regenerate_dirty_documents, assert document created with source_type='note'.\nGREEN: Add SourceType::Note arm to regenerate_one.\nVERIFY: cargo test regenerate_note_document -- --nocapture\nTests: test_regenerate_note_system_note_deletes (system note in dirty queue → document gets deleted), test_regenerate_note_unchanged (same content hash → no update), test_note_ingestion_idempotent_across_two_syncs (identical re-sync produces no new dirty entries), test_mark_dirty_note_type\n\n## Acceptance Criteria\n- [ ] regenerate_one() handles SourceType::Note via extract_note_document\n- [ ] Changed notes queued as dirty during issue discussion ingestion\n- [ ] Changed notes queued as dirty during MR discussion ingestion\n- [ ] System notes never queued as dirty (is_system guard)\n- [ ] Unchanged notes not re-queued (changed_semantics = false from NOTE-0A)\n- [ ] Second sync of identical data produces no new dirty entries\n- [ ] All 5 tests pass\n\n## Dependency Context\n- Depends on NOTE-0A (bd-3bpk): uses NoteUpsertOutcome.changed_semantics from upsert functions\n- Depends on NOTE-2B (bd-ef0u): SourceType::Note enum variant for dirty marking and match arm\n- Depends on NOTE-2C (bd-18yh): extract_note_document function for the regenerator dispatch\n\n## Edge Cases\n- Note deleted during regeneration: extract_note_document returns 
None → delete_document called (line 93-95 of regenerator.rs)\n- System note in dirty queue (from manual INSERT): extract returns None → document deleted\n- Concurrent sync + regeneration: dirty_tracker uses ON CONFLICT handling","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:14.161688Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:23.852811Z","closed_at":"2026-02-12T18:13:23.852765Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"],"dependencies":[{"issue_id":"bd-2ezb","depends_on_id":"bd-22uw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ezb","depends_on_id":"bd-3o0i","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ezb","depends_on_id":"bd-9wl5","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2f0","title":"[CP1] gi count issues/discussions/notes commands","description":"## Background\n\nThe `gi count` command provides quick counts of entities in the local database. It supports counting issues, MRs, discussions, and notes, with optional filtering by noteable type. 
This enables quick validation that sync is working correctly.\n\n## Approach\n\n### Module: src/cli/commands/count.rs\n\n### Clap Definition\n\n```rust\n#[derive(Args)]\npub struct CountArgs {\n /// Entity type to count\n #[arg(value_parser = [\"issues\", \"mrs\", \"discussions\", \"notes\"])]\n pub entity: String,\n\n /// Filter by noteable type (for discussions/notes)\n #[arg(long, value_parser = [\"issue\", \"mr\"])]\n pub r#type: Option,\n}\n```\n\n### Handler Function\n\n```rust\npub async fn handle_count(args: CountArgs, conn: &Connection) -> Result<()>\n```\n\n### Queries by Entity\n\n**issues:**\n```sql\nSELECT COUNT(*) FROM issues\n```\nOutput: `Issues: 3,801`\n\n**discussions:**\n```sql\n-- Without type filter\nSELECT COUNT(*) FROM discussions\n\n-- With --type=issue\nSELECT COUNT(*) FROM discussions WHERE noteable_type = 'Issue'\n```\nOutput: `Issue Discussions: 1,234`\n\n**notes:**\n```sql\n-- Total and system count\nSELECT COUNT(*), SUM(is_system) FROM notes\n\n-- With --type=issue (join through discussions)\nSELECT COUNT(*), SUM(n.is_system)\nFROM notes n\nJOIN discussions d ON n.discussion_id = d.id\nWHERE d.noteable_type = 'Issue'\n```\nOutput: `Issue Notes: 5,678 (excluding 1,234 system)`\n\n### Output Format\n\n```\nIssues: 3,801\n```\n\n```\nIssue Discussions: 1,234\n```\n\n```\nIssue Notes: 5,678 (excluding 1,234 system)\n```\n\n## Acceptance Criteria\n\n- [ ] `gi count issues` shows total issue count\n- [ ] `gi count discussions` shows total discussion count\n- [ ] `gi count discussions --type=issue` filters to issue discussions\n- [ ] `gi count notes` shows total note count with system note exclusion\n- [ ] `gi count notes --type=issue` filters to issue notes\n- [ ] Numbers formatted with thousands separators (1,234)\n\n## Files\n\n- src/cli/commands/mod.rs (add `pub mod count;`)\n- src/cli/commands/count.rs (create)\n- src/cli/mod.rs (add Count variant to Commands enum)\n\n## TDD Loop\n\nRED:\n```rust\n#[tokio::test] async fn 
count_issues_returns_total()\n#[tokio::test] async fn count_discussions_with_type_filter()\n#[tokio::test] async fn count_notes_excludes_system_notes()\n```\n\nGREEN: Implement handler with queries\n\nVERIFY: `cargo test count`\n\n## Edge Cases\n\n- Zero entities - show \"Issues: 0\"\n- --type flag invalid for issues/mrs - ignore or error\n- All notes are system notes - show \"Notes: 0 (excluding 1,234 system)\"","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-25T17:02:38.360495Z","created_by":"tayloreernisse","updated_at":"2026-01-25T23:01:37.084627Z","closed_at":"2026-01-25T23:01:37.084568Z","close_reason":"Implemented gi count command with issues/discussions/notes support, format_number helper, and system note exclusion","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2f0","depends_on_id":"bd-208","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2f2","title":"Implement timeline human output renderer","description":"## Background\n\nThis bead implements the human-readable (non-robot) output renderer for `lore timeline`. 
It takes a collection of TimelineEvents and renders them as a colored, chronological timeline in the terminal.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 3.4 (Human Output Format).\n\n## Codebase Context\n\n- Colored output pattern: src/cli/commands/show.rs uses `colored` crate for terminal styling\n- Existing formatters: `print_show_issue()`, `print_show_mr()`, `print_list_issues()`\n- TimelineEvent model (bd-20e): timestamp, entity_type, entity_iid, project_path, event_type, summary, actor, url, is_seed\n- TimelineEventType enum (bd-20e): Created, StateChanged, LabelAdded, LabelRemoved, MilestoneSet, MilestoneRemoved, Merged, NoteEvidence, CrossReferenced\n- Expansion provenance: expanded entities have `via` info (from which seed, what edge type)\n- Convention: all output functions take `&[TimelineEvent]` and metadata, not raw DB results\n\n## Approach\n\nCreate `src/cli/commands/timeline.rs`:\n\n```rust\nuse colored::Colorize;\nuse crate::core::timeline::{TimelineEvent, TimelineEventType, TimelineQueryResult};\n\npub fn print_timeline(result: &TimelineQueryResult) {\n // Header\n println\\!();\n println\\!(\"{}\", format\\!(\"Timeline: \\\"{}\\\" ({} events across {} entities)\",\n result.query, result.events.len(), result.total_entities).bold());\n println\\!(\"{}\", \"─\".repeat(60));\n println\\!();\n\n // Events\n for event in &result.events {\n print_timeline_event(event);\n }\n\n // Footer\n println\\!();\n println\\!(\"{}\", \"─\".repeat(60));\n print_timeline_footer(result);\n}\n\nfn print_timeline_event(event: &TimelineEvent) {\n let date = format_date(event.timestamp);\n let tag = format_event_tag(&event.event_type);\n let entity = format_entity_ref(event.entity_type.as_str(), event.entity_iid);\n let actor = event.actor.as_deref().map(|a| format\\!(\"@{a}\")).unwrap_or_default();\n let expanded_marker = if event.is_seed { \"\" } else { \" [expanded]\" };\n\n println\\!(\"{date} {tag:10} {entity:6} {summary:40} 
{actor}{expanded_marker}\",\n summary = &event.summary);\n\n // Extra lines for specific event types\n match &event.event_type {\n TimelineEventType::NoteEvidence { snippet, .. } => {\n // Show snippet indented, wrapped to ~70 chars\n for line in wrap_text(snippet, 70) {\n println\\!(\" \\\"{line}\\\"\");\n }\n }\n TimelineEventType::Created => {\n // Could show labels if available in details\n }\n _ => {}\n }\n}\n```\n\n### Event Tag Colors:\n| Tag | Color |\n|-----|-------|\n| CREATED | green |\n| CLOSED | red |\n| REOPENED | yellow |\n| MERGED | cyan |\n| LABEL | blue |\n| MILESTONE | magenta |\n| NOTE | white/dim |\n| REF | dim |\n\n### Date Format:\n```\n2024-03-15 CREATED #234 Migrate to OAuth2 @alice\n```\nUse `YYYY-MM-DD` for dates. Group consecutive same-day events visually.\n\nAdd `pub mod timeline;` to `src/cli/commands/mod.rs` and re-export `print_timeline`.\n\n## Acceptance Criteria\n\n- [ ] `print_timeline()` renders header with query, event count, entity count\n- [ ] Events displayed chronologically with: date, tag, entity ref, summary, actor\n- [ ] Expanded entities marked with [expanded] suffix\n- [ ] NoteEvidence events show snippet text indented and quoted\n- [ ] Tags colored by event type\n- [ ] Footer shows seed entities and expansion info\n- [ ] Module registered in src/cli/commands/mod.rs\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/cli/commands/timeline.rs` (NEW)\n- `src/cli/commands/mod.rs` (add `pub mod timeline;` and re-export `print_timeline`)\n\n## TDD Loop\n\nNo unit tests for terminal rendering. 
Verify visually:\n\n```bash\ncargo check --all-targets\n# After full pipeline: lore timeline \"some query\"\n```\n\n## Edge Cases\n\n- Empty result: print \"No events found for query.\" and exit 0\n- Very long summaries: truncate to 60 chars with \"...\"\n- NoteEvidence snippets: wrap at 70 chars, cap at 4 lines\n- Null actors (system events): show no @username\n- Entity types: # for issues, \\! for MRs (GitLab convention)\n","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:33:28.326026Z","created_by":"tayloreernisse","updated_at":"2026-02-06T13:49:10.580508Z","closed_at":"2026-02-06T13:49:10.580438Z","close_reason":"Implemented print_timeline() human renderer in src/cli/commands/timeline.rs with colored chronological output, event tags, entity refs, evidence note snippets, and footer summary","compaction_level":0,"original_size":0,"labels":["cli","gate-3","phase-b"],"dependencies":[{"issue_id":"bd-2f2","depends_on_id":"bd-3as","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2f2","depends_on_id":"bd-ike","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -139,13 +147,13 @@ {"id":"bd-2iqk","title":"Implement Doctor + Stats screens","description":"## Background\nDoctor shows environment health checks (config, auth, DB, Ollama). Stats shows database statistics (entity counts, index sizes, FTS coverage). 
Both are informational screens using ftui JsonView or simple table layouts.\n\n## Approach\nState:\n- DoctorState: checks (Vec), overall_status (Healthy|Warning|Error)\n- StatsState: entity_stats (EntityStats), index_stats (IndexStats), fts_stats (FtsStats)\n\nAction:\n- run_doctor(config, conn) -> Vec: reuses existing lore doctor logic\n- fetch_stats(conn) -> StatsData: reuses existing lore stats logic\n\nView:\n- Doctor: vertical list of health checks with pass/fail/warn indicators\n- Stats: table of entity counts, index sizes, FTS document count, embedding coverage\n\n## Acceptance Criteria\n- [ ] Doctor shows config, auth, DB, and Ollama health status\n- [ ] Stats shows entity counts matching lore --robot stats output\n- [ ] Both screens accessible via navigation (gd for Doctor)\n- [ ] Health check results color-coded: green pass, yellow warn, red fail\n\n## Files\n- CREATE: crates/lore-tui/src/state/doctor.rs\n- CREATE: crates/lore-tui/src/state/stats.rs\n- CREATE: crates/lore-tui/src/view/doctor.rs\n- CREATE: crates/lore-tui/src/view/stats.rs\n- MODIFY: crates/lore-tui/src/action.rs (add run_doctor, fetch_stats)\n\n## TDD Anchor\nRED: Write test_fetch_stats_counts that creates DB with known data, asserts fetch_stats returns correct counts.\nGREEN: Implement fetch_stats with COUNT queries.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_stats\n\n## Edge Cases\n- Ollama not running: Doctor shows warning, not error (optional dependency)\n- Very large databases: stats queries should be fast (use shadow tables for FTS count)\n\n## Dependency Context\nUses existing doctor and stats logic from lore CLI commands.\nUses DbManager from \"Implement DbManager\" 
task.","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-12T17:02:21.744226Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.357165Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2iqk","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2iqk","depends_on_id":"bd-2x2h","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2jzn","title":"Migration 021: Add status columns to issues table","description":"## Background\nGitLab issues have work item status (To do, In progress, Done, Won't do, Duplicate) only available via GraphQL. We need 5 nullable columns on the issues table to store this data after enrichment. The status_synced_at column tracks when enrichment last wrote/cleared each row (ms epoch UTC).\n\n## Approach\nCreate a new SQL migration file and register it in the MIGRATIONS array. SQLite ALTER TABLE ADD COLUMN is non-destructive — existing rows get NULL defaults. 
Add a compound index for --status filter performance.\n\n## Files\n- migrations/021_work_item_status.sql (NEW)\n- src/core/db.rs (add entry to MIGRATIONS array)\n\n## Implementation\n\nmigrations/021_work_item_status.sql:\n ALTER TABLE issues ADD COLUMN status_name TEXT;\n ALTER TABLE issues ADD COLUMN status_category TEXT;\n ALTER TABLE issues ADD COLUMN status_color TEXT;\n ALTER TABLE issues ADD COLUMN status_icon_name TEXT;\n ALTER TABLE issues ADD COLUMN status_synced_at INTEGER;\n CREATE INDEX IF NOT EXISTS idx_issues_project_status_name ON issues(project_id, status_name);\n\nIn src/core/db.rs, add as last entry in MIGRATIONS array:\n (\"021\", include_str!(\"../../migrations/021_work_item_status.sql\")),\nLATEST_SCHEMA_VERSION is computed as MIGRATIONS.len() as i32 — auto-becomes 21.\n\n## Acceptance Criteria\n- [ ] Migration file exists at migrations/021_work_item_status.sql\n- [ ] MIGRATIONS array has 21 entries ending with (\"021\", ...)\n- [ ] In-memory DB: PRAGMA table_info(issues) includes all 5 new columns\n- [ ] In-memory DB: PRAGMA index_list(issues) includes idx_issues_project_status_name\n- [ ] Existing rows have NULL for all 5 new columns\n- [ ] cargo check --all-targets passes\n\n## TDD Loop\nRED: test_migration_021_adds_columns, test_migration_021_adds_index\n Pattern: create_connection(Path::new(\":memory:\")) + run_migrations(&conn), then PRAGMA queries\nGREEN: Create SQL file + register in MIGRATIONS\nVERIFY: cargo test test_migration_021\n\n## Edge Cases\n- Migration has 5 columns (including status_synced_at INTEGER), not 4\n- Test project insert uses gitlab_project_id, path_with_namespace, web_url (no name/last_seen_at)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:41:40.806320Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.414434Z","closed_at":"2026-02-11T07:21:33.414387Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 
failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2jzn","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2kop","title":"Implement DbManager (read pool + dedicated writer)","description":"## Background\nThe TUI needs concurrent database access: multiple read queries can run in parallel (e.g., loading dashboard stats while prefetching issue list), but writes must be serialized. The DbManager provides a read pool (3 connections, round-robin) plus a dedicated writer connection, accessed via closures.\n\nThe database uses WAL mode with 5000ms busy_timeout (already configured in lore's create_connection). WAL allows concurrent readers + single writer. The TUI is self-contained — it does NOT detect or react to external CLI sync operations. If someone runs lore sync externally while the TUI is open, WAL prevents conflicts and the TUI's natural re-query on navigation handles stale data implicitly.\n\n## Approach\nCreate `crates/lore-tui/src/db.rs`:\n\n```rust\npub struct DbManager {\n readers: Vec, // 3 connections, WAL mode\n writer: Connection, // dedicated writer\n next_reader: AtomicUsize, // round-robin index\n}\n```\n\n- `DbManager::open(path: &Path) -> Result` — opens 4 connections (3 read + 1 write), all with WAL + busy_timeout via lore::core::db::create_connection\n- `with_reader(&self, f: F) -> Result where F: FnOnce(&Connection) -> Result` — closure-based read access, round-robin selection\n- `with_writer(&self, f: F) -> Result where F: FnOnce(&Connection) -> Result` — closure-based write access (serialized)\n- Reader connections set `PRAGMA query_only = ON` as a safety guard\n- All connections reuse lore's `create_connection()` which sets WAL + busy_timeout + foreign_keys\n\nThe DbManager is created once at app startup and shared (via Arc) across all screen states and action tasks.\n\n## Acceptance Criteria\n- [ ] DbManager opens 3 reader + 1 writer 
connection\n- [ ] Readers use round-robin selection via AtomicUsize\n- [ ] Reader connections have query_only = ON\n- [ ] Writer connection allows INSERT/UPDATE/DELETE\n- [ ] with_reader and with_writer use closure-based access (no connection leaking)\n- [ ] All connections use WAL mode and 5000ms busy_timeout\n- [ ] DbManager is Send + Sync (can be shared via Arc across async tasks)\n- [ ] Unit test: concurrent reads don't block each other\n- [ ] Unit test: write through reader connection fails (query_only guard)\n\n## Files\n- CREATE: crates/lore-tui/src/db.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add pub mod db)\n\n## TDD Anchor\nRED: Write `test_reader_is_query_only` that opens a DbManager on an in-memory DB, attempts an INSERT via with_reader, and asserts it fails.\nGREEN: Implement DbManager with query_only pragma on readers.\nVERIFY: cargo test -p lore-tui db -- --nocapture\n\nAdditional tests:\n- test_writer_allows_mutations\n- test_round_robin_rotates_readers\n- test_dbmanager_is_send_sync (compile-time assert)\n- test_concurrent_reads (spawn threads, all complete without blocking)\n\n## Edge Cases\n- Database file doesn't exist — create_connection handles this (creates new DB)\n- Database locked by external process — busy_timeout handles retry\n- Connection pool exhaustion — not possible with closure-based access (connection is borrowed, not taken)\n- AtomicUsize overflow — wraps around, which is fine for round-robin (modulo 3)\n\n## Dependency Context\nDepends on bd-3ddw (scaffold) for the crate to exist. Uses lore::core::db::create_connection for connection setup. All screen action modules depend on DbManager for data access.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:53:59.708214Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:59:21.852517Z","closed_at":"2026-02-12T19:59:21.852405Z","close_reason":"Implemented DbManager: 3 reader pool (query_only, round-robin) + 1 writer, Mutex-wrapped for Send+Sync. 
7 tests passing, clippy clean.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2kop","depends_on_id":"bd-3ddw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-2kr0","title":"Implement MR List (state + action + view)","description":"## Background\nThe MR List mirrors the Issue List pattern with MR-specific columns (target branch, source branch, draft status, reviewer). Same keyset pagination, snapshot fence, and filter bar DSL.\n\n## Approach\nState (state/mr_list.rs):\n- MrListState: same structure as IssueListState but with MrFilter and MrListRow, plus snapshot_upper_updated_at, filter_hash, peek_visible, peek_content\n- MrFilter: state, author, reviewer, target_branch, source_branch, label, draft (bool), free_text, project_id\n- MrListRow: project_path, iid, title, state, author, reviewer, target_branch, labels, updated_at, draft\n- MrCursor: updated_at, iid for keyset pagination\n- handle_key(): j/k scroll, J/K page, Enter select, / focus filter, Tab sort, g+g top, G bottom, r refresh, Space toggle Quick Peek\n\n**Snapshot fence:** Same pattern as Issue List — store snapshot_upper_updated_at on first load and refresh, filter subsequent pages. Explicit refresh (r) resets.\n\n**filter_hash:** Same pattern as Issue List — filter change resets cursor to page 1.\n\n**Quick Peek (Space key):**\n- Space toggles right-side preview pane (40% width) showing selected MR detail\n- Preview loads asynchronously via TaskSupervisor\n- j/k updates preview for newly selected row\n- Narrow terminals (<100 cols): peek replaces list\n\nAction (action.rs):\n- fetch_mrs(conn, filter, cursor, page_size, clock, snapshot_fence) -> Result: keyset query against merge_requests table. 
Uses idx_mrs_list_default index.\n- fetch_mr_peek(conn, entity_key) -> Result: loads MR detail for Quick Peek preview\n\nView (view/mr_list.rs):\n- render_mr_list(frame, state, area, theme): FilterBar + EntityTable with MR columns\n- When peek_visible: split area horizontally — list (60%) | peek preview (40%)\n- Columns: IID, Title (flex), State, Author, Target, Labels, Updated, Draft indicator\n- Draft MRs shown with muted style and [WIP] tag\n\n## Acceptance Criteria\n- [ ] Keyset pagination works for MR list (same pattern as issues)\n- [ ] Browse snapshot fence prevents rows shifting during concurrent sync\n- [ ] Explicit refresh (r) resets snapshot fence\n- [ ] filter_hash resets cursor on filter change\n- [ ] MR-specific filter fields: draft, reviewer, target_branch, source_branch\n- [ ] Draft MRs visually distinguished with [WIP] indicator\n- [ ] State filter supports: opened, merged, closed, locked, all\n- [ ] Columns: IID, Title, State, Author, Target Branch, Labels, Updated\n- [ ] Enter navigates to MrDetail, Esc returns with state preserved\n- [ ] Space toggles Quick Peek right-side preview pane\n- [ ] Quick Peek loads MR detail asynchronously\n- [ ] j/k in peek mode updates preview for newly selected row\n- [ ] Narrow terminal (<100 cols): peek replaces list\n\n## Files\n- MODIFY: crates/lore-tui/src/state/mr_list.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_mrs, fetch_mr_peek)\n- CREATE: crates/lore-tui/src/view/mr_list.rs\n\n## TDD Anchor\nRED: Write test_fetch_mrs_draft_filter in action.rs that inserts 5 MRs (3 draft, 2 not), calls fetch_mrs with draft=true filter, asserts 3 results.\nGREEN: Implement fetch_mrs with draft filter.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_mrs\n\nAdditional tests:\n- test_mr_snapshot_fence: verify fence excludes newer rows\n- test_mr_filter_hash_reset: verify filter change resets cursor\n\n## Edge Cases\n- MR state \"locked\" is rare but must be handled in 
filter and display\n- Very long branch names: truncate with ellipsis\n- MRs with no reviewer: show \"-\" in reviewer column\n- Quick Peek on empty list: no-op\n- Rapid j/k with peek open: debounce peek loads\n\n## Dependency Context\nUses EntityTable and FilterBar from \"Implement entity table + filter bar widgets\" (bd-18qs).\nUses same keyset pagination pattern from \"Implement Issue List\" (bd-3ei1).\nUses MrListState from \"Implement AppState composition\" (bd-1v9m).\nUses TaskSupervisor for load management from \"Implement TaskSupervisor\" (bd-3le2).\nRequires idx_mrs_list_default index from \"Add required TUI indexes\" (bd-3pm2).","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T16:59:24.070743Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:28.255965Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2kr0","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2kr0","depends_on_id":"bd-3ei1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2kr0","depends_on_id":"bd-3pm2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-2kr0","title":"Implement MR List (state + action + view)","description":"## Background\nThe MR List mirrors the Issue List pattern with MR-specific columns (target branch, source branch, draft status, reviewer). 
Same keyset pagination, snapshot fence, and filter bar DSL.\n\n## Approach\nState (state/mr_list.rs):\n- MrListState: same structure as IssueListState but with MrFilter and MrListRow, plus snapshot_upper_updated_at, filter_hash, peek_visible, peek_content\n- MrFilter: state, author, reviewer, target_branch, source_branch, label, draft (bool), free_text, project_id\n- MrListRow: project_path, iid, title, state, author, reviewer, target_branch, labels, updated_at, draft\n- MrCursor: updated_at, iid for keyset pagination\n- handle_key(): j/k scroll, J/K page, Enter select, / focus filter, Tab sort, g+g top, G bottom, r refresh, Space toggle Quick Peek\n\n**Snapshot fence:** Same pattern as Issue List — store snapshot_upper_updated_at on first load and refresh, filter subsequent pages. Explicit refresh (r) resets.\n\n**filter_hash:** Same pattern as Issue List — filter change resets cursor to page 1.\n\n**Quick Peek (Space key):**\n- Space toggles right-side preview pane (40% width) showing selected MR detail\n- Preview loads asynchronously via TaskSupervisor\n- j/k updates preview for newly selected row\n- Narrow terminals (<100 cols): peek replaces list\n\nAction (action.rs):\n- fetch_mrs(conn, filter, cursor, page_size, clock, snapshot_fence) -> Result: keyset query against merge_requests table. 
Uses idx_mrs_list_default index.\n- fetch_mr_peek(conn, entity_key) -> Result: loads MR detail for Quick Peek preview\n\nView (view/mr_list.rs):\n- render_mr_list(frame, state, area, theme): FilterBar + EntityTable with MR columns\n- When peek_visible: split area horizontally — list (60%) | peek preview (40%)\n- Columns: IID, Title (flex), State, Author, Target, Labels, Updated, Draft indicator\n- Draft MRs shown with muted style and [WIP] tag\n\n## Acceptance Criteria\n- [ ] Keyset pagination works for MR list (same pattern as issues)\n- [ ] Browse snapshot fence prevents rows shifting during concurrent sync\n- [ ] Explicit refresh (r) resets snapshot fence\n- [ ] filter_hash resets cursor on filter change\n- [ ] MR-specific filter fields: draft, reviewer, target_branch, source_branch\n- [ ] Draft MRs visually distinguished with [WIP] indicator\n- [ ] State filter supports: opened, merged, closed, locked, all\n- [ ] Columns: IID, Title, State, Author, Target Branch, Labels, Updated\n- [ ] Enter navigates to MrDetail, Esc returns with state preserved\n- [ ] Space toggles Quick Peek right-side preview pane\n- [ ] Quick Peek loads MR detail asynchronously\n- [ ] j/k in peek mode updates preview for newly selected row\n- [ ] Narrow terminal (<100 cols): peek replaces list\n\n## Files\n- MODIFY: crates/lore-tui/src/state/mr_list.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_mrs, fetch_mr_peek)\n- CREATE: crates/lore-tui/src/view/mr_list.rs\n\n## TDD Anchor\nRED: Write test_fetch_mrs_draft_filter in action.rs that inserts 5 MRs (3 draft, 2 not), calls fetch_mrs with draft=true filter, asserts 3 results.\nGREEN: Implement fetch_mrs with draft filter.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_mrs\n\nAdditional tests:\n- test_mr_snapshot_fence: verify fence excludes newer rows\n- test_mr_filter_hash_reset: verify filter change resets cursor\n\n## Edge Cases\n- MR state \"locked\" is rare but must be handled in 
filter and display\n- Very long branch names: truncate with ellipsis\n- MRs with no reviewer: show \"-\" in reviewer column\n- Quick Peek on empty list: no-op\n- Rapid j/k with peek open: debounce peek loads\n\n## Dependency Context\nUses EntityTable and FilterBar from \"Implement entity table + filter bar widgets\" (bd-18qs).\nUses same keyset pagination pattern from \"Implement Issue List\" (bd-3ei1).\nUses MrListState from \"Implement AppState composition\" (bd-1v9m).\nUses TaskSupervisor for load management from \"Implement TaskSupervisor\" (bd-3le2).\nRequires idx_mrs_list_default index from \"Add required TUI indexes\" (bd-3pm2).","status":"in_progress","priority":2,"issue_type":"task","created_at":"2026-02-12T16:59:24.070743Z","created_by":"tayloreernisse","updated_at":"2026-02-18T19:38:12.922553Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2kr0","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2kr0","depends_on_id":"bd-3ei1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2kr0","depends_on_id":"bd-3pm2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2l3s","title":"Per-note search: search individual comments at note granularity","description":"## Background\nMost knowledge in a GitLab project is buried in discussion threads. Current lore search operates at document level (one doc per issue/MR/discussion). An agent searching for \"we decided to use Redis\" only finds the parent issue, not the specific comment where that decision was stated.\n\n## Current State (Verified 2026-02-12)\n- documents table (migration 007): source_type, source_id, project_id, author_username, label_names, content_text, content_hash, etc. 
NO source_note_id column.\n- source_type values: 'issue', 'merge_request', 'discussion' — discussion docs concatenate all notes into one text blob\n- notes table: 282K rows with individual note body, author, timestamps, is_system flag\n- discussions table: links notes to their parent entity (noteable_type, noteable_id)\n- FTS5 index (documents_fts): operates on coarse document-level text\n- Document generation: src/documents/extractor.rs extracts issue/MR/discussion documents\n- Document regeneration: src/documents/regenerator.rs handles dirty document refresh\n- PRD exists: docs/prd-per-note-search.md with 5 feedback iterations\n\n## Approach\n\n### Schema (Migration 022)\nThis bead owns migration 022. bd-2g50 (data gaps) ships after this and uses migration 023.\n\n```sql\n-- migrations/022_note_documents.sql\nALTER TABLE documents ADD COLUMN source_note_id INTEGER REFERENCES notes(id);\nCREATE INDEX idx_documents_source_note ON documents(source_note_id) WHERE source_note_id IS NOT NULL;\n```\n- source_note_id = NULL for existing entity-level documents (backwards compatible)\n- source_note_id = notes.id for new note-level documents\n\nWire into src/core/db.rs MIGRATIONS array as entry (\"022\", include_str!(\"../../migrations/022_note_documents.sql\")). 
LATEST_SCHEMA_VERSION auto-updates since it's `MIGRATIONS.len() as i32`.\n\n### Document Generation (src/documents/extractor.rs)\nAdd a new extraction function alongside existing `extract_issue_document()` (line 85), `extract_mr_document()` (line 186), `extract_discussion_document()` (line 302):\n\n```rust\npub fn extract_note_documents(\n conn: &Connection,\n project_id: i64,\n) -> Result> {\n // SELECT n.id, n.body, n.author_username, n.created_at, n.updated_at,\n // d.noteable_type, d.noteable_id\n // FROM notes n\n // JOIN discussions d ON n.discussion_id = d.id\n // WHERE n.is_system = 0\n // AND LENGTH(n.body) >= 50\n // AND d.project_id = ?1\n // AND n.id NOT IN (SELECT source_note_id FROM documents WHERE source_note_id IS NOT NULL)\n\n // For each qualifying note:\n // - source_type = 'note'\n // - source_id = note.id (the note's local DB id)\n // - source_note_id = note.id\n // - title = format!(\"Re: {}\", parent_entity_title)\n // - author_username = note.author_username\n // - content_text = note.body\n // - content_hash = sha256(note.body) for deduplication\n}\n```\n\nMinimum note length (50 chars) filters out \"+1\", \"LGTM\", emoji-only notes. is_system=0 filters automated state change notes.\n\nNOTE: The documents table CHECK constraint for source_type needs updating — currently enforces `CHECK (source_type IN ('issue','merge_request','discussion'))`. Migration 022 must also:\n```sql\n-- Drop and recreate the CHECK constraint is not supported in SQLite ALTER TABLE.\n-- Instead, the check is only on INSERT, so we need to handle this:\n-- Option A: Don't add 'note' to CHECK — just insert with source_type='note' and let\n-- SQLite ignore the CHECK on ALTER (it won't — CHECK is enforced).\n-- Option B: Use source_type='discussion' for note docs (semantically wrong).\n-- Option C: Recreate the table (heavy migration).\n-- RECOMMENDED: Use a new migration that drops the CHECK constraint entirely.\n-- SQLite doesn't support ALTER TABLE ... 
DROP CONSTRAINT, so:\n-- CREATE TABLE documents_new (... without CHECK ...);\n-- INSERT INTO documents_new SELECT * FROM documents;\n-- DROP TABLE documents;\n-- ALTER TABLE documents_new RENAME TO documents;\n-- Recreate indexes and triggers.\n-- This is the only correct approach. The CHECK constraint is in migration 007.\n```\n\n### Search Integration\nAdd --granularity flag to search command:\n\n```rust\n// In SearchCliFilters or SearchFilters (src/search/filters.rs:15)\npub granularity: Option, // note | entity (default)\n\n// In FTS query construction (src/search/fts.rs)\n// When granularity = note:\n// AND d.source_note_id IS NOT NULL\n// When granularity = entity (or default):\n// AND d.source_note_id IS NULL (existing behavior)\n```\n\n### Robot Mode Output (note granularity)\n```json\n{\n \"source_type\": \"note\",\n \"title\": \"Re: Switch Health Card\",\n \"parent_type\": \"issue\",\n \"parent_iid\": 3864,\n \"parent_title\": \"Switch Health Card (Throw Times)\",\n \"note_author\": \"teernisse\",\n \"note_created_at\": \"2026-02-01T...\",\n \"discussion_id\": \"abc123\",\n \"snippet\": \"...decided to use once-per-day ingestion from BNSF...\",\n \"score\": 0.87\n}\n```\n\nJoin path for note metadata:\n```sql\nSELECT d.source_note_id, n.author_username, n.created_at,\n disc.gitlab_discussion_id,\n CASE disc.noteable_type\n WHEN 'Issue' THEN 'issue'\n WHEN 'MergeRequest' THEN 'merge_request'\n END as parent_type,\n disc.noteable_id\nFROM documents d\nJOIN notes n ON d.source_note_id = n.id\nJOIN discussions disc ON n.discussion_id = disc.id\nWHERE d.source_note_id IS NOT NULL AND d.id IN (...)\n```\n\n## TDD Loop\nRED: Tests in src/documents/extractor.rs (or new test file):\n- test_note_document_generation: insert issue + discussion + 3 notes (one 10 chars, one 60 chars, one 200 chars), run extract_note_documents, assert 2 note-level documents created (>= 50 chars only)\n- test_note_document_skips_system_notes: insert system note (is_system=1) with 
100-char body, assert no document generated\n- test_note_document_content_hash_dedup: insert note, generate doc, re-run, assert no duplicate created\n- test_note_document_parent_title: assert generated doc title starts with \"Re: \"\n\nTests in src/cli/commands/search.rs:\n- test_search_granularity_note_filter: with note docs in DB, --granularity note returns only note results\n- test_search_granularity_entity_default: default behavior unchanged, does NOT return note docs\n\nGREEN: Add migration, update extractor, add --granularity flag to search\n\nVERIFY:\n```bash\ncargo test note_document && cargo test search_granularity\ncargo clippy --all-targets -- -D warnings\ncargo run --release -- -J search 'ingestion' --granularity note | jq '.data.results[0].parent_iid'\n```\n\n## Acceptance Criteria\n- [ ] Migration 022 adds source_note_id to documents table (nullable, indexed, FK to notes)\n- [ ] Migration 022 handles the source_type CHECK constraint (allows 'note' as valid value)\n- [ ] extract_note_documents creates note-level docs for notes >= 50 chars, non-system\n- [ ] Content hash deduplication prevents duplicate note documents\n- [ ] lore search --granularity note returns note-level results with parent context\n- [ ] lore search (no flag) returns entity-level results only (backwards compatible)\n- [ ] Robot mode includes parent_type, parent_iid, parent_title, note_author, note_created_at\n- [ ] Performance: note-level FTS search across expanded index completes in <200ms\n- [ ] Embedding pipeline handles note-level documents (embed individually, same as entity docs)\n- [ ] lore stats shows note document count separately from entity document count\n\n## Edge Cases\n- Note with only markdown formatting (no text after stripping): skip (LENGTH(body) >= 50 handles most)\n- Note body is a quote of another note (duplicated text): deduplicate via content_hash\n- Very long note (>32KB): apply same truncation as entity documents (src/documents/truncation.rs)\n- Discussion 
with 100+ notes: each becomes its own document (correct behavior)\n- Deleted notes (if tracked): should not generate documents\n- Notes on confidential issues: inherit visibility (future concern, not blocking)\n- source_type CHECK constraint: migration MUST handle this — SQLite enforces CHECK on INSERT, so inserting source_type='note' will fail without updating the constraint\n\n## Files to Modify\n- NEW: migrations/022_note_documents.sql (schema change + CHECK constraint update)\n- src/core/db.rs (wire migration 022 into MIGRATIONS array)\n- src/documents/extractor.rs (add extract_note_documents function)\n- src/documents/mod.rs (export new function)\n- src/search/fts.rs (add granularity filter to FTS queries)\n- src/search/filters.rs (add granularity to SearchFilters at line 15)\n- src/cli/commands/search.rs (--granularity flag, note metadata in SearchResultDisplay)\n- src/cli/commands/stats.rs (show note document count)","status":"closed","priority":1,"issue_type":"feature","created_at":"2026-02-12T15:45:35.465446Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:55:56.774523Z","closed_at":"2026-02-12T16:55:56.774470Z","close_reason":"Replaced by granular beads broken out from docs/prd-per-note-search.md","compaction_level":0,"original_size":0,"labels":["cli-imp","search"],"dependencies":[{"issue_id":"bd-2l3s","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2l3s","depends_on_id":"bd-2g50","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2ldg","title":"WHO: Mode resolution, path helpers, run_who entry point","description":"## Background\n\nCore scaffolding that all 5 query modes depend on. 
Defines the mode discrimination logic, path normalization, path-to-SQL translation (with project-scoped DB probes), time resolution, and the run_who() entry point that dispatches to query functions.\n\n## Approach\n\n### WhoMode enum\n```rust\nenum WhoMode<'a> {\n Expert { path: String }, // owns String (normalization produces new strings)\n Workload { username: &'a str }, // borrows from args\n Reviews { username: &'a str },\n Active,\n Overlap { path: String },\n}\n```\n\n### resolve_mode() discrimination rules:\n1. --path flag always wins -> Expert\n2. --active -> Active\n3. --overlap -> Overlap\n4. positional target with --reviews -> Reviews\n5. positional target containing '/' -> Expert (username never contains /)\n6. positional target without '/' -> Workload (strip @ prefix)\n7. No args -> error with usage examples\n\n### normalize_repo_path(): strips ./, leading /, collapses //, converts \\ to / (Windows paste, only when no / present), trims whitespace\n\n### PathQuery + build_path_query(conn, path, project_id):\n- Struct: `{ value: String, is_prefix: bool }`\n- Trailing / forces directory prefix\n- Root path (no /) without trailing / -> exact match (handles Makefile, LICENSE via --path)\n- Last segment contains . -> heuristic: file (exact)\n- **Two-way DB probe** (project-scoped): when heuristics are ambiguous, probe DB:\n - Probe 1: exact path exists? `SELECT 1 FROM notes WHERE note_type='DiffNote' AND is_system=0 AND position_new_path = ?1 AND (?2 IS NULL OR project_id = ?2) LIMIT 1`\n - Probe 2 (only if exact miss, not forced-dir): prefix exists?\n - Decision: forced_dir -> prefix; exact_exists -> exact; prefix_exists -> prefix; else heuristic\n- **CRITICAL**: escape_like() is ONLY called for prefix (LIKE) matches. 
For exact matches (=), use raw path — LIKE metacharacters (_, %) are not special in = comparisons.\n\n### Result types: WhoRun, WhoResolvedInput (since_mode tri-state: \"default\"/\"explicit\"/\"none\"), WhoResult enum, all 5 mode-specific result structs (see plan Step 2 \"Result Types\")\n\n### run_who() entry: resolve project -> resolve mode -> resolve since -> dispatch to query_* -> return WhoRun\n\n### since_mode semantics:\n- Expert/Reviews/Active/Overlap: default window applies if --since absent -> \"default\"\n- Workload: no default window; --since absent -> \"none\"\n- Any mode with explicit --since -> \"explicit\"\n\n## Files\n\n- `src/cli/commands/who.rs` — all code in this file\n\n## TDD Loop\n\nRED:\n```\ntest_is_file_path_discrimination — resolve_mode for paths/usernames/@/--reviews/--path\ntest_build_path_query — directory/file/root/dotted/underscore/dotless\ntest_build_path_query_exact_does_not_escape — _ in exact path stays raw\ntest_path_flag_dotless_root_file_is_exact — Makefile/Dockerfile via --path\ntest_build_path_query_dotless_subdir_file_uses_db_probe — src/Dockerfile with/without DB data\ntest_build_path_query_probe_is_project_scoped — data in proj 1, query proj 2\ntest_escape_like — normal/underscore/percent/backslash\ntest_normalize_repo_path — ./ / \\\\ // whitespace identity\ntest_lookup_project_path — basic round-trip\n```\n\nGREEN: Implement all functions. 
Query functions can be stubs (todo!()) for now.\nVERIFY: `cargo test -- who`\n\n## Acceptance Criteria\n\n- [ ] resolve_mode correctly discriminates all 7 cases (see tests)\n- [ ] build_path_query returns exact for files, prefix for dirs\n- [ ] build_path_query DB probe is project-scoped (cross-project isolation)\n- [ ] escape_like escapes %, _, \\ correctly\n- [ ] normalize_repo_path handles ./, /, \\\\, //, whitespace\n- [ ] WhoResolvedInput.since_mode is \"none\" for Workload without --since\n\n## Edge Cases\n\n- Dotless files in subdirectories (src/Dockerfile, infra/Makefile) — DB probe catches these, heuristic alone would misclassify as directory\n- Windows path paste (src\\foo\\bar.rs) — convert \\ to / only when no / present\n- LIKE metacharacters in filenames (README_with_underscore.md) — must NOT be escaped for exact match\n- Root files without / (README.md, LICENSE, Makefile) — must use --path flag, positional would treat as username","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:40:11.209288Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.595703Z","closed_at":"2026-02-08T04:10:29.595666Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2ldg","depends_on_id":"bd-2rk9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2lg6","title":"Implement Clock trait (SystemClock + FakeClock)","description":"## Background\nAll relative-time rendering (e.g., \"3h ago\" labels) must use an injected Clock, not wall-clock time. This ensures deterministic snapshot tests and consistent timestamps within a single frame. 
FakeClock lets tests control time precisely.\n\n## Approach\nCreate crates/lore-tui/src/clock.rs with:\n- Clock trait: fn now(&self) -> chrono::DateTime\n- SystemClock: impl Clock using chrono::Utc::now()\n- FakeClock: wraps Arc>>, impl Clock returning the frozen value. Methods: new(fixed_time), advance(duration), set(time)\n- Both cloneable (SystemClock is Copy, FakeClock shares Arc)\n\n## Acceptance Criteria\n- [ ] Clock trait with now() method\n- [ ] SystemClock returns real wall-clock time\n- [ ] FakeClock returns frozen time, advance() moves it forward\n- [ ] FakeClock is Clone (shared Arc)\n- [ ] Tests pass: frozen clock returns same time on repeated calls\n- [ ] Tests pass: advance() moves time forward by exact duration\n\n## Files\n- CREATE: crates/lore-tui/src/clock.rs\n\n## TDD Anchor\nRED: Write test_fake_clock_frozen that creates FakeClock at a fixed time, calls now() twice, asserts both return the same value.\nGREEN: Implement FakeClock with Arc>.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fake_clock\n\n## Edge Cases\n- FakeClock must be Send+Sync for use across Cmd::task threads\n- advance() must handle chrono overflow gracefully (use checked_add)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:54:11.756415Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:48:39.169147Z","closed_at":"2026-02-12T19:48:39.169096Z","close_reason":"Clock trait + SystemClock + FakeClock with 7 tests: frozen time, advance, set, clone-shares-state, Send+Sync, trait object. 
Clippy clean.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2lg6","depends_on_id":"bd-3ddw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2ms","title":"[CP1] Unit tests for transformers","description":"Comprehensive unit tests for issue and discussion transformers.\n\n## Issue Transformer Tests (tests/issue_transformer_tests.rs)\n\n- transforms_gitlab_issue_to_normalized_schema\n- extracts_labels_from_issue_payload\n- handles_missing_optional_fields_gracefully\n- converts_iso_timestamps_to_ms_epoch\n- sets_last_seen_at_to_current_time\n\n## Discussion Transformer Tests (tests/discussion_transformer_tests.rs)\n\n- transforms_discussion_payload_to_normalized_schema\n- extracts_notes_array_from_discussion\n- sets_individual_note_flag_correctly\n- flags_system_notes_with_is_system_true\n- preserves_note_order_via_position_field\n- computes_first_note_at_and_last_note_at_correctly\n- computes_resolvable_and_resolved_status\n\n## Test Setup\n- Load from test fixtures\n- Use serde_json for deserialization\n- Compare against expected NormalizedX structs\n\nFiles: tests/issue_transformer_tests.rs, tests/discussion_transformer_tests.rs\nDone when: All transformer unit tests pass","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:59:04.165187Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:02.015847Z","closed_at":"2026-01-25T17:02:02.015847Z","deleted_at":"2026-01-25T17:02:02.015841Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-2mz","title":"Epic: Gate A - Lexical MVP","description":"## Background\nGate A delivers the lexical search MVP — the foundation that works without sqlite-vec or Ollama. 
It introduces the document layer (documents, document_labels, document_paths), FTS5 indexing, search filters, and the search + stats + generate-docs CLI commands. Gate A is independently shippable — users get working search with FTS5 only.\n\n## Gate A Deliverables\n1. Document generation from issues/MRs/discussions with FTS5 indexing\n2. Lexical search + filters + snippets + lore stats\n\n## Bead Dependencies (execution order)\n1. **bd-3lc** — Rename GiError to LoreError (no deps, enables all subsequent work)\n2. **bd-hrs** — Migration 007 (blocked by bd-3lc)\n3. **bd-221** — Migration 008 FTS5 (blocked by bd-hrs)\n4. **bd-36p** — Document types + extractor module (blocked by bd-3lc)\n5. **bd-18t** — Truncation logic (blocked by bd-36p)\n6. **bd-247** — Issue extraction (blocked by bd-36p, bd-hrs)\n7. **bd-1yz** — MR extraction (blocked by bd-36p, bd-hrs)\n8. **bd-2fp** — Discussion extraction (blocked by bd-36p, bd-hrs, bd-18t)\n9. **bd-1u1** — Document regenerator (blocked by bd-36p, bd-38q, bd-hrs)\n10. **bd-1k1** — FTS5 search (blocked by bd-221)\n11. **bd-3q2** — Search filters (blocked by bd-36p)\n12. **bd-3lu** — Search CLI (blocked by bd-1k1, bd-3q2, bd-36p)\n13. **bd-3qs** — Generate-docs CLI (blocked by bd-1u1, bd-3lu)\n14. **bd-pr1** — Stats CLI (blocked by bd-hrs)\n15. 
**bd-2dk** — Project resolution (blocked by bd-3lc)\n\n## Acceptance Criteria\n- [ ] `lore search \"query\"` returns FTS5 results with snippets\n- [ ] `lore search --type issue --label bug \"query\"` filters correctly\n- [ ] `lore generate-docs` creates documents from all entities\n- [ ] `lore generate-docs --full` regenerates everything\n- [ ] `lore stats` shows document/FTS/queue counts\n- [ ] `lore stats --check` verifies FTS consistency\n- [ ] No sqlite-vec dependency in Gate A","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-30T15:25:09.721108Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:54:44.243610Z","closed_at":"2026-01-30T17:54:44.243562Z","close_reason":"All Gate A sub-beads complete. Lexical MVP delivered: document extraction (issue/MR/discussion), FTS5 indexing, search with filters/snippets/RRF, generate-docs CLI, stats CLI with integrity check/repair.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2mz","depends_on_id":"bd-3lu","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2mz","depends_on_id":"bd-3qs","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2mz","depends_on_id":"bd-pr1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-2n4","title":"Implement trace query: file -> MR -> issue -> discussion chain","description":"## Background\n\nThe trace query builds a chain from file path -> MRs -> issues -> discussions, combining data from mr_file_changes (Gate 4), entity_references (Gate 2), and the existing discussions/notes tables. 
This is the backend for the trace CLI command.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 5.4 (Query Flow Tier 1).\n\n## Codebase Context\n\n- entity_references table (migration 011): source_entity_type, source_entity_id, target_entity_type, target_entity_id, reference_type, source_method\n- mr_file_changes table (migration 016, bd-1oo): merge_request_id, project_id, old_path, new_path, change_type\n- discussions table: issue_id, merge_request_id\n- notes table: discussion_id, author_username, body, created_at, is_system, position_new_path (for DiffNotes)\n- merge_requests table: iid, title, state, author_username, web_url, merged_at, updated_at\n- issues table: iid, title, state, web_url\n- resolve_rename_chain() from bd-1yx (src/core/file_history.rs) provides multi-path matching\n- reference_type values: 'closes', 'mentioned', 'related'\n\n## Approach\n\nCreate `src/core/trace.rs`:\n\n```rust\nuse rusqlite::Connection;\nuse crate::core::file_history::resolve_rename_chain;\nuse crate::core::error::Result;\n\n#[derive(Debug, Clone, Serialize)]\npub struct TraceChain {\n pub merge_request: TraceMr,\n pub issues: Vec,\n pub discussions: Vec,\n}\n\n#[derive(Debug, Clone, Serialize)]\npub struct TraceMr {\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub author_username: String,\n pub web_url: Option,\n pub merged_at: Option,\n pub merge_commit_sha: Option,\n pub file_change_type: String,\n}\n\n#[derive(Debug, Clone, Serialize)]\npub struct TraceIssue {\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub web_url: Option,\n pub reference_type: String, // \"closes\", \"mentioned\", \"related\"\n}\n\n#[derive(Debug, Clone, Serialize)]\npub struct TraceDiscussion {\n pub author_username: String,\n pub body_snippet: String, // truncated to 500 chars\n pub created_at: i64,\n pub is_diff_note: bool, // true if position_new_path matched\n}\n\n#[derive(Debug, Clone, Serialize)]\npub struct TraceResult {\n pub path: String,\n 
pub resolved_paths: Vec,\n pub chains: Vec,\n}\n\npub fn run_trace(\n conn: &Connection,\n project_id: i64,\n path: &str,\n follow_renames: bool,\n include_discussions: bool,\n limit: usize,\n) -> Result {\n // 1. Resolve rename chain (unless !follow_renames)\n let paths = if follow_renames {\n resolve_rename_chain(conn, project_id, path, 10)?\n } else {\n vec![path.to_string()]\n };\n\n // 2. Find MRs via mr_file_changes for all resolved paths\n // Dynamic IN-clause for path set\n // 3. For each MR, find linked issues via entity_references\n // 4. If include_discussions, fetch DiffNote discussions on traced file\n // 5. Order chains by COALESCE(merged_at, updated_at) DESC, apply limit\n}\n```\n\n### SQL for step 2 (find MRs):\n\nBuild dynamic IN-clause placeholders for the resolved path set:\n```sql\nSELECT DISTINCT mr.id, mr.iid, mr.title, mr.state, mr.author_username,\n mr.web_url, mr.merged_at, mr.updated_at, mr.merge_commit_sha,\n mfc.change_type\nFROM mr_file_changes mfc\nJOIN merge_requests mr ON mr.id = mfc.merge_request_id\nWHERE mfc.project_id = ?1\n AND (mfc.new_path IN (...placeholders...) 
OR mfc.old_path IN (...placeholders...))\nORDER BY COALESCE(mr.merged_at, mr.updated_at) DESC\nLIMIT ?N\n```\n\n### SQL for step 3 (linked issues):\n```sql\nSELECT i.iid, i.title, i.state, i.web_url, er.reference_type\nFROM entity_references er\nJOIN issues i ON i.id = er.target_entity_id\nWHERE er.source_entity_type = 'merge_request'\n AND er.source_entity_id = ?1\n AND er.target_entity_type = 'issue'\n```\n\n### SQL for step 4 (DiffNote discussions):\n```sql\nSELECT n.author_username, n.body, n.created_at, n.position_new_path\nFROM notes n\nJOIN discussions d ON d.id = n.discussion_id\nWHERE d.merge_request_id = ?1\n AND n.position_new_path IN (...placeholders...)\n AND n.is_system = 0\nORDER BY n.created_at ASC\n```\n\nRegister in `src/core/mod.rs`: `pub mod trace;`\n\n## Acceptance Criteria\n\n- [ ] run_trace() returns chains ordered by COALESCE(merged_at, updated_at) DESC\n- [ ] Rename-aware: uses all paths from resolve_rename_chain\n- [ ] Issues linked via entity_references (closes, mentioned, related)\n- [ ] DiffNote discussions correctly filtered to traced file paths via position_new_path\n- [ ] Discussion body_snippet truncated to 500 chars\n- [ ] Empty result (file not in any MR) returns TraceResult with empty chains\n- [ ] Limit applies to number of chains (MRs), not total discussions\n- [ ] Module registered in src/core/mod.rs as `pub mod trace;`\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/core/trace.rs` (NEW)\n- `src/core/mod.rs` (add `pub mod trace;`)\n\n## TDD Loop\n\nRED:\n- `test_trace_empty_file` — unknown file returns empty chains\n- `test_trace_finds_mr` — file in mr_file_changes returns chain with correct MR\n- `test_trace_follows_renames` — renamed file finds historical MRs\n- `test_trace_links_issues` — MR with entity_references shows linked issues\n- `test_trace_limits_chains` — limit=1 returns at most 1 chain\n- `test_trace_no_follow_renames` — 
follow_renames=false only matches literal path\n\nTests need in-memory DB with migrations applied through 016 + test fixtures for mr_file_changes, entity_references, discussions, notes.\n\nGREEN: Implement SQL queries and chain assembly.\n\nVERIFY: `cargo test --lib -- trace`\n\n## Edge Cases\n\n- MR with no linked issues: chain has empty issues vec\n- Same issue linked from multiple MRs: appears in each chain independently\n- DiffNote on old_path (before rename): captured via resolved path set\n- include_discussions=false: skip DiffNote query for performance\n- Null merged_at: falls back to updated_at for ordering\n- Dynamic IN-clause: use rusqlite::params_from_iter for parameterized queries\n","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:32.738743Z","created_by":"tayloreernisse","updated_at":"2026-02-05T20:58:17.168662Z","compaction_level":0,"original_size":0,"labels":["gate-5","phase-b","query"],"dependencies":[{"issue_id":"bd-2n4","depends_on_id":"bd-1ht","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2n4","depends_on_id":"bd-3ia","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2n4","depends_on_id":"bd-z94","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-2n4","title":"Implement trace query: file -> MR -> issue -> discussion chain","description":"## Background\n\nThe trace query builds a chain from file path -> MRs -> issues -> discussions, combining data from mr_file_changes (Gate 4), entity_references (Gate 2), and the existing discussions/notes tables. 
This is the backend for the trace CLI command.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 5.4 (Query Flow Tier 1).\n\n## Codebase Context\n\n- entity_references table (migration 011): source_entity_type, source_entity_id, target_entity_type, target_entity_id, reference_type, source_method\n- mr_file_changes table (migration 016, bd-1oo): merge_request_id, project_id, old_path, new_path, change_type\n- discussions table: issue_id, merge_request_id\n- notes table: discussion_id, author_username, body, created_at, is_system, position_new_path (for DiffNotes)\n- merge_requests table: iid, title, state, author_username, web_url, merged_at, updated_at\n- issues table: iid, title, state, web_url\n- resolve_rename_chain() from bd-1yx (src/core/file_history.rs) provides multi-path matching\n- reference_type values: 'closes', 'mentioned', 'related'\n\n## Approach\n\nCreate `src/core/trace.rs`:\n\n```rust\nuse rusqlite::Connection;\nuse crate::core::file_history::resolve_rename_chain;\nuse crate::core::error::Result;\n\n#[derive(Debug, Clone, Serialize)]\npub struct TraceChain {\n pub merge_request: TraceMr,\n pub issues: Vec,\n pub discussions: Vec,\n}\n\n#[derive(Debug, Clone, Serialize)]\npub struct TraceMr {\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub author_username: String,\n pub web_url: Option,\n pub merged_at: Option,\n pub merge_commit_sha: Option,\n pub file_change_type: String,\n}\n\n#[derive(Debug, Clone, Serialize)]\npub struct TraceIssue {\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub web_url: Option,\n pub reference_type: String, // \"closes\", \"mentioned\", \"related\"\n}\n\n#[derive(Debug, Clone, Serialize)]\npub struct TraceDiscussion {\n pub author_username: String,\n pub body_snippet: String, // truncated to 500 chars\n pub created_at: i64,\n pub is_diff_note: bool, // true if position_new_path matched\n}\n\n#[derive(Debug, Clone, Serialize)]\npub struct TraceResult {\n pub path: String,\n 
pub resolved_paths: Vec,\n pub chains: Vec,\n}\n\npub fn run_trace(\n conn: &Connection,\n project_id: i64,\n path: &str,\n follow_renames: bool,\n include_discussions: bool,\n limit: usize,\n) -> Result {\n // 1. Resolve rename chain (unless !follow_renames)\n let paths = if follow_renames {\n resolve_rename_chain(conn, project_id, path, 10)?\n } else {\n vec![path.to_string()]\n };\n\n // 2. Find MRs via mr_file_changes for all resolved paths\n // Dynamic IN-clause for path set\n // 3. For each MR, find linked issues via entity_references\n // 4. If include_discussions, fetch DiffNote discussions on traced file\n // 5. Order chains by COALESCE(merged_at, updated_at) DESC, apply limit\n}\n```\n\n### SQL for step 2 (find MRs):\n\nBuild dynamic IN-clause placeholders for the resolved path set:\n```sql\nSELECT DISTINCT mr.id, mr.iid, mr.title, mr.state, mr.author_username,\n mr.web_url, mr.merged_at, mr.updated_at, mr.merge_commit_sha,\n mfc.change_type\nFROM mr_file_changes mfc\nJOIN merge_requests mr ON mr.id = mfc.merge_request_id\nWHERE mfc.project_id = ?1\n AND (mfc.new_path IN (...placeholders...) 
OR mfc.old_path IN (...placeholders...))\nORDER BY COALESCE(mr.merged_at, mr.updated_at) DESC\nLIMIT ?N\n```\n\n### SQL for step 3 (linked issues):\n```sql\nSELECT i.iid, i.title, i.state, i.web_url, er.reference_type\nFROM entity_references er\nJOIN issues i ON i.id = er.target_entity_id\nWHERE er.source_entity_type = 'merge_request'\n AND er.source_entity_id = ?1\n AND er.target_entity_type = 'issue'\n```\n\n### SQL for step 4 (DiffNote discussions):\n```sql\nSELECT n.author_username, n.body, n.created_at, n.position_new_path\nFROM notes n\nJOIN discussions d ON d.id = n.discussion_id\nWHERE d.merge_request_id = ?1\n AND n.position_new_path IN (...placeholders...)\n AND n.is_system = 0\nORDER BY n.created_at ASC\n```\n\nRegister in `src/core/mod.rs`: `pub mod trace;`\n\n## Acceptance Criteria\n\n- [ ] run_trace() returns chains ordered by COALESCE(merged_at, updated_at) DESC\n- [ ] Rename-aware: uses all paths from resolve_rename_chain\n- [ ] Issues linked via entity_references (closes, mentioned, related)\n- [ ] DiffNote discussions correctly filtered to traced file paths via position_new_path\n- [ ] Discussion body_snippet truncated to 500 chars\n- [ ] Empty result (file not in any MR) returns TraceResult with empty chains\n- [ ] Limit applies to number of chains (MRs), not total discussions\n- [ ] Module registered in src/core/mod.rs as `pub mod trace;`\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/core/trace.rs` (NEW)\n- `src/core/mod.rs` (add `pub mod trace;`)\n\n## TDD Loop\n\nRED:\n- `test_trace_empty_file` — unknown file returns empty chains\n- `test_trace_finds_mr` — file in mr_file_changes returns chain with correct MR\n- `test_trace_follows_renames` — renamed file finds historical MRs\n- `test_trace_links_issues` — MR with entity_references shows linked issues\n- `test_trace_limits_chains` — limit=1 returns at most 1 chain\n- `test_trace_no_follow_renames` — 
follow_renames=false only matches literal path\n\nTests need in-memory DB with migrations applied through 016 + test fixtures for mr_file_changes, entity_references, discussions, notes.\n\nGREEN: Implement SQL queries and chain assembly.\n\nVERIFY: `cargo test --lib -- trace`\n\n## Edge Cases\n\n- MR with no linked issues: chain has empty issues vec\n- Same issue linked from multiple MRs: appears in each chain independently\n- DiffNote on old_path (before rename): captured via resolved path set\n- include_discussions=false: skip DiffNote query for performance\n- Null merged_at: falls back to updated_at for ordering\n- Dynamic IN-clause: use rusqlite::params_from_iter for parameterized queries\n","status":"in_progress","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:32.738743Z","created_by":"tayloreernisse","updated_at":"2026-02-17T19:08:40.226759Z","compaction_level":0,"original_size":0,"labels":["gate-5","phase-b","query"],"dependencies":[{"issue_id":"bd-2n4","depends_on_id":"bd-1ht","type":"parent-child","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-2n4","depends_on_id":"bd-3ia","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-2n4","depends_on_id":"bd-z94","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-2nb","title":"[CP1] Issue ingestion module","description":"Fetch and store issues with cursor-based incremental sync.\n\nImplement ingestIssues(options) → { fetched, upserted, labelsCreated }\n\nLogic:\n1. Get current cursor from sync_cursors\n2. Paginate through issues updated after cursor\n3. Apply local filtering for tuple cursor semantics\n4. For each issue:\n - Store raw payload (compressed)\n - Upsert issue record\n - Extract and upsert labels\n - Link issue to labels via junction\n5. 
Update cursor after each page commit\n\nFiles: src/ingestion/issues.ts\nTests: tests/integration/issue-ingestion.test.ts\nDone when: Issues, labels, issue_labels populated correctly with resumable cursor","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:19:50.701180Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.154318Z","closed_at":"2026-01-25T15:21:35.154318Z","deleted_at":"2026-01-25T15:21:35.154316Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-2nfs","title":"Implement snapshot test infrastructure + terminal compat matrix","description":"## Background\nSnapshot tests ensure deterministic rendering using FakeClock and ftui's test backend. They capture rendered TUI output as styled text and compare against golden files, catching visual regressions without a real terminal. The terminal compatibility matrix is a separate documentation artifact, not an automated test.\n\n## Approach\n\n### Snapshot Infrastructure\n\n**Test Backend**: Use `ftui_harness::TestBackend` (or equivalent from ftui-harness crate) which captures rendered output as a Buffer without needing a real terminal. 
If ftui-harness is not available, create a minimal TestBackend that implements ftui's backend trait and stores cells in a `Vec>`.\n\n**Deterministic Rendering**:\n- Inject FakeClock (from bd-2lg6) to freeze all relative time computations (\"2 hours ago\" always renders the same)\n- Fix terminal size to 120x40 for all snapshot tests\n- Use synthetic DB fixture with known data (same fixture pattern as parity tests)\n\n**Snapshot Capture Flow**:\n```rust\nfn capture_snapshot(app: &LoreApp, size: (u16, u16)) -> String {\n let backend = TestBackend::new(size.0, size.1);\n // Render app.view() to backend\n // Convert buffer cells to plain text with ANSI annotations\n // Return as String\n}\n```\n\n**Golden File Management**:\n- Golden files stored in `crates/lore-tui/tests/snapshots/` as `.snap` files\n- Naming: `{test_name}.snap` (e.g., `dashboard_default.snap`)\n- Update mode: set env var `UPDATE_SNAPSHOTS=1` to overwrite golden files instead of comparing\n- Use `insta` crate (or manual file comparison) for snapshot assertion\n\n**Fixture Data** (synthetic, deterministic):\n- 50 issues (mix of opened/closed/locked states, various labels)\n- 25 MRs (mix of opened/merged/closed/draft)\n- 100 discussions with notes\n- Known timestamps relative to FakeClock's frozen time\n\n### Snapshot Tests\n\nEach test:\n1. Creates in-memory DB with fixture data\n2. Creates LoreApp with FakeClock frozen at 2026-01-15T12:00:00Z\n3. Sets initial screen state\n4. Renders via TestBackend at 120x40\n5. 
Compares output against golden file\n\nTests to implement:\n- `test_dashboard_snapshot`: Dashboard screen with fixture counts and recent activity\n- `test_issue_list_snapshot`: Issue list with default sort, showing state badges and relative times\n- `test_issue_detail_snapshot`: Single issue detail with description and discussion thread\n- `test_mr_list_snapshot`: MR list showing draft indicators and review status\n- `test_search_results_snapshot`: Search results with highlighted matches\n- `test_empty_state_snapshot`: Dashboard with empty DB (zero issues/MRs)\n\n### Terminal Compatibility Matrix (Documentation)\n\nThis is a manual verification checklist, NOT an automated test. Document results in `crates/lore-tui/TERMINAL_COMPAT.md`:\n\n| Feature | iTerm2 | tmux | Alacritty | kitty |\n|---------|--------|------|-----------|-------|\n| True color (RGB) | | | | |\n| Unicode width (CJK) | | | | |\n| Box-drawing chars | | | | |\n| Bold/italic/underline | | | | |\n| Mouse events | | | | |\n| Resize handling | | | | |\n| Alt screen | | | | |\n\nFill in during manual QA, not during automated test implementation.\n\n## Acceptance Criteria\n- [ ] At least 6 snapshot tests pass with golden files committed to repo\n- [ ] All snapshots use FakeClock frozen at 2026-01-15T12:00:00Z\n- [ ] All snapshots render at fixed 120x40 terminal size\n- [ ] Dashboard snapshot matches golden file (deterministic)\n- [ ] Issue list snapshot matches golden file (deterministic)\n- [ ] Empty state snapshot matches golden file\n- [ ] UPDATE_SNAPSHOTS=1 env var overwrites golden files for updates\n- [ ] Golden files are plain text (diffable in version control)\n- [ ] TERMINAL_COMPAT.md template created (to be filled during manual QA)\n\n## Files\n- CREATE: crates/lore-tui/tests/snapshot_tests.rs\n- CREATE: crates/lore-tui/tests/snapshots/ (directory for golden files)\n- CREATE: crates/lore-tui/tests/snapshots/dashboard_default.snap\n- CREATE: 
crates/lore-tui/tests/snapshots/issue_list_default.snap\n- CREATE: crates/lore-tui/tests/snapshots/issue_detail.snap\n- CREATE: crates/lore-tui/tests/snapshots/mr_list_default.snap\n- CREATE: crates/lore-tui/tests/snapshots/search_results.snap\n- CREATE: crates/lore-tui/tests/snapshots/empty_state.snap\n- CREATE: crates/lore-tui/TERMINAL_COMPAT.md (template)\n\n## TDD Anchor\nRED: Write `test_dashboard_snapshot` that creates LoreApp with FakeClock and fixture DB, renders Dashboard at 120x40, asserts output matches `snapshots/dashboard_default.snap`. Fails because golden file does not exist yet.\nGREEN: Render the Dashboard, run with UPDATE_SNAPSHOTS=1 to generate golden file, then run normally to verify match.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml snapshot\n\n## Edge Cases\n- Golden file encoding: always UTF-8, normalize line endings to LF\n- FakeClock must be injected into all components that compute relative time (e.g., \"2 hours ago\")\n- Snapshot diffs on CI: print a clear diff showing expected vs actual when mismatch occurs\n- Fixture data must NOT include non-deterministic values (random IDs, current timestamps)\n- If ftui-harness API changes, TestBackend shim may need updating\n\n## Dependency Context\n- Uses FakeClock from bd-2lg6 (Implement Clock trait)\n- Uses all screen views from Phase 2 (Dashboard, Issue List, MR List, Detail views)\n- Uses TestBackend from ftui-harness crate (or custom implementation)\n- Depends on bd-3h00 (session persistence) per phase ordering — screens must be complete before snapshotting\n- Downstream: bd-nu0d (fuzz tests) and bd-3fjk (race tests) depend on this 
infrastructure","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:03:54.220114Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:38.126586Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2nfs","depends_on_id":"bd-1b6k","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2nfs","depends_on_id":"bd-3h00","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2ni","title":"OBSERV Epic: Phase 2 - Spans + Correlation IDs","description":"Add tracing spans to all sync stages and generate UUID-based run_id for correlation. Every log line within a sync run includes run_id in JSON span context. Nested spans produce correct parent-child chains.\n\nDepends on: Phase 1 (subscriber must support span recording)\nUnblocks: Phase 3 (metrics), Phase 5 (rate limit logging)\n\nFiles: src/cli/commands/sync.rs, src/cli/commands/ingest.rs, src/ingestion/orchestrator.rs, src/documents/regenerator.rs, src/embedding/pipeline.rs, src/main.rs\n\nAcceptance criteria (PRD Section 6.2):\n- Every log line includes run_id in JSON span context\n- Nested spans produce chain: fetch_pages includes parent ingest_issues span\n- run_id is 8-char hex (truncated UUIDv4)\n- Spans visible in -vv stderr output","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-02-04T15:53:08.935218Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:19:38.721297Z","closed_at":"2026-02-04T17:19:38.721241Z","close_reason":"Phase 2 complete: run_id correlation IDs generated at sync/ingest entry, root spans with .instrument() for async, #[instrument] on 5 key pipeline functions","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-2ni","depends_on_id":"bd-2nx","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -155,21 +163,23 @@ {"id":"bd-2og9","title":"Implement entity cache + 
render cache","description":"## Background\nEntity cache provides near-instant detail view reopens during Enter/Esc drill workflows by caching IssueDetail/MrDetail payloads. Render cache prevents per-frame recomputation of expensive render artifacts (markdown to styled text, discussion tree shaping). Both use bounded LRU eviction with selective invalidation.\n\n## Approach\n\n### Entity Cache (entity_cache.rs)\n\n```rust\nuse std::collections::HashMap;\n\npub struct EntityCache {\n entries: HashMap, // value + last-access tick\n capacity: usize,\n tick: u64,\n}\n\nimpl EntityCache {\n pub fn new(capacity: usize) -> Self;\n pub fn get(&mut self, key: &EntityKey) -> Option<&V>; // updates tick\n pub fn put(&mut self, key: EntityKey, value: V); // evicts oldest if at capacity\n pub fn invalidate(&mut self, keys: &[EntityKey]); // selective by key set\n}\n```\n\n- `EntityKey` is `(EntityType, i64)` from core types (bd-c9gk) — e.g., `(EntityType::Issue, 42)`\n- Default capacity: 64 entries (sufficient for typical drill-in/out workflows)\n- LRU eviction: on `put()` when at capacity, find entry with lowest tick and remove it\n- `get()` bumps the access tick to keep recently-accessed entries alive\n- `invalidate()` takes a slice of changed keys (from sync results) and removes only those entries — NOT a blanket clear\n\n### Render Cache (render_cache.rs)\n\n```rust\npub struct RenderCacheKey {\n content_hash: u64, // FxHash of source content\n terminal_width: u16, // width affects line wrapping\n}\n\npub struct RenderCache {\n entries: HashMap,\n capacity: usize,\n}\n\nimpl RenderCache {\n pub fn new(capacity: usize) -> Self;\n pub fn get(&self, key: &RenderCacheKey) -> Option<&V>;\n pub fn put(&mut self, key: RenderCacheKey, value: V);\n pub fn invalidate_width(&mut self, keep_width: u16); // remove entries NOT matching this width\n pub fn invalidate_all(&mut self); // theme change = full clear\n}\n```\n\n- Default capacity: 256 entries\n- Used for: markdown->styled text, 
discussion tree layout, issue body rendering\n- `content_hash` uses `std::hash::Hasher` with FxHash (or std DefaultHasher) on source text\n- `invalidate_width(keep_width)`: on terminal resize, remove entries cached at old width\n- `invalidate_all()`: on theme change, clear everything (colors changed)\n- Both caches are NOT thread-safe (single-threaded TUI event loop). No Arc/Mutex needed.\n\n### Integration Point\nBoth caches live as fields on the main LoreApp struct. Cache miss falls through to normal DB query transparently — the action functions check cache first, query DB on miss, populate cache on return.\n\n## Acceptance Criteria\n- [ ] EntityCache::get returns Some for recently put items\n- [ ] EntityCache::put evicts the least-recently-accessed entry when at capacity\n- [ ] EntityCache::invalidate removes only the specified keys, leaves others intact\n- [ ] EntityCache capacity defaults to 64\n- [ ] RenderCache::get returns Some for matching (hash, width) pair\n- [ ] RenderCache::invalidate_width removes entries with non-matching width\n- [ ] RenderCache::invalidate_all clears everything\n- [ ] RenderCache capacity defaults to 256\n- [ ] Both caches are Send (no Rc, no raw pointers) but NOT required to be Sync\n- [ ] No unsafe code\n\n## Files\n- CREATE: crates/lore-tui/src/entity_cache.rs\n- CREATE: crates/lore-tui/src/render_cache.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add `pub mod entity_cache; pub mod render_cache;`)\n\n## TDD Anchor\nRED: Write `test_entity_cache_lru_eviction` that creates EntityCache with capacity 3, puts 4 items, asserts first item (lowest tick) is evicted and the other 3 remain.\nGREEN: Implement LRU eviction using tick-based tracking.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml entity_cache\n\nAdditional tests:\n- test_entity_cache_get_bumps_tick (accessed item survives eviction over older untouched items)\n- test_entity_cache_invalidate_selective (removes only specified keys)\n- 
test_entity_cache_invalidate_nonexistent_key (no panic)\n- test_render_cache_width_invalidation (entries at old width removed, current width kept)\n- test_render_cache_invalidate_all (empty after call)\n- test_render_cache_capacity_eviction\n\n## Edge Cases\n- Invalidating an EntityKey not in the cache is a no-op (no panic)\n- Zero-capacity cache: all gets return None, all puts are no-ops (degenerate but safe)\n- RenderCacheKey equality: two different strings can have the same hash (collision) — accept this; worst case is a wrong cached render that gets corrected on next invalidation\n- Entity cache should NOT be prewarmed synchronously during sync — sync results just invalidate stale entries, and the next view() call repopulates on demand\n\n## Dependency Context\nDepends on bd-c9gk (core types) for EntityKey type definition.\nBoth caches are integrated into LoreApp (bd-6pmy) as struct fields.\nAction functions (from Phase 2/3 screen beads) check cache before querying DB.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:03:25.520201Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.626204Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2og9","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2og9","depends_on_id":"bd-c9gk","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2px","title":"[CP1] Epic: Issue Ingestion","description":"Ingest all issues, labels, and issue discussions from configured GitLab repositories with resumable cursor-based incremental sync. 
This establishes the core data ingestion pattern reused for MRs in CP2.\n\n## Success Criteria\n- gi ingest --type=issues fetches all issues (count matches GitLab UI)\n- Labels extracted from issue payloads (name-only)\n- Label linkage reflects current GitLab state (removed labels unlinked on re-sync)\n- Issue discussions fetched per-issue (dependent sync)\n- Cursor-based sync is resumable (re-running fetches 0 new items)\n- Discussion sync skips unchanged issues (per-issue watermark)\n- Sync tracking records all runs\n- Single-flight lock prevents concurrent runs\n\n## Internal Gates\n- Gate A: Issues only (cursor + upsert + raw payloads + list/count/show)\n- Gate B: Labels correct (stale-link removal verified)\n- Gate C: Dependent discussion sync (watermark prevents redundant refetch)\n- Gate D: Resumability proof (kill mid-run, rerun; bounded redo)\n\nReference: docs/prd/checkpoint-1.md","status":"tombstone","priority":1,"issue_type":"epic","created_at":"2026-01-25T15:42:13.167698Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.638609Z","closed_at":"2026-01-25T17:02:01.638609Z","deleted_at":"2026-01-25T17:02:01.638606Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"epic","compaction_level":0,"original_size":0} {"id":"bd-2rk9","title":"WHO: CLI skeleton — WhoArgs, Commands::Who, dispatch arm","description":"## Background\n\nWire up the CLI plumbing so `lore who --help` works and dispatch reaches the who module. This is pure boilerplate — no query logic yet.\n\n## Approach\n\n### 1. 
src/cli/mod.rs — WhoArgs struct (after TimelineArgs, ~line 195)\n\n```rust\n#[derive(Parser)]\n#[command(after_help = \"\\x1b[1mExamples:\\x1b[0m\n lore who src/features/auth/ # Who knows about this area?\n lore who @asmith # What is asmith working on?\n lore who @asmith --reviews # What review patterns does asmith have?\n lore who --active # What discussions need attention?\n lore who --overlap src/features/auth/ # Who else is touching these files?\n lore who --path README.md # Expert lookup for a root file\")]\npub struct WhoArgs {\n /// Username or file path (path if contains /)\n pub target: Option,\n\n /// Force expert mode for a file/directory path (handles root files like README.md, Makefile)\n #[arg(long, help_heading = \"Mode\", conflicts_with_all = [\"active\", \"overlap\", \"reviews\"])]\n pub path: Option,\n\n /// Show active unresolved discussions\n #[arg(long, help_heading = \"Mode\", conflicts_with_all = [\"target\", \"overlap\", \"reviews\", \"path\"])]\n pub active: bool,\n\n /// Find users with MRs/notes touching this file path\n #[arg(long, help_heading = \"Mode\", conflicts_with_all = [\"target\", \"active\", \"reviews\", \"path\"])]\n pub overlap: Option,\n\n /// Show review pattern analysis (requires username target)\n #[arg(long, help_heading = \"Mode\", requires = \"target\", conflicts_with_all = [\"active\", \"overlap\", \"path\"])]\n pub reviews: bool,\n\n /// Time window (7d, 2w, 6m, YYYY-MM-DD). Default varies by mode.\n #[arg(long, help_heading = \"Filters\")]\n pub since: Option,\n\n /// Scope to a project (supports fuzzy matching)\n #[arg(short = 'p', long, help_heading = \"Filters\")]\n pub project: Option,\n\n /// Maximum results per section (1..=500)\n #[arg(short = 'n', long = \"limit\", default_value = \"20\",\n value_parser = clap::value_parser!(u16).range(1..=500),\n help_heading = \"Output\")]\n pub limit: u16,\n}\n```\n\n### 2. Commands enum — add Who(WhoArgs) after Timeline, before hidden List\n\n### 3. 
src/cli/commands/mod.rs — add `pub mod who;` and re-exports:\n```rust\npub use who::{run_who, print_who_human, print_who_json, WhoRun};\n```\n\n### 4. src/main.rs — dispatch arm + handler:\n```rust\nSome(Commands::Who(args)) => handle_who(cli.config.as_deref(), args, robot_mode),\n```\n\n### 5. src/cli/commands/who.rs — stub file with signatures that compile\n\n## Files\n\n- `src/cli/mod.rs` — WhoArgs struct + Commands::Who variant\n- `src/cli/commands/mod.rs` — pub mod who + re-exports\n- `src/main.rs` — dispatch arm + handle_who function + imports\n- `src/cli/commands/who.rs` — CREATE stub file\n\n## TDD Loop\n\nRED: `cargo check --all-targets` fails (missing who module)\nGREEN: Create stub who.rs with empty/todo!() implementations, wire up all 4 files\nVERIFY: `cargo check --all-targets && cargo run -- who --help`\n\n## Acceptance Criteria\n\n- [ ] `cargo check --all-targets` passes\n- [ ] `lore who --help` displays all flags with correct grouping (Mode, Filters, Output)\n- [ ] `lore who --active --overlap foo` rejected by clap (conflicts_with)\n- [ ] `lore who --reviews` rejected by clap (requires target)\n- [ ] WhoArgs is pub and importable from lore::cli\n\n## Edge Cases\n\n- conflicts_with_all on --path must NOT include \"target\" (--path is used alongside positional target in some cases... actually no, --path replaces target — check the plan: it conflicts with active/overlap/reviews but NOT target. Wait, looking at the plan: --path does NOT conflict with target. But if both target and --path are provided, --path takes priority in resolve_mode. The clap struct allows both.)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:39:58.436660Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.594923Z","closed_at":"2026-02-08T04:10:29.594882Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. 
All quality gates pass.","compaction_level":0,"original_size":0} +{"id":"bd-2rqs","title":"Dynamic shell completions for file paths (lore complete-path)","description":"## Background\n\nTab-completion for lore commands currently only covers static subcommand/flag names via clap_complete v4 (src/main.rs handle_completions(), line ~1667). Users frequently type file paths (for who --path, file-history) and entity IIDs (for issues, mrs, show) manually. Dynamic completions would allow tab-completing these from the local SQLite database.\n\n**Pattern:** kubectl, gh, docker all use hidden subcommands for dynamic completions. clap_complete v4 has a custom completer API that can shell out to these hidden subcommands.\n\n## Codebase Context\n\n- **Static completions**: Commands::Completions variant in src/cli/mod.rs, handled by handle_completions() in src/main.rs (line ~1667) using clap_complete::generate()\n- **clap_complete v4**: Already in Cargo.toml. Supports custom completer API for dynamic values.\n- **Commands taking IIDs**: IssuesArgs (iid: Option), MrsArgs (iid: Option), Drift (for: EntityRef), Show (hidden, takes entity ref)\n- **path_resolver**: src/core/path_resolver.rs (245 lines). build_path_query() (lines 71-187) and suffix_probe() (lines 192-240) resolve partial paths against mr_file_changes. SuffixResult::Ambiguous(Vec) returns multiple matches — perfect for completions.\n- **who --path**: WhoArgs has `path: Option` field, already uses path_resolver\n- **DB access**: create_connection() from src/core/db.rs, config loading from src/core/config.rs\n- **Performance**: Must complete in <100ms. SQLite queries against indexed columns are sub-ms.\n\n## Approach\n\n### 1. 
Hidden Subcommands (src/cli/mod.rs)\n\nAdd hidden subcommands that query the DB and print completion candidates:\n\n```rust\n/// Hidden: emit file path completions for shell integration\n#[command(name = \"complete-path\", hide = true)]\nCompletePath {\n /// Partial path prefix to complete\n prefix: String,\n /// Project scope\n #[arg(short = 'p', long)]\n project: Option,\n},\n\n/// Hidden: emit issue IID completions\n#[command(name = \"complete-issue\", hide = true)]\nCompleteIssue {\n /// Partial IID prefix\n prefix: String,\n #[arg(short = 'p', long)]\n project: Option,\n},\n\n/// Hidden: emit MR IID completions\n#[command(name = \"complete-mr\", hide = true)]\nCompleteMr {\n /// Partial IID prefix\n prefix: String,\n #[arg(short = 'p', long)]\n project: Option,\n},\n```\n\n### 2. Completion Handlers (src/cli/commands/completions.rs NEW)\n\n```rust\npub fn complete_path(conn: &Connection, prefix: &str, project_id: Option) -> Result> {\n // Use suffix_probe() from path_resolver if prefix looks like a suffix (no leading /)\n // Otherwise: SELECT DISTINCT new_path FROM mr_file_changes WHERE new_path LIKE ?||'%' LIMIT 50\n // Also check old_path for rename awareness\n}\n\npub fn complete_issue(conn: &Connection, prefix: &str, project_id: Option) -> Result> {\n // SELECT iid, title FROM issues WHERE CAST(iid AS TEXT) LIKE ?||'%' ORDER BY updated_at DESC LIMIT 30\n // Output: \"123\\tFix login bug\" (tab-separated for shell description)\n}\n\npub fn complete_mr(conn: &Connection, prefix: &str, project_id: Option) -> Result> {\n // SELECT iid, title FROM merge_requests WHERE CAST(iid AS TEXT) LIKE ?||'%' ORDER BY updated_at DESC LIMIT 30\n // Output: \"456\\tAdd OAuth support\"\n}\n```\n\n### 3. Wire in main.rs\n\nAdd match arms for CompletePath, CompleteIssue, CompleteMr. Each:\n1. Opens DB connection (read-only)\n2. Resolves project if -p given\n3. Calls completion handler\n4. Prints one candidate per line to stdout\n5. Exits 0\n\n### 4. 
Shell Integration\n\nUpdate handle_completions() to generate shell scripts that call the hidden subcommands. For fish:\n```fish\ncomplete -c lore -n '__fish_seen_subcommand_from issues' -a '(lore complete-issue \"\")'\ncomplete -c lore -n '__fish_seen_subcommand_from who' -l path -a '(lore complete-path (commandline -ct))'\n```\n\nSimilar for bash (using `_lore_complete()` function) and zsh.\n\n## Acceptance Criteria\n\n- [ ] `lore complete-path \"src/co\"` prints matching file paths from mr_file_changes\n- [ ] `lore complete-issue \"12\"` prints matching issue IIDs with titles\n- [ ] `lore complete-mr \"45\"` prints matching MR IIDs with titles\n- [ ] All three hidden subcommands respect -p for project scoping\n- [ ] All three complete in <100ms (SQLite indexed queries)\n- [ ] Empty prefix returns recent/popular results (not all rows)\n- [ ] Hidden subcommands don't appear in --help or completions themselves\n- [ ] Shell completion scripts (fish, bash, zsh) call hidden subcommands for dynamic values\n- [ ] Static completions (subcommands, flags) still work as before\n- [ ] No DB connection attempted if DB doesn't exist (graceful degradation — return no completions)\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n- [ ] `cargo fmt --check` passes\n\n## Files\n\n- MODIFY: src/cli/mod.rs (add CompletePath, CompleteIssue, CompleteMr hidden variants)\n- CREATE: src/cli/commands/completions.rs (complete_path, complete_issue, complete_mr handlers)\n- MODIFY: src/cli/commands/mod.rs (add pub mod completions)\n- MODIFY: src/main.rs (match arms for hidden subcommands + update handle_completions shell scripts)\n\n## TDD Anchor\n\nRED:\n- test_complete_path_suffix_match (in-memory DB with mr_file_changes rows, verify suffix matching returns correct paths)\n- test_complete_issue_prefix (in-memory DB with issues, verify IID prefix filtering)\n- test_complete_mr_prefix (same for MRs)\n- test_complete_empty_prefix_returns_recent 
(verify limited results ordered by updated_at DESC)\n\nGREEN: Implement completion handlers with SQL queries.\n\nVERIFY: cargo test --lib -- completions && cargo check --all-targets\n\n## Edge Cases\n\n- DB doesn't exist yet (first run before sync): return empty completions, exit 0 (not error)\n- mr_file_changes empty (sync hasn't run with --fetch-mr-diffs): complete-path returns nothing, no error\n- Very long prefix with no matches: empty output, exit 0\n- Special characters in paths (spaces, brackets): shell quoting handled by completion framework\n- Project ambiguous with -p: exit 18, same as other commands (resolve_project pattern)\n- IID prefix \"0\": return nothing (no issues/MRs have iid=0)\n\n## Dependency Context\n\n- **path_resolver** (src/core/path_resolver.rs): provides suffix_probe() which returns SuffixResult::Exact/Ambiguous/NotFound — reuse for complete-path instead of raw SQL when prefix looks like a suffix\n- **mr_file_changes** (migration 016): provides new_path/old_path columns for file path completions\n- **clap_complete v4** (Cargo.toml): provides generate() for static completions and custom completer API for dynamic shell integration","status":"open","priority":3,"issue_type":"feature","created_at":"2026-02-13T16:31:48.589428Z","created_by":"tayloreernisse","updated_at":"2026-02-17T16:51:21.891406Z","compaction_level":0,"original_size":0,"labels":["cli-ux","gate-4"]} {"id":"bd-2rr","title":"OBSERV: Replace subscriber init with dual-layer setup","description":"## Background\nThis is the core infrastructure bead for Phase 1. It replaces the single-layer subscriber (src/main.rs:44-58) with a dual-layer registry that separates stderr and file concerns. The file layer provides always-on post-mortem data; the stderr layer respects -v flags.\n\n## Approach\nReplace src/main.rs lines 44-58 with a function (e.g., init_tracing()) that:\n\n1. 
Build stderr filter from -v count (or RUST_LOG override):\n```rust\nfn build_stderr_filter(verbose: u8, quiet: bool) -> EnvFilter {\n if let Ok(rust_log) = std::env::var(\"RUST_LOG\") {\n return EnvFilter::new(rust_log);\n }\n if quiet {\n return EnvFilter::new(\"lore=warn,error\");\n }\n match verbose {\n 0 => EnvFilter::new(\"lore=info,warn\"),\n 1 => EnvFilter::new(\"lore=debug,warn\"),\n 2 => EnvFilter::new(\"lore=debug,info\"),\n _ => EnvFilter::new(\"trace,debug\"),\n }\n}\n```\n\n2. Build file filter (always lore=debug,warn unless RUST_LOG set):\n```rust\nfn build_file_filter() -> EnvFilter {\n if let Ok(rust_log) = std::env::var(\"RUST_LOG\") {\n return EnvFilter::new(rust_log);\n }\n EnvFilter::new(\"lore=debug,warn\")\n}\n```\n\n3. Assemble the registry:\n```rust\nlet stderr_layer = fmt::layer()\n .with_target(false)\n .with_writer(SuspendingWriter);\n// Conditionally add .json() based on log_format\n\nlet file_appender = tracing_appender::rolling::daily(log_dir, \"lore\");\nlet (non_blocking, _guard) = tracing_appender::non_blocking(file_appender);\nlet file_layer = fmt::layer()\n .json()\n .with_writer(non_blocking);\n\ntracing_subscriber::registry()\n .with(stderr_layer.with_filter(build_stderr_filter(cli.verbose, cli.quiet)))\n .with(file_layer.with_filter(build_file_filter()))\n .init();\n```\n\nCRITICAL: The non_blocking _guard must be held for the program's lifetime. Store it in main() scope, NOT in the init function. If the guard drops, the file writer thread stops and buffered logs are lost.\n\nCRITICAL: Per-layer filtering requires each .with_filter() to produce a Filtered type. The two layers will have different concrete types (one with json, one without). This is fine -- the registry accepts heterogeneous layers via .with().\n\nWhen --log-format json: wrap stderr_layer with .json() too. This requires conditional construction. 
Two approaches:\n A) Use Box> for dynamic dispatch (simpler, tiny perf hit)\n B) Use an enum wrapper (zero cost but more code)\nRecommend approach A for simplicity. The overhead is one vtable indirection per log event, dwarfed by I/O.\n\nWhen file_logging is false (LoggingConfig.file_logging == false): skip adding the file layer entirely.\n\n## Acceptance Criteria\n- [ ] lore sync writes JSON log lines to ~/.local/share/lore/logs/lore.YYYY-MM-DD.log\n- [ ] lore -v sync shows DEBUG lore::* on stderr, deps at WARN\n- [ ] lore -vv sync shows DEBUG lore::* + INFO deps on stderr\n- [ ] lore -vvv sync shows TRACE everything on stderr\n- [ ] RUST_LOG=lore::gitlab=trace overrides -v for both layers\n- [ ] lore --log-format json sync emits JSON on stderr\n- [ ] -q + -v: -q wins (stderr at WARN+)\n- [ ] -q does NOT affect file layer (still DEBUG+)\n- [ ] File layer does NOT use SuspendingWriter\n- [ ] Non-blocking guard kept alive for program duration\n- [ ] Existing behavior unchanged when no new flags passed\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/main.rs (replace lines 44-58, add init_tracing function or inline)\n\n## TDD Loop\nRED:\n - test_verbosity_filter_construction: assert filter directives for verbose=0,1,2,3\n - test_rust_log_overrides_verbose: set env, assert TRACE not DEBUG\n - test_quiet_overrides_verbose: -q + -v => WARN+\n - test_json_log_output_format: capture file output, parse as JSON\n - test_suspending_writer_dual_layer: no garbled stderr with progress bars\nGREEN: Implement build_stderr_filter, build_file_filter, assemble registry\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- _guard lifetime: if guard is dropped early, buffered log lines are lost. MUST hold in main() scope.\n- Type erasure: stderr layer with/without .json() produces different types. Use Box> or separate init paths.\n- Empty RUST_LOG string: env::var returns Ok(\"\"), which EnvFilter::new(\"\") defaults to TRACE. 
May want to check is_empty().\n- File I/O error on log dir: tracing-appender handles this gracefully (no panic), but logs will be silently lost. The doctor command (bd-2i10) can diagnose this.","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:53:55.577025Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:15:04.384114Z","closed_at":"2026-02-04T17:15:04.384062Z","close_reason":"Replaced single-layer subscriber with dual-layer setup: stderr (human/json, -v controlled) + file (always-on JSON, daily rotation via tracing-appender)","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-2rr","depends_on_id":"bd-17n","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2rr","depends_on_id":"bd-1k4","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2rr","depends_on_id":"bd-1o1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2rr","depends_on_id":"bd-2nx","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2rr","depends_on_id":"bd-gba","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2sr2","title":"Robot sync envelope: status enrichment metadata","description":"## Background\nAgents need machine-readable status enrichment metadata in the robot sync output to detect issues like unsupported GraphQL, partial errors, or enrichment failures. Without this, enrichment problems are invisible to automation.\n\n## Approach\nWire IngestProjectResult status fields into the per-project robot sync JSON. 
Add aggregate error count to top-level summary.\n\n## Files\n- Wherever robot sync output JSON is constructed (likely src/cli/commands/ingest.rs or the sync output serialization path — search for IngestProjectResult -> JSON conversion)\n\n## Implementation\n\nPer-project status_enrichment object in robot sync JSON:\n{\n \"mode\": \"fetched\" | \"unsupported\" | \"skipped\",\n \"reason\": null | \"graphql_endpoint_missing\" | \"auth_forbidden\",\n \"seen\": N,\n \"enriched\": N,\n \"cleared\": N,\n \"without_widget\": N,\n \"partial_errors\": N,\n \"first_partial_error\": null | \"message\",\n \"error\": null | \"message\"\n}\n\nSource fields from IngestProjectResult:\n mode <- status_enrichment_mode\n reason <- status_unsupported_reason\n seen <- statuses_seen\n enriched <- statuses_enriched\n cleared <- statuses_cleared\n without_widget <- statuses_without_widget\n partial_errors <- partial_error_count\n first_partial_error <- first_partial_error\n error <- status_enrichment_error\n\nTop-level sync summary: add status_enrichment_errors: N (count of projects where error is Some)\n\nField semantics:\n mode \"fetched\": enrichment ran (even if 0 statuses or error occurred)\n mode \"unsupported\": 404/403 from GraphQL\n mode \"skipped\": config toggle off\n seen > 0 + enriched == 0: project has issues but none with status\n partial_errors > 0: some pages returned incomplete data\n\n## Acceptance Criteria\n- [ ] Robot sync JSON includes per-project status_enrichment object\n- [ ] All 9 fields present with correct types\n- [ ] mode reflects actual enrichment outcome (fetched/unsupported/skipped)\n- [ ] Top-level status_enrichment_errors count present\n- [ ] Test: full robot sync output validates structure\n\n## TDD Loop\nRED: test_robot_sync_includes_status_enrichment\nGREEN: Wire fields into JSON serialization\nVERIFY: cargo test robot_sync\n\n## Edge Cases\n- Find the exact location where IngestProjectResult is serialized to JSON — may be in a Serialize impl or manual 
json! macro\n- All numeric fields default to 0, all Option fields default to null in JSON\n- mode is always present (never null)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:42:29.127412Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.422233Z","closed_at":"2026-02-11T07:21:33.422193Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2sr2","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2sr2","depends_on_id":"bd-3dum","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2sx","title":"Implement lore embed CLI command","description":"## Background\nThe embed CLI command is the user-facing wrapper for the embedding pipeline. It runs Ollama health checks, selects documents to embed (pending or failed), shows progress, and reports results. This is the standalone command for building embeddings outside of the sync orchestrator.\n\n## Approach\nCreate `src/cli/commands/embed.rs` per PRD Section 4.4.\n\n**IMPORTANT: The embed command is async.** The underlying `embed_documents()` function is `async fn` (uses `FuturesUnordered` for concurrent HTTP to Ollama). The CLI runner must use tokio runtime.\n\n**Core function (async):**\n```rust\npub async fn run_embed(\n config: &Config,\n retry_failed: bool,\n) -> Result\n```\n\n**Pipeline:**\n1. Create OllamaClient from config.embedding (base_url, model, timeout_secs)\n2. Run `client.health_check().await` — fail early with clear error if Ollama unavailable or model missing\n3. Determine selection: `EmbedSelection::RetryFailed` if --retry-failed, else `EmbedSelection::Pending`\n4. 
Call `embed_documents(conn, &client, selection, concurrency, progress_callback).await`\n - `concurrency` param controls max in-flight HTTP requests to Ollama\n - `progress_callback` drives indicatif progress bar\n5. Show progress bar (indicatif) during embedding\n6. Return EmbedResult with counts\n\n**CLI args:**\n```rust\n#[derive(Args)]\npub struct EmbedArgs {\n #[arg(long)]\n retry_failed: bool,\n}\n```\n\n**Output:**\n- Human: \"Embedded 42 documents (15 chunks), 2 errors, 5 skipped (unchanged)\"\n- JSON: `{\"ok\": true, \"data\": {\"embedded\": 42, \"chunks\": 15, \"errors\": 2, \"skipped\": 5}}`\n\n**Tokio integration note:**\nThe embed command runs async code. Either:\n- Use `#[tokio::main]` on main and propagate async through CLI dispatch\n- Or use `tokio::runtime::Runtime::new()` in the embed command handler\n\n## Acceptance Criteria\n- [ ] Command is async (embed_documents is async, health_check is async)\n- [ ] OllamaClient created from config.embedding settings\n- [ ] Health check runs first — clear error if Ollama down (exit code 14)\n- [ ] Clear error if model not found: \"Pull the model: ollama pull nomic-embed-text\" (exit code 15)\n- [ ] Embeds pending documents (no existing embeddings or stale content_hash)\n- [ ] --retry-failed re-attempts documents with last_error\n- [ ] Progress bar shows during embedding (indicatif)\n- [ ] embed_documents called with concurrency parameter\n- [ ] embed_documents called with progress_callback for progress bar\n- [ ] Human + JSON output\n- [ ] `cargo build` succeeds\n\n## Files\n- `src/cli/commands/embed.rs` — new file\n- `src/cli/commands/mod.rs` — add `pub mod embed;`\n- `src/cli/mod.rs` — add EmbedArgs, wire up embed subcommand\n- `src/main.rs` — add embed command handler (async dispatch)\n\n## TDD Loop\nRED: Integration test needing Ollama\nGREEN: Implement run_embed (async)\nVERIFY: `cargo build && cargo test embed`\n\n## Edge Cases\n- No documents in DB: \"No documents to embed\" (not error)\n- All 
documents already embedded and unchanged: \"0 documents to embed (all up to date)\"\n- Ollama goes down mid-embedding: pipeline records errors for remaining docs, returns partial result\n- --retry-failed with no failed docs: \"No failed documents to retry\"","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:34.126482Z","created_by":"tayloreernisse","updated_at":"2026-01-30T18:02:38.633115Z","closed_at":"2026-01-30T18:02:38.633055Z","close_reason":"Embed CLI command fully wired: EmbedArgs, Commands::Embed variant, handle_embed handler, clean build, all tests pass","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2sx","depends_on_id":"bd-am7","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-2tr4","title":"Epic: TUI Phase 1 — Foundation","description":"## Background\nPhase 1 builds the foundational infrastructure that all screens depend on: the full LoreApp Model implementation with key dispatch, navigation stack, task supervisor for async work management, theme configuration, common widgets, and the state/action architecture. Phase 1 deliverables are the skeleton that Phase 2 screens plug into.\n\n## Acceptance Criteria\n- [ ] LoreApp update() dispatches all Msg variants through 5-stage key pipeline\n- [ ] NavigationStack supports push/pop/forward/jump with state preservation\n- [ ] TaskSupervisor manages background tasks with dedup, cancellation, and generation IDs\n- [ ] Theme renders correctly with adaptive light/dark colors\n- [ ] Status bar, breadcrumb, loading, error toast, and help overlay widgets render\n- [ ] CommandRegistry is the single source of truth for keybindings/help/palette\n- [ ] AppState composition with per-screen states and LoadState map\n\n## Scope\nBlocked by Phase 0 (Toolchain Gate). 
Blocks Phase 2 (Core Screens).","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T16:55:02.650495Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.059729Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2tr4","depends_on_id":"bd-1cj0","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-2tr4","title":"Epic: TUI Phase 1 — Foundation","description":"## Background\nPhase 1 builds the foundational infrastructure that all screens depend on: the full LoreApp Model implementation with key dispatch, navigation stack, task supervisor for async work management, theme configuration, common widgets, and the state/action architecture. Phase 1 deliverables are the skeleton that Phase 2 screens plug into.\n\n## Acceptance Criteria\n- [ ] LoreApp update() dispatches all Msg variants through 5-stage key pipeline\n- [ ] NavigationStack supports push/pop/forward/jump with state preservation\n- [ ] TaskSupervisor manages background tasks with dedup, cancellation, and generation IDs\n- [ ] Theme renders correctly with adaptive light/dark colors\n- [ ] Status bar, breadcrumb, loading, error toast, and help overlay widgets render\n- [ ] CommandRegistry is the single source of truth for keybindings/help/palette\n- [ ] AppState composition with per-screen states and LoadState map\n\n## Scope\nBlocked by Phase 0 (Toolchain Gate). Blocks Phase 2 (Core Screens).","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-02-12T16:55:02.650495Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:54:04.268740Z","closed_at":"2026-02-18T18:54:04.268696Z","close_reason":"All 7 acceptance criteria met: 5-stage key pipeline, NavigationStack, TaskSupervisor, Theme, common widgets, CommandRegistry, AppState composition. 177 tests pass. 
Unblocks Phase 2.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2tr4","depends_on_id":"bd-1cj0","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2ug","title":"[CP1] gi ingest --type=issues command","description":"CLI command to orchestrate issue ingestion.\n\n## Module\nsrc/cli/commands/ingest.rs\n\n## Clap Definition\n#[derive(Subcommand)]\npub enum Commands {\n Ingest {\n #[arg(long, value_parser = [\"issues\", \"merge_requests\"])]\n r#type: String,\n \n #[arg(long)]\n project: Option,\n \n #[arg(long)]\n force: bool,\n },\n}\n\n## Implementation\n1. Acquire app lock with heartbeat (respect --force for stale lock)\n2. Create sync_run record (status='running')\n3. For each configured project (or filtered --project):\n - Call orchestrator to ingest issues and discussions\n - Show progress (spinner or progress bar)\n4. Update sync_run (status='succeeded', metrics_json with counts)\n5. Release lock\n\n## Output Format\nIngesting issues...\n\n group/project-one: 1,234 issues fetched, 45 new labels\n\nFetching discussions (312 issues with updates)...\n\n group/project-one: 312 issues → 1,234 discussions, 5,678 notes\n\nTotal: 1,234 issues, 1,234 discussions, 5,678 notes (excluding 1,234 system notes)\nSkipped discussion sync for 922 unchanged issues.\n\n## Error Handling\n- Lock acquisition failure: exit with DatabaseLockError message\n- Network errors: show GitLabNetworkError, exit non-zero\n- Rate limiting: respect backoff, show progress\n\nFiles: src/cli/commands/ingest.rs, src/cli/commands/mod.rs\nTests: tests/integration/sync_runs_tests.rs\nDone when: Full issue + discussion ingestion works 
end-to-end","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T16:57:58.552504Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.875613Z","closed_at":"2026-01-25T17:02:01.875613Z","deleted_at":"2026-01-25T17:02:01.875607Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-2um","title":"[CP1] Epic: Issue Ingestion","description":"Ingest all issues, labels, and issue discussions from configured GitLab repositories with resumable cursor-based incremental sync. This checkpoint establishes the core data ingestion pattern that will be reused for MRs in Checkpoint 2.\n\n## Success Criteria\n- gi ingest --type=issues fetches all issues (count matches GitLab UI)\n- Labels extracted from issue payloads (name-only)\n- Label linkage reflects current GitLab state (removed labels unlinked on re-sync)\n- Issue discussions fetched per-issue (dependent sync)\n- Cursor-based sync is resumable (re-running fetches 0 new items)\n- Discussion sync skips unchanged issues (per-issue watermark)\n- Sync tracking records all runs (sync_runs table)\n- Single-flight lock prevents concurrent runs\n\n## Internal Gates\n- **Gate A**: Issues only - cursor + upsert + raw payloads + list/count/show working\n- **Gate B**: Labels correct - stale-link removal verified; label count matches GitLab\n- **Gate C**: Dependent discussion sync - watermark prevents redundant refetch; concurrency bounded\n- **Gate D**: Resumability proof - kill mid-run, rerun; bounded redo and no redundant discussion refetch\n\n## Reference\ndocs/prd/checkpoint-1.md","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-01-25T17:02:38.075224Z","created_by":"tayloreernisse","updated_at":"2026-01-25T23:27:15.347364Z","closed_at":"2026-01-25T23:27:15.347317Z","close_reason":"CP1 Issue Ingestion complete: all sub-tasks done, 71 tests pass, CLI commands 
working","compaction_level":0,"original_size":0} +{"id":"bd-2uzm","title":"Implement Trace screen (file -> MR -> issue chain drill-down)","description":"## Background\nThe Trace screen answers \"Why was this code introduced?\" by building file -> MR -> issue -> discussion chains. It wraps run_trace() from src/core/trace.rs (added in v0.8.1) in an interactive TUI view where users can drill down into any linked entity. The CLI prints flat output; the TUI makes the chain navigable.\n\nThe core query accepts a file path (with optional :line suffix), resolves renames via BFS, finds MRs that touched the file, links issues via entity_references, and extracts DiffNote discussions. Each result is a TraceChain: MR metadata + linked issues + relevant discussions.\n\n## Data Shapes (from src/core/trace.rs)\n\n```rust\npub struct TraceResult {\n pub path: String,\n pub resolved_paths: Vec, // rename chain via BFS\n pub renames_followed: bool,\n pub trace_chains: Vec,\n pub total_chains: usize,\n}\n\npub struct TraceChain {\n pub mr_iid: i64,\n pub mr_title: String,\n pub mr_state: String, // merged/opened/closed\n pub mr_author: String,\n pub change_type: String, // added/modified/deleted/renamed\n pub merged_at_iso: Option,\n pub updated_at_iso: String,\n pub web_url: Option,\n pub issues: Vec, // linked via entity_references\n pub discussions: Vec, // DiffNote threads on this file\n}\n\npub struct TraceIssue {\n pub iid: i64, pub title: String, pub state: String,\n pub reference_type: String, pub web_url: Option,\n}\n\npub struct TraceDiscussion {\n pub discussion_id: String, pub mr_iid: i64,\n pub author_username: String, pub body: String,\n pub path: String, pub created_at_iso: String,\n}\n```\n\nrun_trace() signature (src/core/trace.rs):\n```rust\npub fn run_trace(\n conn: &Connection,\n project_id: Option,\n path: &str,\n follow_renames: bool,\n include_discussions: bool,\n limit: usize,\n) -> Result\n```\n\nparse_trace_path() (src/cli/commands/trace.rs, made pub by 
bd-1f5b):\n```rust\npub fn parse_trace_path(input: &str) -> (String, Option)\n```\n\n## Approach\n\n**Screen enum** (message.rs):\nAdd Screen::Trace variant (no parameters — path is entered on-screen). Label: \"Trace\". Breadcrumb: \"Trace\".\n\n**Path autocomplete**: Query DISTINCT new_path from mr_file_changes (scoped to project_id if set) for fuzzy matching as user types. Cache results on first focus. SQL:\n```sql\nSELECT DISTINCT new_path FROM mr_file_changes\nWHERE project_id = ?1 ORDER BY new_path\n```\nStore as Vec in TraceState. Filter client-side with case-insensitive substring match.\n\n**State** (state/trace.rs):\n```rust\n#[derive(Debug, Default)]\npub struct TraceState {\n pub path_input: String,\n pub path_focused: bool,\n pub line_filter: Option, // from :line suffix\n pub result: Option,\n pub selected_chain_index: usize,\n pub expanded_chains: HashSet, // multiple can be expanded\n pub follow_renames: bool, // default true\n pub include_discussions: bool, // default true\n pub scroll_offset: u16,\n pub known_paths: Vec, // autocomplete cache\n pub autocomplete_matches: Vec, // filtered suggestions\n pub autocomplete_index: usize,\n}\n```\n\n**Action** (action.rs):\n- fetch_trace(conn, project_id, path, follow_renames, include_discussions, limit) -> Result: calls run_trace() directly from src/core/trace.rs\n- fetch_known_paths(conn, project_id) -> Result, LoreError>: queries mr_file_changes for autocomplete\n\n**View** (view/trace.rs):\n- Top: path input with autocomplete dropdown + toggle indicators [renames: on] [discussions: on]\n- If renames followed: rename chain breadcrumb (old_path -> ... 
-> new_path) in dimmed text\n- Main area: scrollable list of TraceChain entries:\n - Collapsed: MR state icon + !iid + title + author + change_type + date (single line)\n - Expanded: indented sections for linked issues and discussion snippets\n - Issues: state icon + #iid + title + reference_type\n - Discussions: @author + date + body preview (first 2 lines, truncated at 120 chars)\n- Keyboard:\n - j/k: scroll chains\n - Enter: toggle expand/collapse on selected chain\n - Enter on highlighted issue: navigate to IssueDetail(EntityKey)\n - Enter on highlighted MR line: navigate to MrDetail(EntityKey)\n - /: focus path input\n - Tab: cycle autocomplete suggestions when path focused\n - r: toggle follow_renames (re-fetches)\n - d: toggle include_discussions (re-fetches)\n - q: back\n\n**Contextual entry points** (wired from other screens):\n- MR Detail: when cursor is on a file path in the file changes list, t opens Trace pre-filled with that path\n- Issue Detail: if discussion references a file path, t opens Trace for that path\n- Requires MrDetailState and IssueDetailState to expose selected_file_path() -> Option\n\n## Acceptance Criteria\n- [ ] Screen::Trace added to message.rs Screen enum with label \"Trace\" and breadcrumb\n- [ ] TraceState struct with all fields, Default impl\n- [ ] Path input with autocomplete dropdown from mr_file_changes (fuzzy substring match)\n- [ ] :line suffix parsing via parse_trace_path (line_filter stored but used for future highlighting)\n- [ ] Rename chain displayed as breadcrumb when renames_followed is true\n- [ ] TraceChain list with expand/collapse — multiple chains expandable simultaneously\n- [ ] MR state icons: merged (purple), opened (green), closed (red) — matching CLI theme\n- [ ] Enter on issue row navigates to IssueDetail(EntityKey::issue(project_id, iid))\n- [ ] Enter on MR header navigates to MrDetail(EntityKey::mr(project_id, iid))\n- [ ] r toggles follow_renames, d toggles include_discussions — both trigger re-fetch\n- 
[ ] Empty state: \"No trace chains found\" with hint \"Run 'lore sync' to fetch MR file changes\"\n- [ ] Contextual navigation: t on file path in MR Detail opens Trace pre-filled\n- [ ] Registered in command palette (label \"Trace file\", keywords [\"trace\", \"provenance\", \"why\"])\n- [ ] AppState.has_text_focus() updated to include trace.path_focused\n- [ ] AppState.blur_text_focus() updated to include trace.path_focused = false\n\n## Files\n- MODIFY: crates/lore-tui/src/message.rs (add Screen::Trace variant + label + is_detail_or_entity)\n- CREATE: crates/lore-tui/src/state/trace.rs (TraceState struct + Default)\n- MODIFY: crates/lore-tui/src/state/mod.rs (pub mod trace, pub use TraceState, add to AppState, update has_text_focus/blur_text_focus)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_trace, fetch_known_paths)\n- CREATE: crates/lore-tui/src/view/trace.rs (render_trace fn)\n- MODIFY: crates/lore-tui/src/view/mod.rs (add Screen::Trace dispatch arm in render_screen)\n- MODIFY: crates/lore-tui/src/view/mr_detail.rs (add t keybinding for contextual trace — deferred if mr_detail not yet implemented)\n\n## TDD Anchor\nRED: Write test_fetch_trace_returns_chain in action tests. Setup: in-memory DB, insert project, MR, mr_file_changes row (new_path=\"src/main.rs\"), entity_reference linking MR to issue. Call fetch_trace(conn, Some(project_id), \"src/main.rs\", true, true, 50). 
Assert: result.trace_chains.len() == 1, result.trace_chains[0].issues.len() == 1.\nGREEN: Implement fetch_trace calling run_trace from src/core/trace.rs.\nVERIFY: cargo test -p lore-tui trace -- --nocapture\n\nAdditional tests:\n- test_trace_empty_result: path \"nonexistent.rs\" returns total_chains=0\n- test_trace_rename_chain: insert rename chain A->B->C, query A, assert resolved_paths contains all 3\n- test_trace_discussion_toggle: include_discussions=false returns empty discussions vec per chain\n- test_parse_trace_path_with_line: \"src/main.rs:42\" -> (\"src/main.rs\", Some(42))\n- test_parse_trace_path_no_line: \"src/main.rs\" -> (\"src/main.rs\", None)\n- test_autocomplete_filters_paths: known_paths=[\"src/a.rs\",\"src/b.rs\",\"lib/c.rs\"], input=\"src/\" -> matches=[\"src/a.rs\",\"src/b.rs\"]\n\n## Edge Cases\n- File path not in any MR: empty state with sync hint\n- Very long rename chains (>5 paths): show first 2 + \"... N more\" + last path\n- Hundreds of trace chains: limit default 50, show \"showing 50 of N\" footer\n- Path with Windows drive letter (C:/foo.rs): parse_trace_path handles this correctly\n- Autocomplete with thousands of paths: substring filter is O(n) but fast enough for <100k paths\n- Project scope: if global_scope.project_id is set, pass it to run_trace and autocomplete query\n- Contextual entry from MR Detail: if MR Detail screen not yet implemented, defer the t keybinding to a follow-up\n\n## Dependency Context\n- bd-1f5b (blocks): Makes parse_trace_path() pub in src/cli/commands/trace.rs. Without this, TUI must reimplement the parser.\n- src/core/trace.rs: run_trace() is already pub — no changes needed. 
TUI calls it directly.\n- src/core/file_history.rs: resolve_rename_chain() used transitively by run_trace — TUI does not call it directly.\n- Navigation: uses NavigationStack.push(Screen::IssueDetail(key)) and Screen::MrDetail(key) from crates/lore-tui/src/navigation.rs.\n- AppState composition: TraceState added as field in AppState struct (state/mod.rs line ~154-174). has_text_focus and blur_text_focus at lines 194-207 must include trace.path_focused.\n- Contextual entry: requires MrDetailState to expose the currently selected file path. If MR Detail is not yet built, the contextual keybinding is deferred.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-18T18:13:47.076070Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:33:32.709165Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2uzm","depends_on_id":"bd-1f5b","type":"blocks","created_at":"2026-02-18T18:14:33.294262Z","created_by":"tayloreernisse"},{"issue_id":"bd-2uzm","depends_on_id":"bd-nwux","type":"parent-child","created_at":"2026-02-18T18:13:47.079630Z","created_by":"tayloreernisse"}]} {"id":"bd-2w1p","title":"Add half-life fields and config validation to ScoringConfig","description":"## Background\nThe flat-weight ScoringConfig (config.rs:155-167) has only 3 fields: author_weight (25), reviewer_weight (10), note_bonus (1). Time-decay scoring needs half-life parameters, a reviewer split (participated vs assigned-only), closed MR discount, substantive-note threshold, and bot filtering.\n\n## Approach\nExtend the existing ScoringConfig struct at config.rs:155. Add new fields with #[serde(default)] and camelCase rename to match existing convention (authorWeight, reviewerWeight, noteBonus). Extend the Default impl at config.rs:169 with new defaults. 
Extend validate_scoring() at config.rs:274-291 (currently validates 3 weights >= 0).\n\n### New fields to add:\n```rust\n#[serde(rename = \"reviewerAssignmentWeight\")]\npub reviewer_assignment_weight: i64, // default: 3\n#[serde(rename = \"authorHalfLifeDays\")]\npub author_half_life_days: u32, // default: 180\n#[serde(rename = \"reviewerHalfLifeDays\")]\npub reviewer_half_life_days: u32, // default: 90\n#[serde(rename = \"reviewerAssignmentHalfLifeDays\")]\npub reviewer_assignment_half_life_days: u32, // default: 45\n#[serde(rename = \"noteHalfLifeDays\")]\npub note_half_life_days: u32, // default: 45\n#[serde(rename = \"closedMrMultiplier\")]\npub closed_mr_multiplier: f64, // default: 0.5\n#[serde(rename = \"reviewerMinNoteChars\")]\npub reviewer_min_note_chars: u32, // default: 20\n#[serde(rename = \"excludedUsernames\")]\npub excluded_usernames: Vec, // default: vec![]\n```\n\n### Validation additions to validate_scoring() (config.rs:274):\n- All *_half_life_days must be > 0 AND <= 3650\n- All *_weight / *_bonus must be >= 0\n- reviewer_assignment_weight must be >= 0\n- closed_mr_multiplier must be finite (not NaN/Inf) AND in (0.0, 1.0]\n- reviewer_min_note_chars must be >= 0 AND <= 4096\n- excluded_usernames entries must be non-empty strings\n- Return LoreError::ConfigInvalid with clear message on failure\n\n## TDD Loop\n\n### RED (write first):\n```rust\n#[test]\nfn test_config_validation_rejects_zero_half_life() {\n let mut cfg = ScoringConfig::default();\n assert!(validate_scoring(&cfg).is_ok());\n cfg.author_half_life_days = 0;\n assert!(validate_scoring(&cfg).is_err());\n cfg.author_half_life_days = 180;\n cfg.reviewer_half_life_days = 0;\n assert!(validate_scoring(&cfg).is_err());\n cfg.reviewer_half_life_days = 90;\n cfg.closed_mr_multiplier = 0.0;\n assert!(validate_scoring(&cfg).is_err());\n cfg.closed_mr_multiplier = 1.5;\n assert!(validate_scoring(&cfg).is_err());\n cfg.closed_mr_multiplier = 1.0;\n 
assert!(validate_scoring(&cfg).is_ok());\n}\n\n#[test]\nfn test_config_validation_rejects_absurd_half_life() {\n let mut cfg = ScoringConfig::default();\n cfg.author_half_life_days = 5000; // > 3650 cap\n assert!(validate_scoring(&cfg).is_err());\n cfg.author_half_life_days = 3650; // boundary: valid\n assert!(validate_scoring(&cfg).is_ok());\n cfg.reviewer_min_note_chars = 5000; // > 4096 cap\n assert!(validate_scoring(&cfg).is_err());\n cfg.reviewer_min_note_chars = 4096; // boundary: valid\n assert!(validate_scoring(&cfg).is_ok());\n}\n\n#[test]\nfn test_config_validation_rejects_nan_multiplier() {\n let mut cfg = ScoringConfig::default();\n cfg.closed_mr_multiplier = f64::NAN;\n assert!(validate_scoring(&cfg).is_err());\n cfg.closed_mr_multiplier = f64::INFINITY;\n assert!(validate_scoring(&cfg).is_err());\n cfg.closed_mr_multiplier = f64::NEG_INFINITY;\n assert!(validate_scoring(&cfg).is_err());\n}\n```\n\n### GREEN: Add fields to struct + Default impl + validation rules.\n### VERIFY: cargo test -p lore -- test_config_validation\n\n## Acceptance Criteria\n- [ ] test_config_validation_rejects_zero_half_life passes\n- [ ] test_config_validation_rejects_absurd_half_life passes\n- [ ] test_config_validation_rejects_nan_multiplier passes\n- [ ] ScoringConfig::default() returns correct values for all 11 fields\n- [ ] cargo check --all-targets passes\n- [ ] Existing config deserialization works (#[serde(default)] fills new fields)\n- [ ] validate_scoring() is pub(crate) or accessible from config.rs test module\n\n## Files\n- MODIFY: src/core/config.rs (struct at line 155, Default impl at line 169, validate_scoring at line 274)\n\n## Edge Cases\n- f64 comparison: use .is_finite() for NaN/Inf check, > 0.0 and <= 1.0 for range\n- Vec default: use Vec::new()\n- Upper bounds prevent silent misconfig (5000-day half-life effectively disables 
decay)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T16:59:14.654469Z","created_by":"tayloreernisse","updated_at":"2026-02-12T21:01:21.744442Z","closed_at":"2026-02-12T21:01:21.744205Z","close_reason":"Completed: added 8 new fields to ScoringConfig, extended Default impl, and added validation for half-life bounds, closed_mr_multiplier, reviewer_min_note_chars, and excluded_usernames. All 19 config tests pass.","compaction_level":0,"original_size":0,"labels":["scoring"]} {"id":"bd-2wpf","title":"Ship timeline CLI with human and robot renderers","description":"## Problem\nThe timeline pipeline (5-stage SEED->HYDRATE->EXPAND->COLLECT->RENDER) is implemented but not wired to the CLI. This is one of lore's most unique features — chronological narrative reconstruction from resource events, cross-references, and notes — and it is invisible to users and agents.\n\n## Current State\n- Types defined: src/core/timeline.rs (TimelineEvent, TimelineSeed, etc.)\n- Seed stage: src/core/timeline_seed.rs (FTS search -> seed entities)\n- Expand stage: src/core/timeline_expand.rs (cross-reference expansion)\n- Collect stage: src/core/timeline_collect.rs (event gathering from resource events + notes)\n- CLI command structure: src/cli/commands/timeline.rs (exists but incomplete)\n- Remaining beads: bd-1nf (CLI wiring), bd-2f2 (human renderer), bd-dty (robot renderer)\n\n## Acceptance Criteria\n1. lore timeline 'authentication refactor' works end-to-end:\n - Searches for matching entities (SEED)\n - Fetches raw data (HYDRATE)\n - Expands via cross-references (EXPAND with --depth flag, default 1)\n - Collects events chronologically (COLLECT)\n - Renders human-readable narrative (RENDER)\n2. 
Human renderer output:\n - Chronological event stream with timestamps\n - Color-coded by event type (state change, label change, note, reference)\n - Actor names with role context\n - Grouped by day/week for readability\n - Evidence snippets from notes (first 200 chars)\n3. Robot renderer output (--robot / -J):\n - JSON array of events with: timestamp, event_type, actor, entity_ref, body/snippet, metadata\n - Seed entities listed separately (what matched the query)\n - Expansion depth metadata (how far from seed)\n - Total event count and time range\n4. CLI flags:\n - --project (scope to project)\n - --since (time range)\n - --depth N (expansion depth, default 1, max 3)\n - --expand-mentions (follow mention references, not just closes/related)\n - -n LIMIT (max events)\n5. Performance: timeline for a single issue with 50 events renders in <200ms\n\n## Relationship to Existing Beads\nThis supersedes/unifies: bd-1nf (CLI wiring), bd-2f2 (human renderer), bd-dty (robot renderer). Those can be closed when this ships.\n\n## Files to Modify\n- src/cli/commands/timeline.rs (CLI wiring, flag parsing, output dispatch)\n- src/core/timeline.rs (may need RENDER stage types)\n- New: src/cli/render/timeline_human.rs or inline in timeline.rs\n- New: src/cli/render/timeline_robot.rs or inline in timeline.rs","status":"closed","priority":1,"issue_type":"feature","created_at":"2026-02-12T15:46:16.246889Z","created_by":"tayloreernisse","updated_at":"2026-02-12T15:50:43.885226Z","closed_at":"2026-02-12T15:50:43.885180Z","close_reason":"Already implemented: run_timeline(), print_timeline(), print_timeline_json_with_meta(), handle_timeline() all exist and are fully wired. 
Code audit 2026-02-12.","compaction_level":0,"original_size":0,"labels":["cli","cli-imp"],"dependencies":[{"issue_id":"bd-2wpf","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2x2h","title":"Implement Sync screen (running + summary modes + progress coalescer)","description":"## Background\nThe Sync screen provides real-time progress visualization during data synchronization. The TUI drives sync directly via lore library calls (not subprocess) — this gives direct access to progress callbacks, proper error propagation, and cooperative cancellation via CancelToken. The TUI is the primary human interface; the CLI serves robots/scripts.\n\nAfter sync completes, the screen transitions to a summary view showing exact changed entity counts. A progress coalescer prevents render thrashing by batching rapid progress updates.\n\nDesign principle: the TUI is self-contained. It does NOT detect or react to external CLI sync operations. 
If someone runs lore sync externally, the TUI's natural re-query on navigation handles stale data implicitly.\n\n## Approach\nCreate state, action, and view modules for the Sync screen:\n\n**State** (crates/lore-tui/src/screen/sync/state.rs):\n- SyncScreenMode enum: FullScreen, Inline (for use from Bootstrap screen)\n- SyncState enum: Idle, Running(SyncProgress), Complete(SyncSummary), Error(String)\n- SyncProgress: per-lane progress (issues, MRs, discussions, notes, events, statuses) with counts and ETA\n- SyncSummary: changed entity counts (new, updated, deleted per type), duration, errors\n- ProgressCoalescer: buffers progress updates, emits at most every 100ms to prevent render thrash\n\n**sync_delta_ledger** (crates/lore-tui/src/screen/sync/delta_ledger.rs):\n- SyncDeltaLedger: in-memory per-run record of changed entity IDs\n- Fields: new_issue_iids (Vec), updated_issue_iids (Vec), new_mr_iids (Vec), updated_mr_iids (Vec)\n- record_change(entity_type, iid, change_kind) — called by sync progress callback\n- summary() -> SyncSummary — produces the final counts for the summary view\n- Purpose: after sync completes, the dashboard and list screens can use the ledger to highlight \"new since last sync\" items\n\n**Action** (crates/lore-tui/src/screen/sync/action.rs):\n- start_sync(db: &DbManager, config: &Config, cancel: CancelToken) -> Cmd\n- Calls lore library ingestion functions directly: ingest_issues, ingest_mrs, ingest_discussions, etc.\n- Progress callback sends Msg::SyncProgress(lane, count, total) via channel\n- On completion sends Msg::SyncComplete(SyncSummary)\n- On cancel sends Msg::SyncCancelled(partial_summary)\n\n**Per-project fault isolation:** If sync for one project fails, continue syncing other projects. Collect per-project errors and display in summary view. 
Don't abort entire sync on single project failure.\n\n**View** (crates/lore-tui/src/screen/sync/view.rs):\n- Running view: per-lane progress bars with counts/totals, overall ETA, cancel hint (Esc)\n- Stream stats footer: show items/sec throughput for active lanes\n- Summary view: table of entity types with new/updated/deleted columns, total duration, per-project error list\n- Error view: error message with retry option\n- Inline mode: compact single-line progress for embedding in Bootstrap screen\n\nThe Sync screen uses TaskSupervisor for the background sync task with cooperative cancellation.\n\n## Acceptance Criteria\n- [ ] Sync screen launches sync via lore library calls (NOT subprocess)\n- [ ] Per-lane progress bars update in real-time during sync\n- [ ] ProgressCoalescer batches updates to at most 10/second (100ms floor)\n- [ ] Esc cancels sync cooperatively via CancelToken, shows partial summary\n- [ ] Sync completion transitions to summary view with accurate change counts\n- [ ] Summary view shows new/updated/deleted counts per entity type\n- [ ] Error during sync shows error message with retry option\n- [ ] Sync task registered with TaskSupervisor (dedup by TaskKey::Sync)\n- [ ] Per-project fault isolation: single project failure doesn't abort entire sync\n- [ ] SyncDeltaLedger records changed entity IDs for post-sync highlighting\n- [ ] Stream stats footer shows items/sec throughput\n- [ ] ScreenMode::Inline renders compact single-line progress for Bootstrap embedding\n- [ ] Unit tests for ProgressCoalescer batching behavior\n- [ ] Unit tests for SyncDeltaLedger record/summary\n- [ ] Integration test: mock sync with FakeClock verifies progress -> summary transition\n\n## Files\n- CREATE: crates/lore-tui/src/screen/sync/state.rs\n- CREATE: crates/lore-tui/src/screen/sync/action.rs\n- CREATE: crates/lore-tui/src/screen/sync/view.rs\n- CREATE: crates/lore-tui/src/screen/sync/delta_ledger.rs\n- CREATE: crates/lore-tui/src/screen/sync/mod.rs\n- MODIFY: 
crates/lore-tui/src/screen/mod.rs (add pub mod sync)\n\n## TDD Anchor\nRED: Write test_progress_coalescer_batches_rapid_updates that sends 50 progress updates in 10ms and asserts coalescer emits at most 1.\nGREEN: Implement ProgressCoalescer with configurable floor interval.\nVERIFY: cargo test -p lore-tui sync -- --nocapture\n\nAdditional tests:\n- test_sync_cancel_produces_partial_summary\n- test_sync_complete_produces_full_summary\n- test_sync_error_shows_retry\n- test_sync_dedup_prevents_double_launch\n- test_delta_ledger_records_changes: record 5 new issues and 3 updated MRs, assert summary counts\n- test_per_project_fault_isolation: simulate one project failure, verify others complete\n\n## Edge Cases\n- Sync cancelled immediately after start — partial summary with zero counts is valid\n- Network timeout during sync — error state with last-known progress preserved\n- Very large sync (100k+ entities) — progress coalescer prevents render thrash\n- Sync started while another sync TaskKey::Sync exists — TaskSupervisor dedup rejects it\n- Inline mode from Bootstrap: compact rendering, no full progress bars\n\n## Dependency Context\nUses TaskSupervisor from bd-3le2 for dedup and cancellation. Uses DbManager from bd-2kop for database access. Uses lore library ingestion module directly for sync operations. 
Used by Bootstrap screen (bd-3ty8) in inline mode.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:09.481354Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.266057Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2x2h","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2x2h","depends_on_id":"bd-3le2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2x2h","depends_on_id":"bd-u7se","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-2y79","title":"Add work item status via GraphQL enrichment","description":"## Summary\n\nGitLab 18.2+ has native work item status (To do, In progress, Done, Won't do, Duplicate) available ONLY via GraphQL, not REST. This enriches synced issues with status information by making supplementary GraphQL calls after REST ingestion.\n\n**Plan document:** plans/work-item-status-graphql.md\n\n## Critical Findings (from API research)\n\n- **EE-only (Premium/Ultimate)** — Free tier won't have the widget at all\n- **GraphQL auth differs from REST** — must use `Authorization: Bearer `, NOT `PRIVATE-TOKEN`\n- **Must use `workItems` resolver, NOT `project.issues`** — legacy issues path doesn't expose status widgets\n- **5 categories:** TRIAGE, TO_DO, IN_PROGRESS, DONE, CANCELED (not 3 as originally assumed)\n- **Max 100 items per GraphQL page** (standard GitLab limit)\n- **Custom statuses possible on 18.5+** — can't assume only system-defined statuses\n\n## Migration\n\nUses migration **021** (001-020 already exist on disk).\nAdds `status_name TEXT` and `status_category TEXT` to `issues` table (both nullable).\n\n## Files\n\n- src/gitlab/graphql.rs (NEW — minimal GraphQL client + status fetcher)\n- src/gitlab/mod.rs (add pub mod graphql)\n- src/gitlab/types.rs (WorkItemStatus, WorkItemStatusCategory enum)\n- 
src/core/db.rs (migration 021 in MIGRATIONS array)\n- src/core/config.rs (fetch_work_item_status toggle in SyncConfig)\n- src/ingestion/orchestrator.rs (enrichment step after issue sync)\n- src/cli/commands/show.rs (display status)\n- src/cli/commands/list.rs (status in list output + --status filter)\n\n## Acceptance Criteria\n\n- [ ] GraphQL client POSTs queries with Bearer auth and handles errors\n- [ ] Status fetched via workItems resolver with pagination\n- [ ] Migration 021 adds status_name and status_category to issues\n- [ ] lore show issue displays status (when available)\n- [ ] lore --robot show issue includes status in JSON\n- [ ] lore list issues --status filter works\n- [ ] Graceful degradation: Free tier, old GitLab, disabled GraphQL all handled\n- [ ] Config toggle: fetch_work_item_status (default true)\n- [ ] cargo check + clippy + tests pass","status":"open","priority":1,"issue_type":"feature","created_at":"2026-02-05T18:32:39.287957Z","created_by":"tayloreernisse","updated_at":"2026-02-10T19:45:28.686499Z","compaction_level":0,"original_size":0,"labels":["api","phase-b"]} +{"id":"bd-2y79","title":"Add work item status via GraphQL enrichment","description":"## Summary\n\nGitLab 18.2+ has native work item status (To do, In progress, Done, Won't do, Duplicate) available ONLY via GraphQL, not REST. 
This enriches synced issues with status information by making supplementary GraphQL calls after REST ingestion.\n\n**Plan document:** plans/work-item-status-graphql.md\n\n## Critical Findings (from API research)\n\n- **EE-only (Premium/Ultimate)** — Free tier won't have the widget at all\n- **GraphQL auth differs from REST** — must use `Authorization: Bearer `, NOT `PRIVATE-TOKEN`\n- **Must use `workItems` resolver, NOT `project.issues`** — legacy issues path doesn't expose status widgets\n- **5 categories:** TRIAGE, TO_DO, IN_PROGRESS, DONE, CANCELED (not 3 as originally assumed)\n- **Max 100 items per GraphQL page** (standard GitLab limit)\n- **Custom statuses possible on 18.5+** — can't assume only system-defined statuses\n\n## Migration\n\nUses migration **021** (001-020 already exist on disk).\nAdds `status_name TEXT` and `status_category TEXT` to `issues` table (both nullable).\n\n## Files\n\n- src/gitlab/graphql.rs (NEW — minimal GraphQL client + status fetcher)\n- src/gitlab/mod.rs (add pub mod graphql)\n- src/gitlab/types.rs (WorkItemStatus, WorkItemStatusCategory enum)\n- src/core/db.rs (migration 021 in MIGRATIONS array)\n- src/core/config.rs (fetch_work_item_status toggle in SyncConfig)\n- src/ingestion/orchestrator.rs (enrichment step after issue sync)\n- src/cli/commands/show.rs (display status)\n- src/cli/commands/list.rs (status in list output + --status filter)\n\n## Acceptance Criteria\n\n- [ ] GraphQL client POSTs queries with Bearer auth and handles errors\n- [ ] Status fetched via workItems resolver with pagination\n- [ ] Migration 021 adds status_name and status_category to issues\n- [ ] lore show issue displays status (when available)\n- [ ] lore --robot show issue includes status in JSON\n- [ ] lore list issues --status filter works\n- [ ] Graceful degradation: Free tier, old GitLab, disabled GraphQL all handled\n- [ ] Config toggle: fetch_work_item_status (default true)\n- [ ] cargo check + clippy + tests 
pass","status":"closed","priority":1,"issue_type":"feature","created_at":"2026-02-05T18:32:39.287957Z","created_by":"tayloreernisse","updated_at":"2026-02-17T15:08:29.499020Z","closed_at":"2026-02-17T15:08:29.498969Z","close_reason":"Already implemented: GraphQL status enrichment shipped in v0.8.x — migration 021, graphql.rs, --status filter, --no-status flag all complete","compaction_level":0,"original_size":0,"labels":["api","phase-b"]} {"id":"bd-2ygk","title":"Implement user flow integration tests (9 PRD flows)","description":"## Background\n\nThe PRD Section 6 defines 9 end-to-end user flows that exercise cross-screen navigation, state preservation, and data flow. The existing vertical slice test (bd-1mju) covers one flow (Dashboard -> Issue List -> Issue Detail -> Sync). These integration tests cover the remaining 8 flows plus re-test the vertical slice from a user-journey perspective. Each test simulates a realistic keystroke sequence using FrankenTUI's test harness and verifies that the correct screens are reached with the correct data visible.\n\n## Approach\n\nCreate a test module `tests/tui_user_flows.rs` with 9 test functions, each simulating a keystroke sequence against a FrankenTUI `TestHarness` with a pre-populated test database. Tests use `FakeClock` for deterministic timestamps.\n\n**Test database fixture**: A shared setup function creates an in-memory SQLite DB with ~20 issues, ~10 MRs, ~30 discussions, a few experts, and timeline events. This fixture is reused across all flow tests.\n\n**Flow tests**:\n\n1. **`test_flow_find_expert`** — Dashboard -> `w` -> type \"src/auth/\" -> verify Expert mode results appear -> `↓` select first person -> `Enter` -> verify navigation to Issue List filtered by that person\n2. **`test_flow_timeline_query`** — Dashboard -> `t` -> type \"auth timeout\" -> `Enter` -> verify Timeline shows seed events -> `Enter` on first event -> verify entity detail opens -> `Esc` -> back on Timeline\n3. 
**`test_flow_quick_search`** — Any screen -> `/` -> type query -> verify results appear -> `Tab` (switch mode) -> verify mode label changes -> `Enter` -> verify entity detail opens\n4. **`test_flow_sync_and_browse`** — Dashboard -> `s` -> `Enter` (start sync) -> wait for completion -> verify Summary shows deltas -> `i` -> verify Issue List filtered to new items\n5. **`test_flow_review_workload`** — Dashboard -> `w` -> `Tab` (Workload mode) -> type \"@bjones\" -> verify workload sections appear (assigned, authored, reviewing)\n6. **`test_flow_command_palette`** — Any screen -> `Ctrl+P` -> type \"mrs draft\" -> verify fuzzy match -> `Enter` -> verify MR List opened with draft filter\n7. **`test_flow_morning_triage`** — Dashboard -> `i` -> verify Issue List (opened, sorted by updated) -> `Enter` on first -> verify Issue Detail -> `Esc` -> verify cursor preserved on same row -> `j` -> verify cursor moved\n8. **`test_flow_direct_screen_jumps`** — Issue Detail -> `gt` -> verify Timeline -> `gw` -> verify Who -> `gi` -> verify Issue List -> `H` -> verify Dashboard (clean reset)\n9. 
**`test_flow_risk_sweep`** — Dashboard -> scroll to Insights -> `Enter` on first insight -> verify pre-filtered Issue List\n\nEach test follows the pattern:\n```rust\n#[test]\nfn test_flow_X() {\n let (harness, app) = setup_test_harness_with_fixture();\n // Send keystrokes\n harness.send_key(Key::Char('w'));\n // Assert screen state\n assert_eq!(app.current_screen(), Screen::Who);\n // Assert visible content\n let frame = harness.render();\n assert!(frame.contains(\"Expert\"));\n}\n```\n\n## Acceptance Criteria\n- [ ] All 9 flow tests exist and compile\n- [ ] Each test uses the shared DB fixture (no per-test DB setup)\n- [ ] Each test verifies screen transitions via `current_screen()` assertions\n- [ ] Each test verifies at least one content assertion (rendered text contains expected data)\n- [ ] test_flow_morning_triage verifies cursor preservation after Enter/Esc round-trip\n- [ ] test_flow_direct_screen_jumps verifies the g-prefix navigation chain\n- [ ] test_flow_sync_and_browse verifies delta-filtered navigation after sync\n- [ ] All tests use FakeClock for deterministic timestamps\n- [ ] Tests complete in <5 seconds each (no real I/O)\n\n## Files\n- CREATE: crates/lore-tui/tests/tui_user_flows.rs\n- MODIFY: (none — this is a new test file only)\n\n## TDD Anchor\nRED: Write `test_flow_morning_triage` first — it exercises the most common daily workflow (Dashboard -> Issue List -> Issue Detail -> back with cursor preservation). 
Start with just the Dashboard -> Issue List transition.\nGREEN: Requires all Phase 2 core screens to be working; the test itself is the GREEN verification.\nVERIFY: cargo test -p lore-tui test_flow_morning_triage\n\nAdditional tests: All 9 flows listed above.\n\n## Edge Cases\n- Flow tests must handle async data loading — use harness.tick() or harness.wait_for_idle() to let async tasks complete before asserting\n- g-prefix timeout (500ms) — tests must send the second key within the timeout; use harness clock control\n- Sync flow test needs a mock sync that completes quickly — use a pre-populated SyncDeltaLedger rather than running actual sync\n\n## Dependency Context\n- Depends on bd-1mju (vertical slice integration test) which establishes the test harness patterns and fixture setup.\n- Depends on bd-2nfs (snapshot test infrastructure) which provides the FakeClock and TestHarness setup.\n- Depends on all Phase 2 core screen beads (bd-35g5 Dashboard, bd-3ei1 Issue List, bd-8ab7 Issue Detail, bd-2kr0 MR List, bd-3t1b MR Detail) being implemented.\n- Depends on Phase 3 power feature beads (bd-1zow Search, bd-29qw Timeline, bd-u7se Who, bd-wzqi Command Palette) being implemented.\n- Depends on bd-2x2h (Sync screen) for the sync+browse flow 
test.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:41.060826Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:29:52.743563Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2ygk","depends_on_id":"bd-1mju","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-1zow","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-29qw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-2kr0","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-2nfs","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-2x2h","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-35g5","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-3ei1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-3t1b","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-8ab7","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-u7se","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2ygk","depends_on_id":"bd-wzqi","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2yo","title":"Fetch MR diffs API and populate mr_file_changes","description":"## Background\n\nThis bead fetches MR diff metadata from the GitLab API and populates the mr_file_changes table created by migration 016. 
It extracts only file-level metadata (paths, change type) and discards actual diff content.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 4.3 (Ingestion).\n\n## Codebase Context\n\n- pending_dependent_fetches already has `job_type='mr_diffs'` in CHECK constraint (migration 011)\n- dependent_queue.rs has: enqueue_job(), claim_jobs(), complete_job(), fail_job() with exponential backoff\n- Orchestrator pattern: enqueue after entity ingestion, drain after primary ingestion completes\n- GitLab client uses fetch_all_pages() for pagination\n- Existing drain patterns in orchestrator.rs: drain_resource_events() and drain_mr_closes_issues() — follow same pattern\n- config.sync.fetch_mr_file_changes flag guards enqueue (see bd-jec)\n- mr_file_changes table created by migration 016 (bd-1oo) — NOT 015 (015 is commit SHAs)\n- merge_commit_sha and squash_commit_sha already captured during MR ingestion (src/ingestion/merge_requests.rs lines 184, 205-206, 230-231) — no work needed for those fields\n\n## Approach\n\n### 1. API Client — add to `src/gitlab/client.rs`:\n\n```rust\npub async fn fetch_mr_diffs(\n &self,\n project_id: i64,\n mr_iid: i64,\n) -> Result> {\n let path = format\\!(\"/projects/{project_id}/merge_requests/{mr_iid}/diffs\");\n self.fetch_all_pages(&path, &[(\"per_page\", \"100\")]).await\n .or_else(|e| coalesce_not_found(e, Vec::new()))\n}\n```\n\n### 2. Types — add to `src/gitlab/types.rs`:\n\n```rust\n#[derive(Debug, Clone, Deserialize, Serialize)]\npub struct GitLabMrDiff {\n pub old_path: String,\n pub new_path: String,\n pub new_file: bool,\n pub renamed_file: bool,\n pub deleted_file: bool,\n // Ignore: diff, a_mode, b_mode, generated_file (not stored)\n}\n```\n\nAdd `GitLabMrDiff` to `src/gitlab/mod.rs` re-exports.\n\n### 3. 
Change Type Derivation (in new file):\n\n```rust\nfn derive_change_type(diff: &GitLabMrDiff) -> &'static str {\n if diff.new_file { \"added\" }\n else if diff.renamed_file { \"renamed\" }\n else if diff.deleted_file { \"deleted\" }\n else { \"modified\" }\n}\n```\n\n### 4. DB Storage — new `src/ingestion/mr_diffs.rs`:\n\n```rust\npub fn upsert_mr_file_changes(\n conn: &Connection,\n mr_local_id: i64,\n project_id: i64,\n diffs: &[GitLabMrDiff],\n) -> Result {\n // DELETE FROM mr_file_changes WHERE merge_request_id = ?\n // INSERT each diff row with derived change_type\n // DELETE+INSERT is simpler than UPSERT for array replacement\n}\n```\n\nAdd `pub mod mr_diffs;` to `src/ingestion/mod.rs`.\n\n### 5. Queue Integration — in orchestrator.rs:\n\n```rust\n// After MR upsert, if config.sync.fetch_mr_file_changes:\nenqueue_job(conn, project_id, \"merge_request\", mr_iid, mr_local_id, \"mr_diffs\")?;\n```\n\nAdd `drain_mr_diffs()` following the drain_mr_closes_issues() pattern. Call it after drain_mr_closes_issues() in the sync pipeline.\n\n## Acceptance Criteria\n\n- [ ] `fetch_mr_diffs()` calls GET /projects/:id/merge_requests/:iid/diffs with pagination\n- [ ] GitLabMrDiff type added to src/gitlab/types.rs and re-exported from src/gitlab/mod.rs\n- [ ] Change type derived: new_file->added, renamed_file->renamed, deleted_file->deleted, else->modified\n- [ ] mr_file_changes rows have correct old_path, new_path, change_type\n- [ ] Old rows deleted before insert (clean replacement per MR)\n- [ ] Jobs only enqueued when config.sync.fetch_mr_file_changes is true\n- [ ] 404/403 API errors handled gracefully (empty result, not failure)\n- [ ] drain_mr_diffs() added to orchestrator.rs sync pipeline\n- [ ] `pub mod mr_diffs;` added to src/ingestion/mod.rs\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/gitlab/client.rs` (add fetch_mr_diffs method)\n- `src/gitlab/types.rs` (add GitLabMrDiff struct)\n- 
`src/gitlab/mod.rs` (re-export GitLabMrDiff)\n- `src/ingestion/mr_diffs.rs` (NEW — upsert_mr_file_changes + derive_change_type)\n- `src/ingestion/mod.rs` (add pub mod mr_diffs)\n- `src/ingestion/orchestrator.rs` (enqueue mr_diffs jobs + drain_mr_diffs)\n\n## TDD Loop\n\nRED:\n- `test_derive_change_type_added` - new_file=true -> \"added\"\n- `test_derive_change_type_renamed` - renamed_file=true -> \"renamed\"\n- `test_derive_change_type_deleted` - deleted_file=true -> \"deleted\"\n- `test_derive_change_type_modified` - all false -> \"modified\"\n- `test_upsert_replaces_existing` - second upsert replaces first\n\nGREEN: Implement API client, type derivation, DB ops, orchestrator wiring.\n\nVERIFY: `cargo test --lib -- mr_diffs`\n\n## Edge Cases\n\n- MR with 500+ files: paginate properly via fetch_all_pages\n- Binary files: handled as modified (renamed_file/new_file/deleted_file all false)\n- File renamed AND modified: renamed_file=true takes precedence\n- Draft MRs: still fetch diffs\n- Deleted MR: 404 -> empty vec via coalesce_not_found()\n- merge_commit_sha/squash_commit_sha: already handled in merge_requests.rs ingestion — NOT part of this bead\n","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:08.939514Z","created_by":"tayloreernisse","updated_at":"2026-02-08T18:27:05.993580Z","closed_at":"2026-02-08T18:27:05.993482Z","close_reason":"Implemented: GitLabMrDiff type, fetch_mr_diffs client method, upsert_mr_file_changes in new mr_diffs.rs module, enqueue_mr_diffs_jobs + drain_mr_diffs in orchestrator, migration 020 for diffs_synced_for_updated_at watermark, progress events, autocorrect registry. 
All 390 tests pass, clippy clean.","compaction_level":0,"original_size":0,"labels":["api","gate-4","phase-b"],"dependencies":[{"issue_id":"bd-2yo","depends_on_id":"bd-14q","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2yo","depends_on_id":"bd-1oo","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2yo","depends_on_id":"bd-jec","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-2yo","depends_on_id":"bd-tir","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-2yq","title":"[CP1] Issue transformer with label extraction","description":"Transform GitLab issue payloads to normalized database schema.\n\nFunctions to implement:\n- transformIssue(gitlabIssue, localProjectId) → NormalizedIssue\n- extractLabels(gitlabIssue, localProjectId) → Label[]\n\nTransformation rules:\n- Convert ISO timestamps to ms epoch using isoToMs()\n- Set last_seen_at to nowMs()\n- Handle labels vs labels_details (prefer details when available)\n- Handle missing optional fields gracefully\n\nFiles: src/gitlab/transformers/issue.ts\nTests: tests/unit/issue-transformer.test.ts\nDone when: Unit tests pass for payload transformation and label extraction","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:19:09.660448Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.152259Z","closed_at":"2026-01-25T15:21:35.152259Z","deleted_at":"2026-01-25T15:21:35.152254Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-2ys","title":"[CP1] Cargo.toml updates - async-stream and futures","description":"## Background\n\nThe GitLab client pagination methods require async streaming capabilities. 
The `async-stream` crate provides the `stream!` macro for creating async iterators, and `futures` provides `StreamExt` for consuming them with `.next()` and other combinators.\n\n## Approach\n\nAdd these dependencies to Cargo.toml:\n\n```toml\n[dependencies]\nasync-stream = \"0.3\"\nfutures = { version = \"0.3\", default-features = false, features = [\"alloc\"] }\n```\n\nUse minimal features on `futures` to avoid pulling unnecessary code.\n\n## Acceptance Criteria\n\n- [ ] `async-stream = \"0.3\"` is in Cargo.toml [dependencies]\n- [ ] `futures` with `alloc` feature is in Cargo.toml [dependencies]\n- [ ] `cargo check` succeeds after adding dependencies\n\n## Files\n\n- Cargo.toml (edit)\n\n## TDD Loop\n\nRED: Not applicable (dependency addition)\nGREEN: Add lines to Cargo.toml\nVERIFY: `cargo check`\n\n## Edge Cases\n\n- If `futures` is already present, merge features rather than duplicate\n- Use exact version pins for reproducibility","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.104664Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:25:10.274787Z","closed_at":"2026-01-25T22:25:10.274727Z","close_reason":"Added async-stream 0.3 and futures 0.3 (alloc feature) to Cargo.toml, cargo check passes","compaction_level":0,"original_size":0} -{"id":"bd-2yu5","title":"Add timestamp-aware test helpers","description":"## Background\nExisting test helpers (who.rs:2469-2598) use now_ms() for all timestamps. 
Time-decay tests need precise timestamp control to verify decay math, state-aware timestamps, and as-of filtering.\n\n## Approach\nAdd three new helpers in the test module (who.rs after insert_file_change at line 2598), patterned after existing helpers:\n\n### insert_mr_at() — variant of insert_mr (who.rs:2469-2493):\n```rust\n#[allow(clippy::too_many_arguments)]\nfn insert_mr_at(\n conn: &Connection,\n id: i64,\n project_id: i64,\n iid: i64,\n author: &str,\n state: &str,\n updated_at_ms: i64,\n merged_at_ms: Option,\n closed_at_ms: Option,\n) {\n conn.execute(\n \"INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, author_username, state, last_seen_at, updated_at, merged_at, closed_at)\n VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)\",\n rusqlite::params![id, id * 10, project_id, iid, format!(\"MR {iid}\"), author, state, now_ms(), updated_at_ms, merged_at_ms, closed_at_ms],\n ).unwrap();\n}\n```\n\n### insert_diffnote_at() — variant of insert_diffnote (who.rs:2541-2580):\n```rust\n#[allow(clippy::too_many_arguments)]\nfn insert_diffnote_at(\n conn: &Connection,\n id: i64,\n discussion_id: i64,\n project_id: i64,\n author: &str,\n new_path: &str,\n old_path: Option<&str>,\n body: &str,\n created_at_ms: i64,\n) {\n conn.execute(\n \"INSERT INTO notes (id, gitlab_id, project_id, discussion_id, author_username, note_type, is_system, position_new_path, position_old_path, body, created_at, updated_at)\n VALUES (?1, ?2, ?3, ?4, ?5, 'DiffNote', 0, ?6, ?7, ?8, ?9, ?9)\",\n rusqlite::params![id, id * 10, project_id, discussion_id, author, new_path, old_path, body, created_at_ms],\n ).unwrap();\n}\n```\n\n### insert_file_change_with_old_path() — variant of insert_file_change (who.rs:2585-2598):\n```rust\nfn insert_file_change_with_old_path(\n conn: &Connection,\n mr_id: i64,\n project_id: i64,\n new_path: &str,\n old_path: Option<&str>,\n change_type: &str,\n) {\n conn.execute(\n \"INSERT INTO mr_file_changes (merge_request_id, project_id, new_path, 
old_path, change_type)\n VALUES (?1, ?2, ?3, ?4, ?5)\",\n rusqlite::params![mr_id, project_id, new_path, old_path, change_type],\n ).unwrap();\n}\n```\n\nNote: mr_file_changes already has an old_path column (from migration 016). The existing insert_file_change helper simply omits it (defaults to NULL).\n\n## Acceptance Criteria\n- [ ] insert_mr_at compiles and inserts with explicit updated_at, merged_at, closed_at\n- [ ] insert_diffnote_at compiles and inserts with explicit old_path and created_at\n- [ ] insert_file_change_with_old_path compiles and inserts with explicit old_path\n- [ ] Existing helpers (insert_mr, insert_diffnote, insert_file_change) unchanged\n- [ ] cargo check --all-targets passes\n- [ ] All three helpers are used by downstream beads (bd-13q8, bd-1h3f)\n\n## Files\n- MODIFY: src/cli/commands/who.rs (test module, after insert_file_change at line ~2598)\n\n## TDD Loop\nN/A — these are test utilities. Verified indirectly by tests in bd-13q8 and bd-1h3f.\nVERIFY: cargo check --all-targets (compiles cleanly)\n\n## Edge Cases\n- merged_at_ms/closed_at_ms as Option — NULL when not applicable\n- old_path as Option<&str> — NULL when no rename occurred\n- created_at_ms used for both created_at and updated_at on notes (matching existing insert_diffnote pattern)\n- #[allow(clippy::too_many_arguments)] needed on helpers with 8+ params (project uses pedantic clippy)\n\nDependencies:\n -> bd-2w1p (blocks) - Add half-life fields and config validation to ScoringConfig","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-09T17:00:19.594086Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:44:38.676787Z","compaction_level":0,"original_size":0,"labels":["scoring","test"],"dependencies":[{"issue_id":"bd-2yu5","depends_on_id":"bd-2w1p","type":"blocks","created_at":"2026-02-12T20:08:09Z","created_by":"import"}]} +{"id":"bd-2yu5","title":"Add timestamp-aware test helpers","description":"## Background\nExisting test helpers 
(who.rs:2469-2598) use now_ms() for all timestamps. Time-decay tests need precise timestamp control to verify decay math, state-aware timestamps, and as-of filtering.\n\n## Approach\nAdd three new helpers in the test module (who.rs after insert_file_change at line 2598), patterned after existing helpers:\n\n### insert_mr_at() — variant of insert_mr (who.rs:2469-2493):\n```rust\n#[allow(clippy::too_many_arguments)]\nfn insert_mr_at(\n conn: &Connection,\n id: i64,\n project_id: i64,\n iid: i64,\n author: &str,\n state: &str,\n updated_at_ms: i64,\n merged_at_ms: Option,\n closed_at_ms: Option,\n) {\n conn.execute(\n \"INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, author_username, state, last_seen_at, updated_at, merged_at, closed_at)\n VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?9, ?10, ?11)\",\n rusqlite::params![id, id * 10, project_id, iid, format!(\"MR {iid}\"), author, state, now_ms(), updated_at_ms, merged_at_ms, closed_at_ms],\n ).unwrap();\n}\n```\n\n### insert_diffnote_at() — variant of insert_diffnote (who.rs:2541-2580):\n```rust\n#[allow(clippy::too_many_arguments)]\nfn insert_diffnote_at(\n conn: &Connection,\n id: i64,\n discussion_id: i64,\n project_id: i64,\n author: &str,\n new_path: &str,\n old_path: Option<&str>,\n body: &str,\n created_at_ms: i64,\n) {\n conn.execute(\n \"INSERT INTO notes (id, gitlab_id, project_id, discussion_id, author_username, note_type, is_system, position_new_path, position_old_path, body, created_at, updated_at)\n VALUES (?1, ?2, ?3, ?4, ?5, 'DiffNote', 0, ?6, ?7, ?8, ?9, ?9)\",\n rusqlite::params![id, id * 10, project_id, discussion_id, author, new_path, old_path, body, created_at_ms],\n ).unwrap();\n}\n```\n\n### insert_file_change_with_old_path() — variant of insert_file_change (who.rs:2585-2598):\n```rust\nfn insert_file_change_with_old_path(\n conn: &Connection,\n mr_id: i64,\n project_id: i64,\n new_path: &str,\n old_path: Option<&str>,\n change_type: &str,\n) {\n conn.execute(\n \"INSERT INTO 
mr_file_changes (merge_request_id, project_id, new_path, old_path, change_type)\n VALUES (?1, ?2, ?3, ?4, ?5)\",\n rusqlite::params![mr_id, project_id, new_path, old_path, change_type],\n ).unwrap();\n}\n```\n\nNote: mr_file_changes already has an old_path column (from migration 016). The existing insert_file_change helper simply omits it (defaults to NULL).\n\n## Acceptance Criteria\n- [ ] insert_mr_at compiles and inserts with explicit updated_at, merged_at, closed_at\n- [ ] insert_diffnote_at compiles and inserts with explicit old_path and created_at\n- [ ] insert_file_change_with_old_path compiles and inserts with explicit old_path\n- [ ] Existing helpers (insert_mr, insert_diffnote, insert_file_change) unchanged\n- [ ] cargo check --all-targets passes\n- [ ] All three helpers are used by downstream beads (bd-13q8, bd-1h3f)\n\n## Files\n- MODIFY: src/cli/commands/who.rs (test module, after insert_file_change at line ~2598)\n\n## TDD Loop\nN/A — these are test utilities. Verified indirectly by tests in bd-13q8 and bd-1h3f.\nVERIFY: cargo check --all-targets (compiles cleanly)\n\n## Edge Cases\n- merged_at_ms/closed_at_ms as Option — NULL when not applicable\n- old_path as Option<&str> — NULL when no rename occurred\n- created_at_ms used for both created_at and updated_at on notes (matching existing insert_diffnote pattern)\n- #[allow(clippy::too_many_arguments)] needed on helpers with 8+ params (project uses pedantic clippy)\n\nDependencies:\n -> bd-2w1p (blocks) - Add half-life fields and config validation to ScoringConfig","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-09T17:00:19.594086Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:43:04.408355Z","closed_at":"2026-02-12T20:43:04.408316Z","close_reason":"Implemented by time-decay swarm: 3 agents, 12 tasks, 621 tests passing, all quality gates 
green","compaction_level":0,"original_size":0,"labels":["scoring","test"],"dependencies":[{"issue_id":"bd-2yu5","depends_on_id":"bd-2w1p","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-2zl","title":"Epic: Gate 1 - Resource Events Ingestion","description":"## Background\nGate 1 transforms gitlore from a snapshot engine into a temporal data store by ingesting structured event data from GitLab Resource Events APIs (state, label, milestone changes). This is the foundation — Gates 2-5 all depend on the event tables and dependent fetch queue that Gate 1 establishes.\n\nCurrently, when an issue is closed or a label changes, gitlore overwrites the current state. The transition is lost. Gate 1 captures these transitions as discrete events with timestamps, actors, and provenance, enabling temporal queries like \"when did this issue become critical?\" and \"who closed this MR?\"\n\n## Architecture\n- **Three new tables:** resource_state_events, resource_label_events, resource_milestone_events (migration 011, already shipped as bd-hu3)\n- **Generic dependent fetch queue:** pending_dependent_fetches table replaces per-type queue tables. Supports job_types: resource_events, mr_closes_issues, mr_diffs. Used by Gates 1, 2, and 4.\n- **Opt-in via config:** sync.fetchResourceEvents (default true). --no-events CLI flag to skip.\n- **Incremental:** Only changed entities enqueued. --full re-enqueues all.\n- **Crash recovery:** locked_at column with 5-minute stale lock reclaim.\n\n## Children (Execution Order)\n1. **bd-hu3** [CLOSED] — Migration 011: event tables + entity_references + dependent fetch queue\n2. **bd-2e8** [CLOSED] — fetchResourceEvents config flag\n3. **bd-2fm** [CLOSED] — GitLab Resource Event serde types\n4. **bd-sqw** [CLOSED] — Resource Events API endpoints in GitLab client\n5. **bd-1uc** [CLOSED] — DB upsert functions for resource events\n6. **bd-tir** [CLOSED] — Generic dependent fetch queue (enqueue + drain)\n7. 
**bd-1ep** [CLOSED] — Wire resource event fetching into sync pipeline\n8. **bd-3sh** [CLOSED] — lore count events command\n9. **bd-1m8** [CLOSED] — lore stats --check for event integrity + queue health\n\n## Gate Completion Criteria\n- [ ] All 9 children closed\n- [ ] `lore sync` fetches resource events for changed entities\n- [ ] `lore sync --no-events` skips event fetching\n- [ ] Event fetch failures queued for retry with exponential backoff\n- [ ] Stale locks auto-reclaimed on next sync run\n- [ ] `lore count events` shows counts by type (state/label/milestone)\n- [ ] `lore stats --check` validates referential integrity + queue health\n- [ ] Robot mode JSON for all new commands\n- [ ] Integration test: full sync cycle with events enabled\n\n## Dependencies\n- None (Gate 1 is the foundation)\n- Downstream: Gate 2 (bd-1se) depends on event tables and dependent fetch queue","status":"closed","priority":1,"issue_type":"feature","created_at":"2026-02-02T21:30:49.136036Z","created_by":"tayloreernisse","updated_at":"2026-02-05T16:06:52.080788Z","closed_at":"2026-02-05T16:06:52.080725Z","close_reason":"Already implemented: migration 011 exists, events_db.rs has upsert functions, client.rs has fetch_*_state_events, orchestrator.rs has drain_resource_events. Full Gate 1 functionality is live.","compaction_level":0,"original_size":0,"labels":["epic","gate-1","phase-b"]} {"id":"bd-2zr","title":"[CP1] GitLab client pagination methods","description":"Add async stream methods for paginated GitLab API calls.\n\n## Methods to Add to GitLabClient\n\n### paginate_issues(gitlab_project_id, updated_after, cursor_rewind_seconds) -> Stream\n- Use async_stream::try_stream! 
macro\n- Query params: scope=all, state=all, order_by=updated_at, sort=asc, per_page=100\n- If updated_after provided, apply cursor_rewind_seconds (subtract from timestamp)\n- Clamp to 0 to avoid underflow: (ts - rewind_ms).max(0)\n- Follow x-next-page header until empty/absent\n- Fall back to empty-page detection if headers missing\n\n### paginate_issue_discussions(gitlab_project_id, issue_iid) -> Stream\n- Paginate through discussions for single issue\n- per_page=100\n- Follow x-next-page header\n\n### request_with_headers(path, params) -> Result<(T, HeaderMap)>\n- Acquire rate limiter\n- Make request with PRIVATE-TOKEN header\n- Return both deserialized data and response headers\n\n## Dependencies\n- async-stream = \"0.3\" (for try_stream! macro)\n- futures = \"0.3\" (for Stream trait and StreamExt)\n\nFiles: src/gitlab/client.rs\nTests: tests/pagination_tests.rs\nDone when: Pagination handles multiple pages and respects cursors, tests pass","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T16:57:13.045971Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.784887Z","closed_at":"2026-01-25T17:02:01.784887Z","deleted_at":"2026-01-25T17:02:01.784883Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-31b","title":"[CP1] Discussion ingestion module","description":"Fetch and store discussions/notes for each issue.\n\nImplement ingestIssueDiscussions(options) → { discussionsFetched, discussionsUpserted, notesUpserted, systemNotesCount }\n\nLogic:\n1. Paginate through all discussions for given issue\n2. 
For each discussion:\n - Store raw payload (compressed)\n - Upsert discussion record with correct issue FK\n - Transform and upsert all notes\n - Store raw payload per note\n - Track system notes count\n\nFiles: src/ingestion/discussions.ts\nTests: tests/integration/issue-discussion-ingestion.test.ts\nDone when: Discussions and notes populated with correct FKs and is_system flags","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:19:57.131442Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.156574Z","closed_at":"2026-01-25T15:21:35.156574Z","deleted_at":"2026-01-25T15:21:35.156571Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} @@ -179,11 +189,11 @@ {"id":"bd-32mc","title":"OBSERV: Implement log retention cleanup at startup","description":"## Background\nLog files accumulate at ~1-10 MB/day. Without cleanup, they grow unbounded. Retention runs BEFORE subscriber init so deleted file handles aren't held open by the appender.\n\n## Approach\nAdd a cleanup function, called from main.rs before the subscriber is initialized (before current line 44):\n\n```rust\n/// Delete log files older than retention_days.\n/// Matches files named lore.YYYY-MM-DD.log in the log directory.\npub fn cleanup_old_logs(log_dir: &Path, retention_days: u32) -> std::io::Result {\n if retention_days == 0 {\n return Ok(0); // 0 means file logging disabled, don't delete\n }\n let cutoff = SystemTime::now() - Duration::from_secs(u64::from(retention_days) * 86400);\n let mut deleted = 0;\n\n for entry in std::fs::read_dir(log_dir)? 
{\n let entry = entry?;\n let name = entry.file_name();\n let name_str = name.to_string_lossy();\n\n // Only match lore.YYYY-MM-DD.log pattern\n if !name_str.starts_with(\"lore.\") || !name_str.ends_with(\".log\") {\n continue;\n }\n\n if let Ok(metadata) = entry.metadata() {\n if let Ok(modified) = metadata.modified() {\n if modified < cutoff {\n std::fs::remove_file(entry.path())?;\n deleted += 1;\n }\n }\n }\n }\n Ok(deleted)\n}\n```\n\nPlace this function in src/core/paths.rs (next to get_log_dir) or a new src/core/log_retention.rs. Prefer paths.rs since it's small and related.\n\nCall from main.rs:\n```rust\nlet log_dir = get_log_dir(config.logging.log_dir.as_deref());\nlet _ = cleanup_old_logs(&log_dir, config.logging.retention_days);\n// THEN init subscriber\n```\n\nNote: Config must be loaded before cleanup runs. Current main.rs parses Cli at line 60, but config loading happens inside command handlers. This means we need to either:\n A) Load config early in main() before subscriber init (preferred)\n B) Defer cleanup to after config load\n\nSince the subscriber must also know log_dir, approach A is natural: load config -> cleanup -> init subscriber -> dispatch command.\n\n## Acceptance Criteria\n- [ ] Files matching lore.*.log older than retention_days are deleted\n- [ ] Files matching lore.*.log within retention_days are preserved\n- [ ] Non-matching files (e.g., other.txt) are never deleted\n- [ ] retention_days=0 skips cleanup entirely (no files deleted)\n- [ ] Errors on individual files don't prevent cleanup of remaining files\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/core/paths.rs (add cleanup_old_logs function)\n- src/main.rs (call cleanup before subscriber init)\n\n## TDD Loop\nRED:\n - test_log_retention_cleanup: create tempdir with lore.2026-01-01.log through lore.2026-02-04.log, run with retention_days=7, assert old deleted, recent preserved\n - test_log_retention_ignores_non_log_files: create other.txt alongside 
old log files, assert other.txt untouched\n - test_log_retention_zero_days: retention_days=0, assert nothing deleted\nGREEN: Implement cleanup_old_logs\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- SystemTime::now() precision varies by OS; use file modified time, not name parsing (simpler and more reliable)\n- read_dir on non-existent directory: get_log_dir creates it first, so this shouldn't happen. But handle gracefully.\n- Permissions error on individual file: log a warning, continue with remaining files (don't propagate)\n- Race condition: another process creates a file during cleanup. Not a concern -- we only delete old files.","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:53:55.627901Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:15:04.452086Z","closed_at":"2026-02-04T17:15:04.452039Z","close_reason":"Implemented cleanup_old_logs() with date-pattern matching and retention_days config, runs at startup before subscriber init","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-32mc","depends_on_id":"bd-17n","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-32mc","depends_on_id":"bd-1k4","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-32mc","depends_on_id":"bd-2nx","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-32q","title":"Implement timeline seed phase: FTS5 keyword search to entity IDs","description":"## Background\n\nThe seed phase is steps 1-2 of the timeline pipeline (spec Section 3.2): SEED + HYDRATE. 
It converts a keyword query into entity IDs via FTS5 search and collects evidence note candidates.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 3.2 steps 1-2.\n\n## Codebase Context\n\n- FTS5 index exists: documents_fts table (migration 008)\n- documents table: id, source_type ('issue'|'merge_request'|'discussion'), source_id, project_id, created_at, content\n- discussions table: id, issue_id, merge_request_id\n- notes table: discussion_id, author_username, body, created_at, is_system, id (note_id)\n- Safe FTS query builder: src/search/fts.rs has to_fts_query(raw, FtsQueryMode::Safe) for sanitizing user input\n- projects table: path_with_namespace\n- issues/merge_requests: iid, project_id\n\n## Approach\n\nCreate `src/core/timeline_seed.rs`:\n\n```rust\nuse crate::core::timeline::{EntityRef, TimelineEvent, TimelineEventType};\nuse rusqlite::Connection;\n\npub struct SeedResult {\n pub seed_entities: Vec,\n pub evidence_notes: Vec, // NoteEvidence events\n}\n\npub fn seed_timeline(\n conn: &Connection,\n query: &str,\n project_id: Option,\n since_ms: Option,\n max_seeds: usize, // default 50\n) -> Result { ... 
}\n```\n\n### SQL for SEED + HYDRATE (entity discovery):\n```sql\nSELECT DISTINCT d.source_type, d.source_id, d.project_id,\n CASE d.source_type\n WHEN 'issue' THEN (SELECT iid FROM issues WHERE id = d.source_id)\n WHEN 'merge_request' THEN (SELECT iid FROM merge_requests WHERE id = d.source_id)\n WHEN 'discussion' THEN NULL -- discussions map to parent entity below\n END AS iid,\n CASE d.source_type\n WHEN 'issue' THEN (SELECT p.path_with_namespace FROM projects p JOIN issues i ON i.project_id = p.id WHERE i.id = d.source_id)\n WHEN 'merge_request' THEN (SELECT p.path_with_namespace FROM projects p JOIN merge_requests m ON m.project_id = p.id WHERE m.id = d.source_id)\n WHEN 'discussion' THEN NULL\n END AS project_path\nFROM documents_fts fts\nJOIN documents d ON d.id = fts.rowid\nWHERE documents_fts MATCH ?1\n AND (?2 IS NULL OR d.project_id = ?2)\nORDER BY rank\nLIMIT ?3\n```\n\nFor 'discussion' source_type: resolve to parent entity via discussions.issue_id or discussions.merge_request_id.\n\n### SQL for evidence notes (top 10 FTS5-matched notes):\n```sql\nSELECT n.id as note_id, n.body, n.created_at, n.author_username,\n disc.id as discussion_id,\n CASE WHEN disc.issue_id IS NOT NULL THEN 'issue' ELSE 'merge_request' END as parent_type,\n COALESCE(disc.issue_id, disc.merge_request_id) AS parent_entity_id\nFROM documents_fts fts\nJOIN documents d ON d.id = fts.rowid\nJOIN discussions disc ON disc.id = d.source_id AND d.source_type = 'discussion'\nJOIN notes n ON n.discussion_id = disc.id AND n.is_system = 0\nWHERE documents_fts MATCH ?1\nORDER BY rank\nLIMIT 10\n```\n\nEvidence notes become TimelineEvent with:\n- event_type: NoteEvidence { note_id, snippet (first 200 chars), discussion_id }\n- Use to_fts_query(query, FtsQueryMode::Safe) to sanitize user input before MATCH\n\nRegister in `src/core/mod.rs`: `pub mod timeline_seed;`\n\n## Acceptance Criteria\n\n- [ ] seed_timeline() returns entities from FTS5 search\n- [ ] Entities deduplicated (same entity from 
multiple docs appears once)\n- [ ] Discussion documents resolved to parent entity (issue or MR)\n- [ ] Evidence notes capped at 10\n- [ ] Evidence note snippets truncated to 200 chars (safe UTF-8 boundary)\n- [ ] Uses to_fts_query(query, FtsQueryMode::Safe) for input sanitization\n- [ ] --since filter works\n- [ ] -p filter works\n- [ ] Empty result for zero-match queries (not error)\n- [ ] Module registered in src/core/mod.rs\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/core/timeline_seed.rs` (NEW)\n- `src/core/mod.rs` (add `pub mod timeline_seed;`)\n\n## TDD Loop\n\nRED:\n- `test_seed_deduplicates_entities`\n- `test_seed_resolves_discussion_to_parent`\n- `test_seed_empty_query_returns_empty`\n- `test_seed_evidence_capped_at_10`\n- `test_seed_evidence_snippet_truncated`\n- `test_seed_respects_since_filter`\n\nTests need in-memory DB with migrations 001-014 + documents/FTS test data.\n\nGREEN: Implement FTS5 queries and deduplication.\n\nVERIFY: `cargo test --lib -- timeline_seed`\n\n## Edge Cases\n\n- FTS5 MATCH invalid syntax: to_fts_query(query, FtsQueryMode::Safe) sanitizes\n- Discussion orphans: LEFT JOIN handles deleted notes\n- UTF-8 truncation: use char_indices() to find safe 200-char boundary\n- Discussion source resolving to both issue_id and merge_request_id: prefer issue_id (shouldn't happen but be defensive)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:33:08.615908Z","created_by":"tayloreernisse","updated_at":"2026-02-05T21:47:07.966488Z","closed_at":"2026-02-05T21:47:07.966437Z","close_reason":"Completed: Created src/core/timeline_seed.rs with seed_timeline() function. FTS5 search to entity IDs with discussion-to-parent resolution, entity deduplication, evidence note extraction (capped, snippet-truncated). 12 tests pass. 
All quality gates pass.","compaction_level":0,"original_size":0,"labels":["gate-3","phase-b","query"],"dependencies":[{"issue_id":"bd-32q","depends_on_id":"bd-20e","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-32q","depends_on_id":"bd-ike","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-335","title":"Implement Ollama API client","description":"## Background\nThe Ollama API client provides the HTTP interface to the local Ollama embedding server. It handles health checks (is Ollama running? does the model exist?), batch embedding requests (up to 32 texts per call), and error translation to LoreError variants. This is the lowest-level embedding component — the pipeline (bd-am7) builds on top of it.\n\n## Approach\nCreate \\`src/embedding/ollama.rs\\` per PRD Section 4.2. **Uses async reqwest (not blocking).**\n\n```rust\nuse reqwest::Client; // NOTE: async Client, not reqwest::blocking\nuse serde::{Deserialize, Serialize};\nuse crate::core::error::{LoreError, Result};\n\npub struct OllamaConfig {\n pub base_url: String, // default \\\"http://localhost:11434\\\"\n pub model: String, // default \\\"nomic-embed-text\\\"\n pub timeout_secs: u64, // default 60\n}\n\nimpl Default for OllamaConfig { /* PRD defaults */ }\n\npub struct OllamaClient {\n client: Client, // async reqwest::Client\n config: OllamaConfig,\n}\n\n#[derive(Serialize)]\nstruct EmbedRequest { model: String, input: Vec }\n\n#[derive(Deserialize)]\nstruct EmbedResponse { model: String, embeddings: Vec> }\n\n#[derive(Deserialize)]\nstruct TagsResponse { models: Vec }\n\n#[derive(Deserialize)]\nstruct ModelInfo { name: String }\n\nimpl OllamaClient {\n pub fn new(config: OllamaConfig) -> Self;\n\n /// Async health check: GET /api/tags\n /// Model matched via starts_with (\\\"nomic-embed-text\\\" matches \\\"nomic-embed-text:latest\\\")\n pub async fn health_check(&self) -> Result<()>;\n\n /// Async batch embedding: POST 
/api/embed\n /// Input: Vec of texts, Response: Vec> of 768-dim embeddings\n pub async fn embed_batch(&self, texts: Vec) -> Result>>;\n}\n\n/// Quick health check without full client (async).\npub async fn check_ollama_health(base_url: &str) -> bool;\n```\n\n**Error mapping (per PRD):**\n- Connection refused/timeout -> LoreError::OllamaUnavailable { base_url, source: Some(e) }\n- Model not in /api/tags -> LoreError::OllamaModelNotFound { model }\n- Non-200 from /api/embed -> LoreError::EmbeddingFailed { document_id: 0, reason: format!(\\\"HTTP {}: {}\\\", status, body) }\n\n**Key PRD detail:** Model matching uses \\`starts_with\\` (not exact match) so \\\"nomic-embed-text\\\" matches \\\"nomic-embed-text:latest\\\".\n\n## Acceptance Criteria\n- [ ] Uses async reqwest::Client (not blocking)\n- [ ] health_check() is async, detects server availability and model presence\n- [ ] Model matched via starts_with (handles \\\":latest\\\" suffix)\n- [ ] embed_batch() is async, sends POST /api/embed\n- [ ] Batch size up to 32 texts\n- [ ] Returns Vec> with 768 dimensions each\n- [ ] OllamaUnavailable error includes base_url and source error\n- [ ] OllamaModelNotFound error includes model name\n- [ ] Non-200 response mapped to EmbeddingFailed with status + body\n- [ ] Timeout: 60 seconds default (configurable via OllamaConfig)\n- [ ] \\`cargo build\\` succeeds\n\n## Files\n- \\`src/embedding/ollama.rs\\` — new file\n- \\`src/embedding/mod.rs\\` — add \\`pub mod ollama;\\` and re-exports\n\n## TDD Loop\nRED: Tests (unit tests with mock, integration needs Ollama):\n- \\`test_config_defaults\\` — verify default base_url, model, timeout\n- \\`test_health_check_model_starts_with\\` — \\\"nomic-embed-text\\\" matches \\\"nomic-embed-text:latest\\\"\n- \\`test_embed_batch_parse\\` — mock response parsed correctly\n- \\`test_connection_error_maps_to_ollama_unavailable\\`\nGREEN: Implement OllamaClient\nVERIFY: \\`cargo test ollama\\`\n\n## Edge Cases\n- Ollama returns model name with 
version tag (\\\"nomic-embed-text:latest\\\"): starts_with handles this\n- Empty texts array: send empty batch, Ollama returns empty embeddings\n- Ollama returns wrong number of embeddings (2 texts, 1 embedding): caller (pipeline) validates\n- Non-JSON response: reqwest deserialization error -> wrap appropriately","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:34.025099Z","created_by":"tayloreernisse","updated_at":"2026-01-30T16:58:17.546852Z","closed_at":"2026-01-30T16:58:17.546794Z","close_reason":"Completed: OllamaClient with async health_check (starts_with model matching), embed_batch, error mapping to LoreError variants, check_ollama_health helper, 4 tests pass","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-335","depends_on_id":"bd-ljf","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-343o","title":"Fetch and store GitLab linked issues (Related to)","description":"## Background\n\nGitLab's \"Linked items\" provides bidirectional issue linking distinct from \"closes\" and \"mentioned\" references. This data is only available via the issue links API (GET /projects/:id/issues/:iid/links).\n\n**IMPORTANT:** This bead uses migration **017** (after bd-2y79's migration 016). Coordinate numbering.\n\n## Codebase Context\n\n- entity_references table (migration 011) with:\n - reference_type CHECK: 'closes' | 'mentioned' | 'related'\n - source_method CHECK: 'api' | 'note_parse' | 'description_parse'\n- pending_dependent_fetches: job_type CHECK 'resource_events' | 'mr_closes_issues' | 'mr_diffs'\n- **CRITICAL:** Adding 'issue_links' to job_type CHECK requires recreating pending_dependent_fetches table (SQLite can't ALTER CHECK constraints). 
Migration 017 must copy data, drop, recreate with expanded CHECK, and reinsert.\n- Orchestrator pattern: enqueue_job() + drain loop with claim/complete/fail (src/ingestion/orchestrator.rs)\n- dependent_queue.rs: enqueue_job(), claim_jobs(), complete_job(), fail_job()\n- GitLab issue links API returns link_type: \"relates_to\", \"blocks\", \"is_blocked_by\"\n- entity_references reference_type only has 'closes', 'mentioned', 'related' — \"blocks\"/\"is_blocked_by\" not modeled. Store all as 'related' with link_type in a JSON payload_json field or as a separate column in a future migration.\n\n## Approach\n\n### Phase 1: API Client (src/gitlab/client.rs)\n```rust\npub async fn fetch_issue_links(\n &self,\n project_id: i64,\n issue_iid: i64,\n) -> Result> {\n // GET /projects/:id/issues/:iid/links\n // Use fetch_all_pages() + coalesce_not_found()\n}\n```\n\n### Phase 2: Types (src/gitlab/types.rs)\n```rust\n#[derive(Debug, Deserialize)]\npub struct GitLabIssueLink {\n pub id: i64,\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub web_url: String,\n pub link_type: String, // \"relates_to\", \"blocks\", \"is_blocked_by\"\n pub link_created_at: Option,\n}\n```\n\n### Phase 3: Migration 017 (migrations/017_issue_links_job_type.sql)\nRecreate pending_dependent_fetches with expanded CHECK:\n```sql\nCREATE TABLE pending_dependent_fetches_new (\n id INTEGER PRIMARY KEY,\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n entity_type TEXT NOT NULL CHECK (entity_type IN ('issue', 'merge_request')),\n entity_iid INTEGER NOT NULL,\n entity_local_id INTEGER NOT NULL,\n job_type TEXT NOT NULL CHECK (job_type IN (\n 'resource_events', 'mr_closes_issues', 'mr_diffs', 'issue_links'\n )),\n payload_json TEXT,\n enqueued_at INTEGER NOT NULL,\n attempts INTEGER NOT NULL DEFAULT 0,\n last_error TEXT,\n next_retry_at INTEGER,\n locked_at INTEGER,\n UNIQUE(project_id, entity_type, entity_iid, job_type)\n);\nINSERT INTO pending_dependent_fetches_new SELECT 
* FROM pending_dependent_fetches;\nDROP TABLE pending_dependent_fetches;\nALTER TABLE pending_dependent_fetches_new RENAME TO pending_dependent_fetches;\n-- Recreate indexes from migration 011\n```\n\n### Phase 4: Ingestion (src/ingestion/issue_links.rs NEW)\n```rust\npub async fn fetch_and_store_issue_links(\n conn: &Connection,\n client: &GitLabClient,\n project_id: i64,\n issue_local_id: i64,\n issue_iid: i64,\n) -> Result {\n // 1. Fetch links from API\n // 2. Resolve target issue to local DB id (or store as unresolved)\n // 3. Insert into entity_references: reference_type='related', source_method='api'\n // 4. Create bidirectional refs: A->B and B->A\n // 5. Skip self-links\n}\n```\n\n### Phase 5: Queue Integration\n- Enqueue 'issue_links' job after issue ingestion in orchestrator\n- Add drain_issue_links() following drain_mr_closes_issues() pattern\n\n### Phase 6: Display\nIn `lore show issue 123`, add \"Related Issues\" section after closing MRs.\n\n## Acceptance Criteria\n\n- [ ] API client fetches issue links with pagination\n- [ ] Stored as entity_reference: reference_type='related', source_method='api'\n- [ ] Bidirectional: A links B creates both A->B and B->A references\n- [ ] link_type captured (relates_to, blocks, is_blocked_by) — stored as 'related' for now\n- [ ] Cross-project links stored as unresolved (target_entity_id NULL)\n- [ ] Self-links skipped\n- [ ] Migration 017 recreates pending_dependent_fetches with 'issue_links' in CHECK\n- [ ] `lore show issue 123` shows related issues section\n- [ ] `lore --robot show issue 123` includes related_issues in JSON\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- src/gitlab/client.rs (add fetch_issue_links)\n- src/gitlab/types.rs (add GitLabIssueLink)\n- src/ingestion/issue_links.rs (NEW)\n- src/ingestion/mod.rs (add pub mod issue_links)\n- src/ingestion/orchestrator.rs (enqueue + drain)\n- migrations/017_issue_links_job_type.sql (NEW — 
table recreation)\n- src/core/db.rs (add migration to MIGRATIONS array)\n- src/cli/commands/show.rs (display related issues)\n\n## TDD Loop\n\nRED:\n- test_issue_link_deserialization\n- test_store_issue_links_creates_bidirectional_references\n- test_self_link_skipped\n- test_cross_project_link_unresolved\n\nGREEN: Implement API client, ingestion, migration, display.\n\nVERIFY: cargo test --lib -- issue_links\n\n## Edge Cases\n\n- Cross-project links: target not in local DB -> unresolved reference\n- Self-links: skip\n- UNIQUE constraint prevents duplicate entity_references\n- \"blocks\"/\"is_blocked_by\" semantics not modeled in entity_references yet — store as 'related'\n- Table recreation migration: safe because pending_dependent_fetches is transient queue data\n- Migration numbering: 017 follows bd-2y79's migration 016","status":"open","priority":2,"issue_type":"feature","created_at":"2026-02-05T15:14:25.202900Z","created_by":"tayloreernisse","updated_at":"2026-02-05T20:16:28.629763Z","compaction_level":0,"original_size":0,"labels":["ISSUE"]} +{"id":"bd-343o","title":"Fetch and store GitLab linked issues (Related to)","description":"## Background\n\nGitLab's \"Linked items\" provides bidirectional issue linking distinct from \"closes\" and \"mentioned\" references. This data is only available via the issue links API (GET /projects/:id/issues/:iid/links). The goal is to fetch these links during sync and store them as entity_references so they appear in `lore show issue` and are queryable.\n\n**Why:** Currently `lore show issue` displays closing MRs (via `get_closing_mrs()` in show.rs:~line 1544) but has NO related issues section. 
This bead adds that capability.\n\n## Codebase Context\n\n- **entity_references table** (migration 011): reference_type CHECK: 'closes' | 'mentioned' | 'related'; source_method CHECK: 'api' | 'note_parse' | 'description_parse'\n- **pending_dependent_fetches** (migration 011): job_type CHECK: 'resource_events' | 'mr_closes_issues' | 'mr_diffs'. No later migrations modified this table.\n- **CRITICAL:** Adding 'issue_links' to job_type CHECK requires recreating pending_dependent_fetches table (SQLite can't ALTER CHECK constraints). Migration **027** must copy data, drop, recreate with expanded CHECK, and reinsert.\n- **Orchestrator** (src/ingestion/orchestrator.rs, 1745 lines): Three drain functions exist — drain_resource_events() (line 932), drain_mr_closes_issues() (line 1254), drain_mr_diffs() (line 1514). Follow the same claim/complete/fail pattern from dependent_queue.rs.\n- **dependent_queue.rs**: enqueue_job(), claim_jobs(), complete_job(), fail_job() with exponential backoff\n- **show.rs** (1544 lines): Has get_closing_mrs() for closing MR display. NO related_issues section exists yet.\n- **GitLab API**: GET /projects/:id/issues/:iid/links returns link_type: \"relates_to\", \"blocks\", \"is_blocked_by\"\n- **Migration count**: 26 migrations exist (001-026). 
Next migration = **027**.\n\n## Approach\n\n### Phase 1: API Client (src/gitlab/client.rs)\n```rust\npub async fn fetch_issue_links(\n &self,\n project_id: i64,\n issue_iid: i64,\n) -> Result> {\n // GET /projects/:id/issues/:iid/links\n // Use fetch_all_pages() + coalesce_not_found()\n}\n```\n\n### Phase 2: Types (src/gitlab/types.rs)\n```rust\n#[derive(Debug, Deserialize)]\npub struct GitLabIssueLink {\n pub id: i64,\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub web_url: String,\n pub link_type: String, // \"relates_to\", \"blocks\", \"is_blocked_by\"\n pub link_created_at: Option,\n}\n```\n\n### Phase 3: Migration 027 (migrations/027_issue_links_job_type.sql)\nRecreate pending_dependent_fetches with expanded CHECK:\n```sql\nCREATE TABLE pending_dependent_fetches_new (\n id INTEGER PRIMARY KEY,\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n entity_type TEXT NOT NULL CHECK (entity_type IN ('issue', 'merge_request')),\n entity_iid INTEGER NOT NULL,\n entity_local_id INTEGER NOT NULL,\n job_type TEXT NOT NULL CHECK (job_type IN (\n 'resource_events', 'mr_closes_issues', 'mr_diffs', 'issue_links'\n )),\n payload_json TEXT,\n enqueued_at INTEGER NOT NULL,\n attempts INTEGER NOT NULL DEFAULT 0,\n last_error TEXT,\n next_retry_at INTEGER,\n locked_at INTEGER,\n UNIQUE(project_id, entity_type, entity_iid, job_type)\n);\nINSERT INTO pending_dependent_fetches_new SELECT * FROM pending_dependent_fetches;\nDROP TABLE pending_dependent_fetches;\nALTER TABLE pending_dependent_fetches_new RENAME TO pending_dependent_fetches;\n-- Recreate indexes from migration 011 (idx_pdf_job_type, idx_pdf_next_retry)\n```\n\nRegister in MIGRATIONS array in src/core/db.rs (entry 27).\n\n### Phase 4: Ingestion (src/ingestion/issue_links.rs NEW)\n```rust\npub async fn fetch_and_store_issue_links(\n conn: &Connection,\n client: &GitLabClient,\n project_id: i64,\n issue_local_id: i64,\n issue_iid: i64,\n) -> Result {\n // 1. 
Fetch links from API\n // 2. Resolve target issue to local DB id (SELECT id FROM issues WHERE project_id=? AND iid=?)\n // 3. Insert into entity_references: reference_type='related', source_method='api'\n // 4. Create bidirectional refs: A->B and B->A\n // 5. Skip self-links\n // 6. Cross-project: store with target_entity_id=NULL (unresolved)\n}\n```\n\n### Phase 5: Queue Integration (src/ingestion/orchestrator.rs)\n- Enqueue 'issue_links' job after issue ingestion (near the existing resource_events enqueue)\n- Add drain_issue_links() following drain_mr_closes_issues() pattern (lines 1254-1512)\n- Config gate: add `sync.fetchIssueLinks` (default true) to config, like existing `sync.fetchResourceEvents`\n\n### Phase 6: Display (src/cli/commands/show.rs)\nIn `lore show issue 123`, add \"Related Issues\" section after closing MRs.\nPattern: query entity_references WHERE source_entity_type='issue' AND source_entity_id= AND reference_type='related'.\n\n## Acceptance Criteria\n\n- [ ] API client fetches issue links with pagination (fetch_all_pages + coalesce_not_found)\n- [ ] Stored as entity_reference: reference_type='related', source_method='api'\n- [ ] Bidirectional: A links B creates both A->B and B->A references\n- [ ] link_type captured (relates_to, blocks, is_blocked_by) — stored as 'related' for now\n- [ ] Cross-project links stored as unresolved (target_entity_id NULL)\n- [ ] Self-links skipped\n- [ ] Migration **027** recreates pending_dependent_fetches with 'issue_links' in CHECK\n- [ ] Migration registered in MIGRATIONS array in src/core/db.rs\n- [ ] `lore show issue 123` shows related issues section\n- [ ] `lore --robot show issue 123` includes related_issues in JSON\n- [ ] Config gate: sync.fetchIssueLinks (default true, camelCase serde rename)\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n- [ ] `cargo fmt --check` passes\n\n## Files\n\n- MODIFY: src/gitlab/client.rs (add fetch_issue_links)\n- MODIFY: 
src/gitlab/types.rs (add GitLabIssueLink)\n- CREATE: src/ingestion/issue_links.rs\n- MODIFY: src/ingestion/mod.rs (add pub mod issue_links)\n- MODIFY: src/ingestion/orchestrator.rs (enqueue + drain_issue_links)\n- CREATE: migrations/027_issue_links_job_type.sql\n- MODIFY: src/core/db.rs (add migration 027 to MIGRATIONS array)\n- MODIFY: src/core/config.rs (add sync.fetchIssueLinks)\n- MODIFY: src/cli/commands/show.rs (display related issues)\n\n## TDD Anchor\n\nRED:\n- test_issue_link_deserialization (types.rs: deserialize GitLabIssueLink from JSON)\n- test_store_issue_links_creates_bidirectional_references (in-memory DB, insert 2 issues, store link, verify 2 rows in entity_references)\n- test_self_link_skipped (same issue_iid both sides, verify 0 rows)\n- test_cross_project_link_unresolved (target not in DB, verify target_entity_id IS NULL)\n\nGREEN: Implement API client, ingestion, migration, display.\n\nVERIFY: cargo test --lib -- issue_links\n\n## Edge Cases\n\n- Cross-project links: target not in local DB -> unresolved reference (target_entity_id NULL)\n- Self-links: skip entirely\n- UNIQUE constraint on entity_references prevents duplicate refs on re-sync\n- \"blocks\"/\"is_blocked_by\" semantics not modeled in entity_references yet — store as 'related'\n- Table recreation migration: safe because pending_dependent_fetches is transient queue data that gets re-enqueued on next sync\n- Recreated table must restore indexes: idx_pdf_job_type, idx_pdf_next_retry (check migration 011 for exact definitions)\n\n## Dependency Context\n\n- **entity_references** (migration 011): provides the target table. 
reference_type='related' already in CHECK.\n- **dependent_queue.rs**: provides enqueue_job/claim_jobs/complete_job/fail_job lifecycle used by drain_issue_links()\n- **orchestrator drain pattern**: drain_mr_closes_issues() (line 1254) is the closest template — fetch API data, insert entity_references, complete job","status":"open","priority":2,"issue_type":"feature","created_at":"2026-02-05T15:14:25.202900Z","created_by":"tayloreernisse","updated_at":"2026-02-17T16:50:44.934373Z","compaction_level":0,"original_size":0,"labels":["ISSUE"]} {"id":"bd-34ek","title":"OBSERV: Implement MetricsLayer custom tracing subscriber layer","description":"## Background\nMetricsLayer is a custom tracing subscriber layer that records span timing and structured fields, then materializes them into Vec. This avoids threading a mutable collector through every function signature -- spans are the single source of truth.\n\n## Approach\nAdd to src/core/metrics.rs (same file as StageTiming):\n\n```rust\nuse std::collections::HashMap;\nuse std::sync::{Arc, Mutex};\nuse std::time::Instant;\nuse tracing::span::{Attributes, Id, Record};\nuse tracing::Subscriber;\nuse tracing_subscriber::layer::{Context, Layer};\nuse tracing_subscriber::registry::LookupSpan;\n\n#[derive(Debug)]\nstruct SpanData {\n name: String,\n parent_id: Option,\n start: Instant,\n fields: HashMap,\n}\n\n#[derive(Debug, Clone)]\npub struct MetricsLayer {\n spans: Arc>>,\n completed: Arc>>,\n}\n\nimpl MetricsLayer {\n pub fn new() -> Self {\n Self {\n spans: Arc::new(Mutex::new(HashMap::new())),\n completed: Arc::new(Mutex::new(Vec::new())),\n }\n }\n\n /// Extract timing tree for a completed run.\n /// Call this after the root span closes.\n pub fn extract_timings(&self) -> Vec {\n let completed = self.completed.lock().unwrap();\n // Build tree: find root entries (no parent), attach children\n // ... 
tree construction logic\n }\n}\n\nimpl Layer for MetricsLayer\nwhere\n S: Subscriber + for<'a> LookupSpan<'a>,\n{\n fn on_new_span(&self, attrs: &Attributes<'_>, id: &Id, ctx: Context<'_, S>) {\n let parent_id = ctx.span(id).and_then(|s| s.parent().map(|p| p.id()));\n let mut fields = HashMap::new();\n // Visit attrs to capture initial field values\n let mut visitor = FieldVisitor(&mut fields);\n attrs.record(&mut visitor);\n\n self.spans.lock().unwrap().insert(id.into_u64(), SpanData {\n name: attrs.metadata().name().to_string(),\n parent_id,\n start: Instant::now(),\n fields,\n });\n }\n\n fn on_record(&self, id: &Id, values: &Record<'_>, _ctx: Context<'_, S>) {\n // Capture recorded fields (items_processed, items_skipped, errors)\n if let Some(data) = self.spans.lock().unwrap().get_mut(&id.into_u64()) {\n let mut visitor = FieldVisitor(&mut data.fields);\n values.record(&mut visitor);\n }\n }\n\n fn on_close(&self, id: Id, _ctx: Context<'_, S>) {\n if let Some(data) = self.spans.lock().unwrap().remove(&id.into_u64()) {\n let elapsed = data.start.elapsed();\n let timing = StageTiming {\n name: data.name,\n project: data.fields.get(\"project\").and_then(|v| v.as_str()).map(String::from),\n elapsed_ms: elapsed.as_millis() as u64,\n items_processed: data.fields.get(\"items_processed\").and_then(|v| v.as_u64()).unwrap_or(0) as usize,\n items_skipped: data.fields.get(\"items_skipped\").and_then(|v| v.as_u64()).unwrap_or(0) as usize,\n errors: data.fields.get(\"errors\").and_then(|v| v.as_u64()).unwrap_or(0) as usize,\n sub_stages: vec![], // Will be populated during extract_timings tree construction\n };\n self.completed.lock().unwrap().push((id.into_u64(), timing));\n }\n }\n}\n```\n\nNeed a FieldVisitor struct implementing tracing::field::Visit to capture field values.\n\nRegister in subscriber stack (src/main.rs), alongside stderr and file layers:\n```rust\nlet metrics_layer = MetricsLayer::new();\nlet metrics_handle = metrics_layer.clone(); // Clone Arc for later 
extraction\n\nregistry()\n .with(stderr_layer.with_filter(stderr_filter))\n .with(file_layer.with_filter(file_filter))\n .with(metrics_layer) // No filter -- captures all spans\n .init();\n```\n\nPass metrics_handle to command handlers so they can call extract_timings() after the pipeline completes.\n\n## Acceptance Criteria\n- [ ] MetricsLayer captures span enter/close timing\n- [ ] on_record captures items_processed, items_skipped, errors fields\n- [ ] extract_timings() returns correctly nested Vec tree\n- [ ] Parallel spans (multiple projects) both appear as sub_stages of parent\n- [ ] Thread-safe: Arc> allows concurrent span operations\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/core/metrics.rs (add MetricsLayer, FieldVisitor, tree construction)\n- src/main.rs (register MetricsLayer in subscriber stack)\n\n## TDD Loop\nRED:\n - test_metrics_layer_single_span: enter/exit one span, extract, assert one StageTiming\n - test_metrics_layer_nested_spans: parent + child, assert child in parent.sub_stages\n - test_metrics_layer_parallel_spans: two sibling spans, assert both in parent.sub_stages\n - test_metrics_layer_field_recording: record items_processed=42, assert captured\nGREEN: Implement MetricsLayer with on_new_span, on_record, on_close, extract_timings\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- Span ID reuse: tracing may reuse span IDs after close. Using remove on close prevents stale data.\n- Lock contention: Mutex per operation. For high-span-count scenarios, consider parking_lot::Mutex. But lore's span count is low (<100 per run), so std::sync::Mutex is fine.\n- extract_timings tree construction: iterate completed Vec, build parent->children map, then recursively construct StageTiming tree. Root entries have parent_id matching the root span or None.\n- MetricsLayer has no filter: it sees ALL spans. 
To avoid noise from dependency spans, check if span name starts with known stage names, or rely on the \"stage\" field being present.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:54:31.960669Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:25:25.523811Z","closed_at":"2026-02-04T17:25:25.523730Z","close_reason":"Implemented MetricsLayer custom tracing subscriber layer with span timing capture, rate-limit/retry event detection, tree extraction, and 12 unit tests","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-34ek","depends_on_id":"bd-1o4h","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-34ek","depends_on_id":"bd-24j1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-34ek","depends_on_id":"bd-3er","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-34o","title":"Implement MR transformer","description":"## Background\nTransforms GitLab MR API responses into normalized schema for database storage. 
Handles deprecated field fallbacks and extracts metadata (labels, assignees, reviewers).\n\n## Approach\nCreate new transformer module following existing issue transformer pattern:\n- `NormalizedMergeRequest` - Database-ready struct\n- `MergeRequestWithMetadata` - MR + extracted labels/assignees/reviewers\n- `transform_merge_request()` - Main transformation function\n- `extract_labels()` - Label extraction helper\n\n## Files\n- `src/gitlab/transformers/merge_request.rs` - New transformer module\n- `src/gitlab/transformers/mod.rs` - Export new module\n- `tests/mr_transformer_tests.rs` - Unit tests\n\n## Acceptance Criteria\n- [ ] `NormalizedMergeRequest` struct exists with all DB columns\n- [ ] `MergeRequestWithMetadata` contains MR + label_names + assignee_usernames + reviewer_usernames\n- [ ] `transform_merge_request()` returns `Result`\n- [ ] `draft` computed as `gitlab_mr.draft || gitlab_mr.work_in_progress`\n- [ ] `detailed_merge_status` prefers `detailed_merge_status` over `merge_status_legacy`\n- [ ] `merge_user_username` prefers `merge_user` over `merged_by`\n- [ ] `head_sha` extracted from `sha` field\n- [ ] `references_short` and `references_full` extracted from `references` Option\n- [ ] Timestamps parsed with `iso_to_ms()`, errors returned (not zeroed)\n- [ ] `last_seen_at` set to `now_ms()`\n- [ ] `cargo test mr_transformer` passes\n\n## TDD Loop\nRED: `cargo test mr_transformer` -> module not found\nGREEN: Add transformer with all fields\nVERIFY: `cargo test mr_transformer`\n\n## Struct Definitions\n```rust\n#[derive(Debug, Clone)]\npub struct NormalizedMergeRequest {\n pub gitlab_id: i64,\n pub project_id: i64,\n pub iid: i64,\n pub title: String,\n pub description: Option,\n pub state: String,\n pub draft: bool,\n pub author_username: String,\n pub source_branch: String,\n pub target_branch: String,\n pub head_sha: Option,\n pub references_short: Option,\n pub references_full: Option,\n pub detailed_merge_status: Option,\n pub merge_user_username: 
Option,\n pub created_at: i64,\n pub updated_at: i64,\n pub merged_at: Option,\n pub closed_at: Option,\n pub last_seen_at: i64,\n pub web_url: String,\n}\n\n#[derive(Debug, Clone)]\npub struct MergeRequestWithMetadata {\n pub merge_request: NormalizedMergeRequest,\n pub label_names: Vec,\n pub assignee_usernames: Vec,\n pub reviewer_usernames: Vec,\n}\n```\n\n## Function Signature\n```rust\npub fn transform_merge_request(\n gitlab_mr: &GitLabMergeRequest,\n local_project_id: i64,\n) -> Result\n```\n\n## Key Logic\n```rust\n// Draft: prefer draft, fallback to work_in_progress\nlet is_draft = gitlab_mr.draft || gitlab_mr.work_in_progress;\n\n// Merge status: prefer detailed_merge_status\nlet detailed_merge_status = gitlab_mr.detailed_merge_status\n .clone()\n .or_else(|| gitlab_mr.merge_status_legacy.clone());\n\n// Merge user: prefer merge_user\nlet merge_user_username = gitlab_mr.merge_user\n .as_ref()\n .map(|u| u.username.clone())\n .or_else(|| gitlab_mr.merged_by.as_ref().map(|u| u.username.clone()));\n\n// References extraction\nlet (references_short, references_full) = gitlab_mr.references\n .as_ref()\n .map(|r| (Some(r.short.clone()), Some(r.full.clone())))\n .unwrap_or((None, None));\n\n// Head SHA\nlet head_sha = gitlab_mr.sha.clone();\n```\n\n## Edge Cases\n- Invalid timestamps should return `Err`, not zero values\n- Empty labels/assignees/reviewers should return empty Vecs, not None\n- `state` must pass through as-is (including 
\"locked\")","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:40.849049Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:11:48.501301Z","closed_at":"2026-01-27T00:11:48.501241Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-34o","depends_on_id":"bd-3ir","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-34o","depends_on_id":"bd-5ta","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-34rr","title":"WHO: Migration 017 — composite indexes for query paths","description":"## Background\n\nWith 280K notes, the path/timestamp queries for lore who will degrade without composite indexes. Existing indexes cover note_type and position_new_path separately (migration 006) but not as composites aligned to the who query patterns. This is a non-breaking, additive-only migration.\n\n## Approach\n\nAdd as entry 17 (index 16) in the MIGRATIONS array in src/core/db.rs. 
LATEST_SCHEMA_VERSION auto-updates via MIGRATIONS.len() as i32.\n\n### Exact SQL for the migration entry:\n\n```sql\n-- Migration 017: Composite indexes for who query paths\n\n-- Expert/Overlap: DiffNote path prefix + timestamp filter.\n-- Leading with position_new_path (not note_type) because the partial index\n-- predicate already handles the constant filter.\nCREATE INDEX IF NOT EXISTS idx_notes_diffnote_path_created\n ON notes(position_new_path, created_at, project_id)\n WHERE note_type = 'DiffNote' AND is_system = 0;\n\n-- Active/Workload: discussion participation lookups.\nCREATE INDEX IF NOT EXISTS idx_notes_discussion_author\n ON notes(discussion_id, author_username)\n WHERE is_system = 0;\n\n-- Active (project-scoped): unresolved discussions by recency.\nCREATE INDEX IF NOT EXISTS idx_discussions_unresolved_recent\n ON discussions(project_id, last_note_at)\n WHERE resolvable = 1 AND resolved = 0;\n\n-- Active (global): unresolved discussions by recency (no project scope).\n-- Without this, (project_id, last_note_at) can't satisfy ORDER BY last_note_at DESC\n-- efficiently when project_id is unconstrained.\nCREATE INDEX IF NOT EXISTS idx_discussions_unresolved_recent_global\n ON discussions(last_note_at)\n WHERE resolvable = 1 AND resolved = 0;\n\n-- Workload: issue assignees by username.\nCREATE INDEX IF NOT EXISTS idx_issue_assignees_username\n ON issue_assignees(username, issue_id);\n```\n\n### Not added (already adequate):\n- merge_requests(author_username) — idx_mrs_author (migration 006)\n- mr_reviewers(username) — idx_mr_reviewers_username (migration 006)\n- notes(discussion_id) — idx_notes_discussion (migration 002)\n\n## Files\n\n- `src/core/db.rs` — append to MIGRATIONS array as entry index 16\n\n## TDD Loop\n\nRED: `cargo test -- test_migration` (existing migration tests should still pass)\nGREEN: Add the migration SQL string to the array\nVERIFY: `cargo test && cargo check --all-targets`\n\n## Acceptance Criteria\n\n- [ ] MIGRATIONS array has 17 
entries (index 0-16)\n- [ ] LATEST_SCHEMA_VERSION is 17\n- [ ] cargo test passes (in-memory DB runs all migrations including 017)\n- [ ] No existing index names conflict\n\n## Edge Cases\n\n- The SQL uses CREATE INDEX IF NOT EXISTS — safe for idempotent reruns\n- Partial indexes (WHERE clause) keep index size small: ~33K of 280K notes for DiffNote index","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:39:49.397860Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.593561Z","closed_at":"2026-02-08T04:10:29.593519Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. All quality gates pass.","compaction_level":0,"original_size":0} -{"id":"bd-35g5","title":"Implement Dashboard state + action + view","description":"## Background\nThe Dashboard is the home screen — first thing users see. It shows entity counts, per-project sync status, recent activity, and a last-sync summary. Data comes from aggregation queries against the local SQLite database.\n\n## Approach\nState (state/dashboard.rs):\n- DashboardState: counts (EntityCounts), projects (Vec), recent (Vec), last_sync (LastSyncInfo)\n- EntityCounts: issues_open, issues_total, mrs_open, mrs_total, discussions, notes_total, notes_system_pct, documents, embeddings\n- ProjectSyncInfo: path (String), minutes_since_sync (u64)\n- RecentActivityItem: entity_type, iid, title, state, minutes_ago\n- update(data: DashboardData) method\n\nAction (action.rs):\n- fetch_dashboard(conn: &Connection, clock: &dyn Clock) -> Result: runs aggregation queries for counts, recent activity, project sync status. 
Uses clock.now() for relative time calculations.\n\nView (view/dashboard.rs):\n- render_dashboard(frame, state: &DashboardState, area: Rect, theme: &Theme): responsive layout with breakpoints\n - Wide (>=120 cols): 3-column: [Stats | Projects | Recent]\n - Medium (80-119): 2-column: [Stats+Projects | Recent]\n - Narrow (<80): single column stacked\n- render_stat_panel(): entity counts with colored numbers\n- render_project_list(): project names with sync staleness indicators\n- render_recent_activity(): scrollable list of recent changes\n- render_sync_summary(): last sync stats (if available)\n\n## Acceptance Criteria\n- [ ] DashboardState stores counts, projects, recent activity, last sync info\n- [ ] fetch_dashboard returns correct counts from DB\n- [ ] Dashboard renders with responsive breakpoints (3/2/1 column layouts)\n- [ ] Entity counts show open/total for issues and MRs\n- [ ] Project list shows sync staleness with color coding (green <1h, yellow <6h, red >6h)\n- [ ] Recent activity list is scrollable with j/k\n- [ ] Relative timestamps use injected Clock (not wall-clock)\n\n## Files\n- MODIFY: crates/lore-tui/src/state/dashboard.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_dashboard)\n- CREATE: crates/lore-tui/src/view/dashboard.rs\n\n## TDD Anchor\nRED: Write test_fetch_dashboard_counts in action.rs that creates in-memory DB with 5 issues (3 open, 2 closed), calls fetch_dashboard, asserts issues_open=3, issues_total=5.\nGREEN: Implement fetch_dashboard with COUNT queries.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_dashboard\n\n## Edge Cases\n- Empty database (first launch before sync): all counts should be 0, no crash\n- Very long project paths: truncate to fit column width\n- notes_system_pct: compute as (system_notes * 100 / total_notes), handle division by zero\n- Clock injection ensures snapshot tests are deterministic (no \"3 minutes ago\" changing between runs)\n\n## Dependency 
Context\nUses AppState, DashboardState, LoadState from \"Implement AppState composition\" task.\nUses DbManager from \"Implement DbManager\" task.\nUses Clock from \"Implement Clock trait\" task.\nUses theme from \"Implement theme configuration\" task.\nUses render_screen routing from \"Implement common widgets\" task.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T16:57:44.419736Z","created_by":"tayloreernisse","updated_at":"2026-02-12T21:24:51.764669Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-35g5","depends_on_id":"bd-14q8","type":"blocks","created_at":"2026-02-12T21:24:51.677908Z","created_by":"tayloreernisse"},{"issue_id":"bd-35g5","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-35g5","depends_on_id":"bd-1f5b","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-35g5","depends_on_id":"bd-26f2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-35g5","depends_on_id":"bd-29wn","type":"blocks","created_at":"2026-02-12T21:24:51.764637Z","created_by":"tayloreernisse"},{"issue_id":"bd-35g5","depends_on_id":"bd-3pm2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-35g5","depends_on_id":"bd-6pmy","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-35g5","title":"Implement Dashboard state + action + view","description":"## Background\nThe Dashboard is the home screen — first thing users see. It shows entity counts, per-project sync status, recent activity, and a last-sync summary. 
Data comes from aggregation queries against the local SQLite database.\n\n## Approach\nState (state/dashboard.rs):\n- DashboardState: counts (EntityCounts), projects (Vec), recent (Vec), last_sync (LastSyncInfo)\n- EntityCounts: issues_open, issues_total, mrs_open, mrs_total, discussions, notes_total, notes_system_pct, documents, embeddings\n- ProjectSyncInfo: path (String), minutes_since_sync (u64)\n- RecentActivityItem: entity_type, iid, title, state, minutes_ago\n- update(data: DashboardData) method\n\nAction (action.rs):\n- fetch_dashboard(conn: &Connection, clock: &dyn Clock) -> Result: runs aggregation queries for counts, recent activity, project sync status. Uses clock.now() for relative time calculations.\n\nView (view/dashboard.rs):\n- render_dashboard(frame, state: &DashboardState, area: Rect, theme: &Theme): responsive layout with breakpoints\n - Wide (>=120 cols): 3-column: [Stats | Projects | Recent]\n - Medium (80-119): 2-column: [Stats+Projects | Recent]\n - Narrow (<80): single column stacked\n- render_stat_panel(): entity counts with colored numbers\n- render_project_list(): project names with sync staleness indicators\n- render_recent_activity(): scrollable list of recent changes\n- render_sync_summary(): last sync stats (if available)\n\n## Acceptance Criteria\n- [ ] DashboardState stores counts, projects, recent activity, last sync info\n- [ ] fetch_dashboard returns correct counts from DB\n- [ ] Dashboard renders with responsive breakpoints (3/2/1 column layouts)\n- [ ] Entity counts show open/total for issues and MRs\n- [ ] Project list shows sync staleness with color coding (green <1h, yellow <6h, red >6h)\n- [ ] Recent activity list is scrollable with j/k\n- [ ] Relative timestamps use injected Clock (not wall-clock)\n\n## Files\n- MODIFY: crates/lore-tui/src/state/dashboard.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_dashboard)\n- CREATE: crates/lore-tui/src/view/dashboard.rs\n\n## TDD Anchor\nRED: Write 
test_fetch_dashboard_counts in action.rs that creates in-memory DB with 5 issues (3 open, 2 closed), calls fetch_dashboard, asserts issues_open=3, issues_total=5.\nGREEN: Implement fetch_dashboard with COUNT queries.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_dashboard\n\n## Edge Cases\n- Empty database (first launch before sync): all counts should be 0, no crash\n- Very long project paths: truncate to fit column width\n- notes_system_pct: compute as (system_notes * 100 / total_notes), handle division by zero\n- Clock injection ensures snapshot tests are deterministic (no \"3 minutes ago\" changing between runs)\n\n## Dependency Context\nUses AppState, DashboardState, LoadState from \"Implement AppState composition\" task.\nUses DbManager from \"Implement DbManager\" task.\nUses Clock from \"Implement Clock trait\" task.\nUses theme from \"Implement theme configuration\" task.\nUses render_screen routing from \"Implement common widgets\" task.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:57:44.419736Z","created_by":"tayloreernisse","updated_at":"2026-02-18T19:10:21.107116Z","closed_at":"2026-02-18T19:10:21.106958Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-35g5","depends_on_id":"bd-14q8","type":"blocks","created_at":"2026-02-12T21:24:51.677908Z","created_by":"tayloreernisse"},{"issue_id":"bd-35g5","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-35g5","depends_on_id":"bd-1f5b","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-35g5","depends_on_id":"bd-26f2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-35g5","depends_on_id":"bd-29wn","type":"blocks","created_at":"2026-02-12T21:24:51.764637Z","created_by":"tayloreernisse"},{"issue_id":"bd-35g5","depends_on_id":"bd-3pm2","type":"blocks","created_at":"2026-0
2-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-35g5","depends_on_id":"bd-6pmy","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-35o","title":"Create golden query test suite","description":"## Background\nGolden query tests verify end-to-end search quality with known-good expected results. They use a seeded SQLite DB with deterministic fixture data and fixed embedding vectors (no Ollama dependency). Each test query must return at least one expected URL in the top 10 results. These tests catch search regressions (ranking changes, filter bugs, missing results).\n\n## Approach\nCreate test infrastructure:\n\n**1. tests/fixtures/golden_queries.json:**\n```json\n[\n {\n \"query\": \"authentication login\",\n \"mode\": \"lexical\",\n \"filters\": {},\n \"expected_urls\": [\"https://gitlab.example.com/group/project/-/issues/234\"],\n \"min_results\": 1,\n \"max_rank\": 10\n },\n {\n \"query\": \"jwt token refresh\",\n \"mode\": \"hybrid\",\n \"filters\": {\"type\": \"merge_request\"},\n \"expected_urls\": [\"https://gitlab.example.com/group/project/-/merge_requests/456\"],\n \"min_results\": 1,\n \"max_rank\": 10\n }\n]\n```\n\n**2. 
Test harness (tests/golden_query_tests.rs):**\n- Load golden_queries.json\n- Create in-memory DB, apply all migrations\n- Seed with deterministic fixture documents (issues, MRs, discussions)\n- For hybrid/semantic queries: seed with fixed embedding vectors (768-dim, manually constructed for known similarity)\n- For each query: run search, verify expected URL in top N results\n\n**Fixture data design:**\n- 10-20 documents covering different source types\n- Known content that matches expected queries\n- Fixed embeddings: construct vectors where similar documents have small cosine distance\n- No randomness — fully deterministic\n\n## Acceptance Criteria\n- [ ] Golden queries file exists with at least 5 test queries\n- [ ] Test harness loads queries and validates each\n- [ ] All golden queries pass: expected URL in top 10\n- [ ] No external dependencies (no Ollama, no GitLab)\n- [ ] Deterministic fixture data (fixed embeddings, fixed content)\n- [ ] `cargo test --test golden_query_tests` passes in CI\n\n## Files\n- `tests/fixtures/golden_queries.json` — new file\n- `tests/golden_query_tests.rs` — new file (or tests/golden_queries.rs)\n\n## TDD Loop\nRED: Create golden_queries.json with expected results, harness fails (no fixture data)\nGREEN: Seed fixture data that satisfies expected results\nVERIFY: `cargo test --test golden_query_tests`\n\n## Edge Cases\n- Query matches multiple expected URLs: all must be present\n- Lexical queries: FTS ranking determines position, not vector\n- Hybrid queries: RRF combines both signals — fixed vectors must be designed to produce expected ranking\n- Empty result for a golden query: test failure with clear message showing actual results","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-30T15:27:21.788493Z","created_by":"tayloreernisse","updated_at":"2026-01-30T18:12:47.085563Z","closed_at":"2026-01-30T18:12:47.085363Z","close_reason":"Golden query test suite: 7 golden queries in fixture, 8 seeded documents, 2 
test functions (all_pass + fixture_valid), deterministic in-memory DB, no external deps. 312 total tests pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-35o","depends_on_id":"bd-2no","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-35r","title":"[CP1] Discussion and note transformers","description":"Transform GitLab discussion/note payloads to normalized database schema.\n\nFunctions to implement:\n- transformDiscussion(gitlabDiscussion, localProjectId, localIssueId) → NormalizedDiscussion\n- transformNotes(gitlabDiscussion, localProjectId) → NormalizedNote[]\n\nTransformation rules:\n- Compute first_note_at/last_note_at from notes array\n- Compute resolvable/resolved status from notes\n- Set is_system from note.system\n- Preserve note order via position (array index)\n- Convert ISO timestamps to ms epoch\n\nFiles: src/gitlab/transformers/discussion.ts\nTests: tests/unit/discussion-transformer.test.ts\nDone when: Unit tests pass for discussion/note transformation with system note flagging","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:19:16.861421Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.154646Z","closed_at":"2026-01-25T15:21:35.154646Z","deleted_at":"2026-01-25T15:21:35.154643Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-36m","title":"Final validation and test coverage","description":"## Background\nFinal validation gate ensuring all CP2 features work correctly. Verifies tests, lint, and manual smoke tests pass.\n\n## Approach\nRun comprehensive validation:\n1. Automated tests (unit + integration)\n2. Clippy and formatting\n3. Critical test case verification\n4. Gate A/B/C/D/E checklist\n5. 
Manual smoke tests\n\n## Files\nNone - validation only\n\n## Acceptance Criteria\n- [ ] `cargo test` passes (all tests green)\n- [ ] `cargo test --release` passes\n- [ ] `cargo clippy -- -D warnings` passes (zero warnings)\n- [ ] `cargo fmt --check` passes\n- [ ] Critical tests pass (see list below)\n- [ ] Gate A/B/C/D/E verification complete\n- [ ] Manual smoke tests pass\n\n## Validation Commands\n```bash\n# 1. Build and test\ncargo build --release\ncargo test --release\n\n# 2. Lint\ncargo clippy -- -D warnings\ncargo fmt --check\n\n# 3. Run specific critical tests\ncargo test does_not_advance_discussion_watermark_on_partial_failure\ncargo test prefers_detailed_merge_status_when_both_fields_present\ncargo test prefers_merge_user_when_both_fields_present\ncargo test prefers_draft_when_both_draft_and_work_in_progress_present\ncargo test atomic_note_replacement_preserves_data_on_parse_failure\ncargo test full_sync_resets_discussion_watermarks\n```\n\n## Critical Test Cases\n| Test | What It Verifies |\n|------|------------------|\n| `does_not_advance_discussion_watermark_on_partial_failure` | Pagination failure doesn't lose data |\n| `prefers_detailed_merge_status_when_both_fields_present` | Non-deprecated field wins |\n| `prefers_merge_user_when_both_fields_present` | Non-deprecated field wins |\n| `prefers_draft_when_both_draft_and_work_in_progress_present` | OR semantics for draft |\n| `atomic_note_replacement_preserves_data_on_parse_failure` | Parse before delete |\n| `full_sync_resets_discussion_watermarks` | --full truly refreshes |\n\n## Gate Checklist\n\n### Gate A: MRs Only\n- [ ] `gi ingest --type=merge_requests` fetches all MRs\n- [ ] MR state supports: opened, merged, closed, locked\n- [ ] draft field captured with work_in_progress fallback\n- [ ] detailed_merge_status used with merge_status fallback\n- [ ] head_sha and references captured\n- [ ] Cursor-based sync is resumable\n\n### Gate B: Labels + Assignees + Reviewers\n- [ ] Labels linked via 
mr_labels junction\n- [ ] Stale labels removed on resync\n- [ ] Assignees linked via mr_assignees\n- [ ] Reviewers linked via mr_reviewers\n\n### Gate C: Dependent Discussion Sync\n- [ ] Discussions fetched for MRs with updated_at advancement\n- [ ] DiffNote position metadata captured\n- [ ] DiffNote SHA triplet captured\n- [ ] Upsert + sweep pattern for notes\n- [ ] Watermark NOT advanced on partial failure\n- [ ] Unchanged MRs skip discussion refetch\n\n### Gate D: Resumability Proof\n- [ ] Kill mid-run, rerun -> bounded redo\n- [ ] `--full` resets cursor AND discussion watermarks\n- [ ] Single-flight lock prevents concurrent runs\n\n### Gate E: CLI Complete\n- [ ] `gi list mrs` with all filters including --draft/--no-draft\n- [ ] `gi show mr ` with discussions and DiffNote context\n- [ ] `gi count mrs` with state breakdown\n- [ ] `gi sync-status` shows MR cursors\n\n## Manual Smoke Tests\n| Command | Expected |\n|---------|----------|\n| `gi ingest --type=merge_requests` | Completes, shows counts |\n| `gi list mrs --limit=10` | Shows 10 MRs with correct columns |\n| `gi list mrs --state=merged` | Only merged MRs |\n| `gi list mrs --draft` | Only draft MRs with [DRAFT] prefix |\n| `gi show mr ` | Full detail with discussions |\n| `gi count mrs` | Count with state breakdown |\n| Re-run ingest | \"0 new MRs\", skipped discussion count |\n| `gi ingest --type=merge_requests --full` | Full resync |\n\n## Data Integrity Checks\n```sql\n-- MR count matches GitLab\nSELECT COUNT(*) FROM merge_requests;\n\n-- Every MR has raw payload\nSELECT COUNT(*) FROM merge_requests WHERE raw_payload_id IS NULL;\n-- Should be 0\n\n-- Labels linked correctly\nSELECT m.iid, COUNT(ml.label_id) \nFROM merge_requests m\nLEFT JOIN mr_labels ml ON ml.merge_request_id = m.id\nGROUP BY m.id;\n\n-- DiffNotes have position metadata\nSELECT COUNT(*) FROM notes WHERE position_new_path IS NOT 
NULL;\n```","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:43.697983Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:45:17.794393Z","closed_at":"2026-01-27T00:45:17.794325Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-36m","depends_on_id":"bd-3js","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-36m","depends_on_id":"bd-mk3","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -196,12 +206,13 @@ {"id":"bd-3a4k","title":"CLI: list issues status column, filter, and robot fields","description":"## Background\nList issues needs a Status column in the table, status fields in robot JSON, and a --status filter for querying by work item status name. The filter supports multiple values (OR semantics) and case-insensitive matching.\n\n## Approach\nExtend list.rs row types, SQL, table rendering. Add --status Vec to clap args. Build dynamic WHERE clause with COLLATE NOCASE. Wire into both ListFilters constructions in main.rs. Register in autocorrect.\n\n## Files\n- src/cli/commands/list.rs (row types, SQL, table, filter, color helper)\n- src/cli/mod.rs (--status flag on IssuesArgs)\n- src/main.rs (wire statuses into both ListFilters)\n- src/cli/autocorrect.rs (add --status to COMMAND_FLAGS)\n\n## Implementation\n\nIssueListRow + IssueListRowJson: add 5 status fields (all Option)\nFrom<&IssueListRow> for IssueListRowJson: clone all 5 fields\n\nquery_issues SELECT: add i.status_name, i.status_category, i.status_color, i.status_icon_name, i.status_synced_at after existing columns\n Existing SELECT has 12 columns (indices 0-11). New columns: indices 12-16.\n Row mapping: status_name: row.get(12)?, ..., status_synced_at: row.get(16)?\n\nListFilters: add pub statuses: &'a [String]\n\nWHERE clause builder (after has_due_date block):\n if statuses.len() == 1: \"i.status_name = ? 
COLLATE NOCASE\" + push param\n if statuses.len() > 1: \"i.status_name IN (?, ?, ...) COLLATE NOCASE\" + push all params\n\nTable: add \"Status\" column header (bold) between State and Assignee\n Row: match &issue.status_name -> Some: colored_cell_hex(status, color), None: Cell::new(\"\")\n\nNew helper:\n fn colored_cell_hex(content, hex: Option<&str>) -> Cell\n If no hex or colors disabled: Cell::new(content)\n Parse 6-char hex, use Cell::new(content).fg(Color::Rgb { r, g, b })\n\nIn src/cli/mod.rs IssuesArgs:\n #[arg(long, help_heading = \"Filters\")]\n pub status: Vec,\n\nIn src/main.rs handle_issues (~line 695):\n ListFilters { ..., statuses: &args.status }\nIn legacy List handler (~line 2421):\n ListFilters { ..., statuses: &[] }\n\nIn src/cli/autocorrect.rs COMMAND_FLAGS \"issues\" entry:\n Add \"--status\" between existing flags\n\n## Acceptance Criteria\n- [ ] Status column appears in table between State and Assignee\n- [ ] NULL status -> empty cell\n- [ ] Status colored by hex in human mode\n- [ ] --status \"In progress\" filters correctly\n- [ ] --status \"in progress\" matches \"In progress\" (COLLATE NOCASE)\n- [ ] --status \"To do\" --status \"In progress\" -> OR semantics (both returned)\n- [ ] Robot: status_name, status_category in each issue JSON\n- [ ] --fields supports status_name, status_category, status_color, status_icon_name, status_synced_at\n- [ ] --fields minimal does NOT include status fields\n- [ ] Autocorrect registry test passes (--status registered)\n- [ ] cargo check --all-targets passes\n\n## TDD Loop\nRED: test_list_filter_by_status, test_list_filter_by_status_case_insensitive, test_list_filter_by_multiple_statuses\nGREEN: Implement all changes across 4 files\nVERIFY: cargo test list_filter && cargo test registry_covers\n\n## Edge Cases\n- COLLATE NOCASE is ASCII-only but sufficient (all system statuses are ASCII)\n- Single-value uses = for simplicity; multi-value uses IN with dynamic placeholders\n- --status combined with other 
filters (--state, --label) -> AND logic\n- autocorrect registry_covers_command_flags test will FAIL if --status not registered\n- Legacy List command path also constructs ListFilters — needs statuses: &[]\n- Column index offset: new columns start at 12 (0-indexed)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:42:26.438Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.421297Z","closed_at":"2026-02-11T07:21:33.421247Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3a4k","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3a4k","depends_on_id":"bd-3dum","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3ae","title":"Epic: CP2 Gate A - MRs Only","description":"## Background\nGate A validates core MR ingestion works before adding complexity. Proves the cursor-based sync, pagination, and basic CLI work. 
This is the foundation - if Gate A fails, nothing else matters.\n\n## Acceptance Criteria (Pass/Fail)\n- [ ] `gi ingest --type=merge_requests` completes without error\n- [ ] `SELECT COUNT(*) FROM merge_requests` > 0\n- [ ] `gi list mrs --limit=5` shows 5 MRs with iid, title, state, author\n- [ ] `gi count mrs` shows total count matching DB query\n- [ ] MR with `state=locked` can be stored (if exists in test data)\n- [ ] Draft MR shows `draft=1` in DB and `[DRAFT]` in list output\n- [ ] `work_in_progress=true` MR shows `draft=1` (fallback works)\n- [ ] `head_sha` populated for MRs with commits\n- [ ] `references_short` and `references_full` populated\n- [ ] Re-run ingest shows \"0 new MRs\" or minimal refetch (cursor working)\n- [ ] Cursor saved at page boundary, not item boundary\n\n## Validation Script\n```bash\n#!/bin/bash\nset -e\n\nDB_PATH=\"${XDG_DATA_HOME:-$HOME/.local/share}/gitlab-inbox/db.sqlite3\"\n\necho \"=== Gate A: MRs Only ===\"\n\n# 1. Clear any existing MR data for clean test\necho \"Step 1: Reset MR cursor for clean test...\"\nsqlite3 \"$DB_PATH\" \"DELETE FROM sync_cursors WHERE resource_type = 'merge_requests';\"\n\n# 2. Run MR ingestion\necho \"Step 2: Ingest MRs...\"\ngi ingest --type=merge_requests\n\n# 3. Verify MRs exist\necho \"Step 3: Verify MR count...\"\nMR_COUNT=$(sqlite3 \"$DB_PATH\" \"SELECT COUNT(*) FROM merge_requests;\")\necho \" MR count: $MR_COUNT\"\n[ \"$MR_COUNT\" -gt 0 ] || { echo \"FAIL: No MRs ingested\"; exit 1; }\n\n# 4. Verify list command\necho \"Step 4: Test list command...\"\ngi list mrs --limit=5\n\n# 5. Verify count command\necho \"Step 5: Test count command...\"\ngi count mrs\n\n# 6. Verify draft handling\necho \"Step 6: Check draft MRs...\"\nDRAFT_COUNT=$(sqlite3 \"$DB_PATH\" \"SELECT COUNT(*) FROM merge_requests WHERE draft = 1;\")\necho \" Draft MR count: $DRAFT_COUNT\"\n\n# 7. 
Verify head_sha population\necho \"Step 7: Check head_sha...\"\nSHA_COUNT=$(sqlite3 \"$DB_PATH\" \"SELECT COUNT(*) FROM merge_requests WHERE head_sha IS NOT NULL;\")\necho \" MRs with head_sha: $SHA_COUNT\"\n\n# 8. Verify references\necho \"Step 8: Check references...\"\nREF_COUNT=$(sqlite3 \"$DB_PATH\" \"SELECT COUNT(*) FROM merge_requests WHERE references_short IS NOT NULL;\")\necho \" MRs with references: $REF_COUNT\"\n\n# 9. Verify cursor saved\necho \"Step 9: Check cursor...\"\nCURSOR=$(sqlite3 \"$DB_PATH\" \"SELECT updated_at, gitlab_id FROM sync_cursors WHERE resource_type = 'merge_requests';\")\necho \" Cursor: $CURSOR\"\n[ -n \"$CURSOR\" ] || { echo \"FAIL: Cursor not saved\"; exit 1; }\n\n# 10. Re-run and verify minimal refetch\necho \"Step 10: Re-run ingest (should be minimal)...\"\ngi ingest --type=merge_requests\n# Output should show minimal or zero new MRs\n\necho \"\"\necho \"=== Gate A: PASSED ===\"\n```\n\n## Test Commands (Quick Verification)\n```bash\n# Run these in order:\ngi ingest --type=merge_requests\ngi list mrs --limit=10\ngi count mrs\n\n# Verify in DB:\nsqlite3 ~/.local/share/gitlab-inbox/db.sqlite3 \"\n SELECT \n COUNT(*) as total,\n SUM(CASE WHEN draft = 1 THEN 1 ELSE 0 END) as drafts,\n SUM(CASE WHEN head_sha IS NOT NULL THEN 1 ELSE 0 END) as with_sha,\n SUM(CASE WHEN references_short IS NOT NULL THEN 1 ELSE 0 END) as with_refs\n FROM merge_requests;\n\"\n\n# Re-run (should be no-op):\ngi ingest --type=merge_requests\n```\n\n## Dependencies\nThis gate requires these beads to be complete:\n- bd-3ir (Database migration)\n- bd-5ta (GitLab MR types)\n- bd-34o (MR transformer)\n- bd-iba (GitLab client pagination)\n- bd-ser (MR ingestion module)\n\n## Edge Cases\n- `locked` state is transitional (merge in progress); may not exist in test data\n- Some older GitLab instances may not return `head_sha` for all MRs\n- `work_in_progress` is deprecated but should still work as fallback\n- Very large projects (10k+ MRs) may take significant time on 
first sync","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-26T22:06:00.966522Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:48:21.057298Z","closed_at":"2026-01-27T00:48:21.057225Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3ae","depends_on_id":"bd-iba","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3ae","depends_on_id":"bd-ser","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3as","title":"Implement timeline event collection and chronological interleaving","description":"## Background\n\nThe event collection phase is steps 4-5 of the timeline pipeline (spec Section 3.2). It takes seed + expanded entity sets and collects all their events from resource event tables, then interleaves chronologically.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 3.2 steps 4-5, Section 3.3 (Event Model).\n\n## Codebase Context\n\n- resource_state_events: columns include state, actor_username (not actor_gitlab_id for display), created_at, issue_id, merge_request_id, source_merge_request_iid, source_commit\n- resource_label_events: columns include action ('add'|'remove'), label_name (NULLABLE since migration 012), actor_username, created_at\n- resource_milestone_events: columns include action ('add'|'remove'), milestone_title (NULLABLE since migration 012), actor_username, created_at\n- issues table: created_at, author_username, title, web_url\n- merge_requests table: created_at, author_username, title, web_url, merged_at, updated_at\n- All timestamps are ms epoch UTC (stored as INTEGER)\n\n## Approach\n\nCreate `src/core/timeline_collect.rs`:\n\n```rust\nuse rusqlite::Connection;\nuse crate::core::timeline::{TimelineEvent, TimelineEventType, EntityRef, ExpandedEntityRef};\n\npub fn collect_events(\n conn: &Connection,\n seed_entities: &[EntityRef],\n expanded_entities: &[ExpandedEntityRef],\n 
evidence_notes: &[TimelineEvent], // from seed phase\n since_ms: Option, // --since filter\n limit: usize, // -n flag (default 100)\n) -> Result> { ... }\n```\n\n### Event Collection Per Entity\n\nFor each entity (seed + expanded), collect:\n\n1. **Creation event** (`Created`):\n ```sql\n -- Issues:\n SELECT created_at, author_username, title, web_url FROM issues WHERE id = ?1\n -- MRs:\n SELECT created_at, author_username, title, web_url FROM merge_requests WHERE id = ?1\n ```\n\n2. **State changes** (`StateChanged { state }`):\n ```sql\n SELECT state, actor_username, created_at FROM resource_state_events\n WHERE (issue_id = ?1 OR merge_request_id = ?1)\n AND (?2 IS NULL OR created_at >= ?2) -- since filter\n ORDER BY created_at ASC\n ```\n NOTE: For MRs, a state='merged' event also produces a separate Merged variant.\n\n3. **Label changes** (`LabelAdded`/`LabelRemoved`):\n ```sql\n SELECT action, label_name, actor_username, created_at FROM resource_label_events\n WHERE (issue_id = ?1 OR merge_request_id = ?1)\n AND (?2 IS NULL OR created_at >= ?2)\n ORDER BY created_at ASC\n ```\n Handle NULL label_name (deleted label): use \"[deleted label]\" as fallback.\n\n4. **Milestone changes** (`MilestoneSet`/`MilestoneRemoved`):\n ```sql\n SELECT action, milestone_title, actor_username, created_at FROM resource_milestone_events\n WHERE (issue_id = ?1 OR merge_request_id = ?1)\n AND (?2 IS NULL OR created_at >= ?2)\n ORDER BY created_at ASC\n ```\n Handle NULL milestone_title: use \"[deleted milestone]\" as fallback.\n\n5. **Merge event** (Merged, MR only):\n Derive from merge_requests.merged_at (preferred) OR resource_state_events WHERE state='merged'. 
Skip StateChanged when state='merged' — emit only the Merged variant.\n\n### Chronological Interleave\n\n```rust\nevents.sort(); // Uses Ord impl from bd-20e\nif let Some(since) = since_ms {\n events.retain(|e| e.timestamp >= since);\n}\nevents.truncate(limit);\n```\n\nRegister in `src/core/mod.rs`: `pub mod timeline_collect;`\n\n## Acceptance Criteria\n\n- [ ] Collects Created, StateChanged, LabelAdded/Removed, MilestoneSet/Removed, Merged, NoteEvidence events\n- [ ] Merged events deduplicated from StateChanged{merged} — emit only Merged variant\n- [ ] NULL label_name/milestone_title handled with fallback text\n- [ ] --since filter applied to all event types\n- [ ] Events sorted chronologically with stable tiebreak\n- [ ] Limit applied AFTER sorting\n- [ ] Evidence notes from seed phase included\n- [ ] is_seed correctly set based on entity source\n- [ ] Module registered in src/core/mod.rs\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/core/timeline_collect.rs` (NEW)\n- `src/core/mod.rs` (add `pub mod timeline_collect;`)\n\n## TDD Loop\n\nRED:\n- `test_collect_creation_event` - entity produces Created event\n- `test_collect_state_events` - state changes produce StateChanged events\n- `test_collect_merged_dedup` - state='merged' produces Merged not StateChanged\n- `test_collect_null_label_fallback` - NULL label_name uses fallback text\n- `test_collect_since_filter` - old events excluded\n- `test_collect_chronological_sort` - mixed entity events interleave correctly\n- `test_collect_respects_limit`\n\nTests need in-memory DB with migrations 001-014 applied.\n\nGREEN: Implement SQL queries and event assembly.\n\nVERIFY: `cargo test --lib -- timeline_collect`\n\n## Edge Cases\n\n- MR with merged_at=NULL and no state='merged' event: no Merged event emitted\n- Entity with 0 events in resource tables: only Created event returned\n- NULL actor_username: actor field is None\n- Timestamps at exact 
--since boundary: use >= (inclusive)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:33:08.703942Z","created_by":"tayloreernisse","updated_at":"2026-02-05T21:53:01.160429Z","closed_at":"2026-02-05T21:53:01.160380Z","close_reason":"Completed: Created src/core/timeline_collect.rs with event collection for Created, StateChanged, LabelAdded/Removed, MilestoneSet/Removed, Merged, NoteEvidence. Merged dedup (state=merged skipped in favor of Merged variant). NULL label/milestone fallbacks. Since filter, chronological sort, limit. 10 tests pass.","compaction_level":0,"original_size":0,"labels":["gate-3","phase-b","query"],"dependencies":[{"issue_id":"bd-3as","depends_on_id":"bd-1ep","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3as","depends_on_id":"bd-ike","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3as","depends_on_id":"bd-ypa","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-3bec","title":"Wire surgical dispatch in run_sync and update robot-docs","description":"## Background\n\nThe existing `run_sync` function (lines 63-360 of `src/cli/commands/sync.rs`) handles the normal full-sync pipeline. Once `run_sync_surgical` (bd-1i4i) is implemented, this bead wires the dispatch: when `SyncOptions` contains issue or MR IIDs, route to the surgical path instead of the normal path. This also requires updating `handle_sync_cmd` (line 2120 of `src/main.rs`) to pass through the new CLI fields (bd-1lja), and updating the robot-docs schema to document the new surgical response fields.\n\n## Approach\n\nThree changes:\n\n**1. 
Dispatch in `run_sync` (src/cli/commands/sync.rs)**\n\nAdd an early check at the top of `run_sync` (after line 68):\n\n```rust\npub async fn run_sync(\n config: &Config,\n options: SyncOptions,\n run_id: Option<&str>,\n signal: &ShutdownSignal,\n) -> Result {\n // Surgical dispatch: if any IIDs specified, route to surgical pipeline\n if !options.issues.is_empty() || !options.merge_requests.is_empty() {\n return run_sync_surgical(config, options, run_id, signal).await;\n }\n\n // ... existing normal sync pipeline unchanged ...\n}\n```\n\n**2. Update `handle_sync_cmd` (src/main.rs line 2120)**\n\nPass new fields from `SyncArgs` into `SyncOptions`:\n\n```rust\nlet options = SyncOptions {\n full: args.full && !args.no_full,\n force: args.force && !args.no_force,\n no_embed: args.no_embed,\n no_docs: args.no_docs,\n no_events: args.no_events,\n robot_mode,\n dry_run,\n // New surgical fields (from bd-1lja)\n issues: args.issue.clone(),\n merge_requests: args.mr.clone(),\n project: args.project.clone(),\n preflight_only: args.preflight_only,\n};\n```\n\nAlso: when surgical mode is detected (issues/MRs non-empty), skip the normal SyncRunRecorder setup in `handle_sync_cmd` since `run_sync_surgical` manages its own recorder.\n\n**3. Update robot-docs (src/main.rs handle_robot_docs)**\n\nAdd documentation for the surgical sync response format. The robot-docs output should include:\n- New CLI flags: `--issue`, `--mr`, `-p`/`--project`, `--preflight-only`\n- Surgical response fields: `surgical_mode`, `surgical_iids`, `entity_results`, `preflight_only`\n- `EntitySyncResult` schema: `entity_type`, `iid`, `outcome`, `error`, `toctou_reason`\n- Exit codes for surgical-specific errors\n\n## Acceptance Criteria\n\n1. `lore sync --issue 7 -p group/project` dispatches to `run_sync_surgical`, not normal sync\n2. `lore sync` (no IIDs) follows the existing normal pipeline unchanged\n3. 
`handle_sync_cmd` passes `issues`, `merge_requests`, `project`, `preflight_only` from args to options\n4. `lore robot-docs` output includes surgical sync documentation\n5. All existing sync tests pass without modification\n6. Robot mode JSON output for surgical sync matches documented schema\n\n## Files\n\n- `src/cli/commands/sync.rs` — add dispatch check at top of `run_sync`, add `use super::sync_surgical::run_sync_surgical`\n- `src/main.rs` — update `handle_sync_cmd` to pass new fields, update robot-docs text\n- `src/cli/commands/mod.rs` — ensure `sync_surgical` module is public (may already be done by bd-1i4i)\n\n## TDD Anchor\n\nTests in `src/cli/commands/sync.rs` or a companion test file:\n\n```rust\n#[cfg(test)]\nmod dispatch_tests {\n use super::*;\n\n #[test]\n fn sync_options_with_issues_is_surgical() {\n let options = SyncOptions {\n issues: vec![7],\n ..SyncOptions::default()\n };\n assert!(!options.issues.is_empty() || !options.merge_requests.is_empty());\n }\n\n #[test]\n fn sync_options_without_iids_is_normal() {\n let options = SyncOptions::default();\n assert!(options.issues.is_empty() && options.merge_requests.is_empty());\n }\n\n #[test]\n fn sync_options_with_mrs_is_surgical() {\n let options = SyncOptions {\n merge_requests: vec![10, 20],\n ..SyncOptions::default()\n };\n assert!(!options.issues.is_empty() || !options.merge_requests.is_empty());\n }\n\n #[tokio::test]\n async fn dispatch_routes_to_surgical_when_issues_present() {\n // Integration-level test: verify run_sync with IIDs calls surgical path.\n // This test uses wiremock to mock the surgical path's GitLab calls.\n // The key assertion: when options.issues is non-empty, the function\n // does NOT attempt the normal ingest flow (no project cursor queries).\n let server = wiremock::MockServer::start().await;\n wiremock::Mock::given(wiremock::matchers::method(\"GET\"))\n .and(wiremock::matchers::path_regex(r\"/api/v4/projects/1/issues\"))\n 
.respond_with(wiremock::ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([{\n \"id\": 100, \"iid\": 7, \"project_id\": 1, \"title\": \"Test\",\n \"state\": \"opened\", \"created_at\": \"2026-01-01T00:00:00Z\",\n \"updated_at\": \"2026-02-17T00:00:00Z\",\n \"author\": {\"id\": 1, \"username\": \"dev\", \"name\": \"Dev\"},\n \"web_url\": \"https://gitlab.example.com/group/project/-/issues/7\"\n }])))\n .mount(&server).await;\n\n let mut config = Config::default();\n config.gitlab.url = server.uri();\n config.gitlab.token = \"test-token\".to_string();\n let options = SyncOptions {\n issues: vec![7],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n let result = run_sync(&config, options, Some(\"dispatch-test\"), &signal).await;\n\n // Should succeed via surgical path (or at least not panic from normal path)\n assert!(result.is_ok());\n let r = result.unwrap();\n assert_eq!(r.surgical_mode, Some(true));\n }\n\n #[test]\n fn robot_docs_includes_surgical_sync() {\n // Verify the robot-docs string contains surgical sync documentation\n // This tests the static text, not runtime behavior\n let docs = include_str!(\"../../../src/main.rs\");\n // The robot-docs handler should mention surgical sync\n // (Actual assertion depends on how robot-docs are generated)\n }\n}\n```\n\n## Edge Cases\n\n- **Dry-run + surgical**: `handle_sync_cmd` currently short-circuits dry-run before SyncRunRecorder setup (line 2149). Surgical dry-run should also short-circuit, but preflight-only is the surgical equivalent. Clarify: `--dry-run --issue 7` should be treated as `--preflight-only --issue 7`.\n- **Normal sync recorder vs surgical recorder**: `handle_sync_cmd` creates a `SyncRunRecorder` for normal sync (line 2159). When dispatching to surgical, skip this since `run_sync_surgical` creates its own. 
Use the `options.issues.is_empty() && options.merge_requests.is_empty()` check to decide.\n- **Robot-docs backward compatibility**: New fields are additive. Existing robot-docs consumers that ignore unknown fields are unaffected.\n- **No project specified with IIDs**: If `--issue 7` is passed without `-p project`, the dispatch should fail with a clear usage error (validation in bd-1lja).\n\n## Dependency Context\n\n- **Depends on (upstream)**: bd-1i4i (the `run_sync_surgical` function to call), bd-1lja (SyncOptions extensions with `issues`, `merge_requests`, `project`, `preflight_only` fields), bd-wcja (SyncResult surgical fields for assertion)\n- **No downstream dependents** — this is the final wiring bead for the main code path.\n- Must NOT modify the normal sync pipeline behavior. The dispatch is a pure conditional branch at function entry.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:18:10.648172Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:03:44.531713Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"]} {"id":"bd-3bo","title":"[CP1] gi count issues/discussions/notes commands","description":"Count entities in the database.\n\nCommands:\n- gi count issues → 'Issues: N'\n- gi count discussions --type=issue → 'Issue Discussions: N'\n- gi count notes --type=issue → 'Issue Notes: N (excluding M system)'\n\nFiles: src/cli/commands/count.ts\nDone when: Counts match expected values from GitLab","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T15:20:16.190875Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.156293Z","closed_at":"2026-01-25T15:21:35.156293Z","deleted_at":"2026-01-25T15:21:35.156290Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-3bpk","title":"NOTE-0A: Upsert/sweep for issue discussion notes","description":"## Background\nIssue discussion note ingestion uses a 
delete/reinsert pattern (DELETE FROM notes WHERE discussion_id = ? at line 132-135 of src/ingestion/discussions.rs then re-insert). This makes notes.id unstable across syncs. MR discussion notes already use upsert (ON CONFLICT(gitlab_id) DO UPDATE at line 470-536 of src/ingestion/mr_discussions.rs) producing stable IDs. Phase 2 depends on stable notes.id as source_id for note documents.\n\n## Approach\nRefactor src/ingestion/discussions.rs to match the MR pattern in src/ingestion/mr_discussions.rs:\n\n1. Create shared NoteUpsertOutcome struct (in src/ingestion/discussions.rs, also used by mr_discussions.rs):\n pub struct NoteUpsertOutcome { pub local_note_id: i64, pub changed_semantics: bool }\n\n2. Replace insert_note() (line 201-233) with upsert_note_for_issue(). Current signature is:\n fn insert_note(conn: &Connection, discussion_id: i64, note: &NormalizedNote, payload_id: Option) -> Result<()>\n New signature:\n fn upsert_note_for_issue(conn: &Connection, discussion_id: i64, note: &NormalizedNote, last_seen_at: i64, payload_id: Option) -> Result\n\n Use ON CONFLICT(gitlab_id) DO UPDATE SET body, note_type, updated_at, last_seen_at, resolvable, resolved, resolved_by, resolved_at, position_old_path, position_new_path, position_old_line, position_new_line, position_type, position_line_range_start, position_line_range_end, position_base_sha, position_start_sha, position_head_sha\n\n IMPORTANT: The current issue insert_note() only populates: gitlab_id, discussion_id, project_id, note_type, is_system, author_username, body, created_at, updated_at, last_seen_at, position (integer array order), resolvable, resolved, resolved_by, resolved_at, raw_payload_id. It does NOT populate the decomposed position columns (position_new_path, etc.). The MR upsert_note() at line 470 DOES populate all decomposed position columns. Your upsert must include ALL columns from the MR pattern. The NormalizedNote struct (from src/gitlab/transformers.rs) has all position fields.\n\n3. 
Change detection via pre-read: SELECT existing note before upsert, compare semantic fields (body, note_type, resolved, resolved_by, positions). Exclude updated_at/last_seen_at from semantic comparison. Use IS NOT for NULL-safe comparison.\n\n4. Add sweep_stale_issue_notes(conn, discussion_id, last_seen_at) — DELETE FROM notes WHERE discussion_id = ? AND last_seen_at < ?\n\n5. Replace the delete-reinsert loop (lines 132-139) with:\n for note in notes { let outcome = upsert_note_for_issue(&tx, local_discussion_id, ¬e, last_seen_at, None)?; }\n sweep_stale_issue_notes(&tx, local_discussion_id, last_seen_at)?;\n\n6. Update upsert_note() in mr_discussions.rs (line 470) to return NoteUpsertOutcome with same semantic change detection. Current signature returns Result<()>.\n\nReference files:\n- src/ingestion/mr_discussions.rs: upsert_note() line 470, sweep_stale_notes() line 551\n- src/ingestion/discussions.rs: insert_note() line 201, delete pattern line 132-135\n- src/gitlab/transformers.rs: NormalizedNote struct definition\n\n## Files\n- MODIFY: src/ingestion/discussions.rs (refactor insert_note -> upsert + sweep, lines 132-233)\n- MODIFY: src/ingestion/mr_discussions.rs (return NoteUpsertOutcome from upsert_note at line 470)\n\n## TDD Anchor\nRED: test_issue_note_upsert_stable_id — insert 2 notes, record IDs, re-sync same gitlab_ids, assert IDs unchanged.\nGREEN: Implement upsert_note_for_issue with ON CONFLICT.\nVERIFY: cargo test upsert_stable_id -- --nocapture\nTests: test_issue_note_upsert_detects_body_change, test_issue_note_upsert_unchanged_returns_false, test_issue_note_upsert_updated_at_only_does_not_mark_semantic_change, test_issue_note_sweep_removes_stale, test_issue_note_upsert_returns_local_id\n\n## Acceptance Criteria\n- [ ] upsert_note_for_issue() uses ON CONFLICT(gitlab_id) DO UPDATE\n- [ ] Local note IDs stable across re-syncs of identical data\n- [ ] changed_semantics = true only for body/note_type/resolved/position changes\n- [ ] changed_semantics = 
false for updated_at-only changes\n- [ ] sweep removes notes with stale last_seen_at\n- [ ] MR upsert_note() returns NoteUpsertOutcome\n- [ ] Issue upsert populates ALL position columns (matching MR pattern)\n- [ ] All 6 tests pass, clippy clean\n\n## Edge Cases\n- NULL body: IS NOT comparison handles NULLs correctly\n- UNIQUE(gitlab_id) already exists on notes table (migration 002)\n- last_seen_at prevents stale-sweep of notes currently being ingested\n- Issue notes currently don't populate position_new_path etc. — the new upsert must extract these from NormalizedNote (check that the transformer populates them for issue DiffNotes)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:59:14.783336Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:24.151831Z","closed_at":"2026-02-12T18:13:24.151781Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"],"dependencies":[{"issue_id":"bd-3bpk","depends_on_id":"bd-18bf","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3bpk","depends_on_id":"bd-2b28","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3bpk","depends_on_id":"bd-2ezb","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3bpk","depends_on_id":"bd-jbfw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3cjp","title":"NOTE-2I: Batch parent metadata cache for note regeneration","description":"## Background\nextract_note_document() (from NOTE-2C) fetches parent entity metadata per note via SQL queries. During initial backfill of ~8K notes, this creates N+1 amplification — 50 notes on same MR = 50 identical parent lookups. This is a performance optimization for batch regeneration only.\n\n## Approach\n1. 
Add ParentMetadataCache struct in src/documents/extractor.rs:\n pub struct ParentMetadataCache {\n cache: HashMap<(String, i64), ParentMetadata>,\n }\n Key: (noteable_type: String, parent_local_id: i64)\n ParentMetadata struct: { iid: i64, title: String, web_url: String, labels: Vec, project_path: String }\n\n Methods:\n - pub fn new() -> Self\n - pub fn get_or_fetch(&mut self, conn: &Connection, noteable_type: &str, parent_id: i64) -> Result>\n get_or_fetch uses HashMap entry API: on miss, fetches from DB (same queries as extract_note_document), caches, returns ref.\n\n2. Add pub fn extract_note_document_cached(conn: &Connection, note_id: i64, cache: &mut ParentMetadataCache) -> Result>:\n Same logic as extract_note_document but calls cache.get_or_fetch() instead of inline parent queries. The uncached version remains for single-note use.\n\n3. Update batch regeneration loop in src/documents/regenerator.rs. The main regeneration loop is in regenerate_dirty_documents() (top of file, ~line 20). It processes dirty entries one at a time via regenerate_one() (line 86). For batch cache to work:\n - Create ParentMetadataCache before the loop\n - In the SourceType::Note arm of regenerate_one, pass the cache through\n - This requires either making regenerate_one() take an optional cache parameter, or restructuring to handle Note specially in the loop body.\n\n Cleanest approach: Add cache: &mut Option parameter to regenerate_one(). Initialize as Some(ParentMetadataCache::new()) before the loop. Only SourceType::Note uses it. 
Other types ignore it.\n\n Cache is created fresh per regenerate_dirty_documents() call — no cross-invocation persistence.\n\n## Files\n- MODIFY: src/documents/extractor.rs (add ParentMetadataCache struct + extract_note_document_cached)\n- MODIFY: src/documents/regenerator.rs (add cache parameter to regenerate_one, use in batch loop)\n- MODIFY: src/documents/mod.rs (export ParentMetadataCache if needed externally)\n\n## TDD Anchor\nRED: test_note_regeneration_batch_uses_cache — insert project, issue, 10 notes on same issue, mark all dirty, regenerate all, assert all 10 documents created correctly.\nGREEN: Implement ParentMetadataCache and extract_note_document_cached.\nVERIFY: cargo test note_regeneration_batch -- --nocapture\nTests: test_note_regeneration_cache_consistent_with_direct_extraction (cached output == uncached output), test_note_regeneration_cache_invalidates_across_parents (notes from different parents get correct metadata)\n\n## Acceptance Criteria\n- [ ] ParentMetadataCache reduces DB queries during batch regeneration (10 notes on 1 parent = 1 parent fetch, not 10)\n- [ ] Cached extraction produces identical DocumentData output to uncached\n- [ ] Cache keyed per (noteable_type, parent_id) — no cross-parent leakage\n- [ ] Cache scoped to single regenerate_dirty_documents call — no persistence or invalidation complexity\n- [ ] All 3 tests pass\n\n## Dependency Context\n- Depends on NOTE-2C (bd-18yh): extract_note_document function must exist to create the cached variant\n\n## Edge Cases\n- Parent deleted between cache creation and lookup: get_or_fetch returns None, extract_note_document_cached returns None (same as uncached)\n- Very large batch (10K+ notes): cache grows but is bounded by number of unique parents (typically <100 issues/MRs)\n- Cache miss for orphaned discussion: cached None result prevents repeated failed 
lookups","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T17:03:00.515490Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.870738Z","closed_at":"2026-02-12T18:13:15.870693Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"]} {"id":"bd-3ddw","title":"Create lore-tui crate scaffold","description":"## Background\nThe TUI is implemented as a separate binary crate (crates/lore-tui/) that uses nightly Rust for FrankenTUI. It is EXCLUDED from the root workspace to keep nightly-only deps isolated. The lore CLI spawns lore-tui at runtime via binary delegation (PATH lookup) — zero compile-time dependency from lore to lore-tui. lore-tui depends on lore as a library (src/lib.rs exists and exports all modules).\n\nFrankenTUI is published on crates.io as ftui (0.1.1), ftui-core, ftui-runtime, ftui-render, ftui-style. Use crates.io versions. Local clone exists at ~/projects/FrankenTUI/ for reference.\n\n## Approach\nCreate the crate directory structure:\n- crates/lore-tui/Cargo.toml with dependencies:\n - ftui = \"0.1.1\" (crates.io) and related ftui-* crates\n - lore = { path = \"../..\" } (library dependency for Config, db, ingestion, etc.)\n - clap, anyhow, chrono, dirs, rusqlite (bundled), crossterm\n- crates/lore-tui/rust-toolchain.toml pinning nightly-2026-02-08\n- crates/lore-tui/src/main.rs — binary entry point with TuiCli struct (clap Parser) supporting --config, --sync, --fresh, --render-mode, --ascii, --no-alt-screen\n- crates/lore-tui/src/lib.rs — public API: launch_tui(), launch_sync_tui(), LaunchOptions struct, module declarations\n- Root Cargo.toml: verify lore-tui is NOT in [workspace] members\n\n## Acceptance Criteria\n- [ ] crates/lore-tui/Cargo.toml exists with ftui (crates.io) and lore (path dep) dependencies\n- [ ] crates/lore-tui/rust-toolchain.toml pins nightly-2026-02-08\n- [ ] crates/lore-tui/src/main.rs compiles with clap CLI args\n- [ ] 
crates/lore-tui/src/lib.rs declares all module stubs and exports LaunchOptions, launch_tui, launch_sync_tui\n- [ ] cargo +stable check --workspace --all-targets passes (lore-tui excluded)\n- [ ] cargo +nightly check --manifest-path crates/lore-tui/Cargo.toml --all-targets passes\n- [ ] Root Cargo.toml does NOT include lore-tui in workspace members\n\n## Files\n- CREATE: crates/lore-tui/Cargo.toml\n- CREATE: crates/lore-tui/rust-toolchain.toml\n- CREATE: crates/lore-tui/src/main.rs\n- CREATE: crates/lore-tui/src/lib.rs\n- VERIFY: Cargo.toml (root — confirm lore-tui NOT in members)\n\n## TDD Anchor\nRED: Write a shell test that runs cargo +nightly check --manifest-path crates/lore-tui/Cargo.toml and asserts exit 0.\nGREEN: Create the full crate scaffold with all deps.\nVERIFY: cargo +stable check --workspace --all-targets && cargo +nightly check --manifest-path crates/lore-tui/Cargo.toml\n\n## Edge Cases\n- ftui crates may require specific nightly features — pin exact nightly date\n- Path dependency to lore means lore-tui sees lore's edition 2024 — verify compat\n- rusqlite bundled feature pulls in cc build — may need nightly-compatible cc version\n- If ftui 0.1.1 has breaking changes vs PRD assumptions, check ~/projects/FrankenTUI/ for latest API\n\n## Dependency Context\nRoot task — no dependencies. All other Phase 0 tasks depend on this scaffold existing.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:53:10.859837Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:43:49.635086Z","closed_at":"2026-02-12T19:43:49.635040Z","close_reason":"Scaffold created and compiles: Cargo.toml, rust-toolchain.toml, main.rs, lib.rs all passing cargo check + clippy + fmt","compaction_level":0,"original_size":0,"labels":["TUI"]} {"id":"bd-3dum","title":"Orchestrator: status enrichment phase with transactional writes","description":"## Background\nThe orchestrator controls the sync pipeline. 
Status enrichment is a new Phase 1.5 that runs after issue ingestion but before discussion sync. It must be non-fatal — errors skip enrichment but don't crash the sync.\n\n## Approach\nAdd enrichment phase to ingest_project_issues_with_progress. Use client.graphql_client() factory. Look up project path from DB via .optional()? for non-fatal failure. Transactional writes via enrich_issue_statuses_txn() with two phases: clear stale, then apply new.\n\n## Files\n- src/ingestion/orchestrator.rs (enrichment phase + txn helper + IngestProjectResult fields + ProgressEvent variants)\n- src/cli/commands/ingest.rs (add match arms for new ProgressEvent variants)\n\n## Implementation\n\nIngestProjectResult new fields:\n statuses_enriched: usize, statuses_cleared: usize, statuses_seen: usize,\n statuses_without_widget: usize, partial_error_count: usize,\n first_partial_error: Option, status_enrichment_error: Option,\n status_enrichment_mode: String, status_unsupported_reason: Option\n Default: all 0/None/\"\" as appropriate\n\nProgressEvent new variants:\n StatusEnrichmentComplete { enriched: usize, cleared: usize }\n StatusEnrichmentSkipped\n\nPhase 1.5 logic (after ingest_issues, before discussion sync):\n 1. Check config.sync.fetch_work_item_status && !signal.is_cancelled()\n 2. If false: set mode=\"skipped\", emit StatusEnrichmentSkipped\n 3. Look up project path: conn.query_row(\"SELECT path_with_namespace FROM projects WHERE id = ?1\", [project_id], |r| r.get(0)).optional()?\n 4. If None: warn, set status_enrichment_error=\"project_path_missing\", emit StatusEnrichmentComplete{0,0}\n 5. Create graphql_client via client.graphql_client()\n 6. Call fetch_issue_statuses(&graphql_client, &project_path).await\n 7. On Ok: map unsupported_reason to mode/reason, call enrich_issue_statuses_txn(), set counters\n 8. On Err: warn, set status_enrichment_error, mode=\"fetched\"\n 9. 
Emit StatusEnrichmentComplete\n\nenrich_issue_statuses_txn(conn, project_id, statuses, all_fetched_iids, now_ms) -> Result<(usize, usize)>:\n Uses conn.unchecked_transaction() (conn is &Connection not &mut)\n Phase 1 (clear): UPDATE issues SET status_*=NULL, status_synced_at=now_ms WHERE project_id=? AND iid=? AND status_name IS NOT NULL — for IIDs in all_fetched_iids but NOT in statuses\n Phase 2 (apply): UPDATE issues SET status_name=?, status_category=?, status_color=?, status_icon_name=?, status_synced_at=now_ms WHERE project_id=? AND iid=?\n tx.commit(), return (enriched, cleared)\n\nIn src/cli/commands/ingest.rs progress callback, add arms:\n ProgressEvent::StatusEnrichmentComplete { enriched, cleared } => { ... }\n ProgressEvent::StatusEnrichmentSkipped => { ... }\n\n## Acceptance Criteria\n- [ ] Enrichment runs after ingest_issues, before discussion sync\n- [ ] Gated by config.sync.fetch_work_item_status\n- [ ] Project path missing -> skipped with error=\"project_path_missing\", sync continues\n- [ ] enrich_issue_statuses_txn correctly UPDATEs status columns + status_synced_at\n- [ ] Stale status cleared: issue in all_fetched_iids but not statuses -> NULL + synced_at set\n- [ ] Transaction rollback on failure: no partial updates\n- [ ] Idempotent: running twice with same data produces same result\n- [ ] GraphQL error: logged, enrichment_error captured, sync continues\n- [ ] ingest.rs compiles with new ProgressEvent arms\n- [ ] cargo check --all-targets passes\n\n## TDD Loop\nRED: test_enrich_issue_statuses_txn, test_enrich_skips_unknown_iids, test_enrich_clears_removed_status, test_enrich_transaction_rolls_back_on_failure, test_enrich_idempotent_across_two_runs, test_enrich_sets_synced_at_on_clear, test_enrichment_error_captured_in_result, test_project_path_missing_skips_enrichment\n Tests use in-memory DB with migration 021 applied\nGREEN: Implement enrichment phase + txn helper + result fields + progress arms\nVERIFY: cargo test enrich && cargo test 
orchestrator\n\n## Edge Cases\n- unchecked_transaction() needed because conn is &Connection not &mut Connection\n- .optional()? requires use rusqlite::OptionalExtension\n- status_synced_at is set on BOTH clear and apply operations (not NULL on clear)\n- Clear SQL has WHERE status_name IS NOT NULL to avoid counting already-cleared rows\n- Progress callback match must be updated in SAME batch as enum change (compile error otherwise)\n- status_enrichment_mode must be set in ALL code paths (fetched/unsupported/skipped)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:42:11.254917Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.419310Z","closed_at":"2026-02-11T07:21:33.419268Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3dum","depends_on_id":"bd-1gvg","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3dum","depends_on_id":"bd-2jzn","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3dum","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} -{"id":"bd-3ei1","title":"Implement Issue List (state + action + view)","description":"## Background\nThe Issue List is the primary browse interface for issues. It uses keyset pagination (not OFFSET) for deterministic cross-page traversal under concurrent sync writes. 
A browse snapshot fence preserves stable ordering until explicit refresh.\n\n## Approach\nState (state/issue_list.rs):\n- IssueListState: window (Vec), total_count, selected_index, scroll_offset, next_cursor (Option), prev_cursor (Option), prefetch_in_flight (bool), filter (IssueFilter), filter_input (TextInput), filter_focused (bool), sort_field (SortField), sort_order (SortOrder), snapshot_upper_updated_at (Option), filter_hash (u64), peek_visible (bool), peek_content (Option)\n- IssueCursor: updated_at (i64), iid (i64) — boundary values for keyset pagination\n- IssueFilter: state (Option), author (Option), assignee (Option), label (Option), milestone (Option), status (Option), free_text (Option), project_id (Option)\n- IssueListRow: project_path, iid, title, state, author, assignee, labels, updated_at, status_name, status_icon\n- handle_key(): j/k scroll, J/K page, Enter select, / focus filter, Tab sort, g+g top, G bottom, r refresh, Space toggle Quick Peek\n- scroll_to_top(), apply_filter(), set_sort(), toggle_peek()\n\n**Snapshot fence:** On first load and on explicit refresh (r), store snapshot_upper_updated_at = MAX(updated_at) from result set. Subsequent page fetches add WHERE updated_at <= snapshot_upper_updated_at to prevent rows from shifting as sync inserts new data. Explicit refresh (r) resets the fence.\n\n**filter_hash:** Compute a hash of the current filter state. When filter changes (new hash != old hash), reset cursor to page 1 and clear snapshot fence. This prevents stale pagination after filter changes.\n\n**Prefetch:** When scroll position reaches 80% of current window, trigger background prefetch of next page via TaskSupervisor. 
Prefetched data appended to window when user scrolls past current page boundary.\n\n**Quick Peek (Space key):**\n- Space toggles a right-side preview pane (40% width) showing the currently selected issue's detail\n- Preview content loads asynchronously via TaskSupervisor\n- Cursor movement (j/k) updates the preview for the newly selected row\n- Esc or Space again closes the peek pane\n- On narrow terminals (<100 cols), peek replaces the list instead of side-by-side\n\nAction (action.rs):\n- fetch_issues(conn, filter, cursor, page_size, clock, snapshot_fence) -> Result: keyset pagination query with WHERE (updated_at, iid) < (cursor.updated_at, cursor.iid) AND updated_at <= snapshot_fence ORDER BY updated_at DESC, iid DESC LIMIT page_size+1 (extra row detects has_next). Uses idx_issues_list_default index.\n- fetch_issue_peek(conn, entity_key) -> Result: loads issue detail for Quick Peek preview\n- IssueListPage: rows, next_cursor, prev_cursor, total_count\n\nView (view/issue_list.rs):\n- render_issue_list(frame, state, area, theme): FilterBar at top, EntityTable below, status bar at bottom\n- When peek_visible: split area horizontally — list (60%) | peek preview (40%)\n- Columns: IID, Title (flex), State, Author, Labels, Updated, Status\n\n## Acceptance Criteria\n- [ ] Keyset pagination fetches pages without OFFSET\n- [ ] Next/prev page navigation preserves deterministic ordering\n- [ ] Browse snapshot fence (snapshot_upper_updated_at) prevents rows from shifting during concurrent sync\n- [ ] Explicit refresh (r) resets snapshot fence and re-queries from first page\n- [ ] filter_hash tracks filter state; filter change resets cursor to page 1\n- [ ] Prefetch triggers at 80% scroll position via TaskSupervisor\n- [ ] Filter bar accepts DSL tokens and triggers re-query via ScreenIntent::RequeryNeeded\n- [ ] j/k scrolls within current page, J/K loads next/prev page\n- [ ] Enter navigates to IssueDetail(EntityKey), Esc returns to list with cursor preserved\n- [ ] Tab 
cycles sort column, sort indicator shown\n- [ ] Total count displayed in status area\n- [ ] Space toggles Quick Peek right-side preview pane\n- [ ] Quick Peek loads issue detail asynchronously\n- [ ] j/k in peek mode updates preview for newly selected row\n- [ ] Narrow terminal (<100 cols): peek replaces list instead of split view\n\n## Files\n- MODIFY: crates/lore-tui/src/state/issue_list.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_issues, fetch_issue_peek)\n- CREATE: crates/lore-tui/src/view/issue_list.rs\n\n## TDD Anchor\nRED: Write test_keyset_pagination in action.rs that inserts 30 issues, fetches page 1 (size 10), then fetches page 2 using returned cursor, asserts no overlap between pages.\nGREEN: Implement keyset pagination query.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_keyset_pagination\n\nAdditional tests:\n- test_snapshot_fence_excludes_newer_rows: insert row with updated_at > fence, assert not in results\n- test_filter_change_resets_cursor: change filter, verify cursor reset to None\n- test_prefetch_triggered_at_80pct: scroll to 80% of window, verify prefetch_in_flight set\n\n## Edge Cases\n- Multi-project datasets: cursor must include project_id scope from global ScopeContext\n- Issues with identical updated_at: keyset tiebreaker on iid ensures deterministic ordering\n- Empty result set: show \"No issues match your filter\" message, not empty table\n- Filter changes must reset cursor to first page (not continue from mid-pagination)\n- Quick Peek on empty list: no-op (don't show empty pane)\n- Rapid j/k with peek open: debounce peek loads to avoid flooding TaskSupervisor\n\n## Dependency Context\nUses EntityTable and FilterBar from \"Implement entity table + filter bar widgets\" (bd-18qs).\nUses AppState, IssueListState, ScreenIntent from \"Implement AppState composition\" (bd-1v9m).\nUses TaskSupervisor for load management and prefetch from \"Implement TaskSupervisor\" (bd-3le2).\nUses DbManager 
from \"Implement DbManager\" (bd-2kop).\nRequires idx_issues_list_default index from \"Add required TUI indexes\" (bd-3pm2).","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T16:58:31.401233Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:28.173113Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3ei1","depends_on_id":"bd-18qs","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3ei1","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3ei1","depends_on_id":"bd-35g5","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3ei1","depends_on_id":"bd-3pm2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-3ei1","title":"Implement Issue List (state + action + view)","description":"## Background\nThe Issue List is the primary browse interface for issues. It uses keyset pagination (not OFFSET) for deterministic cross-page traversal under concurrent sync writes. 
A browse snapshot fence preserves stable ordering until explicit refresh.\n\n## Approach\nState (state/issue_list.rs):\n- IssueListState: window (Vec), total_count, selected_index, scroll_offset, next_cursor (Option), prev_cursor (Option), prefetch_in_flight (bool), filter (IssueFilter), filter_input (TextInput), filter_focused (bool), sort_field (SortField), sort_order (SortOrder), snapshot_upper_updated_at (Option), filter_hash (u64), peek_visible (bool), peek_content (Option)\n- IssueCursor: updated_at (i64), iid (i64) — boundary values for keyset pagination\n- IssueFilter: state (Option), author (Option), assignee (Option), label (Option), milestone (Option), status (Option), free_text (Option), project_id (Option)\n- IssueListRow: project_path, iid, title, state, author, assignee, labels, updated_at, status_name, status_icon\n- handle_key(): j/k scroll, J/K page, Enter select, / focus filter, Tab sort, g+g top, G bottom, r refresh, Space toggle Quick Peek\n- scroll_to_top(), apply_filter(), set_sort(), toggle_peek()\n\n**Snapshot fence:** On first load and on explicit refresh (r), store snapshot_upper_updated_at = MAX(updated_at) from result set. Subsequent page fetches add WHERE updated_at <= snapshot_upper_updated_at to prevent rows from shifting as sync inserts new data. Explicit refresh (r) resets the fence.\n\n**filter_hash:** Compute a hash of the current filter state. When filter changes (new hash != old hash), reset cursor to page 1 and clear snapshot fence. This prevents stale pagination after filter changes.\n\n**Prefetch:** When scroll position reaches 80% of current window, trigger background prefetch of next page via TaskSupervisor. 
Prefetched data appended to window when user scrolls past current page boundary.\n\n**Quick Peek (Space key):**\n- Space toggles a right-side preview pane (40% width) showing the currently selected issue's detail\n- Preview content loads asynchronously via TaskSupervisor\n- Cursor movement (j/k) updates the preview for the newly selected row\n- Esc or Space again closes the peek pane\n- On narrow terminals (<100 cols), peek replaces the list instead of side-by-side\n\nAction (action.rs):\n- fetch_issues(conn, filter, cursor, page_size, clock, snapshot_fence) -> Result: keyset pagination query with WHERE (updated_at, iid) < (cursor.updated_at, cursor.iid) AND updated_at <= snapshot_fence ORDER BY updated_at DESC, iid DESC LIMIT page_size+1 (extra row detects has_next). Uses idx_issues_list_default index.\n- fetch_issue_peek(conn, entity_key) -> Result: loads issue detail for Quick Peek preview\n- IssueListPage: rows, next_cursor, prev_cursor, total_count\n\nView (view/issue_list.rs):\n- render_issue_list(frame, state, area, theme): FilterBar at top, EntityTable below, status bar at bottom\n- When peek_visible: split area horizontally — list (60%) | peek preview (40%)\n- Columns: IID, Title (flex), State, Author, Labels, Updated, Status\n\n## Acceptance Criteria\n- [ ] Keyset pagination fetches pages without OFFSET\n- [ ] Next/prev page navigation preserves deterministic ordering\n- [ ] Browse snapshot fence (snapshot_upper_updated_at) prevents rows from shifting during concurrent sync\n- [ ] Explicit refresh (r) resets snapshot fence and re-queries from first page\n- [ ] filter_hash tracks filter state; filter change resets cursor to page 1\n- [ ] Prefetch triggers at 80% scroll position via TaskSupervisor\n- [ ] Filter bar accepts DSL tokens and triggers re-query via ScreenIntent::RequeryNeeded\n- [ ] j/k scrolls within current page, J/K loads next/prev page\n- [ ] Enter navigates to IssueDetail(EntityKey), Esc returns to list with cursor preserved\n- [ ] Tab 
cycles sort column, sort indicator shown\n- [ ] Total count displayed in status area\n- [ ] Space toggles Quick Peek right-side preview pane\n- [ ] Quick Peek loads issue detail asynchronously\n- [ ] j/k in peek mode updates preview for newly selected row\n- [ ] Narrow terminal (<100 cols): peek replaces list instead of split view\n\n## Files\n- MODIFY: crates/lore-tui/src/state/issue_list.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_issues, fetch_issue_peek)\n- CREATE: crates/lore-tui/src/view/issue_list.rs\n\n## TDD Anchor\nRED: Write test_keyset_pagination in action.rs that inserts 30 issues, fetches page 1 (size 10), then fetches page 2 using returned cursor, asserts no overlap between pages.\nGREEN: Implement keyset pagination query.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_keyset_pagination\n\nAdditional tests:\n- test_snapshot_fence_excludes_newer_rows: insert row with updated_at > fence, assert not in results\n- test_filter_change_resets_cursor: change filter, verify cursor reset to None\n- test_prefetch_triggered_at_80pct: scroll to 80% of window, verify prefetch_in_flight set\n\n## Edge Cases\n- Multi-project datasets: cursor must include project_id scope from global ScopeContext\n- Issues with identical updated_at: keyset tiebreaker on iid ensures deterministic ordering\n- Empty result set: show \"No issues match your filter\" message, not empty table\n- Filter changes must reset cursor to first page (not continue from mid-pagination)\n- Quick Peek on empty list: no-op (don't show empty pane)\n- Rapid j/k with peek open: debounce peek loads to avoid flooding TaskSupervisor\n\n## Dependency Context\nUses EntityTable and FilterBar from \"Implement entity table + filter bar widgets\" (bd-18qs).\nUses AppState, IssueListState, ScreenIntent from \"Implement AppState composition\" (bd-1v9m).\nUses TaskSupervisor for load management and prefetch from \"Implement TaskSupervisor\" (bd-3le2).\nUses DbManager 
from \"Implement DbManager\" (bd-2kop).\nRequires idx_issues_list_default index from \"Add required TUI indexes\" (bd-3pm2).","status":"in_progress","priority":2,"issue_type":"task","created_at":"2026-02-12T16:58:31.401233Z","created_by":"tayloreernisse","updated_at":"2026-02-18T19:31:08.594688Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3ei1","depends_on_id":"bd-18qs","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3ei1","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3ei1","depends_on_id":"bd-35g5","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3ei1","depends_on_id":"bd-3pm2","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3eis","title":"Implement property tests for navigation invariants","description":"## Background\nProperty-based tests verify navigation invariants hold for all possible sequences of push/pop/forward/jump/reset operations. 
Uses proptest or quickcheck for automated input generation.\n\n## Approach\n- Property: stack depth always >= 1 (Dashboard is always reachable)\n- Property: after push(X), current() == X\n- Property: after push(X) then pop(), current() returns to previous\n- Property: forward_stack cleared after any push (browser semantics)\n- Property: jump_list only contains detail/entity screens\n- Property: reset_to(X) clears all history, current() == X\n- Property: breadcrumbs length == back_stack.len() + 1\n- Arbitrary sequence of operations should never panic\n\n## Acceptance Criteria\n- [ ] All 7 navigation properties hold for 10000 generated test cases\n- [ ] No panic for any sequence of operations\n- [ ] Proptest shrinking finds minimal counterexamples on failure\n\n## Files\n- CREATE: crates/lore-tui/tests/nav_property_tests.rs\n\n## TDD Anchor\nRED: Write proptest that generates random sequences of push/pop/forward/reset, asserts stack depth >= 1 after every operation.\nGREEN: Ensure NavigationStack maintains invariant (pop returns None at root).\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml nav_property\n\n## Dependency Context\nUses NavigationStack from \"Implement NavigationStack\" task.\nUses Screen enum from \"Implement core types\" task.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:04:53.366767Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:38.381515Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3eis","depends_on_id":"bd-1b6k","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3eis","depends_on_id":"bd-3fjk","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3eis","depends_on_id":"bd-nu0d","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3er","title":"OBSERV Epic: Phase 3 - Performance Metrics Collection","description":"StageTiming struct, 
custom MetricsLayer tracing subscriber layer, span-to-metrics extraction, robot JSON enrichment with meta.stages, human-readable timing summary.\n\nDepends on: Phase 2 (spans must exist to extract timing from)\nUnblocks: Phase 4 (sync history needs Vec to store)\n\nFiles: src/core/metrics.rs (new), src/cli/commands/sync.rs, src/cli/commands/ingest.rs, src/main.rs\n\nAcceptance criteria (PRD Section 6.3):\n- lore --robot sync includes meta.run_id and meta.stages array\n- Each stage has name, elapsed_ms, items_processed\n- Top-level stages have sub_stages arrays\n- Interactive sync prints timing summary table\n- Zero-value fields omitted from JSON","status":"closed","priority":2,"issue_type":"epic","created_at":"2026-02-04T15:53:27.415566Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:32:56.743477Z","closed_at":"2026-02-04T17:32:56.743430Z","close_reason":"All Phase 3 tasks complete: StageTiming struct, MetricsLayer, span field recording, robot JSON enrichment with stages, and human-readable timing summary","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-3er","depends_on_id":"bd-2ni","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3eu","title":"Implement hybrid search with adaptive recall","description":"## Background\nHybrid search is the top-level search orchestrator that combines FTS5 lexical results with sqlite-vec semantic results via RRF ranking. It supports three modes (Lexical, Semantic, Hybrid) and implements adaptive recall (wider initial fetch when filters are applied) and graceful degradation (falls back to FTS when Ollama is unavailable). 
All modes use RRF for consistent --explain output.\n\n## Approach\nCreate `src/search/hybrid.rs` per PRD Section 5.3.\n\n**Key types:**\n```rust\n#[derive(Debug, Clone, Copy, PartialEq, Eq)]\npub enum SearchMode {\n Hybrid, // Vector + FTS with RRF\n Lexical, // FTS only\n Semantic, // Vector only\n}\n\nimpl SearchMode {\n pub fn from_str(s: &str) -> Option {\n match s.to_lowercase().as_str() {\n \"hybrid\" => Some(Self::Hybrid),\n \"lexical\" | \"fts\" => Some(Self::Lexical),\n \"semantic\" | \"vector\" => Some(Self::Semantic),\n _ => None,\n }\n }\n\n pub fn as_str(&self) -> &'static str {\n match self {\n Self::Hybrid => \"hybrid\",\n Self::Lexical => \"lexical\",\n Self::Semantic => \"semantic\",\n }\n }\n}\n\npub struct HybridResult {\n pub document_id: i64,\n pub score: f64, // Normalized RRF score (0-1)\n pub vector_rank: Option,\n pub fts_rank: Option,\n pub rrf_score: f64, // Raw RRF score\n}\n```\n\n**Core function (ASYNC, PRD-exact signature):**\n```rust\npub async fn search_hybrid(\n conn: &Connection,\n client: Option<&OllamaClient>, // None if Ollama unavailable\n ollama_base_url: Option<&str>, // For actionable error messages\n query: &str,\n mode: SearchMode,\n filters: &SearchFilters,\n fts_mode: FtsQueryMode,\n) -> Result<(Vec, Vec)>\n```\n\n**IMPORTANT — client is `Option<&OllamaClient>`:** This enables graceful degradation. When Ollama is unavailable, the caller passes `None` and hybrid mode falls back to FTS-only with a warning. 
The `ollama_base_url` is separate so error messages can include it even when client is None.\n\n**Adaptive recall constants (PRD Section 5.3):**\n```rust\nconst BASE_RECALL_MIN: usize = 50;\nconst FILTERED_RECALL_MIN: usize = 200;\nconst RECALL_CAP: usize = 1500;\n```\n\n**Recall formula:**\n```rust\nlet requested = filters.clamp_limit();\nlet top_k = if filters.has_any_filter() {\n (requested * 50).max(FILTERED_RECALL_MIN).min(RECALL_CAP)\n} else {\n (requested * 10).max(BASE_RECALL_MIN).min(RECALL_CAP)\n};\n```\n\n**Mode behavior:**\n- **Lexical:** FTS only -> rank_rrf with empty vector list (single-list RRF)\n- **Semantic:** Vector only -> requires client (error if None) -> rank_rrf with empty FTS list\n- **Hybrid:** Both FTS + vector -> rank_rrf with both lists\n- **Hybrid with client=None:** Graceful degradation to Lexical with warning, NOT error\n\n**Graceful degradation logic:**\n```rust\nSearchMode::Hybrid => {\n let fts_results = search_fts(conn, query, top_k, fts_mode)?;\n let fts_tuples: Vec<_> = fts_results.iter().map(|r| (r.document_id, r.rank)).collect();\n\n match client {\n Some(client) => {\n let query_embedding = client.embed_batch(vec\\![query.to_string()]).await?;\n let embedding = query_embedding.into_iter().next().unwrap();\n let vec_results = search_vector(conn, &embedding, top_k)?;\n let vec_tuples: Vec<_> = vec_results.iter().map(|r| (r.document_id, r.distance)).collect();\n let ranked = rank_rrf(&vec_tuples, &fts_tuples);\n // ... map to HybridResult\n Ok((results, warnings))\n }\n None => {\n warnings.push(\"Ollama unavailable, falling back to lexical search\".into());\n let ranked = rank_rrf(&[], &fts_tuples);\n // ... 
map to HybridResult\n Ok((results, warnings))\n }\n }\n}\n```\n\n## Acceptance Criteria\n- [ ] Function is `async` (per PRD — Ollama client methods are async)\n- [ ] Signature takes `client: Option<&OllamaClient>` (not required)\n- [ ] Signature takes `ollama_base_url: Option<&str>` for actionable error messages\n- [ ] Returns `(Vec, Vec)` — results + warnings\n- [ ] Lexical mode: FTS-only results ranked via RRF (single list)\n- [ ] Semantic mode: vector-only results ranked via RRF; error if client is None\n- [ ] Hybrid mode: both FTS + vector results merged via RRF\n- [ ] Graceful degradation: client=None in Hybrid falls back to FTS with warning (not error)\n- [ ] Adaptive recall: unfiltered max(50, limit*10), filtered max(200, limit*50), capped 1500\n- [ ] All modes produce consistent --explain output (vector_rank, fts_rank, rrf_score)\n- [ ] SearchMode::from_str accepts aliases: \"fts\" for Lexical, \"vector\" for Semantic\n- [ ] `cargo build` succeeds\n\n## Files\n- `src/search/hybrid.rs` — new file\n- `src/search/mod.rs` — add `pub use hybrid::{search_hybrid, HybridResult, SearchMode};`\n\n## TDD Loop\nRED: Tests (some integration, some unit):\n- `test_lexical_mode` — FTS results only\n- `test_semantic_mode` — vector results only\n- `test_hybrid_mode` — both lists merged\n- `test_graceful_degradation` — None client falls back to FTS with warning in warnings vec\n- `test_adaptive_recall_unfiltered` — recall = max(50, limit*10)\n- `test_adaptive_recall_filtered` — recall = max(200, limit*50)\n- `test_recall_cap` — never exceeds 1500\n- `test_search_mode_from_str` — \"hybrid\", \"lexical\", \"fts\", \"semantic\", \"vector\", invalid\nGREEN: Implement search_hybrid\nVERIFY: `cargo test hybrid`\n\n## Edge Cases\n- Both FTS and vector return zero results: empty output (not error)\n- FTS returns results but vector returns empty: RRF still works (single-list)\n- Very high limit (100) with filters: recall = min(5000, 1500) = 1500\n- Semantic mode with client=None: 
error (OllamaUnavailable), not degradation\n- Semantic mode with 0% coverage: return LoreError::EmbeddingsNotBuilt","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:50.343002Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:56:16.631748Z","closed_at":"2026-01-30T17:56:16.631682Z","close_reason":"Implemented hybrid search with 3 modes (lexical/semantic/hybrid), graceful degradation when Ollama unavailable, adaptive recall (50-1500), RRF fusion. 6 tests pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3eu","depends_on_id":"bd-1k1","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3eu","depends_on_id":"bd-335","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3eu","depends_on_id":"bd-3ez","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3eu","depends_on_id":"bd-bjo","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -215,6 +226,7 @@ {"id":"bd-3ir","title":"Add database migration 006_merge_requests.sql","description":"## Background\nFoundation for all CP2 MR features. This migration defines the schema that all other MR components depend on. Must complete BEFORE any other CP2 work can proceed.\n\n## Approach\nCreate migration file that adds:\n1. `merge_requests` table with all CP2 fields\n2. `mr_labels`, `mr_assignees`, `mr_reviewers` junction tables\n3. Indexes on discussions for MR queries\n4. 
DiffNote position columns on notes table\n\n## Files\n- `migrations/006_merge_requests.sql` - New migration file\n- `src/core/db.rs` - Update MIGRATIONS const to include version 6\n\n## Acceptance Criteria\n- [ ] Migration file exists at `migrations/006_merge_requests.sql`\n- [ ] `merge_requests` table has columns: id, gitlab_id, project_id, iid, title, description, state, draft, author_username, source_branch, target_branch, head_sha, references_short, references_full, detailed_merge_status, merge_user_username, created_at, updated_at, merged_at, closed_at, last_seen_at, discussions_synced_for_updated_at, discussions_sync_last_attempt_at, discussions_sync_attempts, discussions_sync_last_error, web_url, raw_payload_id\n- [ ] `mr_labels` junction table exists with (merge_request_id, label_id) PK\n- [ ] `mr_assignees` junction table exists with (merge_request_id, username) PK\n- [ ] `mr_reviewers` junction table exists with (merge_request_id, username) PK\n- [ ] `idx_discussions_mr_id` and `idx_discussions_mr_resolved` indexes exist\n- [ ] `notes` table has new columns: position_type, position_line_range_start, position_line_range_end, position_base_sha, position_start_sha, position_head_sha\n- [ ] `gi doctor` runs without migration errors\n- [ ] `cargo test` passes\n\n## TDD Loop\nRED: Cannot open DB with version 6 schema\nGREEN: Add migration file with full SQL\nVERIFY: `cargo run -- doctor` shows healthy DB\n\n## SQL Reference (from PRD)\n```sql\n-- Merge requests table\nCREATE TABLE merge_requests (\n id INTEGER PRIMARY KEY,\n gitlab_id INTEGER UNIQUE NOT NULL,\n project_id INTEGER NOT NULL REFERENCES projects(id),\n iid INTEGER NOT NULL,\n title TEXT,\n description TEXT,\n state TEXT, -- opened | merged | closed | locked\n draft INTEGER NOT NULL DEFAULT 0, -- SQLite boolean\n author_username TEXT,\n source_branch TEXT,\n target_branch TEXT,\n head_sha TEXT,\n references_short TEXT,\n references_full TEXT,\n detailed_merge_status TEXT,\n merge_user_username 
TEXT,\n created_at INTEGER, -- ms epoch UTC\n updated_at INTEGER,\n merged_at INTEGER,\n closed_at INTEGER,\n last_seen_at INTEGER NOT NULL,\n discussions_synced_for_updated_at INTEGER,\n discussions_sync_last_attempt_at INTEGER,\n discussions_sync_attempts INTEGER DEFAULT 0,\n discussions_sync_last_error TEXT,\n web_url TEXT,\n raw_payload_id INTEGER REFERENCES raw_payloads(id)\n);\nCREATE INDEX idx_mrs_project_updated ON merge_requests(project_id, updated_at);\nCREATE UNIQUE INDEX uq_mrs_project_iid ON merge_requests(project_id, iid);\n-- ... (see PRD for full index list)\n\n-- Junction tables\nCREATE TABLE mr_labels (\n merge_request_id INTEGER REFERENCES merge_requests(id) ON DELETE CASCADE,\n label_id INTEGER REFERENCES labels(id) ON DELETE CASCADE,\n PRIMARY KEY(merge_request_id, label_id)\n);\n\nCREATE TABLE mr_assignees (\n merge_request_id INTEGER REFERENCES merge_requests(id) ON DELETE CASCADE,\n username TEXT NOT NULL,\n PRIMARY KEY(merge_request_id, username)\n);\n\nCREATE TABLE mr_reviewers (\n merge_request_id INTEGER REFERENCES merge_requests(id) ON DELETE CASCADE,\n username TEXT NOT NULL,\n PRIMARY KEY(merge_request_id, username)\n);\n\n-- DiffNote position columns (ALTER TABLE)\nALTER TABLE notes ADD COLUMN position_type TEXT;\nALTER TABLE notes ADD COLUMN position_line_range_start INTEGER;\nALTER TABLE notes ADD COLUMN position_line_range_end INTEGER;\nALTER TABLE notes ADD COLUMN position_base_sha TEXT;\nALTER TABLE notes ADD COLUMN position_start_sha TEXT;\nALTER TABLE notes ADD COLUMN position_head_sha TEXT;\n\nINSERT INTO schema_version (version, applied_at, description)\nVALUES (6, strftime('%s', 'now') * 1000, 'Merge requests, MR labels, assignees, reviewers');\n```\n\n## Edge Cases\n- SQLite does not support ADD CONSTRAINT - FK defined as nullable in CP1\n- `locked` state is transitional (merge-in-progress) - store as first-class\n- discussions_synced_for_updated_at prevents redundant discussion 
refetch","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:40.101470Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:06:43.899079Z","closed_at":"2026-01-27T00:06:43.898875Z","close_reason":"Migration 006_merge_requests.sql created and verified. Schema v6 applied successfully with all tables, indexes, and position columns.","compaction_level":0,"original_size":0} {"id":"bd-3ir1","title":"Implement terminal safety module (sanitize + URL policy + redact)","description":"## Background\nGitLab content (issue descriptions, comments, MR descriptions) can contain arbitrary text including ANSI escape sequences, bidirectional text overrides, OSC hyperlinks, and C1 control codes. Displaying unsanitized content in a terminal can hijack cursor position, inject fake UI elements, or cause rendering corruption. This module provides a sanitization layer that strips dangerous sequences while preserving a safe ANSI subset for readability.\n\n## Approach\nCreate `crates/lore-tui/src/safety.rs` with:\n- `sanitize_for_terminal(input: &str) -> String` — the main entry point\n- Strip C1 control codes (0x80-0x9F)\n- Strip OSC sequences (ESC ] ... ST)\n- Strip cursor movement (CSI A/B/C/D/E/F/G/H/J/K)\n- Strip bidi overrides (U+202A-U+202E, U+2066-U+2069)\n- **PRESERVE safe ANSI subset**: SGR sequences for bold (1), italic (3), underline (4), reset (0), and standard foreground/background colors (30-37, 40-47, 90-97, 100-107). 
These improve readability of formatted GitLab content.\n- `UrlPolicy` enum: `Strip`, `Footnote`, `Passthrough` — controls how OSC 8 hyperlinks are handled\n- `RedactPattern` for optional PII/secret redaction (email, token patterns)\n- All functions are pure (no I/O), fully testable\n\nReference existing terminal safety patterns in ftui-core if available.\n\n## Acceptance Criteria\n- [ ] sanitize_for_terminal strips C1, OSC, cursor movement, bidi overrides\n- [ ] sanitize_for_terminal preserves bold, italic, underline, reset, and standard color SGR sequences\n- [ ] UrlPolicy::Strip removes OSC 8 hyperlinks entirely\n- [ ] UrlPolicy::Footnote converts OSC 8 hyperlinks to numbered footnotes [1] with URL list at end\n- [ ] RedactPattern matches common secret patterns (tokens, emails) and replaces with [REDACTED]\n- [ ] No unsafe code\n- [ ] Unit tests cover each dangerous sequence type AND verify safe sequences are preserved\n- [ ] Fuzz test with 1000 random byte sequences: no panic\n\n## Files\n- CREATE: crates/lore-tui/src/safety.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add pub mod safety)\n\n## TDD Anchor\nRED: Write `test_strips_cursor_movement` that asserts CSI sequences for cursor up/down/left/right are removed from input while bold SGR is preserved.\nGREEN: Implement the sanitizer state machine that categorizes and filters escape sequences.\nVERIFY: cargo test -p lore-tui safety -- --nocapture\n\nAdditional tests:\n- test_strips_c1_control_codes\n- test_strips_bidi_overrides\n- test_strips_osc_sequences\n- test_preserves_bold_italic_underline_reset\n- test_preserves_standard_colors\n- test_url_policy_strip\n- test_url_policy_footnote\n- test_redact_patterns\n- test_fuzz_no_panic\n\n## Edge Cases\n- Malformed/truncated escape sequences (ESC without closing) — must not consume following text\n- Nested SGR sequences (e.g., bold+color combined in single CSI) — preserve entire sequence if all parameters are safe\n- UTF-8 multibyte chars adjacent to escape sequences 
— must not corrupt char boundaries\n- Empty input returns empty string\n- Input with only safe content passes through unchanged\n\n## Dependency Context\nDepends on bd-3ddw (scaffold) for the crate structure to exist. No other dependencies — this is a pure utility module.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:54:30.165761Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:55:51.154570Z","closed_at":"2026-02-12T19:55:51.154518Z","close_reason":"Implemented safety module: sanitize_for_terminal(), UrlPolicy, RedactPattern. 22 tests passing, clippy clean.","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3ir1","depends_on_id":"bd-3ddw","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3j6","title":"Add transform_mr_discussion and transform_notes_with_diff_position","description":"## Background\nExtends discussion transformer for MR context. MR discussions can contain DiffNotes with file position metadata. This is critical for code review context in CP3 document generation.\n\n## Approach\nAdd two new functions to existing `src/gitlab/transformers/discussion.rs`:\n1. `transform_mr_discussion()` - Transform discussion with MR reference\n2. 
`transform_notes_with_diff_position()` - Extract DiffNote position metadata\n\nCP1 already has the polymorphic `NormalizedDiscussion` with `NoteableRef` enum - reuse that pattern.\n\n## Files\n- `src/gitlab/transformers/discussion.rs` - Add new functions\n- `tests/diffnote_tests.rs` - DiffNote position extraction tests\n- `tests/mr_discussion_tests.rs` - MR discussion transform tests\n\n## Acceptance Criteria\n- [ ] `transform_mr_discussion()` returns `NormalizedDiscussion` with `merge_request_id: Some(local_mr_id)`\n- [ ] `transform_notes_with_diff_position()` returns `Result, String>`\n- [ ] DiffNote position fields extracted: `position_old_path`, `position_new_path`, `position_old_line`, `position_new_line`\n- [ ] Extended position fields extracted: `position_type`, `position_line_range_start`, `position_line_range_end`\n- [ ] SHA triplet extracted: `position_base_sha`, `position_start_sha`, `position_head_sha`\n- [ ] Strict timestamp parsing - returns `Err` on invalid timestamps (no `unwrap_or(0)`)\n- [ ] `cargo test diffnote` passes\n- [ ] `cargo test mr_discussion` passes\n\n## TDD Loop\nRED: `cargo test diffnote_position` -> test fails\nGREEN: Add position extraction logic\nVERIFY: `cargo test diffnote`\n\n## Function Signatures\n```rust\n/// Transform GitLab discussion for MR context.\n/// Reuses existing transform_discussion logic, just with MR reference.\npub fn transform_mr_discussion(\n gitlab_discussion: &GitLabDiscussion,\n local_project_id: i64,\n local_mr_id: i64,\n) -> NormalizedDiscussion {\n // Use existing transform_discussion with NoteableRef::MergeRequest(local_mr_id)\n transform_discussion(\n gitlab_discussion,\n local_project_id,\n NoteableRef::MergeRequest(local_mr_id),\n )\n}\n\n/// Transform notes with DiffNote position extraction.\n/// Returns Result to enforce strict timestamp parsing.\npub fn transform_notes_with_diff_position(\n gitlab_discussion: &GitLabDiscussion,\n local_project_id: i64,\n) -> Result, String>\n```\n\n## DiffNote 
Position Extraction\n```rust\n// Extract position metadata if present\nlet (old_path, new_path, old_line, new_line, position_type, lr_start, lr_end, base_sha, start_sha, head_sha) = note\n .position\n .as_ref()\n .map(|pos| (\n pos.old_path.clone(),\n pos.new_path.clone(),\n pos.old_line,\n pos.new_line,\n pos.position_type.clone(), // \"text\" | \"image\" | \"file\"\n pos.line_range.as_ref().map(|r| r.start_line),\n pos.line_range.as_ref().map(|r| r.end_line),\n pos.base_sha.clone(),\n pos.start_sha.clone(),\n pos.head_sha.clone(),\n ))\n .unwrap_or((None, None, None, None, None, None, None, None, None, None));\n```\n\n## Strict Timestamp Parsing\n```rust\n// CRITICAL: Return error on invalid timestamps, never zero\nlet created_at = iso_to_ms(¬e.created_at)\n .ok_or_else(|| format\\!(\n \"Invalid note.created_at for note {}: {}\",\n note.id, note.created_at\n ))?;\n```\n\n## NormalizedNote Fields for DiffNotes\n```rust\nNormalizedNote {\n // ... existing fields ...\n // DiffNote position metadata\n position_old_path: old_path,\n position_new_path: new_path,\n position_old_line: old_line,\n position_new_line: new_line,\n // Extended position\n position_type,\n position_line_range_start: lr_start,\n position_line_range_end: lr_end,\n // SHA triplet\n position_base_sha: base_sha,\n position_start_sha: start_sha,\n position_head_sha: head_sha,\n}\n```\n\n## Edge Cases\n- Notes without position should have all position fields as None\n- Invalid timestamp should fail the entire discussion (no partial results)\n- File renames: `old_path \\!= new_path` indicates a renamed file\n- Multi-line comments: `line_range` present means comment spans lines 45-48","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:41.208380Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:20:13.473091Z","closed_at":"2026-01-27T00:20:13.473031Z","close_reason":"Implemented transform_mr_discussion() and transform_notes_with_diff_position() with full DiffNote 
position extraction:\n- Extended NormalizedNote with 10 DiffNote position fields (path, line, type, line_range, SHA triplet)\n- Added strict timestamp parsing that returns Err on invalid timestamps\n- Created 13 diffnote_position_tests covering all extraction paths and error cases\n- Created 6 mr_discussion_tests verifying MR reference handling\n- All 161 tests passing","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3j6","depends_on_id":"bd-3ir","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3j6","depends_on_id":"bd-5ta","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} +{"id":"bd-3jqx","title":"Implement async integration tests: cancellation, timeout, embed isolation, payload integrity","description":"## Background\n\nThe surgical sync pipeline involves async operations, cancellation signals, timeouts, scoped embedding, and multi-entity coordination. Unit tests in individual beads cover their own logic, but integration tests are needed to verify the full pipeline under realistic conditions: cancellation at different stages, timeout behavior with continuation, embedding scope isolation (only affected documents get embedded), and payload integrity (project_id mismatches rejected). These tests use wiremock for HTTP mocking and tokio for async runtime.\n\n## Approach\n\nCreate `tests/surgical_integration.rs` as an integration test file (Rust convention: `tests/` directory for integration tests). Six test functions covering the critical behavioral properties of the surgical pipeline:\n\n1. **Cancellation before preflight**: Signal cancelled before any HTTP call. Verify: recorder marked failed, no GitLab requests made, result has zero updates.\n2. **Cancellation during dependent stage**: Signal cancelled after preflight succeeds but during discussion fetch. Verify: partial results recorded, recorder marked failed, entities processed before cancellation have outcomes.\n3. 
**Per-entity timeout with continuation**: One entity's GitLab endpoint is slow (wiremock delay). Verify: that entity gets `failed` outcome with timeout error, remaining entities continue and succeed.\n4. **Embed scope isolation**: Sync two issues. Verify: only documents generated from those two issues are embedded, not the entire corpus. Assert by checking document IDs passed to embed function.\n5. **Payload project_id mismatch rejection**: Preflight returns an issue with `project_id` different from the resolved project. Verify: that entity gets `failed` outcome with clear error, other entities unaffected.\n6. **Successful full pipeline**: Sync one issue end-to-end through all stages. Verify: SyncResult has correct counts, entity_results has `synced` outcome, documents regenerated, embeddings created.\n\nAll tests use in-memory SQLite (`create_connection(Path::new(\":memory:\"))` + `run_migrations`) and wiremock `MockServer`.\n\n## Acceptance Criteria\n\n1. All 6 tests compile and pass\n2. Tests are isolated (each creates its own DB and mock server)\n3. Cancellation tests verify recorder state (failed status in sync_runs table)\n4. Timeout test uses wiremock delay, not `tokio::time::sleep` on the test side\n5. Embed isolation test verifies document-level scoping, not just function call\n6. 
Tests run in CI without flakiness (no real network, no real Ollama)\n\n## Files\n\n- `tests/surgical_integration.rs` — all 6 integration tests\n\n## TDD Anchor\n\n```rust\n// tests/surgical_integration.rs\n\nuse lore::cli::commands::sync::{SyncOptions, SyncResult};\nuse lore::core::db::{create_connection, run_migrations};\nuse lore::core::shutdown::ShutdownSignal;\nuse lore::Config;\nuse std::path::Path;\nuse std::time::Duration;\nuse wiremock::{Mock, MockServer, ResponseTemplate};\nuse wiremock::matchers::{method, path_regex};\n\nfn test_config(mock_url: &str) -> Config {\n let mut config = Config::default();\n config.gitlab.url = mock_url.to_string();\n config.gitlab.token = \"test-token\".to_string();\n config\n}\n\nfn setup_db() -> rusqlite::Connection {\n let conn = create_connection(Path::new(\":memory:\")).unwrap();\n run_migrations(&conn).unwrap();\n conn.execute(\n \"INSERT INTO projects (gitlab_project_id, path_with_namespace, web_url)\n VALUES (1, 'group/project', 'https://gitlab.example.com/group/project')\",\n [],\n ).unwrap();\n conn\n}\n\nfn mock_issue_json(iid: u64) -> serde_json::Value {\n serde_json::json!({\n \"id\": 100 + iid, \"iid\": iid, \"project_id\": 1, \"title\": format!(\"Issue {}\", iid),\n \"state\": \"opened\", \"created_at\": \"2026-01-01T00:00:00Z\",\n \"updated_at\": \"2026-02-17T00:00:00Z\",\n \"author\": {\"id\": 1, \"username\": \"dev\", \"name\": \"Dev\"},\n \"web_url\": format!(\"https://gitlab.example.com/group/project/-/issues/{}\", iid)\n })\n}\n\n#[tokio::test]\nasync fn cancellation_before_preflight() {\n let server = MockServer::start().await;\n // No mocks mounted — if any request is made, wiremock will return 404\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issues: vec![7],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n signal.cancel(); // Cancel before anything starts\n\n let result = 
lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"cancel-pre\"), &signal,\n ).await.unwrap();\n\n assert_eq!(result.issues_updated, 0);\n assert_eq!(result.mrs_updated, 0);\n // Verify no HTTP requests were made\n assert_eq!(server.received_requests().await.unwrap().len(), 0);\n}\n\n#[tokio::test]\nasync fn cancellation_during_dependent_stage() {\n let server = MockServer::start().await;\n // Mock issue fetch (preflight succeeds)\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([mock_issue_json(7)])))\n .mount(&server).await;\n // Mock discussion fetch with delay (gives time to cancel)\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues/7/discussions\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([]))\n .set_body_delay(Duration::from_secs(2)))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issues: vec![7],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n\n // Cancel after a short delay (after preflight, during dependents)\n let signal_clone = signal.clone();\n tokio::spawn(async move {\n tokio::time::sleep(Duration::from_millis(200)).await;\n signal_clone.cancel();\n });\n\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"cancel-dep\"), &signal,\n ).await.unwrap();\n\n // Preflight should have run, but ingest may be partial\n assert!(result.surgical_mode == Some(true));\n}\n\n#[tokio::test]\nasync fn per_entity_timeout_with_continuation() {\n let server = MockServer::start().await;\n // Issue 7: slow response (simulates timeout)\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\\?.*iids\\[\\]=7\"))\n .respond_with(ResponseTemplate::new(200)\n 
.set_body_json(serde_json::json!([mock_issue_json(7)]))\n .set_body_delay(Duration::from_secs(30)))\n .mount(&server).await;\n // Issue 42: fast response\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\\?.*iids\\[\\]=42\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([mock_issue_json(42)])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issues: vec![7, 42],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n\n // With a per-entity timeout, issue 7 should fail, issue 42 should succeed\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"timeout-test\"), &signal,\n ).await.unwrap();\n\n let entities = result.entity_results.as_ref().unwrap();\n // One should be failed (timeout), one should be synced\n let failed = entities.iter().filter(|e| e.outcome == \"failed\").count();\n let synced = entities.iter().filter(|e| e.outcome == \"synced\").count();\n assert!(failed >= 1 || synced >= 1, \"Expected mixed outcomes\");\n}\n\n#[tokio::test]\nasync fn embed_scope_isolation() {\n let server = MockServer::start().await;\n // Mock two issues\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([\n mock_issue_json(7), mock_issue_json(42)\n ])))\n .mount(&server).await;\n // Mock empty discussions for both\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues/\\d+/discussions\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issues: vec![7, 42],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n no_embed: false,\n ..SyncOptions::default()\n };\n let 
signal = ShutdownSignal::new();\n\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"embed-iso\"), &signal,\n ).await.unwrap();\n\n // Embedding should only have processed documents from issues 7 and 42\n // Not the full corpus. Verify via document counts.\n assert!(result.documents_embedded <= 2,\n \"Expected at most 2 documents embedded (one per issue), got {}\",\n result.documents_embedded);\n}\n\n#[tokio::test]\nasync fn payload_project_id_mismatch_rejection() {\n let server = MockServer::start().await;\n // Return issue with project_id=999 (doesn't match resolved project_id=1)\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([{\n \"id\": 200, \"iid\": 7, \"project_id\": 999, \"title\": \"Wrong Project\",\n \"state\": \"opened\", \"created_at\": \"2026-01-01T00:00:00Z\",\n \"updated_at\": \"2026-02-17T00:00:00Z\",\n \"author\": {\"id\": 1, \"username\": \"dev\", \"name\": \"Dev\"},\n \"web_url\": \"https://gitlab.example.com/other/project/-/issues/7\"\n }])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issues: vec![7],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"mismatch\"), &signal,\n ).await.unwrap();\n\n let entities = result.entity_results.as_ref().unwrap();\n assert_eq!(entities.len(), 1);\n assert_eq!(entities[0].outcome, \"failed\");\n assert!(entities[0].error.as_ref().unwrap().contains(\"project_id\"));\n}\n\n#[tokio::test]\nasync fn successful_full_pipeline() {\n let server = MockServer::start().await;\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200)\n 
.set_body_json(serde_json::json!([mock_issue_json(7)])))\n .mount(&server).await;\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues/7/discussions\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([])))\n .mount(&server).await;\n // Mock any resource event endpoints\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues/7/resource_\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issues: vec![7],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n no_embed: true, // Skip embed to avoid Ollama dependency\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"full-pipe\"), &signal,\n ).await.unwrap();\n\n assert_eq!(result.surgical_mode, Some(true));\n assert_eq!(result.surgical_iids.as_ref().unwrap().issues, vec![7]);\n assert_eq!(result.preflight_only, Some(false));\n\n let entities = result.entity_results.as_ref().unwrap();\n assert_eq!(entities.len(), 1);\n assert_eq!(entities[0].entity_type, \"issue\");\n assert_eq!(entities[0].iid, 7);\n assert_eq!(entities[0].outcome, \"synced\");\n assert!(entities[0].error.is_none());\n\n assert!(result.issues_updated >= 1);\n assert!(result.documents_regenerated >= 1);\n}\n```\n\n## Edge Cases\n\n- **Wiremock delay vs tokio timeout**: Use `set_body_delay` on wiremock, not `tokio::time::sleep` in tests. The per-entity timeout in the orchestrator (bd-1i4i) should use `tokio::time::timeout` around the HTTP call.\n- **Embed isolation without Ollama**: Tests that verify embed scoping should either mock Ollama or use `no_embed: true` and verify the document ID list passed to the embed function. 
The `successful_full_pipeline` test uses `no_embed: true` to avoid requiring a running Ollama server in CI.\n- **Test isolation**: Each test creates its own `MockServer`, in-memory DB, and `ShutdownSignal`. No shared state between tests.\n- **Flakiness prevention**: Cancellation timing tests (test 2) use deterministic delays (cancel after 200ms, response delayed 2s). If flaky, increase the gap between cancel time and response delay.\n- **CI compatibility**: No real GitLab, no real Ollama, no real filesystem locks (in-memory DB means AppLock may need adaptation for tests — consider a test-only lock bypass or use a temp file DB for lock tests).\n\n## Dependency Context\n\n- **Depends on (upstream)**: bd-1i4i (the `run_sync_surgical` function under test), bd-wcja (SyncResult surgical fields to assert), bd-1lja (SyncOptions extensions), bd-3sez (surgical ingest for TOCTOU test), bd-arka (SyncRunRecorder for recorder state assertions), bd-1elx (scoped embed for isolation test), bd-kanh (per-entity helpers)\n- **No downstream dependents** — this is a terminal test-only bead.\n- These tests validate the behavioral contracts that all upstream beads promise. They are the acceptance gate for the surgical sync feature.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:18:46.182356Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:04:49.331351Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"]} {"id":"bd-3js","title":"Implement MR CLI commands (list, show, count)","description":"## Background\nCLI commands for viewing and filtering merge requests. Includes list, show, and count commands with MR-specific filters.\n\n## Approach\nUpdate existing CLI command files:\n1. `list.rs` - Add MR listing with filters\n2. `show.rs` - Add MR detail view with discussions\n3. 
`count.rs` - Add MR counting with state breakdown\n\n## Files\n- `src/cli/commands/list.rs` - Add MR subcommand\n- `src/cli/commands/show.rs` - Add MR detail view\n- `src/cli/commands/count.rs` - Add MR counting\n\n## Acceptance Criteria\n- [ ] `gi list mrs` shows MR table with iid, title, state, author, branches\n- [ ] `gi list mrs --state=merged` filters by state\n- [ ] `gi list mrs --state=locked` filters locally (not server-side)\n- [ ] `gi list mrs --draft` shows only draft MRs\n- [ ] `gi list mrs --no-draft` excludes draft MRs\n- [ ] `gi list mrs --reviewer=username` filters by reviewer\n- [ ] `gi list mrs --target-branch=main` filters by target branch\n- [ ] `gi list mrs --source-branch=feature/x` filters by source branch\n- [ ] Draft MRs show `[DRAFT]` prefix in title\n- [ ] `gi show mr ` displays full detail including discussions\n- [ ] DiffNote shows file context: `[src/file.ts:45]`\n- [ ] Multi-line DiffNote shows: `[src/file.ts:45-48]`\n- [ ] `gi show mr` shows `detailed_merge_status`\n- [ ] `gi count mrs` shows total with state breakdown\n- [ ] `gi sync-status` shows MR cursor positions\n- [ ] `cargo test cli_commands` passes\n\n## TDD Loop\nRED: `cargo test list_mrs` -> command not found\nGREEN: Add MR subcommand\nVERIFY: `gi list mrs --help`\n\n## gi list mrs Output\n```\nMerge Requests (showing 20 of 1,234)\n\n !847 Refactor auth to use JWT tokens merged @johndoe main <- feature/jwt 3 days ago\n !846 Fix memory leak in websocket handler opened @janedoe main <- fix/websocket 5 days ago\n !845 [DRAFT] Add dark mode CSS variables opened @bobsmith main <- ui/dark-mode 1 week ago\n```\n\n## SQL for MR Listing\n```sql\nSELECT \n m.iid, m.title, m.state, m.draft, m.author_username,\n m.target_branch, m.source_branch, m.updated_at\nFROM merge_requests m\nWHERE m.project_id = ?\n AND (? IS NULL OR m.state = ?) -- state filter\n AND (? IS NULL OR m.draft = ?) -- draft filter\n AND (? IS NULL OR m.author_username = ?) -- author filter\n AND (? 
IS NULL OR m.target_branch = ?) -- target-branch filter\n AND (? IS NULL OR m.source_branch = ?) -- source-branch filter\n AND (? IS NULL OR EXISTS ( -- reviewer filter\n SELECT 1 FROM mr_reviewers r \n WHERE r.merge_request_id = m.id AND r.username = ?\n ))\nORDER BY m.updated_at DESC\nLIMIT ?\n```\n\n## gi show mr Output\n```\nMerge Request !847: Refactor auth to use JWT tokens\n================================================================================\n\nProject: group/project-one\nState: merged\nDraft: No\nAuthor: @johndoe\nAssignees: @janedoe, @bobsmith\nReviewers: @alice, @charlie\nSource: feature/jwt\nTarget: main\nMerge Status: mergeable\nMerged By: @alice\nMerged At: 2024-03-20 14:30:00\nLabels: enhancement, auth, reviewed\n\nDescription:\n Moving away from session cookies to JWT-based authentication...\n\nDiscussions (8):\n\n @janedoe (2024-03-16) [src/auth/jwt.ts:45]:\n Should we use a separate signing key for refresh tokens?\n\n @johndoe (2024-03-16):\n Good point. I'll add a separate key with rotation support.\n\n @alice (2024-03-18) [RESOLVED]:\n Looks good! 
Just one nit about the token expiry constant.\n```\n\n## DiffNote File Context Display\n```rust\n// Build file context string\nlet file_context = match (note.position_new_path, note.position_new_line, note.position_line_range_end) {\n (Some(path), Some(line), Some(end_line)) if line != end_line => {\n format!(\"[{}:{}-{}]\", path, line, end_line)\n }\n (Some(path), Some(line), _) => {\n format!(\"[{}:{}]\", path, line)\n }\n _ => String::new(),\n};\n```\n\n## gi count mrs Output\n```\nMerge Requests: 1,234\n opened: 89\n merged: 1,045\n closed: 100\n```\n\n## Filter Arguments (clap)\n```rust\n#[derive(Parser)]\nstruct ListMrsArgs {\n #[arg(long)]\n state: Option, // opened|merged|closed|locked|all\n #[arg(long)]\n draft: bool,\n #[arg(long)]\n no_draft: bool,\n #[arg(long)]\n author: Option,\n #[arg(long)]\n assignee: Option,\n #[arg(long)]\n reviewer: Option,\n #[arg(long)]\n target_branch: Option,\n #[arg(long)]\n source_branch: Option,\n #[arg(long)]\n label: Vec,\n #[arg(long)]\n project: Option,\n #[arg(long, default_value = \"20\")]\n limit: u32,\n}\n```\n\n## Edge Cases\n- `--state=locked` must filter locally (GitLab API doesn't support it)\n- Ambiguous MR iid across projects: prompt for `--project`\n- Empty discussions: show \"No discussions\" message\n- Multi-line DiffNotes: show line range in context","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:43.354939Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:37:31.792569Z","closed_at":"2026-01-27T00:37:31.792504Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3js","depends_on_id":"bd-20h","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3js","depends_on_id":"bd-ser","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3kj","title":"[CP0] gi version, backup, reset, sync-status commands","description":"## Background\n\nThese are the remaining 
utility commands for CP0. version is trivial. backup creates safety copies before destructive operations. reset provides clean-slate capability. sync-status is a stub for CP0 that will be implemented in CP1.\n\nReference: docs/prd/checkpoint-0.md sections \"gi version\", \"gi backup\", \"gi reset\", \"gi sync-status\"\n\n## Approach\n\n**src/cli/commands/version.ts:**\n```typescript\nimport { Command } from 'commander';\nimport { version } from '../../../package.json' with { type: 'json' };\n\nexport const versionCommand = new Command('version')\n .description('Show version information')\n .action(() => {\n console.log(\\`gi version \\${version}\\`);\n });\n```\n\n**src/cli/commands/backup.ts:**\n```typescript\nimport { Command } from 'commander';\nimport { copyFileSync, mkdirSync } from 'node:fs';\nimport { loadConfig } from '../../core/config';\nimport { getDbPath, getBackupDir } from '../../core/paths';\n\nexport const backupCommand = new Command('backup')\n .description('Create timestamped database backup')\n .action(async (options, command) => {\n const globalOpts = command.optsWithGlobals();\n const config = loadConfig(globalOpts.config);\n \n const dbPath = getDbPath(config.storage?.dbPath);\n const backupDir = getBackupDir(config.storage?.backupDir);\n \n mkdirSync(backupDir, { recursive: true });\n \n // Format: data-2026-01-24T10-30-00.db (colons replaced for Windows compat)\n const timestamp = new Date().toISOString().replace(/:/g, '-').replace(/\\\\..*/, '');\n const backupPath = \\`\\${backupDir}/data-\\${timestamp}.db\\`;\n \n copyFileSync(dbPath, backupPath);\n console.log(\\`Created backup: \\${backupPath}\\`);\n });\n```\n\n**src/cli/commands/reset.ts:**\n```typescript\nimport { Command } from 'commander';\nimport { unlinkSync, existsSync } from 'node:fs';\nimport { createInterface } from 'node:readline';\nimport { loadConfig } from '../../core/config';\nimport { getDbPath } from '../../core/paths';\n\nexport const resetCommand = new 
Command('reset')\n .description('Delete database and reset all state')\n .option('--confirm', 'Skip confirmation prompt')\n .action(async (options, command) => {\n const globalOpts = command.optsWithGlobals();\n const config = loadConfig(globalOpts.config);\n const dbPath = getDbPath(config.storage?.dbPath);\n \n if (!existsSync(dbPath)) {\n console.log('No database to reset.');\n return;\n }\n \n if (!options.confirm) {\n console.log(\\`This will delete:\\n - Database: \\${dbPath}\\n - All sync cursors\\n - All cached data\\n\\`);\n // Prompt for 'yes' confirmation\n // If not 'yes', exit 2\n }\n \n unlinkSync(dbPath);\n // Also delete WAL and SHM files if they exist\n if (existsSync(\\`\\${dbPath}-wal\\`)) unlinkSync(\\`\\${dbPath}-wal\\`);\n if (existsSync(\\`\\${dbPath}-shm\\`)) unlinkSync(\\`\\${dbPath}-shm\\`);\n \n console.log(\"Database reset. Run 'gi sync' to repopulate.\");\n });\n```\n\n**src/cli/commands/sync-status.ts:**\n```typescript\n// CP0 stub - full implementation in CP1\nexport const syncStatusCommand = new Command('sync-status')\n .description('Show sync state')\n .action(() => {\n console.log(\"No sync runs yet. 
Run 'gi sync' to start.\");\n });\n```\n\n## Acceptance Criteria\n\n- [ ] `gi version` outputs \"gi version X.Y.Z\"\n- [ ] `gi backup` creates timestamped copy of database\n- [ ] Backup filename is Windows-compatible (no colons)\n- [ ] Backup directory created if missing\n- [ ] `gi reset` prompts for 'yes' confirmation\n- [ ] `gi reset --confirm` skips prompt\n- [ ] Reset deletes .db, .db-wal, and .db-shm files\n- [ ] Reset exits 2 if user doesn't type 'yes'\n- [ ] `gi sync-status` outputs stub message\n\n## Files\n\nCREATE:\n- src/cli/commands/version.ts\n- src/cli/commands/backup.ts\n- src/cli/commands/reset.ts\n- src/cli/commands/sync-status.ts\n\n## TDD Loop\n\nN/A - simple commands, verify manually:\n\n```bash\ngi version\ngi backup\nls ~/.local/share/gi/backups/\ngi reset # type 'no'\ngi reset --confirm\nls ~/.local/share/gi/data.db # should not exist\ngi sync-status\n```\n\n## Edge Cases\n\n- Backup when database doesn't exist - show clear error\n- Reset when database doesn't exist - show \"No database to reset\"\n- WAL/SHM files may not exist - check before unlinking\n- Timestamp with milliseconds could cause very long filename\n- readline prompt in non-interactive terminal - handle SIGINT","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:51.774210Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:31:46.227285Z","closed_at":"2026-01-25T03:31:46.227220Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3kj","depends_on_id":"bd-13b","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"},{"issue_id":"bd-3kj","depends_on_id":"bd-3ng","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} {"id":"bd-3l56","title":"Add lore sync --tui convenience flag","description":"## Background\n\nThe PRD defines two CLI entry paths to the TUI: `lore tui` (full TUI) and `lore sync --tui` (convenience shortcut that launches the TUI directly on the Sync 
screen in inline mode). The `lore tui` command is covered by bd-26lp. This bead adds the `--tui` flag to the existing `SyncArgs` struct, which delegates to the `lore-tui` binary with `--sync` flag.\n\n## Approach\n\nTwo changes to the existing lore CLI crate (NOT the lore-tui crate):\n\n1. **Add `--tui` flag to `SyncArgs`** in `src/cli/mod.rs`:\n ```rust\n /// Show sync progress in interactive TUI (inline mode)\n #[arg(long)]\n pub tui: bool,\n ```\n\n2. **Handle the flag in sync command dispatch** in `src/main.rs` (or wherever Commands::Sync is matched):\n - If `args.tui` is true, call `resolve_tui_binary()` (from bd-26lp) and spawn it with `--sync` flag\n - Forward the config path if specified\n - Exit with the lore-tui process exit code\n - If lore-tui is not found, print a helpful error message\n\nThe `resolve_tui_binary()` function is implemented by bd-26lp (CLI integration). This bead simply adds the flag and the early-return delegation path in the sync command handler.\n\n## Acceptance Criteria\n- [ ] `lore sync --tui` is accepted by the CLI parser (no unknown flag error)\n- [ ] When `--tui` is set, the sync command delegates to `lore-tui --sync` binary\n- [ ] Config path is forwarded if `--config` was specified\n- [ ] If lore-tui binary is not found, prints error with install instructions and exits non-zero\n- [ ] `lore sync --tui --full` does NOT pass `--full` to lore-tui (TUI has its own sync controls)\n- [ ] `--tui` flag appears in `lore sync --help` output\n\n## Files\n- MODIFY: src/cli/mod.rs (add `tui: bool` field to `SyncArgs` struct at line ~776)\n- MODIFY: src/main.rs or src/cli/commands/sync.rs (add early-return delegation when `args.tui`)\n\n## TDD Anchor\nRED: Write `test_sync_tui_flag_accepted` that verifies `SyncArgs` can be parsed with `--tui` flag.\nGREEN: Add the `tui: bool` field to SyncArgs.\nVERIFY: cargo test sync_tui_flag\n\nAdditional tests:\n- test_sync_tui_flag_default_false (not set by default)\n\n## Edge Cases\n- `--tui` combined 
with `--dry-run` — the TUI handles dry-run internally, so `--dry-run` should be ignored when `--tui` is set (or warn)\n- `--tui` when lore-tui binary does not exist — clear error, not a panic\n- `--tui` in robot mode (`--robot`) — nonsensical combination, should error with \"cannot use --tui with --robot\"\n\n## Dependency Context\n- Depends on bd-26lp (CLI integration) which implements `resolve_tui_binary()` and `validate_tui_compat()` functions that this bead calls.\n- The SyncArgs struct is at src/cli/mod.rs:739. The existing fields are: full, no_full, force, no_force, no_embed, no_docs, no_events, no_file_changes, dry_run, no_dry_run.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:40.785182Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:29:49.341576Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3l56","depends_on_id":"bd-26lp","type":"blocks","created_at":"2026-02-12T19:34:39Z","created_by":"import"}]} @@ -236,6 +248,7 @@ {"id":"bd-3qn6","title":"Rewrite who --path to use mr_file_changes for authorship signal","description":"## Problem\n\nwho --path currently only queries DiffNote records (notes.position_new_path), so it only finds people who left inline review comments on that exact file. This is highly misleading -- it reports 'no experts' for files that have been actively authored and reviewed, just without inline comments on that specific path.\n\n## Solution\n\nRewrite query_expert() to incorporate mr_file_changes as a primary signal source:\n\n1. MR authorship signal: JOIN mr_file_changes to find MR authors who touched the file (strongest signal)\n2. MR reviewer signal: JOIN mr_file_changes + merge_request_reviewers to find reviewers of MRs that touched the file (even without DiffNotes on that file)\n3. 
DiffNote signal: Keep existing DiffNote query as a supplementary signal (inline comments show deep familiarity)\n\n### Scoring weights (to tune):\n- MR author who touched the file: 15 points per MR\n- MR reviewer of MR touching the file: 10 points per MR\n- DiffNote reviewer on that file: 20 points per MR + 1 per note (existing)\n- DiffNote MR author: 12 points per MR (existing)\n\n### Path matching:\n- Reuse build_path_query() but extend DB probes to also check mr_file_changes.new_path\n- For prefix matching, LIKE on mr_file_changes.new_path\n\n### Also fix:\n- build_path_query() probes should check mr_file_changes in addition to notes, so path resolution works even when no DiffNotes exist\n\n## Acceptance Criteria\n- who --path returns results for files touched in MRs even without DiffNotes\n- Existing DiffNote-based scoring still contributes\n- build_path_query probes mr_file_changes for path existence\n- Tests cover: MR-only authorship, DiffNote-only, combined scoring\n- Robot mode JSON output unchanged (same schema)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T18:16:41.991344Z","created_by":"tayloreernisse","updated_at":"2026-02-08T18:34:25.704024Z","closed_at":"2026-02-08T18:34:25.703965Z","close_reason":"Rewrote query_expert() and query_overlap() in who.rs to incorporate mr_file_changes + mr_reviewers as signal sources alongside existing DiffNote data. Uses 4-branch UNION ALL with COUNT(DISTINCT CASE) for proper deduplication across signal types. 8 new tests, all 397 pass.","compaction_level":0,"original_size":0,"labels":["cli","phase-b","who"],"dependencies":[{"issue_id":"bd-3qn6","depends_on_id":"bd-2yo","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-3qs","title":"Implement lore generate-docs CLI command","description":"## Background\nThe generate-docs CLI command is the user-facing wrapper around the document regeneration pipeline. 
It has two modes: incremental (default, processes dirty_sources queue only) and full (seeds dirty_sources with ALL entities, then drains). Both modes use the same regenerator codepath to avoid logic divergence. Full mode uses keyset pagination (WHERE id > last_id) for seeding to avoid O(n^2) OFFSET degradation on large tables.\n\n## Approach\nCreate `src/cli/commands/generate_docs.rs` per PRD Section 2.4.\n\n**Core function:**\n```rust\npub fn run_generate_docs(\n config: &Config,\n full: bool,\n project_filter: Option<&str>,\n) -> Result\n```\n\n**Full mode seeding (keyset pagination):**\n```rust\nconst FULL_MODE_CHUNK_SIZE: usize = 2000;\n\n// For each source type (issues, MRs, discussions):\nlet mut last_id: i64 = 0;\nloop {\n let tx = conn.transaction()?;\n let inserted = tx.execute(\n \"INSERT INTO dirty_sources (source_type, source_id, queued_at, ...)\n SELECT 'issue', id, ?, 0, NULL, NULL, NULL\n FROM issues WHERE id > ? ORDER BY id LIMIT ?\n ON CONFLICT(source_type, source_id) DO NOTHING\",\n params![now_ms(), last_id, FULL_MODE_CHUNK_SIZE],\n )?;\n if inserted == 0 { tx.commit()?; break; }\n // Advance keyset cursor...\n tx.commit()?;\n}\n```\n\n**After draining (full mode only):**\n```sql\nINSERT INTO documents_fts(documents_fts) VALUES('optimize')\n```\n\n**CLI args:**\n```rust\n#[derive(Args)]\npub struct GenerateDocsArgs {\n #[arg(long)]\n full: bool,\n #[arg(long)]\n project: Option,\n}\n```\n\n**Output:** Human-readable table + JSON robot mode.\n\n## Acceptance Criteria\n- [ ] Default mode (no --full): processes only existing dirty_sources entries\n- [ ] --full mode: seeds dirty_sources with ALL issues, MRs, and discussions\n- [ ] Full mode uses keyset pagination (WHERE id > last_id, not OFFSET)\n- [ ] Full mode chunk size is 2000\n- [ ] Full mode does FTS optimize after completion\n- [ ] Both modes use regenerate_dirty_documents() (same codepath)\n- [ ] Progress bar shown in human mode (via indicatif)\n- [ ] JSON output in robot mode with 
GenerateDocsResult\n- [ ] GenerateDocsResult has issues/mrs/discussions/total/truncated/skipped counts\n- [ ] `cargo build` succeeds\n\n## Files\n- `src/cli/commands/generate_docs.rs` — new file\n- `src/cli/commands/mod.rs` — add `pub mod generate_docs;`\n- `src/cli/mod.rs` — add GenerateDocsArgs, wire up generate-docs subcommand\n- `src/main.rs` — add generate-docs command handler\n\n## TDD Loop\nRED: Integration test with seeded DB\nGREEN: Implement run_generate_docs with seeding + drain\nVERIFY: `cargo build && cargo test generate_docs`\n\n## Edge Cases\n- Empty database (no issues/MRs/discussions): full mode seeds nothing, returns all-zero counts\n- --project filter in full mode: only seed dirty_sources for entities in that project\n- Interrupted full mode: dirty_sources entries persist (ON CONFLICT DO NOTHING), resume by re-running\n- FTS optimize on empty FTS table: no-op (safe)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:25:55.226666Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:49:23.397157Z","closed_at":"2026-01-30T17:49:23.397098Z","close_reason":"Implemented generate-docs command with incremental + full mode, keyset pagination seeding, FTS optimize, project filter, human + JSON output. Builds clean.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3qs","depends_on_id":"bd-1u1","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-3qs","depends_on_id":"bd-221","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-3rl","title":"Epic: Gate C - Sync MVP","description":"## Background\nGate C adds the sync orchestrator and queue infrastructure that makes the search pipeline incremental and self-maintaining. It introduces dirty source tracking (change detection during ingestion), the discussion fetch queue, and the unified lore sync command that orchestrates the full pipeline. 
Gate C also adds integrity checks and repair paths.\n\n## Gate C Deliverables\n1. Orchestrated lore sync command with incremental doc regen + re-embedding\n2. Integrity checks + repair paths for FTS/embeddings consistency\n\n## Bead Dependencies (execution order, after Gate A)\n1. **bd-mem** — Shared backoff utility (no deps, shared with Gate B)\n2. **bd-38q** — Dirty source tracking (blocked by bd-36p, bd-hrs, bd-mem)\n3. **bd-1je** — Discussion queue (blocked by bd-hrs, bd-mem)\n4. **bd-1i2** — Integrate dirty tracking into ingestion (blocked by bd-38q)\n5. **bd-1x6** — Sync CLI (blocked by bd-38q, bd-1je, bd-1i2, bd-3qs, bd-2sx)\n\n## Acceptance Criteria\n- [ ] `lore sync` runs full pipeline: ingest -> generate-docs -> embed\n- [ ] `lore sync --full` does full re-sync + regeneration\n- [ ] `lore sync --no-embed` skips embedding stage\n- [ ] Dirty tracking: upserted entities automatically marked for regeneration\n- [ ] Queue draining: dirty_sources fully drained in bounded batch loop\n- [ ] Backoff: failed items use exponential backoff with jitter\n- [ ] `lore stats --check` detects inconsistencies\n- [ ] `lore stats --repair` fixes FTS/embedding inconsistencies","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-30T15:25:13.494698Z","created_by":"tayloreernisse","updated_at":"2026-01-30T18:05:52.121666Z","closed_at":"2026-01-30T18:05:52.121619Z","close_reason":"All Gate C sub-beads complete: backoff utility, dirty tracking, discussion queue, ingestion integration, sync CLI, stats CLI","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3rl","depends_on_id":"bd-1x6","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-3rl","depends_on_id":"bd-pr1","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-3sez","title":"Create surgical.rs core module with preflight fetch, ingest functions, and TOCTOU guards","description":"## Background\n\nThe surgical sync 
pipeline needs a core module (`src/ingestion/surgical.rs`) that fetches a single issue or MR by IID from GitLab and ingests it into the local SQLite database. This replaces the bulk pagination path (`ingest_issues`/`ingest_merge_requests`) for targeted, on-demand sync of specific entities.\n\nKey constraints:\n- `process_single_issue` (issues.rs:143) and `process_single_mr` (merge_requests.rs:144) are private functions. This bead wraps them with pub(crate) entry points that add TOCTOU guard logic and dirty marking.\n- `updated_at` is a `String` (ISO 8601) in `GitLabIssue`/`GitLabMergeRequest` but stored as `INTEGER` (ms-epoch) in the DB. The TOCTOU guard must parse the ISO string to ms-epoch for comparison.\n- `ProcessMrResult` (merge_requests.rs:138) is a private struct. The MR ingest wrapper returns its own result type or re-exports the needed fields.\n- `SyncRunRecorder` has `succeed()` and `fail()` that consume `self`. Not needed here since surgical.rs is called from the orchestrator which owns the recorder.\n\n## Approach\n\nCreate `src/ingestion/surgical.rs` with:\n\n1. **`preflight_fetch`** (async): Takes `&GitLabClient`, `gitlab_project_id`, and a list of `(entity_type, iid)` targets. Calls `client.get_issue_by_iid()` and `client.get_mr_by_iid()` (from bd-159p). Returns `PreflightResult { issues: Vec, merge_requests: Vec, failures: Vec }`.\n\n2. **`ingest_issue_by_iid`** (sync): Takes `&Connection`, `&Config`, `project_id`, `&GitLabIssue`. Applies TOCTOU guard (compare payload `updated_at` parsed to ms-epoch vs DB `updated_at`), then calls `process_single_issue` (requires making it `pub(crate)` in bd-1sc6), marks dirty via `dirty_tracker::mark_dirty(conn, SourceType::Issue, local_issue_id)`, and returns `IngestIssueResult { upserted: bool, labels_created: usize, skipped_stale: bool, dirty_source_keys: Vec<(SourceType, i64)> }`.\n\n3. **`ingest_mr_by_iid`** (sync): Same pattern for MRs. 
Calls `process_single_mr` (requires `pub(crate)` in bd-1sc6), returns `IngestMrResult { upserted: bool, labels_created: usize, assignees_linked: usize, reviewers_linked: usize, skipped_stale: bool, dirty_source_keys: Vec<(SourceType, i64)> }`.\n\n4. **TOCTOU guard**: `fn is_stale(payload_updated_at: &str, db_updated_at_ms: Option) -> Result`. Parses ISO 8601 string to ms-epoch using `chrono::DateTime::parse_from_rfc3339`. Returns `true` if `payload_ms <= db_ms` (payload is same age or older than what we already have).\n\nWire the module in `src/ingestion/mod.rs`.\n\n## Acceptance Criteria\n\n- [ ] `preflight_fetch` calls GitLabClient by-IID methods and collects successes + failures\n- [ ] `ingest_issue_by_iid` wraps `process_single_issue` with TOCTOU guard and dirty marking\n- [ ] `ingest_mr_by_iid` wraps `process_single_mr` with TOCTOU guard and dirty marking\n- [ ] TOCTOU guard correctly parses ISO 8601 String to ms-epoch for comparison with DB i64\n- [ ] Stale payloads (payload updated_at <= DB updated_at) are skipped, not ingested\n- [ ] `dirty_source_keys` returned include the `(SourceType, source_id)` tuples for downstream scoped doc regen\n- [ ] Module registered in `src/ingestion/mod.rs`\n- [ ] All tests from bd-x8oq pass\n\n## Files\n\n- `src/ingestion/surgical.rs` (NEW)\n- `src/ingestion/mod.rs` (add `pub(crate) mod surgical;`)\n- `src/ingestion/issues.rs` (change `process_single_issue` to `pub(crate)` — done in bd-1sc6)\n- `src/ingestion/merge_requests.rs` (change `process_single_mr` and `ProcessMrResult` to `pub(crate)` — done in bd-1sc6)\n\n## TDD Anchor\n\nTests live in bd-x8oq (`src/ingestion/surgical_tests.rs`), referenced via `#[cfg(test)] #[path = \"surgical_tests.rs\"] mod tests;` in surgical.rs. 
Key tests that validate this bead:\n\n- `test_ingest_issue_by_iid_upserts_and_marks_dirty` — verifies full issue ingest path + dirty marking\n- `test_ingest_mr_by_iid_upserts_and_marks_dirty` — verifies full MR ingest path + dirty marking\n- `test_toctou_skips_stale_issue` — inserts issue with updated_at=T1, calls ingest with payload updated_at=T1, asserts skipped_stale=true\n- `test_toctou_skips_stale_mr` — same for MRs\n- `test_toctou_allows_newer_issue` — payload T2 > DB T1, asserts upserted=true\n- `test_is_stale_parses_iso8601` — unit test for the ISO 8601 to ms-epoch parsing\n- `test_is_stale_handles_none_db_value` — first ingest (no existing row), should return false (not stale)\n- `test_preflight_fetch_returns_issues_and_mrs` — wiremock test for successful preflight\n- `test_preflight_fetch_collects_failures` — wiremock 404 returns failure, not error\n\n## Edge Cases\n\n- ISO 8601 with timezone offset (GitLab returns `+00:00` not `Z`) must parse correctly\n- First-ever ingest of an IID: no existing DB row, TOCTOU guard must treat as \"not stale\" (db_updated_at is None)\n- GitLab returns 404 for a deleted issue/MR during preflight: failure, not hard error\n- Concurrent surgical syncs for same IID: `process_single_issue` uses `unchecked_transaction()` with UPSERT, so last-writer-wins is safe\n- `process_single_mr` returns `ProcessMrResult` which is private: either make it `pub(crate)` in bd-1sc6 or replicate needed fields\n\n## Dependency Context\n\n- **Blocked by bd-159p**: `get_issue_by_iid` and `get_mr_by_iid` on GitLabClient (preflight needs these)\n- **Blocked by bd-1sc6**: Visibility changes to `process_single_issue`, `process_single_mr`, `ProcessMrResult` (must be `pub(crate)`)\n- **Blocks bd-1i4i**: Orchestration function calls `preflight_fetch` + `ingest_issue_by_iid` / `ingest_mr_by_iid`\n- **Blocks bd-kanh**: Dependent helpers are called after ingest to fetch discussions, resource events, etc.\n- **Blocks bd-wcja**: SyncResult surgical fields 
depend on return types from this module\n- **Co-depends with bd-x8oq**: Tests for this code live in that bead's test file","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:14:19.449695Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:02:01.692160Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-3sez","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-3sez","depends_on_id":"bd-3jqx","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-3sez","depends_on_id":"bd-kanh","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-3sez","depends_on_id":"bd-wcja","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-3sez","depends_on_id":"bd-x8oq","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-3sh","title":"Add 'lore count events' command with robot mode","description":"## Background\nNeed to verify event ingestion and report counts by type. The existing count command (src/cli/commands/count.rs) handles issues, mrs, discussions, notes with both human and robot output. This adds 'events' as a new count subcommand.\n\n## Approach\nExtend the existing count command in src/cli/commands/count.rs:\n\n1. Add CountTarget::Events variant (or string match) in the count dispatcher\n2. Query each event table with GROUP BY entity type:\n```sql\nSELECT \n CASE WHEN issue_id IS NOT NULL THEN 'issue' ELSE 'merge_request' END as entity_type,\n COUNT(*) as count\nFROM resource_state_events\nGROUP BY entity_type;\n-- (repeat for label and milestone events)\n```\n\n3. Human output: table format\n```\nEvent Type Issues MRs Total\nState events 1,234 567 1,801\nLabel events 2,345 890 3,235\nMilestone events 456 123 579\nTotal 4,035 1,580 5,615\n```\n\n4. 
Robot JSON:\n```json\n{\n \"ok\": true,\n \"data\": {\n \"state_events\": {\"issue\": 1234, \"merge_request\": 567, \"total\": 1801},\n \"label_events\": {\"issue\": 2345, \"merge_request\": 890, \"total\": 3235},\n \"milestone_events\": {\"issue\": 456, \"merge_request\": 123, \"total\": 579},\n \"total\": 5615\n }\n}\n```\n\n5. Register in CLI: add \"events\" to count's entity_type argument in src/cli/mod.rs\n\n## Acceptance Criteria\n- [ ] `lore count events` shows correct counts by event type and entity type\n- [ ] Robot JSON matches the schema above\n- [ ] Works with empty tables (all zeros)\n- [ ] Does not error if migration 011 hasn't been applied (graceful degradation or \"no event tables\" message)\n\n## Files\n- src/cli/commands/count.rs (add events counting logic)\n- src/cli/mod.rs (add \"events\" to count's accepted entity types)\n\n## TDD Loop\nRED: tests/count_tests.rs (or extend existing):\n- `test_count_events_empty_tables` - verify all zeros on fresh DB\n- `test_count_events_with_data` - seed state + label events, verify correct counts\n- `test_count_events_robot_json` - verify JSON structure\n\nGREEN: Add the events branch to count command\n\nVERIFY: `cargo test count -- --nocapture`\n\n## Edge Cases\n- Tables don't exist if user hasn't run migrate — check table existence first or catch the error\n- COUNT with GROUP BY returns no rows for empty tables — need to handle missing entity types as 0","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-02T21:31:57.379702Z","created_by":"tayloreernisse","updated_at":"2026-02-03T16:21:21.408874Z","closed_at":"2026-02-03T16:21:21.408806Z","close_reason":"Added 'events' to count CLI parser, run_count_events function, print_event_count (table format) and print_event_count_json (structured JSON). 
Wired into handle_count in main.rs.","compaction_level":0,"original_size":0,"labels":["cli","gate-1","phase-b"],"dependencies":[{"issue_id":"bd-3sh","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-3sh","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-3t1b","title":"Implement MR Detail (state + action + view)","description":"## Background\nThe MR Detail shows a single merge request with file changes, diff discussions (position-specific comments), and general discussions. Same progressive hydration pattern as Issue Detail. MR detail has additional sections: file change list and diff-context notes.\n\n## Approach\nState (state/mr_detail.rs):\n- MrDetailState: current_key (Option), metadata (Option), discussions (Vec), diff_discussions (Vec), file_changes (Vec), cross_refs (Vec), tree_state (TreePersistState), scroll_offset, active_tab (MrTab: Overview|Files|Discussions)\n- MrMetadata: iid, title, description, state, author, reviewer, assignee, labels, target_branch, source_branch, created_at, updated_at, web_url, draft, merge_status\n- FileChange: old_path, new_path, change_type (added/modified/deleted/renamed), diff_line_count\n- DiffDiscussion: file_path, old_line, new_line, notes (Vec)\n\nAction (action.rs):\n- fetch_mr_detail(conn, key, clock) -> Result: uses with_read_snapshot\n\nView (view/mr_detail.rs):\n- render_mr_detail(frame, state, area, theme): header, tab bar (Overview|Files|Discussions), tab content\n- Overview tab: description + cross-refs\n- Files tab: file change list with change type indicators (+/-/~)\n- Discussions tab: general discussions + diff discussions grouped by file\n\n## Acceptance Criteria\n- [ ] MR metadata loads in Phase 1\n- [ ] Tab navigation between Overview, Files, Discussions\n- [ ] File changes list shows change type and line count\n- [ ] Diff discussions grouped by file path\n- [ ] General 
discussions rendered in tree widget\n- [ ] Cross-references navigable (related issues, etc.)\n- [ ] All text sanitized via sanitize_for_terminal()\n- [ ] Esc returns to MR List with state preserved\n\n## Files\n- MODIFY: crates/lore-tui/src/state/mr_detail.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_mr_detail)\n- CREATE: crates/lore-tui/src/view/mr_detail.rs\n\n## TDD Anchor\nRED: Write test_fetch_mr_detail in action.rs that inserts an MR with 3 file changes, calls fetch_mr_detail, asserts 3 files returned.\nGREEN: Implement fetch_mr_detail with file change query.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_mr_detail\n\n## Edge Cases\n- MR with no file changes (draft MR created without pushes): show \"No file changes\" message\n- Diff discussions referencing deleted files: show file path with strikethrough style\n- Very large MRs (hundreds of files): paginate file list, don't load all at once\n\n## Dependency Context\nUses discussion tree and cross-ref widgets from \"Implement discussion tree + cross-reference widgets\" task.\nUses same patterns as \"Implement Issue Detail\" task.\nUses MrDetailState from \"Implement AppState composition\" task.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T16:59:38.427124Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:28.423643Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3t1b","depends_on_id":"bd-1cl9","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-3t1b","depends_on_id":"bd-1d6z","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-3t1b","depends_on_id":"bd-2kr0","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-3t6r","title":"Epic: TUI Phase 5 — Polish","description":"## Background\nPhase 5 adds polish features: responsive breakpoints for all screens, session state 
persistence (resume where you left off), single-instance locking, entity/render caches for performance, text width handling for Unicode, snapshot tests, and terminal compatibility test matrix.\n\n## Acceptance Criteria\n- [ ] All screens adapt to terminal width with responsive breakpoints\n- [ ] Session state persisted and restored on relaunch\n- [ ] Single-instance lock prevents concurrent TUI launches\n- [ ] Entity cache enables near-instant detail view reopens\n- [ ] Snapshot tests produce deterministic output with FakeClock\n- [ ] Terminal compat verified across iTerm2, tmux, Alacritty, kitty","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:02:47.178645Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.435708Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3t6r","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} @@ -251,11 +264,12 @@ {"id":"bd-8t4","title":"Extract cross-references from resource_state_events","description":"## Background\nresource_state_events includes source_merge_request (with iid) for 'closed by MR' events. 
After state events are stored (Gate 1), post-processing extracts these into entity_references for the cross-reference graph.\n\n## Approach\nCreate src/core/references.rs (new module) or add to events_db.rs:\n\n```rust\n/// Extract cross-references from stored state events and insert into entity_references.\n/// Looks for state events with source_merge_request_id IS NOT NULL (meaning \"closed by MR\").\n/// \n/// Directionality: source = MR (that caused the close), target = issue (that was closed)\npub fn extract_refs_from_state_events(\n conn: &Connection,\n project_id: i64,\n) -> Result // returns count of new references inserted\n```\n\nSQL logic:\n```sql\nINSERT OR IGNORE INTO entity_references (\n source_entity_type, source_entity_id,\n target_entity_type, target_entity_id,\n reference_type, source_method, created_at\n)\nSELECT\n 'merge_request',\n mr.id,\n 'issue',\n rse.issue_id,\n 'closes',\n 'api_state_event',\n rse.created_at\nFROM resource_state_events rse\nJOIN merge_requests mr ON mr.project_id = rse.project_id AND mr.iid = rse.source_merge_request_id\nWHERE rse.source_merge_request_id IS NOT NULL\n AND rse.issue_id IS NOT NULL\n AND rse.project_id = ?1;\n```\n\nKey: source_merge_request_id stores the MR iid, so we JOIN on merge_requests.iid to get the local DB id.\n\nRegister in src/core/mod.rs: `pub mod references;`\n\nCall this after drain_dependent_queue in the sync pipeline (after all state events are stored).\n\n## Acceptance Criteria\n- [ ] State events with source_merge_request_id produce 'closes' references\n- [ ] Source = MR (resolved by iid), target = issue\n- [ ] source_method = 'api_state_event'\n- [ ] INSERT OR IGNORE prevents duplicates with api_closes_issues data\n- [ ] Returns count of newly inserted references\n- [ ] No-op when no state events have source_merge_request_id\n\n## Files\n- src/core/references.rs (new)\n- src/core/mod.rs (add `pub mod references;`)\n- src/cli/commands/sync.rs (call after drain step)\n\n## TDD Loop\nRED: 
tests/references_tests.rs:\n- `test_extract_refs_from_state_events_basic` - seed a \"closed\" state event with source_merge_request_id, verify entity_reference created\n- `test_extract_refs_dedup_with_closes_issues` - insert ref from closes_issues API first, verify state event extraction doesn't duplicate\n- `test_extract_refs_no_source_mr` - state events without source_merge_request_id produce no refs\n\nSetup: create_test_db with migrations 001-011, seed project + issue + MR + state events.\n\nGREEN: Implement extract_refs_from_state_events\n\nVERIFY: `cargo test references -- --nocapture`\n\n## Edge Cases\n- source_merge_request_id may reference an MR not synced locally (cross-project close) — the JOIN will produce no match, which is correct behavior (ref simply not created)\n- Multiple state events can reference the same MR for the same issue (reopen + re-close) — INSERT OR IGNORE handles dedup\n- The merge_requests table might not have the MR yet if sync is still running — call this after all dependent fetches complete","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:32:33.619606Z","created_by":"tayloreernisse","updated_at":"2026-02-04T20:13:28.219791Z","closed_at":"2026-02-04T20:13:28.219633Z","compaction_level":0,"original_size":0,"labels":["extraction","gate-2","phase-b"],"dependencies":[{"issue_id":"bd-8t4","depends_on_id":"bd-1ep","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-8t4","depends_on_id":"bd-1se","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-8t4","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-91j1","title":"Comprehensive robot-docs as agent bootstrap","description":"## Background\nAgents reach for glab because they already know it from training data. lore robot-docs exists but is not comprehensive enough to serve as a zero-training bootstrap. 
An agent encountering lore for the first time should be able to use any command correctly after reading robot-docs output alone.\n\n## Current State (Verified 2026-02-12)\n- `handle_robot_docs()` at src/main.rs:2069\n- Called at no-args in robot mode (main.rs:165) and via Commands::RobotDocs { brief } (main.rs:229)\n- Current output top-level keys: name, version, description, activation, commands, aliases, exit_codes, clap_error_codes, error_format, workflows\n- Missing: response_schema per command, example_output per command, quick_start section, glab equivalence table\n- --brief flag exists but returns shorter version of same structure\n- main.rs is 2579 lines total\n\n## Current robot-docs Output Structure\n```json\n{\n \"name\": \"lore\",\n \"version\": \"0.6.1\",\n \"description\": \"...\",\n \"activation\": { \"flags\": [\"--robot\", \"-J\"], \"env\": \"LORE_ROBOT=1\", \"auto_detect\": \"non-TTY\" },\n \"commands\": [{ \"name\": \"...\", \"description\": \"...\", \"flags\": [...], \"example\": \"...\" }],\n \"aliases\": { ... },\n \"exit_codes\": { ... },\n \"clap_error_codes\": { ... },\n \"error_format\": { ... },\n \"workflows\": { ... }\n}\n```\n\n## Approach\n\n### 1. 
Add quick_start section\nTop-level key with glab-to-lore translation and lore-exclusive feature summary:\n```json\n\"quick_start\": {\n \"glab_equivalents\": [\n { \"glab\": \"glab issue list\", \"lore\": \"lore -J issues -n 50\", \"note\": \"Richer: includes labels, status, closing MRs\" },\n { \"glab\": \"glab issue view 123\", \"lore\": \"lore -J issues 123\", \"note\": \"Includes discussions, work-item status\" },\n { \"glab\": \"glab mr list\", \"lore\": \"lore -J mrs\", \"note\": \"Includes draft status, reviewers\" },\n { \"glab\": \"glab mr view 456\", \"lore\": \"lore -J mrs 456\", \"note\": \"Includes discussions, file changes\" },\n { \"glab\": \"glab api '/projects/:id/issues'\", \"lore\": \"lore -J issues -p project\", \"note\": \"Fuzzy project matching\" }\n ],\n \"lore_exclusive\": [\n \"search: FTS5 + vector hybrid search across all entities\",\n \"who: Expert/workload/reviews analysis per file path or person\",\n \"timeline: Chronological event reconstruction across entities\",\n \"stats: Database statistics with document/note/discussion counts\",\n \"count: Entity counts with state breakdowns\"\n ]\n}\n```\n\n### 2. Add response_schema per command\nFor each command in the commands array, add a `response_schema` field showing the JSON shape:\n```json\n{\n \"name\": \"issues\",\n \"response_schema\": {\n \"ok\": \"boolean\",\n \"data\": { \"type\": \"array|object\", \"fields\": [\"iid\", \"title\", \"state\", \"...\"] },\n \"meta\": { \"elapsed_ms\": \"integer\" }\n }\n}\n```\nCommands with multiple output shapes (list vs detail) need both documented.\n\n### 3. Add example_output per command\nRealistic truncated JSON for each command. Keep each example under 500 bytes.\n\n### 4. Token budget enforcement\n- --brief mode: ONLY quick_start + command names + invocation syntax. Target <4000 tokens (~16000 bytes).\n- Full mode: everything. 
Target <12000 tokens (~48000 bytes).\n- Measure with: `cargo run --release -- --robot robot-docs --brief | wc -c`\n\n## TDD Loop\nRED: Tests in src/main.rs or new src/cli/commands/robot_docs.rs:\n- test_robot_docs_has_quick_start: parse output JSON, assert quick_start.glab_equivalents array has >= 5 entries\n- test_robot_docs_brief_size: --brief output < 16000 bytes\n- test_robot_docs_full_size: full output < 48000 bytes\n- test_robot_docs_has_response_schemas: every command entry has response_schema key\n- test_robot_docs_commands_complete: assert all registered commands appear (issues, mrs, search, who, timeline, count, stats, sync, embed, doctor, health, ingest, generate-docs, show)\n\nGREEN: Add quick_start, response_schema, example_output to robot-docs output\n\nVERIFY:\n```bash\ncargo test robot_docs && cargo clippy --all-targets -- -D warnings\ncargo run --release -- --robot robot-docs | jq '.quick_start.glab_equivalents | length'\n# Should return >= 5\ncargo run --release -- --robot robot-docs --brief | wc -c\n# Should be < 16000\n```\n\n## Acceptance Criteria\n- [ ] robot-docs JSON has quick_start.glab_equivalents array with >= 5 entries\n- [ ] robot-docs JSON has quick_start.lore_exclusive array\n- [ ] Every command entry has response_schema showing the JSON shape\n- [ ] Every command entry has example_output with realistic truncated data\n- [ ] --brief output is under 16000 bytes (~4000 tokens)\n- [ ] Full output is under 48000 bytes (~12000 tokens)\n- [ ] An agent reading ONLY robot-docs can correctly invoke any lore command\n- [ ] cargo test passes with new robot_docs tests\n\n## Edge Cases\n- Commands with multiple output shapes (e.g., issues list vs issues detail via iid) need both schemas documented\n- --fields flag changes output shape -- document the effect in the response_schema\n- robot-docs output must be stable across versions (agents may cache it)\n- Version field should match Cargo.toml version\n\n## Files to Modify\n- src/main.rs fn 
handle_robot_docs() (~line 2069) — add quick_start section, response_schema, example_output\n- Consider extracting to src/cli/commands/robot_docs.rs if the function exceeds 200 lines","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-12T15:44:40.495479Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:49:01.043915Z","closed_at":"2026-02-12T16:49:01.043832Z","close_reason":"Robot-docs enhanced with quick_start (glab equivalents, lore exclusives, read/write split) and example_output for issues/mrs/search/who","compaction_level":0,"original_size":0,"labels":["cli","cli-imp","robot-mode"],"dependencies":[{"issue_id":"bd-91j1","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-9av","title":"[CP1] gi sync-status enhancement","description":"Enhance sync-status from CP0 stub to show issue cursors.\n\n## Changes to src/cli/commands/sync_status.rs\n\nUpdate the existing stub to show:\n- Last run timestamp and duration\n- Cursor positions per project (issues resource_type)\n- Entity counts (issues, discussions, notes)\n\n## Output Format\nLast sync: 2026-01-25 10:30:00 (succeeded, 45s)\n\nCursors:\n group/project-one\n issues: 2026-01-25T10:25:00Z (gitlab_id: 12345678)\n\nCounts:\n Issues: 1,234\n Discussions: 5,678\n Notes: 23,456 (4,567 system)\n\nFiles: src/cli/commands/sync_status.rs\nDone when: Shows cursor positions and counts after ingestion","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:58:27.246825Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.968507Z","closed_at":"2026-01-25T17:02:01.968507Z","deleted_at":"2026-01-25T17:02:01.968503Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} -{"id":"bd-9dd","title":"Implement 'lore trace' command with human and robot output","description":"## Background\n\nThe trace command is Gate 
5's capstone CLI. It answers 'Why was this code introduced?' by building file -> MR -> issue -> discussion chains.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 5.3.\n\n## Codebase Context\n\n- CLI pattern: same as file-history (Commands enum, handler in main.rs)\n- trace.rs (bd-2n4): run_trace() returns TraceResult with chains\n- Path parsing: support 'src/foo.rs:45' syntax (line number for future Tier 2)\n- merge_requests.merged_at exists (migration 006) — use COALESCE(merged_at, updated_at) for ordering\n\n## Approach\n\n### 1. TraceArgs (`src/cli/mod.rs`):\n```rust\n#[derive(Parser)]\npub struct TraceArgs {\n pub path: String, // supports :line suffix\n #[arg(short = 'p', long)] pub project: Option,\n #[arg(long)] pub discussions: bool,\n #[arg(long = \"no-follow-renames\")] pub no_follow_renames: bool,\n #[arg(short = 'n', long = \"limit\", default_value = \"20\")] pub limit: usize,\n}\n```\n\n### 2. Path parsing:\n```rust\nfn parse_trace_path(input: &str) -> (String, Option) {\n if let Some((path, line)) = input.rsplit_once(':') {\n if let Ok(n) = line.parse::() { return (path.to_string(), Some(n)); }\n }\n (input.to_string(), None)\n}\n```\nIf line present: warn 'Line-level tracing requires Tier 2. Showing file-level results.'\n\n### 3. Human output shows chains with MR -> issue -> discussion context\n\n### 4. 
Robot JSON:\n```json\n{\"ok\": true, \"data\": {\"path\": \"...\", \"resolved_paths\": [...], \"trace_chains\": [...]}, \"meta\": {\"tier\": \"api_only\", \"line_requested\": null}}\n```\n\n## Acceptance Criteria\n\n- [ ] `lore trace src/foo.rs` with human output\n- [ ] `lore --robot trace src/foo.rs` with JSON\n- [ ] :line suffix parses and emits Tier 2 warning\n- [ ] -p, --discussions, --no-follow-renames, -n all work\n- [ ] Rename-aware via resolve_rename_chain\n- [ ] meta.tier = 'api_only'\n- [ ] Added to VALID_COMMANDS and robot-docs\n- [ ] `cargo check --all-targets` passes\n\n## Files\n\n- `src/cli/mod.rs` (TraceArgs + Commands::Trace)\n- `src/cli/commands/trace.rs` (NEW)\n- `src/cli/commands/mod.rs` (re-export)\n- `src/main.rs` (handler + VALID_COMMANDS + robot-docs)\n\n## TDD Loop\n\nRED:\n- `test_parse_trace_path_simple` - \"src/foo.rs\" -> (path, None)\n- `test_parse_trace_path_with_line` - \"src/foo.rs:42\" -> (path, Some(42))\n- `test_parse_trace_path_windows` - \"C:/foo.rs\" -> (path, None) — don't misparse drive letter\n\nGREEN: Implement CLI wiring and handlers.\n\nVERIFY: `cargo check --all-targets`\n\n## Edge Cases\n\n- Windows paths: don't misparse C: as line number\n- No MR data: friendly message with suggestion to sync\n- Very deep rename chain: bounded by resolve_rename_chain","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:32.788530Z","created_by":"tayloreernisse","updated_at":"2026-02-05T19:57:11.527220Z","compaction_level":0,"original_size":0,"labels":["cli","gate-5","phase-b"],"dependencies":[{"issue_id":"bd-9dd","depends_on_id":"bd-1ht","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-9dd","depends_on_id":"bd-2n4","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-9dd","title":"Implement 'lore trace' command with human and robot output","description":"## Background\n\nThe trace command is Gate 5's capstone CLI. 
It answers 'Why was this code introduced?' by building file -> MR -> issue -> discussion chains.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 5.3.\n\n## Codebase Context\n\n- CLI pattern: same as file-history (Commands enum, handler in main.rs)\n- trace.rs (bd-2n4): run_trace() returns TraceResult with chains\n- Path parsing: support 'src/foo.rs:45' syntax (line number for future Tier 2)\n- merge_requests.merged_at exists (migration 006) — use COALESCE(merged_at, updated_at) for ordering\n\n## Approach\n\n### 1. TraceArgs (`src/cli/mod.rs`):\n```rust\n#[derive(Parser)]\npub struct TraceArgs {\n pub path: String, // supports :line suffix\n #[arg(short = 'p', long)] pub project: Option,\n #[arg(long)] pub discussions: bool,\n #[arg(long = \"no-follow-renames\")] pub no_follow_renames: bool,\n #[arg(short = 'n', long = \"limit\", default_value = \"20\")] pub limit: usize,\n}\n```\n\n### 2. Path parsing:\n```rust\nfn parse_trace_path(input: &str) -> (String, Option) {\n if let Some((path, line)) = input.rsplit_once(':') {\n if let Ok(n) = line.parse::() { return (path.to_string(), Some(n)); }\n }\n (input.to_string(), None)\n}\n```\nIf line present: warn 'Line-level tracing requires Tier 2. Showing file-level results.'\n\n### 3. Human output shows chains with MR -> issue -> discussion context\n\n### 4. 
Robot JSON:\n```json\n{\"ok\": true, \"data\": {\"path\": \"...\", \"resolved_paths\": [...], \"trace_chains\": [...]}, \"meta\": {\"tier\": \"api_only\", \"line_requested\": null}}\n```\n\n## Acceptance Criteria\n\n- [ ] `lore trace src/foo.rs` with human output\n- [ ] `lore --robot trace src/foo.rs` with JSON\n- [ ] :line suffix parses and emits Tier 2 warning\n- [ ] -p, --discussions, --no-follow-renames, -n all work\n- [ ] Rename-aware via resolve_rename_chain\n- [ ] meta.tier = 'api_only'\n- [ ] Added to VALID_COMMANDS and robot-docs\n- [ ] `cargo check --all-targets` passes\n\n## Files\n\n- `src/cli/mod.rs` (TraceArgs + Commands::Trace)\n- `src/cli/commands/trace.rs` (NEW)\n- `src/cli/commands/mod.rs` (re-export)\n- `src/main.rs` (handler + VALID_COMMANDS + robot-docs)\n\n## TDD Loop\n\nRED:\n- `test_parse_trace_path_simple` - \"src/foo.rs\" -> (path, None)\n- `test_parse_trace_path_with_line` - \"src/foo.rs:42\" -> (path, Some(42))\n- `test_parse_trace_path_windows` - \"C:/foo.rs\" -> (path, None) — don't misparse drive letter\n\nGREEN: Implement CLI wiring and handlers.\n\nVERIFY: `cargo check --all-targets`\n\n## Edge Cases\n\n- Windows paths: don't misparse C: as line number\n- No MR data: friendly message with suggestion to sync\n- Very deep rename chain: bounded by resolve_rename_chain","status":"in_progress","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:32.788530Z","created_by":"tayloreernisse","updated_at":"2026-02-17T19:08:40.322237Z","compaction_level":0,"original_size":0,"labels":["cli","gate-5","phase-b"],"dependencies":[{"issue_id":"bd-9dd","depends_on_id":"bd-1ht","type":"parent-child","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-9dd","depends_on_id":"bd-2n4","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-9lbr","title":"lore explain: auto-generate issue/MR narrative","description":"## Background\nGiven an issue or MR, auto-generate a structured narrative of 
what happened: who was involved, what decisions were made, what changed, and what is unresolved. Template-based v1 (no LLM dependency), deterministic and reproducible.\n\n## Current Infrastructure (Verified 2026-02-12)\n- show.rs: IssueDetail (line 69) and MrDetail (line 14) — entity detail with discussions\n- timeline.rs: 5-stage pipeline SHIPPED — chronological event reconstruction\n- notes table: 282K rows with body, author, created_at, is_system, discussion_id\n- discussions table: links notes to parent entity (noteable_type, noteable_id), has resolved flag\n- resource_state_events table: state changes with created_at, user_username (src/core/events_db.rs)\n- resource_label_events table: label add/remove with created_at, user_username\n- entity_references table (src/core/references.rs): cross-references between entities (closing MRs, related issues). Column names: `source_entity_type`, `source_entity_id`, `target_entity_type`, `target_entity_id`, `target_project_path`, `target_entity_iid`, `reference_type`, `source_method`\n\n## Approach\nNew command: `lore explain issues N` / `lore explain mrs N`\n\n### Data Assembly (reuse existing internals as library calls)\n1. Entity detail: reuse show.rs query logic for IssueDetail/MrDetail\n2. Timeline events: reuse timeline pipeline with entity-scoped seed\n3. Discussion notes:\n```sql\nSELECT n.id, n.body, n.author_username, n.created_at\nFROM notes n\nJOIN discussions d ON n.discussion_id = d.id\nWHERE d.noteable_type = ? AND d.noteable_id = ?\n AND n.is_system = 0\nORDER BY n.created_at\n```\n4. 
Cross-references:\n```sql\nSELECT target_entity_type, target_entity_id, target_project_path,\n target_entity_iid, reference_type, source_method\nFROM entity_references\nWHERE (source_entity_type = ?1 AND source_entity_id = ?2)\nUNION ALL\nSELECT source_entity_type, source_entity_id, NULL,\n NULL, reference_type, source_method\nFROM entity_references\nWHERE (target_entity_type = ?1 AND target_entity_id = ?2)\n```\n\n### Key Decisions Heuristic\nNotes from assignees/author that follow state or label changes within 1 hour:\n```rust\nstruct StateOrLabelEvent {\n created_at: i64, // ms epoch\n user: String,\n description: String, // e.g. \"state: opened -> closed\" or \"label: +bug\"\n}\n\nfn extract_key_decisions(\n state_events: &[ResourceStateEvent],\n label_events: &[ResourceLabelEvent],\n notes: &[Note],\n) -> Vec {\n // Merge both event types into a unified chronological list\n let mut events: Vec = Vec::new();\n for e in state_events {\n events.push(StateOrLabelEvent {\n created_at: e.created_at,\n user: e.user_username.clone(),\n description: format!(\"state: {} -> {}\", e.from_state.as_deref().unwrap_or(\"?\"), e.to_state),\n });\n }\n for e in label_events {\n let action = if e.action == \"add\" { \"+\" } else { \"-\" };\n events.push(StateOrLabelEvent {\n created_at: e.created_at,\n user: e.user_username.clone(),\n description: format!(\"label: {}{}\", action, e.label_name.as_deref().unwrap_or(\"?\")),\n });\n }\n events.sort_by_key(|e| e.created_at);\n\n let mut decisions = Vec::new();\n let one_hour_ms: i64 = 60 * 60 * 1000;\n\n for event in &events {\n // Find notes by same actor within 60 min after the event\n for note in notes {\n if note.author_username == event.user\n && note.created_at >= event.created_at\n && note.created_at <= event.created_at + one_hour_ms\n {\n decisions.push(KeyDecision {\n timestamp: event.created_at,\n actor: event.user.clone(),\n action: event.description.clone(),\n context_note: truncate(¬e.body, 500),\n });\n break; // one 
note per event\n }\n }\n }\n decisions.truncate(10); // Cap at 10 key decisions\n decisions\n}\n```\n\n### Narrative Sections\n1. **Header**: title, author, opened date, state, assignees, labels, status_name\n2. **Description excerpt**: first 500 chars of description (or full if shorter)\n3. **Key decisions**: notes correlated with state/label changes (heuristic above)\n4. **Activity summary**: counts of state changes, label changes, notes, time range\n5. **Open threads**: discussions WHERE resolved = false\n6. **Related entities**: closing MRs (with state), related issues from entity_references\n7. **Timeline excerpt**: first 20 events from timeline pipeline\n\n## Robot Mode Output Schema\n```json\n{\n \"ok\": true,\n \"data\": {\n \"entity\": {\n \"type\": \"issue\", \"iid\": 3864, \"title\": \"...\", \"state\": \"opened\",\n \"author\": \"teernisse\", \"assignees\": [\"teernisse\"],\n \"labels\": [\"customer:BNSF\"], \"created_at\": \"...\", \"updated_at\": \"...\",\n \"url\": \"...\", \"status_name\": \"In progress\"\n },\n \"description_excerpt\": \"First 500 chars of description...\",\n \"key_decisions\": [{\n \"timestamp\": \"2026-01-15T...\",\n \"actor\": \"teernisse\",\n \"action\": \"state: opened -> in_progress\",\n \"context_note\": \"Starting work on the BNSF throw time integration...\"\n }],\n \"activity\": {\n \"state_changes\": 3, \"label_changes\": 5, \"notes\": 42,\n \"first_event\": \"2026-01-10T...\", \"last_event\": \"2026-02-12T...\"\n },\n \"open_threads\": [{\n \"discussion_id\": \"abc123\",\n \"started_by\": \"cseiber\",\n \"started_at\": \"2026-02-01T...\",\n \"note_count\": 5,\n \"last_note_at\": \"2026-02-10T...\"\n }],\n \"related\": {\n \"closing_mrs\": [{ \"iid\": 200, \"title\": \"...\", \"state\": \"merged\" }],\n \"related_issues\": [{ \"iid\": 3800, \"title\": \"Rail Break Card\", \"relation\": \"related\" }]\n },\n \"timeline_excerpt\": [{ \"timestamp\": \"...\", \"event_type\": \"...\", \"actor\": \"...\", \"summary\": \"...\" 
}]\n },\n \"meta\": { \"elapsed_ms\": 350 }\n}\n```\n\n## Clap Registration\n```rust\n// In src/main.rs Commands enum, add:\nExplain {\n /// Entity type: \"issues\" or \"mrs\"\n entity_type: String,\n /// Entity IID\n iid: i64,\n /// Scope to project (fuzzy match)\n #[arg(short, long)]\n project: Option,\n},\n```\n\n## TDD Loop\nRED: Tests in src/cli/commands/explain.rs:\n- test_explain_issue_basic: insert issue + notes + state events, run explain, assert all sections present (entity, description_excerpt, key_decisions, activity, open_threads, related, timeline_excerpt)\n- test_explain_key_decision_heuristic: insert state change event + note by same author within 30 min, assert note appears in key_decisions\n- test_explain_key_decision_ignores_unrelated_notes: insert note by different author, assert it does NOT appear in key_decisions\n- test_explain_open_threads: insert 2 discussions (1 resolved, 1 unresolved), assert only unresolved in open_threads\n- test_explain_no_notes: issue with zero notes produces header + description + empty sections\n- test_explain_mr: insert MR with merged_at, assert entity includes type=\"merge_request\"\n- test_explain_activity_counts: insert 3 state events + 2 label events + 10 notes, assert counts match\n\nGREEN: Implement explain command with section assembly\n\nVERIFY:\n```bash\ncargo test explain:: && cargo clippy --all-targets -- -D warnings\ncargo run --release -- -J explain issues 3864 | jq '.data | keys'\n# Should include: entity, description_excerpt, key_decisions, activity, open_threads, related, timeline_excerpt\n```\n\n## Acceptance Criteria\n- [ ] lore explain issues N produces structured output for any synced issue\n- [ ] lore explain mrs N produces structured output for any synced MR\n- [ ] Robot mode returns all 7 sections\n- [ ] Human mode renders readable narrative with headers and indentation\n- [ ] Key decisions heuristic: captures notes within 60 min of state/label changes by same actor\n- [ ] Works fully offline 
(no API calls, no LLM)\n- [ ] Performance: <500ms for issue with 50 notes\n- [ ] Command registered in main.rs and robot-docs\n- [ ] key_decisions capped at 10, timeline_excerpt capped at 20 events\n\n## Edge Cases\n- Issue with empty description: description_excerpt = \"(no description)\"\n- Issue with 500+ notes: timeline_excerpt capped at 20, key_decisions capped at 10\n- Issue not found in local DB: exit code 17 with suggestion to sync\n- Ambiguous project: exit code 18 with suggestion to use -p flag\n- MR with no review activity: activity section shows zeros\n- Cross-project references: show as unresolved with project path hint\n- Notes that are pure code blocks: include in key_decisions if correlated with events (they may contain implementation decisions)\n- ResourceStateEvent/ResourceLabelEvent field names: check src/core/events_db.rs for exact struct definitions before implementing\n\n## Dependency Context\n- **bd-2g50 (data gaps)**: BLOCKER. Provides `closed_at` field on IssueDetail for the header section. 
Without it, explain can still show state=\"closed\" but won't have the exact close timestamp.\n\n## Files to Create/Modify\n- NEW: src/cli/commands/explain.rs\n- src/cli/commands/mod.rs (add pub mod explain; re-export)\n- src/main.rs (register Explain subcommand in Commands enum, add handle_explain fn)\n- Reuse: show.rs queries, timeline pipeline, notes/discussions/resource_events queries from src/core/events_db.rs","status":"open","priority":2,"issue_type":"feature","created_at":"2026-02-12T15:46:41.386454Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:31:34.538422Z","compaction_level":0,"original_size":0,"labels":["cli-imp","intelligence"],"dependencies":[{"issue_id":"bd-9lbr","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-9lbr","depends_on_id":"bd-2g50","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-9wl5","title":"NOTE-2G: Parent metadata change propagation to note documents","description":"## Background\nNote documents inherit labels and title from parent issue/MR. When parent metadata changes, note documents become stale. The existing pipeline already marks discussion documents dirty on parent changes — note documents need the same treatment.\n\n## Approach\nFind where ingestion detects parent entity changes and marks discussion documents dirty. The dirty marking for discussions happens in:\n- src/ingestion/discussions.rs line 127: mark_dirty_tx(&tx, SourceType::Discussion, local_discussion_id)\n- src/ingestion/mr_discussions.rs line 162 and 362: mark_dirty_tx(&tx, SourceType::Discussion, local_discussion_id)\n\nThese fire when a discussion is upserted (which happens when parent entity is re-ingested). 
For note documents, we need to additionally mark all non-system notes of that discussion as dirty:\n\nAfter each mark_dirty_tx for Discussion, add:\n // Mark child note documents dirty (they inherit parent metadata)\n let note_ids: Vec = tx.prepare(\"SELECT id FROM notes WHERE discussion_id = ? AND is_system = 0\")?\n .query_map([local_discussion_id], |r| r.get(0))?\n .collect::, _>>()?;\n for note_id in note_ids {\n dirty_tracker::mark_dirty_tx(&tx, SourceType::Note, note_id)?;\n }\n\nAlternative (more efficient, set-based):\n INSERT INTO dirty_sources (source_type, source_id, queued_at)\n SELECT 'note', n.id, ?1\n FROM notes n\n WHERE n.discussion_id = ?2 AND n.is_system = 0\n ON CONFLICT(source_type, source_id) DO UPDATE SET queued_at = excluded.queued_at, attempt_count = 0\n\nUse the set-based approach for better performance with large discussions.\n\n## Files\n- MODIFY: src/ingestion/discussions.rs (add note dirty marking after line 127)\n- MODIFY: src/ingestion/mr_discussions.rs (add note dirty marking after lines 162 and 362)\n\n## TDD Anchor\nRED: test_parent_title_change_marks_notes_dirty — change issue title, re-ingest discussions, assert note documents appear in dirty_sources.\nGREEN: Add set-based INSERT INTO dirty_sources after discussion dirty marking.\nVERIFY: cargo test parent_title_change_marks_notes -- --nocapture\nTests: test_parent_label_change_marks_notes_dirty (modify issue labels, re-ingest, check dirty queue)\n\n## Acceptance Criteria\n- [ ] Discussion upsert for issue marks child non-system note documents dirty\n- [ ] Discussion upsert for MR marks child non-system note documents dirty (both call sites)\n- [ ] Only non-system notes marked dirty (is_system = 0 filter)\n- [ ] Set-based SQL (not per-note loop) for performance\n- [ ] Both tests pass\n\n## Dependency Context\n- Depends on NOTE-2D (bd-2ezb): dirty tracking infrastructure for notes must exist (dirty_sources accepts source_type='note', regenerator handles it)\n\n## Edge Cases\n- 
Discussion with 0 non-system notes: set-based INSERT is a no-op\n- Discussion with 100+ notes: set-based approach handles efficiently in one SQL statement\n- Concurrent discussion ingestion: ON CONFLICT DO UPDATE handles race safely","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:40.292874Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.717576Z","closed_at":"2026-02-12T18:13:15.717528Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"]} {"id":"bd-am7","title":"Implement embedding pipeline with chunking","description":"## Background\nThe embedding pipeline takes documents, chunks them (paragraph-boundary splitting with overlap), sends chunks to Ollama for embedding via async HTTP, and stores vectors in sqlite-vec + metadata. It uses keyset pagination, concurrent HTTP requests via FuturesUnordered, per-batch transactions, and dimension validation.\n\n## Approach\nCreate \\`src/embedding/pipeline.rs\\` per PRD Section 4.4. **The pipeline is async.**\n\n**Constants (per PRD):**\n```rust\nconst BATCH_SIZE: usize = 32; // texts per Ollama API call\nconst DB_PAGE_SIZE: usize = 500; // keyset pagination page size\nconst EXPECTED_DIMS: usize = 768; // nomic-embed-text dimensions\nconst CHUNK_MAX_CHARS: usize = 32_000; // max chars per chunk\nconst CHUNK_OVERLAP_CHARS: usize = 500; // overlap between chunks\n```\n\n**Core async function:**\n```rust\npub async fn embed_documents(\n conn: &Connection,\n client: &OllamaClient,\n selection: EmbedSelection,\n concurrency: usize, // max in-flight HTTP requests\n progress_callback: Option>,\n) -> Result\n```\n\n**EmbedSelection:** Pending | RetryFailed\n**EmbedResult:** { embedded, failed, skipped }\n\n**Algorithm (per PRD):**\n1. count_pending_documents(conn, selection) for progress total\n2. Keyset pagination loop: find_pending_documents(conn, DB_PAGE_SIZE, last_id, selection)\n3. For each page:\n a. 
Begin transaction\n b. For each doc: clear_document_embeddings(&tx, doc.id), split_into_chunks(&doc.content)\n c. Build ChunkWork items with doc_hash + chunk_hash\n d. Commit clearing transaction\n4. Batch ChunkWork texts into Ollama calls (BATCH_SIZE=32)\n5. Use **FuturesUnordered** for concurrent HTTP, cap at \\`concurrency\\`\n6. collect_writes() in per-batch transactions: validate dims (768), store LE bytes, write metadata\n7. On error: record_embedding_error per chunk (not abort)\n8. Advance keyset cursor\n\n**ChunkWork struct:**\n```rust\nstruct ChunkWork {\n doc_id: i64,\n chunk_index: usize,\n doc_hash: String, // SHA-256 of FULL document (staleness detection)\n chunk_hash: String, // SHA-256 of THIS chunk (provenance)\n text: String,\n}\n```\n\n**Splitting:** split_into_chunks(content) -> Vec<(usize, String)>\n- Documents <= CHUNK_MAX_CHARS: single chunk (index 0)\n- Longer: split at paragraph boundaries (\\\\n\\\\n), fallback to sentence/word, with CHUNK_OVERLAP_CHARS overlap\n\n**Storage:** embeddings as raw LE bytes, rowid = encode_rowid(doc_id, chunk_idx)\n**Staleness detection:** uses document_hash (not chunk_hash) because it's document-level\n\nAlso create \\`src/embedding/change_detector.rs\\` (referenced in PRD module structure):\n```rust\npub fn detect_embedding_changes(conn: &Connection) -> Result>;\n```\n\n## Acceptance Criteria\n- [ ] Pipeline is async (uses FuturesUnordered for concurrent HTTP)\n- [ ] concurrency parameter caps in-flight HTTP requests\n- [ ] progress_callback reports (processed, total)\n- [ ] New documents embedded, changed re-embedded, unchanged skipped\n- [ ] clear_document_embeddings before re-embedding (range delete vec0 + metadata)\n- [ ] Chunking at paragraph boundaries with 500-char overlap\n- [ ] Short documents (<32k chars) produce exactly 1 chunk\n- [ ] Embeddings stored as raw LE bytes in vec0\n- [ ] Rowids encoded via encode_rowid(doc_id, chunk_index)\n- [ ] Dimension validation: 768 floats per embedding (mismatch 
-> record error, not store)\n- [ ] Per-batch transactions for writes\n- [ ] Errors recorded in embedding_metadata per chunk (last_error, attempt_count)\n- [ ] Keyset pagination (d.id > last_id, not OFFSET)\n- [ ] Pending detection uses document_hash (not chunk_hash)\n- [ ] \\`cargo build\\` succeeds\n\n## Files\n- \\`src/embedding/pipeline.rs\\` — new file (async)\n- \\`src/embedding/change_detector.rs\\` — new file\n- \\`src/embedding/mod.rs\\` — add \\`pub mod pipeline; pub mod change_detector;\\` + re-exports\n\n## TDD Loop\nRED: Unit tests for chunking:\n- \\`test_short_document_single_chunk\\` — <32k produces [(0, full_content)]\n- \\`test_long_document_multiple_chunks\\` — >32k splits at paragraph boundaries\n- \\`test_chunk_overlap\\` — adjacent chunks share 500-char overlap\n- \\`test_no_paragraph_boundary\\` — falls back to char boundary\nIntegration tests need Ollama or mock.\nGREEN: Implement split_into_chunks, embed_documents (async)\nVERIFY: \\`cargo test pipeline\\`\n\n## Edge Cases\n- Empty document content_text: skip (don't embed)\n- No paragraph boundaries: split at CHUNK_MAX_CHARS with overlap\n- Ollama error for one batch: record error per chunk, continue with next batch\n- Dimension mismatch (model returns 512 instead of 768): record error, don't store corrupt data\n- Document deleted between pagination and embedding: skip gracefully","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:34.093701Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:58:58.908585Z","closed_at":"2026-01-30T17:58:58.908525Z","close_reason":"Implemented embedding pipeline: chunking at paragraph boundaries with 500-char overlap, change detector (keyset pagination, hash-based staleness), async embed via Ollama with batch processing, dimension validation, per-chunk error recording, LE byte vector storage. 7 chunking tests pass. 
289 total tests.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-am7","depends_on_id":"bd-1y8","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-am7","depends_on_id":"bd-2ac","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-am7","depends_on_id":"bd-335","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-apmo","title":"OBSERV: Create migration 014 for sync_runs enrichment","description":"## Background\nThe sync_runs table (created in migration 001) has columns id, started_at, heartbeat_at, finished_at, status, command, error, metrics_json but NOTHING writes to it. This migration adds columns for the observability correlation ID and aggregate counts, enabling queryable sync history.\n\n## Approach\nCreate migrations/014_sync_runs_enrichment.sql:\n\n```sql\n-- Migration 014: sync_runs enrichment for observability\n-- Adds correlation ID and aggregate counts for queryable sync history\n\nALTER TABLE sync_runs ADD COLUMN run_id TEXT;\nALTER TABLE sync_runs ADD COLUMN total_items_processed INTEGER DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN total_errors INTEGER DEFAULT 0;\n\n-- Index for correlation queries (find run by run_id from logs)\nCREATE INDEX IF NOT EXISTS idx_sync_runs_run_id ON sync_runs(run_id);\n```\n\nMigration naming convention: check migrations/ directory. Current latest is 013_resource_event_watermarks.sql. Next is 014.\n\nNote: SQLite ALTER TABLE ADD COLUMN is always safe -- it sets NULL for existing rows. 
DEFAULT 0 applies to new INSERTs only.\n\n## Acceptance Criteria\n- [ ] Migration 014 applies cleanly on a fresh DB (all migrations 001-014)\n- [ ] Migration 014 applies cleanly on existing DB with 001-013 already applied\n- [ ] sync_runs table has run_id TEXT column\n- [ ] sync_runs table has total_items_processed INTEGER DEFAULT 0 column\n- [ ] sync_runs table has total_errors INTEGER DEFAULT 0 column\n- [ ] idx_sync_runs_run_id index exists\n- [ ] Existing sync_runs rows (if any) have NULL run_id, 0 for counts\n- [ ] cargo clippy --all-targets -- -D warnings passes (no code changes, but verify migration is picked up)\n\n## Files\n- migrations/014_sync_runs_enrichment.sql (new file)\n\n## TDD Loop\nRED:\n - test_migration_014_applies: apply all migrations on fresh in-memory DB, query sync_runs schema\n - test_migration_014_idempotent: CREATE INDEX IF NOT EXISTS makes re-run safe; ALTER TABLE ADD COLUMN is NOT idempotent in SQLite (will error). Consider: skip this test or use IF NOT EXISTS workaround\nGREEN: Create migration file\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- ALTER TABLE ADD COLUMN in SQLite: NOT idempotent. Running migration twice will error \"duplicate column name.\" The migration system should prevent re-runs, but IF NOT EXISTS is not available for ALTER TABLE in SQLite. Rely on migration tracking.\n- Migration numbering conflict: if another PR adds 014 first, renumber to 015. Check before merging.\n- metrics_json already exists (from migration 001): we don't touch it. 
The new columns supplement it with queryable aggregates.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:54:51.311879Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:34:05.309761Z","closed_at":"2026-02-04T17:34:05.309714Z","close_reason":"Created migration 014 adding run_id TEXT, total_items_processed INTEGER, total_errors INTEGER to sync_runs, with idx_sync_runs_run_id index","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-apmo","depends_on_id":"bd-3pz","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-arka","title":"Extend SyncRunRecorder with surgical mode lifecycle methods","description":"## Background\nThe existing `SyncRunRecorder` in `src/core/sync_run.rs` manages sync run lifecycle with three methods: `start()` (creates row, returns Self), `succeed(self, ...)` (consumes self, sets succeeded), and `fail(self, ...)` (consumes self, sets failed). Both `succeed()` and `fail()` take ownership of `self` — this is intentional to prevent double-finalization.\n\nSurgical sync needs additional lifecycle methods to:\n1. Set surgical-specific metadata (mode, phase, IIDs JSON) after `start()`\n2. Record per-entity results (increment counters, store entity-level outcomes)\n3. Cancel a run (distinct from failure — user-initiated or timeout)\n4. 
Update phase progression during the surgical pipeline\n\nThese methods operate on the columns added by migration 027 (bead bd-tiux).\n\n## Approach\n\n### Step 1: Add `set_surgical_metadata` method\n\nCalled once after `start()` to set the surgical mode columns:\n\n```rust\npub fn set_surgical_metadata(\n &self,\n conn: &Connection,\n mode: &str,\n phase: &str,\n iids_json: &str,\n) -> Result<()> {\n conn.execute(\n \"UPDATE sync_runs SET mode = ?1, phase = ?2, surgical_iids_json = ?3 WHERE id = ?4\",\n rusqlite::params![mode, phase, iids_json, self.row_id],\n )?;\n Ok(())\n}\n```\n\nTakes `&self` (not `self`) because the recorder continues to be used after metadata is set.\n\n### Step 2: Add `update_phase` method\n\nCalled as the surgical pipeline progresses through phases:\n\n```rust\npub fn update_phase(&self, conn: &Connection, phase: &str) -> Result<()> {\n conn.execute(\n \"UPDATE sync_runs SET phase = ?1, heartbeat_at = ?2 WHERE id = ?3\",\n rusqlite::params![phase, now_ms(), self.row_id],\n )?;\n Ok(())\n}\n```\n\n### Step 3: Add `record_entity_result` method\n\nCalled after each entity (issue or MR) is processed to increment counters:\n\n```rust\npub fn record_entity_result(\n &self,\n conn: &Connection,\n entity_type: &str,\n stage: &str,\n) -> Result<()> {\n let column = match (entity_type, stage) {\n (\"issue\", \"fetched\") => \"issues_fetched\",\n (\"issue\", \"ingested\") => \"issues_ingested\",\n (\"mr\", \"fetched\") => \"mrs_fetched\",\n (\"mr\", \"ingested\") => \"mrs_ingested\",\n (\"issue\" | \"mr\", \"skipped_stale\") => \"skipped_stale\",\n (\"doc\", \"regenerated\") => \"docs_regenerated\",\n (\"doc\", \"embedded\") => \"docs_embedded\",\n (_, \"warning\") => \"warnings_count\",\n _ => return Ok(()), // Unknown combinations are silently ignored\n };\n conn.execute(\n &format!(\"UPDATE sync_runs SET {column} = {column} + 1 WHERE id = ?1\"),\n rusqlite::params![self.row_id],\n )?;\n Ok(())\n}\n```\n\nNote: The column name comes from a 
hardcoded match, NOT from user input — no SQL injection risk.\n\n### Step 4: Add `cancel` method\n\nConsumes self (like succeed/fail) to finalize the run as cancelled:\n\n```rust\npub fn cancel(self, conn: &Connection, reason: &str) -> Result<()> {\n let now = now_ms();\n conn.execute(\n \"UPDATE sync_runs SET finished_at = ?1, cancelled_at = ?2, status = 'cancelled', error = ?3 WHERE id = ?4\",\n rusqlite::params![now, now, reason, self.row_id],\n )?;\n Ok(())\n}\n```\n\nTakes `self` (ownership) like `succeed()` and `fail()` — prevents further use after cancellation.\n\n### Step 5: Expose `row_id` getter\n\nThe orchestrator (bd-1i4i) may need the row_id for logging/tracing:\n\n```rust\npub fn row_id(&self) -> i64 {\n self.row_id\n}\n```\n\n## Acceptance Criteria\n- [ ] `set_surgical_metadata(&self, conn, mode, phase, iids_json)` writes mode/phase/surgical_iids_json columns\n- [ ] `update_phase(&self, conn, phase)` updates phase and heartbeat_at\n- [ ] `record_entity_result(&self, conn, entity_type, stage)` increments the correct counter column\n- [ ] `record_entity_result` silently ignores unknown entity_type/stage combinations\n- [ ] `cancel(self, conn, reason)` consumes self, sets status='cancelled', finished_at, cancelled_at, error\n- [ ] `row_id()` returns the internal row_id\n- [ ] `succeed(self, ...)` still works after `set_surgical_metadata` + `record_entity_result` calls\n- [ ] `fail(self, ...)` still works after `set_surgical_metadata` + `update_phase` calls\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n- [ ] All existing sync_run tests continue to pass\n\n## Files\n- MODIFY: src/core/sync_run.rs (add methods to SyncRunRecorder impl block)\n- MODIFY: src/core/sync_run_tests.rs (add new tests)\n\n## TDD Anchor\nRED: Write tests in `src/core/sync_run_tests.rs`:\n\n```rust\n#[test]\nfn surgical_lifecycle_start_metadata_succeed() {\n let conn = setup_test_db();\n let recorder = 
SyncRunRecorder::start(&conn, \"sync\", \"surg001\").unwrap();\n let row_id = recorder.row_id();\n\n recorder.set_surgical_metadata(\n &conn, \"surgical\", \"preflight\", r#\"{\"issues\":[7,8],\"mrs\":[101]}\"#,\n ).unwrap();\n\n recorder.update_phase(&conn, \"ingest\").unwrap();\n recorder.record_entity_result(&conn, \"issue\", \"fetched\").unwrap();\n recorder.record_entity_result(&conn, \"issue\", \"fetched\").unwrap();\n recorder.record_entity_result(&conn, \"issue\", \"ingested\").unwrap();\n recorder.record_entity_result(&conn, \"mr\", \"fetched\").unwrap();\n recorder.record_entity_result(&conn, \"mr\", \"ingested\").unwrap();\n\n recorder.succeed(&conn, &[], 3, 0).unwrap();\n\n let (mode, phase, iids, issues_fetched, mrs_fetched, issues_ingested, mrs_ingested, status): (\n String, String, String, i64, i64, i64, i64, String,\n ) = conn.query_row(\n \"SELECT mode, phase, surgical_iids_json, issues_fetched, mrs_fetched, issues_ingested, mrs_ingested, status\n FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?, r.get(3)?, r.get(4)?, r.get(5)?, r.get(6)?, r.get(7)?)),\n ).unwrap();\n\n assert_eq!(mode, \"surgical\");\n assert_eq!(phase, \"ingest\"); // Last phase set before succeed\n assert!(iids.contains(\"101\"));\n assert_eq!(issues_fetched, 2);\n assert_eq!(mrs_fetched, 1);\n assert_eq!(issues_ingested, 1);\n assert_eq!(mrs_ingested, 1);\n assert_eq!(status, \"succeeded\");\n}\n\n#[test]\nfn surgical_lifecycle_cancel() {\n let conn = setup_test_db();\n let recorder = SyncRunRecorder::start(&conn, \"sync\", \"cancel01\").unwrap();\n let row_id = recorder.row_id();\n\n recorder.set_surgical_metadata(&conn, \"surgical\", \"preflight\", \"{}\").unwrap();\n recorder.cancel(&conn, \"User requested cancellation\").unwrap();\n\n let (status, error, cancelled_at, finished_at): (String, Option, Option, Option) = conn.query_row(\n \"SELECT status, error, cancelled_at, finished_at FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| 
Ok((r.get(0)?, r.get(1)?, r.get(2)?, r.get(3)?)),\n ).unwrap();\n\n assert_eq!(status, \"cancelled\");\n assert_eq!(error.as_deref(), Some(\"User requested cancellation\"));\n assert!(cancelled_at.is_some());\n assert!(finished_at.is_some());\n}\n\n#[test]\nfn record_entity_result_ignores_unknown() {\n let conn = setup_test_db();\n let recorder = SyncRunRecorder::start(&conn, \"sync\", \"unk001\").unwrap();\n // Should not panic or error on unknown combinations\n recorder.record_entity_result(&conn, \"widget\", \"exploded\").unwrap();\n}\n\n#[test]\nfn record_entity_result_json_counters() {\n let conn = setup_test_db();\n let recorder = SyncRunRecorder::start(&conn, \"sync\", \"cnt001\").unwrap();\n let row_id = recorder.row_id();\n\n recorder.record_entity_result(&conn, \"doc\", \"regenerated\").unwrap();\n recorder.record_entity_result(&conn, \"doc\", \"regenerated\").unwrap();\n recorder.record_entity_result(&conn, \"doc\", \"embedded\").unwrap();\n recorder.record_entity_result(&conn, \"issue\", \"skipped_stale\").unwrap();\n\n let (docs_regen, docs_embed, skipped): (i64, i64, i64) = conn.query_row(\n \"SELECT docs_regenerated, docs_embedded, skipped_stale FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?)),\n ).unwrap();\n\n assert_eq!(docs_regen, 2);\n assert_eq!(docs_embed, 1);\n assert_eq!(skipped, 1);\n}\n```\n\nGREEN: Add all methods to `SyncRunRecorder`.\nVERIFY: `cargo test surgical_lifecycle && cargo test record_entity_result`\n\n## Edge Cases\n- `succeed()` and `fail()` consume `self` — the compiler enforces that no methods are called after finalization. `cancel()` also consumes self for the same reason.\n- `set_surgical_metadata`, `update_phase`, and `record_entity_result` take `&self` — they can be called multiple times before finalization.\n- The `record_entity_result` match uses a hardcoded column name derived from known string constants, not user input. 
The `format!` is safe because the column name is always one of the hardcoded strings.\n- `record_entity_result` silently returns Ok(()) for unknown entity_type/stage combos rather than erroring — this avoids breaking the pipeline for non-critical telemetry.\n- Phase is NOT overwritten by `succeed()`/`fail()`/`cancel()` — the last phase set via `update_phase()` is preserved as the \"phase at completion\" for observability.\n\n## Dependency Context\nDepends on bd-tiux (migration 027) for the surgical columns to exist. Downstream beads bd-1i4i (orchestrator) and bd-3jqx (integration tests) use these methods.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:13:50.827946Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:04:15.562997Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-arka","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-arka","depends_on_id":"bd-3jqx","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-b51e","title":"WHO: Overlap mode query (query_overlap)","description":"## Background\n\nOverlap mode answers \"Who else has MRs/notes touching my files?\" — helps identify potential reviewers, collaborators, or conflicting work at a path. 
Tracks author and reviewer roles separately for richer signal.\n\n## Approach\n\n### SQL: two static variants (prefix/exact) with reviewer + author UNION ALL\n\nBoth branches return: username, role, touch_count (COUNT DISTINCT m.id), last_seen_at, mr_refs (GROUP_CONCAT of project-qualified refs).\n\nKey differences from Expert:\n- No scoring formula — just touch_count ranking\n- mr_refs collected for actionable output (group/project!iid format)\n- Rust-side merge needed (can't fully aggregate in SQL due to HashSet dedup of mr_refs across branches)\n\n### Reviewer branch includes:\n- Self-review exclusion: `n.author_username != m.author_username`\n- MR state filter: `m.state IN ('opened','merged')`\n- Project-qualified refs: `GROUP_CONCAT(DISTINCT (p.path_with_namespace || '!' || m.iid))`\n\n### Rust accumulator pattern:\n```rust\nstruct OverlapAcc {\n username: String,\n author_touch_count: u32,\n review_touch_count: u32,\n touch_count: u32,\n last_seen_at: i64,\n mr_refs: HashSet, // O(1) dedup from the start\n}\n// Build HashMap from rows\n// Convert to Vec, sort, bound mr_refs\n```\n\n### Bounded mr_refs:\n```rust\nconst MAX_MR_REFS_PER_USER: usize = 50;\nlet mr_refs_total = mr_refs.len() as u32;\nlet mr_refs_truncated = mr_refs.len() > MAX_MR_REFS_PER_USER;\n```\n\n### Deterministic sort: touch_count DESC, last_seen_at DESC, username ASC\n\n### format_overlap_role():\n```rust\nfn format_overlap_role(user: &OverlapUser) -> &'static str {\n match (user.author_touch_count > 0, user.review_touch_count > 0) {\n (true, true) => \"A+R\", (true, false) => \"A\",\n (false, true) => \"R\", (false, false) => \"-\",\n }\n}\n```\n\n### OverlapResult/OverlapUser structs include path_match (\"exact\"/\"prefix\"), truncated bool, per-user mr_refs_total + mr_refs_truncated\n\n## Files\n\n- `src/cli/commands/who.rs`\n\n## TDD Loop\n\nRED:\n```\ntest_overlap_dual_roles — user is author of MR 1 and reviewer of MR 2 at same path; verify A+R role, both touch counts > 0, mr_refs 
contain \"team/backend!\"\ntest_overlap_multi_project_mr_refs — same iid 100 in two projects; verify both \"team/backend!100\" and \"team/frontend!100\" present\ntest_overlap_excludes_self_review_notes — author comments on own MR; review_touch_count must be 0\n```\n\nGREEN: Implement query_overlap with both SQL variants + accumulator\nVERIFY: `cargo test -- overlap`\n\n## Acceptance Criteria\n\n- [ ] test_overlap_dual_roles passes (A+R role detection)\n- [ ] test_overlap_multi_project_mr_refs passes (project-qualified refs unique)\n- [ ] test_overlap_excludes_self_review_notes passes\n- [ ] Default since window: 30d\n- [ ] mr_refs sorted alphabetically for deterministic output\n- [ ] touch_count uses coherent units (COUNT DISTINCT m.id on BOTH branches)\n\n## Edge Cases\n\n- Both branches count MRs (not DiffNotes) for coherent touch_count — mixing units produces misleading totals\n- mr_refs from GROUP_CONCAT may contain duplicates across branches — HashSet handles dedup\n- Project scoping on n.project_id (not m.project_id) for index alignment\n- mr_refs sorted before output (HashSet iteration is nondeterministic)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:40:46.729921Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.598708Z","closed_at":"2026-02-08T04:10:29.598673Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. 
All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-b51e","depends_on_id":"bd-2ldg","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-b51e","depends_on_id":"bd-34rr","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-bcte","title":"Implement filter DSL parser state machine","description":"## Background\n\nThe Issue List and MR List filter bars accept typed filter expressions (e.g., `state:opened author:@asmith label:\"high priority\" -milestone:v2.0`). The PRD Appendix B defines a full state machine: Inactive -> Active -> FieldSelect/FreeText -> ValueInput. The parser needs to handle field:value pairs, negation prefix (`-`), quoted values with spaces, bare text as free-text search, and inline error diagnostics when an unrecognized field name is typed. This is a substantial subsystem that the entity table filter bar widget (bd-18qs) depends on for its core functionality.\n\n## Approach\n\nCreate a `filter_dsl.rs` module with:\n\n1. **FilterToken enum** — `Field { name: String, value: String, negated: bool }` | `FreeText(String)` | `Error { position: usize, message: String }`\n2. **`parse_filter(input: &str) -> Vec`** — Tokenizer that handles:\n - `field:value` — recognized fields: state, author, assignee, label, milestone, since, project (issue); + reviewer, draft, target, source (MR)\n - `-field:value` — negation prefix strips the `-` and sets `negated: true`\n - `field:\"quoted value\"` — double-quoted values preserve spaces\n - bare words — collected as `FreeText` tokens\n - unrecognized field names — produce `Error` token with position and message\n3. 
**FilterBarState** state machine:\n - `Inactive` — filter bar not focused\n - `Active(Typing)` — user typing, no suggestion yet\n - `Active(Suggesting)` — 200ms pause triggers field name suggestions\n - `FieldSelect` — dropdown showing recognized field names after `:`\n - `ValueInput` — context-dependent completions (e.g., state values: opened/closed/all)\n4. **`apply_issue_filter(tokens: &[FilterToken]) -> IssueFilterParams`** — converts tokens to query parameters\n5. **`apply_mr_filter(tokens: &[FilterToken]) -> MrFilterParams`** — MR variant with reviewer, draft, target/source fields\n\n## Acceptance Criteria\n- [ ] `parse_filter(\"state:opened\")` returns one Field token with name=\"state\", value=\"opened\", negated=false\n- [ ] `parse_filter(\"-label:bug\")` returns one Field with negated=true\n- [ ] `parse_filter('author:\"Jane Doe\"')` returns one Field with value=\"Jane Doe\" (quotes stripped)\n- [ ] `parse_filter(\"foo:bar\")` where \"foo\" is not a recognized field returns Error token with position\n- [ ] `parse_filter(\"state:opened some text\")` returns Field + FreeText tokens\n- [ ] `parse_filter(\"\")` returns empty vec\n- [ ] FilterBarState transitions match the Appendix B state machine diagram\n- [ ] apply_issue_filter correctly maps all 7 issue fields (state, author, assignee, label, milestone, since, project)\n- [ ] apply_mr_filter correctly maps additional MR fields (reviewer, draft, target, source)\n- [ ] Inline error diagnostics include the character position of the unrecognized field\n\n## Files\n- CREATE: crates/lore-tui/src/widgets/filter_dsl.rs\n- MODIFY: crates/lore-tui/src/widgets/mod.rs (add `pub mod filter_dsl;`)\n\n## TDD Anchor\nRED: Write `test_parse_simple_field_value` that asserts `parse_filter(\"state:opened\")` returns `[Field { name: \"state\", value: \"opened\", negated: false }]`.\nGREEN: Implement the tokenizer for the simplest case.\nVERIFY: cargo test -p lore-tui parse_simple\n\nAdditional tests:\n- test_parse_negation\n- 
test_parse_quoted_value\n- test_parse_unrecognized_field_produces_error\n- test_parse_mixed_tokens\n- test_parse_empty_input\n- test_apply_issue_filter_maps_all_fields\n- test_apply_mr_filter_maps_additional_fields\n- test_filter_bar_state_transitions\n\n## Edge Cases\n- Unclosed quote (`author:\"Jane`) — treat rest of input as the value, produce warning token\n- Empty value (`state:`) — produce Error token, not a Field with empty value\n- Multiple colons (`field:val:ue`) — first colon splits, rest is part of value\n- Unicode in field values (`author:@rene`) — must handle multi-byte chars correctly\n- Very long filter strings (>1000 chars) — must not allocate unbounded; truncate with error\n\n## Dependency Context\n- Depends on bd-18qs (entity table + filter bar widgets) which provides the TextInput widget and filter bar rendering. This bead provides the PARSER that bd-18qs's filter bar CALLS.\n- Consumed by bd-3ei1 (Issue List) and bd-2kr0 (MR List) for converting user filter input into query parameters.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:37.516695Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:29:47.312394Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-bcte","depends_on_id":"bd-18qs","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-bjo","title":"Implement vector search function","description":"## Background\nVector search queries the sqlite-vec virtual table for nearest-neighbor documents. Because documents may have multiple chunks, the raw KNN results need deduplication by document_id (keeping the best/lowest distance per document). 
The function over-fetches 3x to ensure enough unique documents after dedup.\n\n## Approach\nCreate `src/search/vector.rs`:\n\n```rust\npub struct VectorResult {\n pub document_id: i64,\n pub distance: f64, // Lower = closer match\n}\n\n/// Search documents using sqlite-vec KNN query.\n/// Over-fetches 3x limit to handle chunk dedup.\npub fn search_vector(\n conn: &Connection,\n query_embedding: &[f32], // 768-dim embedding of search query\n limit: usize,\n) -> Result>\n```\n\n**SQL (KNN query):**\n```sql\nSELECT rowid, distance\nFROM embeddings\nWHERE embedding MATCH ?\n AND k = ?\nORDER BY distance\n```\n\n**Algorithm:**\n1. Convert query_embedding to raw LE bytes\n2. Execute KNN with k = limit * 3 (over-fetch for dedup)\n3. Decode each rowid via decode_rowid() -> (document_id, chunk_index)\n4. Group by document_id, keep minimum distance (best chunk)\n5. Sort by distance ascending\n6. Take first `limit` results\n\n## Acceptance Criteria\n- [ ] Returns deduplicated document-level results (not chunk-level)\n- [ ] Best chunk distance kept per document (lowest distance wins)\n- [ ] KNN with k parameter (3x limit)\n- [ ] Query embedding passed as raw LE bytes\n- [ ] Results sorted by distance ascending (closest first)\n- [ ] Returns at most `limit` results\n- [ ] Empty embeddings table returns empty Vec\n- [ ] `cargo build` succeeds\n\n## Files\n- `src/search/vector.rs` — new file\n- `src/search/mod.rs` — add `pub use vector::{search_vector, VectorResult};`\n\n## TDD Loop\nRED: Integration tests need sqlite-vec + seeded embeddings:\n- `test_vector_search_basic` — finds nearest document\n- `test_vector_search_dedup` — multi-chunk doc returns once with best distance\n- `test_vector_search_empty` — empty table returns empty\n- `test_vector_search_limit` — respects limit parameter\nGREEN: Implement search_vector\nVERIFY: `cargo test vector`\n\n## Edge Cases\n- All chunks belong to same document: returns single result\n- Query embedding wrong dimension: sqlite-vec may error 
— handle gracefully\n- Over-fetch returns fewer than limit unique docs: return what we have\n- Distance = 0.0: exact match (valid result)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:50.270357Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:44:56.233611Z","closed_at":"2026-01-30T17:44:56.233512Z","close_reason":"Implemented search_vector with KNN query, 3x over-fetch, chunk dedup. 3 tests pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-bjo","depends_on_id":"bd-1y8","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-bjo","depends_on_id":"bd-2ac","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} @@ -266,12 +280,14 @@ {"id":"bd-dty","title":"Implement timeline robot mode JSON output","description":"## Background\n\nRobot mode JSON for timeline follows the {ok, data, meta} envelope pattern. The JSON schema MUST match spec Section 3.5 exactly — this is the contract for AI agent consumers.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 3.5 (Robot Mode JSON).\n\n## Codebase Context\n\n- Robot mode pattern: all commands use {ok: true, data: {...}, meta: {...}} envelope\n- Timestamps: internal ms epoch UTC -> output ISO 8601 via core::time::ms_to_iso()\n- source_method values in DB: 'api', 'note_parse', 'description_parse' (NOT spec's api_closes_issues etc.)\n- Serde rename: use #[serde(rename = \"type\")] for entity objects per spec\n\n## Approach\n\nCreate `print_timeline_json()` in `src/cli/commands/timeline.rs`:\n\n### Key JSON structure (spec Section 3.5):\n- data.seed_entities: [{type, iid, project}] — note \"type\" not \"entity_type\", \"project\" not \"project_path\"\n- data.expanded_entities: [{type, iid, project, depth, via: {from: {type,iid,project}, reference_type, source_method}}]\n- data.unresolved_references: [{source: {type,iid,project}, target_project, target_type, target_iid, 
reference_type}]\n- data.events: [{timestamp (ISO 8601), entity_type, entity_iid, project, event_type, summary, actor, url, is_seed, details}]\n- meta: {search_mode: \"lexical\", expansion_depth, expand_mentions, total_entities, total_events, evidence_notes_included, unresolved_references, showing}\n\n### Details object per event type:\n- created: {labels: [...]}\n- note_evidence: {note_id, snippet}\n- state_changed: {state}\n- label_added: {label}\n\n### Rust JSON Structs\n\n```rust\n#[derive(Serialize)]\nstruct TimelineJson {\n ok: bool,\n data: TimelineDataJson,\n meta: TimelineMetaJson,\n}\n\n#[derive(Serialize)]\nstruct TimelineDataJson {\n query: String,\n event_count: usize,\n seed_entities: Vec,\n expanded_entities: Vec,\n unresolved_references: Vec,\n events: Vec,\n}\n\n#[derive(Serialize)]\nstruct EntityJson {\n #[serde(rename = \"type\")]\n entity_type: String,\n iid: i64,\n project: String,\n}\n\n#[derive(Serialize)]\nstruct TimelineMetaJson {\n search_mode: String, // always \"lexical\"\n expansion_depth: u32,\n expand_mentions: bool,\n total_entities: usize,\n total_events: usize, // before limit\n evidence_notes_included: usize,\n unresolved_references: usize,\n showing: usize, // after limit\n}\n```\n\n### source_method values: use CODEBASE values (api/note_parse/description_parse), not spec values\n\n## Acceptance Criteria\n\n- [ ] Valid JSON to stdout\n- [ ] {ok, data, meta} envelope\n- [ ] ISO 8601 timestamps\n- [ ] Entity objects use \"type\" and \"project\" keys per spec\n- [ ] Nested \"via\" object on expanded entities per spec\n- [ ] Events include url and details fields\n- [ ] meta.total_events before limit; meta.showing after limit\n- [ ] source_method uses codebase values\n- [ ] `cargo check --all-targets` passes\n\n## Files\n\n- `src/cli/commands/timeline.rs` (add print_timeline_json + JSON structs)\n- `src/cli/commands/mod.rs` (re-export)\n\n## TDD Loop\n\nVerify: `lore --robot timeline \"test\" | jq 
'.data.expanded_entities[0].via.from'`\n\n## Edge Cases\n\n- Empty results: events=[], meta.showing=0\n- Null actor/url: serialize as null (not omitted)\n- source_method: use actual DB values, not spec originals","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:33:28.374690Z","created_by":"tayloreernisse","updated_at":"2026-02-06T13:49:12.653118Z","closed_at":"2026-02-06T13:49:12.653067Z","close_reason":"Implemented print_timeline_json_with_meta() robot JSON output in src/cli/commands/timeline.rs with {ok,data,meta} envelope, ISO timestamps, entity/expanded/unresolved JSON structs, event details per type","compaction_level":0,"original_size":0,"labels":["cli","gate-3","phase-b","robot-mode"],"dependencies":[{"issue_id":"bd-dty","depends_on_id":"bd-3as","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-dty","depends_on_id":"bd-ike","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-ef0u","title":"NOTE-2B: SourceType enum extension for notes","description":"## Background\nThe SourceType enum in src/documents/extractor.rs (line 15-19) needs a Note variant for the document pipeline to handle note-type documents.\n\n## Approach\nIn src/documents/extractor.rs:\n1. Add Note variant to SourceType enum (line 15-19, after Discussion):\n pub enum SourceType { Issue, MergeRequest, Discussion, Note }\n\n2. Add match arm to as_str() (line 22-28): Self::Note => \"note\"\n\n3. Add parse aliases (line 30-37): \"note\" | \"notes\" => Some(Self::Note)\n\n4. Display impl (line 40-43) already delegates to as_str() — no change needed.\n\n5. IMPORTANT: Also update seed_dirty() in src/cli/commands/generate_docs.rs (line 66-70) which has a match on SourceType that maps to table names. SourceType::Note should NOT be added to this match — notes are seeded differently (by querying the notes table, not by table name pattern). 
This is handled by NOTE-2E.\n\n## Files\n- MODIFY: src/documents/extractor.rs (SourceType enum at line 15, as_str at line 22, parse at line 30)\n\n## TDD Anchor\nRED: test_source_type_parse_note — assert SourceType::parse(\"note\") == Some(SourceType::Note)\nGREEN: Add Note variant and match arms.\nVERIFY: cargo test source_type_parse_note -- --nocapture\nTests: test_source_type_note_as_str (assert as_str() == \"note\"), test_source_type_note_display (assert format!(\"{}\", SourceType::Note) == \"note\"), test_source_type_parse_notes_alias (assert parse(\"notes\") works)\n\n## Acceptance Criteria\n- [ ] SourceType::Note variant exists\n- [ ] as_str() returns \"note\"\n- [ ] parse() accepts \"note\", \"notes\" (case-insensitive via to_lowercase)\n- [ ] Display trait works via as_str delegation\n- [ ] No change to seed_dirty() match — that's a separate bead (NOTE-2E)\n- [ ] All 4 tests pass, clippy clean\n- [ ] CRITICAL: regenerate_one() in src/documents/regenerator.rs (line 86-91) has exhaustive match on SourceType — adding Note variant will cause a compile error until NOTE-2D adds the match arm. Either add a temporary todo!() or coordinate with NOTE-2D.\n\n## Dependency Context\n- Depends on NOTE-2A (bd-1oi7): migration 024 must exist so test DBs accept source_type='note' in documents/dirty_sources tables\n\n## Edge Cases\n- Exhaustive match: Adding the variant breaks regenerate_one() (line 86-91) and seed_dirty() (line 66-70) until downstream beads handle it. 
Agent should add temporary unreachable!() arms with comments referencing the downstream bead IDs.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:45.555568Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:24.004157Z","closed_at":"2026-02-12T18:13:24.004106Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"],"dependencies":[{"issue_id":"bd-ef0u","depends_on_id":"bd-18yh","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-ef0u","depends_on_id":"bd-2ezb","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-epj","title":"[CP0] Config loading with Zod validation","description":"## Background\n\nConfig loading is critical infrastructure - every CLI command needs the config. Uses Zod for schema validation with sensible defaults. Must handle missing files gracefully with typed errors.\n\nReference: docs/prd/checkpoint-0.md sections \"Configuration Schema\", \"Config Resolution Order\"\n\n## Approach\n\n**src/core/config.ts:**\n```typescript\nimport { z } from 'zod';\nimport { readFileSync } from 'node:fs';\nimport { ConfigNotFoundError, ConfigValidationError } from './errors';\nimport { getConfigPath } from './paths';\n\nexport const ConfigSchema = z.object({\n gitlab: z.object({\n baseUrl: z.string().url(),\n tokenEnvVar: z.string().default('GITLAB_TOKEN'),\n }),\n projects: z.array(z.object({\n path: z.string().min(1),\n })).min(1),\n sync: z.object({\n backfillDays: z.number().int().positive().default(14),\n staleLockMinutes: z.number().int().positive().default(10),\n heartbeatIntervalSeconds: z.number().int().positive().default(30),\n cursorRewindSeconds: z.number().int().nonnegative().default(2),\n primaryConcurrency: z.number().int().positive().default(4),\n dependentConcurrency: z.number().int().positive().default(2),\n }).default({}),\n storage: z.object({\n dbPath: 
z.string().optional(),\n backupDir: z.string().optional(),\n compressRawPayloads: z.boolean().default(true),\n }).default({}),\n embedding: z.object({\n provider: z.literal('ollama').default('ollama'),\n model: z.string().default('nomic-embed-text'),\n baseUrl: z.string().url().default('http://localhost:11434'),\n concurrency: z.number().int().positive().default(4),\n }).default({}),\n});\n\nexport type Config = z.infer;\n\nexport function loadConfig(cliOverride?: string): Config {\n const path = getConfigPath(cliOverride);\n // throws ConfigNotFoundError if missing\n // throws ConfigValidationError if invalid\n}\n```\n\n## Acceptance Criteria\n\n- [ ] `loadConfig()` returns validated Config object\n- [ ] `loadConfig()` throws ConfigNotFoundError if file missing\n- [ ] `loadConfig()` throws ConfigValidationError with Zod errors if invalid\n- [ ] Empty optional fields get default values\n- [ ] projects array must have at least 1 item\n- [ ] gitlab.baseUrl must be valid URL\n- [ ] All number fields must be positive integers\n- [ ] tests/unit/config.test.ts passes (8 tests)\n\n## Files\n\nCREATE:\n- src/core/config.ts\n- tests/unit/config.test.ts\n- tests/fixtures/mock-responses/valid-config.json\n- tests/fixtures/mock-responses/invalid-config.json\n\n## TDD Loop\n\nRED:\n```typescript\n// tests/unit/config.test.ts\ndescribe('Config', () => {\n it('loads config from file path')\n it('throws ConfigNotFoundError if file missing')\n it('throws ConfigValidationError if required fields missing')\n it('validates project paths are non-empty strings')\n it('applies default values for optional fields')\n it('loads from XDG path by default')\n it('respects GI_CONFIG_PATH override')\n it('respects --config flag override')\n})\n```\n\nGREEN: Implement loadConfig() function\n\nVERIFY: `npm run test -- tests/unit/config.test.ts`\n\n## Edge Cases\n\n- JSON parse error should wrap in ConfigValidationError\n- Zod error messages should be human-readable\n- File exists but empty → 
ConfigValidationError\n- File has extra fields → should pass (Zod strips by default)","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:49.091078Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:04:32.592139Z","closed_at":"2026-01-25T03:04:32.592003Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-epj","depends_on_id":"bd-gg1","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-flwo","title":"Interactive path selection for ambiguous matches (TTY picker)","description":"When a partial file path matches multiple files, show an interactive numbered picker in TTY mode instead of a hard error. In robot mode, return candidates as structured JSON in the error envelope. Use dialoguer crate for selection UI. The path_resolver module already detects ambiguity via SuffixResult::Ambiguous and limits to 11 candidates.","status":"open","priority":3,"issue_type":"feature","created_at":"2026-02-13T16:31:50.005222Z","created_by":"tayloreernisse","updated_at":"2026-02-13T16:31:50.007520Z","compaction_level":0,"original_size":0,"labels":["cli-ux","gate-4"]} {"id":"bd-g0d5","title":"WHO: Verification gate — check, clippy, fmt, EXPLAIN QUERY PLAN","description":"## Background\n\nFinal verification gate before the who epic is considered complete. 
Confirms code quality, test coverage, and index utilization against real data.\n\n## Approach\n\n### Step 1: Compiler checks\n```bash\ncargo check --all-targets\ncargo clippy --all-targets -- -D warnings\ncargo fmt --check\ncargo test\n```\n\n### Step 2: Manual smoke test (against real DB)\n```bash\ncargo run --release -- who src/features/global-search/\ncargo run --release -- who @asmith\ncargo run --release -- who @asmith --reviews\ncargo run --release -- who --active\ncargo run --release -- who --active --since 30d\ncargo run --release -- who --overlap libs/shared-frontend/src/features/global-search/\ncargo run --release -- who --path README.md\ncargo run --release -- who --path Makefile\ncargo run --release -- -J who src/features/global-search/ # robot mode\ncargo run --release -- -J who @asmith # robot mode\ncargo run --release -- who src/features/global-search/ -p typescript # project scoped\n```\n\n### Step 3: EXPLAIN QUERY PLAN verification\n```bash\n# Expert: should use idx_notes_diffnote_path_created\nsqlite3 ~/.local/share/lore/lore.db \"\n EXPLAIN QUERY PLAN\n SELECT n.author_username, COUNT(*), MAX(n.created_at)\n FROM notes n\n WHERE n.note_type = 'DiffNote' AND n.is_system = 0\n AND n.position_new_path LIKE 'src/features/global-search/%' ESCAPE '\\\\'\n AND n.created_at >= 0\n GROUP BY n.author_username;\"\n\n# Active global: should use idx_discussions_unresolved_recent_global\nsqlite3 ~/.local/share/lore/lore.db \"\n EXPLAIN QUERY PLAN\n SELECT d.id, d.last_note_at FROM discussions d\n WHERE d.resolvable = 1 AND d.resolved = 0 AND d.last_note_at >= 0\n ORDER BY d.last_note_at DESC LIMIT 20;\"\n\n# Active scoped: should use idx_discussions_unresolved_recent\nsqlite3 ~/.local/share/lore/lore.db \"\n EXPLAIN QUERY PLAN\n SELECT d.id, d.last_note_at FROM discussions d\n WHERE d.resolvable = 1 AND d.resolved = 0 AND d.project_id = 1\n AND d.last_note_at >= 0\n ORDER BY d.last_note_at DESC LIMIT 20;\"\n```\n\n## Files\n\nNo files modified — verification 
only.\n\n## TDD Loop\n\nThis bead is the TDD VERIFY phase for the entire epic. No code written.\nVERIFY: All commands in Steps 1-3 must succeed. Document results.\n\n## Acceptance Criteria\n\n- [ ] cargo check --all-targets: 0 errors\n- [ ] cargo clippy --all-targets -- -D warnings: 0 warnings\n- [ ] cargo fmt --check: no formatting changes needed\n- [ ] cargo test: all tests pass (including 20+ who tests)\n- [ ] Expert EXPLAIN shows idx_notes_diffnote_path_created\n- [ ] Active global EXPLAIN shows idx_discussions_unresolved_recent_global\n- [ ] Active scoped EXPLAIN shows idx_discussions_unresolved_recent\n- [ ] All 5 modes produce reasonable output against real data\n- [ ] Robot mode produces valid JSON for all modes\n\n## Edge Cases\n\n- DB path may differ from ~/.local/share/lore/lore.db — check config with `lore -J doctor` first to get actual db_path\n- EXPLAIN QUERY PLAN output format varies by SQLite version — look for the index name in any output column, not an exact string match\n- If the DB has not been synced recently, smoke tests may return empty results — run `lore sync` first if needed\n- Project name \"typescript\" in the -p flag may not exist — use an actual project from `lore -J status` output\n- The real DB may not have migration 017 yet — run `cargo run --release -- migrate` first if the who command fails with a missing index error\n- clippy::pedantic + clippy::nursery are enabled — common issues: arrays vs vec![] for sorted collections, too_many_arguments on test helpers (use #[allow])","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-08T02:41:42.642988Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.606672Z","closed_at":"2026-02-08T04:10:29.606631Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. 
All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-g0d5","depends_on_id":"bd-tfh3","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-g0d5","depends_on_id":"bd-zibc","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-gba","title":"OBSERV: Add tracing-appender dependency to Cargo.toml","description":"## Background\ntracing-appender provides non-blocking, daily-rotating file writes for the tracing ecosystem. It's the canonical solution used by tokio-rs projects. We need it for the file logging layer (Phase 1) that writes JSON logs to ~/.local/share/lore/logs/.\n\n## Approach\nAdd tracing-appender to [dependencies] in Cargo.toml (line ~54, after the existing tracing-subscriber entry):\n\n```toml\ntracing-appender = \"0.2\"\n```\n\nAlso add the \"json\" feature to tracing-subscriber since the file layer and --log-format json both need it:\n\n```toml\ntracing-subscriber = { version = \"0.3\", features = [\"env-filter\", \"json\"] }\n```\n\nCurrent tracing deps (Cargo.toml lines 53-54):\n tracing = \"0.1\"\n tracing-subscriber = { version = \"0.3\", features = [\"env-filter\"] }\n\n## Acceptance Criteria\n- [ ] cargo check --all-targets succeeds with tracing-appender available\n- [ ] tracing_appender::rolling::daily() is importable\n- [ ] tracing-subscriber json feature is available (fmt::layer().json() compiles)\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- Cargo.toml (modify lines 53-54 region)\n\n## TDD Loop\nRED: Not applicable (dependency addition)\nGREEN: Add deps, run cargo check\nVERIFY: cargo check --all-targets && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- Ensure tracing-appender 0.2 is compatible with tracing-subscriber 0.3 (both from tokio-rs/tracing monorepo, always compatible)\n- The \"json\" feature on tracing-subscriber pulls in serde_json, which is already a 
dependency","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:53:55.364100Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:10:22.520471Z","closed_at":"2026-02-04T17:10:22.520423Z","close_reason":"Added tracing-appender 0.2 and json feature to tracing-subscriber","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-gba","depends_on_id":"bd-2nx","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-gcnx","title":"NOTE-TEST: Test bead","description":"type: task","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:58:40.129030Z","updated_at":"2026-02-12T16:58:47.794167Z","closed_at":"2026-02-12T16:58:47.794116Z","close_reason":"test","compaction_level":0,"original_size":0} {"id":"bd-gg1","title":"[CP0] Core utilities - paths, time, errors, logger","description":"## Background\n\nCore utilities provide the foundation for all other modules. Path resolution enables XDG-compliant config/data locations. Time utilities ensure consistent timestamp handling (ms epoch for DB, ISO for API). Error classes provide typed exceptions for clean error handling. 
Logger provides structured logging to stderr.\n\nReference: docs/prd/checkpoint-0.md sections \"Config + Data Locations\", \"Timestamp Convention\", \"Error Classes\", \"Logging Configuration\"\n\n## Approach\n\n**src/core/paths.ts:**\n- `getConfigPath(cliOverride?)`: resolution order is CLI flag → GI_CONFIG_PATH env → XDG default → local fallback\n- `getDataDir()`: uses XDG_DATA_HOME or ~/.local/share/gi\n- `getDbPath(configOverride?)`: returns data dir + data.db\n- `getBackupDir(configOverride?)`: returns data dir + backups/\n\n**src/core/time.ts:**\n- `isoToMs(isoString)`: converts GitLab API ISO 8601 → ms epoch\n- `msToIso(ms)`: converts ms epoch → ISO 8601\n- `nowMs()`: returns Date.now() for DB storage\n\n**src/core/errors.ts:**\nError hierarchy (all extend GiError base class with code and cause):\n- ConfigNotFoundError, ConfigValidationError\n- GitLabAuthError, GitLabNotFoundError, GitLabRateLimitError, GitLabNetworkError\n- DatabaseLockError, MigrationError\n- TokenNotSetError\n\n**src/core/logger.ts:**\n- pino logger to stderr (fd 2) with pino-pretty in dev\n- Child loggers: dbLogger, gitlabLogger, configLogger\n- LOG_LEVEL env var support (default: info)\n\n## Acceptance Criteria\n\n- [ ] `getConfigPath()` returns ~/.config/gi/config.json when no overrides\n- [ ] `getConfigPath()` respects GI_CONFIG_PATH env var\n- [ ] `getConfigPath(\"./custom.json\")` returns \"./custom.json\"\n- [ ] `isoToMs(\"2024-01-27T00:00:00.000Z\")` returns 1706313600000\n- [ ] `msToIso(1706313600000)` returns \"2024-01-27T00:00:00.000Z\"\n- [ ] All error classes have correct code property\n- [ ] Logger outputs to stderr (not stdout)\n- [ ] tests/unit/paths.test.ts passes\n- [ ] tests/unit/errors.test.ts passes\n\n## Files\n\nCREATE:\n- src/core/paths.ts\n- src/core/time.ts\n- src/core/errors.ts\n- src/core/logger.ts\n- tests/unit/paths.test.ts\n- tests/unit/errors.test.ts\n\n## TDD Loop\n\nRED: Write tests first\n```typescript\n// 
tests/unit/paths.test.ts\ndescribe('getConfigPath', () => {\n it('uses XDG_CONFIG_HOME if set')\n it('falls back to ~/.config/gi if XDG not set')\n it('prefers --config flag over environment')\n it('prefers environment over XDG default')\n it('falls back to local gi.config.json in dev')\n})\n```\n\nGREEN: Implement paths.ts, errors.ts, time.ts, logger.ts\n\nVERIFY: `npm run test -- tests/unit/paths.test.ts tests/unit/errors.test.ts`\n\n## Edge Cases\n\n- XDG_CONFIG_HOME may not exist - don't create, just return path\n- existsSync() check for local fallback - only return if file exists\n- Time conversion must handle timezone edge cases - always use UTC\n- Logger must work even if pino-pretty not installed (production)","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:48.604382Z","created_by":"tayloreernisse","updated_at":"2026-01-25T02:53:26.527997Z","closed_at":"2026-01-25T02:53:26.527862Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-gg1","depends_on_id":"bd-327","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-hbo","title":"[CP1] Discussion ingestion module","description":"## Background\n\nDiscussion ingestion fetches all discussions and notes for a single issue. It is called as part of dependent sync - only for issues whose `updated_at` has advanced beyond `discussions_synced_for_updated_at`. 
After successful sync, it updates the watermark to prevent redundant refetches.\n\n## Approach\n\n### Module: src/ingestion/discussions.rs\n\n### Key Structs\n\n```rust\n#[derive(Debug, Default)]\npub struct IngestDiscussionsResult {\n pub discussions_fetched: usize,\n pub discussions_upserted: usize,\n pub notes_upserted: usize,\n pub system_notes_count: usize,\n}\n```\n\n### Main Function\n\n```rust\npub async fn ingest_issue_discussions(\n conn: &Connection,\n client: &GitLabClient,\n config: &Config,\n project_id: i64, // Local DB project ID\n gitlab_project_id: i64, // GitLab project ID\n issue_iid: i64,\n local_issue_id: i64,\n issue_updated_at: i64, // For watermark update\n) -> Result\n```\n\n### Logic\n\n1. Stream discussions via `client.paginate_issue_discussions()`\n2. For each discussion:\n - Begin transaction\n - Store raw payload (compressed based on config)\n - Transform to NormalizedDiscussion\n - Upsert discussion\n - Get local discussion ID\n - Transform notes via `transform_notes()`\n - For each note: store raw payload, upsert note\n - Track system_notes_count\n - Commit transaction\n3. After all discussions processed: `mark_discussions_synced(conn, local_issue_id, issue_updated_at)`\n\n### Helper Functions\n\n```rust\nfn upsert_discussion(conn, discussion, payload_id) -> Result<()>\nfn get_local_discussion_id(conn, project_id, gitlab_id) -> Result\nfn upsert_note(conn, discussion_id, note, payload_id) -> Result<()>\nfn mark_discussions_synced(conn, issue_id, issue_updated_at) -> Result<()>\n```\n\n### Critical Invariant\n\n`discussions_synced_for_updated_at` MUST be updated only AFTER all discussions are successfully synced. 
This watermark prevents redundant refetches on subsequent runs.\n\n## Acceptance Criteria\n\n- [ ] `ingest_issue_discussions` streams all discussions for an issue\n- [ ] Each discussion wrapped in transaction for atomicity\n- [ ] Raw payloads stored for discussions and notes\n- [ ] `discussions_synced_for_updated_at` updated after successful sync\n- [ ] System notes tracked in result.system_notes_count\n- [ ] Notes linked to correct discussion via local discussion ID\n\n## Files\n\n- src/ingestion/mod.rs (add `pub mod discussions;`)\n- src/ingestion/discussions.rs (create)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/discussion_watermark_tests.rs\n#[tokio::test] async fn fetches_discussions_when_updated_at_advanced()\n#[tokio::test] async fn updates_watermark_after_successful_discussion_sync()\n#[tokio::test] async fn does_not_update_watermark_on_discussion_sync_failure()\n#[tokio::test] async fn stores_raw_payload_for_each_discussion()\n#[tokio::test] async fn stores_raw_payload_for_each_note()\n```\n\nGREEN: Implement ingest_issue_discussions with watermark logic\n\nVERIFY: `cargo test discussion_watermark`\n\n## Edge Cases\n\n- Issue with 0 discussions - mark synced anyway (empty is valid)\n- Discussion with 0 notes - should not happen per GitLab API (discussions always have >= 1 note)\n- Network failure mid-sync - watermark NOT updated, next run retries\n- individual_note=true discussions - have exactly 1 
note","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.267582Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:52:47.500700Z","closed_at":"2026-01-25T22:52:47.500644Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-hbo","depends_on_id":"bd-1qf","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-hbo","depends_on_id":"bd-2iq","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-hbo","depends_on_id":"bd-xhz","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-hrs","title":"Create migration 007_documents.sql","description":"## Background\nMigration 007 creates the document storage layer that Gate A's entire search pipeline depends on. It introduces 5 tables: `documents` (the searchable unit), `document_labels` and `document_paths` (for filtered search), and two queue tables (`dirty_sources`, `pending_discussion_fetches`) that drive incremental document regeneration and discussion fetching in Gate C. This is the most-depended-on bead in the project (6 downstream beads block on it).\n\n## Approach\nCreate `migrations/007_documents.sql` with the exact SQL from PRD Section 1.1. 
The schema is fully specified in the PRD — no design decisions remain.\n\nKey implementation details:\n- `documents` table has `UNIQUE(source_type, source_id)` constraint for upsert support\n- `document_labels` and `document_paths` use `WITHOUT ROWID` for compact storage\n- `dirty_sources` uses composite PK `(source_type, source_id)` with `ON CONFLICT` upsert semantics\n- `pending_discussion_fetches` uses composite PK `(project_id, noteable_type, noteable_iid)`\n- Both queue tables have `next_attempt_at` indexed for efficient backoff queries\n- `labels_hash` and `paths_hash` on documents enable write optimization (skip unchanged labels/paths)\n\nRegister the migration in `src/core/db.rs` by adding entry 7 to the `MIGRATIONS` array.\n\n## Acceptance Criteria\n- [ ] `migrations/007_documents.sql` file exists with all 5 CREATE TABLE statements\n- [ ] Migration applies cleanly on fresh DB (`cargo test migration_tests`)\n- [ ] Migration applies cleanly after CP2 schema (migrations 001-006 already applied)\n- [ ] All foreign keys enforced: `documents.project_id -> projects(id)`, `document_labels.document_id -> documents(id) ON DELETE CASCADE`, `document_paths.document_id -> documents(id) ON DELETE CASCADE`, `pending_discussion_fetches.project_id -> projects(id)`\n- [ ] All indexes created: `idx_documents_project_updated`, `idx_documents_author`, `idx_documents_source`, `idx_documents_hash`, `idx_document_labels_label`, `idx_document_paths_path`, `idx_dirty_sources_next_attempt`, `idx_pending_discussions_next_attempt`\n- [ ] `labels_hash TEXT NOT NULL DEFAULT ''` and `paths_hash TEXT NOT NULL DEFAULT ''` columns present on `documents`\n- [ ] Schema version 7 recorded in `schema_version` table\n- [ ] `cargo build` succeeds after registering migration in db.rs\n\n## Files\n- `migrations/007_documents.sql` — new file (copy exact SQL from PRD Section 1.1)\n- `src/core/db.rs` — add migration 7 to `MIGRATIONS` array\n\n## TDD Loop\nRED: Add migration to db.rs, run `cargo test 
migration_tests` — fails because SQL file missing\nGREEN: Create `migrations/007_documents.sql` with full schema\nVERIFY: `cargo test migration_tests && cargo build`\n\n## Edge Cases\n- Migration must be idempotent-safe if applied twice (INSERT into schema_version will fail on second run — this is expected and handled by the migration runner's version check)\n- `WITHOUT ROWID` tables (document_labels, document_paths) require explicit PK — already defined\n- `CHECK` constraint on `documents.source_type` must match exactly: `'issue','merge_request','discussion'`\n- `CHECK` constraint on `documents.truncated_reason` allows NULL or one of 4 specific values","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:25:25.734380Z","created_by":"tayloreernisse","updated_at":"2026-01-30T16:54:12.854351Z","closed_at":"2026-01-30T16:54:12.854149Z","close_reason":"Completed: migration 007_documents.sql with 5 tables (documents, document_labels, document_paths, dirty_sources, pending_discussion_fetches), 8 indexes, registered in db.rs, cargo build + migration tests pass","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-hrs","depends_on_id":"bd-3lc","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-hs6j","title":"Implement run_generate_docs_for_sources scoped doc regeneration","description":"## Background\n\nCurrently `regenerate_dirty_documents()` in `src/documents/regenerator.rs` processes ALL entries in the `dirty_sources` table. The surgical sync pipeline needs a scoped variant that only regenerates documents for specific `(source_type, source_id)` pairs — the ones produced by the surgical ingest step.\n\nThe dirty_sources table schema: `(source_type TEXT, source_id INTEGER)` primary key, where source_type is one of `'issue'`, `'merge_request'`, `'discussion'`, `'note'`. 
After `ingest_issue_by_iid` or `ingest_mr_by_iid` calls `mark_dirty()`, these rows exist in dirty_sources with the matching keys.\n\nThe existing `regenerate_one(conn, source_type, source_id, cache)` private function does the actual work for a single source. The scoped function can call it directly for each provided key, without going through `get_dirty_sources()` which pulls from the full table.\n\nKey requirement: the function must return the `document_id` values of regenerated documents so the scoped embedding step (bd-1elx) can process only those documents.\n\n## Approach\n\nAdd `regenerate_dirty_documents_for_sources()` to `src/documents/regenerator.rs`:\n\n```rust\npub struct RegenerateForSourcesResult {\n pub regenerated: usize,\n pub unchanged: usize,\n pub errored: usize,\n pub document_ids: Vec, // IDs of regenerated docs for scoped embedding\n}\n\npub fn regenerate_dirty_documents_for_sources(\n conn: &Connection,\n source_keys: &[(SourceType, i64)],\n) -> Result\n```\n\nImplementation:\n1. Create a `ParentMetadataCache` (same as bulk path).\n2. Iterate over provided `source_keys`.\n3. For each key, call `regenerate_one(conn, source_type, source_id, &mut cache)`.\n4. On success (changed=true): call `clear_dirty()`, query `documents` table for the document_id by `(source_type, source_id)`, push to `document_ids` vec.\n5. On success (changed=false): call `clear_dirty()`, still query for document_id (content unchanged but may need re-embedding if model changed).\n6. 
On error: call `record_dirty_error()`, increment errored count.\n\nAlso export from `src/documents/mod.rs`: `pub use regenerator::{RegenerateForSourcesResult, regenerate_dirty_documents_for_sources};`\n\n## Acceptance Criteria\n\n- [ ] `regenerate_dirty_documents_for_sources` only processes the provided source_keys, not all dirty_sources\n- [ ] Returns `document_ids` for all successfully processed documents (both regenerated and unchanged)\n- [ ] Clears dirty_sources entries for successfully processed sources\n- [ ] Records errors for failed sources without aborting the batch\n- [ ] Exported from `src/documents/mod.rs`\n- [ ] Existing `regenerate_dirty_documents` is unchanged (no regression)\n\n## Files\n\n- `src/documents/regenerator.rs` (add new function + result struct)\n- `src/documents/mod.rs` (export new function + struct)\n\n## TDD Anchor\n\nTests in `src/documents/regenerator_tests.rs` (add to existing test file):\n\n```rust\n#[test]\nfn test_scoped_regen_only_processes_specified_sources() {\n let conn = setup_test_db();\n // Insert 2 issues with dirty markers\n insert_test_issue(&conn, 1, \"Issue A\");\n insert_test_issue(&conn, 2, \"Issue B\");\n mark_dirty(&conn, SourceType::Issue, 1).unwrap();\n mark_dirty(&conn, SourceType::Issue, 2).unwrap();\n\n // Regenerate only issue 1\n let result = regenerate_dirty_documents_for_sources(\n &conn,\n &[(SourceType::Issue, 1)],\n ).unwrap();\n\n assert!(result.regenerated >= 1 || result.unchanged >= 1);\n // Issue 1 dirty cleared, issue 2 still dirty\n let remaining = get_dirty_sources(&conn).unwrap();\n assert_eq!(remaining.len(), 1);\n assert_eq!(remaining[0], (SourceType::Issue, 2));\n}\n\n#[test]\nfn test_scoped_regen_returns_document_ids() {\n let conn = setup_test_db();\n insert_test_issue(&conn, 1, \"Issue A\");\n mark_dirty(&conn, SourceType::Issue, 1).unwrap();\n\n let result = regenerate_dirty_documents_for_sources(\n &conn,\n &[(SourceType::Issue, 1)],\n ).unwrap();\n\n 
assert!(!result.document_ids.is_empty());\n // Verify document_id exists in documents table\n let exists: bool = conn.query_row(\n \"SELECT EXISTS(SELECT 1 FROM documents WHERE id = ?1)\",\n [result.document_ids[0]], |r| r.get(0),\n ).unwrap();\n assert!(exists);\n}\n\n#[test]\nfn test_scoped_regen_handles_missing_source() {\n let conn = setup_test_db();\n // Source key not in dirty_sources, regenerate_one will fail or return None\n let result = regenerate_dirty_documents_for_sources(\n &conn,\n &[(SourceType::Issue, 9999)],\n ).unwrap();\n // Should handle gracefully: either errored=1 or unchanged with no doc_id\n assert_eq!(result.document_ids.len(), 0);\n}\n```\n\n## Edge Cases\n\n- Source key exists in dirty_sources but the underlying entity was deleted: `regenerate_one` returns `None` from the extractor, calls `delete_document`, returns `Ok(true)`. No document_id to return.\n- Source key not in dirty_sources at all (already cleared by concurrent process): `regenerate_one` still works (it reads from the entity tables, not dirty_sources). But `clear_dirty` is a no-op DELETE.\n- Same source_key appears twice in the input slice: second call is idempotent (dirty already cleared, doc already up to date).\n- `unchanged` documents: content_hash matches, but we still need the document_id for embedding (model version may have changed). Include in `document_ids`.\n- Error in one source must not abort processing of remaining sources.\n\n## Dependency Context\n\n- **No blockers**: Uses only existing functions (`regenerate_one`, `clear_dirty`, `record_dirty_error`) which are all private to the regenerator module. 
New function lives in same module.\n- **Blocks bd-1i4i**: Orchestration function calls this after surgical ingest to get document_ids for scoped embedding.\n- **Feeds bd-1elx**: `document_ids` output is the input for `run_embed_for_document_ids`.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:16:14.014030Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:04:33.913166Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-hs6j","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-hu3","title":"Write migration 011: resource event tables, entity_references, and dependent fetch queue","description":"## Background\nPhase B needs three new event tables and a generic dependent fetch queue to power temporal queries (timeline, file-history, trace). These tables store structured event data from GitLab Resource Events APIs, replacing fragile system note parsing for state/label/milestone changes.\n\nMigration 010_chunk_config.sql already exists, so Phase B starts at migration 011.\n\n## Approach\nCreate migrations/011_resource_events.sql with the exact schema from the Phase B spec (§1.2 + §2.2):\n\n**Event tables:**\n- resource_state_events: state changes (opened/closed/reopened/merged/locked) with source_merge_request_id for \"closed by MR\" linking\n- resource_label_events: label add/remove with label_name\n- resource_milestone_events: milestone add/remove with milestone_title + milestone_id\n\n**Cross-reference table (Gate 2):**\n- entity_references: source/target entity pairs with reference_type (closes/mentioned/related), source_method provenance, and unresolved reference support (target_entity_id NULL with target_project_path + target_entity_iid)\n\n**Dependent fetch queue:**\n- pending_dependent_fetches: generic job queue with job_type IN ('resource_events', 'mr_closes_issues', 'mr_diffs'), locked_at crash recovery, 
exponential backoff via attempts + next_retry_at\n\n**All tables must have:**\n- CHECK constraints for entity exclusivity (issue XOR merge_request) on event tables\n- UNIQUE constraints (gitlab_id + project_id for events, composite for queue, multi-column for references)\n- Partial indexes (WHERE issue_id IS NOT NULL, WHERE target_entity_id IS NULL, etc.)\n- CASCADE deletes on project_id and entity FKs\n\nRegister in src/core/db.rs MIGRATIONS array:\n```rust\n(\"011\", include_str!(\"../../migrations/011_resource_events.sql\")),\n```\n\nEnd migration with:\n```sql\nINSERT INTO schema_version (version, applied_at, description)\nVALUES (11, strftime('%s', 'now') * 1000, 'Resource events, entity references, and dependent fetch queue');\n```\n\n## Acceptance Criteria\n- [ ] migrations/011_resource_events.sql exists with all 4 tables + indexes + constraints\n- [ ] src/core/db.rs MIGRATIONS array includes (\"011\", include_str!(...))\n- [ ] `cargo build` succeeds (migration SQL compiles into binary)\n- [ ] `cargo test migration` passes (migration applies cleanly on fresh DB)\n- [ ] All CHECK constraints enforced (issue XOR merge_request on event tables)\n- [ ] All UNIQUE constraints present (prevents duplicate events/refs/jobs)\n- [ ] entity_references UNIQUE handles NULL coalescing correctly\n- [ ] pending_dependent_fetches job_type CHECK includes all three types\n\n## Files\n- migrations/011_resource_events.sql (new)\n- src/core/db.rs (add to MIGRATIONS array, line ~46)\n\n## TDD Loop\nRED: Add test in tests/migration_tests.rs:\n- `test_migration_011_creates_event_tables` - verify all 4 tables exist after migration\n- `test_migration_011_entity_exclusivity_constraint` - verify CHECK rejects both NULL and both non-NULL for issue_id/merge_request_id\n- `test_migration_011_event_dedup` - verify UNIQUE(gitlab_id, project_id) rejects duplicate events\n- `test_migration_011_entity_references_dedup` - verify UNIQUE constraint with NULL coalescing\n- 
`test_migration_011_queue_dedup` - verify UNIQUE(project_id, entity_type, entity_iid, job_type)\n\nGREEN: Write the migration SQL + register in db.rs\n\nVERIFY: `cargo test migration_tests -- --nocapture`\n\n## Edge Cases\n- entity_references UNIQUE uses COALESCE for NULLable columns — test with both resolved and unresolved refs\n- pending_dependent_fetches job_type CHECK — ensure 'mr_diffs' is included (Gate 4 needs it)\n- SQLite doesn't enforce CHECK on INSERT OR REPLACE — verify constraint behavior\n- The entity exclusivity CHECK must allow exactly one of issue_id/merge_request_id to be non-NULL\n- Verify CASCADE deletes work (delete project → all events/refs/jobs deleted)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:31:23.933894Z","created_by":"tayloreernisse","updated_at":"2026-02-03T16:06:28.918228Z","closed_at":"2026-02-03T16:06:28.917906Z","close_reason":"Already completed in prior session, re-closing after accidental reopen","compaction_level":0,"original_size":0,"labels":["gate-1","phase-b","schema"],"dependencies":[{"issue_id":"bd-hu3","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-iba","title":"Add GitLab client MR pagination methods","description":"## Background\nGitLab client pagination for merge requests and discussions. Must support robust pagination with fallback chain because some GitLab instances/proxies strip headers.\n\n## Approach\nAdd to existing `src/gitlab/client.rs`:\n1. `MergeRequestPage` struct - Items + pagination metadata\n2. `parse_link_header_next()` - RFC 8288 Link header parsing\n3. `fetch_merge_requests_page()` - Single page fetch with metadata\n4. `paginate_merge_requests()` - Async stream for all MRs\n5. 
`paginate_mr_discussions()` - Async stream for MR discussions\n\n## Files\n- `src/gitlab/client.rs` - Add pagination methods\n\n## Acceptance Criteria\n- [ ] `MergeRequestPage` struct exists with `items`, `next_page`, `is_last_page`\n- [ ] `parse_link_header_next()` extracts `rel=\"next\"` URL from Link header\n- [ ] Pagination fallback chain: Link header > x-next-page > full-page heuristic\n- [ ] `paginate_merge_requests()` returns `Pin>>>`\n- [ ] `paginate_mr_discussions()` returns `Pin>>>`\n- [ ] MR endpoint uses `scope=all&state=all` to include all MRs\n- [ ] `cargo test client` passes\n\n## TDD Loop\nRED: `cargo test fetch_merge_requests` -> method not found\nGREEN: Add pagination methods\nVERIFY: `cargo test client`\n\n## Struct Definitions\n```rust\n#[derive(Debug)]\npub struct MergeRequestPage {\n pub items: Vec,\n pub next_page: Option,\n pub is_last_page: bool,\n}\n```\n\n## Link Header Parsing (RFC 8288)\n```rust\n/// Parse Link header to extract rel=\"next\" URL.\nfn parse_link_header_next(headers: &reqwest::header::HeaderMap) -> Option {\n headers\n .get(\"link\")\n .and_then(|v| v.to_str().ok())\n .and_then(|link_str| {\n // Format: ; rel=\"next\", ; rel=\"last\"\n for part in link_str.split(',') {\n let part = part.trim();\n if part.contains(\"rel=\\\"next\\\"\") || part.contains(\"rel=next\") {\n if let Some(start) = part.find('<') {\n if let Some(end) = part.find('>') {\n return Some(part[start + 1..end].to_string());\n }\n }\n }\n }\n None\n })\n}\n```\n\n## Pagination Fallback Chain\n```rust\nlet next_page = match (link_next, x_next_page, items.len() as u32 == per_page) {\n (Some(_), _, _) => Some(page + 1), // Link header present: continue\n (None, Some(np), _) => Some(np), // x-next-page present: use it\n (None, None, true) => Some(page + 1), // Full page, no headers: try next\n (None, None, false) => None, // Partial page: we're done\n};\n```\n\n## Fetch Single Page\n```rust\npub async fn fetch_merge_requests_page(\n &self,\n 
gitlab_project_id: i64,\n updated_after: Option,\n cursor_rewind_seconds: u32,\n page: u32,\n per_page: u32,\n) -> Result {\n let mut params = vec![\n (\"scope\", \"all\".to_string()),\n (\"state\", \"all\".to_string()),\n (\"order_by\", \"updated_at\".to_string()),\n (\"sort\", \"asc\".to_string()),\n (\"per_page\", per_page.to_string()),\n (\"page\", page.to_string()),\n ];\n // Apply cursor rewind for safety\n // ...\n}\n```\n\n## Async Stream Pattern\n```rust\npub fn paginate_merge_requests(\n &self,\n gitlab_project_id: i64,\n updated_after: Option,\n cursor_rewind_seconds: u32,\n) -> Pin> + Send + '_>> {\n Box::pin(async_stream::try_stream! {\n let mut page = 1u32;\n let per_page = 100u32;\n loop {\n let page_result = self.fetch_merge_requests_page(...).await?;\n for mr in page_result.items {\n yield mr;\n }\n if page_result.is_last_page {\n break;\n }\n match page_result.next_page {\n Some(np) => page = np,\n None => break,\n }\n }\n })\n}\n```\n\n## Edge Cases\n- `scope=all` required to include all MRs (not just authored by current user)\n- `state=all` required to include merged/closed (GitLab defaults may exclude)\n- `locked` state cannot be filtered server-side (use local SQL filtering)\n- Cursor rewind should clamp to 0 to avoid negative timestamps","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:41.633065Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:13:05.613625Z","closed_at":"2026-01-27T00:13:05.613440Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-iba","depends_on_id":"bd-5ta","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-ike","title":"Epic: Gate 3 - Decision Timeline (lore timeline)","description":"## Background\n\nGate 3 is the first user-facing temporal feature: `lore timeline `. 
It answers \"What happened with X?\" by finding matching entities via FTS5, expanding cross-references, collecting all temporal events, and rendering a chronological narrative.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Gate 3 (Sections 3.1-3.6).\n\n## Prerequisites (All Complete)\n\n- Gates 1-2 COMPLETE: resource_state_events, resource_label_events, resource_milestone_events, entity_references all populated\n- FTS5 search index (CP3): working search infrastructure for keyword matching\n- Migration 015 (commit SHAs, closes watermark) exists on disk (registered by bd-1oo)\n\n## Architecture — 5-Stage Pipeline\n\n```\n1. SEED: FTS5 keyword search -> matched document IDs (issues, MRs, notes)\n2. HYDRATE: Map document IDs -> source entities + top matched notes as evidence\n3. EXPAND: BFS over entity_references (depth-limited, edge-type filtered)\n4. COLLECT: Gather events from all tables for seed + expanded entities\n5. RENDER: Sort chronologically, format as human or robot output\n```\n\nNo new tables required. All reads are from existing tables at query time.\n\n## Children (Execution Order)\n\n1. **bd-20e** — Define TimelineEvent model and TimelineEventType enum (types first)\n2. **bd-32q** — Implement timeline seed phase: FTS5 keyword search to entity IDs\n3. **bd-ypa** — Implement timeline expand phase: BFS cross-reference expansion\n4. **bd-3as** — Implement timeline event collection and chronological interleaving\n5. **bd-1nf** — Register lore timeline command with all flags (CLI wiring)\n6. **bd-2f2** — Implement timeline human output renderer\n7. 
**bd-dty** — Implement timeline robot mode JSON output\n\n## Gate Completion Criteria\n\n- [ ] `lore timeline ` returns chronologically ordered events\n- [ ] Seed entities found via FTS5 keyword search (issues, MRs, and notes)\n- [ ] State, label, and milestone events interleaved from resource event tables\n- [ ] Entity creation and merge events included\n- [ ] Evidence-bearing notes included as note_evidence events (top FTS5 matches, bounded default 10)\n- [ ] Cross-reference expansion follows entity_references to configurable depth\n- [ ] Default: follows closes + related edges; --expand-mentions adds mentioned\n- [ ] --depth 0 disables expansion\n- [ ] --since filters by event timestamp\n- [ ] -p scopes to project\n- [ ] Human output is colored and readable\n- [ ] Robot mode returns structured JSON with expansion provenance\n- [ ] Unresolved (external) references included in JSON output\n","status":"closed","priority":1,"issue_type":"feature","created_at":"2026-02-02T21:31:01.036474Z","created_by":"tayloreernisse","updated_at":"2026-02-06T13:49:21.285350Z","closed_at":"2026-02-06T13:49:21.285302Z","close_reason":"Gate 3 complete: all 7 children closed. 
Timeline pipeline fully implemented with SEED->HYDRATE->EXPAND->COLLECT->RENDER stages, human+robot renderers, CLI wiring with 9 flags, robot-docs manifest entry","compaction_level":0,"original_size":0,"labels":["epic","gate-3","phase-b"],"dependencies":[{"issue_id":"bd-ike","depends_on_id":"bd-1se","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-ike","depends_on_id":"bd-2zl","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} @@ -279,6 +295,7 @@ {"id":"bd-jec","title":"Add fetchMrFileChanges config flag","description":"## Background\n\nConfig flag controlling whether MR diff fetching is enabled, following the fetchResourceEvents pattern.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 4.2.\n\n## Codebase Context\n\n- src/core/config.rs has SyncConfig with fetch_resource_events: bool (serde rename 'fetchResourceEvents', default true)\n- Default impl exists for SyncConfig\n- CLI sync options in src/cli/mod.rs have --no-events flag pattern\n- Orchestrator checks config.sync.fetch_resource_events before enqueuing resource_events jobs\n\n## Approach\n\n### 1. Add to SyncConfig (`src/core/config.rs`):\n```rust\n#[serde(rename = \"fetchMrFileChanges\", default = \"default_true\")]\npub fetch_mr_file_changes: bool,\n```\n\nUpdate Default impl to include fetch_mr_file_changes: true.\n\n### 2. CLI override (`src/cli/mod.rs`):\n```rust\n#[arg(long = \"no-file-changes\")]\npub no_file_changes: bool,\n```\n\n### 3. Apply in main.rs:\n```rust\nif args.no_file_changes { config.sync.fetch_mr_file_changes = false; }\n```\n\n### 4. 
Guard in orchestrator:\n```rust\nif config.sync.fetch_mr_file_changes { enqueue mr_diffs jobs }\n```\n\n## Acceptance Criteria\n\n- [ ] fetchMrFileChanges in SyncConfig, default true\n- [ ] Config without field defaults to true\n- [ ] --no-file-changes disables diff fetching\n- [ ] Orchestrator skips mr_diffs when false\n- [ ] `cargo check --all-targets` passes\n\n## Files\n\n- `src/core/config.rs` (add field + Default)\n- `src/cli/mod.rs` (add --no-file-changes)\n- `src/main.rs` (apply override)\n- `src/ingestion/orchestrator.rs` (guard enqueue)\n\n## TDD Loop\n\nRED:\n- `test_config_default_fetch_mr_file_changes` - default is true\n- `test_config_deserialize_false` - JSON with false\n\nGREEN: Add field, default, serde attribute.\n\nVERIFY: `cargo test --lib -- config`\n\n## Edge Cases\n\n- Config missing fetchMrFileChanges key entirely: serde default_true fills in true\n- Config explicitly set to false: no mr_diffs jobs enqueued, mr_file_changes table empty\n- --no-file-changes with --full sync: overrides config, no diffs fetched even on full resync\n- sync.fetchMrFileChanges = false in config + no --no-file-changes flag: respects config (no override)","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-02T21:34:08.892666Z","created_by":"tayloreernisse","updated_at":"2026-02-08T18:18:36.409511Z","closed_at":"2026-02-08T18:18:36.409467Z","close_reason":"Added fetch_mr_file_changes to SyncConfig (default true, serde rename fetchMrFileChanges), --no-file-changes CLI flag in SyncArgs, override in main.rs. 
Orchestrator guard deferred to bd-2yo which implements the actual drain.","compaction_level":0,"original_size":0,"labels":["config","gate-4","phase-b"],"dependencies":[{"issue_id":"bd-jec","depends_on_id":"bd-14q","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-jov","title":"[CP1] Discussion and note transformers","description":"Transform GitLab discussion/note payloads to normalized database schema.\n\n## Module\nsrc/gitlab/transformers/discussion.rs\n\n## Structs\n\n### NormalizedDiscussion\n- gitlab_discussion_id: String\n- project_id: i64\n- issue_id: i64\n- noteable_type: String (\"Issue\")\n- individual_note: bool\n- first_note_at, last_note_at: Option\n- last_seen_at: i64\n- resolvable, resolved: bool\n\n### NormalizedNote\n- gitlab_id: i64\n- project_id: i64\n- note_type: Option\n- is_system: bool\n- author_username: String\n- body: String\n- created_at, updated_at, last_seen_at: i64\n- position: i32 (array index in notes[])\n- resolvable, resolved: bool\n- resolved_by: Option\n- resolved_at: Option\n\n## Functions\n\n### transform_discussion(gitlab_discussion, local_project_id, local_issue_id) -> NormalizedDiscussion\n- Compute first_note_at/last_note_at from notes array min/max created_at\n- Compute resolvable (any note resolvable)\n- Compute resolved (resolvable AND all resolvable notes resolved)\n\n### transform_notes(gitlab_discussion, local_project_id) -> Vec\n- Enumerate notes to get position (array index)\n- Set is_system from note.system\n- Convert timestamps to ms epoch\n\nFiles: src/gitlab/transformers/discussion.rs\nTests: tests/discussion_transformer_tests.rs\nDone when: Unit tests pass for discussion/note transformation with system note 
flagging","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:43:04.481361Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.759691Z","closed_at":"2026-01-25T17:02:01.759691Z","deleted_at":"2026-01-25T17:02:01.759684Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-k7b","title":"[CP1] gi show issue command","description":"Show issue details with discussions.\n\n## Module\nsrc/cli/commands/show.rs\n\n## Clap Definition\nShow {\n #[arg(value_parser = [\"issue\", \"mr\"])]\n entity: String,\n \n iid: i64,\n \n #[arg(long)]\n project: Option,\n}\n\n## Output Format\nIssue #1234: Authentication redesign\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\nProject: group/project-one\nState: opened\nAuthor: @johndoe\nCreated: 2024-01-15\nUpdated: 2024-03-20\nLabels: enhancement, auth\nURL: https://gitlab.example.com/group/project-one/-/issues/1234\n\nDescription:\n We need to redesign the authentication flow to support...\n\nDiscussions (5):\n\n @janedoe (2024-01-16):\n I agree we should move to JWT-based auth...\n\n @johndoe (2024-01-16):\n What about refresh token strategy?\n\n @bobsmith (2024-01-17):\n Have we considered OAuth2?\n\n## Ambiguity Handling\nIf multiple projects have same iid, either:\n- Prompt for --project flag\n- Show error listing which projects have that iid\n\nFiles: src/cli/commands/show.rs\nDone when: Issue detail view displays all fields including threaded discussions","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:58:26.904813Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.944183Z","closed_at":"2026-01-25T17:02:01.944183Z","deleted_at":"2026-01-25T17:02:01.944179Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} 
+{"id":"bd-kanh","title":"Extract orchestrator per-entity logic and implement inline dependent helpers","description":"## Background\n\nThe orchestrator's drain functions (`drain_resource_events` at line 932, `drain_mr_closes_issues` at line 1254, `drain_mr_diffs` at line 1514) are private and tightly coupled to the job queue system (`pending_dependent_fetches`, `claim_jobs`, `complete_job`). They batch-process all entities for a project, not individual ones. Surgical sync needs per-entity versions of these operations.\n\nThe underlying storage functions already exist and are usable:\n- `store_resource_events(conn, project_id, entity_type, entity_local_id, state_events, label_events, milestone_events)` (orchestrator.rs:1100) — calls `upsert_state_events`, `upsert_label_events`, `upsert_milestone_events`\n- `store_closes_issues_refs(conn, project_id, mr_local_id, closes_issues)` (orchestrator.rs:1409) — inserts entity references\n- `upsert_mr_file_changes(conn, project_id, mr_local_id, diffs)` (mr_diffs.rs:26) — already pub\n\nThe GitLabClient methods for fetching are also already pub:\n- `fetch_all_resource_events(gitlab_project_id, entity_type, iid)` -> (state, label, milestone) events\n- `fetch_mr_closes_issues(gitlab_project_id, iid)` -> Vec\n- `fetch_mr_diffs(gitlab_project_id, iid)` -> Vec\n\nThe gap: no standalone per-entity functions that fetch + store for a single entity without the job queue machinery.\n\n## Approach\n\nCreate standalone helper functions in `src/ingestion/surgical.rs` (or a new `src/ingestion/surgical_dependents.rs` sub-module) that surgical.rs calls after ingesting each entity:\n\n1. **`fetch_and_store_resource_events_for_entity`** (async): Takes `client`, `conn`, `project_id`, `gitlab_project_id`, `entity_type` (\"issue\"|\"merge_request\"), `entity_iid`, `entity_local_id`. Calls `client.fetch_all_resource_events()`, then `store_resource_events()` (needs `pub(crate)` visibility, currently private in orchestrator.rs). 
Updates the watermark column (`resource_events_synced_for_updated_at`).\n\n2. **`fetch_and_store_discussions_for_entity`** (async): For issues, calls existing `ingest_issue_discussions()`. For MRs, calls `ingest_mr_discussions()`. Both are already pub. This is a thin routing wrapper.\n\n3. **`fetch_and_store_closes_issues_for_entity`** (async, MR-only): Calls `client.fetch_mr_closes_issues()`, then `store_closes_issues_refs()` (needs `pub(crate)`). Updates watermark.\n\n4. **`fetch_and_store_file_changes_for_entity`** (async, MR-only): Calls `client.fetch_mr_diffs()`, then `upsert_mr_file_changes()` (already pub). Updates watermark.\n\nVisibility changes needed in orchestrator.rs (part of bd-1sc6):\n- `store_resource_events` -> `pub(crate)`\n- `store_closes_issues_refs` -> `pub(crate)`\n- `update_resource_event_watermark_tx` -> `pub(crate)` (or inline the SQL)\n- `update_closes_issues_watermark_tx` -> `pub(crate)` (or inline)\n\n## Acceptance Criteria\n\n- [ ] `fetch_and_store_resource_events_for_entity` fetches all 3 event types and stores them in one transaction\n- [ ] `fetch_and_store_discussions_for_entity` routes to correct discussion ingest function by entity type\n- [ ] `fetch_and_store_closes_issues_for_entity` fetches and stores closes_issues refs for MRs\n- [ ] `fetch_and_store_file_changes_for_entity` fetches and stores MR diffs\n- [ ] Each helper updates the appropriate watermark column after successful store\n- [ ] Each helper returns a result struct with counts (fetched, stored, skipped)\n- [ ] All helpers are `pub(crate)` for use by the orchestration function (bd-1i4i)\n- [ ] Config-gated: resource events only fetched if `config.sync.fetch_resource_events == true`, file changes only if `config.sync.fetch_mr_file_changes == true`\n\n## Files\n\n- `src/ingestion/surgical.rs` (add helper functions, or create `surgical_dependents.rs` sub-module)\n- `src/ingestion/orchestrator.rs` (change `store_resource_events`, `store_closes_issues_refs`, watermark 
functions to `pub(crate)` — via bd-1sc6)\n\n## TDD Anchor\n\nTests in `src/ingestion/surgical_tests.rs` (bd-x8oq):\n\n```rust\n#[tokio::test]\nasync fn test_fetch_and_store_resource_events_for_issue() {\n let conn = setup_db();\n let mock = MockServer::start().await;\n // Mock state/label/milestone event endpoints\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/\\d+/issues/\\d+/resource_state_events\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(json!([])))\n .mount(&mock).await;\n // ... similar for label and milestone\n let client = make_test_client(&mock);\n let result = fetch_and_store_resource_events_for_entity(\n &client, &conn, /*project_id=*/1, /*gitlab_project_id=*/100,\n \"issue\", /*iid=*/42, /*local_id=*/1,\n ).await.unwrap();\n assert_eq!(result.fetched, 0); // empty events\n // Verify watermark updated\n let watermark: Option = conn.query_row(\n \"SELECT resource_events_synced_for_updated_at FROM issues WHERE id = 1\",\n [], |r| r.get(0),\n ).unwrap();\n assert!(watermark.is_some());\n}\n\n#[tokio::test]\nasync fn test_fetch_and_store_closes_issues_for_mr() {\n let conn = setup_db();\n let mock = MockServer::start().await;\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/\\d+/merge_requests/\\d+/closes_issues\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(json!([\n {\"iid\": 10, \"project_id\": 100}\n ])))\n .mount(&mock).await;\n let client = make_test_client(&mock);\n let result = fetch_and_store_closes_issues_for_entity(\n &client, &conn, 1, 100, /*mr_iid=*/5, /*mr_local_id=*/1,\n ).await.unwrap();\n assert_eq!(result.stored, 1);\n}\n\n#[tokio::test]\nasync fn test_fetch_and_store_file_changes_for_mr() {\n // Similar: mock /diffs endpoint, verify upsert_mr_file_changes called\n}\n\n#[tokio::test]\nasync fn test_resource_events_skipped_when_config_disabled() {\n // config.sync.fetch_resource_events = false -> returns Ok with 0 counts\n}\n```\n\n## Edge Cases\n\n- 
`fetch_all_resource_events` returns 3 separate Results (state, label, milestone). If one fails (e.g., 403 on milestone events), the others should still be stored. Partial success handling.\n- `fetch_mr_closes_issues` on a deleted MR returns 404: `coalesce_not_found` already handles this in the client, returning empty vec.\n- Watermark update must happen AFTER successful store, not before, to avoid marking as synced when store failed.\n- Discussion ingest for MRs uses `prefetch_mr_discussions` (async) + `write_prefetched_mr_discussions` (sync) two-phase pattern. The helper must handle both phases.\n- If `config.sync.fetch_resource_events` is false, skip resource event fetch entirely (return empty result).\n- If `config.sync.fetch_mr_file_changes` is false, skip file changes fetch entirely.\n\n## Dependency Context\n\n- **Blocked by bd-3sez**: surgical.rs must exist before adding helpers to it\n- **Blocked by bd-1sc6 (indirectly via bd-3sez)**: `store_resource_events` and `store_closes_issues_refs` need `pub(crate)` visibility\n- **Blocks bd-1i4i**: Orchestration function calls these helpers after each entity ingest\n- **Blocks bd-3jqx**: Integration tests exercise the full surgical pipeline including these helpers\n- **Uses existing pub APIs**: `GitLabClient::fetch_all_resource_events`, `fetch_mr_closes_issues`, `fetch_mr_diffs`, `upsert_mr_file_changes`, `ingest_issue_discussions`, `ingest_mr_discussions`","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:15:42.863072Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:03:51.432160Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-kanh","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-kanh","depends_on_id":"bd-3jqx","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-kvij","title":"Rewrite agent skills to mandate lore for all 
reads","description":"## Background\nAgent skills and AGENTS.md files currently allow agents to choose between glab and lore for read operations. Agents default to glab (familiar from training data) even though lore returns richer data. Need a clean, enforced boundary: lore=reads, glab=writes.\n\n## Approach\n1. Audit all config files for glab read patterns\n2. Replace each with lore equivalent\n3. Add explicit Read/Write Split section to AGENTS.md and CLAUDE.md\n\n## Translation Table\n| glab (remove) | lore (replace with) |\n|------------------------------------|----------------------------------|\n| glab issue view N | lore -J issues N |\n| glab issue list | lore -J issues -n 50 |\n| glab issue list -l bug | lore -J issues --label bug |\n| glab mr view N | lore -J mrs N |\n| glab mr list | lore -J mrs |\n| glab mr list -s opened | lore -J mrs -s opened |\n| glab api '/projects/:id/issues' | lore -J issues -p project |\n\n## Files to Audit\n\n### Project-level\n- /Users/tayloreernisse/projects/gitlore/AGENTS.md — primary project instructions\n\n### Global Claude config\n- ~/.claude/CLAUDE.md — global instructions (already has lore section, verify no glab reads)\n\n### Skills directory\nScan all .md files under ~/.claude/skills/ for glab read patterns.\nLikely candidates: any skill that references GitLab data retrieval.\n\n### Rules directory\nScan all .md files under ~/.claude/rules/ for glab read patterns.\n\n### Work-ghost templates\n- ~/projects/work-ghost/tasks/*.md — task templates that reference glab reads\n\n## Verification Commands\nAfter all changes:\n```bash\n# Should return ZERO matches (no glab read commands remain)\nrg 'glab issue view|glab issue list|glab mr view|glab mr list|glab api.*issues|glab api.*merge_requests' ~/.claude/ AGENTS.md --type md\n\n# These should REMAIN (write operations stay with glab)\nrg 'glab (issue|mr) (create|update|close|delete|approve|merge|note|rebase)' ~/.claude/ AGENTS.md --type md\n```\n\n## Read/Write Split Section 
to Add\nAdd to AGENTS.md and ~/.claude/CLAUDE.md:\n```markdown\n## Read/Write Split: lore vs glab\n\n| Operation | Tool | Why |\n|-----------|------|-----|\n| List issues/MRs | lore | Richer: includes status, discussions, closing MRs |\n| View issue/MR detail | lore | Pre-joined discussions, work-item status |\n| Search across entities | lore | FTS5 + vector hybrid search |\n| Expert/workload analysis | lore | who command — no glab equivalent |\n| Timeline reconstruction | lore | Chronological narrative — no glab equivalent |\n| Create/update/close | glab | Write operations |\n| Approve/merge MR | glab | Write operations |\n| CI/CD pipelines | glab | Not in lore scope |\n```\n\n## TDD Loop\nThis is a config-only task — no Rust code changes. Verification is via grep:\n\nRED: Run verification commands above, expect matches (glab reads still present)\nGREEN: Replace all glab read references with lore equivalents\nVERIFY: Run verification commands, expect zero glab read matches\n\n## Acceptance Criteria\n- [ ] Zero glab read references in AGENTS.md\n- [ ] Zero glab read references in ~/.claude/CLAUDE.md\n- [ ] Zero glab read references in ~/.claude/skills/**/*.md\n- [ ] Zero glab read references in ~/.claude/rules/**/*.md\n- [ ] glab write references preserved (create, update, close, approve, merge, CI)\n- [ ] Read/Write Split section added to AGENTS.md\n- [ ] Read/Write Split section added to ~/.claude/CLAUDE.md\n- [ ] Fresh agent session uses lore for reads without prompting (manual verification)\n\n## Edge Cases\n- Skills that use glab api for data NOT in lore (e.g., CI pipeline data, project settings) — these should remain\n- glab MCP server references — evaluate case-by-case (keep for write operations)\n- Shell aliases or env vars that invoke glab for reads — out of scope unless in config files\n- Skills that use `glab issue list | jq` for ad-hoc queries — replace with `lore -J issues | jq`\n- References to glab in documentation context (explaining what tools 
exist) vs operational context (telling agent to use glab) — only replace operational references","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-12T15:44:56.530081Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:49:04.598735Z","closed_at":"2026-02-12T16:49:04.598679Z","close_reason":"Agent skills rewritten: AGENTS.md and CLAUDE.md updated with read/write split mandating lore for reads, glab for writes","compaction_level":0,"original_size":0,"labels":["cli","cli-imp"],"dependencies":[{"issue_id":"bd-kvij","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-lcb","title":"Epic: CP2 Gate E - CLI Complete","description":"## Background\nGate E validates all CLI commands are functional and user-friendly. This is the final usability gate - even if all data is correct, users need good CLI UX to access it.\n\n## Acceptance Criteria (Pass/Fail)\n\n### List Command\n- [ ] `gi list mrs` shows MR table with columns: iid, title, state, author, branches, updated\n- [ ] `gi list mrs --state=opened` filters to only opened MRs\n- [ ] `gi list mrs --state=merged` filters to only merged MRs\n- [ ] `gi list mrs --state=closed` filters to only closed MRs\n- [ ] `gi list mrs --state=locked` filters locally (not server-side filter)\n- [ ] `gi list mrs --draft` shows only draft MRs\n- [ ] `gi list mrs --no-draft` excludes draft MRs\n- [ ] Draft MRs show `[DRAFT]` prefix in title column\n- [ ] `gi list mrs --author=username` filters by author\n- [ ] `gi list mrs --assignee=username` filters by assignee\n- [ ] `gi list mrs --reviewer=username` filters by reviewer\n- [ ] `gi list mrs --target-branch=main` filters by target branch\n- [ ] `gi list mrs --source-branch=feature/x` filters by source branch\n- [ ] `gi list mrs --label=bugfix` filters by label\n- [ ] `gi list mrs --limit=N` limits output\n\n### Show Command\n- [ ] `gi show mr ` displays full MR detail\n- [ ] Show includes: title, 
description, state, draft status, author\n- [ ] Show includes: assignees, reviewers, labels\n- [ ] Show includes: source_branch, target_branch\n- [ ] Show includes: detailed_merge_status (e.g., \"mergeable\")\n- [ ] Show includes: merge_user and merged_at for merged MRs\n- [ ] Show includes: discussions with author and date\n- [ ] DiffNote shows file context: `[src/file.ts:45]`\n- [ ] Multi-line DiffNote shows range: `[src/file.ts:45-48]`\n- [ ] Resolved discussions show `[RESOLVED]` marker\n\n### Count Command\n- [ ] `gi count mrs` shows total count\n- [ ] Count shows state breakdown: opened, merged, closed\n\n### Sync Status\n- [ ] `gi sync-status` shows MR cursor position\n- [ ] Sync status shows last sync timestamp\n\n## Validation Script\n```bash\n#!/bin/bash\nset -e\n\nDB_PATH=\"${XDG_DATA_HOME:-$HOME/.local/share}/gitlab-inbox/db.sqlite3\"\n\necho \"=== Gate E: CLI Complete ===\"\n\n# 1. Test list command (basic)\necho \"Step 1: Basic list...\"\ngi list mrs --limit=5 || { echo \"FAIL: list mrs failed\"; exit 1; }\n\n# 2. Test state filters\necho \"Step 2: State filters...\"\nfor state in opened merged closed; do\n echo \" Testing --state=$state\"\n gi list mrs --state=$state --limit=3 || echo \" Warning: No $state MRs\"\ndone\n\n# 3. Test draft filters\necho \"Step 3: Draft filters...\"\ngi list mrs --draft --limit=3 || echo \" Note: No draft MRs found\"\ngi list mrs --no-draft --limit=3 || echo \" Note: All MRs are drafts?\"\n\n# 4. Check [DRAFT] prefix\necho \"Step 4: Check [DRAFT] prefix...\"\nDRAFT_IID=$(sqlite3 \"$DB_PATH\" \"SELECT iid FROM merge_requests WHERE draft = 1 LIMIT 1;\")\nif [ -n \"$DRAFT_IID\" ]; then\n if gi list mrs --limit=100 | grep -q \"\\[DRAFT\\]\"; then\n echo \" PASS: [DRAFT] prefix found\"\n else\n echo \" FAIL: Draft MR exists but no [DRAFT] prefix in output\"\n fi\nelse\n echo \" Skip: No draft MRs to test\"\nfi\n\n# 5. 
Test author/assignee/reviewer filters\necho \"Step 5: User filters...\"\nAUTHOR=$(sqlite3 \"$DB_PATH\" \"SELECT author_username FROM merge_requests LIMIT 1;\")\nif [ -n \"$AUTHOR\" ]; then\n echo \" Testing --author=$AUTHOR\"\n gi list mrs --author=\"$AUTHOR\" --limit=3\nfi\n\nREVIEWER=$(sqlite3 \"$DB_PATH\" \"SELECT username FROM mr_reviewers LIMIT 1;\")\nif [ -n \"$REVIEWER\" ]; then\n echo \" Testing --reviewer=$REVIEWER\"\n gi list mrs --reviewer=\"$REVIEWER\" --limit=3\nfi\n\n# 6. Test branch filters\necho \"Step 6: Branch filters...\"\nTARGET=$(sqlite3 \"$DB_PATH\" \"SELECT target_branch FROM merge_requests LIMIT 1;\")\nif [ -n \"$TARGET\" ]; then\n echo \" Testing --target-branch=$TARGET\"\n gi list mrs --target-branch=\"$TARGET\" --limit=3\nfi\n\n# 7. Test show command\necho \"Step 7: Show command...\"\nMR_IID=$(sqlite3 \"$DB_PATH\" \"SELECT iid FROM merge_requests LIMIT 1;\")\ngi show mr \"$MR_IID\" || { echo \"FAIL: show mr failed\"; exit 1; }\n\n# 8. Test show with DiffNote context\necho \"Step 8: Show with DiffNote...\"\nDIFFNOTE_MR=$(sqlite3 \"$DB_PATH\" \"\n SELECT DISTINCT m.iid\n FROM merge_requests m\n JOIN discussions d ON d.merge_request_id = m.id\n JOIN notes n ON n.discussion_id = d.id\n WHERE n.position_new_path IS NOT NULL\n LIMIT 1;\n\")\nif [ -n \"$DIFFNOTE_MR\" ]; then\n echo \" Testing MR with DiffNotes: !$DIFFNOTE_MR\"\n OUTPUT=$(gi show mr \"$DIFFNOTE_MR\")\n if echo \"$OUTPUT\" | grep -qE '\\[[^]]+:[0-9]+\\]'; then\n echo \" PASS: File context [path:line] found\"\n else\n echo \" FAIL: DiffNote should show [path:line] context\"\n fi\nelse\n echo \" Skip: No MRs with DiffNotes\"\nfi\n\n# 9. Test count command\necho \"Step 9: Count command...\"\ngi count mrs || { echo \"FAIL: count mrs failed\"; exit 1; }\n\n# 10. 
Test sync-status\necho \"Step 10: Sync status...\"\ngi sync-status || echo \" Note: sync-status may need implementation\"\n\necho \"\"\necho \"=== Gate E: PASSED ===\"\n```\n\n## Test Commands (Quick Verification)\n```bash\n# List with all column types visible:\ngi list mrs --limit=10\n\n# Show a specific MR:\ngi show mr 42\n\n# Count with breakdown:\ngi count mrs\n\n# Complex filter:\ngi list mrs --state=opened --reviewer=alice --target-branch=main --limit=5\n```\n\n## Expected Output Formats\n\n### gi list mrs\n```\nMerge Requests (showing 5 of 1,234)\n\n !847 Refactor auth to use JWT tokens merged @johndoe main <- feature/jwt 3d ago\n !846 Fix memory leak in websocket handler opened @janedoe main <- fix/websocket 5d ago\n !845 [DRAFT] Add dark mode CSS variables opened @bobsmith main <- ui/dark-mode 1w ago\n !844 Update dependencies to latest versions closed @alice main <- chore/deps 2w ago\n```\n\n### gi show mr 847\n```\nMerge Request !847: Refactor auth to use JWT tokens\n================================================================================\n\nProject: group/project-one\nState: merged\nDraft: No\nAuthor: @johndoe\nAssignees: @janedoe, @bobsmith\nReviewers: @alice, @charlie\nLabels: enhancement, auth, reviewed\nSource: feature/jwt\nTarget: main\nMerge Status: merged\nMerged By: @alice\nMerged At: 2024-03-20 14:30:00\n\nDescription:\n Moving away from session cookies to JWT-based authentication...\n\nDiscussions (3):\n\n @janedoe (2024-03-16) [src/auth/jwt.ts:45]:\n Should we use a separate signing key for refresh tokens?\n\n @johndoe (2024-03-16):\n Good point. I'll add a separate key with rotation support.\n\n @alice (2024-03-18) [RESOLVED]:\n Looks good! 
Just one nit about the token expiry constant.\n```\n\n### gi count mrs\n```\nMerge Requests: 1,234\n opened: 89\n merged: 1,045\n closed: 100\n```\n\n## Dependencies\nThis gate requires:\n- bd-3js (CLI commands implementation)\n- All previous gates must pass first\n\n## Edge Cases\n- Ambiguous MR iid across projects: should prompt for `--project` or show error\n- Very long titles: should truncate with `...` in list view\n- Empty description: should show \"No description\" or empty section\n- No discussions: should show \"No discussions\" message\n- Unicode in titles/descriptions: should render correctly","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-26T22:06:02.411132Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:48:21.061166Z","closed_at":"2026-01-27T00:48:21.061125Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-lcb","depends_on_id":"bd-3js","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-ljf","title":"Add embedding error variants to LoreError","description":"## Background\nGate B introduces Ollama-dependent operations that need distinct error variants for clear diagnostics. Each error has a unique exit code, a descriptive message, and an actionable suggestion. These errors must integrate with the existing LoreError enum pattern (renamed from GiError in bd-3lc).\n\n## Approach\nExtend `src/core/error.rs` with 4 new variants per PRD Section 4.3.\n\n**ErrorCode additions:**\n```rust\npub enum ErrorCode {\n // ... existing (InternalError=1 through TransformError=13)\n OllamaUnavailable, // exit code 14\n OllamaModelNotFound, // exit code 15\n EmbeddingFailed, // exit code 16\n}\n```\n\n**LoreError additions:**\n```rust\n/// Ollama-specific connection failure. Use instead of Http for Ollama errors\n/// because it includes base_url for actionable error messages.\n#[error(\"Cannot connect to Ollama at {base_url}. 
Is it running?\")]\nOllamaUnavailable {\n base_url: String,\n #[source]\n source: Option,\n},\n\n#[error(\"Ollama model '{model}' not found. Run: ollama pull {model}\")]\nOllamaModelNotFound { model: String },\n\n#[error(\"Embedding failed for document {document_id}: {reason}\")]\nEmbeddingFailed { document_id: i64, reason: String },\n\n#[error(\"No embeddings found. Run: lore embed\")]\nEmbeddingsNotBuilt,\n```\n\n**code() mapping:**\n- OllamaUnavailable => ErrorCode::OllamaUnavailable\n- OllamaModelNotFound => ErrorCode::OllamaModelNotFound\n- EmbeddingFailed => ErrorCode::EmbeddingFailed\n- EmbeddingsNotBuilt => ErrorCode::EmbeddingFailed (shares exit code 16)\n\n**suggestion() mapping:**\n- OllamaUnavailable => \"Start Ollama: ollama serve\"\n- OllamaModelNotFound => \"Pull the model: ollama pull nomic-embed-text\"\n- EmbeddingFailed => \"Check Ollama logs or retry with 'lore embed --retry-failed'\"\n- EmbeddingsNotBuilt => \"Generate embeddings first: lore embed\"\n\n## Acceptance Criteria\n- [ ] All 4 error variants compile\n- [ ] Exit codes: OllamaUnavailable=14, OllamaModelNotFound=15, EmbeddingFailed=16\n- [ ] EmbeddingsNotBuilt shares exit code 16 (mapped to ErrorCode::EmbeddingFailed)\n- [ ] OllamaUnavailable has `base_url: String` and `source: Option`\n- [ ] EmbeddingFailed has `document_id: i64` and `reason: String`\n- [ ] Each variant has actionable .suggestion() text per PRD\n- [ ] ErrorCode Display: OLLAMA_UNAVAILABLE, OLLAMA_MODEL_NOT_FOUND, EMBEDDING_FAILED\n- [ ] Robot mode JSON includes code + suggestion for each variant\n- [ ] `cargo build` succeeds\n\n## Files\n- `src/core/error.rs` — extend LoreError enum + ErrorCode enum + impl blocks\n\n## TDD Loop\nRED: Add variants, `cargo build` fails on missing match arms\nGREEN: Add match arms in code(), exit_code(), suggestion(), to_robot_error(), Display\nVERIFY: `cargo build && cargo test error`\n\n## Edge Cases\n- OllamaUnavailable with source=None: still valid (used when no HTTP error 
available)\n- EmbeddingFailed with document_id=0: used for batch-level failures (not per-doc)\n- EmbeddingsNotBuilt vs OllamaUnavailable: former means \"never ran embed\", latter means \"Ollama down right now\"","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:33.994316Z","created_by":"tayloreernisse","updated_at":"2026-01-30T16:51:20.385574Z","closed_at":"2026-01-30T16:51:20.385369Z","close_reason":"Completed: Added 4 LoreError variants (OllamaUnavailable, OllamaModelNotFound, EmbeddingFailed, EmbeddingsNotBuilt) and 3 ErrorCode variants with exit codes 14-16. cargo build succeeds.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-ljf","depends_on_id":"bd-3lc","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} @@ -287,7 +304,7 @@ {"id":"bd-mem","title":"Implement shared backoff utility","description":"## Background\nBoth `dirty_sources` and `pending_discussion_fetches` tables use exponential backoff with `next_attempt_at` timestamps. Without a shared utility, each module would duplicate the backoff curve logic, risking drift. 
The shared backoff module ensures consistent retry behavior across all queue consumers in Gate C.\n\n## Approach\nCreate `src/core/backoff.rs` per PRD Section 6.X.\n\n**IMPORTANT — PRD-exact signature and implementation:**\n```rust\nuse rand::Rng;\n\n/// Compute next_attempt_at with exponential backoff and jitter.\n///\n/// Formula: now + min(3600000, 1000 * 2^attempt_count) * (0.9 to 1.1)\n/// - Capped at 1 hour to prevent runaway delays\n/// - ±10% jitter prevents synchronized retries after outages\n///\n/// Used by:\n/// - `dirty_sources` retry scheduling (document regeneration failures)\n/// - `pending_discussion_fetches` retry scheduling (API fetch failures)\n///\n/// Having one implementation prevents subtle divergence between queues\n/// (e.g., different caps or jitter ranges).\npub fn compute_next_attempt_at(now: i64, attempt_count: i64) -> i64 {\n // Cap attempt_count to prevent overflow (2^30 > 1 hour anyway)\n let capped_attempts = attempt_count.min(30) as u32;\n let base_delay_ms = 1000_i64.saturating_mul(1 << capped_attempts);\n let capped_delay_ms = base_delay_ms.min(3_600_000); // 1 hour cap\n\n // Add ±10% jitter\n let jitter_factor = rand::thread_rng().gen_range(0.9..=1.1);\n let delay_with_jitter = (capped_delay_ms as f64 * jitter_factor) as i64;\n\n now + delay_with_jitter\n}\n```\n\n**Key PRD details (must match exactly):**\n- `attempt_count` parameter is `i64` (not `u32`) — matches SQLite integer type from DB columns\n- Overflow prevention: `.min(30) as u32` caps before shift\n- Base delay: `1000_i64.saturating_mul(1 << capped_attempts)` — uses `saturating_mul` for safety\n- Cap: `3_600_000` (1 hour)\n- Jitter: `gen_range(0.9..=1.1)` — inclusive range\n- Return: `i64` (milliseconds epoch)\n\n**Cargo.toml change:** Add `rand = \"0.8\"` to `[dependencies]`.\n\n## Acceptance Criteria\n- [ ] Single shared implementation used by both dirty_tracker and discussion_queue\n- [ ] Signature: `pub fn compute_next_attempt_at(now: i64, attempt_count: i64) -> 
i64`\n- [ ] attempt_count is i64 (matches SQLite column type), not u32\n- [ ] Overflow prevention: `.min(30) as u32` before shift\n- [ ] Base delay uses `1000_i64.saturating_mul(1 << capped_attempts)`\n- [ ] Cap at 1 hour (3,600,000 ms)\n- [ ] Jitter: `gen_range(0.9..=1.1)` inclusive range\n- [ ] Exponential curve: 1s, 2s, 4s, 8s, ... up to 1h cap\n- [ ] `cargo test backoff` passes\n\n## Files\n- `src/core/backoff.rs` — new file\n- `src/core/mod.rs` — add `pub mod backoff;`\n- `Cargo.toml` — add `rand = \"0.8\"`\n\n## TDD Loop\nRED: `src/core/backoff.rs` with `#[cfg(test)] mod tests`:\n- `test_exponential_curve` — verify delays double each attempt (within jitter range)\n- `test_cap_at_one_hour` — attempt 20+ still produces delay <= MAX_DELAY_MS * 1.1\n- `test_jitter_range` — run 100 iterations, all delays within [0.9x, 1.1x] of base\n- `test_first_retry_is_about_one_second` — attempt 1 produces ~1000ms delay\n- `test_overflow_safety` — very large attempt_count doesn't panic\nGREEN: Implement compute_next_attempt_at()\nVERIFY: `cargo test backoff`\n\n## Edge Cases\n- `attempt_count` > 30: `.min(30)` caps, saturating_mul prevents overflow\n- `attempt_count` = 0: not used in practice (callers pass `attempt_count + 1`)\n- `attempt_count` = 1: delay is ~1 second (first retry)\n- Negative attempt_count: `.min(30)` still works, shift of negative-as-u32 wraps but saturating_mul handles it","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-30T15:27:09.474Z","created_by":"tayloreernisse","updated_at":"2026-01-30T16:57:24.900137Z","closed_at":"2026-01-30T16:57:24.899942Z","close_reason":"Completed: compute_next_attempt_at with exp backoff (1s base, 1h cap, +-10% jitter), i64 params matching SQLite, overflow-safe, 5 tests pass","compaction_level":0,"original_size":0} {"id":"bd-mk3","title":"Update ingest command for merge_requests type","description":"## Background\nCLI entry point for MR ingestion. Routes `--type=merge_requests` to the orchestrator. 
Must ensure `--full` resets both MR cursor AND discussion watermarks. This is the user-facing command that kicks off the entire MR sync pipeline.\n\n## Approach\nUpdate `src/cli/commands/ingest.rs` to handle `merge_requests` type:\n1. Add `merge_requests` branch to the resource type match statement\n2. Validate resource type early with helpful error message\n3. Pass `full` flag through to orchestrator (it handles the watermark reset internally)\n\n## Files\n- `src/cli/commands/ingest.rs` - Add merge_requests branch to `run_ingest`\n\n## Acceptance Criteria\n- [ ] `gi ingest --type=merge_requests` runs MR ingestion successfully\n- [ ] `gi ingest --type=merge_requests --full` resets cursor AND discussion watermarks\n- [ ] `gi ingest --type=invalid` returns helpful error listing valid types\n- [ ] Progress output shows MR counts, discussion counts, and skip counts\n- [ ] Default type remains `issues` for backward compatibility\n- [ ] `cargo test ingest_command` passes\n\n## TDD Loop\nRED: `gi ingest --type=merge_requests` -> \"invalid type: merge_requests\"\nGREEN: Add merge_requests to match statement in run_ingest\nVERIFY: `gi ingest --type=merge_requests --help` shows merge_requests as valid\n\n## Function Signature\n```rust\npub async fn run_ingest(\n config: &Config,\n args: &IngestArgs,\n) -> Result<(), GiError>\n```\n\n## IngestArgs Reference (existing)\n```rust\n#[derive(Parser, Debug)]\npub struct IngestArgs {\n /// Resource type to ingest\n #[arg(long, short = 't', default_value = \"issues\")]\n pub r#type: String,\n \n /// Filter to specific project (by path or ID)\n #[arg(long, short = 'p')]\n pub project: Option,\n \n /// Force run even if another ingest is in progress\n #[arg(long, short = 'f')]\n pub force: bool,\n \n /// Full sync - reset cursor and refetch all\n #[arg(long)]\n pub full: bool,\n}\n```\n\n## Code Change\n```rust\nuse crate::core::errors::GiError;\nuse crate::ingestion::orchestrator::Orchestrator;\n\npub async fn run_ingest(\n config: 
&Config,\n args: &IngestArgs,\n) -> Result<(), GiError> {\n let resource_type = args.r#type.as_str();\n \n // Validate resource type early\n match resource_type {\n \"issues\" | \"merge_requests\" => {}\n _ => {\n return Err(GiError::InvalidArgument {\n name: \"type\".to_string(),\n value: resource_type.to_string(),\n expected: \"issues or merge_requests\".to_string(),\n });\n }\n }\n \n // Acquire single-flight lock (unless --force)\n if !args.force {\n acquire_ingest_lock(config, resource_type)?;\n }\n \n // Get projects to ingest (filtered if --project specified)\n let projects = get_projects_to_ingest(config, args.project.as_deref())?;\n \n for project in projects {\n println!(\"Ingesting {} for {}...\", resource_type, project.path);\n \n let orchestrator = Orchestrator::new(\n &config,\n project.id,\n project.gitlab_id,\n )?;\n \n let result = orchestrator.run_ingestion(resource_type, args.full).await?;\n \n // Print results based on resource type\n match resource_type {\n \"issues\" => {\n println!(\" {}: {} issues fetched, {} upserted\",\n project.path, result.issues_fetched, result.issues_upserted);\n }\n \"merge_requests\" => {\n println!(\" {}: {} MRs fetched, {} new labels, {} assignees, {} reviewers\",\n project.path,\n result.mrs_fetched,\n result.labels_created,\n result.assignees_linked,\n result.reviewers_linked,\n );\n println!(\" Discussions: {} synced, {} notes ({} DiffNotes)\",\n result.discussions_synced,\n result.notes_synced,\n result.diffnotes_count,\n );\n if result.mrs_skipped_discussion_sync > 0 {\n println!(\" Skipped discussion sync for {} unchanged MRs\",\n result.mrs_skipped_discussion_sync);\n }\n if result.failed_discussion_syncs > 0 {\n eprintln!(\" Warning: {} MRs failed discussion sync (will retry next run)\",\n result.failed_discussion_syncs);\n }\n }\n _ => unreachable!(),\n }\n }\n \n // Release lock\n if !args.force {\n release_ingest_lock(config, resource_type)?;\n }\n \n Ok(())\n}\n```\n\n## Output Format\n```\nIngesting 
merge_requests for group/project-one...\n group/project-one: 567 MRs fetched, 12 new labels, 89 assignees, 45 reviewers\n Discussions: 456 synced, 1,234 notes (89 DiffNotes)\n Skipped discussion sync for 444 unchanged MRs\n\nTotal: 567 MRs, 456 discussions, 1,234 notes\n```\n\n## Full Sync Behavior\nWhen `--full` is passed:\n1. MR cursor reset to NULL (handled by `ingest_merge_requests` with `full_sync: true`)\n2. Discussion watermarks reset to NULL (handled by `reset_discussion_watermarks` called from ingestion)\n3. All MRs re-fetched from GitLab API\n4. All discussions re-fetched for every MR\n\n## Error Types (from GiError enum)\n```rust\n// In src/core/errors.rs\npub enum GiError {\n InvalidArgument {\n name: String,\n value: String,\n expected: String,\n },\n LockError {\n resource: String,\n message: String,\n },\n // ... other variants\n}\n```\n\n## Edge Cases\n- Default type is `issues` for backward compatibility with CP1\n- Project filter (`--project`) can limit to specific project by path or ID\n- Force flag (`--force`) bypasses single-flight lock for debugging\n- If no projects configured, return helpful error about running `gi project add` first\n- Empty project (no MRs): completes successfully with \"0 MRs fetched\"","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:43.034952Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:28:52.711235Z","closed_at":"2026-01-27T00:28:52.711166Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-mk3","depends_on_id":"bd-10f","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-nu0d","title":"Implement resize storm + rapid keypress + event fuzz tests","description":"## Background\nStress tests verify the TUI handles adverse input conditions without panic: rapid terminal resizes, fast keypress sequences, and randomized event traces. 
The event fuzz suite uses deterministic seed replay for reproducibility.\n\n## Approach\nResize storm:\n- Send 100 resize events in rapid succession (varying sizes from 20x10 to 300x80)\n- Assert no panic, no layout corruption, final render is valid for final size\n- FrankenTUI's BOCPD resize coalescing should handle this — verify it works\n\nRapid keypress:\n- Send 50 key events in <100ms: mix of navigation, filter input, mode switches\n- Assert no panic, no stuck input mode, final state is consistent\n- Verify Ctrl+C always exits regardless of state\n\nEvent fuzz (deterministic):\n- Generate 10k randomized event traces from: key events, resize events, paste events, tick events\n- Use seeded RNG for reproducibility\n- Replay each trace, check invariants after each event:\n - Navigation stack depth >= 1 (always has at least Dashboard)\n - InputMode transitions are valid (no impossible state combinations)\n - No panic\n - LoadState transitions are valid (no Idle->Refreshing without LoadingInitial first for initial load)\n- On invariant violation: log seed + event index for reproduction\n\n## Acceptance Criteria\n- [ ] 100 rapid resizes: no panic, valid final render\n- [ ] 50 rapid keys: no stuck input mode, Ctrl+C exits\n- [ ] 10k fuzz traces: zero invariant violations\n- [ ] Fuzz tests deterministically reproducible via seed\n- [ ] Navigation invariant: stack always has at least Dashboard\n- [ ] InputMode invariant: valid transitions only\n\n## Files\n- CREATE: crates/lore-tui/tests/stress_tests.rs\n- CREATE: crates/lore-tui/tests/fuzz_tests.rs\n\n## TDD Anchor\nRED: Write test_resize_storm_no_panic that sends 100 resize events to LoreApp, asserts no panic.\nGREEN: Ensure view() handles all terminal sizes gracefully.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_resize_storm\n\n## Edge Cases\n- Zero-size terminal (0x0): must not panic, skip rendering\n- Very large terminal (500x200): must not allocate unbounded memory\n- Paste events can 
contain arbitrary bytes including control chars — sanitize\n- Fuzz seed must be logged at test start for reproduction\n\n## Dependency Context\nUses LoreApp from \"Implement LoreApp Model\" task.\nUses NavigationStack from \"Implement NavigationStack\" task.\nUses FakeClock for deterministic time in fuzz tests.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:04:42.012118Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:38.299688Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-nu0d","depends_on_id":"bd-1b6k","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-nu0d","depends_on_id":"bd-2nfs","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} -{"id":"bd-nwux","title":"Epic: TUI Phase 3 — Power Features","description":"## Background\nPhase 3 adds the power-user screens: Search (3 modes with preview), Timeline (5-stage pipeline visualization), Who (5 expert/workload modes), and Command Palette (fuzzy match). 
These screens leverage the foundation from Phases 1-2.\n\n## Acceptance Criteria\n- [ ] Search supports lexical, hybrid, and semantic modes with split-pane preview\n- [ ] Search capability detection enables/disables modes based on available indexes\n- [ ] Timeline renders chronological event stream with color-coded event types\n- [ ] Who supports Expert, Workload, Reviews, Active, and Overlap modes\n- [ ] Command palette provides fuzzy-match access to all commands","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:00:27.375421Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.286486Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-nwux","depends_on_id":"bd-3pxe","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-nwux","title":"Epic: TUI Phase 3 — Power Features","description":"## Background\nPhase 3 adds the power-user screens: Search (3 modes with preview), Timeline (5-stage pipeline visualization), Who (5 expert/workload modes), Command Palette (fuzzy match), Trace (code provenance drill-down), and File History (per-file MR timeline). These screens leverage the foundation from Phases 1-2.\n\nThe Trace and File History screens were added after v0.8.0 introduced `lore trace` and `lore file-history` CLI commands. 
They provide interactive drill-down into code provenance chains (file -> MR -> issue -> discussion) and per-file change timelines with rename tracking.\n\n## Acceptance Criteria\n- [ ] Search supports lexical, hybrid, and semantic modes with split-pane preview\n- [ ] Search capability detection enables/disables modes based on available indexes\n- [ ] Timeline renders chronological event stream with color-coded event types\n- [ ] Who supports Expert, Workload, Reviews, Active, and Overlap modes (with include-closed toggle)\n- [ ] Command palette provides fuzzy-match access to all commands\n- [ ] Trace screen shows file -> MR -> issue -> discussion chains with interactive drill-down\n- [ ] File History screen shows per-file MR timeline with rename chain and DiffNote snippets","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:00:27.375421Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:07:05.438191Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-nwux","depends_on_id":"bd-3pxe","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-o7b","title":"[CP1] gi show issue command","description":"## Background\n\nThe `gi show issue ` command displays detailed information about a single issue including metadata, description, labels, and all discussions with their notes. It provides a complete view similar to the GitLab web UI.\n\n## Approach\n\n### Module: src/cli/commands/show.rs\n\n### Clap Definition\n\n```rust\n#[derive(Args)]\npub struct ShowArgs {\n /// Entity type\n #[arg(value_parser = [\"issue\", \"mr\"])]\n pub entity: String,\n\n /// Entity IID\n pub iid: i64,\n\n /// Project path (required if ambiguous)\n #[arg(long)]\n pub project: Option,\n}\n```\n\n### Handler Function\n\n```rust\npub async fn handle_show(args: ShowArgs, conn: &Connection) -> Result<()>\n```\n\n### Logic (for entity=\"issue\")\n\n1. 
**Find issue**: Query by iid, optionally filtered by project\n - If multiple projects have same iid, require --project or error\n2. **Load metadata**: title, state, author, created_at, updated_at, web_url\n3. **Load labels**: JOIN through issue_labels to labels table\n4. **Load discussions**: All discussions for this issue\n5. **Load notes**: All notes for each discussion, ordered by position\n6. **Format output**: Rich display with sections\n\n### Output Format (matches PRD)\n\n```\nIssue #1234: Authentication redesign\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\nProject: group/project-one\nState: opened\nAuthor: @johndoe\nCreated: 2024-01-15\nUpdated: 2024-03-20\nLabels: enhancement, auth\nURL: https://gitlab.example.com/group/project-one/-/issues/1234\n\nDescription:\n We need to redesign the authentication flow to support...\n\nDiscussions (5):\n\n @janedoe (2024-01-16):\n I agree we should move to JWT-based auth...\n\n @johndoe (2024-01-16):\n What about refresh token strategy?\n\n @bobsmith (2024-01-17):\n Have we considered OAuth2?\n```\n\n### Queries\n\n```sql\n-- Find issue\nSELECT i.*, p.path as project_path\nFROM issues i\nJOIN projects p ON i.project_id = p.id\nWHERE i.iid = ? AND (p.path = ? OR ? 
IS NULL)\n\n-- Get labels\nSELECT l.name FROM labels l\nJOIN issue_labels il ON l.id = il.label_id\nWHERE il.issue_id = ?\n\n-- Get discussions with notes\nSELECT d.*, n.* FROM discussions d\nJOIN notes n ON d.id = n.discussion_id\nWHERE d.issue_id = ?\nORDER BY d.first_note_at, n.position\n```\n\n## Acceptance Criteria\n\n- [ ] Shows issue metadata (title, state, author, dates, URL)\n- [ ] Shows labels as comma-separated list\n- [ ] Shows description (truncated if very long)\n- [ ] Shows discussions grouped with notes indented\n- [ ] Handles --project filter correctly\n- [ ] Errors clearly if iid is ambiguous without --project\n\n## Files\n\n- src/cli/commands/mod.rs (add `pub mod show;`)\n- src/cli/commands/show.rs (create)\n- src/cli/mod.rs (add Show variant to Commands enum)\n\n## TDD Loop\n\nRED:\n```rust\n#[tokio::test] async fn show_issue_displays_metadata()\n#[tokio::test] async fn show_issue_displays_labels()\n#[tokio::test] async fn show_issue_displays_discussions()\n#[tokio::test] async fn show_issue_requires_project_when_ambiguous()\n```\n\nGREEN: Implement handler with queries and formatting\n\nVERIFY: `cargo test show_issue`\n\n## Edge Cases\n\n- Issue with no labels - show \"Labels: (none)\"\n- Issue with no discussions - show \"Discussions: (none)\"\n- Issue with very long description - truncate with \"...\"\n- System notes in discussions - filter out or show with [system] prefix\n- Individual notes (not threaded) - show without reply indentation","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-25T17:02:38.384702Z","created_by":"tayloreernisse","updated_at":"2026-01-25T23:05:25.688102Z","closed_at":"2026-01-25T23:05:25.688043Z","close_reason":"Implemented gi show issue command with metadata, labels, and discussions 
display","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-o7b","depends_on_id":"bd-208","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-o7b","depends_on_id":"bd-hbo","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-ozy","title":"[CP1] Ingestion orchestrator","description":"## Background\n\nThe ingestion orchestrator coordinates issue sync followed by dependent discussion sync. It implements the CP1 canonical pattern: fetch issues, identify which need discussion sync (updated_at advanced), then execute discussion sync with bounded concurrency.\n\n## Approach\n\n### Module: src/ingestion/orchestrator.rs\n\n### Main Function\n\n```rust\npub async fn ingest_project_issues(\n conn: &Connection,\n client: &GitLabClient,\n config: &Config,\n project_id: i64, // Local DB project ID\n gitlab_project_id: i64,\n) -> Result\n\n#[derive(Debug, Default)]\npub struct IngestProjectResult {\n pub issues_fetched: usize,\n pub issues_upserted: usize,\n pub labels_created: usize,\n pub discussions_fetched: usize,\n pub notes_fetched: usize,\n pub system_notes_count: usize,\n pub issues_skipped_discussion_sync: usize,\n}\n```\n\n### Orchestration Steps\n\n1. **Call issue ingestion**: `ingest_issues(conn, client, config, project_id, gitlab_project_id)`\n2. **Get issues needing discussion sync**: From IngestIssuesResult.issues_needing_discussion_sync\n3. **Execute bounded discussion sync**:\n - Use `tokio::task::LocalSet` for single-threaded runtime\n - Respect `config.sync.dependent_concurrency` (default: 5)\n - For each IssueForDiscussionSync:\n - Call `ingest_issue_discussions(...)`\n - Aggregate results\n4. 
**Calculate skipped count**: total_issues - issues_needing_discussion_sync.len()\n\n### Bounded Concurrency Pattern\n\n```rust\nuse futures::stream::{self, StreamExt};\n\nlet local_set = LocalSet::new();\nlocal_set.run_until(async {\n stream::iter(issues_needing_sync)\n .map(|issue| async {\n ingest_issue_discussions(\n conn, client, config,\n project_id, gitlab_project_id,\n issue.iid, issue.local_issue_id, issue.updated_at,\n ).await\n })\n .buffer_unordered(config.sync.dependent_concurrency)\n .try_collect::>()\n .await\n}).await\n```\n\nNote: Single-threaded runtime means concurrency is I/O-bound, not parallel execution.\n\n## Acceptance Criteria\n\n- [ ] Orchestrator calls issue ingestion first\n- [ ] Only issues with updated_at > discussions_synced_for_updated_at get discussion sync\n- [ ] Bounded concurrency respects dependent_concurrency config\n- [ ] Results aggregated from both issue and discussion ingestion\n- [ ] issues_skipped_discussion_sync accurately reflects unchanged issues\n\n## Files\n\n- src/ingestion/mod.rs (add `pub mod orchestrator;`)\n- src/ingestion/orchestrator.rs (create)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/orchestrator_tests.rs\n#[tokio::test] async fn orchestrates_issue_then_discussion_sync()\n#[tokio::test] async fn skips_discussion_sync_for_unchanged_issues()\n#[tokio::test] async fn respects_bounded_concurrency()\n#[tokio::test] async fn aggregates_results_correctly()\n```\n\nGREEN: Implement orchestrator with bounded concurrency\n\nVERIFY: `cargo test orchestrator`\n\n## Edge Cases\n\n- All issues unchanged - no discussion sync calls\n- All issues new - all get discussion sync\n- dependent_concurrency=1 - sequential discussion fetches\n- Issue ingestion fails - orchestrator returns error, no discussion 
sync","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.289941Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:54:07.447647Z","closed_at":"2026-01-25T22:54:07.447577Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-ozy","depends_on_id":"bd-208","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-ozy","depends_on_id":"bd-hbo","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-pgdw","title":"OBSERV: Add root tracing span with run_id to sync and ingest","description":"## Background\nA root tracing span per command invocation provides the top of the span hierarchy. All child spans (ingest_issues, fetch_pages, etc.) inherit the run_id field, making every log line within a run filterable by jq.\n\n## Approach\nIn run_sync() (src/cli/commands/sync.rs:54), after generating run_id, create a root span:\n\n```rust\npub async fn run_sync(config: &Config, options: SyncOptions) -> Result {\n let run_id = &uuid::Uuid::new_v4().to_string()[..8];\n let _root = tracing::info_span!(\"sync\", %run_id).entered();\n // ... existing sync pipeline code\n}\n```\n\nIn run_ingest() (src/cli/commands/ingest.rs:107), same pattern:\n\n```rust\npub async fn run_ingest(...) -> Result {\n let run_id = &uuid::Uuid::new_v4().to_string()[..8];\n let _root = tracing::info_span!(\"ingest\", %run_id, resource_type).entered();\n // ... existing ingest code\n}\n```\n\nCRITICAL: The _root guard must live for the entire function scope. If it drops early (e.g., shadowed or moved into a block), child spans lose their parent context. Use let _root (underscore prefix) to signal intentional unused binding that's kept alive for its Drop impl.\n\nFor async functions, use .entered() NOT .enter(). In async Rust, Span::enter() returns a guard that is NOT Send, which prevents the future from being sent across threads. 
However, .entered() on an info_span! creates an Entered which is also !Send. For async, prefer:\n\n```rust\nlet root_span = tracing::info_span!(\"sync\", %run_id);\nasync move {\n // ... body\n}.instrument(root_span).await\n```\n\nOr use #[instrument] on the function itself with the run_id field.\n\n## Acceptance Criteria\n- [ ] Root span established for every sync and ingest invocation\n- [ ] run_id appears in span context of all child log lines\n- [ ] jq 'select(.spans[]? | .run_id)' can extract all lines from a run\n- [ ] Span is active for entire function duration (not dropped early)\n- [ ] Works correctly with async/await (span propagated across .await points)\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/cli/commands/sync.rs (add root span in run_sync, line ~54)\n- src/cli/commands/ingest.rs (add root span in run_ingest, line ~107)\n\n## TDD Loop\nRED: test_root_span_propagates_run_id (capture JSON log output, verify run_id in span context)\nGREEN: Add root spans to run_sync and run_ingest\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- Async span propagation: .entered() is !Send. For async functions, use .instrument() or #[instrument]. The run_sync function is async (line 54: pub async fn run_sync).\n- Nested command calls: run_sync calls run_ingest internally. If both create root spans, we get a nested hierarchy: sync > ingest. This is correct behavior -- the ingest span becomes a child of sync.\n- Span storage: tracing-subscriber registry handles span storage automatically. 
No manual setup needed beyond adding the layer.","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:54:07.771605Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:19:33.006274Z","closed_at":"2026-02-04T17:19:33.006227Z","close_reason":"Added root tracing spans with run_id to run_sync() and run_ingest() using .instrument() pattern for async compatibility","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-pgdw","depends_on_id":"bd-2ni","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-pgdw","depends_on_id":"bd-37qw","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} @@ -298,18 +315,21 @@ {"id":"bd-sqw","title":"Add Resource Events API endpoints to GitLab client","description":"## Background\nNeed paginated fetching of state/label/milestone events per entity from GitLab Resource Events APIs. The existing client uses reqwest with rate limiting and has stream_issues/stream_merge_requests patterns for paginated endpoints. However, resource events are per-entity (not project-wide), so they should return Vec rather than use streaming.\n\nExisting pagination pattern in client.rs: follow Link headers with per_page=100.\n\n## Approach\nAdd to src/gitlab/client.rs a generic helper and 6 endpoint methods:\n\n1. Generic paginated fetch helper (if not already present):\n```rust\nasync fn fetch_all_pages(&self, url: &str) -> Result> {\n let mut results = Vec::new();\n let mut next_url = Some(url.to_string());\n while let Some(current_url) = next_url {\n self.rate_limiter.lock().unwrap().wait();\n let resp = self.client.get(¤t_url)\n .header(\"PRIVATE-TOKEN\", &self.token)\n .query(&[(\"per_page\", \"100\")])\n .send().await?;\n // ... parse Link header for next page\n let page: Vec = resp.json().await?;\n results.extend(page);\n next_url = parse_next_link(&resp_headers);\n }\n Ok(results)\n}\n```\n\n2. 
Six endpoint methods:\n```rust\npub async fn fetch_issue_state_events(&self, project_id: i64, iid: i64) -> Result>\npub async fn fetch_issue_label_events(&self, project_id: i64, iid: i64) -> Result>\npub async fn fetch_issue_milestone_events(&self, project_id: i64, iid: i64) -> Result>\npub async fn fetch_mr_state_events(&self, project_id: i64, iid: i64) -> Result>\npub async fn fetch_mr_label_events(&self, project_id: i64, iid: i64) -> Result>\npub async fn fetch_mr_milestone_events(&self, project_id: i64, iid: i64) -> Result>\n```\n\nURL patterns:\n- Issues: `/api/v4/projects/{project_id}/issues/{iid}/resource_{type}_events`\n- MRs: `/api/v4/projects/{project_id}/merge_requests/{iid}/resource_{type}_events`\n\n3. Consider a convenience method that fetches all 3 event types for an entity in one call:\n```rust\npub async fn fetch_all_resource_events(&self, project_id: i64, entity_type: &str, iid: i64) \n -> Result<(Vec, Vec, Vec)>\n```\n\n## Acceptance Criteria\n- [ ] All 6 endpoints construct correct URLs\n- [ ] Pagination follows Link headers (handles entities with >100 events)\n- [ ] Rate limiter respected for each page request\n- [ ] 404 returns GitLabNotFound error (entity may have been deleted)\n- [ ] Network errors wrapped in GitLabNetworkError\n- [ ] Types from bd-2fm used for deserialization\n\n## Files\n- src/gitlab/client.rs (add methods + optionally generic helper)\n\n## TDD Loop\nRED: Add to tests/gitlab_client_tests.rs (or new file):\n- `test_fetch_issue_state_events_url` - verify URL construction (mock or inspect)\n- `test_fetch_mr_label_events_url` - verify URL construction\n- Note: Full integration tests require a mock HTTP server (mockito or wiremock). 
If the project doesn't already have one, write URL-construction unit tests only.\n\nGREEN: Implement the 6 methods using the generic helper\n\nVERIFY: `cargo test gitlab_client -- --nocapture && cargo build`\n\n## Edge Cases\n- project_id here is the GitLab project ID (not local DB id) — callers must pass gitlab_project_id\n- Empty results (new entity with no events) should return Ok(Vec::new()), not error\n- GitLab returns 403 for projects where Resource Events API is disabled — map to appropriate error\n- Very old entities may have thousands of events — pagination is essential\n- Rate limiter must be called per-page, not per-entity","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:31:24.137296Z","created_by":"tayloreernisse","updated_at":"2026-02-03T16:19:18.432602Z","closed_at":"2026-02-03T16:19:18.432559Z","close_reason":"Added fetch_all_pages generic paginator, 6 per-entity endpoint methods (state/label/milestone for issues and MRs), and fetch_all_resource_events convenience method in src/gitlab/client.rs.","compaction_level":0,"original_size":0,"labels":["api","gate-1","phase-b"],"dependencies":[{"issue_id":"bd-sqw","depends_on_id":"bd-2fm","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-sqw","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-tfh3","title":"WHO: Comprehensive test suite","description":"## Background\n\n20+ tests covering mode resolution, path query construction, SQL queries, and edge cases. 
All tests use in-memory SQLite with run_migrations().\n\n## Approach\n\n### Test helpers (shared across all tests):\n```rust\nfn setup_test_db() -> Connection {\n let conn = create_connection(Path::new(\":memory:\")).unwrap();\n run_migrations(&conn).unwrap();\n conn\n}\nfn insert_project(conn, id, path) // gitlab_project_id=id*100, web_url from path\nfn insert_mr(conn, id, project_id, iid, author, state) // gitlab_id=id*10, timestamps=now_ms()\nfn insert_issue(conn, id, project_id, iid, author) // state='opened'\nfn insert_discussion(conn, id, project_id, mr_id, issue_id, resolvable, resolved)\n#[allow(clippy::too_many_arguments)]\nfn insert_diffnote(conn, id, discussion_id, project_id, author, file_path, body)\nfn insert_assignee(conn, issue_id, username)\nfn insert_reviewer(conn, mr_id, username)\n```\n\n### Test list with key assertions:\n\n**Mode resolution:**\n- test_is_file_path_discrimination: src/auth/ -> Expert, asmith -> Workload, @asmith -> Workload, asmith+--reviews -> Reviews, --path README.md -> Expert, --path Makefile -> Expert\n\n**Path queries:**\n- test_build_path_query: trailing/ -> prefix, no-dot-no-slash -> prefix, file.ext -> exact, root.md -> exact, .github/workflows/ -> prefix, v1.2/auth/ -> prefix, test_files/ -> escaped prefix\n- test_build_path_query_exact_does_not_escape: README_with_underscore.md -> raw (no \\\\_)\n- test_path_flag_dotless_root_file_is_exact: Makefile -> exact, Dockerfile -> exact\n- test_build_path_query_dotless_subdir_file_uses_db_probe: src/Dockerfile with DB data -> exact; without -> prefix\n- test_build_path_query_probe_is_project_scoped: data in proj 1, unscoped -> exact; scoped proj 2 -> prefix; scoped proj 1 -> exact\n- test_escape_like: normal->normal, has_underscore->has\\\\_underscore, has%percent->has\\\\%percent\n- test_normalize_repo_path: ./src/ -> src/, /src/ -> src/, ././src -> src, backslash conversion, // collapse, whitespace trim\n\n**Queries:**\n- test_expert_query: 3 experts ranked correctly, 
reviewer_b first\n- test_expert_excludes_self_review_notes: author_a review_mr_count=0, author_mr_count>0\n- test_expert_truncation: limit=2 truncated=true len=2; limit=10 truncated=false\n- test_workload_query: assigned_issues.len()=1, authored_mrs.len()=1\n- test_reviews_query: total=3, categorized=2, categories.len()=2\n- test_normalize_review_prefix: suggestion/Suggestion:/nit/nitpick/non-blocking/TODO\n- test_active_query: total=1, discussions.len()=1, note_count=2 (NOT 1), discussion_id>0\n- test_active_participants_sorted: [\"alpha_user\", \"zebra_user\"]\n- test_overlap_dual_roles: A+R role, both touch counts >0, mr_refs contain project path\n- test_overlap_multi_project_mr_refs: team/backend!100 AND team/frontend!100 present\n- test_overlap_excludes_self_review_notes: review_touch_count=0\n- test_lookup_project_path: round-trip \"team/backend\"\n\n## Files\n\n- `src/cli/commands/who.rs` (inside #[cfg(test)] mod tests)\n\n## TDD Loop\n\nTests are written alongside each query bead (RED phase). This bead tracks the full test suite as a verification gate.\nVERIFY: `cargo test -- who`\n\n## Acceptance Criteria\n\n- [ ] All 20+ tests pass\n- [ ] cargo test -- who shows 0 failures\n- [ ] No clippy warnings from test code (use #[allow(clippy::too_many_arguments)] on insert_diffnote)\n\n## Edge Cases\n\n- In-memory DB includes migration 017 (indexes created but no real data perf benefit)\n- Test timestamps use now_ms() — tests are time-independent (since_ms=0 in most queries)\n- insert_mr uses gitlab_id=id*10 to avoid conflicts","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:41:25.839065Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.601284Z","closed_at":"2026-02-08T04:10:29.601248Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. 
All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-tfh3","depends_on_id":"bd-1rdi","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-tfh3","depends_on_id":"bd-2711","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-tfh3","depends_on_id":"bd-3mj2","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-tfh3","depends_on_id":"bd-b51e","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-tfh3","depends_on_id":"bd-m7k1","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-tfh3","depends_on_id":"bd-s3rc","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-tfh3","depends_on_id":"bd-zqpf","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-tir","title":"Implement generic dependent fetch queue (enqueue + drain)","description":"## Background\nThe pending_dependent_fetches table (migration 011) provides a generic job queue for all dependent resource fetches across Gates 1, 2, and 4. This module implements the queue operations: enqueue, claim, complete, fail, and stale lock reclamation. It generalizes the existing discussion_queue.rs pattern.\n\n## Approach\nCreate src/core/dependent_queue.rs with:\n\n```rust\nuse rusqlite::Connection;\nuse super::error::Result;\n\n/// A pending job from the dependent fetch queue.\npub struct PendingJob {\n pub id: i64,\n pub project_id: i64,\n pub entity_type: String, // \"issue\" | \"merge_request\"\n pub entity_iid: i64,\n pub entity_local_id: i64,\n pub job_type: String, // \"resource_events\" | \"mr_closes_issues\" | \"mr_diffs\"\n pub payload_json: Option,\n pub attempts: i32,\n}\n\n/// Enqueue a dependent fetch job. 
Idempotent via UNIQUE constraint (INSERT OR IGNORE).\npub fn enqueue_job(\n conn: &Connection,\n project_id: i64,\n entity_type: &str,\n entity_iid: i64,\n entity_local_id: i64,\n job_type: &str,\n payload_json: Option<&str>,\n) -> Result // returns true if actually inserted (not deduped)\n\n/// Claim a batch of jobs for processing. Atomically sets locked_at.\n/// Only claims jobs where locked_at IS NULL AND (next_retry_at IS NULL OR next_retry_at <= now).\npub fn claim_jobs(\n conn: &Connection,\n job_type: &str,\n batch_size: usize,\n) -> Result>\n\n/// Mark a job as complete (DELETE the row).\npub fn complete_job(conn: &Connection, job_id: i64) -> Result<()>\n\n/// Mark a job as failed. Increment attempts, set next_retry_at with exponential backoff, clear locked_at.\n/// Backoff: 30s * 2^(attempts-1), capped at 480s.\npub fn fail_job(conn: &Connection, job_id: i64, error: &str) -> Result<()>\n\n/// Reclaim stale locks (locked_at older than threshold).\n/// Returns count of reclaimed jobs.\npub fn reclaim_stale_locks(conn: &Connection, stale_threshold_minutes: u32) -> Result\n\n/// Count pending jobs by job_type (for stats/progress).\npub fn count_pending_jobs(conn: &Connection) -> Result>\n```\n\nRegister in src/core/mod.rs: `pub mod dependent_queue;`\n\n**Key implementation details:**\n- claim_jobs uses a two-step approach: SELECT ids WHERE available, then UPDATE SET locked_at for those ids. 
Use a single transaction.\n- enqueued_at = current time in ms epoch UTC\n- locked_at = current time in ms epoch UTC when claimed\n- Backoff formula: next_retry_at = now + min(30_000 * 2^(attempts-1), 480_000) ms\n\n## Acceptance Criteria\n- [ ] enqueue_job is idempotent (INSERT OR IGNORE on UNIQUE constraint)\n- [ ] enqueue_job returns true on insert, false on dedup\n- [ ] claim_jobs only claims unlocked, non-retrying jobs\n- [ ] claim_jobs respects batch_size limit\n- [ ] complete_job DELETEs the row\n- [ ] fail_job increments attempts, sets next_retry_at, clears locked_at, records last_error\n- [ ] Backoff: 30s, 60s, 120s, 240s, 480s (capped)\n- [ ] reclaim_stale_locks clears locked_at for jobs older than threshold\n- [ ] count_pending_jobs returns accurate counts by job_type\n\n## Files\n- src/core/dependent_queue.rs (new)\n- src/core/mod.rs (add `pub mod dependent_queue;`)\n\n## TDD Loop\nRED: tests/dependent_queue_tests.rs (new):\n- `test_enqueue_job_basic` - enqueue a job, verify it exists\n- `test_enqueue_job_idempotent` - enqueue same job twice, verify single row\n- `test_claim_jobs_batch` - enqueue 5, claim 3, verify 3 returned and locked\n- `test_claim_jobs_skips_locked` - lock a job, claim again, verify it's skipped\n- `test_claim_jobs_respects_retry_at` - set next_retry_at in future, verify skipped\n- `test_claim_jobs_includes_retryable` - set next_retry_at in past, verify claimed\n- `test_complete_job_deletes` - complete a job, verify gone\n- `test_fail_job_backoff` - fail 3 times, verify exponential next_retry_at values\n- `test_reclaim_stale_locks` - set old locked_at, reclaim, verify cleared\n\nSetup: create_test_db() with migrations 001-011, seed project + issue.\n\nGREEN: Implement all functions\n\nVERIFY: `cargo test dependent_queue -- --nocapture`\n\n## Edge Cases\n- claim_jobs with batch_size=0 should return empty vec (not error)\n- enqueue_job with invalid job_type will be rejected by CHECK constraint — map rusqlite error to LoreError\n- 
fail_job on a non-existent job_id should be a no-op (job may have been completed by another path)\n- reclaim_stale_locks with 0 threshold would reclaim everything — ensure threshold is reasonable (minimum 1 min)\n- Timestamps must use consistent ms epoch UTC (not seconds)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:31:57.290181Z","created_by":"tayloreernisse","updated_at":"2026-02-03T16:19:14.222626Z","closed_at":"2026-02-03T16:19:14.222579Z","close_reason":"Implemented PendingJob struct, enqueue_job, claim_jobs, complete_job, fail_job (with exponential backoff), reclaim_stale_locks, count_pending_jobs in src/core/dependent_queue.rs.","compaction_level":0,"original_size":0,"labels":["gate-1","phase-b","queue"],"dependencies":[{"issue_id":"bd-tir","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-tir","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} -{"id":"bd-u7se","title":"Implement Who screen (5 modes: expert/workload/reviews/active/overlap)","description":"## Background\nThe Who screen is the people explorer, showing contributor expertise and workload across 5 modes. 
Each mode renders differently: Expert shows file-path expertise scores, Workload shows issue/MR assignment counts, Reviews shows review activity, Active shows recent contributors, Overlap shows shared file knowledge.\n\n## Approach\nState (state/who.rs):\n- WhoState: mode (WhoMode), results (WhoResult), path (String), path_input (TextInput), path_focused (bool), selected_index (usize)\n- WhoMode: Expert, Workload, Reviews, Active, Overlap\n- WhoResult: variant per mode with different data shapes\n\nAction (action.rs):\n- fetch_who(conn, mode, path, limit) -> Result: dispatches to existing who query functions in lore CLI (query_experts, query_workload, etc.)\n\nView (view/who.rs):\n- Mode tabs at top: E(xpert) | W(orkload) | R(eviews) | A(ctive) | O(verlap)\n- Expert: path input + sorted table of authors by expertise score + bar chart\n- Workload: stacked bar chart of open issues/MRs per person\n- Reviews: table of review counts (given/received) per person\n- Active: time-sorted list of recent contributors\n- Overlap: matrix or pair-wise table showing shared file knowledge\n- Keyboard: 1-5 or Tab to switch modes, j/k scroll, / focus path input\n\n## Acceptance Criteria\n- [ ] 5 modes switchable via Tab or number keys\n- [ ] Expert mode: path input filters by file path, shows expertise scores\n- [ ] Workload mode: shows assignment counts per person\n- [ ] Reviews mode: shows review activity counts\n- [ ] Active mode: shows recent contributors sorted by activity\n- [ ] Overlap mode: shows shared knowledge between contributors\n- [ ] Each mode renders appropriate visualization\n- [ ] Enter on a person navigates to their issues (scoped issue list)\n\n## Files\n- MODIFY: crates/lore-tui/src/state/who.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_who)\n- CREATE: crates/lore-tui/src/view/who.rs\n\n## TDD Anchor\nRED: Write test_fetch_who_expert that creates notes with diff paths, calls fetch_who(Expert, \"src/\"), asserts authors sorted by 
expertise score.\nGREEN: Implement fetch_who dispatching to existing who queries.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_who\n\n## Edge Cases\n- Empty results for a mode: show \"No data for this mode\" message\n- Expert mode with no diff notes: explain that expert data requires diff notes to be synced\n- Very long file paths in Expert mode: truncate from left (show ...path/to/file.rs)\n\n## Dependency Context\nUses existing who query functions from src/cli/commands/who.rs (made pub).\nUses WhoState from \"Implement AppState composition\" task.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:22.734056Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.085483Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-u7se","depends_on_id":"bd-29qw","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-u7se","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-tiux","title":"Add sync_runs migration 027 for surgical mode columns","description":"## Background\nThe `sync_runs` table (created in migration 001, enriched in 014) tracks sync run lifecycle for observability and crash recovery. Surgical sync needs additional columns to track its distinct mode, phase progression, IID targeting, and per-stage counters. This is a schema-only change — no Rust struct changes beyond registering the migration SQL file.\n\nThe migration system uses a `MIGRATIONS` array in `src/core/db.rs`. Each entry is a `(version, sql_file_name)` tuple. SQL files live in `src/core/migrations/`. The current latest migration is 026 (`026_scoring_indexes.sql`), so this will be migration 027. 
`LATEST_SCHEMA_VERSION` is computed as `MIGRATIONS.len() as i32` and automatically becomes 27.\n\n## Approach\n\n### Step 1: Create migration SQL file: `src/core/migrations/027_surgical_sync_runs.sql`\n\n```sql\n-- Migration 027: Extend sync_runs for surgical sync observability\n-- Adds mode/phase tracking and surgical-specific counters.\n\nALTER TABLE sync_runs ADD COLUMN mode TEXT;\nALTER TABLE sync_runs ADD COLUMN phase TEXT;\nALTER TABLE sync_runs ADD COLUMN surgical_iids_json TEXT;\nALTER TABLE sync_runs ADD COLUMN issues_fetched INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN mrs_fetched INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN issues_ingested INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN mrs_ingested INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN skipped_stale INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN docs_regenerated INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN docs_embedded INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN warnings_count INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN cancelled_at INTEGER;\n\nCREATE INDEX IF NOT EXISTS idx_sync_runs_mode_started\n ON sync_runs(mode, started_at DESC);\nCREATE INDEX IF NOT EXISTS idx_sync_runs_status_phase_started\n ON sync_runs(status, phase, started_at DESC);\n```\n\n**Column semantics:**\n- `mode`: \"standard\" or \"surgical\" (NULL for pre-migration rows)\n- `phase`: preflight, ingest, dependents, docs, embed, done, failed, cancelled\n- `surgical_iids_json`: JSON like `{\"issues\":[7,8],\"mrs\":[101]}`\n- Counter columns: integers with DEFAULT 0 for backward compat\n- `cancelled_at`: ms-epoch timestamp, NULL unless cancelled\n\n### Step 2: Register in MIGRATIONS array (src/core/db.rs)\n\nAdd to the `MIGRATIONS` array (currently 26 entries ending with `026_scoring_indexes.sql`):\n\n```rust\n(27, include_str!(\"migrations/027_surgical_sync_runs.sql\")),\n```\n\n## Acceptance 
Criteria\n- [ ] File `src/core/migrations/027_surgical_sync_runs.sql` exists with all ALTER TABLE and CREATE INDEX statements\n- [ ] Migration 027 is registered in MIGRATIONS array in `src/core/db.rs`\n- [ ] `LATEST_SCHEMA_VERSION` evaluates to 27\n- [ ] Migration runs successfully on fresh databases (in-memory test)\n- [ ] Pre-existing sync_runs rows are unaffected (NULL mode/phase, 0 counters)\n- [ ] New columns accept expected values via INSERT and SELECT round-trip\n- [ ] NULL defaults work for mode, phase, surgical_iids_json, cancelled_at\n- [ ] DEFAULT 0 works for all counter columns\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo test` passes (all migration tests use in-memory DB)\n\n## Files\n- CREATE: src/core/migrations/027_surgical_sync_runs.sql\n- MODIFY: src/core/db.rs (add entry to MIGRATIONS array)\n\n## TDD Anchor\nRED: Write tests in `src/core/sync_run_tests.rs` (which is already `#[path]`-included from `sync_run.rs`):\n\n```rust\n#[test]\nfn sync_run_surgical_columns_exist() {\n let conn = setup_test_db();\n conn.execute(\n \"INSERT INTO sync_runs (started_at, heartbeat_at, status, command, mode, phase, surgical_iids_json)\n VALUES (1000, 1000, 'running', 'sync', 'surgical', 'preflight', '{\\\"issues\\\":[7],\\\"mrs\\\":[]}')\",\n [],\n ).unwrap();\n let (mode, phase, iids_json): (String, String, String) = conn.query_row(\n \"SELECT mode, phase, surgical_iids_json FROM sync_runs WHERE mode = 'surgical'\",\n [],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?)),\n ).unwrap();\n assert_eq!(mode, \"surgical\");\n assert_eq!(phase, \"preflight\");\n assert!(iids_json.contains(\"7\"));\n}\n\n#[test]\nfn sync_run_counter_defaults_are_zero() {\n let conn = setup_test_db();\n conn.execute(\n \"INSERT INTO sync_runs (started_at, heartbeat_at, status, command)\n VALUES (2000, 2000, 'running', 'sync')\",\n [],\n ).unwrap();\n let row_id = conn.last_insert_rowid();\n let (issues_fetched, mrs_fetched, docs_regenerated, warnings_count): (i64, i64, i64, i64) = 
conn.query_row(\n \"SELECT issues_fetched, mrs_fetched, docs_regenerated, warnings_count FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?, r.get(3)?)),\n ).unwrap();\n assert_eq!(issues_fetched, 0);\n assert_eq!(mrs_fetched, 0);\n assert_eq!(docs_regenerated, 0);\n assert_eq!(warnings_count, 0);\n}\n\n#[test]\nfn sync_run_nullable_columns_default_to_null() {\n let conn = setup_test_db();\n conn.execute(\n \"INSERT INTO sync_runs (started_at, heartbeat_at, status, command)\n VALUES (3000, 3000, 'running', 'sync')\",\n [],\n ).unwrap();\n let row_id = conn.last_insert_rowid();\n let (mode, phase, cancelled_at): (Option, Option, Option) = conn.query_row(\n \"SELECT mode, phase, cancelled_at FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?)),\n ).unwrap();\n assert!(mode.is_none());\n assert!(phase.is_none());\n assert!(cancelled_at.is_none());\n}\n\n#[test]\nfn sync_run_counter_round_trip() {\n let conn = setup_test_db();\n conn.execute(\n \"INSERT INTO sync_runs (started_at, heartbeat_at, status, command, mode, issues_fetched, mrs_ingested, docs_embedded)\n VALUES (4000, 4000, 'succeeded', 'sync', 'surgical', 3, 2, 5)\",\n [],\n ).unwrap();\n let row_id = conn.last_insert_rowid();\n let (issues_fetched, mrs_ingested, docs_embedded): (i64, i64, i64) = conn.query_row(\n \"SELECT issues_fetched, mrs_ingested, docs_embedded FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?)),\n ).unwrap();\n assert_eq!(issues_fetched, 3);\n assert_eq!(mrs_ingested, 2);\n assert_eq!(docs_embedded, 5);\n}\n```\n\nGREEN: Create the SQL file and register the migration.\nVERIFY: `cargo test sync_run_surgical && cargo test sync_run_counter && cargo test sync_run_nullable`\n\n## Edge Cases\n- SQLite ALTER TABLE ADD COLUMN requires DEFAULT for NOT NULL columns. 
All counter columns use `DEFAULT 0`.\n- mode/phase/surgical_iids_json/cancelled_at are nullable TEXT/INTEGER — no DEFAULT needed.\n- Pre-migration rows get NULL for new nullable columns and 0 for counter columns — backward compatible.\n- The indexes (`idx_sync_runs_mode_started`, `idx_sync_runs_status_phase_started`) use `IF NOT EXISTS` for idempotency.\n\n## Dependency Context\nThis is a leaf/foundation bead with no upstream dependencies. Downstream bead bd-arka (SyncRunRecorder extensions) depends on these columns existing to write surgical mode lifecycle data.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:13:19.914672Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:03:28.195017Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-tiux","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-tiux","depends_on_id":"bd-arka","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} +{"id":"bd-u7se","title":"Implement Who screen (5 modes: expert/workload/reviews/active/overlap)","description":"## Background\nThe Who screen is the people explorer, showing contributor expertise and workload across 5 modes. Each mode renders differently: Expert shows file-path expertise scores, Workload shows issue/MR assignment counts, Reviews shows review activity, Active shows recent contributors, Overlap shows shared file knowledge.\n\nOn master, the who command was refactored from a single who.rs into src/cli/commands/who/ module with types.rs, expert.rs, workload.rs, reviews.rs, active.rs, overlap.rs. Types are cleanly separated in types.rs. 
Query functions are currently pub(super) — bd-1f5b promotes them to pub and moves types to core.\n\n## Data Shapes (from src/cli/commands/who/types.rs on master)\n\nResult types are per-mode:\n- WhoResult enum: Expert(ExpertResult), Workload(WorkloadResult), Reviews(ReviewsResult), Active(ActiveResult), Overlap(OverlapResult)\n- ExpertResult: path_query, path_match, experts Vec, truncated — Expert has username, score, components, mr_refs, details\n- WorkloadResult: username, assigned_issues, authored_mrs, reviewing_mrs, unresolved_discussions (each with truncated flag)\n- ReviewsResult: username, total_diffnotes, categorized_count, mrs_reviewed, categories Vec\n- ActiveResult: discussions Vec, total_unresolved_in_window, truncated\n- OverlapResult: path_query, path_match, users Vec, truncated\n\nAfter bd-1f5b, these live in src/core/who_types.rs.\n\n## Query Function Signatures (after bd-1f5b promotes visibility)\n\n```rust\n// expert.rs — path-based file expertise\npub fn query_expert(conn, path, project_id, since_ms, as_of_ms, limit, scoring: &ScoringConfig, detail, explain_score, include_bots) -> Result\n\n// workload.rs — username-based assignment view\npub fn query_workload(conn, username, project_id, since_ms: Option, limit, include_closed: bool) -> Result\n\n// reviews.rs — username-based review activity\npub fn query_reviews(conn, username, project_id, since_ms) -> Result\n\n// active.rs — recent unresolved discussions\npub fn query_active(conn, project_id, since_ms, limit, include_closed: bool) -> Result\n\n// overlap.rs — shared file knowledge between contributors\npub fn query_overlap(conn, path, project_id, since_ms, limit) -> Result\n```\n\nNote: include_closed only affects query_workload and query_active. 
Expert, Reviews, and Overlap ignore it.\n\n## Approach\n\n**State** (state/who.rs):\n- WhoState: mode (WhoMode), result (Option), path (String), path_input (TextInput), username_input (TextInput), path_focused (bool), username_focused (bool), selected_index (usize), include_closed (bool), scroll_offset (u16)\n- WhoMode enum: Expert, Workload, Reviews, Active, Overlap\n- Expert and Overlap modes need a path input. Workload and Reviews need a username input. Active needs neither.\n\n**Action** (action.rs):\n- fetch_who_expert(conn, path, project_id, since_ms, limit, scoring) -> Result\n- fetch_who_workload(conn, username, project_id, since_ms, limit, include_closed) -> Result\n- fetch_who_reviews(conn, username, project_id, since_ms) -> Result\n- fetch_who_active(conn, project_id, since_ms, limit, include_closed) -> Result\n- fetch_who_overlap(conn, path, project_id, since_ms, limit) -> Result\nEach wraps the corresponding query_* function from who module.\n\n**View** (view/who.rs):\n- Mode tabs at top: E(xpert) | W(orkload) | R(eviews) | A(ctive) | O(verlap)\n- Input area adapts to mode: path input for Expert/Overlap, username input for Workload/Reviews, hidden for Active\n- Expert: sorted table of authors by expertise score + bar chart\n- Workload: sections for assigned issues, authored MRs, reviewing MRs, unresolved discussions\n- Reviews: table of review categories with counts and percentages\n- Active: time-sorted list of recent unresolved discussions with participants\n- Overlap: table of users with author/review touch counts\n- Keyboard: 1-5 or Tab to switch modes, j/k scroll, / focus input, c toggle include-closed, q back\n- Status bar indicator shows [closed: on/off] when include_closed is toggled\n- Truncation indicators: when result.truncated is true, show \"showing N of more\" footer\n\n## Acceptance Criteria\n- [ ] 5 modes switchable via Tab or number keys\n- [ ] Expert mode: path input filters by file path, shows expertise scores in table with bar 
chart\n- [ ] Workload mode: username input, shows 4 sections (assigned issues, authored MRs, reviewing MRs, unresolved discussions)\n- [ ] Reviews mode: username input, shows review category breakdown table\n- [ ] Active mode: no input needed, shows recent unresolved discussions sorted by last_note_at\n- [ ] Overlap mode: path input, shows table of users with touch counts\n- [ ] Toggle for include-closed (c key) with visual indicator — re-fetches only Workload and Active modes\n- [ ] Truncation footer when results exceed limit\n- [ ] Enter on a person in Expert/Overlap navigates to Workload for that username\n- [ ] Enter on an entity in Workload/Active navigates to IssueDetail or MrDetail\n\n## Files\n- MODIFY: crates/lore-tui/src/state/who.rs (expand from current 12-line stub)\n- MODIFY: crates/lore-tui/src/state/mod.rs (update WhoState import, add to has_text_focus/blur_text_focus)\n- MODIFY: crates/lore-tui/src/message.rs (replace placeholder WhoResult with import from core, add WhoMode enum, add Msg::WhoModeChanged, Msg::WhoIncludeClosedToggled)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_who_* functions)\n- CREATE: crates/lore-tui/src/view/who.rs\n- MODIFY: crates/lore-tui/src/view/mod.rs (add who view dispatch)\n\n## TDD Anchor\nRED: Write test_fetch_who_expert_returns_result that opens in-memory DB, inserts test MR + file changes + notes, calls fetch_who_expert(\"src/\"), asserts ExpertResult with one expert.\nGREEN: Implement fetch_who_expert calling query_expert from who module.\nVERIFY: cargo test -p lore-tui who -- --nocapture\n\nAdditional tests:\n- test_who_mode_switching: cycle through 5 modes, assert input field visibility changes\n- test_include_closed_only_affects_workload_active: toggle include_closed, verify Expert/Reviews/Overlap dont re-fetch\n- test_who_empty_result: mode with no data shows empty state message\n- test_who_truncation_indicator: result with truncated=true shows footer\n\n## Edge Cases\n- Empty results for any mode: show 
\"No data\" message with mode-specific hint\n- Expert mode with no diff notes: explain that expert data requires diff notes to be synced\n- Very long file paths: truncate from left (show ...path/to/file.rs)\n- include_closed toggle re-fetches immediately for Workload/Active, no-op for other modes\n- Workload unresolved_discussions may reference closed entities — include_closed=true shows them\n- ScoringConfig accessed from Config (available to TUI via db.rs module)\n\n## Dependency Context\n- bd-1f5b (blocks): Promotes query_expert, query_workload, query_reviews, query_active, query_overlap to pub and moves types to src/core/who_types.rs. Without this, TUI cannot call who queries.\n- Current WhoState stub (12 lines) in state/who.rs references message::WhoResult placeholder — must be replaced with core types.\n- AppState.has_text_focus() in state/mod.rs:194-198 must be updated to include who path_focused and username_focused.\n- AppState.blur_text_focus() in state/mod.rs:202-206 must be updated similarly.\n- Navigation from Expert/Overlap rows: Enter on a username should push Screen::Who with mode=Workload pre-filled — requires passing username to WhoState.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:22.734056Z","created_by":"tayloreernisse","updated_at":"2026-02-18T18:32:30.621517Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-u7se","depends_on_id":"bd-29qw","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-u7se","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-v6i","title":"[CP1] gi ingest --type=issues command","description":"## Background\n\nThe `gi ingest --type=issues` command is the main entry point for issue ingestion. 
It acquires a single-flight lock, calls the orchestrator for each configured project, and outputs progress/summary to the user.\n\n## Approach\n\n### Module: src/cli/commands/ingest.rs\n\n### Clap Definition\n\n```rust\n#[derive(Args)]\npub struct IngestArgs {\n /// Resource type to ingest\n #[arg(long, value_parser = [\"issues\", \"merge_requests\"])]\n pub r#type: String,\n\n /// Filter to single project\n #[arg(long)]\n pub project: Option,\n\n /// Override stale sync lock\n #[arg(long)]\n pub force: bool,\n}\n```\n\n### Handler Function\n\n```rust\npub async fn handle_ingest(args: IngestArgs, config: &Config) -> Result<()>\n```\n\n### Logic\n\n1. **Acquire single-flight lock**: `acquire_sync_lock(conn, args.force)?`\n2. **Get projects to sync**:\n - If `args.project` specified, filter to that one\n - Otherwise, get all configured projects from DB\n3. **For each project**:\n - Print \"Ingesting issues for {project_path}...\"\n - Call `ingest_project_issues(conn, client, config, project_id, gitlab_project_id)`\n - Print \"{N} issues fetched, {M} new labels\"\n4. **Print discussion sync summary**:\n - \"Fetching discussions ({N} issues with updates)...\"\n - \"{N} discussions, {M} notes (excluding {K} system notes)\"\n - \"Skipped discussion sync for {N} unchanged issues.\"\n5. 
**Release lock**: Lock auto-released when handler returns\n\n### Output Format (matches PRD)\n\n```\nIngesting issues...\n\n group/project-one: 1,234 issues fetched, 45 new labels\n\nFetching discussions (312 issues with updates)...\n\n group/project-one: 312 issues → 1,234 discussions, 5,678 notes\n\nTotal: 1,234 issues, 1,234 discussions, 5,678 notes (excluding 1,234 system notes)\nSkipped discussion sync for 922 unchanged issues.\n```\n\n## Acceptance Criteria\n\n- [ ] Clap args parse --type, --project, --force correctly\n- [ ] Single-flight lock acquired before sync starts\n- [ ] Lock error message is clear if concurrent run attempted\n- [ ] Progress output shows per-project counts\n- [ ] Summary includes unchanged issues skipped count\n- [ ] --force flag allows overriding stale lock\n\n## Files\n\n- src/cli/commands/mod.rs (add `pub mod ingest;`)\n- src/cli/commands/ingest.rs (create)\n- src/cli/mod.rs (add Ingest variant to Commands enum)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/cli_ingest_tests.rs\n#[tokio::test] async fn ingest_issues_acquires_lock()\n#[tokio::test] async fn ingest_issues_fails_on_concurrent_run()\n#[tokio::test] async fn ingest_issues_respects_project_filter()\n#[tokio::test] async fn ingest_issues_force_overrides_stale_lock()\n```\n\nGREEN: Implement handler with lock and orchestrator calls\n\nVERIFY: `cargo test cli_ingest`\n\n## Edge Cases\n\n- No projects configured - return early with helpful message\n- Project filter matches nothing - error with \"project not found\"\n- Lock already held - clear error \"Sync already in progress\"\n- Ctrl-C during sync - lock should be released (via Drop or SIGINT 
handler)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.312565Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:56:44.090142Z","closed_at":"2026-01-25T22:56:44.090086Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-v6i","depends_on_id":"bd-ozy","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-v6tc","title":"Description","description":"This is a test","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:52:04.745618Z","updated_at":"2026-02-12T16:52:10.755235Z","closed_at":"2026-02-12T16:52:10.755188Z","close_reason":"test artifacts","compaction_level":0,"original_size":0} +{"id":"bd-wcja","title":"Extend SyncResult with surgical mode fields for robot output","description":"## Background\n\nRobot mode (`--robot`) serializes `SyncResult` as JSON for machine consumers. Currently `SyncResult` (lines 31-52 of `src/cli/commands/sync.rs`) only has fields for normal full sync. Surgical sync needs additional metadata in the JSON response: whether surgical mode was active, which IIDs were requested, per-entity outcomes, and whether it was a preflight-only run. 
These must be `Option` fields so normal sync serialization is unchanged (serde `skip_serializing_if = \"Option::is_none\"`).\n\n## Approach\n\nAdd four `Option` fields to the existing `SyncResult` struct:\n\n```rust\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub surgical_mode: Option,\n\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub surgical_iids: Option,\n\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub entity_results: Option>,\n\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub preflight_only: Option,\n```\n\nDefine two new supporting structs in the same file:\n\n```rust\n#[derive(Debug, Default, Serialize)]\npub struct SurgicalIids {\n pub issues: Vec,\n pub merge_requests: Vec,\n}\n\n#[derive(Debug, Serialize)]\npub struct EntitySyncResult {\n pub entity_type: String, // \"issue\" or \"merge_request\"\n pub iid: u64,\n pub outcome: String, // \"synced\", \"skipped_toctou\", \"failed\", \"not_found\"\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub error: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub toctou_reason: Option,\n}\n```\n\nBecause `SyncResult` derives `Default`, the new `Option` fields default to `None` automatically. Non-surgical callers need zero changes.\n\n## Acceptance Criteria\n\n1. `SyncResult` compiles with all four new `Option` fields\n2. `SurgicalIids` and `EntitySyncResult` are defined with `Serialize` derive\n3. Serializing a `SyncResult` with surgical fields set produces JSON with `surgical_mode`, `surgical_iids`, `entity_results`, `preflight_only` keys\n4. Serializing a default `SyncResult` (all `None`) produces JSON identical to current output (no surgical keys)\n5. `SyncResult::default()` still works without specifying new fields\n6. 
All existing tests pass unchanged\n\n## Files\n\n- `src/cli/commands/sync.rs` — add fields to `SyncResult`, define `SurgicalIids` and `EntitySyncResult`\n\n## TDD Anchor\n\nAdd a test module or extend the existing one in `src/cli/commands/sync.rs` (or a new `sync_tests.rs` file):\n\n```rust\n#[cfg(test)]\nmod surgical_result_tests {\n use super::*;\n\n #[test]\n fn sync_result_default_omits_surgical_fields() {\n let result = SyncResult::default();\n let json = serde_json::to_value(&result).unwrap();\n assert!(json.get(\"surgical_mode\").is_none());\n assert!(json.get(\"surgical_iids\").is_none());\n assert!(json.get(\"entity_results\").is_none());\n assert!(json.get(\"preflight_only\").is_none());\n }\n\n #[test]\n fn sync_result_with_surgical_fields_serializes_correctly() {\n let result = SyncResult {\n surgical_mode: Some(true),\n surgical_iids: Some(SurgicalIids {\n issues: vec![7, 42],\n merge_requests: vec![10],\n }),\n entity_results: Some(vec![\n EntitySyncResult {\n entity_type: \"issue\".to_string(),\n iid: 7,\n outcome: \"synced\".to_string(),\n error: None,\n toctou_reason: None,\n },\n EntitySyncResult {\n entity_type: \"issue\".to_string(),\n iid: 42,\n outcome: \"skipped_toctou\".to_string(),\n error: None,\n toctou_reason: Some(\"updated_at changed\".to_string()),\n },\n ]),\n preflight_only: Some(false),\n ..SyncResult::default()\n };\n let json = serde_json::to_value(&result).unwrap();\n assert_eq!(json[\"surgical_mode\"], true);\n assert_eq!(json[\"surgical_iids\"][\"issues\"], serde_json::json!([7, 42]));\n assert_eq!(json[\"entity_results\"].as_array().unwrap().len(), 2);\n assert_eq!(json[\"entity_results\"][1][\"outcome\"], \"skipped_toctou\");\n assert_eq!(json[\"preflight_only\"], false);\n }\n\n #[test]\n fn entity_sync_result_omits_none_fields() {\n let entity = EntitySyncResult {\n entity_type: \"merge_request\".to_string(),\n iid: 10,\n outcome: \"synced\".to_string(),\n error: None,\n toctou_reason: None,\n };\n let json = 
serde_json::to_value(&entity).unwrap();\n assert!(json.get(\"error\").is_none());\n assert!(json.get(\"toctou_reason\").is_none());\n assert!(json.get(\"entity_type\").is_some());\n }\n}\n```\n\n## Edge Cases\n\n- `entity_results: Some(vec![])` — empty vec serializes as `[]`, not omitted. This is correct for \"surgical mode ran but had no entities to process.\"\n- `surgical_iids` with empty vecs — valid for edge case where user passes `--issue` but all IIDs are filtered out before sync.\n- Ensure `EntitySyncResult.outcome` uses a fixed set of string values. Consider a future enum, but `String` is fine for initial implementation to keep serialization simple.\n\n## Dependency Context\n\n- **No upstream dependencies** — this bead only adds struct fields, no behavioral changes.\n- **Downstream**: bd-1i4i (orchestrator) populates these fields. bd-3bec (wiring) passes them through.\n- The `#[derive(Default)]` on `SyncResult` means all `Option` fields are `None` by default, so this is a fully additive change.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:17:03.915330Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:02:01.980946Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-wcja","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-wnuo","title":"Implement performance benchmark fixtures (S/M/L tiers)","description":"## Background\nTiered performance fixtures validate latency at three data scales. S and M tiers are CI-enforced gates; L tier is advisory. 
Fixtures are synthetic SQLite databases with realistic data distributions.\n\n## Approach\nFixture generator (benches/ or tests/fixtures/):\n- S-tier: 10k issues, 5k MRs, 50k notes, 10k docs\n- M-tier: 100k issues, 50k MRs, 500k notes, 50k docs\n- L-tier: 250k issues, 100k MRs, 1M notes, 100k docs\n- Realistic distributions: state (60% closed, 30% opened, 10% other), authors from pool of 50 names, labels from pool of 20, dates spanning 2 years\n\nBenchmarks:\n- p95 first-paint latency: Dashboard load, Issue List load, MR List load\n- p95 keyset pagination: next page fetch\n- p95 search latency: lexical and hybrid modes\n- Memory ceiling: RSS after full dashboard + list load\n- SLO assertions per tier (see Phase 0 criteria)\n\nRequired indexes must be present in fixture DBs:\n- idx_issues_list_default, idx_mrs_list_default, idx_discussions_entity, idx_notes_discussion\n\n## Acceptance Criteria\n- [ ] S-tier fixture generated with correct counts\n- [ ] M-tier fixture generated with correct counts\n- [ ] L-tier fixture generated (on-demand, not CI)\n- [ ] p95 first-paint < 50ms (S), < 75ms (M), < 150ms (L)\n- [ ] p95 keyset pagination < 50ms (S), < 75ms (M), < 100ms (L)\n- [ ] p95 search latency < 100ms (S), < 200ms (M), < 400ms (L)\n- [ ] Memory < 150MB RSS (S), < 250MB RSS (M)\n- [ ] All required indexes present in fixtures\n- [ ] EXPLAIN QUERY PLAN shows index usage for top 10 queries\n\n## Files\n- CREATE: crates/lore-tui/benches/perf_benchmarks.rs\n- CREATE: crates/lore-tui/tests/fixtures/generate_fixtures.rs\n\n## TDD Anchor\nRED: Write benchmark_dashboard_load_s_tier that generates S-tier fixture, measures Dashboard load time, asserts p95 < 50ms.\nGREEN: Implement fetch_dashboard with efficient queries.\nVERIFY: cargo bench --manifest-path crates/lore-tui/Cargo.toml\n\n## Edge Cases\n- Fixture generation must be deterministic (seeded RNG) for reproducible benchmarks\n- CI machines may be slower — use generous multipliers or relative thresholds\n- S-tier fits in 
memory; M-tier requires WAL mode for concurrent access\n- Benchmark warmup: discard first 5 iterations\n\n## Dependency Context\nUses all action.rs query functions from Phase 2/3 tasks.\nUses DbManager from \"Implement DbManager\" task.\nUses required index migrations from the main lore crate.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:05:12.867291Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:38.463811Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-wnuo","depends_on_id":"bd-1b6k","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-wnuo","depends_on_id":"bd-3eis","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-wrw1","title":"Implement CLI/TUI parity tests (counts, lists, detail, search, sanitization)","description":"## Background\nParity tests ensure the TUI and CLI show the same data. Both interfaces query the same SQLite database, but through different code paths (TUI action functions vs CLI command handlers). Drift can occur when query functions are duplicated or modified independently. These tests catch drift by running both code paths against the same in-memory DB and comparing results.\n\n## Approach\n\n### Test Strategy: Library-Level (Same Process)\nTests run in the same process with a shared in-memory SQLite DB. No binary execution, no JSON parsing, no process spawning. 
Both TUI action functions and CLI query functions are called as library code.\n\nSetup pattern:\n```rust\nuse lore::core::db::{create_connection, run_migrations};\nuse std::path::Path;\n\nfn setup_parity_db() -> rusqlite::Connection {\n let conn = create_connection(Path::new(\":memory:\")).unwrap();\n run_migrations(&conn).unwrap();\n insert_fixture_data(&conn); // shared fixture with known counts\n conn\n}\n```\n\n### Fixture Data\nCreate a deterministic fixture with known quantities:\n- 1 project (gitlab_project_id=1, path_with_namespace=\"group/repo\", web_url=\"https://gitlab.example.com/group/repo\")\n- 15 issues (5 opened, 5 closed, 5 with various states)\n- 10 merge_requests (3 opened, 3 merged, 2 closed, 2 draft)\n- 30 discussions (20 for issues, 10 for MRs)\n- 60 notes (2 per discussion)\n- Insert via direct SQL (same pattern as existing tests in src/core/db.rs)\n\n### Parity Checks\n\n**Dashboard Count Parity:**\n- TUI: call the dashboard fetch function that returns entity counts\n- CLI: call the same count query functions used by `lore --robot count`\n- Assert: issue_count, mr_count, discussion_count, note_count all match\n\n**Issue List Parity:**\n- TUI: call issue list action with default filter (state=all, limit=50, sort=updated_at DESC)\n- CLI: call the issue list query used by `lore --robot issues`\n- Assert: same IIDs in same order, same state values for each\n\n**MR List Parity:**\n- TUI: call MR list action with default filter\n- CLI: call the MR list query used by `lore --robot mrs`\n- Assert: same IIDs in same order, same state values, same draft flags\n\n**Issue Detail Parity:**\n- TUI: call issue detail fetch for a specific IID\n- CLI: call the issue detail query used by `lore --robot issues `\n- Assert: same metadata fields (title, state, author, labels, created_at, updated_at), same discussion count\n\n**Search Parity:**\n- TUI: call search action with a known query term\n- CLI: call the search function used by `lore --robot search`\n- 
Assert: same document IDs returned in same rank order\n\n**Sanitization Parity:**\n- Insert an issue with ANSI escape sequences in the title: \"Normal \\x1b[31mRED\\x1b[0m text\"\n- TUI: fetch and sanitize via terminal safety module\n- CLI: fetch and render via robot mode (which strips ANSI)\n- Assert: both produce clean output without raw escape sequences\n\n## Acceptance Criteria\n- [ ] Dashboard counts: TUI == CLI for issues, MRs, discussions, notes on shared fixture\n- [ ] Issue list: TUI returns same IIDs in same order as CLI query function\n- [ ] MR list: TUI returns same IIDs in same order as CLI query function\n- [ ] Issue detail: TUI metadata matches CLI for title, state, author, discussion count\n- [ ] Search results: same document IDs in same rank order\n- [ ] Sanitization: both strip ANSI escape sequences from issue titles\n- [ ] All tests use in-memory DB (no file I/O, no binary spawning)\n- [ ] Tests are deterministic (fixed fixture, no wall clock dependency)\n\n## Files\n- CREATE: crates/lore-tui/tests/parity_tests.rs\n\n## TDD Anchor\nRED: Write `test_dashboard_count_parity` that creates shared fixture DB, calls both TUI dashboard fetch and CLI count query functions, asserts all counts equal.\nGREEN: Ensure TUI query functions exist and match CLI query logic.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml parity\n\nAdditional tests:\n- test_issue_list_parity\n- test_mr_list_parity\n- test_issue_detail_parity\n- test_search_parity\n- test_sanitization_parity\n\n## Edge Cases\n- CLI and TUI may use different default sort orders — normalize to same ORDER BY in test setup\n- CLI list commands default to limit=50, TUI may default to page size — test with explicit limit\n- Fixture must include edge cases: NULL labels, empty descriptions, issues with work item status set\n- Schema version must match between both code paths (same migration version)\n- FTS index must be populated for search parity (call generate-docs equivalent on 
fixture)\n\n## Dependency Context\n- Uses TUI action functions from Phase 2/3 screen beads (must exist as library code)\n- Uses CLI query functions from src/cli/ (already exist as `lore` library exports)\n- Uses lore::core::db for shared DB setup\n- Uses terminal safety module (bd-3ir1) for sanitization comparison\n- Depends on bd-14hv (soak tests) being complete per phase ordering","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:05:51.620596Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:38.629958Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-wrw1","depends_on_id":"bd-14hv","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-wrw1","depends_on_id":"bd-2o49","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-wzqi","title":"Implement Command Palette (state + view)","description":"## Background\nThe Command Palette is a modal overlay (Ctrl+P) that provides fuzzy-match access to all commands. 
It uses FrankenTUI's built-in CommandPalette widget and is populated from the CommandRegistry.\n\n## Approach\nState (state/command_palette.rs):\n- CommandPaletteState: wraps ftui CommandPalette widget state\n- input (String), filtered_commands (Vec), selected_index (usize), visible (bool)\n\nView (view/command_palette.rs):\n- Modal overlay centered on screen (60% width, 50% height)\n- Text input at top for fuzzy search\n- Scrollable list of matching commands with keybinding hints\n- Enter executes selected command, Esc closes palette\n- Fuzzy matching: subsequence match on command label and help text\n\nIntegration:\n- Ctrl+P from any screen opens palette (handled in interpret_key stage 2)\n- execute_palette_action() in app.rs converts selected command to Msg\n\n## Acceptance Criteria\n- [ ] Ctrl+P opens palette from any screen in Normal mode\n- [ ] Fuzzy matching filters commands as user types\n- [ ] Commands show label + keybinding + help text\n- [ ] Enter executes selected command\n- [ ] Esc closes palette without action\n- [ ] Palette populated from CommandRegistry (single source of truth)\n- [ ] Modal renders on top of current screen content\n\n## Files\n- MODIFY: crates/lore-tui/src/state/command_palette.rs (expand from stub)\n- CREATE: crates/lore-tui/src/view/command_palette.rs\n\n## TDD Anchor\nRED: Write test_palette_fuzzy_match that creates registry with 5 commands, filters with \"iss\", asserts Issue-related commands match.\nGREEN: Implement fuzzy matching on command labels.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_palette_fuzzy\n\n## Edge Cases\n- Empty search shows all commands\n- Very long command labels: truncate with ellipsis\n- Command not available on current screen: show but gray out\n- Palette should not steal focus from text inputs — only opens in Normal mode\n\n## Dependency Context\nUses CommandRegistry from \"Implement CommandRegistry\" task.\nUses ftui CommandPalette widget from FrankenTUI.\nUses 
InputMode::Palette from \"Implement core types\" task.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:37.250065Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.175286Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-wzqi","depends_on_id":"bd-35g5","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-wzqi","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-x8oq","title":"Write surgical_tests.rs with TDD test suite","description":"## Background\n\nThe surgical sync module (`src/ingestion/surgical.rs` from bd-3sez) needs a comprehensive test suite. Tests use in-memory SQLite (no real GitLab or Ollama) and wiremock for HTTP mocks. The test file lives at `src/ingestion/surgical_tests.rs` and is included via `#[cfg(test)] #[path = \"surgical_tests.rs\"] mod tests;` in surgical.rs.\n\nKey testing constraints:\n- In-memory DB pattern: `create_connection(Path::new(\":memory:\"))` + `run_migrations(&conn)`\n- Test project insert: `INSERT INTO projects (gitlab_project_id, path_with_namespace, web_url)` (no `name`/`last_seen_at` columns)\n- `GitLabIssue` required fields: `id`, `iid`, `project_id`, `title`, `state`, `created_at`, `updated_at`, `author`, `web_url`\n- `GitLabMergeRequest` adds: `source_branch`, `target_branch`, `draft`, `merge_status`, `reviewers`\n- `updated_at` is `String` (ISO 8601) in GitLab types, e.g. 
`\"2026-02-17T12:00:00.000+00:00\"`\n- `SourceType` enum variants: `Issue`, `MergeRequest`, `Discussion`, `Note`\n- `dirty_sources` table: `(source_type TEXT, source_id INTEGER)` primary key\n\n## Approach\n\nCreate `src/ingestion/surgical_tests.rs` with:\n\n### Test Helpers\n- `setup_db() -> Connection` — in-memory DB with migrations + test project row\n- `make_test_issue(iid: i64, updated_at: &str) -> GitLabIssue` — minimal valid JSON fixture\n- `make_test_mr(iid: i64, updated_at: &str) -> GitLabMergeRequest` — minimal valid JSON fixture\n- `get_db_updated_at(conn, table, iid) -> Option` — helper to query DB updated_at for assertions\n- `get_dirty_keys(conn) -> Vec<(String, i64)>` — query dirty_sources for assertions\n\n### Sync Tests (13)\n1. `test_ingest_issue_by_iid_upserts_and_marks_dirty` — fresh issue ingest, verify DB row + dirty_sources entry\n2. `test_ingest_mr_by_iid_upserts_and_marks_dirty` — fresh MR ingest, verify DB row + dirty_sources entry\n3. `test_toctou_skips_stale_issue` — insert issue at T1, call ingest with payload at T1, assert skipped_stale=true and no dirty mark\n4. `test_toctou_skips_stale_mr` — same for MRs\n5. `test_toctou_allows_newer_issue` — DB has T1, payload has T2 (T2 > T1), assert upserted=true\n6. `test_toctou_allows_newer_mr` — same for MRs\n7. `test_is_stale_parses_iso8601` — unit test: `\"2026-02-17T12:00:00.000+00:00\"` parses to correct ms-epoch\n8. `test_is_stale_handles_none_db_value` — first ingest, no DB row, assert not stale\n9. `test_is_stale_with_z_suffix` — `\"2026-02-17T12:00:00Z\"` also parses correctly\n10. `test_ingest_issue_returns_dirty_source_keys` — verify `dirty_source_keys` contains `(SourceType::Issue, local_id)`\n11. `test_ingest_mr_returns_dirty_source_keys` — verify MR dirty source keys\n12. `test_ingest_issue_updates_existing` — ingest same IID twice with newer updated_at, verify update\n13. `test_ingest_mr_updates_existing` — same for MRs\n\n### Async Preflight Test (1, wiremock)\n14. 
`test_preflight_fetch_returns_issues_and_mrs` — wiremock GET `/projects/:id/issues?iids[]=42` returns 200 with fixture, verify PreflightResult.issues has 1 entry\n\n### Integration Stubs (4, for bd-3jqx)\n15. `test_surgical_cancellation_during_preflight` — stub: signal.cancel() before preflight, verify early return\n16. `test_surgical_timeout_during_fetch` — stub: wiremock delay exceeds timeout\n17. `test_surgical_embed_isolation` — stub: verify only surgical docs get embedded\n18. `test_surgical_payload_integrity` — stub: verify ingested data matches GitLab payload exactly\n\n## Acceptance Criteria\n\n- [ ] All 13 sync tests pass with in-memory SQLite\n- [ ] Async preflight test passes with wiremock\n- [ ] 4 integration stubs compile and are marked `#[ignore]` (implemented in bd-3jqx)\n- [ ] Test helpers produce valid GitLabIssue/GitLabMergeRequest fixtures that pass `transform_issue`/`transform_merge_request`\n- [ ] No flaky tests: deterministic timestamps, no real network calls\n- [ ] File wired into surgical.rs via `#[cfg(test)] #[path = \"surgical_tests.rs\"] mod tests;`\n\n## Files\n\n- `src/ingestion/surgical_tests.rs` (NEW)\n- `src/ingestion/surgical.rs` (add `#[cfg(test)]` module path — created in bd-3sez)\n\n## TDD Anchor\n\nThis bead IS the test suite. Tests are written first (TDD red phase), then bd-3sez implements the production code to make them pass (green phase). 
Specific test signatures:\n\n```rust\n#[test]\nfn test_ingest_issue_by_iid_upserts_and_marks_dirty() {\n let conn = setup_db();\n let issue = make_test_issue(42, \"2026-02-17T12:00:00.000+00:00\");\n let config = Config::default();\n let result = ingest_issue_by_iid(&conn, &config, /*project_id=*/1, &issue).unwrap();\n assert!(result.upserted);\n assert!(!result.skipped_stale);\n let dirty = get_dirty_keys(&conn);\n assert!(dirty.contains(&(\"issue\".to_string(), /*local_id from DB*/)));\n}\n\n#[test]\nfn test_toctou_skips_stale_issue() {\n let conn = setup_db();\n let issue = make_test_issue(42, \"2026-02-17T12:00:00.000+00:00\");\n ingest_issue_by_iid(&conn, &Config::default(), 1, &issue).unwrap();\n // Ingest same timestamp again\n let result = ingest_issue_by_iid(&conn, &Config::default(), 1, &issue).unwrap();\n assert!(result.skipped_stale);\n}\n\n#[tokio::test]\nasync fn test_preflight_fetch_returns_issues_and_mrs() {\n let mock = MockServer::start().await;\n // ... wiremock setup ...\n}\n```\n\n## Edge Cases\n\n- `make_test_issue` must produce all required fields (`id`, `iid`, `project_id`, `title`, `state`, `created_at`, `updated_at`, `author` with `username` and `id`, `web_url`) or `transform_issue` will fail\n- `make_test_mr` additionally needs `source_branch`, `target_branch`, `draft`, `merge_status`, `reviewers`\n- ISO 8601 fixtures must use `+00:00` suffix (GitLab format), not `Z`\n- Integration stubs must be `#[ignore]` so they do not fail CI before bd-3jqx implements them\n- Test DB needs `run_migrations` to create all tables including `dirty_sources`, `documents`, `issues`, `merge_requests`\n\n## Dependency Context\n\n- **Blocked by bd-3sez**: Cannot compile tests until surgical.rs module exists (circular co-dependency — develop together)\n- **Blocks bd-3jqx**: Integration test stubs are implemented in that bead\n- **No other blockers**: Uses only in-memory DB and wiremock, no external 
dependencies","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:15:05.498388Z","created_by":"tayloreernisse","updated_at":"2026-02-17T20:02:42.840151Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"]} {"id":"bd-xhz","title":"[CP1] GitLab client pagination methods","description":"## Background\n\nGitLab pagination methods enable fetching large result sets (issues, discussions) as async streams. The client uses `x-next-page` headers to determine continuation and applies cursor rewind for tuple-based incremental sync.\n\n## Approach\n\nAdd pagination methods to GitLabClient using `async-stream` crate:\n\n### Methods to Add\n\n```rust\nimpl GitLabClient {\n /// Paginate through issues for a project.\n pub fn paginate_issues(\n &self,\n gitlab_project_id: i64,\n updated_after: Option, // ms epoch cursor\n cursor_rewind_seconds: u32,\n ) -> Pin> + Send + '_>>\n\n /// Paginate through discussions for an issue.\n pub fn paginate_issue_discussions(\n &self,\n gitlab_project_id: i64,\n issue_iid: i64,\n ) -> Pin> + Send + '_>>\n\n /// Make request and return response with headers for pagination.\n async fn request_with_headers(\n &self,\n path: &str,\n params: &[(&str, String)],\n ) -> Result<(T, HeaderMap)>\n}\n```\n\n### Pagination Logic\n\n1. Start at page 1, per_page=100\n2. For issues: add scope=all, state=all, order_by=updated_at, sort=asc\n3. Apply cursor rewind: `updated_after = cursor - rewind_seconds` (clamped to 0)\n4. Yield each item from response\n5. Check `x-next-page` header for continuation\n6. 
Stop when header is empty/absent OR response is empty\n\n### Cursor Rewind\n\n```rust\nif let Some(ts) = updated_after {\n let rewind_ms = (cursor_rewind_seconds as i64) * 1000;\n let rewound = (ts - rewind_ms).max(0); // Clamp to avoid underflow\n // Convert to ISO 8601 for updated_after param\n}\n```\n\n## Acceptance Criteria\n\n- [ ] `paginate_issues` returns Stream of GitLabIssue\n- [ ] `paginate_issues` adds scope=all, state=all, order_by=updated_at, sort=asc\n- [ ] `paginate_issues` applies cursor rewind with max(0) clamping\n- [ ] `paginate_issue_discussions` returns Stream of GitLabDiscussion\n- [ ] Both methods follow x-next-page header until empty\n- [ ] Both methods stop on empty response (fallback)\n- [ ] `request_with_headers` returns (T, HeaderMap) tuple\n\n## Files\n\n- src/gitlab/client.rs (edit - add methods)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/pagination_tests.rs\n#[tokio::test] async fn fetches_all_pages_when_multiple_exist()\n#[tokio::test] async fn respects_per_page_parameter()\n#[tokio::test] async fn follows_x_next_page_header_until_empty()\n#[tokio::test] async fn falls_back_to_empty_page_stop_if_headers_missing()\n#[tokio::test] async fn applies_cursor_rewind_for_tuple_semantics()\n#[tokio::test] async fn clamps_negative_rewind_to_zero()\n```\n\nGREEN: Implement pagination methods with async-stream\n\nVERIFY: `cargo test pagination`\n\n## Edge Cases\n\n- cursor_updated_at near zero - rewind must not underflow (use max(0))\n- GitLab returns empty x-next-page - treat as end of pages\n- GitLab omits pagination headers entirely - use empty response as stop condition\n- DateTime conversion fails - omit updated_after and fetch all (safe fallback)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.222168Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:28:39.192876Z","closed_at":"2026-01-25T22:28:39.192815Z","close_reason":"Implemented paginate_issues and paginate_issue_discussions with 
async-stream, cursor rewind with max(0) clamping, x-next-page header following, 4 unit tests passing","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-xhz","depends_on_id":"bd-1np","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-xhz","depends_on_id":"bd-2ys","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-xsgw","title":"NOTE-TEST2: Another test bead","description":"type: task","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:58:53.392214Z","updated_at":"2026-02-12T16:59:02.051710Z","closed_at":"2026-02-12T16:59:02.051663Z","close_reason":"test","compaction_level":0,"original_size":0} {"id":"bd-y095","title":"Implement SyncDeltaLedger for post-sync filtered navigation","description":"## Background\n\nAfter a sync completes, the Sync Summary screen shows delta counts (+12 new issues, +3 new MRs). Pressing `i` or `m` should navigate to Issue/MR List filtered to show ONLY the entities that changed in this sync run. The SyncDeltaLedger is an in-memory data structure (not persisted to DB) that records the exact IIDs of new/updated entities during a sync run. It lives for the duration of one TUI session and is cleared when a new sync starts. If the ledger is unavailable (e.g., after app restart), the Sync Summary falls back to a timestamp-based filter using `sync_status.last_completed_at`.\n\n## Approach\n\nCreate a `sync_delta.rs` module with:\n\n1. **`SyncDeltaLedger` struct**:\n ```rust\n pub struct SyncDeltaLedger {\n issues_new: Vec, // IIDs of newly created issues\n issues_updated: Vec, // IIDs of updated (not new) issues\n mrs_new: Vec, // IIDs of newly created MRs\n mrs_updated: Vec, // IIDs of updated MRs\n discussions_new: usize, // count only (too many to track individually)\n events_new: usize, // count only\n completed_at: Option, // timestamp when sync finished (fallback anchor)\n }\n ```\n2. 
**Builder pattern** — `SyncDeltaLedger::new()` starts empty, populated during sync via:\n - `record_issue(iid: i64, is_new: bool)`\n - `record_mr(iid: i64, is_new: bool)`\n - `record_discussions(count: usize)`\n - `record_events(count: usize)`\n - `finalize(completed_at: i64)` — marks ledger as complete\n3. **Query methods**:\n - `new_issue_iids() -> &[i64]` — for `i` key navigation in Summary mode\n - `new_mr_iids() -> &[i64]` — for `m` key navigation\n - `all_changed_issue_iids() -> Vec` — new + updated combined\n - `all_changed_mr_iids() -> Vec` — new + updated combined\n - `is_available() -> bool` — true if finalize() was called\n - `fallback_timestamp() -> Option` — completed_at for timestamp-based fallback\n4. **`clear()`** — resets all fields when a new sync starts\n\nThe ledger is owned by `SyncState` (part of `AppState`) and populated by the sync action handler when processing `SyncResult` from `run_sync()`. The existing `SyncResult` struct (src/cli/commands/sync.rs:30) already tracks `issues_updated` and `mrs_updated` counts but not individual IIDs — the TUI sync action will need to collect IIDs from the ingest callbacks.\n\n## Acceptance Criteria\n- [ ] `SyncDeltaLedger::new()` creates an empty ledger with `is_available() == false`\n- [ ] `record_issue(42, true)` adds 42 to `issues_new`; `record_issue(43, false)` adds to `issues_updated`\n- [ ] `new_issue_iids()` returns only new IIDs, `all_changed_issue_iids()` returns new + updated\n- [ ] `finalize(ts)` sets `is_available() == true` and stores the timestamp\n- [ ] `clear()` resets everything back to empty with `is_available() == false`\n- [ ] `fallback_timestamp()` returns None before finalize, Some(ts) after\n- [ ] Ledger handles >10,000 IIDs without issues (just Vec growth)\n\n## Files\n- CREATE: crates/lore-tui/src/sync_delta.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add `pub mod sync_delta;`)\n\n## TDD Anchor\nRED: Write `test_empty_ledger_not_available` that asserts 
`SyncDeltaLedger::new().is_available() == false` and `new_issue_iids().is_empty()`.\nGREEN: Implement the struct with new() and is_available().\nVERIFY: cargo test -p lore-tui sync_delta\n\nAdditional tests:\n- test_record_and_query_issues\n- test_record_and_query_mrs\n- test_finalize_makes_available\n- test_clear_resets_everything\n- test_all_changed_combines_new_and_updated\n- test_fallback_timestamp\n\n## Edge Cases\n- Recording the same IID twice (e.g., issue updated twice during sync) — should deduplicate or allow duplicates? Allow duplicates (Vec, not HashSet) for simplicity; consumers can deduplicate if needed.\n- Very large syncs with >50,000 entities — Vec is fine, no cap needed.\n- Calling query methods before finalize — returns data so far (is_available=false signals incompleteness).\n\n## Dependency Context\n- Depends on bd-2x2h (Sync screen) which owns SyncState and drives the sync lifecycle. The ledger is a field of SyncState.\n- Consumed by Sync Summary mode's `i`/`m` key handlers to produce filtered Issue/MR List navigation with exact IID sets.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:38.738460Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:29:48.475698Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-y095","depends_on_id":"bd-2x2h","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-ymd","title":"[CP1] Final validation - Gate A through D","description":"Run all tests and verify all internal gates pass.\n\n## Gate A: Issues Only (Must Pass First)\n- [ ] gi ingest --type=issues fetches all issues from configured projects\n- [ ] Issues stored with correct schema, including last_seen_at\n- [ ] Cursor-based sync is resumable (re-run fetches only new/updated)\n- [ ] Incremental cursor updates every 100 issues\n- [ ] Raw payloads stored for each issue\n- [ ] gi list issues and gi count issues work\n\n## Gate B: Labels Correct 
(Must Pass)\n- [ ] Labels extracted and stored (name-only)\n- [ ] Label links created correctly\n- [ ] Stale label links removed on re-sync (verified with test)\n- [ ] Label count per issue matches GitLab\n\n## Gate C: Dependent Discussion Sync (Must Pass)\n- [ ] Discussions fetched for issues with updated_at advancement\n- [ ] Notes stored with is_system flag correctly set\n- [ ] Raw payloads stored for discussions and notes\n- [ ] discussions_synced_for_updated_at watermark updated after sync\n- [ ] Unchanged issues skip discussion refetch (verified with test)\n- [ ] Bounded concurrency (dependent_concurrency respected)\n\n## Gate D: Resumability Proof (Must Pass)\n- [ ] Kill mid-run, rerun; bounded redo (cursor progress preserved)\n- [ ] No redundant discussion refetch after crash recovery\n- [ ] Single-flight lock prevents concurrent runs\n\n## Final Gate (Must Pass)\n- [ ] All unit tests pass (cargo test)\n- [ ] All integration tests pass (mocked with wiremock)\n- [ ] cargo clippy passes with no warnings\n- [ ] cargo fmt --check passes\n- [ ] Compiles with --release\n\n## Validation Commands\ncargo test\ncargo clippy -- -D warnings\ncargo fmt --check\ncargo build --release\n\nFiles: All CP1 files\nDone when: All gate criteria pass","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T16:59:26.795633Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:02.132613Z","closed_at":"2026-01-25T17:02:02.132613Z","deleted_at":"2026-01-25T17:02:02.132608Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-ypa","title":"Implement timeline expand phase: BFS cross-reference expansion","description":"## Background\n\nThe expand phase is step 3 of the timeline pipeline (spec Section 3.2). 
Starting from seed entities, it performs BFS over entity_references to discover related entities not matched by keywords.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 3.2 step 3, Section 3.5 (expanded_entities JSON).\n\n## Codebase Context\n\n- entity_references table exists (migration 011) with columns: source_entity_type, source_entity_id, target_entity_type, target_entity_id, target_project_path, target_entity_iid, reference_type, source_method, created_at\n- reference_type CHECK: `'closes' | 'mentioned' | 'related'`\n- source_method CHECK: `'api' | 'note_parse' | 'description_parse'` — use these values in provenance, NOT the spec's original values\n- Indexes: idx_entity_refs_source (source_entity_type, source_entity_id), idx_entity_refs_target (target_entity_id WHERE NOT NULL)\n\n## Approach\n\nCreate `src/core/timeline_expand.rs`:\n\n```rust\nuse std::collections::{HashSet, VecDeque};\nuse rusqlite::Connection;\nuse crate::core::timeline::{EntityRef, ExpandedEntityRef, UnresolvedRef};\n\npub struct ExpandResult {\n pub expanded_entities: Vec,\n pub unresolved_references: Vec,\n}\n\npub fn expand_timeline(\n conn: &Connection,\n seeds: &[EntityRef],\n depth: u32, // 0=no expansion, 1=default, 2+=deep\n include_mentions: bool, // --expand-mentions flag\n max_entities: usize, // cap at 100 to prevent explosion\n) -> Result { ... }\n```\n\n### BFS Algorithm\n\n```\nvisited: HashSet<(String, i64)> = seeds as set (entity_type, entity_id)\nqueue: VecDeque<(EntityRef, u32)> for multi-hop\n\nFor each seed:\n query_neighbors(conn, seed, edge_types) -> outgoing + incoming refs\n - Outgoing: SELECT target_* FROM entity_references WHERE source_entity_type=? AND source_entity_id=? AND reference_type IN (...)\n - Incoming: SELECT source_* FROM entity_references WHERE target_entity_type=? AND target_entity_id=? 
AND reference_type IN (...)\n - Unresolved (target_entity_id IS NULL): collect in UnresolvedRef, don't traverse\n - New resolved: add to expanded with provenance (via_from, via_reference_type, via_source_method)\n - If current_depth < depth: enqueue for further BFS\n```\n\n### Edge Type Filtering\n\n```rust\nfn edge_types(include_mentions: bool) -> Vec<&'static str> {\n if include_mentions {\n vec![\"closes\", \"related\", \"mentioned\"]\n } else {\n vec![\"closes\", \"related\"]\n }\n}\n```\n\n### Provenance (Critical for spec compliance)\n\nEach expanded entity needs via object per spec Section 3.5:\n- via_from: EntityRef of the entity that referenced this one\n- via_reference_type: from entity_references.reference_type column\n- via_source_method: from entity_references.source_method column (**codebase values: 'api', 'note_parse', 'description_parse'**)\n\nRegister in `src/core/mod.rs`: `pub mod timeline_expand;`\n\n## Acceptance Criteria\n\n- [ ] BFS traverses outgoing AND incoming edges in entity_references\n- [ ] Default: only \"closes\" and \"related\" edges (not \"mentioned\")\n- [ ] --expand-mentions: also traverses \"mentioned\" edges\n- [ ] depth=0: returns empty expanded list\n- [ ] max_entities cap prevents explosion (default 100)\n- [ ] Provenance: via_source_method uses codebase values (api/note_parse/description_parse), NOT spec values\n- [ ] Unresolved references (target_entity_id IS NULL) collected, not traversed\n- [ ] No duplicates: visited set by (entity_type, entity_id)\n- [ ] Self-references skipped\n- [ ] Module registered in src/core/mod.rs\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/core/timeline_expand.rs` (NEW)\n- `src/core/mod.rs` (add `pub mod timeline_expand;`)\n\n## TDD Loop\n\nRED: Tests in `src/core/timeline_expand.rs`:\n- `test_expand_depth_zero` - returns empty\n- `test_expand_finds_linked_entity` - seed issue -> closes -> linked MR\n- 
`test_expand_bidirectional` - starting from target also finds source\n- `test_expand_respects_max_entities`\n- `test_expand_skips_mentions_by_default`\n- `test_expand_includes_mentions_when_flagged`\n- `test_expand_collects_unresolved`\n- `test_expand_tracks_provenance` - verify via_source_method is 'api' not 'api_closes_issues'\n\nTests need in-memory DB with migrations 001-014 applied + entity_references test data.\n\nGREEN: Implement BFS.\n\nVERIFY: `cargo test --lib -- timeline_expand`\n\n## Edge Cases\n\n- Circular references: visited set prevents infinite loop\n- Entity referenced from multiple seeds: first-come provenance wins\n- Empty entity_references: returns empty, not error\n- Cross-project refs with NULL target_entity_id: add to unresolved","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:33:08.659381Z","created_by":"tayloreernisse","updated_at":"2026-02-05T21:49:46.868460Z","closed_at":"2026-02-05T21:49:46.868410Z","close_reason":"Completed: Created src/core/timeline_expand.rs with BFS cross-reference expansion. Bidirectional traversal, depth limiting, mention filtering, max entity cap, provenance tracking, unresolved reference collection. 10 tests pass. 
All quality gates pass.","compaction_level":0,"original_size":0,"labels":["gate-3","phase-b","query"],"dependencies":[{"issue_id":"bd-ypa","depends_on_id":"bd-32q","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-ypa","depends_on_id":"bd-3ia","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-ypa","depends_on_id":"bd-ike","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-z0s","title":"[CP1] Final validation - Gate A through D","description":"Run all tests and verify all internal gates pass.\n\n## Gate A: Issues Only (Must Pass First)\n- [ ] gi ingest --type=issues fetches all issues from configured projects\n- [ ] Issues stored with correct schema, including last_seen_at\n- [ ] Cursor-based sync is resumable (re-run fetches only new/updated)\n- [ ] Incremental cursor updates every 100 issues\n- [ ] Raw payloads stored for each issue\n- [ ] gi list issues and gi count issues work\n\n## Gate B: Labels Correct (Must Pass)\n- [ ] Labels extracted and stored (name-only)\n- [ ] Label links created correctly\n- [ ] **Stale label links removed on re-sync** (verified with test)\n- [ ] Label count per issue matches GitLab\n\n## Gate C: Dependent Discussion Sync (Must Pass)\n- [ ] Discussions fetched for issues with updated_at advancement\n- [ ] Notes stored with is_system flag correctly set\n- [ ] Raw payloads stored for discussions and notes\n- [ ] discussions_synced_for_updated_at watermark updated after sync\n- [ ] **Unchanged issues skip discussion refetch** (verified with test)\n- [ ] Bounded concurrency (dependent_concurrency respected)\n\n## Gate D: Resumability Proof (Must Pass)\n- [ ] Kill mid-run, rerun; bounded redo (cursor progress preserved)\n- [ ] No redundant discussion refetch after crash recovery\n- [ ] Single-flight lock prevents concurrent runs\n\n## Final Gate (Must Pass)\n- [ ] All unit tests pass (cargo test)\n- [ ] All 
integration tests pass (mocked with wiremock)\n- [ ] cargo clippy passes with no warnings\n- [ ] cargo fmt --check passes\n- [ ] Compiles with --release\n\n## Validation Commands\ncargo test\ncargo clippy -- -D warnings\ncargo fmt --check\ncargo build --release\n\n## Data Integrity Checks\n- SELECT COUNT(*) FROM issues matches GitLab issue count\n- Every issue has a raw_payloads row\n- Every discussion has a raw_payloads row\n- Labels in issue_labels junction all exist in labels table\n- Re-running gi ingest --type=issues fetches 0 new items\n- After removing a label in GitLab and re-syncing, the link is removed\n\nFiles: All CP1 files\nDone when: All gate criteria pass","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.459095Z","created_by":"tayloreernisse","updated_at":"2026-01-25T23:27:09.567537Z","closed_at":"2026-01-25T23:27:09.567478Z","close_reason":"All gates pass: 71 tests, clippy clean, fmt clean, release build successful","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-z0s","depends_on_id":"bd-17v","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-z0s","depends_on_id":"bd-2f0","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-z0s","depends_on_id":"bd-39w","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-z0s","depends_on_id":"bd-3n1","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-z0s","depends_on_id":"bd-o7b","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-z0s","depends_on_id":"bd-v6i","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} -{"id":"bd-z94","title":"Implement 'lore file-history' command with human and robot output","description":"## Background\n\nThe file-history command is Gate 4's user-facing CLI. 
It answers 'which MRs touched this file, and why?'\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 4.4-4.5.\n\n## Codebase Context\n\n- CLI pattern: Commands enum in src/cli/mod.rs, handler in src/main.rs, output in src/cli/commands/\n- Project resolution: resolve_project() returns project_id or exit 18 (Ambiguous)\n- Robot mode: {ok, data, meta} envelope pattern\n- merge_requests.merged_at exists (migration 006) — order by COALESCE(merged_at, updated_at) DESC\n- discussions table: issue_id, merge_request_id\n- notes table: position_new_path for DiffNotes (used for --discussions flag)\n- mr_file_changes table: migration 016 (bd-1oo)\n- resolve_rename_chain() from bd-1yx (src/core/file_history.rs) for rename handling\n- VALID_COMMANDS array in src/main.rs (line ~448)\n\n## Approach\n\n### 1. FileHistoryArgs subcommand (`src/cli/mod.rs`):\n```rust\n/// Show MRs that touched a file, with linked issues and discussions\n#[command(name = \"file-history\")]\nFileHistory(FileHistoryArgs),\n```\n\n```rust\n#[derive(Parser, Debug)]\npub struct FileHistoryArgs {\n /// File path to trace history for\n pub path: String,\n /// Scope to a specific project (fuzzy match)\n #[arg(short = 'p', long)]\n pub project: Option,\n /// Include discussion snippets from DiffNotes on this file\n #[arg(long)]\n pub discussions: bool,\n /// Disable rename chain resolution\n #[arg(long = \"no-follow-renames\")]\n pub no_follow_renames: bool,\n /// Only show merged MRs\n #[arg(long)]\n pub merged: bool,\n /// Maximum results\n #[arg(short = 'n', long = \"limit\", default_value = \"50\")]\n pub limit: usize,\n}\n```\n\n### 2. Query logic (`src/cli/commands/file_history.rs`):\n\n1. Resolve project (exit 18 on ambiguous)\n2. Call resolve_rename_chain() unless --no-follow-renames\n3. Query mr_file_changes for all resolved paths\n4. JOIN merge_requests for MR details\n5. Optionally fetch DiffNote discussions on the file\n6. Order by COALESCE(merged_at, updated_at) DESC\n7. 
Apply --merged filter and --limit\n\n### 3. Human output:\n```\nFile History: src/auth/oauth.rs (via 3 paths, 5 MRs)\nRename chain: src/authentication/oauth.rs -> src/auth/oauth.rs\n\n !456 \"Implement OAuth2 flow\" merged @alice 2024-01-22 modified\n !489 \"Fix OAuth token expiry\" merged @bob 2024-02-15 modified\n !512 \"Refactor auth module\" merged @carol 2024-03-01 renamed\n```\n\n### 4. Robot JSON:\n```json\n{\n \"ok\": true,\n \"data\": {\n \"path\": \"src/auth/oauth.rs\",\n \"rename_chain\": [\"src/authentication/oauth.rs\", \"src/auth/oauth.rs\"],\n \"merge_requests\": [\n {\n \"iid\": 456,\n \"title\": \"Implement OAuth2 flow\",\n \"state\": \"merged\",\n \"author\": \"alice\",\n \"merged_at\": \"2024-01-22T...\",\n \"change_type\": \"modified\",\n \"discussion_count\": 12,\n \"file_discussion_count\": 4,\n \"merge_commit_sha\": \"abc123\"\n }\n ]\n },\n \"meta\": {\n \"total_mrs\": 5,\n \"renames_followed\": true,\n \"paths_searched\": 2\n }\n}\n```\n\n## Acceptance Criteria\n\n- [ ] `lore file-history src/foo.rs` works with human output\n- [ ] `lore --robot file-history src/foo.rs` works with JSON envelope\n- [ ] Rename chain displayed in human output when renames detected\n- [ ] Robot JSON includes rename_chain array\n- [ ] --no-follow-renames disables resolution (queries literal path only)\n- [ ] --merged filters to merged MRs only\n- [ ] --discussions includes DiffNote snippets from notes.position_new_path matching\n- [ ] -p for project scoping (exit 18 on ambiguous)\n- [ ] -n limits results\n- [ ] No MR history: friendly message (exit 0, not error)\n- [ ] \"file-history\" added to VALID_COMMANDS array\n- [ ] robot-docs manifest includes file-history command\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/cli/mod.rs` (FileHistoryArgs struct + Commands::FileHistory variant)\n- `src/cli/commands/file_history.rs` (NEW — query + human + robot output)\n- `src/cli/commands/mod.rs` 
(add `pub mod file_history;` + re-exports)\n- `src/main.rs` (handler dispatch + VALID_COMMANDS + robot-docs entry)\n\n## TDD Loop\n\nNo unit tests for CLI wiring. Verify with:\n\n```bash\ncargo check --all-targets\ncargo run -- file-history --help\n```\n\n## Edge Cases\n\n- File path with spaces: clap handles quoting\n- Path not in any MR: empty result, friendly message, not error\n- MRs ordered by COALESCE(merged_at, updated_at) DESC (unmerged MRs use updated_at)\n- --discussions with no DiffNotes: empty discussion section, not error\n- rename_chain omitted from robot JSON when --no-follow-renames is set\n","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:09.027259Z","created_by":"tayloreernisse","updated_at":"2026-02-05T20:57:44.467745Z","compaction_level":0,"original_size":0,"labels":["cli","gate-4","phase-b"],"dependencies":[{"issue_id":"bd-z94","depends_on_id":"bd-14q","type":"parent-child","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-z94","depends_on_id":"bd-1yx","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-z94","depends_on_id":"bd-2yo","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-z94","depends_on_id":"bd-3ia","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} +{"id":"bd-z94","title":"Implement 'lore file-history' command with human and robot output","description":"## Background\n\nThe file-history command is Gate 4's user-facing CLI. 
It answers \"which MRs touched this file, and why?\"\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 4.4-4.5.\n\n## Codebase Context\n\n- CLI pattern: Commands enum in src/cli/mod.rs, handler in src/main.rs, output in src/cli/commands/\n- Project resolution: resolve_project() returns project_id or exit 18 (Ambiguous)\n- Robot mode: {ok, data, meta} envelope pattern\n- merge_requests.merged_at exists (migration 006) — order by COALESCE(merged_at, updated_at) DESC\n- discussions table: issue_id, merge_request_id\n- notes table: position_new_path for DiffNotes (used for --discussions flag)\n- **mr_file_changes table**: migration 016 — already exists and is populated by drain_mr_diffs() (orchestrator.rs lines 708-726, 1514+)\n- resolve_rename_chain() from bd-1yx (src/core/file_history.rs) for rename handling\n- VALID_COMMANDS array in src/main.rs (line ~448)\n- **26 migrations** exist (001-026). LATEST_SCHEMA_VERSION derived from MIGRATIONS.len().\n\n## Approach\n\n### 1. FileHistoryArgs subcommand (`src/cli/mod.rs`):\n```rust\n/// Show MRs that touched a file, with linked issues and discussions\n#[command(name = \"file-history\")]\nFileHistory(FileHistoryArgs),\n```\n\n```rust\n#[derive(Parser, Debug)]\npub struct FileHistoryArgs {\n /// File path to trace history for\n pub path: String,\n /// Scope to a specific project (fuzzy match)\n #[arg(short = 'p', long)]\n pub project: Option,\n /// Include discussion snippets from DiffNotes on this file\n #[arg(long)]\n pub discussions: bool,\n /// Disable rename chain resolution\n #[arg(long = \"no-follow-renames\")]\n pub no_follow_renames: bool,\n /// Only show merged MRs\n #[arg(long)]\n pub merged: bool,\n /// Maximum results\n #[arg(short = 'n', long = \"limit\", default_value = \"50\")]\n pub limit: usize,\n}\n```\n\n### 2. Query logic (`src/cli/commands/file_history.rs`):\n\n1. Resolve project (exit 18 on ambiguous)\n2. Call resolve_rename_chain() unless --no-follow-renames\n3. 
Query mr_file_changes for all resolved paths\n4. JOIN merge_requests for MR details\n5. Optionally fetch DiffNote discussions on the file (notes.position_new_path)\n6. Order by COALESCE(merged_at, updated_at) DESC\n7. Apply --merged filter and --limit\n\n### 3. Human output:\n```\nFile History: src/auth/oauth.rs (via 3 paths, 5 MRs)\nRename chain: src/authentication/oauth.rs -> src/auth/oauth.rs\n\n !456 \"Implement OAuth2 flow\" merged @alice 2024-01-22 modified\n !489 \"Fix OAuth token expiry\" merged @bob 2024-02-15 modified\n !512 \"Refactor auth module\" merged @carol 2024-03-01 renamed\n```\n\n### 4. Robot JSON:\n```json\n{\n \"ok\": true,\n \"data\": {\n \"path\": \"src/auth/oauth.rs\",\n \"rename_chain\": [\"src/authentication/oauth.rs\", \"src/auth/oauth.rs\"],\n \"merge_requests\": [\n {\n \"iid\": 456,\n \"title\": \"Implement OAuth2 flow\",\n \"state\": \"merged\",\n \"author\": \"alice\",\n \"merged_at\": \"2024-01-22T...\",\n \"change_type\": \"modified\",\n \"discussion_count\": 12,\n \"file_discussion_count\": 4,\n \"merge_commit_sha\": \"abc123\"\n }\n ]\n },\n \"meta\": {\n \"total_mrs\": 5,\n \"renames_followed\": true,\n \"paths_searched\": 2\n }\n}\n```\n\n## Acceptance Criteria\n\n- [ ] `lore file-history src/foo.rs` works with human output\n- [ ] `lore --robot file-history src/foo.rs` works with JSON envelope\n- [ ] Rename chain displayed in human output when renames detected\n- [ ] Robot JSON includes rename_chain array\n- [ ] --no-follow-renames disables resolution (queries literal path only)\n- [ ] --merged filters to merged MRs only\n- [ ] --discussions includes DiffNote snippets from notes.position_new_path matching\n- [ ] -p for project scoping (exit 18 on ambiguous)\n- [ ] -n limits results\n- [ ] No MR history: friendly message (exit 0, not error)\n- [ ] \"file-history\" added to VALID_COMMANDS array\n- [ ] robot-docs manifest includes file-history command\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- 
-D warnings` passes\n- [ ] `cargo fmt --check` passes\n\n## Files\n\n- MODIFY: src/cli/mod.rs (FileHistoryArgs struct + Commands::FileHistory variant)\n- CREATE: src/cli/commands/file_history.rs (query + human + robot output)\n- MODIFY: src/cli/commands/mod.rs (add pub mod file_history + re-exports)\n- MODIFY: src/main.rs (handler dispatch + VALID_COMMANDS + robot-docs entry)\n\n## TDD Anchor\n\nRED: No unit tests for CLI wiring — verify with cargo check + manual run.\n\nGREEN: Implement query, human renderer, robot renderer.\n\nVERIFY:\n```bash\ncargo check --all-targets\ncargo run --release -- file-history --help\ncargo run --release -- file-history src/main.rs\ncargo run --release -- --robot file-history src/main.rs\n```\n\n## Edge Cases\n\n- File path with spaces: clap handles quoting\n- Path not in any MR: empty result, friendly message, exit 0 (not error)\n- MRs ordered by COALESCE(merged_at, updated_at) DESC (unmerged MRs use updated_at)\n- --discussions with no DiffNotes: empty discussion section, not error\n- rename_chain omitted from robot JSON when --no-follow-renames is set\n- mr_file_changes table empty (sync hasn't fetched diffs yet): friendly message suggesting `lore sync`\n\n## Dependency Context\n\n- **bd-1yx (resolve_rename_chain)**: provides resolve_rename_chain() in src/core/file_history.rs — takes a path and returns Vec of all historical paths. MUST be implemented before this bead.\n- **bd-2yo / migration 016 (mr_file_changes)**: provides the mr_file_changes table with new_path, old_path, change_type columns. Already exists and is populated by drain_mr_diffs() in orchestrator.rs (lines 708-726, 1514+).\n- **bd-3ia (closes_issues)**: provides entity_references with reference_type='closes' linking MRs to issues. 
Used for \"linked issues\" column if extended later.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:09.027259Z","created_by":"tayloreernisse","updated_at":"2026-02-17T17:57:21.258978Z","closed_at":"2026-02-17T17:57:21.258929Z","close_reason":"Implemented file-history command with human/robot output, rename chain resolution, DiffNote discussions, --merged/--no-follow-renames filters, autocorrect registry, robot-docs manifest","compaction_level":0,"original_size":0,"labels":["cli","gate-4","phase-b"],"dependencies":[{"issue_id":"bd-z94","depends_on_id":"bd-14q","type":"parent-child","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-z94","depends_on_id":"bd-1yx","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-z94","depends_on_id":"bd-2yo","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"},{"issue_id":"bd-z94","depends_on_id":"bd-3ia","type":"blocks","created_at":"2026-02-18T17:42:00Z","created_by":"import"}]} {"id":"bd-zibc","title":"WHO: VALID_COMMANDS + robot-docs manifest","description":"## Background\n\nRegister the who command in main.rs so that typo suggestions work and robot-docs manifest includes the command for agent self-discovery.\n\n## Approach\n\n### 1. VALID_COMMANDS array (~line 471 in suggest_similar_command):\nAdd \"who\" after \"timeline\":\n```rust\nconst VALID_COMMANDS: &[&str] = &[\n \"issues\", \"mrs\", /* ... existing ... */ \"timeline\", \"who\",\n];\n```\n\n### 2. robot-docs manifest (handle_robot_docs, after \"timeline\" entry):\n```json\n\"who\": {\n \"description\": \"People intelligence: experts, workload, active discussions, overlap, review patterns\",\n \"flags\": [\"\", \"--path \", \"--active\", \"--overlap \", \"--reviews\", \"--since \", \"-p/--project\", \"-n/--limit\"],\n \"modes\": {\n \"expert\": \"lore who — Who knows about this area? 
(also: --path for root files)\",\n \"workload\": \"lore who — What is someone working on?\",\n \"reviews\": \"lore who --reviews — Review pattern analysis\",\n \"active\": \"lore who --active — Active unresolved discussions\",\n \"overlap\": \"lore who --overlap — Who else is touching these files?\"\n },\n \"example\": \"lore --robot who src/features/auth/\",\n \"response_schema\": {\n \"ok\": \"bool\",\n \"data\": {\n \"mode\": \"string\",\n \"input\": {\"target\": \"string|null\", \"path\": \"string|null\", \"project\": \"string|null\", \"since\": \"string|null\", \"limit\": \"int\"},\n \"resolved_input\": {\"mode\": \"string\", \"project_id\": \"int|null\", \"project_path\": \"string|null\", \"since_ms\": \"int\", \"since_iso\": \"string\", \"since_mode\": \"string (default|explicit|none)\", \"limit\": \"int\"},\n \"...\": \"mode-specific fields\"\n },\n \"meta\": {\"elapsed_ms\": \"int\"}\n }\n}\n```\n\n### 3. workflows JSON — add people_intelligence:\n```json\n\"people_intelligence\": [\n \"lore --robot who src/path/to/feature/\",\n \"lore --robot who @username\",\n \"lore --robot who @username --reviews\",\n \"lore --robot who --active --since 7d\",\n \"lore --robot who --overlap src/path/\",\n \"lore --robot who --path README.md\"\n]\n```\n\n## Files\n\n- `src/main.rs`\n\n## TDD Loop\n\nVERIFY: `cargo check && cargo run --release -- robot-docs | python3 -c \"import json,sys; d=json.load(sys.stdin); assert 'who' in d['commands']\"`\n\n## Acceptance Criteria\n\n- [ ] \"who\" in VALID_COMMANDS\n- [ ] `lore robot-docs` JSON contains who command with all 5 modes\n- [ ] workflows contains people_intelligence array\n- [ ] cargo check passes\n\n## Edge Cases\n\n- The VALID_COMMANDS array is used for typo suggestion via Levenshtein distance — ensure \"who\" does not collide with other short commands (it does not; closest is \"show\" at distance 2)\n- robot-docs JSON is constructed via serde_json::json!() macro inside a raw string — ensure no trailing commas or JSON 
syntax errors in the manually-written JSON block\n- The response_schema in robot-docs is documentation-only (not validated at runtime) — ensure it matches actual output structure from bd-3mj2\n- If handle_robot_docs location has changed since plan was written, search for \"robot-docs\" or \"robot_docs\" in main.rs to find current location","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:41:35.098890Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.601819Z","closed_at":"2026-02-08T04:10:29.601785Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-zibc","depends_on_id":"bd-2rk9","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} {"id":"bd-zqpf","title":"WHO: Expert mode query (query_expert)","description":"## Background\n\nExpert mode answers \"Who should I talk to about this feature/file?\" by analyzing DiffNote activity at a given path. It scores users by a combination of review breadth (distinct MRs reviewed), authorship breadth (distinct MRs authored), and review intensity (DiffNote count). 
This is the primary use case for lore who.\n\n## Approach\n\nSingle CTE with two UNION ALL branches (reviewer + author), then SQL-level aggregation, scoring, sorting, and LIMIT.\n\n### Key SQL pattern (prefix variant — exact variant replaces LIKE with =):\n\n```sql\nWITH activity AS (\n -- Reviewer branch: DiffNotes on other people's MRs\n SELECT n.author_username AS username, 'reviewer' AS role,\n COUNT(DISTINCT m.id) AS mr_cnt, COUNT(*) AS note_cnt,\n MAX(n.created_at) AS last_seen_at\n FROM notes n\n JOIN discussions d ON n.discussion_id = d.id\n JOIN merge_requests m ON d.merge_request_id = m.id\n WHERE n.note_type = 'DiffNote' AND n.is_system = 0\n AND n.author_username IS NOT NULL\n AND (m.author_username IS NULL OR n.author_username != m.author_username) -- self-review exclusion\n AND m.state IN ('opened','merged')\n AND n.position_new_path LIKE ?1 ESCAPE '\\'\n AND n.created_at >= ?2\n AND (?3 IS NULL OR n.project_id = ?3)\n GROUP BY n.author_username\n\n UNION ALL\n\n -- Author branch: MR authors with DiffNote activity at this path\n SELECT m.author_username AS username, 'author' AS role,\n COUNT(DISTINCT m.id) AS mr_cnt, 0 AS note_cnt,\n MAX(n.created_at) AS last_seen_at\n FROM merge_requests m\n JOIN discussions d ON d.merge_request_id = m.id\n JOIN notes n ON n.discussion_id = d.id\n WHERE n.note_type = 'DiffNote' AND n.is_system = 0\n AND m.author_username IS NOT NULL\n AND n.position_new_path LIKE ?1 ESCAPE '\\'\n AND n.created_at >= ?2\n AND (?3 IS NULL OR n.project_id = ?3)\n GROUP BY m.author_username\n)\nSELECT username,\n SUM(CASE WHEN role='reviewer' THEN mr_cnt ELSE 0 END) AS review_mr_count,\n SUM(CASE WHEN role='reviewer' THEN note_cnt ELSE 0 END) AS review_note_count,\n SUM(CASE WHEN role='author' THEN mr_cnt ELSE 0 END) AS author_mr_count,\n MAX(last_seen_at) AS last_seen_at,\n (SUM(CASE WHEN role='reviewer' THEN mr_cnt ELSE 0 END) * 20 +\n SUM(CASE WHEN role='author' THEN mr_cnt ELSE 0 END) * 12 +\n SUM(CASE WHEN role='reviewer' THEN 
note_cnt ELSE 0 END) * 1) AS score\nFROM activity\nGROUP BY username\nORDER BY score DESC, last_seen_at DESC, username ASC\nLIMIT ?4\n```\n\n### Two static SQL strings selected via `if pq.is_prefix { sql_prefix } else { sql_exact }` — the only difference is LIKE vs = on position_new_path. Both use prepare_cached().\n\n### Scoring formula: review_mr * 20 + author_mr * 12 + review_notes * 1\n- MR breadth dominates (prevents \"comment storm\" gaming)\n- Integer arithmetic (no f64 display issues)\n\n### LIMIT+1 truncation pattern:\n```rust\nlet limit_plus_one = (limit + 1) as i64;\n// ... query with limit_plus_one ...\nlet truncated = experts.len() > limit;\nlet experts = experts.into_iter().take(limit).collect();\n```\n\n### ExpertResult struct:\n```rust\npub struct ExpertResult {\n pub path_query: String,\n pub path_match: String, // \"exact\" or \"prefix\"\n pub experts: Vec,\n pub truncated: bool,\n}\npub struct Expert {\n pub username: String, pub score: i64,\n pub review_mr_count: u32, pub review_note_count: u32,\n pub author_mr_count: u32, pub last_seen_ms: i64,\n}\n```\n\n## Files\n\n- `src/cli/commands/who.rs`\n\n## TDD Loop\n\nRED:\n```\ntest_expert_query — insert project, MR, discussion, 3 DiffNotes; verify expert ranking\ntest_expert_excludes_self_review_notes — author_a comments on own MR; review_mr_count must be 0\ntest_expert_truncation — 3 experts, limit=2 -> truncated=true, len=2; limit=10 -> false\n```\n\nGREEN: Implement query_expert with both SQL variants\nVERIFY: `cargo test -- expert`\n\n## Acceptance Criteria\n\n- [ ] test_expert_query passes (reviewer_b ranked first by score)\n- [ ] test_expert_excludes_self_review_notes passes (author_a has review_mr_count=0)\n- [ ] test_expert_truncation passes (truncated flag correct at both limits)\n- [ ] Default since window: 6m\n\n## Edge Cases\n\n- Self-review: MR author commenting on own diff must NOT count as reviewer (filter n.author_username != m.author_username with IS NULL guard on 
m.author_username)\n- MR state: only 'opened' and 'merged' — closed/unmerged MRs are noise\n- Project scoping is on n.project_id (not m.project_id) to maximize index usage\n- Author branch also filters n.is_system = 0 for consistency","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:40:20.990590Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.596337Z","closed_at":"2026-02-08T04:10:29.596299Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-zqpf","depends_on_id":"bd-2ldg","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"},{"issue_id":"bd-zqpf","depends_on_id":"bd-34rr","type":"blocks","created_at":"2026-02-12T19:34:59Z","created_by":"import"}]} diff --git a/.beads/last-touched b/.beads/last-touched index 0c1bd4d..f520793 100644 --- a/.beads/last-touched +++ b/.beads/last-touched @@ -1 +1 @@ -bd-2cbw +bd-2kr0 diff --git a/crates/lore-tui/Cargo.lock b/crates/lore-tui/Cargo.lock index cf2d991..c461984 100644 --- a/crates/lore-tui/Cargo.lock +++ b/crates/lore-tui/Cargo.lock @@ -171,6 +171,23 @@ version = "1.0.4" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "9330f8b2ff13f34540b44e946ef35111825727b38d33286ef986142615121801" +[[package]] +name = "charmed-lipgloss" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "45e10db01f5eaea11d98ca5c5cffd8cc4add7ac56d0128d91ba1f2a3757b6c5a" +dependencies = [ + "bitflags", + "colored", + "crossterm 0.29.0", + "serde", + "serde_json", + "thiserror", + "toml", + "tracing", + "unicode-width 0.1.14", +] + [[package]] name = "chrono" version = "0.4.43" @@ -241,14 +258,13 @@ source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "b05b61dc5112cbb17e4b6cd61790d9845d13888356391624cbe7e41efeac1e75" [[package]] 
-name = "comfy-table" -version = "7.2.2" +name = "colored" +version = "2.2.0" source = "registry+https://github.com/rust-lang/crates.io-index" -checksum = "958c5d6ecf1f214b4c2bbbbf6ab9523a864bd136dcf71a7e8904799acfe1ad47" +checksum = "117725a109d387c937a1533ce01b450cbde6b88abceea8473c4d7a85853cda3c" dependencies = [ - "crossterm 0.29.0", - "unicode-segmentation", - "unicode-width", + "lazy_static", + "windows-sys 0.59.0", ] [[package]] @@ -260,7 +276,7 @@ dependencies = [ "encode_unicode", "libc", "once_cell", - "unicode-width", + "unicode-width 0.2.2", "windows-sys 0.61.2", ] @@ -603,7 +619,7 @@ dependencies = [ "signal-hook 0.4.3", "unicode-display-width", "unicode-segmentation", - "unicode-width", + "unicode-width 0.2.2", ] [[package]] @@ -642,7 +658,7 @@ dependencies = [ "smallvec", "unicode-display-width", "unicode-segmentation", - "unicode-width", + "unicode-width 0.2.2", ] [[package]] @@ -660,7 +676,7 @@ dependencies = [ "ftui-text", "tracing", "unicode-segmentation", - "unicode-width", + "unicode-width 0.2.2", ] [[package]] @@ -704,7 +720,7 @@ dependencies = [ "ftui-style", "ftui-text", "unicode-segmentation", - "unicode-width", + "unicode-width 0.2.2", ] [[package]] @@ -1145,7 +1161,7 @@ checksum = "9375e112e4b463ec1b1c6c011953545c65a30164fbab5b581df32b3abf0dcb88" dependencies = [ "console", "portable-atomic", - "unicode-width", + "unicode-width 0.2.2", "unit-prefix", "web-time", ] @@ -1287,13 +1303,13 @@ checksum = "5e5032e24019045c762d3c0f28f5b6b8bbf38563a65908389bf7978758920897" [[package]] name = "lore" -version = "0.7.0" +version = "0.8.3" dependencies = [ "async-stream", + "charmed-lipgloss", "chrono", "clap", "clap_complete", - "comfy-table", "console", "dialoguer", "dirs", @@ -1964,6 +1980,15 @@ dependencies = [ "zmij", ] +[[package]] +name = "serde_spanned" +version = "0.6.9" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "bf41e0cfaf7226dca15e8197172c295a782857fcb97fad1808a166870dee75a3" +dependencies = [ + "serde", 
+] + [[package]] name = "serde_urlencoded" version = "0.7.1" @@ -2317,6 +2342,47 @@ dependencies = [ "tokio", ] +[[package]] +name = "toml" +version = "0.8.23" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "dc1beb996b9d83529a9e75c17a1686767d148d70663143c7854d8b4a09ced362" +dependencies = [ + "serde", + "serde_spanned", + "toml_datetime", + "toml_edit", +] + +[[package]] +name = "toml_datetime" +version = "0.6.11" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "22cddaf88f4fbc13c51aebbf5f8eceb5c7c5a9da2ac40a13519eb5b0a0e8f11c" +dependencies = [ + "serde", +] + +[[package]] +name = "toml_edit" +version = "0.22.27" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "41fe8c660ae4257887cf66394862d21dbca4a6ddd26f04a3560410406a2f819a" +dependencies = [ + "indexmap", + "serde", + "serde_spanned", + "toml_datetime", + "toml_write", + "winnow", +] + +[[package]] +name = "toml_write" +version = "0.1.2" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "5d99f8c9a7727884afe522e9bd5edbfc91a3312b36a77b5fb8926e4c31a41801" + [[package]] name = "tower" version = "0.5.3" @@ -2481,6 +2547,12 @@ version = "1.12.0" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "f6ccf251212114b54433ec949fd6a7841275f9ada20dddd2f29e9ceea4501493" +[[package]] +name = "unicode-width" +version = "0.1.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = "7dd6e30e90baa6f72411720665d41d89b9a3d039dc45b8faea1ddd07f617f6af" + [[package]] name = "unicode-width" version = "0.2.2" @@ -2967,6 +3039,15 @@ version = "0.53.1" source = "registry+https://github.com/rust-lang/crates.io-index" checksum = "d6bbff5f0aada427a1e5a6da5f1f98158182f26556f345ac9e04d36d0ebed650" +[[package]] +name = "winnow" +version = "0.7.14" +source = "registry+https://github.com/rust-lang/crates.io-index" +checksum = 
"5a5364e9d77fcdeeaa6062ced926ee3381faa2ee02d3eb83a5c27a8825540829" +dependencies = [ + "memchr", +] + [[package]] name = "wit-bindgen" version = "0.51.0" diff --git a/crates/lore-tui/src/action.rs b/crates/lore-tui/src/action.rs new file mode 100644 index 0000000..245e76a --- /dev/null +++ b/crates/lore-tui/src/action.rs @@ -0,0 +1,1628 @@ +#![allow(dead_code)] + +//! Action layer — pure data-fetching functions for TUI screens. +//! +//! Actions query the local SQLite database and return data structs. +//! They never touch terminal state, never spawn tasks, and use injected +//! [`Clock`] for time calculations (deterministic tests). + +use anyhow::{Context, Result}; +use rusqlite::Connection; + +use crate::clock::Clock; +use crate::state::dashboard::{ + DashboardData, EntityCounts, LastSyncInfo, ProjectSyncInfo, RecentActivityItem, +}; +use crate::state::issue_list::{ + IssueCursor, IssueFilter, IssueListPage, IssueListRow, SortField, SortOrder, +}; +use crate::state::mr_list::{MrCursor, MrFilter, MrListPage, MrListRow, MrSortField, MrSortOrder}; + +// --------------------------------------------------------------------------- +// Dashboard +// --------------------------------------------------------------------------- + +/// Fetch all data for the dashboard screen. +/// +/// Runs aggregation queries for entity counts, per-project sync freshness, +/// recent activity, and the last sync run summary. +pub fn fetch_dashboard(conn: &Connection, clock: &dyn Clock) -> Result { + let counts = fetch_entity_counts(conn)?; + let projects = fetch_project_sync_info(conn, clock)?; + let recent = fetch_recent_activity(conn, clock)?; + let last_sync = fetch_last_sync(conn)?; + + Ok(DashboardData { + counts, + projects, + recent, + last_sync, + }) +} + +/// Count all entities in the database. 
+fn fetch_entity_counts(conn: &Connection) -> Result { + let issues_total: i64 = conn + .query_row("SELECT COUNT(*) FROM issues", [], |r| r.get(0)) + .context("counting issues")?; + + let issues_open: i64 = conn + .query_row( + "SELECT COUNT(*) FROM issues WHERE state = 'opened'", + [], + |r| r.get(0), + ) + .context("counting open issues")?; + + let mrs_total: i64 = conn + .query_row("SELECT COUNT(*) FROM merge_requests", [], |r| r.get(0)) + .context("counting merge requests")?; + + let mrs_open: i64 = conn + .query_row( + "SELECT COUNT(*) FROM merge_requests WHERE state = 'opened'", + [], + |r| r.get(0), + ) + .context("counting open merge requests")?; + + let discussions: i64 = conn + .query_row("SELECT COUNT(*) FROM discussions", [], |r| r.get(0)) + .context("counting discussions")?; + + let notes_total: i64 = conn + .query_row("SELECT COUNT(*) FROM notes", [], |r| r.get(0)) + .context("counting notes")?; + + let notes_system: i64 = conn + .query_row("SELECT COUNT(*) FROM notes WHERE is_system = 1", [], |r| { + r.get(0) + }) + .context("counting system notes")?; + + let notes_system_pct = if notes_total > 0 { + u8::try_from(notes_system * 100 / notes_total).unwrap_or(100) + } else { + 0 + }; + + let documents: i64 = conn + .query_row("SELECT COUNT(*) FROM documents", [], |r| r.get(0)) + .context("counting documents")?; + + let embeddings: i64 = conn + .query_row("SELECT COUNT(*) FROM embedding_metadata", [], |r| r.get(0)) + .context("counting embeddings")?; + + #[allow(clippy::cast_sign_loss)] // SQL COUNT(*) is always >= 0 + Ok(EntityCounts { + issues_open: issues_open as u64, + issues_total: issues_total as u64, + mrs_open: mrs_open as u64, + mrs_total: mrs_total as u64, + discussions: discussions as u64, + notes_total: notes_total as u64, + notes_system_pct, + documents: documents as u64, + embeddings: embeddings as u64, + }) +} + +/// Per-project sync freshness based on the most recent sync_runs entry. 
+fn fetch_project_sync_info(conn: &Connection, clock: &dyn Clock) -> Result> { + let now_ms = clock.now_ms(); + + let mut stmt = conn + .prepare( + "SELECT p.path_with_namespace, + MAX(sr.finished_at) as last_sync_ms + FROM projects p + LEFT JOIN sync_runs sr ON sr.status = 'succeeded' + AND sr.finished_at IS NOT NULL + GROUP BY p.id + ORDER BY p.path_with_namespace", + ) + .context("preparing project sync query")?; + + let rows = stmt + .query_map([], |row| { + let path: String = row.get(0)?; + let last_sync_ms: Option = row.get(1)?; + Ok((path, last_sync_ms)) + }) + .context("querying project sync info")?; + + let mut result = Vec::new(); + for row in rows { + let (path, last_sync_ms) = row.context("reading project sync row")?; + let minutes_since_sync = match last_sync_ms { + Some(ms) => { + let elapsed_ms = now_ms.saturating_sub(ms); + u64::try_from(elapsed_ms / 60_000).unwrap_or(u64::MAX) + } + None => u64::MAX, // Never synced. + }; + result.push(ProjectSyncInfo { + path, + minutes_since_sync, + }); + } + + Ok(result) +} + +/// Recent activity: the 20 most recently updated issues and MRs. 
+fn fetch_recent_activity(conn: &Connection, clock: &dyn Clock) -> Result> { + let now_ms = clock.now_ms(); + + let mut stmt = conn + .prepare( + "SELECT entity_type, iid, title, state, updated_at FROM ( + SELECT 'issue' AS entity_type, iid, title, state, updated_at + FROM issues + UNION ALL + SELECT 'mr' AS entity_type, iid, title, state, updated_at + FROM merge_requests + ) + ORDER BY updated_at DESC + LIMIT 20", + ) + .context("preparing recent activity query")?; + + let rows = stmt + .query_map([], |row| { + let entity_type: String = row.get(0)?; + let iid: i64 = row.get(1)?; + let title: String = row.get::<_, Option>(2)?.unwrap_or_default(); + let state: String = row.get::<_, Option>(3)?.unwrap_or_default(); + let updated_at: i64 = row.get(4)?; + Ok((entity_type, iid, title, state, updated_at)) + }) + .context("querying recent activity")?; + + let mut result = Vec::new(); + for row in rows { + let (entity_type, iid, title, state, updated_at) = + row.context("reading recent activity row")?; + let elapsed_ms = now_ms.saturating_sub(updated_at); + let minutes_ago = u64::try_from(elapsed_ms / 60_000).unwrap_or(u64::MAX); + result.push(RecentActivityItem { + entity_type, + iid: iid as u64, + title, + state, + minutes_ago, + }); + } + + Ok(result) +} + +/// The most recent sync run summary. 
+fn fetch_last_sync(conn: &Connection) -> Result> { + let result = conn.query_row( + "SELECT status, finished_at, command, error + FROM sync_runs + ORDER BY id DESC + LIMIT 1", + [], + |row| { + Ok(LastSyncInfo { + status: row.get(0)?, + finished_at: row.get(1)?, + command: row.get(2)?, + error: row.get(3)?, + }) + }, + ); + + match result { + Ok(info) => Ok(Some(info)), + Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None), + Err(e) => Err(e).context("querying last sync run"), + } +} + +// --------------------------------------------------------------------------- +// Issue List +// --------------------------------------------------------------------------- + +/// Page size for issue list queries. +const ISSUE_PAGE_SIZE: usize = 50; + +/// Fetch a page of issues matching the given filter and sort. +/// +/// Uses keyset pagination: when `cursor` is `Some`, returns rows after +/// (less-than for DESC, greater-than for ASC) the cursor boundary. +/// When `snapshot_fence` is `Some`, limits results to rows updated_at <= fence +/// to prevent newly synced items from shifting the page window. 
+pub fn fetch_issue_list( + conn: &Connection, + filter: &IssueFilter, + sort_field: SortField, + sort_order: SortOrder, + cursor: Option<&IssueCursor>, + snapshot_fence: Option, +) -> Result { + // -- Build dynamic WHERE conditions and params -------------------------- + let mut conditions: Vec = Vec::new(); + let mut params: Vec> = Vec::new(); + + // Filter: project_id + if let Some(pid) = filter.project_id { + conditions.push("i.project_id = ?".into()); + params.push(Box::new(pid)); + } + + // Filter: state + if let Some(ref state) = filter.state { + conditions.push("i.state = ?".into()); + params.push(Box::new(state.clone())); + } + + // Filter: author + if let Some(ref author) = filter.author { + conditions.push("i.author_username = ?".into()); + params.push(Box::new(author.clone())); + } + + // Filter: label (via join) + let label_join = if let Some(ref label) = filter.label { + conditions.push("fl.name = ?".into()); + params.push(Box::new(label.clone())); + "JOIN issue_labels fil ON fil.issue_id = i.id \ + JOIN labels fl ON fl.id = fil.label_id" + } else { + "" + }; + + // Filter: free_text (LIKE on title) + if let Some(ref text) = filter.free_text { + conditions.push("i.title LIKE ?".into()); + params.push(Box::new(format!("%{text}%"))); + } + + // Snapshot fence + if let Some(fence) = snapshot_fence { + conditions.push("i.updated_at <= ?".into()); + params.push(Box::new(fence)); + } + + // -- Count query (before cursor filter) --------------------------------- + let where_clause = if conditions.is_empty() { + String::new() + } else { + format!("WHERE {}", conditions.join(" AND ")) + }; + + let count_sql = format!( + "SELECT COUNT(DISTINCT i.id) FROM issues i \ + JOIN projects p ON p.id = i.project_id \ + {label_join} {where_clause}" + ); + let count_params: Vec<&dyn rusqlite::types::ToSql> = + params.iter().map(|b| b.as_ref()).collect(); + + let total_count: i64 = conn + .query_row(&count_sql, count_params.as_slice(), |r| r.get(0)) + .context("counting 
issues for list")?; + + // -- Keyset cursor condition ------------------------------------------- + let (sort_col, sort_dir) = sort_column_and_dir(sort_field, sort_order); + let cursor_op = if sort_dir == "DESC" { "<" } else { ">" }; + + if let Some(c) = cursor { + conditions.push(format!("({sort_col}, i.iid) {cursor_op} (?, ?)")); + params.push(Box::new(c.updated_at)); + params.push(Box::new(c.iid)); + } + + // -- Data query --------------------------------------------------------- + let where_clause_full = if conditions.is_empty() { + String::new() + } else { + format!("WHERE {}", conditions.join(" AND ")) + }; + + let data_sql = format!( + "SELECT p.path_with_namespace, i.iid, i.title, i.state, \ + i.author_username, i.updated_at, \ + GROUP_CONCAT(DISTINCT l.name) AS label_names \ + FROM issues i \ + JOIN projects p ON p.id = i.project_id \ + {label_join} \ + LEFT JOIN issue_labels il ON il.issue_id = i.id \ + LEFT JOIN labels l ON l.id = il.label_id \ + {where_clause_full} \ + GROUP BY i.id \ + ORDER BY {sort_col} {sort_dir}, i.iid {sort_dir} \ + LIMIT ?" 
+ ); + + // +1 to detect if there's a next page + let fetch_limit = (ISSUE_PAGE_SIZE + 1) as i64; + params.push(Box::new(fetch_limit)); + + let all_params: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|b| b.as_ref()).collect(); + + let mut stmt = conn + .prepare(&data_sql) + .context("preparing issue list query")?; + + let rows_result = stmt + .query_map(all_params.as_slice(), |row| { + let project_path: String = row.get(0)?; + let iid: i64 = row.get(1)?; + let title: String = row.get::<_, Option>(2)?.unwrap_or_default(); + let state: String = row.get::<_, Option>(3)?.unwrap_or_default(); + let author: String = row.get::<_, Option>(4)?.unwrap_or_default(); + let updated_at: i64 = row.get(5)?; + let label_names: Option = row.get(6)?; + + let labels = label_names + .map(|s| s.split(',').map(String::from).collect()) + .unwrap_or_default(); + + Ok(IssueListRow { + project_path, + iid, + title, + state, + author, + labels, + updated_at, + }) + }) + .context("querying issue list")?; + + let mut rows: Vec = Vec::new(); + for row in rows_result { + rows.push(row.context("reading issue list row")?); + } + + // Determine next cursor from the last row (if we got more than page size) + let has_next = rows.len() > ISSUE_PAGE_SIZE; + if has_next { + rows.truncate(ISSUE_PAGE_SIZE); + } + + let next_cursor = if has_next { + rows.last().map(|r| IssueCursor { + updated_at: r.updated_at, + iid: r.iid, + }) + } else { + None + }; + + #[allow(clippy::cast_sign_loss)] + Ok(IssueListPage { + rows, + next_cursor, + total_count: total_count as u64, + }) +} + +/// Map sort field + order to SQL column name and direction keyword. 
+fn sort_column_and_dir(field: SortField, order: SortOrder) -> (&'static str, &'static str) { + let col = match field { + SortField::UpdatedAt => "i.updated_at", + SortField::Iid => "i.iid", + SortField::Title => "i.title", + SortField::State => "i.state", + SortField::Author => "i.author_username", + }; + let dir = match order { + SortOrder::Desc => "DESC", + SortOrder::Asc => "ASC", + }; + (col, dir) +} + +// --------------------------------------------------------------------------- +// MR List +// --------------------------------------------------------------------------- + +/// Page size for MR list queries. +const MR_PAGE_SIZE: usize = 50; + +/// Fetch a page of merge requests matching the given filter and sort. +/// +/// Uses keyset pagination and snapshot fence — same pattern as issues. +pub fn fetch_mr_list( + conn: &Connection, + filter: &MrFilter, + sort_field: MrSortField, + sort_order: MrSortOrder, + cursor: Option<&MrCursor>, + snapshot_fence: Option, +) -> Result { + // -- Build dynamic WHERE conditions and params -------------------------- + let mut conditions: Vec = Vec::new(); + let mut params: Vec> = Vec::new(); + + if let Some(pid) = filter.project_id { + conditions.push("m.project_id = ?".into()); + params.push(Box::new(pid)); + } + + if let Some(ref state) = filter.state { + conditions.push("m.state = ?".into()); + params.push(Box::new(state.clone())); + } + + if let Some(ref author) = filter.author { + conditions.push("m.author_username = ?".into()); + params.push(Box::new(author.clone())); + } + + if let Some(draft) = filter.draft { + conditions.push("m.draft = ?".into()); + params.push(Box::new(i64::from(draft))); + } + + if let Some(ref target) = filter.target_branch { + conditions.push("m.target_branch = ?".into()); + params.push(Box::new(target.clone())); + } + + if let Some(ref source) = filter.source_branch { + conditions.push("m.source_branch = ?".into()); + params.push(Box::new(source.clone())); + } + + // Filter: reviewer (via join 
on mr_reviewers) + let reviewer_join = if let Some(ref reviewer) = filter.reviewer { + conditions.push("rv.username = ?".into()); + params.push(Box::new(reviewer.clone())); + "JOIN mr_reviewers rv ON rv.merge_request_id = m.id" + } else { + "" + }; + + // Filter: label (via join on mr_labels + labels) + let label_join = if let Some(ref label) = filter.label { + conditions.push("fl.name = ?".into()); + params.push(Box::new(label.clone())); + "JOIN mr_labels fil ON fil.merge_request_id = m.id \ + JOIN labels fl ON fl.id = fil.label_id" + } else { + "" + }; + + // Filter: free_text (LIKE on title) + if let Some(ref text) = filter.free_text { + conditions.push("m.title LIKE ?".into()); + params.push(Box::new(format!("%{text}%"))); + } + + // Snapshot fence + if let Some(fence) = snapshot_fence { + conditions.push("m.updated_at <= ?".into()); + params.push(Box::new(fence)); + } + + // -- Count query (before cursor filter) --------------------------------- + let where_clause = if conditions.is_empty() { + String::new() + } else { + format!("WHERE {}", conditions.join(" AND ")) + }; + + let count_sql = format!( + "SELECT COUNT(DISTINCT m.id) FROM merge_requests m \ + JOIN projects p ON p.id = m.project_id \ + {reviewer_join} {label_join} {where_clause}" + ); + let count_params: Vec<&dyn rusqlite::types::ToSql> = + params.iter().map(|b| b.as_ref()).collect(); + + let total_count: i64 = conn + .query_row(&count_sql, count_params.as_slice(), |r| r.get(0)) + .context("counting MRs for list")?; + + // -- Keyset cursor condition ------------------------------------------- + let (sort_col, sort_dir) = mr_sort_column_and_dir(sort_field, sort_order); + let cursor_op = if sort_dir == "DESC" { "<" } else { ">" }; + + if let Some(c) = cursor { + conditions.push(format!("({sort_col}, m.iid) {cursor_op} (?, ?)")); + params.push(Box::new(c.updated_at)); + params.push(Box::new(c.iid)); + } + + // -- Data query --------------------------------------------------------- + let 
where_clause_full = if conditions.is_empty() { + String::new() + } else { + format!("WHERE {}", conditions.join(" AND ")) + }; + + let data_sql = format!( + "SELECT p.path_with_namespace, m.iid, m.title, m.state, \ + m.author_username, m.target_branch, m.updated_at, m.draft, \ + GROUP_CONCAT(DISTINCT l.name) AS label_names \ + FROM merge_requests m \ + JOIN projects p ON p.id = m.project_id \ + {reviewer_join} \ + {label_join} \ + LEFT JOIN mr_labels ml ON ml.merge_request_id = m.id \ + LEFT JOIN labels l ON l.id = ml.label_id \ + {where_clause_full} \ + GROUP BY m.id \ + ORDER BY {sort_col} {sort_dir}, m.iid {sort_dir} \ + LIMIT ?" + ); + + let fetch_limit = (MR_PAGE_SIZE + 1) as i64; + params.push(Box::new(fetch_limit)); + + let all_params: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|b| b.as_ref()).collect(); + + let mut stmt = conn.prepare(&data_sql).context("preparing MR list query")?; + + let rows_result = stmt + .query_map(all_params.as_slice(), |row| { + let project_path: String = row.get(0)?; + let iid: i64 = row.get(1)?; + let title: String = row.get::<_, Option>(2)?.unwrap_or_default(); + let state: String = row.get::<_, Option>(3)?.unwrap_or_default(); + let author: String = row.get::<_, Option>(4)?.unwrap_or_default(); + let target_branch: String = row.get::<_, Option>(5)?.unwrap_or_default(); + let updated_at: i64 = row.get(6)?; + let draft_int: i64 = row.get(7)?; + let label_names: Option = row.get(8)?; + + let labels = label_names + .map(|s| s.split(',').map(String::from).collect()) + .unwrap_or_default(); + + Ok(MrListRow { + project_path, + iid, + title, + state, + author, + target_branch, + labels, + updated_at, + draft: draft_int != 0, + }) + }) + .context("querying MR list")?; + + let mut rows: Vec = Vec::new(); + for row in rows_result { + rows.push(row.context("reading MR list row")?); + } + + let has_next = rows.len() > MR_PAGE_SIZE; + if has_next { + rows.truncate(MR_PAGE_SIZE); + } + + let next_cursor = if has_next { + 
rows.last().map(|r| MrCursor { + updated_at: r.updated_at, + iid: r.iid, + }) + } else { + None + }; + + #[allow(clippy::cast_sign_loss)] + Ok(MrListPage { + rows, + next_cursor, + total_count: total_count as u64, + }) +} + +/// Map MR sort field + order to SQL column name and direction keyword. +fn mr_sort_column_and_dir(field: MrSortField, order: MrSortOrder) -> (&'static str, &'static str) { + let col = match field { + MrSortField::UpdatedAt => "m.updated_at", + MrSortField::Iid => "m.iid", + MrSortField::Title => "m.title", + MrSortField::State => "m.state", + MrSortField::Author => "m.author_username", + MrSortField::TargetBranch => "m.target_branch", + }; + let dir = match order { + MrSortOrder::Desc => "DESC", + MrSortOrder::Asc => "ASC", + }; + (col, dir) +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::clock::FakeClock; + + /// Create the minimal schema needed for dashboard queries. 
+ fn create_dashboard_schema(conn: &Connection) { + conn.execute_batch( + " + CREATE TABLE projects ( + id INTEGER PRIMARY KEY, + gitlab_project_id INTEGER UNIQUE NOT NULL, + path_with_namespace TEXT NOT NULL + ); + CREATE TABLE issues ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT NOT NULL, + author_username TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE merge_requests ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + project_id INTEGER NOT NULL, + iid INTEGER NOT NULL, + title TEXT, + state TEXT, + author_username TEXT, + created_at INTEGER, + updated_at INTEGER, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE discussions ( + id INTEGER PRIMARY KEY, + gitlab_discussion_id TEXT NOT NULL, + project_id INTEGER NOT NULL, + noteable_type TEXT NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE notes ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER UNIQUE NOT NULL, + discussion_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + is_system INTEGER NOT NULL DEFAULT 0, + author_username TEXT, + body TEXT, + created_at INTEGER NOT NULL, + updated_at INTEGER NOT NULL, + last_seen_at INTEGER NOT NULL + ); + CREATE TABLE documents ( + id INTEGER PRIMARY KEY, + source_type TEXT NOT NULL, + source_id INTEGER NOT NULL, + project_id INTEGER NOT NULL, + content_text TEXT NOT NULL, + content_hash TEXT NOT NULL + ); + CREATE TABLE embedding_metadata ( + document_id INTEGER NOT NULL, + chunk_index INTEGER NOT NULL DEFAULT 0, + model TEXT NOT NULL, + dims INTEGER NOT NULL, + document_hash TEXT NOT NULL, + chunk_hash TEXT NOT NULL, + created_at INTEGER NOT NULL, + PRIMARY KEY(document_id, chunk_index) + ); + CREATE TABLE sync_runs ( + id INTEGER PRIMARY KEY, + started_at INTEGER NOT NULL, + heartbeat_at INTEGER NOT NULL, + finished_at INTEGER, + status TEXT NOT NULL, + command TEXT 
NOT NULL, + error TEXT + ); + ", + ) + .expect("create dashboard schema"); + } + + /// Insert a test issue. + fn insert_issue(conn: &Connection, iid: i64, state: &str, updated_at: i64) { + conn.execute( + "INSERT INTO issues (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at) + VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)", + rusqlite::params![iid * 100, iid, format!("Issue {iid}"), state, updated_at], + ) + .expect("insert issue"); + } + + /// Insert a test MR. + fn insert_mr(conn: &Connection, iid: i64, state: &str, updated_at: i64) { + conn.execute( + "INSERT INTO merge_requests (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at) + VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)", + rusqlite::params![iid * 100 + 50, iid, format!("MR {iid}"), state, updated_at], + ) + .expect("insert mr"); + } + + // ----------------------------------------------------------------------- + // TDD Anchor: entity counts + // ----------------------------------------------------------------------- + + #[test] + fn test_fetch_dashboard_counts() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + // 5 issues: 3 open, 2 closed. 
+ let now_ms = 1_700_000_000_000_i64; + insert_issue(&conn, 1, "opened", now_ms - 10_000); + insert_issue(&conn, 2, "opened", now_ms - 20_000); + insert_issue(&conn, 3, "opened", now_ms - 30_000); + insert_issue(&conn, 4, "closed", now_ms - 40_000); + insert_issue(&conn, 5, "closed", now_ms - 50_000); + + let clock = FakeClock::from_ms(now_ms); + let data = fetch_dashboard(&conn, &clock).unwrap(); + + assert_eq!(data.counts.issues_open, 3); + assert_eq!(data.counts.issues_total, 5); + } + + #[test] + fn test_fetch_dashboard_mr_counts() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + let now_ms = 1_700_000_000_000_i64; + insert_mr(&conn, 1, "opened", now_ms); + insert_mr(&conn, 2, "merged", now_ms); + insert_mr(&conn, 3, "opened", now_ms); + insert_mr(&conn, 4, "closed", now_ms); + + let clock = FakeClock::from_ms(now_ms); + let data = fetch_dashboard(&conn, &clock).unwrap(); + + assert_eq!(data.counts.mrs_open, 2); + assert_eq!(data.counts.mrs_total, 4); + } + + #[test] + fn test_fetch_dashboard_empty_database() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + let clock = FakeClock::from_ms(1_700_000_000_000); + let data = fetch_dashboard(&conn, &clock).unwrap(); + + assert_eq!(data.counts.issues_open, 0); + assert_eq!(data.counts.issues_total, 0); + assert_eq!(data.counts.mrs_open, 0); + assert_eq!(data.counts.mrs_total, 0); + assert_eq!(data.counts.notes_system_pct, 0); + assert!(data.projects.is_empty()); + assert!(data.recent.is_empty()); + assert!(data.last_sync.is_none()); + } + + #[test] + fn test_fetch_dashboard_notes_system_pct() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + // 4 notes: 1 system, 3 user -> 25% system. 
+ for i in 0..4 { + conn.execute( + "INSERT INTO notes (gitlab_id, discussion_id, project_id, is_system, created_at, updated_at, last_seen_at) + VALUES (?1, 1, 1, ?2, 1000, 1000, 1000)", + rusqlite::params![i, if i == 0 { 1 } else { 0 }], + ) + .unwrap(); + } + + let clock = FakeClock::from_ms(1_700_000_000_000); + let data = fetch_dashboard(&conn, &clock).unwrap(); + + assert_eq!(data.counts.notes_total, 4); + assert_eq!(data.counts.notes_system_pct, 25); + } + + #[test] + fn test_fetch_dashboard_project_sync_info() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + conn.execute( + "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'group/alpha')", + [], + ) + .unwrap(); + conn.execute( + "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (2, 'group/beta')", + [], + ) + .unwrap(); + + // Sync ran 30 minutes ago. sync_runs is global (no project_id), + // so all projects see the same last-sync time. + let now_ms = 1_700_000_000_000_i64; + conn.execute( + "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command) + VALUES (?1, ?1, ?2, 'succeeded', 'sync')", + [now_ms - 30 * 60_000, now_ms - 30 * 60_000], + ) + .unwrap(); + + let clock = FakeClock::from_ms(now_ms); + let data = fetch_dashboard(&conn, &clock).unwrap(); + + assert_eq!(data.projects.len(), 2); + assert_eq!(data.projects[0].path, "group/alpha"); + assert_eq!(data.projects[0].minutes_since_sync, 30); + assert_eq!(data.projects[1].path, "group/beta"); + assert_eq!(data.projects[1].minutes_since_sync, 30); // Same: sync_runs is global. 
+ } + + #[test] + fn test_fetch_dashboard_recent_activity_ordered() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + let now_ms = 1_700_000_000_000_i64; + insert_issue(&conn, 1, "opened", now_ms - 60_000); // 1 min ago + insert_mr(&conn, 1, "merged", now_ms - 120_000); // 2 min ago + insert_issue(&conn, 2, "closed", now_ms - 180_000); // 3 min ago + + let clock = FakeClock::from_ms(now_ms); + let data = fetch_dashboard(&conn, &clock).unwrap(); + + assert_eq!(data.recent.len(), 3); + assert_eq!(data.recent[0].entity_type, "issue"); + assert_eq!(data.recent[0].iid, 1); + assert_eq!(data.recent[0].minutes_ago, 1); + assert_eq!(data.recent[1].entity_type, "mr"); + assert_eq!(data.recent[1].minutes_ago, 2); + assert_eq!(data.recent[2].entity_type, "issue"); + assert_eq!(data.recent[2].minutes_ago, 3); + } + + #[test] + fn test_fetch_dashboard_last_sync() { + let conn = Connection::open_in_memory().unwrap(); + create_dashboard_schema(&conn); + + let now_ms = 1_700_000_000_000_i64; + conn.execute( + "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command, error) + VALUES (?1, ?1, ?2, 'failed', 'sync', 'network timeout')", + [now_ms - 60_000, now_ms - 50_000], + ) + .unwrap(); + conn.execute( + "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command) + VALUES (?1, ?1, ?2, 'succeeded', 'sync')", + [now_ms - 30_000, now_ms - 20_000], + ) + .unwrap(); + + let clock = FakeClock::from_ms(now_ms); + let data = fetch_dashboard(&conn, &clock).unwrap(); + + let sync = data.last_sync.unwrap(); + assert_eq!(sync.status, "succeeded"); + assert_eq!(sync.command, "sync"); + assert!(sync.error.is_none()); + } + + // ----------------------------------------------------------------------- + // Issue list + // ----------------------------------------------------------------------- + + /// Extended schema that adds labels + issue_labels for issue list tests. 
+ fn create_issue_list_schema(conn: &Connection) { + create_dashboard_schema(conn); + conn.execute_batch( + " + CREATE TABLE labels ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER, + project_id INTEGER NOT NULL, + name TEXT NOT NULL, + color TEXT, + description TEXT + ); + CREATE TABLE issue_labels ( + issue_id INTEGER NOT NULL, + label_id INTEGER NOT NULL, + PRIMARY KEY(issue_id, label_id) + ); + ", + ) + .expect("create issue list schema"); + } + + /// Insert a test issue with an author. + fn insert_issue_full(conn: &Connection, iid: i64, state: &str, author: &str, updated_at: i64) { + conn.execute( + "INSERT INTO issues (gitlab_id, project_id, iid, title, state, author_username, created_at, updated_at, last_seen_at) + VALUES (?1, 1, ?2, ?3, ?4, ?5, ?6, ?6, ?6)", + rusqlite::params![ + iid * 100, + iid, + format!("Issue {iid}"), + state, + author, + updated_at + ], + ) + .expect("insert issue full"); + } + + /// Attach a label to an issue. + fn attach_label(conn: &Connection, issue_iid: i64, label_name: &str) { + // Find issue id. + let issue_id: i64 = conn + .query_row("SELECT id FROM issues WHERE iid = ?", [issue_iid], |r| { + r.get(0) + }) + .expect("find issue"); + + // Ensure label exists. 
+ conn.execute( + "INSERT OR IGNORE INTO labels (project_id, name) VALUES (1, ?)", + [label_name], + ) + .expect("insert label"); + let label_id: i64 = conn + .query_row("SELECT id FROM labels WHERE name = ?", [label_name], |r| { + r.get(0) + }) + .expect("find label"); + + conn.execute( + "INSERT INTO issue_labels (issue_id, label_id) VALUES (?, ?)", + [issue_id, label_id], + ) + .expect("attach label"); + } + + fn setup_issue_list_data(conn: &Connection) { + let base = 1_700_000_000_000_i64; + conn.execute( + "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'group/project')", + [], + ) + .unwrap(); + + insert_issue_full(conn, 1, "opened", "alice", base - 10_000); + insert_issue_full(conn, 2, "opened", "bob", base - 20_000); + insert_issue_full(conn, 3, "closed", "alice", base - 30_000); + insert_issue_full(conn, 4, "opened", "charlie", base - 40_000); + insert_issue_full(conn, 5, "closed", "bob", base - 50_000); + + attach_label(conn, 1, "bug"); + attach_label(conn, 1, "critical"); + attach_label(conn, 2, "feature"); + attach_label(conn, 4, "bug"); + } + + #[test] + fn test_fetch_issue_list_basic() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let filter = IssueFilter::default(); + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 5); + assert_eq!(page.rows.len(), 5); + // Newest first. 
+ assert_eq!(page.rows[0].iid, 1); + assert_eq!(page.rows[4].iid, 5); + assert!(page.next_cursor.is_none()); + } + + #[test] + fn test_fetch_issue_list_filter_state() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let filter = IssueFilter { + state: Some("opened".into()), + ..Default::default() + }; + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 3); + assert_eq!(page.rows.len(), 3); + assert!(page.rows.iter().all(|r| r.state == "opened")); + } + + #[test] + fn test_fetch_issue_list_filter_author() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let filter = IssueFilter { + author: Some("alice".into()), + ..Default::default() + }; + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 2); + assert_eq!(page.rows.len(), 2); + assert!(page.rows.iter().all(|r| r.author == "alice")); + } + + #[test] + fn test_fetch_issue_list_filter_label() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let filter = IssueFilter { + label: Some("bug".into()), + ..Default::default() + }; + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 2); // issues 1 and 4 + assert_eq!(page.rows.len(), 2); + } + + #[test] + fn test_fetch_issue_list_labels_aggregated() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let filter = IssueFilter::default(); + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Desc, + None, + None, + ) + .unwrap(); + 
+ // Issue 1 has labels "bug" and "critical". + let issue1 = page.rows.iter().find(|r| r.iid == 1).unwrap(); + assert_eq!(issue1.labels.len(), 2); + assert!(issue1.labels.contains(&"bug".to_string())); + assert!(issue1.labels.contains(&"critical".to_string())); + + // Issue 5 has no labels. + let issue5 = page.rows.iter().find(|r| r.iid == 5).unwrap(); + assert!(issue5.labels.is_empty()); + } + + #[test] + fn test_fetch_issue_list_sort_ascending() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let filter = IssueFilter::default(); + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Asc, + None, + None, + ) + .unwrap(); + + // Oldest first. + assert_eq!(page.rows[0].iid, 5); + assert_eq!(page.rows[4].iid, 1); + } + + #[test] + fn test_fetch_issue_list_snapshot_fence() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let base = 1_700_000_000_000_i64; + // Fence at base-25000: should exclude issues 1 (at base-10000) and 2 (at base-20000). 
+ let fence = base - 25_000; + let filter = IssueFilter::default(); + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Desc, + None, + Some(fence), + ) + .unwrap(); + + assert_eq!(page.total_count, 3); + assert_eq!(page.rows.len(), 3); + assert!(page.rows.iter().all(|r| r.updated_at <= fence)); + } + + #[test] + fn test_fetch_issue_list_empty() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + conn.execute( + "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'g/p')", + [], + ) + .unwrap(); + + let page = fetch_issue_list( + &conn, + &IssueFilter::default(), + SortField::UpdatedAt, + SortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 0); + assert!(page.rows.is_empty()); + assert!(page.next_cursor.is_none()); + } + + #[test] + fn test_fetch_issue_list_free_text() { + let conn = Connection::open_in_memory().unwrap(); + create_issue_list_schema(&conn); + setup_issue_list_data(&conn); + + let filter = IssueFilter { + free_text: Some("Issue 3".into()), + ..Default::default() + }; + let page = fetch_issue_list( + &conn, + &filter, + SortField::UpdatedAt, + SortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 1); + assert_eq!(page.rows[0].iid, 3); + } + + // ----------------------------------------------------------------------- + // MR list + // ----------------------------------------------------------------------- + + /// Extended schema adding mr_labels, mr_reviewers for MR list tests. 
+ fn create_mr_list_schema(conn: &Connection) { + create_dashboard_schema(conn); + conn.execute_batch( + " + CREATE TABLE labels ( + id INTEGER PRIMARY KEY, + gitlab_id INTEGER, + project_id INTEGER NOT NULL, + name TEXT NOT NULL, + color TEXT, + description TEXT + ); + CREATE TABLE mr_labels ( + merge_request_id INTEGER NOT NULL, + label_id INTEGER NOT NULL, + PRIMARY KEY(merge_request_id, label_id) + ); + CREATE TABLE mr_reviewers ( + merge_request_id INTEGER NOT NULL, + username TEXT NOT NULL, + PRIMARY KEY(merge_request_id, username) + ); + ALTER TABLE merge_requests ADD COLUMN draft INTEGER NOT NULL DEFAULT 0; + ALTER TABLE merge_requests ADD COLUMN target_branch TEXT; + ALTER TABLE merge_requests ADD COLUMN source_branch TEXT; + ", + ) + .expect("create MR list schema"); + } + + /// Insert a test MR with full fields. + fn insert_mr_full( + conn: &Connection, + iid: i64, + state: &str, + author: &str, + target_branch: &str, + draft: bool, + updated_at: i64, + ) { + conn.execute( + "INSERT INTO merge_requests \ + (gitlab_id, project_id, iid, title, state, author_username, \ + target_branch, draft, created_at, updated_at, last_seen_at) \ + VALUES (?1, 1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?8, ?8)", + rusqlite::params![ + iid * 100 + 50, + iid, + format!("MR {iid}"), + state, + author, + target_branch, + i64::from(draft), + updated_at, + ], + ) + .expect("insert mr full"); + } + + /// Attach a label to an MR. 
+ fn attach_mr_label(conn: &Connection, mr_iid: i64, label_name: &str) { + let mr_id: i64 = conn + .query_row( + "SELECT id FROM merge_requests WHERE iid = ?", + [mr_iid], + |r| r.get(0), + ) + .expect("find mr"); + + conn.execute( + "INSERT OR IGNORE INTO labels (project_id, name) VALUES (1, ?)", + [label_name], + ) + .expect("insert label"); + let label_id: i64 = conn + .query_row("SELECT id FROM labels WHERE name = ?", [label_name], |r| { + r.get(0) + }) + .expect("find label"); + + conn.execute( + "INSERT INTO mr_labels (merge_request_id, label_id) VALUES (?, ?)", + [mr_id, label_id], + ) + .expect("attach mr label"); + } + + /// Add a reviewer to an MR. + fn add_mr_reviewer(conn: &Connection, mr_iid: i64, username: &str) { + let mr_id: i64 = conn + .query_row( + "SELECT id FROM merge_requests WHERE iid = ?", + [mr_iid], + |r| r.get(0), + ) + .expect("find mr"); + + conn.execute( + "INSERT INTO mr_reviewers (merge_request_id, username) VALUES (?, ?)", + rusqlite::params![mr_id, username], + ) + .expect("add mr reviewer"); + } + + fn setup_mr_list_data(conn: &Connection) { + let base = 1_700_000_000_000_i64; + conn.execute( + "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'group/project')", + [], + ) + .unwrap(); + + insert_mr_full(conn, 1, "opened", "alice", "main", false, base - 10_000); + insert_mr_full(conn, 2, "opened", "bob", "main", true, base - 20_000); + insert_mr_full(conn, 3, "merged", "alice", "develop", false, base - 30_000); + insert_mr_full(conn, 4, "opened", "charlie", "main", true, base - 40_000); + insert_mr_full(conn, 5, "closed", "bob", "release", false, base - 50_000); + + attach_mr_label(conn, 1, "backend"); + attach_mr_label(conn, 1, "urgent"); + attach_mr_label(conn, 2, "frontend"); + attach_mr_label(conn, 4, "backend"); + + add_mr_reviewer(conn, 1, "diana"); + add_mr_reviewer(conn, 2, "diana"); + add_mr_reviewer(conn, 3, "edward"); + } + + #[test] + fn test_fetch_mr_list_basic() { + let conn = 
Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter::default(); + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 5); + assert_eq!(page.rows.len(), 5); + assert_eq!(page.rows[0].iid, 1); // newest first + assert_eq!(page.rows[4].iid, 5); + assert!(page.next_cursor.is_none()); + } + + #[test] + fn test_fetch_mr_list_filter_state() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter { + state: Some("opened".into()), + ..Default::default() + }; + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 3); + assert!(page.rows.iter().all(|r| r.state == "opened")); + } + + #[test] + fn test_fetch_mr_list_filter_draft() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter { + draft: Some(true), + ..Default::default() + }; + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 2); // MRs 2 and 4 + assert!(page.rows.iter().all(|r| r.draft)); + } + + #[test] + fn test_fetch_mr_list_filter_target_branch() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter { + target_branch: Some("main".into()), + ..Default::default() + }; + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 3); // MRs 1, 2, 4 + assert!(page.rows.iter().all(|r| r.target_branch == "main")); + } + + #[test] + fn test_fetch_mr_list_filter_reviewer() { + let 
conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter { + reviewer: Some("diana".into()), + ..Default::default() + }; + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 2); // MRs 1 and 2 + } + + #[test] + fn test_fetch_mr_list_filter_label() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter { + label: Some("backend".into()), + ..Default::default() + }; + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 2); // MRs 1 and 4 + } + + #[test] + fn test_fetch_mr_list_labels_aggregated() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter::default(); + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + let mr1 = page.rows.iter().find(|r| r.iid == 1).unwrap(); + assert_eq!(mr1.labels.len(), 2); + assert!(mr1.labels.contains(&"backend".to_string())); + assert!(mr1.labels.contains(&"urgent".to_string())); + + let mr5 = page.rows.iter().find(|r| r.iid == 5).unwrap(); + assert!(mr5.labels.is_empty()); + } + + #[test] + fn test_fetch_mr_list_sort_ascending() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter::default(); + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Asc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.rows[0].iid, 5); // oldest first + assert_eq!(page.rows[4].iid, 1); + } + + #[test] + fn test_fetch_mr_list_snapshot_fence() { + let conn = Connection::open_in_memory().unwrap(); + 
create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let base = 1_700_000_000_000_i64; + let fence = base - 25_000; + let filter = MrFilter::default(); + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + Some(fence), + ) + .unwrap(); + + assert_eq!(page.total_count, 3); + assert!(page.rows.iter().all(|r| r.updated_at <= fence)); + } + + #[test] + fn test_fetch_mr_list_empty() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + conn.execute( + "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'g/p')", + [], + ) + .unwrap(); + + let page = fetch_mr_list( + &conn, + &MrFilter::default(), + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 0); + assert!(page.rows.is_empty()); + assert!(page.next_cursor.is_none()); + } + + #[test] + fn test_fetch_mr_list_free_text() { + let conn = Connection::open_in_memory().unwrap(); + create_mr_list_schema(&conn); + setup_mr_list_data(&conn); + + let filter = MrFilter { + free_text: Some("MR 3".into()), + ..Default::default() + }; + let page = fetch_mr_list( + &conn, + &filter, + MrSortField::UpdatedAt, + MrSortOrder::Desc, + None, + None, + ) + .unwrap(); + + assert_eq!(page.total_count, 1); + assert_eq!(page.rows[0].iid, 3); + } +} diff --git a/crates/lore-tui/src/app/mod.rs b/crates/lore-tui/src/app/mod.rs new file mode 100644 index 0000000..8048f0e --- /dev/null +++ b/crates/lore-tui/src/app/mod.rs @@ -0,0 +1,73 @@ +#![allow(dead_code)] // Phase 1: methods consumed as screens are implemented + +//! Full FrankenTUI Model implementation for the lore TUI. +//! +//! LoreApp is the central coordinator: it owns all state, dispatches +//! messages through a 5-stage key pipeline, records crash context +//! breadcrumbs, manages async tasks via the supervisor, and routes +//! view() to per-screen render functions. 
+ +mod tests; +mod update; + +use crate::clock::{Clock, SystemClock}; +use crate::commands::{CommandRegistry, build_registry}; +use crate::crash_context::CrashContext; +use crate::db::DbManager; +use crate::message::InputMode; +use crate::navigation::NavigationStack; +use crate::state::AppState; +use crate::task_supervisor::TaskSupervisor; + +// --------------------------------------------------------------------------- +// LoreApp +// --------------------------------------------------------------------------- + +/// Root model for the lore TUI. +/// +/// Owns all state and implements the FrankenTUI Model trait. The +/// update() method is the single entry point for all state transitions. +pub struct LoreApp { + pub state: AppState, + pub navigation: NavigationStack, + pub supervisor: TaskSupervisor, + pub crash_context: CrashContext, + pub command_registry: CommandRegistry, + pub input_mode: InputMode, + pub clock: Box, + pub db: Option, +} + +impl LoreApp { + /// Create a new LoreApp with default state. + /// + /// Uses a real system clock and no DB connection (set separately). + #[must_use] + pub fn new() -> Self { + Self { + state: AppState::default(), + navigation: NavigationStack::new(), + supervisor: TaskSupervisor::new(), + crash_context: CrashContext::new(), + command_registry: build_registry(), + input_mode: InputMode::Normal, + clock: Box::new(SystemClock), + db: None, + } + } + + /// Create a LoreApp for testing with a custom clock. + #[cfg(test)] + fn with_clock(clock: Box) -> Self { + Self { + clock, + ..Self::new() + } + } +} + +impl Default for LoreApp { + fn default() -> Self { + Self::new() + } +} diff --git a/crates/lore-tui/src/app/tests.rs b/crates/lore-tui/src/app/tests.rs new file mode 100644 index 0000000..f35d8e8 --- /dev/null +++ b/crates/lore-tui/src/app/tests.rs @@ -0,0 +1,330 @@ +//! Tests for LoreApp. 
+ +#![cfg(test)] + +use chrono::TimeDelta; +use ftui::{Cmd, Event, KeyCode, KeyEvent, Model, Modifiers}; + +use crate::clock::FakeClock; +use crate::message::{InputMode, Msg, Screen}; + +use super::LoreApp; + +fn test_app() -> LoreApp { + LoreApp::with_clock(Box::new(FakeClock::new(chrono::Utc::now()))) +} + +/// Verify that `App::fullscreen(LoreApp::new()).run()` compiles. +fn _assert_app_fullscreen_compiles() { + fn _inner() { + use ftui::App; + let _app_builder = App::fullscreen(LoreApp::new()); + } +} + +/// Verify that `App::inline(LoreApp::new(), 12).run()` compiles. +fn _assert_app_inline_compiles() { + fn _inner() { + use ftui::App; + let _app_builder = App::inline(LoreApp::new(), 12); + } +} + +#[test] +fn test_lore_app_init_returns_none() { + let mut app = test_app(); + let cmd = app.init(); + assert!(matches!(cmd, Cmd::None)); +} + +#[test] +fn test_lore_app_quit_returns_quit_cmd() { + let mut app = test_app(); + let cmd = app.update(Msg::Quit); + assert!(matches!(cmd, Cmd::Quit)); +} + +#[test] +fn test_lore_app_tick_returns_none() { + let mut app = test_app(); + let cmd = app.update(Msg::Tick); + assert!(matches!(cmd, Cmd::None)); +} + +#[test] +fn test_lore_app_navigate_to_updates_nav_stack() { + let mut app = test_app(); + let cmd = app.update(Msg::NavigateTo(Screen::IssueList)); + assert!(matches!(cmd, Cmd::None)); + assert!(app.navigation.is_at(&Screen::IssueList)); + assert_eq!(app.navigation.depth(), 2); +} + +#[test] +fn test_lore_app_go_back() { + let mut app = test_app(); + app.update(Msg::NavigateTo(Screen::IssueList)); + app.update(Msg::GoBack); + assert!(app.navigation.is_at(&Screen::Dashboard)); +} + +#[test] +fn test_lore_app_go_forward() { + let mut app = test_app(); + app.update(Msg::NavigateTo(Screen::IssueList)); + app.update(Msg::GoBack); + app.update(Msg::GoForward); + assert!(app.navigation.is_at(&Screen::IssueList)); +} + +#[test] +fn test_ctrl_c_always_quits() { + let mut app = test_app(); + let key = 
KeyEvent::new(KeyCode::Char('c')).with_modifiers(Modifiers::CTRL); + let cmd = app.update(Msg::RawEvent(Event::Key(key))); + assert!(matches!(cmd, Cmd::Quit)); +} + +#[test] +fn test_q_key_quits_in_normal_mode() { + let mut app = test_app(); + let key = KeyEvent::new(KeyCode::Char('q')); + let cmd = app.update(Msg::RawEvent(Event::Key(key))); + assert!(matches!(cmd, Cmd::Quit)); +} + +#[test] +fn test_q_key_blocked_in_text_mode() { + let mut app = test_app(); + app.input_mode = InputMode::Text; + let key = KeyEvent::new(KeyCode::Char('q')); + let cmd = app.update(Msg::RawEvent(Event::Key(key))); + // q in text mode should NOT quit. + assert!(matches!(cmd, Cmd::None)); +} + +#[test] +fn test_esc_blurs_text_mode() { + let mut app = test_app(); + app.input_mode = InputMode::Text; + app.state.search.query_focused = true; + + let key = KeyEvent::new(KeyCode::Escape); + app.update(Msg::RawEvent(Event::Key(key))); + + assert!(matches!(app.input_mode, InputMode::Normal)); + assert!(!app.state.has_text_focus()); +} + +#[test] +fn test_g_prefix_enters_go_mode() { + let mut app = test_app(); + let key = KeyEvent::new(KeyCode::Char('g')); + app.update(Msg::RawEvent(Event::Key(key))); + assert!(matches!(app.input_mode, InputMode::GoPrefix { .. })); +} + +#[test] +fn test_g_then_i_navigates_to_issues() { + let mut app = test_app(); + + // First key: 'g' + let key_g = KeyEvent::new(KeyCode::Char('g')); + app.update(Msg::RawEvent(Event::Key(key_g))); + + // Second key: 'i' + let key_i = KeyEvent::new(KeyCode::Char('i')); + app.update(Msg::RawEvent(Event::Key(key_i))); + + assert!(app.navigation.is_at(&Screen::IssueList)); +} + +#[test] +fn test_go_prefix_timeout_cancels() { + let clock = FakeClock::new(chrono::Utc::now()); + let mut app = LoreApp::with_clock(Box::new(clock.clone())); + + // Press 'g'. + let key_g = KeyEvent::new(KeyCode::Char('g')); + app.update(Msg::RawEvent(Event::Key(key_g))); + assert!(matches!(app.input_mode, InputMode::GoPrefix { .. 
})); + + // Advance clock past timeout. + clock.advance(TimeDelta::milliseconds(600)); + + // Press 'i' after timeout — should NOT navigate to issues. + let key_i = KeyEvent::new(KeyCode::Char('i')); + app.update(Msg::RawEvent(Event::Key(key_i))); + + // Should still be at Dashboard (no navigation happened). + assert!(app.navigation.is_at(&Screen::Dashboard)); + assert!(matches!(app.input_mode, InputMode::Normal)); +} + +#[test] +fn test_show_help_toggles() { + let mut app = test_app(); + assert!(!app.state.show_help); + + app.update(Msg::ShowHelp); + assert!(app.state.show_help); + + app.update(Msg::ShowHelp); + assert!(!app.state.show_help); +} + +#[test] +fn test_error_msg_sets_toast() { + let mut app = test_app(); + app.update(Msg::Error(crate::message::AppError::DbBusy)); + assert!(app.state.error_toast.is_some()); + assert!(app.state.error_toast.as_ref().unwrap().contains("busy")); +} + +#[test] +fn test_resize_updates_terminal_size() { + let mut app = test_app(); + app.update(Msg::Resize { + width: 120, + height: 40, + }); + assert_eq!(app.state.terminal_size, (120, 40)); +} + +#[test] +fn test_stale_result_dropped() { + use crate::message::Screen; + use crate::task_supervisor::TaskKey; + + let mut app = test_app(); + + // Submit two tasks for IssueList — second supersedes first. + let gen1 = app + .supervisor + .submit(TaskKey::LoadScreen(Screen::IssueList)) + .generation; + let gen2 = app + .supervisor + .submit(TaskKey::LoadScreen(Screen::IssueList)) + .generation; + + // Stale result with gen1 should be ignored. 
+ app.update(Msg::IssueListLoaded { + generation: gen1, + page: crate::state::issue_list::IssueListPage { + rows: vec![crate::state::issue_list::IssueListRow { + project_path: "group/project".into(), + iid: 1, + title: "stale".into(), + state: "opened".into(), + author: "taylor".into(), + labels: vec![], + updated_at: 1_700_000_000_000, + }], + next_cursor: None, + total_count: 1, + }, + }); + assert!(app.state.issue_list.rows.is_empty()); + + // Current result with gen2 should be applied. + app.update(Msg::IssueListLoaded { + generation: gen2, + page: crate::state::issue_list::IssueListPage { + rows: vec![crate::state::issue_list::IssueListRow { + project_path: "group/project".into(), + iid: 2, + title: "fresh".into(), + state: "opened".into(), + author: "taylor".into(), + labels: vec![], + updated_at: 1_700_000_000_000, + }], + next_cursor: None, + total_count: 1, + }, + }); + assert_eq!(app.state.issue_list.rows.len(), 1); + assert_eq!(app.state.issue_list.rows[0].title, "fresh"); +} + +#[test] +fn test_crash_context_records_events() { + let mut app = test_app(); + app.update(Msg::Tick); + app.update(Msg::NavigateTo(Screen::IssueList)); + + // Should have recorded at least 2 events. + assert!(app.crash_context.len() >= 2); +} + +#[test] +fn test_navigate_sets_loading_initial_on_first_visit() { + use crate::state::LoadState; + + let mut app = test_app(); + app.update(Msg::NavigateTo(Screen::IssueList)); + // First visit should show full-screen spinner (LoadingInitial). + assert_eq!( + *app.state.load_state.get(&Screen::IssueList), + LoadState::LoadingInitial + ); +} + +#[test] +fn test_navigate_sets_refreshing_on_revisit() { + use crate::state::LoadState; + + let mut app = test_app(); + // First visit → LoadingInitial. + app.update(Msg::NavigateTo(Screen::IssueList)); + // Simulate load completing. + app.state.set_loading(Screen::IssueList, LoadState::Idle); + // Go back, then revisit. 
+ app.update(Msg::GoBack); + app.update(Msg::NavigateTo(Screen::IssueList)); + // Second visit should show corner spinner (Refreshing). + assert_eq!( + *app.state.load_state.get(&Screen::IssueList), + LoadState::Refreshing + ); +} + +#[test] +fn test_command_palette_opens_from_ctrl_p() { + let mut app = test_app(); + let key = KeyEvent::new(KeyCode::Char('p')).with_modifiers(Modifiers::CTRL); + app.update(Msg::RawEvent(Event::Key(key))); + assert!(matches!(app.input_mode, InputMode::Palette)); + assert!(app.state.command_palette.query_focused); +} + +#[test] +fn test_esc_closes_palette() { + let mut app = test_app(); + app.input_mode = InputMode::Palette; + + let key = KeyEvent::new(KeyCode::Escape); + app.update(Msg::RawEvent(Event::Key(key))); + + assert!(matches!(app.input_mode, InputMode::Normal)); +} + +#[test] +fn test_blur_text_input_msg() { + let mut app = test_app(); + app.input_mode = InputMode::Text; + app.state.search.query_focused = true; + + app.update(Msg::BlurTextInput); + + assert!(matches!(app.input_mode, InputMode::Normal)); + assert!(!app.state.has_text_focus()); +} + +#[test] +fn test_default_is_new() { + let app = LoreApp::default(); + assert!(app.navigation.is_at(&Screen::Dashboard)); + assert!(matches!(app.input_mode, InputMode::Normal)); +} diff --git a/crates/lore-tui/src/app.rs b/crates/lore-tui/src/app/update.rs similarity index 50% rename from crates/lore-tui/src/app.rs rename to crates/lore-tui/src/app/update.rs index 3909246..c4f8fad 100644 --- a/crates/lore-tui/src/app.rs +++ b/crates/lore-tui/src/app/update.rs @@ -1,73 +1,19 @@ -#![allow(dead_code)] // Phase 1: methods consumed as screens are implemented - -//! Full FrankenTUI Model implementation for the lore TUI. -//! -//! LoreApp is the central coordinator: it owns all state, dispatches -//! messages through a 5-stage key pipeline, records crash context -//! breadcrumbs, manages async tasks via the supervisor, and routes -//! view() to per-screen render functions. +//! 
Model trait impl and key dispatch for LoreApp. use chrono::TimeDelta; use ftui::{Cmd, Event, Frame, KeyCode, KeyEvent, Model, Modifiers}; -use crate::clock::{Clock, SystemClock}; -use crate::commands::{CommandRegistry, build_registry}; -use crate::crash_context::{CrashContext, CrashEvent}; -use crate::db::DbManager; +use crate::crash_context::CrashEvent; use crate::message::{InputMode, Msg, Screen}; -use crate::navigation::NavigationStack; -use crate::state::{AppState, LoadState}; -use crate::task_supervisor::{TaskKey, TaskSupervisor}; +use crate::state::LoadState; +use crate::task_supervisor::TaskKey; + +use super::LoreApp; /// Timeout for the g-prefix key sequence. const GO_PREFIX_TIMEOUT: TimeDelta = TimeDelta::milliseconds(500); -// --------------------------------------------------------------------------- -// LoreApp -// --------------------------------------------------------------------------- - -/// Root model for the lore TUI. -/// -/// Owns all state and implements the FrankenTUI Model trait. The -/// update() method is the single entry point for all state transitions. -pub struct LoreApp { - pub state: AppState, - pub navigation: NavigationStack, - pub supervisor: TaskSupervisor, - pub crash_context: CrashContext, - pub command_registry: CommandRegistry, - pub input_mode: InputMode, - pub clock: Box, - pub db: Option, -} - impl LoreApp { - /// Create a new LoreApp with default state. - /// - /// Uses a real system clock and no DB connection (set separately). - #[must_use] - pub fn new() -> Self { - Self { - state: AppState::default(), - navigation: NavigationStack::new(), - supervisor: TaskSupervisor::new(), - crash_context: CrashContext::new(), - command_registry: build_registry(), - input_mode: InputMode::Normal, - clock: Box::new(SystemClock), - db: None, - } - } - - /// Create a LoreApp for testing with a custom clock. 
- #[cfg(test)] - fn with_clock(clock: Box) -> Self { - Self { - clock, - ..Self::new() - } - } - // ----------------------------------------------------------------------- // Key dispatch // ----------------------------------------------------------------------- @@ -84,7 +30,7 @@ impl LoreApp { /// 5-stage key dispatch pipeline. /// /// Returns the Cmd to execute (Quit, None, or a task command). - fn interpret_key(&mut self, mut key: KeyEvent) -> Cmd { + pub(crate) fn interpret_key(&mut self, mut key: KeyEvent) -> Cmd { Self::normalize_key(&mut key); let screen = self.navigation.current().clone(); @@ -269,8 +215,14 @@ impl LoreApp { }); self.navigation.push(screen.clone()); - self.state - .set_loading(screen.clone(), LoadState::Refreshing); + + // First visit → full-screen spinner; revisit → corner spinner over stale data. + let load_state = if self.state.load_state.was_visited(&screen) { + LoadState::Refreshing + } else { + LoadState::LoadingInitial + }; + self.state.set_loading(screen.clone(), load_state); // Spawn supervised task for data loading (placeholder — actual DB // query dispatch comes in Phase 2 screen implementations). @@ -284,7 +236,7 @@ impl LoreApp { // ----------------------------------------------------------------------- /// Handle non-key messages. - fn handle_msg(&mut self, msg: Msg) -> Cmd { + pub(crate) fn handle_msg(&mut self, msg: Msg) -> Cmd { // Record in crash context. 
self.crash_context.push(CrashEvent::MsgDispatched { msg_name: format!("{msg:?}") @@ -343,24 +295,24 @@ impl LoreApp { Msg::Tick => Cmd::none(), // --- Loaded results (stale guard) --- - Msg::IssueListLoaded { generation, rows } => { + Msg::IssueListLoaded { generation, page } => { if self .supervisor .is_current(&TaskKey::LoadScreen(Screen::IssueList), generation) { - self.state.issue_list.rows = rows; + self.state.issue_list.apply_page(page); self.state.set_loading(Screen::IssueList, LoadState::Idle); self.supervisor .complete(&TaskKey::LoadScreen(Screen::IssueList), generation); } Cmd::none() } - Msg::MrListLoaded { generation, rows } => { + Msg::MrListLoaded { generation, page } => { if self .supervisor .is_current(&TaskKey::LoadScreen(Screen::MrList), generation) { - self.state.mr_list.rows = rows; + self.state.mr_list.apply_page(page); self.state.set_loading(Screen::MrList, LoadState::Idle); self.supervisor .complete(&TaskKey::LoadScreen(Screen::MrList), generation); @@ -372,8 +324,7 @@ impl LoreApp { .supervisor .is_current(&TaskKey::LoadScreen(Screen::Dashboard), generation) { - self.state.dashboard.issue_count = data.issue_count; - self.state.dashboard.mr_count = data.mr_count; + self.state.dashboard.update(*data); self.state.set_loading(Screen::Dashboard, LoadState::Idle); self.supervisor .complete(&TaskKey::LoadScreen(Screen::Dashboard), generation); @@ -388,19 +339,13 @@ impl LoreApp { } } -impl Default for LoreApp { - fn default() -> Self { - Self::new() - } -} - impl Model for LoreApp { type Message = Msg; fn init(&mut self) -> Cmd { // Install crash context panic hook. - CrashContext::install_panic_hook(&self.crash_context); - CrashContext::prune_crash_files(); + crate::crash_context::CrashContext::install_panic_hook(&self.crash_context); + crate::crash_context::CrashContext::prune_crash_files(); // Navigate to dashboard (will trigger data load in future phase). 
Cmd::none() @@ -420,293 +365,3 @@ impl Model for LoreApp { crate::view::render_screen(frame, self); } } - -/// Verify that `App::fullscreen(LoreApp::new()).run()` compiles. -#[cfg(test)] -fn _assert_app_fullscreen_compiles() { - fn _inner() { - use ftui::App; - let _app_builder = App::fullscreen(LoreApp::new()); - } -} - -/// Verify that `App::inline(LoreApp::new(), 12).run()` compiles. -#[cfg(test)] -fn _assert_app_inline_compiles() { - fn _inner() { - use ftui::App; - let _app_builder = App::inline(LoreApp::new(), 12); - } -} - -// --------------------------------------------------------------------------- -// Tests -// --------------------------------------------------------------------------- - -#[cfg(test)] -mod tests { - use super::*; - use crate::clock::FakeClock; - - fn test_app() -> LoreApp { - LoreApp::with_clock(Box::new(FakeClock::new(chrono::Utc::now()))) - } - - #[test] - fn test_lore_app_init_returns_none() { - let mut app = test_app(); - let cmd = app.init(); - assert!(matches!(cmd, Cmd::None)); - } - - #[test] - fn test_lore_app_quit_returns_quit_cmd() { - let mut app = test_app(); - let cmd = app.update(Msg::Quit); - assert!(matches!(cmd, Cmd::Quit)); - } - - #[test] - fn test_lore_app_tick_returns_none() { - let mut app = test_app(); - let cmd = app.update(Msg::Tick); - assert!(matches!(cmd, Cmd::None)); - } - - #[test] - fn test_lore_app_navigate_to_updates_nav_stack() { - let mut app = test_app(); - let cmd = app.update(Msg::NavigateTo(Screen::IssueList)); - assert!(matches!(cmd, Cmd::None)); - assert!(app.navigation.is_at(&Screen::IssueList)); - assert_eq!(app.navigation.depth(), 2); - } - - #[test] - fn test_lore_app_go_back() { - let mut app = test_app(); - app.update(Msg::NavigateTo(Screen::IssueList)); - app.update(Msg::GoBack); - assert!(app.navigation.is_at(&Screen::Dashboard)); - } - - #[test] - fn test_lore_app_go_forward() { - let mut app = test_app(); - app.update(Msg::NavigateTo(Screen::IssueList)); - app.update(Msg::GoBack); - 
app.update(Msg::GoForward); - assert!(app.navigation.is_at(&Screen::IssueList)); - } - - #[test] - fn test_ctrl_c_always_quits() { - let mut app = test_app(); - let key = KeyEvent::new(KeyCode::Char('c')).with_modifiers(Modifiers::CTRL); - let cmd = app.update(Msg::RawEvent(Event::Key(key))); - assert!(matches!(cmd, Cmd::Quit)); - } - - #[test] - fn test_q_key_quits_in_normal_mode() { - let mut app = test_app(); - let key = KeyEvent::new(KeyCode::Char('q')); - let cmd = app.update(Msg::RawEvent(Event::Key(key))); - assert!(matches!(cmd, Cmd::Quit)); - } - - #[test] - fn test_q_key_blocked_in_text_mode() { - let mut app = test_app(); - app.input_mode = InputMode::Text; - let key = KeyEvent::new(KeyCode::Char('q')); - let cmd = app.update(Msg::RawEvent(Event::Key(key))); - // q in text mode should NOT quit. - assert!(matches!(cmd, Cmd::None)); - } - - #[test] - fn test_esc_blurs_text_mode() { - let mut app = test_app(); - app.input_mode = InputMode::Text; - app.state.search.query_focused = true; - - let key = KeyEvent::new(KeyCode::Escape); - app.update(Msg::RawEvent(Event::Key(key))); - - assert!(matches!(app.input_mode, InputMode::Normal)); - assert!(!app.state.has_text_focus()); - } - - #[test] - fn test_g_prefix_enters_go_mode() { - let mut app = test_app(); - let key = KeyEvent::new(KeyCode::Char('g')); - app.update(Msg::RawEvent(Event::Key(key))); - assert!(matches!(app.input_mode, InputMode::GoPrefix { .. 
})); - } - - #[test] - fn test_g_then_i_navigates_to_issues() { - let mut app = test_app(); - - // First key: 'g' - let key_g = KeyEvent::new(KeyCode::Char('g')); - app.update(Msg::RawEvent(Event::Key(key_g))); - - // Second key: 'i' - let key_i = KeyEvent::new(KeyCode::Char('i')); - app.update(Msg::RawEvent(Event::Key(key_i))); - - assert!(app.navigation.is_at(&Screen::IssueList)); - } - - #[test] - fn test_go_prefix_timeout_cancels() { - let clock = FakeClock::new(chrono::Utc::now()); - let mut app = LoreApp::with_clock(Box::new(clock.clone())); - - // Press 'g'. - let key_g = KeyEvent::new(KeyCode::Char('g')); - app.update(Msg::RawEvent(Event::Key(key_g))); - assert!(matches!(app.input_mode, InputMode::GoPrefix { .. })); - - // Advance clock past timeout. - clock.advance(TimeDelta::milliseconds(600)); - - // Press 'i' after timeout — should NOT navigate to issues. - let key_i = KeyEvent::new(KeyCode::Char('i')); - app.update(Msg::RawEvent(Event::Key(key_i))); - - // Should still be at Dashboard (no navigation happened). - assert!(app.navigation.is_at(&Screen::Dashboard)); - assert!(matches!(app.input_mode, InputMode::Normal)); - } - - #[test] - fn test_show_help_toggles() { - let mut app = test_app(); - assert!(!app.state.show_help); - - app.update(Msg::ShowHelp); - assert!(app.state.show_help); - - app.update(Msg::ShowHelp); - assert!(!app.state.show_help); - } - - #[test] - fn test_error_msg_sets_toast() { - let mut app = test_app(); - app.update(Msg::Error(crate::message::AppError::DbBusy)); - assert!(app.state.error_toast.is_some()); - assert!(app.state.error_toast.as_ref().unwrap().contains("busy")); - } - - #[test] - fn test_resize_updates_terminal_size() { - let mut app = test_app(); - app.update(Msg::Resize { - width: 120, - height: 40, - }); - assert_eq!(app.state.terminal_size, (120, 40)); - } - - #[test] - fn test_stale_result_dropped() { - let mut app = test_app(); - - // Submit two tasks for IssueList — second supersedes first. 
- let gen1 = app - .supervisor - .submit(TaskKey::LoadScreen(Screen::IssueList)) - .generation; - let gen2 = app - .supervisor - .submit(TaskKey::LoadScreen(Screen::IssueList)) - .generation; - - // Stale result with gen1 should be ignored. - app.update(Msg::IssueListLoaded { - generation: gen1, - rows: vec![crate::message::IssueRow { - key: crate::message::EntityKey::issue(1, 1), - title: "stale".into(), - state: "opened".into(), - }], - }); - assert!(app.state.issue_list.rows.is_empty()); - - // Current result with gen2 should be applied. - app.update(Msg::IssueListLoaded { - generation: gen2, - rows: vec![crate::message::IssueRow { - key: crate::message::EntityKey::issue(1, 2), - title: "fresh".into(), - state: "opened".into(), - }], - }); - assert_eq!(app.state.issue_list.rows.len(), 1); - assert_eq!(app.state.issue_list.rows[0].title, "fresh"); - } - - #[test] - fn test_crash_context_records_events() { - let mut app = test_app(); - app.update(Msg::Tick); - app.update(Msg::NavigateTo(Screen::IssueList)); - - // Should have recorded at least 2 events. 
- assert!(app.crash_context.len() >= 2); - } - - #[test] - fn test_navigate_sets_loading_state() { - let mut app = test_app(); - app.update(Msg::NavigateTo(Screen::IssueList)); - assert_eq!( - *app.state.load_state.get(&Screen::IssueList), - LoadState::Refreshing - ); - } - - #[test] - fn test_command_palette_opens_from_ctrl_p() { - let mut app = test_app(); - let key = KeyEvent::new(KeyCode::Char('p')).with_modifiers(Modifiers::CTRL); - app.update(Msg::RawEvent(Event::Key(key))); - assert!(matches!(app.input_mode, InputMode::Palette)); - assert!(app.state.command_palette.query_focused); - } - - #[test] - fn test_esc_closes_palette() { - let mut app = test_app(); - app.input_mode = InputMode::Palette; - - let key = KeyEvent::new(KeyCode::Escape); - app.update(Msg::RawEvent(Event::Key(key))); - - assert!(matches!(app.input_mode, InputMode::Normal)); - } - - #[test] - fn test_blur_text_input_msg() { - let mut app = test_app(); - app.input_mode = InputMode::Text; - app.state.search.query_focused = true; - - app.update(Msg::BlurTextInput); - - assert!(matches!(app.input_mode, InputMode::Normal)); - assert!(!app.state.has_text_focus()); - } - - #[test] - fn test_default_is_new() { - let app = LoreApp::default(); - assert!(app.navigation.is_at(&Screen::Dashboard)); - assert!(matches!(app.input_mode, InputMode::Normal)); - } -} diff --git a/crates/lore-tui/src/clock.rs b/crates/lore-tui/src/clock.rs index 7928a54..3758558 100644 --- a/crates/lore-tui/src/clock.rs +++ b/crates/lore-tui/src/clock.rs @@ -15,6 +15,11 @@ use chrono::{DateTime, TimeDelta, Utc}; pub trait Clock: Send + Sync { /// Returns the current time. fn now(&self) -> DateTime; + + /// Returns the current time as milliseconds since the Unix epoch. + fn now_ms(&self) -> i64 { + self.now().timestamp_millis() + } } // --------------------------------------------------------------------------- @@ -54,6 +59,15 @@ impl FakeClock { } } + /// Create a fake clock frozen at the given millisecond epoch timestamp. 
+ /// + /// Convenience for action tests that work with raw epoch milliseconds. + #[must_use] + pub fn from_ms(epoch_ms: i64) -> Self { + let time = DateTime::from_timestamp_millis(epoch_ms).expect("valid millisecond timestamp"); + Self::new(time) + } + /// Advance the clock by `duration`. Uses `checked_add` to handle overflow /// gracefully — if the addition would overflow, the time is not changed. pub fn advance(&self, duration: TimeDelta) { diff --git a/crates/lore-tui/src/commands.rs b/crates/lore-tui/src/commands.rs.bak similarity index 100% rename from crates/lore-tui/src/commands.rs rename to crates/lore-tui/src/commands.rs.bak diff --git a/crates/lore-tui/src/commands/defs.rs b/crates/lore-tui/src/commands/defs.rs new file mode 100644 index 0000000..156a3de --- /dev/null +++ b/crates/lore-tui/src/commands/defs.rs @@ -0,0 +1,180 @@ +//! Command definitions — types for keybindings, screen filtering, and command metadata. + +use ftui::{KeyCode, Modifiers}; + +use crate::message::Screen; + +// --------------------------------------------------------------------------- +// Key formatting +// --------------------------------------------------------------------------- + +/// Format a key code + modifiers as a human-readable string. 
+pub(crate) fn format_key(code: KeyCode, modifiers: Modifiers) -> String { + let mut parts = Vec::new(); + if modifiers.contains(Modifiers::CTRL) { + parts.push("Ctrl"); + } + if modifiers.contains(Modifiers::ALT) { + parts.push("Alt"); + } + if modifiers.contains(Modifiers::SHIFT) { + parts.push("Shift"); + } + let key_name = match code { + KeyCode::Char(c) => c.to_string(), + KeyCode::Enter => "Enter".to_string(), + KeyCode::Escape => "Esc".to_string(), + KeyCode::Tab => "Tab".to_string(), + KeyCode::Backspace => "Backspace".to_string(), + KeyCode::Delete => "Del".to_string(), + KeyCode::Up => "Up".to_string(), + KeyCode::Down => "Down".to_string(), + KeyCode::Left => "Left".to_string(), + KeyCode::Right => "Right".to_string(), + KeyCode::Home => "Home".to_string(), + KeyCode::End => "End".to_string(), + KeyCode::PageUp => "PgUp".to_string(), + KeyCode::PageDown => "PgDn".to_string(), + KeyCode::F(n) => format!("F{n}"), + _ => "?".to_string(), + }; + parts.push(&key_name); + // We need to own the joined string. + let joined: String = parts.join("+"); + joined +} + +// --------------------------------------------------------------------------- +// KeyCombo +// --------------------------------------------------------------------------- + +/// A keybinding: either a single key or a two-key sequence. +#[derive(Debug, Clone, PartialEq, Eq, Hash)] +pub enum KeyCombo { + /// Single key press (e.g., `q`, `Esc`, `Ctrl+P`). + Single { code: KeyCode, modifiers: Modifiers }, + /// Two-key sequence (e.g., `g` then `i` for go-to-issues). + Sequence { + first_code: KeyCode, + first_modifiers: Modifiers, + second_code: KeyCode, + second_modifiers: Modifiers, + }, +} + +impl KeyCombo { + /// Convenience: single key with no modifiers. + #[must_use] + pub const fn key(code: KeyCode) -> Self { + Self::Single { + code, + modifiers: Modifiers::NONE, + } + } + + /// Convenience: single key with Ctrl modifier. 
+ #[must_use] + pub const fn ctrl(code: KeyCode) -> Self { + Self::Single { + code, + modifiers: Modifiers::CTRL, + } + } + + /// Convenience: g-prefix sequence (g + char). + #[must_use] + pub const fn g_then(c: char) -> Self { + Self::Sequence { + first_code: KeyCode::Char('g'), + first_modifiers: Modifiers::NONE, + second_code: KeyCode::Char(c), + second_modifiers: Modifiers::NONE, + } + } + + /// Human-readable display string for this key combo. + #[must_use] + pub fn display(&self) -> String { + match self { + Self::Single { code, modifiers } => format_key(*code, *modifiers), + Self::Sequence { + first_code, + first_modifiers, + second_code, + second_modifiers, + } => { + let first = format_key(*first_code, *first_modifiers); + let second = format_key(*second_code, *second_modifiers); + format!("{first} {second}") + } + } + } + + /// Whether this combo starts with the given key. + #[must_use] + pub fn starts_with(&self, code: &KeyCode, modifiers: &Modifiers) -> bool { + match self { + Self::Single { + code: c, + modifiers: m, + } => c == code && m == modifiers, + Self::Sequence { + first_code, + first_modifiers, + .. + } => first_code == code && first_modifiers == modifiers, + } + } +} + +// --------------------------------------------------------------------------- +// ScreenFilter +// --------------------------------------------------------------------------- + +/// Specifies which screens a command is available on. +#[derive(Debug, Clone)] +pub enum ScreenFilter { + /// Available on all screens. + Global, + /// Available only on specific screens. + Only(Vec), +} + +impl ScreenFilter { + /// Whether the command is available on the given screen. 
+ #[must_use] + pub fn matches(&self, screen: &Screen) -> bool { + match self { + Self::Global => true, + Self::Only(screens) => screens.contains(screen), + } + } +} + +// --------------------------------------------------------------------------- +// CommandDef +// --------------------------------------------------------------------------- + +/// Unique command identifier. +pub type CommandId = &'static str; + +/// A registered command with its keybinding, help text, and scope. +#[derive(Debug, Clone)] +pub struct CommandDef { + /// Unique identifier (e.g., "quit", "go_issues"). + pub id: CommandId, + /// Human-readable label for palette and help overlay. + pub label: &'static str, + /// Keybinding (if any). + pub keybinding: Option, + /// Equivalent `lore` CLI command (for "Show CLI equivalent" feature). + pub cli_equivalent: Option<&'static str>, + /// Description for help overlay. + pub help_text: &'static str, + /// Short hint for status bar (e.g., "q:quit"). + pub status_hint: &'static str, + /// Which screens this command is available on. + pub available_in: ScreenFilter, + /// Whether this command works in Text input mode. + pub available_in_text_mode: bool, +} diff --git a/crates/lore-tui/src/commands/mod.rs b/crates/lore-tui/src/commands/mod.rs new file mode 100644 index 0000000..4216c36 --- /dev/null +++ b/crates/lore-tui/src/commands/mod.rs @@ -0,0 +1,227 @@ +#![allow(dead_code)] // Phase 1: consumed by LoreApp in bd-6pmy + +//! Command registry — single source of truth for all TUI actions. +//! +//! Every keybinding, palette entry, help text, CLI equivalent, and +//! status hint is generated from [`CommandRegistry`]. No hardcoded +//! duplicate maps exist in view/state modules. +//! +//! Supports single-key and two-key sequences (g-prefix vim bindings). + +mod defs; +mod registry; + +// Re-export public API — preserves `crate::commands::{CommandRegistry, build_registry, ...}`. 
+pub use defs::{CommandDef, CommandId, KeyCombo, ScreenFilter}; +pub use registry::{CommandRegistry, build_registry}; + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use chrono::Utc; + use ftui::{KeyCode, Modifiers}; + + use crate::message::{InputMode, Screen}; + + #[test] + fn test_registry_builds_successfully() { + let reg = build_registry(); + assert!(!reg.is_empty()); + assert!(reg.len() >= 15); + } + + #[test] + fn test_registry_lookup_quit() { + let reg = build_registry(); + let cmd = reg.lookup_key( + &KeyCode::Char('q'), + &Modifiers::NONE, + &Screen::Dashboard, + &InputMode::Normal, + ); + assert!(cmd.is_some()); + assert_eq!(cmd.unwrap().id, "quit"); + } + + #[test] + fn test_registry_lookup_quit_blocked_in_text_mode() { + let reg = build_registry(); + let cmd = reg.lookup_key( + &KeyCode::Char('q'), + &Modifiers::NONE, + &Screen::Dashboard, + &InputMode::Text, + ); + assert!(cmd.is_none()); + } + + #[test] + fn test_registry_esc_works_in_text_mode() { + let reg = build_registry(); + let cmd = reg.lookup_key( + &KeyCode::Escape, + &Modifiers::NONE, + &Screen::IssueList, + &InputMode::Text, + ); + assert!(cmd.is_some()); + assert_eq!(cmd.unwrap().id, "go_back"); + } + + #[test] + fn test_registry_ctrl_p_works_in_text_mode() { + let reg = build_registry(); + let cmd = reg.lookup_key( + &KeyCode::Char('p'), + &Modifiers::CTRL, + &Screen::Search, + &InputMode::Text, + ); + assert!(cmd.is_some()); + assert_eq!(cmd.unwrap().id, "command_palette"); + } + + #[test] + fn test_g_is_sequence_starter() { + let reg = build_registry(); + assert!(reg.is_sequence_starter(&KeyCode::Char('g'), &Modifiers::NONE)); + assert!(!reg.is_sequence_starter(&KeyCode::Char('x'), &Modifiers::NONE)); + } + + #[test] + fn test_complete_sequence_gi() { + let reg = build_registry(); + let cmd = reg.complete_sequence( + 
&KeyCode::Char('g'), + &Modifiers::NONE, + &KeyCode::Char('i'), + &Modifiers::NONE, + &Screen::Dashboard, + ); + assert!(cmd.is_some()); + assert_eq!(cmd.unwrap().id, "go_issues"); + } + + #[test] + fn test_complete_sequence_invalid_second_key() { + let reg = build_registry(); + let cmd = reg.complete_sequence( + &KeyCode::Char('g'), + &Modifiers::NONE, + &KeyCode::Char('x'), + &Modifiers::NONE, + &Screen::Dashboard, + ); + assert!(cmd.is_none()); + } + + #[test] + fn test_screen_specific_command() { + let reg = build_registry(); + // 'j' (move_down) should work on IssueList + let cmd = reg.lookup_key( + &KeyCode::Char('j'), + &Modifiers::NONE, + &Screen::IssueList, + &InputMode::Normal, + ); + assert!(cmd.is_some()); + assert_eq!(cmd.unwrap().id, "move_down"); + + // 'j' should NOT match on Dashboard (move_down is list-only). + let cmd = reg.lookup_key( + &KeyCode::Char('j'), + &Modifiers::NONE, + &Screen::Dashboard, + &InputMode::Normal, + ); + assert!(cmd.is_none()); + } + + #[test] + fn test_palette_entries_sorted_by_label() { + let reg = build_registry(); + let entries = reg.palette_entries(&Screen::Dashboard); + let labels: Vec<&str> = entries.iter().map(|c| c.label).collect(); + let mut sorted = labels.clone(); + sorted.sort(); + assert_eq!(labels, sorted); + } + + #[test] + fn test_help_entries_only_include_keybindings() { + let reg = build_registry(); + let entries = reg.help_entries(&Screen::Dashboard); + for entry in &entries { + assert!( + entry.keybinding.is_some(), + "help entry without keybinding: {}", + entry.id + ); + } + } + + #[test] + fn test_status_hints_non_empty() { + let reg = build_registry(); + let hints = reg.status_hints(&Screen::Dashboard); + assert!(!hints.is_empty()); + // All returned hints should be non-empty strings. 
+ for hint in &hints { + assert!(!hint.is_empty()); + } + } + + #[test] + fn test_cli_equivalents_populated() { + let reg = build_registry(); + let with_cli: Vec<&CommandDef> = reg + .commands + .iter() + .filter(|c| c.cli_equivalent.is_some()) + .collect(); + assert!( + with_cli.len() >= 5, + "expected at least 5 commands with cli_equivalent, got {}", + with_cli.len() + ); + } + + #[test] + fn test_go_prefix_timeout_detection() { + let reg = build_registry(); + // Simulate GoPrefix mode entering: 'g' detected as sequence starter. + assert!(reg.is_sequence_starter(&KeyCode::Char('g'), &Modifiers::NONE)); + + // Simulate InputMode::GoPrefix with timeout check. + let started = Utc::now(); + let mode = InputMode::GoPrefix { + started_at: started, + }; + // In GoPrefix mode, normal lookup should still work for non-sequence keys. + let cmd = reg.lookup_key( + &KeyCode::Char('q'), + &Modifiers::NONE, + &Screen::Dashboard, + &mode, + ); + assert!(cmd.is_some()); + assert_eq!(cmd.unwrap().id, "quit"); + } + + #[test] + fn test_all_commands_have_nonempty_help() { + let reg = build_registry(); + for cmd in ®.commands { + assert!( + !cmd.help_text.is_empty(), + "command {} has empty help_text", + cmd.id + ); + } + } +} diff --git a/crates/lore-tui/src/commands/registry.rs b/crates/lore-tui/src/commands/registry.rs new file mode 100644 index 0000000..c1e2556 --- /dev/null +++ b/crates/lore-tui/src/commands/registry.rs @@ -0,0 +1,418 @@ +//! Command registry — lookup, indexing, and the canonical command list. + +use std::collections::HashMap; + +use ftui::{KeyCode, Modifiers}; + +use crate::message::{InputMode, Screen}; + +use super::defs::{CommandDef, KeyCombo, ScreenFilter}; + +// --------------------------------------------------------------------------- +// CommandRegistry +// --------------------------------------------------------------------------- + +/// Single source of truth for all TUI commands. +/// +/// Built once at startup via [`build_registry`]. 
Provides O(1) lookup +/// by keybinding and per-screen filtering. +pub struct CommandRegistry { + pub(crate) commands: Vec, + /// Single-key -> command IDs that start with this key. + by_single_key: HashMap<(KeyCode, Modifiers), Vec>, + /// Full sequence -> command index (for two-key combos). + by_sequence: HashMap, +} + +impl CommandRegistry { + /// Look up a command by a single key press on a given screen and input mode. + /// + /// Returns `None` if no matching command is found. For sequence starters + /// (like 'g'), returns `None` — use [`is_sequence_starter`] to detect + /// that case. + #[must_use] + pub fn lookup_key( + &self, + code: &KeyCode, + modifiers: &Modifiers, + screen: &Screen, + mode: &InputMode, + ) -> Option<&CommandDef> { + let is_text = matches!(mode, InputMode::Text); + let key = (*code, *modifiers); + + let indices = self.by_single_key.get(&key)?; + for &idx in indices { + let cmd = &self.commands[idx]; + if !cmd.available_in.matches(screen) { + continue; + } + if is_text && !cmd.available_in_text_mode { + continue; + } + // Only match Single combos here, not sequence starters. + if let Some(KeyCombo::Single { .. }) = &cmd.keybinding { + return Some(cmd); + } + } + None + } + + /// Complete a two-key sequence. + /// + /// Called after the first key of a sequence is detected (e.g., after 'g'). + #[must_use] + pub fn complete_sequence( + &self, + first_code: &KeyCode, + first_modifiers: &Modifiers, + second_code: &KeyCode, + second_modifiers: &Modifiers, + screen: &Screen, + ) -> Option<&CommandDef> { + let combo = KeyCombo::Sequence { + first_code: *first_code, + first_modifiers: *first_modifiers, + second_code: *second_code, + second_modifiers: *second_modifiers, + }; + let &idx = self.by_sequence.get(&combo)?; + let cmd = &self.commands[idx]; + if cmd.available_in.matches(screen) { + Some(cmd) + } else { + None + } + } + + /// Whether a key starts a multi-key sequence (e.g., 'g'). 
+ #[must_use] + pub fn is_sequence_starter(&self, code: &KeyCode, modifiers: &Modifiers) -> bool { + self.by_sequence + .keys() + .any(|combo| combo.starts_with(code, modifiers)) + } + + /// Commands available for the command palette on a given screen. + /// + /// Returned sorted by label. + #[must_use] + pub fn palette_entries(&self, screen: &Screen) -> Vec<&CommandDef> { + let mut entries: Vec<&CommandDef> = self + .commands + .iter() + .filter(|c| c.available_in.matches(screen)) + .collect(); + entries.sort_by_key(|c| c.label); + entries + } + + /// Commands for the help overlay on a given screen. + #[must_use] + pub fn help_entries(&self, screen: &Screen) -> Vec<&CommandDef> { + self.commands + .iter() + .filter(|c| c.available_in.matches(screen)) + .filter(|c| c.keybinding.is_some()) + .collect() + } + + /// Status bar hints for the current screen. + #[must_use] + pub fn status_hints(&self, screen: &Screen) -> Vec<&str> { + self.commands + .iter() + .filter(|c| c.available_in.matches(screen)) + .filter(|c| !c.status_hint.is_empty()) + .map(|c| c.status_hint) + .collect() + } + + /// Total number of registered commands. + #[must_use] + pub fn len(&self) -> usize { + self.commands.len() + } + + /// Whether the registry has no commands. + #[must_use] + pub fn is_empty(&self) -> bool { + self.commands.is_empty() + } +} + +// --------------------------------------------------------------------------- +// build_registry +// --------------------------------------------------------------------------- + +/// Build the command registry with all TUI commands. +/// +/// This is the single source of truth — every keybinding, help text, +/// and palette entry originates here. 
+#[must_use] +pub fn build_registry() -> CommandRegistry { + let commands = vec![ + // --- Global commands --- + CommandDef { + id: "quit", + label: "Quit", + keybinding: Some(KeyCombo::key(KeyCode::Char('q'))), + cli_equivalent: None, + help_text: "Exit the TUI", + status_hint: "q:quit", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + CommandDef { + id: "go_back", + label: "Go Back", + keybinding: Some(KeyCombo::key(KeyCode::Escape)), + cli_equivalent: None, + help_text: "Go back to previous screen", + status_hint: "esc:back", + available_in: ScreenFilter::Global, + available_in_text_mode: true, + }, + CommandDef { + id: "show_help", + label: "Help", + keybinding: Some(KeyCombo::key(KeyCode::Char('?'))), + cli_equivalent: None, + help_text: "Show keybinding help overlay", + status_hint: "?:help", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + CommandDef { + id: "command_palette", + label: "Command Palette", + keybinding: Some(KeyCombo::ctrl(KeyCode::Char('p'))), + cli_equivalent: None, + help_text: "Open command palette", + status_hint: "C-p:palette", + available_in: ScreenFilter::Global, + available_in_text_mode: true, + }, + CommandDef { + id: "open_in_browser", + label: "Open in Browser", + keybinding: Some(KeyCombo::key(KeyCode::Char('o'))), + cli_equivalent: None, + help_text: "Open current entity in browser", + status_hint: "o:browser", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + CommandDef { + id: "show_cli", + label: "Show CLI Equivalent", + keybinding: Some(KeyCombo::key(KeyCode::Char('!'))), + cli_equivalent: None, + help_text: "Show equivalent lore CLI command", + status_hint: "", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + // --- Navigation: g-prefix sequences --- + CommandDef { + id: "go_home", + label: "Go to Dashboard", + keybinding: Some(KeyCombo::g_then('h')), + cli_equivalent: None, + help_text: "Jump to dashboard", + 
status_hint: "gh:home", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + CommandDef { + id: "go_issues", + label: "Go to Issues", + keybinding: Some(KeyCombo::g_then('i')), + cli_equivalent: Some("lore issues"), + help_text: "Jump to issue list", + status_hint: "gi:issues", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + CommandDef { + id: "go_mrs", + label: "Go to Merge Requests", + keybinding: Some(KeyCombo::g_then('m')), + cli_equivalent: Some("lore mrs"), + help_text: "Jump to MR list", + status_hint: "gm:mrs", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + CommandDef { + id: "go_search", + label: "Go to Search", + keybinding: Some(KeyCombo::g_then('/')), + cli_equivalent: Some("lore search"), + help_text: "Jump to search", + status_hint: "g/:search", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + CommandDef { + id: "go_timeline", + label: "Go to Timeline", + keybinding: Some(KeyCombo::g_then('t')), + cli_equivalent: Some("lore timeline"), + help_text: "Jump to timeline", + status_hint: "gt:timeline", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + CommandDef { + id: "go_who", + label: "Go to Who", + keybinding: Some(KeyCombo::g_then('w')), + cli_equivalent: Some("lore who"), + help_text: "Jump to people intelligence", + status_hint: "gw:who", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + CommandDef { + id: "go_sync", + label: "Go to Sync", + keybinding: Some(KeyCombo::g_then('s')), + cli_equivalent: Some("lore sync"), + help_text: "Jump to sync status", + status_hint: "gs:sync", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + // --- Vim-style jump list --- + CommandDef { + id: "jump_back", + label: "Jump Back", + keybinding: Some(KeyCombo::ctrl(KeyCode::Char('o'))), + cli_equivalent: None, + help_text: "Jump backward through visited detail views", + 
status_hint: "C-o:jump back", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + CommandDef { + id: "jump_forward", + label: "Jump Forward", + keybinding: Some(KeyCombo::ctrl(KeyCode::Char('i'))), + cli_equivalent: None, + help_text: "Jump forward through visited detail views", + status_hint: "", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + // --- List navigation --- + CommandDef { + id: "move_down", + label: "Move Down", + keybinding: Some(KeyCombo::key(KeyCode::Char('j'))), + cli_equivalent: None, + help_text: "Move cursor down", + status_hint: "j:down", + available_in: ScreenFilter::Only(vec![ + Screen::IssueList, + Screen::MrList, + Screen::Search, + Screen::Timeline, + ]), + available_in_text_mode: false, + }, + CommandDef { + id: "move_up", + label: "Move Up", + keybinding: Some(KeyCombo::key(KeyCode::Char('k'))), + cli_equivalent: None, + help_text: "Move cursor up", + status_hint: "k:up", + available_in: ScreenFilter::Only(vec![ + Screen::IssueList, + Screen::MrList, + Screen::Search, + Screen::Timeline, + ]), + available_in_text_mode: false, + }, + CommandDef { + id: "select_item", + label: "Select", + keybinding: Some(KeyCombo::key(KeyCode::Enter)), + cli_equivalent: None, + help_text: "Open selected item", + status_hint: "enter:open", + available_in: ScreenFilter::Only(vec![ + Screen::IssueList, + Screen::MrList, + Screen::Search, + ]), + available_in_text_mode: false, + }, + // --- Filter --- + CommandDef { + id: "focus_filter", + label: "Filter", + keybinding: Some(KeyCombo::key(KeyCode::Char('/'))), + cli_equivalent: None, + help_text: "Focus the filter input", + status_hint: "/:filter", + available_in: ScreenFilter::Only(vec![Screen::IssueList, Screen::MrList]), + available_in_text_mode: false, + }, + // --- Scroll --- + CommandDef { + id: "scroll_to_top", + label: "Scroll to Top", + keybinding: Some(KeyCombo::g_then('g')), + cli_equivalent: None, + help_text: "Scroll to the top of the 
current view", + status_hint: "", + available_in: ScreenFilter::Global, + available_in_text_mode: false, + }, + ]; + + build_from_defs(commands) +} + +/// Build index maps from a list of command definitions. +fn build_from_defs(commands: Vec) -> CommandRegistry { + let mut by_single_key: HashMap<(KeyCode, Modifiers), Vec> = HashMap::new(); + let mut by_sequence: HashMap = HashMap::new(); + + for (idx, cmd) in commands.iter().enumerate() { + if let Some(combo) = &cmd.keybinding { + match combo { + KeyCombo::Single { code, modifiers } => { + by_single_key + .entry((*code, *modifiers)) + .or_default() + .push(idx); + } + KeyCombo::Sequence { .. } => { + by_sequence.insert(combo.clone(), idx); + // Also index the first key so is_sequence_starter works via by_single_key. + if let KeyCombo::Sequence { + first_code, + first_modifiers, + .. + } = combo + { + by_single_key + .entry((*first_code, *first_modifiers)) + .or_default() + .push(idx); + } + } + } + } + } + + CommandRegistry { + commands, + by_single_key, + by_sequence, + } +} diff --git a/crates/lore-tui/src/crash_context.rs b/crates/lore-tui/src/crash_context.rs index e929c77..e99169f 100644 --- a/crates/lore-tui/src/crash_context.rs +++ b/crates/lore-tui/src/crash_context.rs @@ -168,6 +168,13 @@ impl CrashContext { /// /// Captures the current events via a snapshot. The hook chains with /// the default panic handler so backtraces are still printed. + /// + /// FIXME: This snapshots events at install time, which is typically + /// during init() when the buffer is empty. The crash dump will only + /// contain the panic itself, not the preceding key presses and state + /// transitions. Fix requires CrashContext to use interior mutability + /// (Arc>>) so the panic hook reads live + /// state instead of a stale snapshot. 
// ---------------------------------------------------------------------------
// filter_dsl.rs — Filter DSL parser for entity list screens.
//
// Parses a compact filter string into structured tokens:
//   - `field:value`    — typed field filter (e.g., `state:opened`)
//   - `-field:value`   — negation filter (exclude matches)
//   - `"quoted value"` — preserved as a single free-text token
//   - bare words       — free-text search terms
//
// The DSL is intentionally simple: no boolean operators, no nesting.
// Filters are AND-combined at the query layer.
// (File-level #![allow(dead_code)] until the filter_bar widget consumes it.)
// ---------------------------------------------------------------------------

// ---------------------------------------------------------------------------
// Token types
// ---------------------------------------------------------------------------

/// A single parsed filter token.
#[derive(Debug, Clone, PartialEq, Eq)]
pub enum FilterToken {
    /// `field:value` — match entities where `field` equals `value`.
    FieldValue { field: String, value: String },
    /// `-field:value` — exclude entities where `field` equals `value`.
    Negation { field: String, value: String },
    /// Bare word used as a free-text search term.
    FreeText(String),
    /// `"quoted value"` — preserved as a single search term.
    QuotedValue(String),
}

// ---------------------------------------------------------------------------
// Known fields per entity type
// ---------------------------------------------------------------------------

/// Known filter fields for issues.
pub const ISSUE_FIELDS: &[&str] = &[
    "state",
    "author",
    "assignee",
    "label",
    "milestone",
    "status",
];

/// Known filter fields for merge requests.
pub const MR_FIELDS: &[&str] = &[
    "state",
    "author",
    "reviewer",
    "target_branch",
    "source_branch",
    "label",
    "draft",
];

// ---------------------------------------------------------------------------
// Parser
// ---------------------------------------------------------------------------

/// Parse a filter input string into a sequence of tokens.
///
/// Empty (or all-whitespace) input returns an empty vec — a no-op filter
/// that shows everything.
pub fn parse_filter_tokens(input: &str) -> Vec<FilterToken> {
    let input = input.trim();
    if input.is_empty() {
        return Vec::new();
    }

    let mut tokens = Vec::new();
    let mut chars = input.chars().peekable();

    while chars.peek().is_some() {
        // Skip whitespace between tokens.
        while chars.peek().is_some_and(|c| c.is_whitespace()) {
            chars.next();
        }

        match chars.peek() {
            None => break,
            Some('"') => {
                // Quoted value — consume until closing quote or end of input
                // (an unclosed quote still yields a token).
                chars.next(); // consume opening "
                let value = consume_until(&mut chars, '"');
                if chars.peek() == Some(&'"') {
                    chars.next(); // consume closing "
                }
                if !value.is_empty() {
                    tokens.push(FilterToken::QuotedValue(value));
                }
            }
            Some('-') => {
                // Either a negation prefix (-field:value) or free text that
                // merely starts with '-'.
                chars.next(); // consume -
                let word = consume_word(&mut chars);
                if let Some((field, value)) = word.split_once(':') {
                    tokens.push(FilterToken::Negation {
                        field: field.to_string(),
                        value: value.to_string(),
                    });
                } else if !word.is_empty() {
                    // Bare negation without field:value — keep the '-' so the
                    // free-text term round-trips unchanged.
                    tokens.push(FilterToken::FreeText(format!("-{word}")));
                }
            }
            Some(_) => {
                let word = consume_word(&mut chars);
                if let Some((field, value)) = word.split_once(':') {
                    tokens.push(FilterToken::FieldValue {
                        field: field.to_string(),
                        value: value.to_string(),
                    });
                } else if !word.is_empty() {
                    tokens.push(FilterToken::FreeText(word));
                }
            }
        }
    }

    tokens
}

/// Validate that a field name is known for the given entity type.
///
/// Returns `true` if the field is in the known set, `false` otherwise.
pub fn is_known_field(field: &str, known_fields: &[&str]) -> bool {
    known_fields.contains(&field)
}

/// Extract all unknown field names from a token list, in order of
/// appearance. Free-text and quoted tokens carry no field and are skipped.
pub fn unknown_fields<'a>(tokens: &'a [FilterToken], known_fields: &[&str]) -> Vec<&'a str> {
    tokens
        .iter()
        .filter_map(|t| match t {
            FilterToken::FieldValue { field, .. } | FilterToken::Negation { field, .. } => {
                (!is_known_field(field, known_fields)).then_some(field.as_str())
            }
            _ => None,
        })
        .collect()
}

// ---------------------------------------------------------------------------
// Helpers
// ---------------------------------------------------------------------------

/// Consume characters until `delim` is found (exclusive) or end of input.
fn consume_until(
    chars: &mut std::iter::Peekable<std::str::Chars<'_>>,
    delim: char,
) -> String {
    let mut buf = String::new();
    while let Some(&c) = chars.peek() {
        if c == delim {
            break;
        }
        buf.push(c);
        chars.next();
    }
    buf
}

/// Consume a non-whitespace word. Stops at a quote boundary (unless the
/// quote is the first character) so quoted values are handled separately.
fn consume_word(chars: &mut std::iter::Peekable<std::str::Chars<'_>>) -> String {
    let mut buf = String::new();
    while let Some(&c) = chars.peek() {
        if c.is_whitespace() {
            break;
        }
        if c == '"' && !buf.is_empty() {
            break;
        }
        buf.push(c);
        chars.next();
    }
    buf
}

// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------

#[cfg(test)]
mod tests {
    use super::*;

    // -- TDD Anchor: basic field:value parsing --

    #[test]
    fn test_parse_filter_basic() {
        let tokens = parse_filter_tokens("state:opened author:taylor");
        assert_eq!(tokens.len(), 2);
        assert_eq!(
            tokens[0],
            FilterToken::FieldValue {
                field: "state".into(),
                value: "opened".into()
            }
        );
        assert_eq!(
            tokens[1],
            FilterToken::FieldValue {
                field: "author".into(),
                value: "taylor".into()
            }
        );
    }

    #[test]
    fn test_parse_quoted_value() {
        let tokens = parse_filter_tokens("\"in progress\"");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0], FilterToken::QuotedValue("in progress".into()));
    }

    #[test]
    fn test_parse_negation() {
        let tokens = parse_filter_tokens("-state:closed");
        assert_eq!(tokens.len(), 1);
        assert_eq!(
            tokens[0],
            FilterToken::Negation {
                field: "state".into(),
                value: "closed".into()
            }
        );
    }

    #[test]
    fn test_parse_mixed() {
        let tokens = parse_filter_tokens("state:opened \"bug fix\" -label:wontfix");
        assert_eq!(tokens.len(), 3);
        assert_eq!(
            tokens[0],
            FilterToken::FieldValue {
                field: "state".into(),
                value: "opened".into()
            }
        );
        assert_eq!(tokens[1], FilterToken::QuotedValue("bug fix".into()));
        assert_eq!(
            tokens[2],
            FilterToken::Negation {
                field: "label".into(),
                value: "wontfix".into()
            }
        );
    }

    #[test]
    fn test_parse_empty_returns_empty() {
        assert!(parse_filter_tokens("").is_empty());
        assert!(parse_filter_tokens("   ").is_empty());
    }

    #[test]
    fn test_parse_free_text() {
        let tokens = parse_filter_tokens("authentication bug");
        assert_eq!(tokens.len(), 2);
        assert_eq!(tokens[0], FilterToken::FreeText("authentication".into()));
        assert_eq!(tokens[1], FilterToken::FreeText("bug".into()));
    }

    #[test]
    fn test_parse_bare_negation_as_free_text() {
        let tokens = parse_filter_tokens("-wontfix");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0], FilterToken::FreeText("-wontfix".into()));
    }

    #[test]
    fn test_parse_unicode() {
        let tokens = parse_filter_tokens("author:田中 \"認証バグ\"");
        assert_eq!(tokens.len(), 2);
        assert_eq!(
            tokens[0],
            FilterToken::FieldValue {
                field: "author".into(),
                value: "田中".into()
            }
        );
        assert_eq!(tokens[1], FilterToken::QuotedValue("認証バグ".into()));
    }

    #[test]
    fn test_parse_unclosed_quote() {
        let tokens = parse_filter_tokens("\"open ended");
        assert_eq!(tokens.len(), 1);
        assert_eq!(tokens[0], FilterToken::QuotedValue("open ended".into()));
    }

    // -- Field validation --

    #[test]
    fn test_known_field_issues() {
        assert!(is_known_field("state", ISSUE_FIELDS));
        assert!(is_known_field("author", ISSUE_FIELDS));
        assert!(!is_known_field("reviewer", ISSUE_FIELDS));
        assert!(!is_known_field("bogus", ISSUE_FIELDS));
    }

    #[test]
    fn test_known_field_mrs() {
        assert!(is_known_field("draft", MR_FIELDS));
        assert!(is_known_field("reviewer", MR_FIELDS));
        assert!(!is_known_field("assignee", MR_FIELDS));
    }

    #[test]
    fn test_unknown_fields_detection() {
        let tokens = parse_filter_tokens("state:opened bogus:val author:taylor unknown:x");
        let unknown = unknown_fields(&tokens, ISSUE_FIELDS);
        assert_eq!(unknown, vec!["bogus", "unknown"]);
    }
}

// ---------------------------------------------------------------------------
// layout.rs — Responsive layout helpers for the Lore TUI (continues below).
// Wraps ftui::layout::{Breakpoint, Breakpoints} with Lore-specific
// configuration: breakpoint thresholds, column counts per …
// ---------------------------------------------------------------------------
breakpoint, and preview-pane visibility rules. + +use ftui::layout::{Breakpoint, Breakpoints}; + +/// Lore-specific breakpoint thresholds. +/// +/// Uses the ftui defaults: Sm=60, Md=90, Lg=120, Xl=160 columns. +pub const LORE_BREAKPOINTS: Breakpoints = Breakpoints::DEFAULT; + +/// Classify a terminal width into a [`Breakpoint`]. +#[inline] +pub fn classify_width(width: u16) -> Breakpoint { + LORE_BREAKPOINTS.classify_width(width) +} + +/// Number of dashboard columns for a given breakpoint. +/// +/// - `Xs` / `Sm`: 1 column (narrow terminals) +/// - `Md`: 2 columns (standard width) +/// - `Lg` / `Xl`: 3 columns (wide terminals) +#[inline] +pub const fn dashboard_columns(bp: Breakpoint) -> u16 { + match bp { + Breakpoint::Xs | Breakpoint::Sm => 1, + Breakpoint::Md => 2, + Breakpoint::Lg | Breakpoint::Xl => 3, + } +} + +/// Whether the preview pane should be visible at a given breakpoint. +/// +/// Preview requires at least `Md` width to avoid cramping the main list. +#[inline] +pub const fn show_preview_pane(bp: Breakpoint) -> bool { + match bp { + Breakpoint::Md | Breakpoint::Lg | Breakpoint::Xl => true, + Breakpoint::Xs | Breakpoint::Sm => false, + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_classify_width_boundaries() { + // Xs: 0..59 + assert_eq!(classify_width(59), Breakpoint::Xs); + // Sm: 60..89 + assert_eq!(classify_width(60), Breakpoint::Sm); + assert_eq!(classify_width(89), Breakpoint::Sm); + // Md: 90..119 + assert_eq!(classify_width(90), Breakpoint::Md); + assert_eq!(classify_width(119), Breakpoint::Md); + // Lg: 120..159 + assert_eq!(classify_width(120), Breakpoint::Lg); + assert_eq!(classify_width(159), Breakpoint::Lg); + // Xl: 160+ + assert_eq!(classify_width(160), Breakpoint::Xl); + } + + #[test] + fn test_dashboard_columns_per_breakpoint() { + 
assert_eq!(dashboard_columns(Breakpoint::Xs), 1); + assert_eq!(dashboard_columns(Breakpoint::Sm), 1); + assert_eq!(dashboard_columns(Breakpoint::Md), 2); + assert_eq!(dashboard_columns(Breakpoint::Lg), 3); + assert_eq!(dashboard_columns(Breakpoint::Xl), 3); + } + + #[test] + fn test_show_preview_pane_per_breakpoint() { + assert!(!show_preview_pane(Breakpoint::Xs)); + assert!(!show_preview_pane(Breakpoint::Sm)); + assert!(show_preview_pane(Breakpoint::Md)); + assert!(show_preview_pane(Breakpoint::Lg)); + assert!(show_preview_pane(Breakpoint::Xl)); + } + + #[test] + fn test_edge_cases() { + // Width 0 must not panic, should classify as Xs + assert_eq!(classify_width(0), Breakpoint::Xs); + // Very wide terminal + assert_eq!(classify_width(300), Breakpoint::Xl); + } + + #[test] + fn test_lore_breakpoints_matches_defaults() { + assert_eq!(LORE_BREAKPOINTS, Breakpoints::DEFAULT); + } +} diff --git a/crates/lore-tui/src/lib.rs b/crates/lore-tui/src/lib.rs index 8691755..2852aea 100644 --- a/crates/lore-tui/src/lib.rs +++ b/crates/lore-tui/src/lib.rs @@ -21,11 +21,16 @@ pub mod app; // LoreApp Model trait impl (Phase 0 proof: bd-2emv, full: bd-6pmy) // Phase 1 modules. pub mod commands; // CommandRegistry: keybindings, help, palette (bd-38lb) pub mod crash_context; // CrashContext ring buffer + panic hook (bd-2fr7) +pub mod layout; // Responsive layout: breakpoints, columns, preview pane (bd-1pzj) pub mod navigation; // NavigationStack: back/forward/jump list (bd-1qpp) pub mod state; // AppState, LoadState, ScreenIntent, per-screen states (bd-1v9m) pub mod task_supervisor; // TaskSupervisor: dedup + cancel + generation IDs (bd-3le2) pub mod view; // View layer: render_screen + common widgets (bd-26f2) +// Phase 2 modules. +pub mod action; // Data-fetching actions for TUI screens (bd-35g5+) +pub mod filter_dsl; // Filter DSL tokenizer for list screen filter bars (bd-18qs) + /// Options controlling how the TUI launches. 
#[derive(Debug, Clone)] pub struct LaunchOptions { diff --git a/crates/lore-tui/src/message.rs b/crates/lore-tui/src/message.rs index daf9c20..a9d97c3 100644 --- a/crates/lore-tui/src/message.rs +++ b/crates/lore-tui/src/message.rs @@ -222,7 +222,7 @@ pub enum Msg { // --- Issue list --- IssueListLoaded { generation: u64, - rows: Vec, + page: crate::state::issue_list::IssueListPage, }, IssueListFilterChanged(String), IssueListSortChanged, @@ -231,7 +231,7 @@ pub enum Msg { // --- MR list --- MrListLoaded { generation: u64, - rows: Vec, + page: crate::state::mr_list::MrListPage, }, MrListFilterChanged(String), MrSelected(EntityKey), @@ -318,7 +318,7 @@ pub enum Msg { // --- Dashboard --- DashboardLoaded { generation: u64, - data: Box, + data: Box, }, // --- Global actions --- @@ -349,23 +349,6 @@ impl From for Msg { // Placeholder data types (will be fleshed out in Phase 1+) // --------------------------------------------------------------------------- -/// Placeholder for an issue row in list views. -#[derive(Debug, Clone)] -pub struct IssueRow { - pub key: EntityKey, - pub title: String, - pub state: String, -} - -/// Placeholder for a merge request row in list views. -#[derive(Debug, Clone)] -pub struct MrRow { - pub key: EntityKey, - pub title: String, - pub state: String, - pub draft: bool, -} - /// Placeholder for issue detail payload. #[derive(Debug, Clone)] pub struct IssueDetail { @@ -410,12 +393,8 @@ pub struct WhoResult { pub experts: Vec, } -/// Placeholder for dashboard summary data. -#[derive(Debug, Clone)] -pub struct DashboardData { - pub issue_count: u64, - pub mr_count: u64, -} +// DashboardData moved to crate::state::dashboard (enriched with +// EntityCounts, ProjectSyncInfo, RecentActivityItem, LastSyncInfo). 
// --------------------------------------------------------------------------- // Tests diff --git a/crates/lore-tui/src/navigation.rs b/crates/lore-tui/src/navigation.rs index 94e9288..2d05587 100644 --- a/crates/lore-tui/src/navigation.rs +++ b/crates/lore-tui/src/navigation.rs @@ -60,9 +60,10 @@ impl NavigationStack { self.forward_stack.clear(); // Record significant hops in jump list (vim behavior): - // truncate any forward entries beyond jump_index, then append. + // Keep entries up to and including the current position, discard + // any forward entries beyond it, then append the new destination. if self.current.is_detail_or_entity() { - self.jump_list.truncate(self.jump_index); + self.jump_list.truncate(self.jump_index.saturating_add(1)); self.jump_list.push(self.current.clone()); self.jump_index = self.jump_list.len(); } @@ -90,23 +91,37 @@ impl NavigationStack { /// Jump backward through the jump list (vim Ctrl+O). /// - /// Only visits detail/entity screens. + /// Only visits detail/entity screens. Skips entries matching the + /// current screen so the first press always produces a visible change. pub fn jump_back(&mut self) -> Option<&Screen> { - if self.jump_index == 0 { - return None; + while self.jump_index > 0 { + self.jump_index -= 1; + if let Some(target) = self.jump_list.get(self.jump_index).cloned() + && target != self.current + { + self.current = target; + return Some(&self.current); + } } - self.jump_index -= 1; - self.jump_list.get(self.jump_index) + None } /// Jump forward through the jump list (vim Ctrl+I). + /// + /// Skips entries matching the current screen. 
pub fn jump_forward(&mut self) -> Option<&Screen> { - if self.jump_index >= self.jump_list.len() { - return None; + while self.jump_index < self.jump_list.len() { + if let Some(target) = self.jump_list.get(self.jump_index).cloned() { + self.jump_index += 1; + if target != self.current { + self.current = target; + return Some(&self.current); + } + } else { + break; + } } - let screen = self.jump_list.get(self.jump_index)?; - self.jump_index += 1; - Some(screen) + None } /// Reset to a single screen, clearing all history. @@ -246,24 +261,21 @@ mod tests { nav.push(Screen::MrList); nav.push(mr.clone()); - // jump_index is at 2 (past the end of 2 items) - let prev = nav.jump_back(); - assert_eq!(prev, Some(&mr)); - + // Current is MrDetail. jump_list = [IssueDetail, MrDetail], index = 2. + // First jump_back skips MrDetail (== current) and lands on IssueDetail. let prev = nav.jump_back(); assert_eq!(prev, Some(&issue)); + assert!(nav.is_at(&issue)); - // at beginning + // Already at beginning of jump list. assert!(nav.jump_back().is_none()); - // forward - let next = nav.jump_forward(); - assert_eq!(next, Some(&issue)); - + // jump_forward skips IssueDetail (== current) and lands on MrDetail. let next = nav.jump_forward(); assert_eq!(next, Some(&mr)); + assert!(nav.is_at(&mr)); - // at end + // At end of jump list. 
assert!(nav.jump_forward().is_none()); } @@ -274,10 +286,9 @@ mod tests { nav.push(Screen::IssueDetail(EntityKey::issue(1, 2))); nav.push(Screen::IssueDetail(EntityKey::issue(1, 3))); - // jump back twice + // jump back twice — lands on issue(1,1), jump_index = 0 nav.jump_back(); nav.jump_back(); - // jump_index = 1, pointing at issue 2 // new detail push truncates forward entries nav.push(Screen::MrDetail(EntityKey::mr(1, 99))); diff --git a/crates/lore-tui/src/state/dashboard.rs b/crates/lore-tui/src/state/dashboard.rs index 21ea0a9..d01a5e1 100644 --- a/crates/lore-tui/src/state/dashboard.rs +++ b/crates/lore-tui/src/state/dashboard.rs @@ -1,10 +1,255 @@ #![allow(dead_code)] //! Dashboard screen state. +//! +//! The dashboard is the home screen — entity counts, per-project sync +//! status, recent activity, and the last sync summary. + +// --------------------------------------------------------------------------- +// EntityCounts +// --------------------------------------------------------------------------- + +/// Aggregated entity counts from the local database. +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct EntityCounts { + pub issues_open: u64, + pub issues_total: u64, + pub mrs_open: u64, + pub mrs_total: u64, + pub discussions: u64, + pub notes_total: u64, + /// Percentage of notes that are system-generated (0-100). + pub notes_system_pct: u8, + pub documents: u64, + pub embeddings: u64, +} + +// --------------------------------------------------------------------------- +// ProjectSyncInfo +// --------------------------------------------------------------------------- + +/// Per-project sync freshness. 
+#[derive(Debug, Clone, PartialEq, Eq)] +pub struct ProjectSyncInfo { + pub path: String, + pub minutes_since_sync: u64, +} + +// --------------------------------------------------------------------------- +// RecentActivityItem +// --------------------------------------------------------------------------- + +/// A recently-updated entity for the activity feed. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct RecentActivityItem { + /// "issue" or "mr". + pub entity_type: String, + pub iid: u64, + pub title: String, + pub state: String, + pub minutes_ago: u64, +} + +// --------------------------------------------------------------------------- +// LastSyncInfo +// --------------------------------------------------------------------------- + +/// Summary of the most recent sync run. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct LastSyncInfo { + pub status: String, + /// Milliseconds epoch UTC. + pub finished_at: Option, + pub command: String, + pub error: Option, +} + +// --------------------------------------------------------------------------- +// DashboardData +// --------------------------------------------------------------------------- + +/// Data returned by the `fetch_dashboard` action. +/// +/// Pure data transfer — no rendering or display logic. +#[derive(Debug, Clone, Default)] +pub struct DashboardData { + pub counts: EntityCounts, + pub projects: Vec, + pub recent: Vec, + pub last_sync: Option, +} + +// --------------------------------------------------------------------------- +// DashboardState +// --------------------------------------------------------------------------- /// State for the dashboard summary screen. #[derive(Debug, Default)] pub struct DashboardState { - pub issue_count: u64, - pub mr_count: u64, + pub counts: EntityCounts, + pub projects: Vec, + pub recent: Vec, + pub last_sync: Option, + /// Scroll offset for the recent activity list. 
+ pub scroll_offset: usize, +} + +impl DashboardState { + /// Apply fresh data from a `fetch_dashboard` result. + /// + /// Preserves scroll offset (clamped to new data bounds). + pub fn update(&mut self, data: DashboardData) { + self.counts = data.counts; + self.projects = data.projects; + self.last_sync = data.last_sync; + self.recent = data.recent; + // Clamp scroll offset if the list shrunk. + if !self.recent.is_empty() { + self.scroll_offset = self.scroll_offset.min(self.recent.len() - 1); + } else { + self.scroll_offset = 0; + } + } + + /// Scroll the recent activity list down by one. + pub fn scroll_down(&mut self) { + if !self.recent.is_empty() { + self.scroll_offset = (self.scroll_offset + 1).min(self.recent.len() - 1); + } + } + + /// Scroll the recent activity list up by one. + pub fn scroll_up(&mut self) { + self.scroll_offset = self.scroll_offset.saturating_sub(1); + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + #[test] + fn test_dashboard_state_default() { + let state = DashboardState::default(); + assert_eq!(state.counts.issues_total, 0); + assert_eq!(state.scroll_offset, 0); + assert!(state.recent.is_empty()); + } + + #[test] + fn test_dashboard_state_update_applies_data() { + let mut state = DashboardState::default(); + let data = DashboardData { + counts: EntityCounts { + issues_open: 3, + issues_total: 5, + ..Default::default() + }, + projects: vec![ProjectSyncInfo { + path: "group/project".into(), + minutes_since_sync: 42, + }], + recent: vec![RecentActivityItem { + entity_type: "issue".into(), + iid: 1, + title: "Fix bug".into(), + state: "opened".into(), + minutes_ago: 10, + }], + last_sync: None, + }; + + state.update(data); + assert_eq!(state.counts.issues_open, 3); + assert_eq!(state.counts.issues_total, 5); + assert_eq!(state.projects.len(), 1); + 
assert_eq!(state.recent.len(), 1); + } + + #[test] + fn test_dashboard_state_update_clamps_scroll() { + let mut state = DashboardState { + scroll_offset: 10, + ..Default::default() + }; + + let data = DashboardData { + recent: vec![RecentActivityItem { + entity_type: "issue".into(), + iid: 1, + title: "Only item".into(), + state: "opened".into(), + minutes_ago: 5, + }], + ..Default::default() + }; + + state.update(data); + assert_eq!(state.scroll_offset, 0); // Clamped to len-1 = 0 + } + + #[test] + fn test_dashboard_state_update_empty_resets_scroll() { + let mut state = DashboardState { + scroll_offset: 5, + ..Default::default() + }; + + state.update(DashboardData::default()); + assert_eq!(state.scroll_offset, 0); + } + + #[test] + fn test_scroll_down_and_up() { + let mut state = DashboardState::default(); + state.recent = (0..5) + .map(|i| RecentActivityItem { + entity_type: "issue".into(), + iid: i, + title: format!("Item {i}"), + state: "opened".into(), + minutes_ago: i, + }) + .collect(); + + assert_eq!(state.scroll_offset, 0); + state.scroll_down(); + assert_eq!(state.scroll_offset, 1); + state.scroll_down(); + assert_eq!(state.scroll_offset, 2); + state.scroll_up(); + assert_eq!(state.scroll_offset, 1); + state.scroll_up(); + assert_eq!(state.scroll_offset, 0); + state.scroll_up(); // Can't go below 0 + assert_eq!(state.scroll_offset, 0); + } + + #[test] + fn test_scroll_down_stops_at_end() { + let mut state = DashboardState::default(); + state.recent = vec![RecentActivityItem { + entity_type: "mr".into(), + iid: 1, + title: "Only".into(), + state: "merged".into(), + minutes_ago: 0, + }]; + + state.scroll_down(); + assert_eq!(state.scroll_offset, 0); // Can't scroll past single item + } + + #[test] + fn test_scroll_on_empty_is_noop() { + let mut state = DashboardState::default(); + state.scroll_down(); + assert_eq!(state.scroll_offset, 0); + state.scroll_up(); + assert_eq!(state.scroll_offset, 0); + } } diff --git a/crates/lore-tui/src/state/issue_list.rs 
b/crates/lore-tui/src/state/issue_list.rs index 86ec4a7..e8122d3 100644 --- a/crates/lore-tui/src/state/issue_list.rs +++ b/crates/lore-tui/src/state/issue_list.rs @@ -1,14 +1,376 @@ -#![allow(dead_code)] +#![allow(dead_code)] // Phase 2: consumed by LoreApp and view/issue_list //! Issue list screen state. +//! +//! Uses keyset pagination with a snapshot fence for stable ordering +//! under concurrent sync writes. Filter changes reset the pagination +//! cursor and snapshot fence. -use crate::message::IssueRow; +use std::hash::{Hash, Hasher}; + +// --------------------------------------------------------------------------- +// Cursor (keyset pagination boundary) +// --------------------------------------------------------------------------- + +/// Keyset pagination cursor — (updated_at, iid) boundary. +/// +/// The next page query uses `WHERE (updated_at, iid) < (cursor.updated_at, cursor.iid)` +/// to avoid OFFSET instability. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct IssueCursor { + pub updated_at: i64, + pub iid: i64, +} + +// --------------------------------------------------------------------------- +// Filter +// --------------------------------------------------------------------------- + +/// Structured filter for issue list queries. +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct IssueFilter { + pub state: Option, + pub author: Option, + pub assignee: Option, + pub label: Option, + pub milestone: Option, + pub status: Option, + pub free_text: Option, + pub project_id: Option, +} + +impl IssueFilter { + /// Compute a hash for change detection. 
+ pub fn hash_value(&self) -> u64 { + let mut hasher = std::collections::hash_map::DefaultHasher::new(); + self.state.hash(&mut hasher); + self.author.hash(&mut hasher); + self.assignee.hash(&mut hasher); + self.label.hash(&mut hasher); + self.milestone.hash(&mut hasher); + self.status.hash(&mut hasher); + self.free_text.hash(&mut hasher); + self.project_id.hash(&mut hasher); + hasher.finish() + } + + /// Whether any filter is active. + pub fn is_active(&self) -> bool { + self.state.is_some() + || self.author.is_some() + || self.assignee.is_some() + || self.label.is_some() + || self.milestone.is_some() + || self.status.is_some() + || self.free_text.is_some() + || self.project_id.is_some() + } +} + +// --------------------------------------------------------------------------- +// Row +// --------------------------------------------------------------------------- + +/// A single row in the issue list. +#[derive(Debug, Clone)] +pub struct IssueListRow { + pub project_path: String, + pub iid: i64, + pub title: String, + pub state: String, + pub author: String, + pub labels: Vec, + pub updated_at: i64, +} + +// --------------------------------------------------------------------------- +// Page result +// --------------------------------------------------------------------------- + +/// Result from a paginated issue list query. +#[derive(Debug, Clone)] +pub struct IssueListPage { + pub rows: Vec, + pub next_cursor: Option, + pub total_count: u64, +} + +// --------------------------------------------------------------------------- +// Sort +// --------------------------------------------------------------------------- + +/// Fields available for sorting. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum SortField { + #[default] + UpdatedAt, + Iid, + Title, + State, + Author, +} + +/// Sort direction. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum SortOrder { + #[default] + Desc, + Asc, +} + +// --------------------------------------------------------------------------- +// IssueListState +// --------------------------------------------------------------------------- /// State for the issue list screen. #[derive(Debug, Default)] pub struct IssueListState { - pub rows: Vec, - pub filter: String, - pub filter_focused: bool, + /// Current page of issue rows. + pub rows: Vec, + /// Total count of matching issues. + pub total_count: u64, + /// Selected row index (within current window). pub selected_index: usize, + /// Scroll offset for the entity table. + pub scroll_offset: usize, + /// Cursor for the next page. + pub next_cursor: Option, + /// Whether a prefetch is in flight. + pub prefetch_in_flight: bool, + /// Current filter. + pub filter: IssueFilter, + /// Raw filter input text. + pub filter_input: String, + /// Whether the filter bar has focus. + pub filter_focused: bool, + /// Sort field. + pub sort_field: SortField, + /// Sort direction. + pub sort_order: SortOrder, + /// Snapshot fence: max updated_at from initial load. + pub snapshot_fence: Option, + /// Hash of the current filter for change detection. + pub filter_hash: u64, + /// Whether Quick Peek is visible. + pub peek_visible: bool, +} + +impl IssueListState { + /// Reset pagination state (called when filter changes or on refresh). + pub fn reset_pagination(&mut self) { + self.rows.clear(); + self.next_cursor = None; + self.selected_index = 0; + self.scroll_offset = 0; + self.snapshot_fence = None; + self.total_count = 0; + self.prefetch_in_flight = false; + } + + /// Apply a new page of results. + pub fn apply_page(&mut self, page: IssueListPage) { + // Set snapshot fence on first page load. 
+ if self.snapshot_fence.is_none() { + self.snapshot_fence = page.rows.first().map(|r| r.updated_at); + } + self.rows.extend(page.rows); + self.next_cursor = page.next_cursor; + self.total_count = page.total_count; + self.prefetch_in_flight = false; + } + + /// Check if filter changed and reset if needed. + pub fn check_filter_change(&mut self) -> bool { + let new_hash = self.filter.hash_value(); + if new_hash != self.filter_hash { + self.filter_hash = new_hash; + self.reset_pagination(); + true + } else { + false + } + } + + /// Whether the user has scrolled near the end of current data (80% threshold). + pub fn should_prefetch(&self) -> bool { + if self.prefetch_in_flight || self.next_cursor.is_none() { + return false; + } + if self.rows.is_empty() { + return false; + } + let threshold = (self.rows.len() * 4) / 5; // 80% + self.selected_index >= threshold + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + fn sample_page(count: usize, has_next: bool) -> IssueListPage { + let rows: Vec = (0..count) + .map(|i| IssueListRow { + project_path: "group/project".into(), + iid: (count - i) as i64, + title: format!("Issue {}", count - i), + state: "opened".into(), + author: "taylor".into(), + labels: vec![], + updated_at: 1_700_000_000_000 - (i as i64 * 60_000), + }) + .collect(); + + let next_cursor = if has_next { + rows.last().map(|r| IssueCursor { + updated_at: r.updated_at, + iid: r.iid, + }) + } else { + None + }; + + IssueListPage { + rows, + next_cursor, + total_count: if has_next { + (count * 2) as u64 + } else { + count as u64 + }, + } + } + + #[test] + fn test_apply_page_sets_snapshot_fence() { + let mut state = IssueListState::default(); + let page = sample_page(5, false); + state.apply_page(page); + + assert_eq!(state.rows.len(), 5); + assert!(state.snapshot_fence.is_some()); + 
assert_eq!(state.snapshot_fence.unwrap(), 1_700_000_000_000); + } + + #[test] + fn test_apply_page_appends() { + let mut state = IssueListState::default(); + state.apply_page(sample_page(5, true)); + assert_eq!(state.rows.len(), 5); + + state.apply_page(sample_page(3, false)); + assert_eq!(state.rows.len(), 8); + } + + #[test] + fn test_reset_pagination_clears_state() { + let mut state = IssueListState::default(); + state.apply_page(sample_page(5, true)); + state.selected_index = 3; + + state.reset_pagination(); + + assert!(state.rows.is_empty()); + assert_eq!(state.selected_index, 0); + assert!(state.next_cursor.is_none()); + assert!(state.snapshot_fence.is_none()); + } + + #[test] + fn test_check_filter_change_detects_change() { + let mut state = IssueListState::default(); + state.filter_hash = state.filter.hash_value(); + + state.filter.state = Some("opened".into()); + assert!(state.check_filter_change()); + } + + #[test] + fn test_check_filter_change_no_change() { + let mut state = IssueListState::default(); + state.filter_hash = state.filter.hash_value(); + assert!(!state.check_filter_change()); + } + + #[test] + fn test_should_prefetch() { + let mut state = IssueListState::default(); + state.apply_page(sample_page(10, true)); + + state.selected_index = 4; // 40% — no prefetch + assert!(!state.should_prefetch()); + + state.selected_index = 8; // 80% — prefetch + assert!(state.should_prefetch()); + } + + #[test] + fn test_should_prefetch_no_next_page() { + let mut state = IssueListState::default(); + state.apply_page(sample_page(10, false)); + state.selected_index = 9; + assert!(!state.should_prefetch()); + } + + #[test] + fn test_should_prefetch_already_in_flight() { + let mut state = IssueListState::default(); + state.apply_page(sample_page(10, true)); + state.selected_index = 9; + state.prefetch_in_flight = true; + assert!(!state.should_prefetch()); + } + + #[test] + fn test_issue_filter_is_active() { + let empty = IssueFilter::default(); + 
assert!(!empty.is_active()); + + let active = IssueFilter { + state: Some("opened".into()), + ..Default::default() + }; + assert!(active.is_active()); + } + + #[test] + fn test_issue_filter_hash_deterministic() { + let f1 = IssueFilter { + state: Some("opened".into()), + author: Some("taylor".into()), + ..Default::default() + }; + let f2 = f1.clone(); + assert_eq!(f1.hash_value(), f2.hash_value()); + } + + #[test] + fn test_issue_filter_hash_differs() { + let f1 = IssueFilter { + state: Some("opened".into()), + ..Default::default() + }; + let f2 = IssueFilter { + state: Some("closed".into()), + ..Default::default() + }; + assert_ne!(f1.hash_value(), f2.hash_value()); + } + + #[test] + fn test_snapshot_fence_not_overwritten_on_second_page() { + let mut state = IssueListState::default(); + state.apply_page(sample_page(5, true)); + let fence = state.snapshot_fence; + + state.apply_page(sample_page(3, false)); + assert_eq!( + state.snapshot_fence, fence, + "Fence should not change on second page" + ); + } } diff --git a/crates/lore-tui/src/state/mod.rs b/crates/lore-tui/src/state/mod.rs index 4cdab66..e67271c 100644 --- a/crates/lore-tui/src/state/mod.rs +++ b/crates/lore-tui/src/state/mod.rs @@ -24,7 +24,7 @@ pub mod sync; pub mod timeline; pub mod who; -use std::collections::HashMap; +use std::collections::{HashMap, HashSet}; use crate::message::Screen; @@ -80,6 +80,8 @@ impl LoadState { #[derive(Debug, Default)] pub struct ScreenLoadStateMap { map: HashMap, + /// Screens that have had a load state set at least once. + visited: HashSet, } impl ScreenLoadStateMap { @@ -94,6 +96,7 @@ impl ScreenLoadStateMap { /// /// Setting to `Idle` removes the entry to prevent map growth. pub fn set(&mut self, screen: Screen, state: LoadState) { + self.visited.insert(screen.clone()); if state == LoadState::Idle { self.map.remove(&screen); } else { @@ -101,6 +104,12 @@ impl ScreenLoadStateMap { } } + /// Whether this screen has ever had a load initiated. 
+ #[must_use] + pub fn was_visited(&self, screen: &Screen) -> bool { + self.visited.contains(screen) + } + /// Whether any screen is currently loading. #[must_use] pub fn any_loading(&self) -> bool { diff --git a/crates/lore-tui/src/state/mr_list.rs b/crates/lore-tui/src/state/mr_list.rs index c8ba600..97c8d38 100644 --- a/crates/lore-tui/src/state/mr_list.rs +++ b/crates/lore-tui/src/state/mr_list.rs @@ -1,14 +1,422 @@ -#![allow(dead_code)] +#![allow(dead_code)] // Phase 2: consumed by LoreApp and view/mr_list //! Merge request list screen state. +//! +//! Mirrors the issue list pattern with MR-specific filter fields +//! (draft, reviewer, target/source branch). Uses the same keyset +//! pagination with snapshot fence for stable ordering. -use crate::message::MrRow; +use std::hash::{Hash, Hasher}; + +// --------------------------------------------------------------------------- +// Cursor (keyset pagination boundary) +// --------------------------------------------------------------------------- + +/// Keyset pagination cursor — (updated_at, iid) boundary. +#[derive(Debug, Clone, PartialEq, Eq)] +pub struct MrCursor { + pub updated_at: i64, + pub iid: i64, +} + +// --------------------------------------------------------------------------- +// Filter +// --------------------------------------------------------------------------- + +/// Structured filter for MR list queries. +#[derive(Debug, Clone, Default, PartialEq, Eq)] +pub struct MrFilter { + pub state: Option, + pub author: Option, + pub reviewer: Option, + pub target_branch: Option, + pub source_branch: Option, + pub label: Option, + pub draft: Option, + pub free_text: Option, + pub project_id: Option, +} + +impl MrFilter { + /// Compute a hash for change detection. 
+ pub fn hash_value(&self) -> u64 { + let mut hasher = std::collections::hash_map::DefaultHasher::new(); + self.state.hash(&mut hasher); + self.author.hash(&mut hasher); + self.reviewer.hash(&mut hasher); + self.target_branch.hash(&mut hasher); + self.source_branch.hash(&mut hasher); + self.label.hash(&mut hasher); + self.draft.hash(&mut hasher); + self.free_text.hash(&mut hasher); + self.project_id.hash(&mut hasher); + hasher.finish() + } + + /// Whether any filter is active. + pub fn is_active(&self) -> bool { + self.state.is_some() + || self.author.is_some() + || self.reviewer.is_some() + || self.target_branch.is_some() + || self.source_branch.is_some() + || self.label.is_some() + || self.draft.is_some() + || self.free_text.is_some() + || self.project_id.is_some() + } +} + +// --------------------------------------------------------------------------- +// Row +// --------------------------------------------------------------------------- + +/// A single row in the MR list. +#[derive(Debug, Clone)] +pub struct MrListRow { + pub project_path: String, + pub iid: i64, + pub title: String, + pub state: String, + pub author: String, + pub target_branch: String, + pub labels: Vec, + pub updated_at: i64, + pub draft: bool, +} + +// --------------------------------------------------------------------------- +// Page result +// --------------------------------------------------------------------------- + +/// Result from a paginated MR list query. +#[derive(Debug, Clone)] +pub struct MrListPage { + pub rows: Vec, + pub next_cursor: Option, + pub total_count: u64, +} + +// --------------------------------------------------------------------------- +// Sort +// --------------------------------------------------------------------------- + +/// Fields available for sorting. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum MrSortField { + #[default] + UpdatedAt, + Iid, + Title, + State, + Author, + TargetBranch, +} + +/// Sort direction. 
+#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum MrSortOrder { + #[default] + Desc, + Asc, +} + +// --------------------------------------------------------------------------- +// MrListState +// --------------------------------------------------------------------------- /// State for the MR list screen. #[derive(Debug, Default)] pub struct MrListState { - pub rows: Vec, - pub filter: String, - pub filter_focused: bool, + /// Current page of MR rows. + pub rows: Vec, + /// Total count of matching MRs. + pub total_count: u64, + /// Selected row index (within current window). pub selected_index: usize, + /// Scroll offset for the entity table. + pub scroll_offset: usize, + /// Cursor for the next page. + pub next_cursor: Option, + /// Whether a prefetch is in flight. + pub prefetch_in_flight: bool, + /// Current filter. + pub filter: MrFilter, + /// Raw filter input text. + pub filter_input: String, + /// Whether the filter bar has focus. + pub filter_focused: bool, + /// Sort field. + pub sort_field: MrSortField, + /// Sort direction. + pub sort_order: MrSortOrder, + /// Snapshot fence: max updated_at from initial load. + pub snapshot_fence: Option, + /// Hash of the current filter for change detection. + pub filter_hash: u64, + /// Whether Quick Peek is visible. + pub peek_visible: bool, +} + +impl MrListState { + /// Reset pagination state (called when filter changes or on refresh). + pub fn reset_pagination(&mut self) { + self.rows.clear(); + self.next_cursor = None; + self.selected_index = 0; + self.scroll_offset = 0; + self.snapshot_fence = None; + self.total_count = 0; + self.prefetch_in_flight = false; + } + + /// Apply a new page of results. + pub fn apply_page(&mut self, page: MrListPage) { + // Set snapshot fence on first page load. 
+ if self.snapshot_fence.is_none() { + self.snapshot_fence = page.rows.first().map(|r| r.updated_at); + } + self.rows.extend(page.rows); + self.next_cursor = page.next_cursor; + self.total_count = page.total_count; + self.prefetch_in_flight = false; + } + + /// Check if filter changed and reset if needed. + pub fn check_filter_change(&mut self) -> bool { + let new_hash = self.filter.hash_value(); + if new_hash != self.filter_hash { + self.filter_hash = new_hash; + self.reset_pagination(); + true + } else { + false + } + } + + /// Whether the user has scrolled near the end of current data (80% threshold). + pub fn should_prefetch(&self) -> bool { + if self.prefetch_in_flight || self.next_cursor.is_none() { + return false; + } + if self.rows.is_empty() { + return false; + } + let threshold = (self.rows.len() * 4) / 5; // 80% + self.selected_index >= threshold + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + + fn sample_page(count: usize, has_next: bool) -> MrListPage { + let rows: Vec = (0..count) + .map(|i| MrListRow { + project_path: "group/project".into(), + iid: (count - i) as i64, + title: format!("MR {}", count - i), + state: "opened".into(), + author: "taylor".into(), + target_branch: "main".into(), + labels: vec![], + updated_at: 1_700_000_000_000 - (i as i64 * 60_000), + draft: i % 3 == 0, + }) + .collect(); + + let next_cursor = if has_next { + rows.last().map(|r| MrCursor { + updated_at: r.updated_at, + iid: r.iid, + }) + } else { + None + }; + + MrListPage { + rows, + next_cursor, + total_count: if has_next { + (count * 2) as u64 + } else { + count as u64 + }, + } + } + + #[test] + fn test_apply_page_sets_snapshot_fence() { + let mut state = MrListState::default(); + let page = sample_page(5, false); + state.apply_page(page); + + assert_eq!(state.rows.len(), 5); + 
assert!(state.snapshot_fence.is_some()); + assert_eq!(state.snapshot_fence.unwrap(), 1_700_000_000_000); + } + + #[test] + fn test_apply_page_appends() { + let mut state = MrListState::default(); + state.apply_page(sample_page(5, true)); + assert_eq!(state.rows.len(), 5); + + state.apply_page(sample_page(3, false)); + assert_eq!(state.rows.len(), 8); + } + + #[test] + fn test_reset_pagination_clears_state() { + let mut state = MrListState::default(); + state.apply_page(sample_page(5, true)); + state.selected_index = 3; + + state.reset_pagination(); + + assert!(state.rows.is_empty()); + assert_eq!(state.selected_index, 0); + assert!(state.next_cursor.is_none()); + assert!(state.snapshot_fence.is_none()); + } + + #[test] + fn test_check_filter_change_detects_change() { + let mut state = MrListState::default(); + state.filter_hash = state.filter.hash_value(); + + state.filter.state = Some("opened".into()); + assert!(state.check_filter_change()); + } + + #[test] + fn test_check_filter_change_no_change() { + let mut state = MrListState::default(); + state.filter_hash = state.filter.hash_value(); + assert!(!state.check_filter_change()); + } + + #[test] + fn test_should_prefetch() { + let mut state = MrListState::default(); + state.apply_page(sample_page(10, true)); + + state.selected_index = 4; // 40% -- no prefetch + assert!(!state.should_prefetch()); + + state.selected_index = 8; // 80% -- prefetch + assert!(state.should_prefetch()); + } + + #[test] + fn test_should_prefetch_no_next_page() { + let mut state = MrListState::default(); + state.apply_page(sample_page(10, false)); + state.selected_index = 9; + assert!(!state.should_prefetch()); + } + + #[test] + fn test_should_prefetch_already_in_flight() { + let mut state = MrListState::default(); + state.apply_page(sample_page(10, true)); + state.selected_index = 9; + state.prefetch_in_flight = true; + assert!(!state.should_prefetch()); + } + + #[test] + fn test_mr_filter_is_active() { + let empty = MrFilter::default(); + 
assert!(!empty.is_active()); + + let active = MrFilter { + state: Some("opened".into()), + ..Default::default() + }; + assert!(active.is_active()); + + let draft_active = MrFilter { + draft: Some(true), + ..Default::default() + }; + assert!(draft_active.is_active()); + } + + #[test] + fn test_mr_filter_hash_deterministic() { + let f1 = MrFilter { + state: Some("opened".into()), + author: Some("taylor".into()), + ..Default::default() + }; + let f2 = f1.clone(); + assert_eq!(f1.hash_value(), f2.hash_value()); + } + + #[test] + fn test_mr_filter_hash_differs() { + let f1 = MrFilter { + state: Some("opened".into()), + ..Default::default() + }; + let f2 = MrFilter { + state: Some("merged".into()), + ..Default::default() + }; + assert_ne!(f1.hash_value(), f2.hash_value()); + } + + #[test] + fn test_snapshot_fence_not_overwritten_on_second_page() { + let mut state = MrListState::default(); + state.apply_page(sample_page(5, true)); + let fence = state.snapshot_fence; + + state.apply_page(sample_page(3, false)); + assert_eq!( + state.snapshot_fence, fence, + "Fence should not change on second page" + ); + } + + #[test] + fn test_mr_filter_reviewer_field() { + let f = MrFilter { + reviewer: Some("alice".into()), + ..Default::default() + }; + assert!(f.is_active()); + assert_ne!(f.hash_value(), MrFilter::default().hash_value()); + } + + #[test] + fn test_mr_filter_target_branch_field() { + let f = MrFilter { + target_branch: Some("main".into()), + ..Default::default() + }; + assert!(f.is_active()); + } + + #[test] + fn test_mr_list_row_draft_field() { + let row = MrListRow { + project_path: "g/p".into(), + iid: 1, + title: "Draft MR".into(), + state: "opened".into(), + author: "taylor".into(), + target_branch: "main".into(), + labels: vec![], + updated_at: 0, + draft: true, + }; + assert!(row.draft); + } } diff --git a/crates/lore-tui/src/view/common/entity_table.rs b/crates/lore-tui/src/view/common/entity_table.rs new file mode 100644 index 0000000..13c4096 --- /dev/null +++ 
b/crates/lore-tui/src/view/common/entity_table.rs @@ -0,0 +1,676 @@ +#![allow(dead_code)] // Phase 2: consumed by Issue List + MR List screens + +//! Generic entity table widget for list screens. +//! +//! `EntityTable` renders rows with sortable, responsive columns. +//! Columns hide gracefully when the terminal is too narrow, using +//! priority-based visibility. + +use ftui::core::geometry::Rect; +use ftui::render::cell::{Cell, PackedRgba}; +use ftui::render::drawing::Draw; +use ftui::render::frame::Frame; + +// --------------------------------------------------------------------------- +// Column definition +// --------------------------------------------------------------------------- + +/// Describes a single table column. +#[derive(Debug, Clone)] +pub struct ColumnDef { + /// Display name shown in the header. + pub name: &'static str, + /// Minimum width in characters. Column is hidden if it can't meet this. + pub min_width: u16, + /// Flex weight for distributing extra space. + pub flex_weight: u16, + /// Visibility priority (0 = always shown, higher = hidden first). + pub priority: u8, + /// Text alignment within the column. + pub align: Align, +} + +/// Text alignment within a column. +#[derive(Debug, Clone, Copy, PartialEq, Eq, Default)] +pub enum Align { + #[default] + Left, + Right, +} + +// --------------------------------------------------------------------------- +// TableRow trait +// --------------------------------------------------------------------------- + +/// Trait for types that can be rendered as a table row. +pub trait TableRow { + /// Return the cell text for each column, in column order. + fn cells(&self, col_count: usize) -> Vec; +} + +// --------------------------------------------------------------------------- +// EntityTable state +// --------------------------------------------------------------------------- + +/// Rendering state for the entity table. 
+#[derive(Debug, Clone)] +pub struct EntityTableState { + /// Index of the selected row (0-based, within the full data set). + pub selected: usize, + /// Scroll offset (first visible row index). + pub scroll_offset: usize, + /// Index of the column used for sorting. + pub sort_column: usize, + /// Sort direction. + pub sort_ascending: bool, +} + +impl Default for EntityTableState { + fn default() -> Self { + Self { + selected: 0, + scroll_offset: 0, + sort_column: 0, + sort_ascending: true, + } + } +} + +impl EntityTableState { + /// Move selection down by 1. + pub fn select_next(&mut self, total_rows: usize) { + if total_rows == 0 { + return; + } + self.selected = (self.selected + 1).min(total_rows - 1); + } + + /// Move selection up by 1. + pub fn select_prev(&mut self) { + self.selected = self.selected.saturating_sub(1); + } + + /// Page down (move by `page_size` rows). + pub fn page_down(&mut self, total_rows: usize, page_size: usize) { + if total_rows == 0 { + return; + } + self.selected = (self.selected + page_size).min(total_rows - 1); + } + + /// Page up. + pub fn page_up(&mut self, page_size: usize) { + self.selected = self.selected.saturating_sub(page_size); + } + + /// Jump to top. + pub fn select_first(&mut self) { + self.selected = 0; + } + + /// Jump to bottom. + pub fn select_last(&mut self, total_rows: usize) { + if total_rows > 0 { + self.selected = total_rows - 1; + } + } + + /// Cycle sort column forward (wraps around). + pub fn cycle_sort(&mut self, col_count: usize) { + if col_count == 0 { + return; + } + self.sort_column = (self.sort_column + 1) % col_count; + } + + /// Toggle sort direction on current column. + pub fn toggle_sort_direction(&mut self) { + self.sort_ascending = !self.sort_ascending; + } + + /// Ensure scroll offset keeps selection visible. 
+ fn adjust_scroll(&mut self, visible_rows: usize) { + if visible_rows == 0 { + return; + } + if self.selected < self.scroll_offset { + self.scroll_offset = self.selected; + } + if self.selected >= self.scroll_offset + visible_rows { + self.scroll_offset = self.selected - visible_rows + 1; + } + } +} + +// --------------------------------------------------------------------------- +// Colors +// --------------------------------------------------------------------------- + +/// Colors for the entity table. Will be replaced by Theme injection. +pub struct TableColors { + pub header_fg: PackedRgba, + pub header_bg: PackedRgba, + pub row_fg: PackedRgba, + pub row_alt_bg: PackedRgba, + pub selected_fg: PackedRgba, + pub selected_bg: PackedRgba, + pub sort_indicator: PackedRgba, + pub border: PackedRgba, +} + +// --------------------------------------------------------------------------- +// Render +// --------------------------------------------------------------------------- + +/// Compute which columns are visible given the available width. +/// +/// Returns indices of visible columns sorted by original order, +/// along with their allocated widths. +pub fn visible_columns(columns: &[ColumnDef], available_width: u16) -> Vec<(usize, u16)> { + // Sort by priority (lowest = most important). + let mut indexed: Vec<(usize, &ColumnDef)> = columns.iter().enumerate().collect(); + indexed.sort_by_key(|(_, col)| col.priority); + + let mut result: Vec<(usize, u16)> = Vec::new(); + let mut used_width: u16 = 0; + let gap = 1u16; // 1-char gap between columns. + + for (idx, col) in &indexed { + let needed = col.min_width + if result.is_empty() { 0 } else { gap }; + if used_width + needed <= available_width { + result.push((*idx, col.min_width)); + used_width += needed; + } + } + + // Distribute remaining space by flex weight. 
+ let remaining = available_width.saturating_sub(used_width); + if remaining > 0 { + let total_weight: u16 = result + .iter() + .map(|(idx, _)| columns[*idx].flex_weight) + .sum(); + + if total_weight > 0 { + for (idx, width) in &mut result { + let weight = columns[*idx].flex_weight; + let extra = + (u32::from(remaining) * u32::from(weight) / u32::from(total_weight)) as u16; + *width += extra; + } + } + } + + // Sort by original column order for rendering. + result.sort_by_key(|(idx, _)| *idx); + result +} + +/// Render the entity table header row. +pub fn render_header( + frame: &mut Frame<'_>, + columns: &[ColumnDef], + visible: &[(usize, u16)], + state: &EntityTableState, + y: u16, + area_x: u16, + colors: &TableColors, +) { + let header_cell = Cell { + fg: colors.header_fg, + bg: colors.header_bg, + ..Cell::default() + }; + let sort_cell = Cell { + fg: colors.sort_indicator, + bg: colors.header_bg, + ..Cell::default() + }; + + // Fill header background. + let total_width: u16 = visible.iter().map(|(_, w)| w + 1).sum(); + let header_rect = Rect::new(area_x, y, total_width, 1); + frame.draw_rect_filled( + header_rect, + Cell { + bg: colors.header_bg, + ..Cell::default() + }, + ); + + let mut x = area_x; + for (col_idx, col_width) in visible { + let col = &columns[*col_idx]; + let col_max = x.saturating_add(*col_width); + + let after_name = frame.print_text_clipped(x, y, col.name, header_cell, col_max); + + // Sort indicator. + if *col_idx == state.sort_column { + let arrow = if state.sort_ascending { " ^" } else { " v" }; + frame.print_text_clipped(after_name, y, arrow, sort_cell, col_max); + } + + x = col_max.saturating_add(1); // gap + } +} + +/// Style context for rendering a single row. +pub struct RowContext<'a> { + pub columns: &'a [ColumnDef], + pub visible: &'a [(usize, u16)], + pub is_selected: bool, + pub is_alt: bool, + pub colors: &'a TableColors, +} + +/// Render a data row. 
+pub fn render_row<R: TableRow>(
+    frame: &mut Frame<'_>,
+    row: &R,
+    y: u16,
+    area_x: u16,
+    ctx: &RowContext<'_>,
+) {
+    let (fg, bg) = if ctx.is_selected {
+        (ctx.colors.selected_fg, ctx.colors.selected_bg)
+    } else if ctx.is_alt {
+        (ctx.colors.row_fg, ctx.colors.row_alt_bg)
+    } else {
+        (ctx.colors.row_fg, Cell::default().bg)
+    };
+
+    let cell_style = Cell {
+        fg,
+        bg,
+        ..Cell::default()
+    };
+
+    // Fill row background if selected or alt.
+    if ctx.is_selected || ctx.is_alt {
+        let total_width: u16 = ctx.visible.iter().map(|(_, w)| w + 1).sum();
+        frame.draw_rect_filled(
+            Rect::new(area_x, y, total_width, 1),
+            Cell {
+                bg,
+                ..Cell::default()
+            },
+        );
+    }
+
+    let cells = row.cells(ctx.columns.len());
+    let mut x = area_x;
+
+    for (col_idx, col_width) in ctx.visible {
+        let col_max = x.saturating_add(*col_width);
+        let text = cells.get(*col_idx).map(String::as_str).unwrap_or("");
+
+        match ctx.columns[*col_idx].align {
+            Align::Left => {
+                frame.print_text_clipped(x, y, text, cell_style, col_max);
+            }
+            Align::Right => {
+                let text_len = text.chars().count() as u16; // chars, not bytes: UTF-8 text would over-shift
+                let start = if text_len < *col_width {
+                    x + col_width - text_len
+                } else {
+                    x
+                };
+                frame.print_text_clipped(start, y, text, cell_style, col_max);
+            }
+        }
+
+        x = col_max.saturating_add(1); // gap
+    }
+}
+
+/// Render a complete entity table: header + scrollable rows.
+pub fn render_entity_table<R: TableRow>(
+    frame: &mut Frame<'_>,
+    rows: &[R],
+    columns: &[ColumnDef],
+    state: &mut EntityTableState,
+    area: Rect,
+    colors: &TableColors,
+) {
+    if area.height < 2 || area.width < 5 {
+        return;
+    }
+
+    let visible = visible_columns(columns, area.width);
+    if visible.is_empty() {
+        return;
+    }
+
+    // Header row.
+    render_header(frame, columns, &visible, state, area.y, area.x, colors);
+
+    // Separator.
+    let sep_y = area.y.saturating_add(1);
+    let sep_cell = Cell {
+        fg: colors.border,
+        ..Cell::default()
+    };
+    let rule = "─".repeat(area.width as usize);
+    frame.print_text_clipped(
+        area.x,
+        sep_y,
+        &rule,
+        sep_cell,
+        area.x.saturating_add(area.width),
+    );
+
+    // Data rows.
+    let data_start_y = area.y.saturating_add(2);
+    let visible_rows = area.height.saturating_sub(2) as usize; // minus header + separator
+
+    state.adjust_scroll(visible_rows);
+
+    let start = state.scroll_offset;
+    let end = (start + visible_rows).min(rows.len());
+
+    for (i, row) in rows[start..end].iter().enumerate() {
+        let row_y = data_start_y.saturating_add(i as u16);
+        let absolute_idx = start + i;
+        let ctx = RowContext {
+            columns,
+            visible: &visible,
+            is_selected: absolute_idx == state.selected,
+            is_alt: absolute_idx % 2 == 1,
+            colors,
+        };
+
+        render_row(frame, row, row_y, area.x, &ctx);
+    }
+
+    // Scroll indicator: overlay on the bottom visible row (the area has no spare line below it).
+    if end < rows.len() {
+        let indicator_y = data_start_y.saturating_add((visible_rows as u16).saturating_sub(1));
+        if indicator_y < area.y.saturating_add(area.height) {
+            let muted = Cell {
+                fg: colors.border,
+                ..Cell::default()
+            };
+            let remaining = rows.len() - end;
+            frame.print_text_clipped(
+                area.x,
+                indicator_y,
+                &format!("... {remaining} more"),
+                muted,
+                area.x.saturating_add(area.width),
+            );
+        }
+    }
+}
+
+// ---------------------------------------------------------------------------
+// Tests
+// ---------------------------------------------------------------------------
+
+#[cfg(test)]
+mod tests {
+    use super::*;
+    use ftui::render::grapheme_pool::GraphemePool;
+
+    macro_rules! with_frame {
+        ($width:expr, $height:expr, |$frame:ident| $body:block) => {{
+            let mut pool = GraphemePool::new();
+            let mut $frame = Frame::new($width, $height, &mut pool);
+            $body
+        }};
+    }
+
+    fn test_columns() -> Vec<ColumnDef> {
+        vec![
+            ColumnDef {
+                name: "IID",
+                min_width: 5,
+                flex_weight: 0,
+                priority: 0,
+                align: Align::Right,
+            },
+            ColumnDef {
+                name: "Title",
+                min_width: 10,
+                flex_weight: 3,
+                priority: 0,
+                align: Align::Left,
+            },
+            ColumnDef {
+                name: "State",
+                min_width: 8,
+                flex_weight: 1,
+                priority: 1,
+                align: Align::Left,
+            },
+            ColumnDef {
+                name: "Author",
+                min_width: 8,
+                flex_weight: 1,
+                priority: 2,
+                align: Align::Left,
+            },
+            ColumnDef {
+                name: "Updated",
+                min_width: 10,
+                flex_weight: 0,
+                priority: 3,
+                align: Align::Right,
+            },
+        ]
+    }
+
+    struct TestRow {
+        cells: Vec<String>,
+    }
+
+    impl TableRow for TestRow {
+        fn cells(&self, _col_count: usize) -> Vec<String> {
+            self.cells.clone()
+        }
+    }
+
+    fn test_colors() -> TableColors {
+        TableColors {
+            header_fg: PackedRgba::rgb(0xFF, 0xFF, 0xFF),
+            header_bg: PackedRgba::rgb(0x30, 0x30, 0x30),
+            row_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3),
+            row_alt_bg: PackedRgba::rgb(0x28, 0x28, 0x24),
+            selected_fg: PackedRgba::rgb(0xFF, 0xFF, 0xFF),
+            selected_bg: PackedRgba::rgb(0xDA, 0x70, 0x2C),
+            sort_indicator: PackedRgba::rgb(0xDA, 0x70, 0x2C),
+            border: PackedRgba::rgb(0x87, 0x87, 0x80),
+        }
+    }
+
+    #[test]
+    fn test_visible_columns_all_fit() {
+        let cols = test_columns();
+        let vis = visible_columns(&cols, 100);
+        assert_eq!(vis.len(), 5, "All 5 columns should fit at 100 cols");
+    }
+
+    #[test]
+    fn test_visible_columns_hides_low_priority() {
+        let cols = test_columns();
+        // min widths: 5 + 10 + 8 + 8 + 10 + 4 gaps = 45.
+        // At 25 cols IID, Title, and State fit (5 + 11 + 9 = 25); Author and Updated are hidden.
+ let vis = visible_columns(&cols, 25); + let visible_indices: Vec = vis.iter().map(|(idx, _)| *idx).collect(); + assert!(visible_indices.contains(&0), "IID should always be visible"); + assert!( + visible_indices.contains(&1), + "Title should always be visible" + ); + assert!( + !visible_indices.contains(&4), + "Updated (priority 3) should be hidden" + ); + } + + #[test] + fn test_column_hiding_at_60_cols() { + let cols = test_columns(); + let vis = visible_columns(&cols, 60); + // min widths for priority 0,1,2: 5+10+8+8 + 3 gaps = 34. + // Priority 3 (Updated, min 10 + gap) = 45 total, should still fit. + assert!(vis.len() >= 3, "At least 3 columns at 60 cols"); + } + + #[test] + fn test_state_select_next_prev() { + let mut state = EntityTableState::default(); + state.select_next(5); + assert_eq!(state.selected, 1); + state.select_next(5); + assert_eq!(state.selected, 2); + state.select_prev(); + assert_eq!(state.selected, 1); + } + + #[test] + fn test_state_select_bounds() { + let mut state = EntityTableState::default(); + state.select_prev(); // at 0, can't go below + assert_eq!(state.selected, 0); + + state.select_next(3); + state.select_next(3); + state.select_next(3); // at 2, can't go above last + assert_eq!(state.selected, 2); + } + + #[test] + fn test_state_page_up_down() { + let mut state = EntityTableState::default(); + state.page_down(20, 5); + assert_eq!(state.selected, 5); + state.page_up(3); + assert_eq!(state.selected, 2); + } + + #[test] + fn test_state_first_last() { + let mut state = EntityTableState { + selected: 5, + ..Default::default() + }; + state.select_first(); + assert_eq!(state.selected, 0); + state.select_last(10); + assert_eq!(state.selected, 9); + } + + #[test] + fn test_state_cycle_sort() { + let mut state = EntityTableState::default(); + assert_eq!(state.sort_column, 0); + state.cycle_sort(5); + assert_eq!(state.sort_column, 1); + state.sort_column = 4; + state.cycle_sort(5); // wraps to 0 + assert_eq!(state.sort_column, 0); + } + 
+ #[test] + fn test_state_toggle_sort_direction() { + let mut state = EntityTableState::default(); + assert!(state.sort_ascending); + state.toggle_sort_direction(); + assert!(!state.sort_ascending); + } + + #[test] + fn test_state_adjust_scroll() { + let mut state = EntityTableState { + selected: 15, + scroll_offset: 0, + ..Default::default() + }; + state.adjust_scroll(10); + assert_eq!(state.scroll_offset, 6); // selected=15 should be at bottom of 10-row window + } + + #[test] + fn test_render_entity_table_no_panic() { + with_frame!(80, 20, |frame| { + let cols = test_columns(); + let rows = vec![ + TestRow { + cells: vec![ + "#42".into(), + "Fix auth bug".into(), + "opened".into(), + "taylor".into(), + "2h ago".into(), + ], + }, + TestRow { + cells: vec![ + "#43".into(), + "Add tests".into(), + "merged".into(), + "alice".into(), + "1d ago".into(), + ], + }, + ]; + let mut state = EntityTableState::default(); + let colors = test_colors(); + + render_entity_table( + &mut frame, + &rows, + &cols, + &mut state, + Rect::new(0, 0, 80, 20), + &colors, + ); + }); + } + + #[test] + fn test_render_entity_table_tiny_noop() { + with_frame!(3, 1, |frame| { + let cols = test_columns(); + let rows: Vec = vec![]; + let mut state = EntityTableState::default(); + let colors = test_colors(); + + render_entity_table( + &mut frame, + &rows, + &cols, + &mut state, + Rect::new(0, 0, 3, 1), + &colors, + ); + }); + } + + #[test] + fn test_render_entity_table_empty_rows() { + with_frame!(80, 10, |frame| { + let cols = test_columns(); + let rows: Vec = vec![]; + let mut state = EntityTableState::default(); + let colors = test_colors(); + + render_entity_table( + &mut frame, + &rows, + &cols, + &mut state, + Rect::new(0, 0, 80, 10), + &colors, + ); + }); + } + + #[test] + fn test_state_select_next_empty() { + let mut state = EntityTableState::default(); + state.select_next(0); // no rows + assert_eq!(state.selected, 0); + } +} diff --git a/crates/lore-tui/src/view/common/error_toast.rs 
b/crates/lore-tui/src/view/common/error_toast.rs index 30a1c0e..a42a535 100644 --- a/crates/lore-tui/src/view/common/error_toast.rs +++ b/crates/lore-tui/src/view/common/error_toast.rs @@ -23,7 +23,15 @@ pub fn render_error_toast( let max_toast_width = (area.width / 2).clamp(20, 60); let toast_text = if msg.len() as u16 > max_toast_width.saturating_sub(4) { let trunc_len = max_toast_width.saturating_sub(7) as usize; - format!(" {}... ", &msg[..trunc_len.min(msg.len())]) + // Find a char boundary at or before trunc_len to avoid panicking + // on multi-byte UTF-8 (e.g., emoji or CJK in error messages). + let safe_end = msg + .char_indices() + .take_while(|&(i, _)| i <= trunc_len) + .last() + .map_or(0, |(i, c)| i + c.len_utf8()) + .min(msg.len()); + format!(" {}... ", &msg[..safe_end]) } else { format!(" {msg} ") }; diff --git a/crates/lore-tui/src/view/common/filter_bar.rs b/crates/lore-tui/src/view/common/filter_bar.rs new file mode 100644 index 0000000..31e3062 --- /dev/null +++ b/crates/lore-tui/src/view/common/filter_bar.rs @@ -0,0 +1,469 @@ +#![allow(dead_code)] // Phase 2: consumed by Issue List + MR List screens + +//! Filter bar widget for list screens. +//! +//! Wraps a text input with DSL parsing, inline diagnostics for unknown +//! fields, and rendered filter chips below the input. + +use ftui::core::geometry::Rect; +use ftui::render::cell::{Cell, PackedRgba}; +use ftui::render::drawing::Draw; +use ftui::render::frame::Frame; + +use crate::filter_dsl::{self, FilterToken}; + +// --------------------------------------------------------------------------- +// Filter bar state +// --------------------------------------------------------------------------- + +/// State for the filter bar widget. +#[derive(Debug, Clone, Default)] +pub struct FilterBarState { + /// Current filter input text. + pub input: String, + /// Cursor position within the input string (byte offset). + pub cursor: usize, + /// Whether the filter bar has focus. 
+ pub focused: bool, + /// Parsed tokens from the current input. + pub tokens: Vec, + /// Fields that are unknown for the current entity type. + pub unknown_fields: Vec, +} + +impl FilterBarState { + /// Update parsed tokens from the current input text. + pub fn reparse(&mut self, known_fields: &[&str]) { + self.tokens = filter_dsl::parse_filter_tokens(&self.input); + self.unknown_fields = filter_dsl::unknown_fields(&self.tokens, known_fields) + .into_iter() + .map(String::from) + .collect(); + } + + /// Insert a character at the cursor position. + pub fn insert_char(&mut self, ch: char) { + if self.cursor > self.input.len() { + self.cursor = self.input.len(); + } + self.input.insert(self.cursor, ch); + self.cursor += ch.len_utf8(); + } + + /// Delete the character before the cursor (backspace). + pub fn delete_back(&mut self) { + if self.cursor > 0 && !self.input.is_empty() { + // Find the previous character boundary. + let prev = self.input[..self.cursor] + .char_indices() + .next_back() + .map(|(i, _)| i) + .unwrap_or(0); + self.input.remove(prev); + self.cursor = prev; + } + } + + /// Delete the character at the cursor (delete key). + pub fn delete_forward(&mut self) { + if self.cursor < self.input.len() { + self.input.remove(self.cursor); + } + } + + /// Move cursor left by one character. + pub fn move_left(&mut self) { + if self.cursor > 0 { + self.cursor = self.input[..self.cursor] + .char_indices() + .next_back() + .map(|(i, _)| i) + .unwrap_or(0); + } + } + + /// Move cursor right by one character. + pub fn move_right(&mut self) { + if self.cursor < self.input.len() { + self.cursor = self.input[self.cursor..] + .chars() + .next() + .map(|ch| self.cursor + ch.len_utf8()) + .unwrap_or(self.input.len()); + } + } + + /// Move cursor to start. + pub fn move_home(&mut self) { + self.cursor = 0; + } + + /// Move cursor to end. + pub fn move_end(&mut self) { + self.cursor = self.input.len(); + } + + /// Clear the input. 
+ pub fn clear(&mut self) { + self.input.clear(); + self.cursor = 0; + self.tokens.clear(); + self.unknown_fields.clear(); + } + + /// Whether the filter has any active tokens. + pub fn is_active(&self) -> bool { + !self.tokens.is_empty() + } +} + +// --------------------------------------------------------------------------- +// Colors +// --------------------------------------------------------------------------- + +/// Colors for the filter bar. +pub struct FilterBarColors { + pub input_fg: PackedRgba, + pub input_bg: PackedRgba, + pub cursor_fg: PackedRgba, + pub cursor_bg: PackedRgba, + pub chip_fg: PackedRgba, + pub chip_bg: PackedRgba, + pub error_fg: PackedRgba, + pub label_fg: PackedRgba, +} + +// --------------------------------------------------------------------------- +// Render +// --------------------------------------------------------------------------- + +/// Render the filter bar. +/// +/// Layout: +/// ```text +/// Row 0: [Filter: ][input text with cursor___________] +/// Row 1: [chip1] [chip2] [chip3] (if tokens present) +/// ``` +/// +/// Returns the number of rows consumed (1 or 2). +pub fn render_filter_bar( + frame: &mut Frame<'_>, + state: &FilterBarState, + area: Rect, + colors: &FilterBarColors, +) -> u16 { + if area.height == 0 || area.width < 10 { + return 0; + } + + let max_x = area.x.saturating_add(area.width); + let y = area.y; + + // Label. + let label = if state.focused { "Filter: " } else { "/ " }; + let label_cell = Cell { + fg: colors.label_fg, + ..Cell::default() + }; + let after_label = frame.print_text_clipped(area.x, y, label, label_cell, max_x); + + // Input text. 
+ let input_cell = Cell { + fg: colors.input_fg, + bg: if state.focused { + colors.input_bg + } else { + Cell::default().bg + }, + ..Cell::default() + }; + + if state.input.is_empty() && !state.focused { + let muted = Cell { + fg: colors.label_fg, + ..Cell::default() + }; + frame.print_text_clipped(after_label, y, "type / to filter", muted, max_x); + } else { + // Render input text with cursor highlight. + render_input_with_cursor(frame, state, after_label, y, max_x, input_cell, colors); + } + + // Error indicators for unknown fields. + if !state.unknown_fields.is_empty() { + let err_cell = Cell { + fg: colors.error_fg, + ..Cell::default() + }; + let err_msg = format!("Unknown: {}", state.unknown_fields.join(", ")); + // Right-align the error. + let err_x = max_x.saturating_sub(err_msg.len() as u16 + 1); + frame.print_text_clipped(err_x, y, &err_msg, err_cell, max_x); + } + + // Chip row (if tokens present and space available). + if !state.tokens.is_empty() && area.height >= 2 { + let chip_y = y.saturating_add(1); + render_chips(frame, &state.tokens, area.x, chip_y, max_x, colors); + return 2; + } + + 1 +} + +/// Render input text with cursor highlight at the correct position. +fn render_input_with_cursor( + frame: &mut Frame<'_>, + state: &FilterBarState, + start_x: u16, + y: u16, + max_x: u16, + base_cell: Cell, + colors: &FilterBarColors, +) { + if !state.focused { + frame.print_text_clipped(start_x, y, &state.input, base_cell, max_x); + return; + } + + // Split at cursor position. + let cursor = state.cursor; + let input = &state.input; + let (before, after) = if cursor <= input.len() { + (&input[..cursor], &input[cursor..]) + } else { + (input.as_str(), "") + }; + + let mut x = frame.print_text_clipped(start_x, y, before, base_cell, max_x); + + // Cursor character (or space if at end). 
+ let cursor_cell = Cell { + fg: colors.cursor_fg, + bg: colors.cursor_bg, + ..Cell::default() + }; + + if let Some(ch) = after.chars().next() { + let s = String::from(ch); + x = frame.print_text_clipped(x, y, &s, cursor_cell, max_x); + let remaining = &after[ch.len_utf8()..]; + frame.print_text_clipped(x, y, remaining, base_cell, max_x); + } else { + // Cursor at end — render a visible block. + frame.print_text_clipped(x, y, " ", cursor_cell, max_x); + } +} + +/// Render filter chips as compact tags. +fn render_chips( + frame: &mut Frame<'_>, + tokens: &[FilterToken], + start_x: u16, + y: u16, + max_x: u16, + colors: &FilterBarColors, +) { + let chip_cell = Cell { + fg: colors.chip_fg, + bg: colors.chip_bg, + ..Cell::default() + }; + + let mut x = start_x; + + for token in tokens { + if x >= max_x { + break; + } + + let label = match token { + FilterToken::FieldValue { field, value } => format!("{field}:{value}"), + FilterToken::Negation { field, value } => format!("-{field}:{value}"), + FilterToken::FreeText(text) => text.clone(), + FilterToken::QuotedValue(text) => format!("\"{text}\""), + }; + + let chip_text = format!("[{label}]"); + x = frame.print_text_clipped(x, y, &chip_text, chip_cell, max_x); + x = x.saturating_add(1); // gap between chips + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::filter_dsl::ISSUE_FIELDS; + use ftui::render::grapheme_pool::GraphemePool; + + macro_rules! 
with_frame { + ($width:expr, $height:expr, |$frame:ident| $body:block) => {{ + let mut pool = GraphemePool::new(); + let mut $frame = Frame::new($width, $height, &mut pool); + $body + }}; + } + + fn test_colors() -> FilterBarColors { + FilterBarColors { + input_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3), + input_bg: PackedRgba::rgb(0x28, 0x28, 0x24), + cursor_fg: PackedRgba::rgb(0x00, 0x00, 0x00), + cursor_bg: PackedRgba::rgb(0xCE, 0xCD, 0xC3), + chip_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3), + chip_bg: PackedRgba::rgb(0x40, 0x40, 0x3C), + error_fg: PackedRgba::rgb(0xAF, 0x3A, 0x29), + label_fg: PackedRgba::rgb(0x87, 0x87, 0x80), + } + } + + #[test] + fn test_filter_bar_state_insert_char() { + let mut state = FilterBarState::default(); + state.insert_char('a'); + state.insert_char('b'); + assert_eq!(state.input, "ab"); + assert_eq!(state.cursor, 2); + } + + #[test] + fn test_filter_bar_state_delete_back() { + let mut state = FilterBarState { + input: "abc".into(), + cursor: 3, + ..Default::default() + }; + state.delete_back(); + assert_eq!(state.input, "ab"); + assert_eq!(state.cursor, 2); + } + + #[test] + fn test_filter_bar_state_delete_back_at_start() { + let mut state = FilterBarState { + input: "abc".into(), + cursor: 0, + ..Default::default() + }; + state.delete_back(); + assert_eq!(state.input, "abc"); + assert_eq!(state.cursor, 0); + } + + #[test] + fn test_filter_bar_state_move_left_right() { + let mut state = FilterBarState { + input: "abc".into(), + cursor: 2, + ..Default::default() + }; + state.move_left(); + assert_eq!(state.cursor, 1); + state.move_right(); + assert_eq!(state.cursor, 2); + } + + #[test] + fn test_filter_bar_state_home_end() { + let mut state = FilterBarState { + input: "hello".into(), + cursor: 3, + ..Default::default() + }; + state.move_home(); + assert_eq!(state.cursor, 0); + state.move_end(); + assert_eq!(state.cursor, 5); + } + + #[test] + fn test_filter_bar_state_clear() { + let mut state = FilterBarState { + input: 
"state:opened".into(), + cursor: 12, + tokens: vec![FilterToken::FieldValue { + field: "state".into(), + value: "opened".into(), + }], + ..Default::default() + }; + state.clear(); + assert!(state.input.is_empty()); + assert_eq!(state.cursor, 0); + assert!(state.tokens.is_empty()); + } + + #[test] + fn test_filter_bar_state_reparse() { + let mut state = FilterBarState { + input: "state:opened bogus:val".into(), + ..Default::default() + }; + state.reparse(ISSUE_FIELDS); + assert_eq!(state.tokens.len(), 2); + assert_eq!(state.unknown_fields, vec!["bogus"]); + } + + #[test] + fn test_filter_bar_state_is_active() { + let mut state = FilterBarState::default(); + assert!(!state.is_active()); + + state.input = "state:opened".into(); + state.reparse(ISSUE_FIELDS); + assert!(state.is_active()); + } + + #[test] + fn test_render_filter_bar_unfocused_no_panic() { + with_frame!(80, 2, |frame| { + let state = FilterBarState::default(); + let colors = test_colors(); + let rows = render_filter_bar(&mut frame, &state, Rect::new(0, 0, 80, 2), &colors); + assert_eq!(rows, 1); + }); + } + + #[test] + fn test_render_filter_bar_focused_no_panic() { + with_frame!(80, 2, |frame| { + let mut state = FilterBarState { + input: "state:opened".into(), + cursor: 12, + focused: true, + ..Default::default() + }; + state.reparse(ISSUE_FIELDS); + let colors = test_colors(); + let rows = render_filter_bar(&mut frame, &state, Rect::new(0, 0, 80, 2), &colors); + assert_eq!(rows, 2); // chips rendered + }); + } + + #[test] + fn test_render_filter_bar_tiny_noop() { + with_frame!(5, 1, |frame| { + let state = FilterBarState::default(); + let colors = test_colors(); + let rows = render_filter_bar(&mut frame, &state, Rect::new(0, 0, 5, 1), &colors); + assert_eq!(rows, 0); + }); + } + + #[test] + fn test_filter_bar_unicode_cursor() { + let mut state = FilterBarState { + input: "author:田中".into(), + cursor: 7, // points at start of 田 + ..Default::default() + }; + state.move_right(); + assert_eq!(state.cursor, 
10); // past 田 (3 bytes) + state.move_left(); + assert_eq!(state.cursor, 7); // back to 田 + } +} diff --git a/crates/lore-tui/src/view/common/mod.rs b/crates/lore-tui/src/view/common/mod.rs index 1e60ca2..c3ce5bc 100644 --- a/crates/lore-tui/src/view/common/mod.rs +++ b/crates/lore-tui/src/view/common/mod.rs @@ -5,13 +5,17 @@ //! no side effects. mod breadcrumb; +pub mod entity_table; mod error_toast; +pub mod filter_bar; mod help_overlay; mod loading; mod status_bar; pub use breadcrumb::render_breadcrumb; +pub use entity_table::{ColumnDef, EntityTableState, TableColors, TableRow, render_entity_table}; pub use error_toast::render_error_toast; +pub use filter_bar::{FilterBarColors, FilterBarState, render_filter_bar}; pub use help_overlay::render_help_overlay; pub use loading::render_loading; pub use status_bar::render_status_bar; diff --git a/crates/lore-tui/src/view/dashboard.rs b/crates/lore-tui/src/view/dashboard.rs new file mode 100644 index 0000000..b495e27 --- /dev/null +++ b/crates/lore-tui/src/view/dashboard.rs @@ -0,0 +1,554 @@ +#![allow(dead_code)] // Phase 2: wired into render_screen dispatch + +//! Dashboard screen view — entity counts, project sync status, recent activity. +//! +//! Responsive layout using [`crate::layout::classify_width`]: +//! - Wide (Lg/Xl, >=120 cols): 3-column `[Stats | Projects | Recent]` +//! - Medium (Md, 90–119): 2-column `[Stats+Projects | Recent]` +//! 
- Narrow (Xs/Sm, <90): single column stacked + +use ftui::core::geometry::Rect; +use ftui::layout::{Breakpoint, Constraint, Flex}; +use ftui::render::cell::{Cell, PackedRgba}; +use ftui::render::drawing::Draw; +use ftui::render::frame::Frame; + +use crate::layout::classify_width; +use crate::state::dashboard::{DashboardState, EntityCounts, LastSyncInfo, RecentActivityItem}; + +// --------------------------------------------------------------------------- +// Colors (Flexoki palette — will use injected Theme in a later phase) +// --------------------------------------------------------------------------- + +const TEXT: PackedRgba = PackedRgba::rgb(0xCE, 0xCD, 0xC3); // tx +const TEXT_MUTED: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2 +const ACCENT: PackedRgba = PackedRgba::rgb(0xDA, 0x70, 0x2C); // orange +const GREEN: PackedRgba = PackedRgba::rgb(0x87, 0x9A, 0x39); // green +const YELLOW: PackedRgba = PackedRgba::rgb(0xD0, 0xA2, 0x15); // yellow +const RED: PackedRgba = PackedRgba::rgb(0xAF, 0x3A, 0x29); // red +const CYAN: PackedRgba = PackedRgba::rgb(0x3A, 0xA9, 0x9F); // cyan +const BORDER: PackedRgba = PackedRgba::rgb(0x87, 0x87, 0x80); // tx-2 + +// --------------------------------------------------------------------------- +// Public entry point +// --------------------------------------------------------------------------- + +/// Render the full dashboard screen into `area`. +pub fn render_dashboard(frame: &mut Frame<'_>, state: &DashboardState, area: Rect) { + if area.height < 2 || area.width < 10 { + return; // Too small to render. 
+ } + + let bp = classify_width(area.width); + + match bp { + Breakpoint::Lg | Breakpoint::Xl => render_wide(frame, state, area), + Breakpoint::Md => render_medium(frame, state, area), + Breakpoint::Xs | Breakpoint::Sm => render_narrow(frame, state, area), + } +} + +// --------------------------------------------------------------------------- +// Layout variants +// --------------------------------------------------------------------------- + +/// Wide: 3-column [Stats | Projects | Recent Activity]. +fn render_wide(frame: &mut Frame<'_>, state: &DashboardState, area: Rect) { + let cols = Flex::horizontal() + .constraints([ + Constraint::Ratio(1, 3), + Constraint::Ratio(1, 3), + Constraint::Ratio(1, 3), + ]) + .split(area); + + render_stat_panel(frame, &state.counts, cols[0]); + render_project_list(frame, state, cols[1]); + render_recent_activity(frame, state, cols[2]); +} + +/// Medium: 2-column [Stats+Projects stacked | Recent Activity]. +fn render_medium(frame: &mut Frame<'_>, state: &DashboardState, area: Rect) { + let cols = Flex::horizontal() + .constraints([Constraint::Ratio(2, 5), Constraint::Ratio(3, 5)]) + .split(area); + + // Left column: stats on top, projects below. + let left_rows = Flex::vertical() + .constraints([Constraint::Ratio(1, 2), Constraint::Ratio(1, 2)]) + .split(cols[0]); + + render_stat_panel(frame, &state.counts, left_rows[0]); + render_project_list(frame, state, left_rows[1]); + + render_recent_activity(frame, state, cols[1]); +} + +/// Narrow: single column stacked. 
+fn render_narrow(frame: &mut Frame<'_>, state: &DashboardState, area: Rect) { + let rows = Flex::vertical() + .constraints([ + Constraint::Fixed(8), // stats + Constraint::Fixed(4), // projects (compact) + Constraint::Fill, // recent + ]) + .split(area); + + render_stat_panel(frame, &state.counts, rows[0]); + render_project_list(frame, state, rows[1]); + render_recent_activity(frame, state, rows[2]); +} + +// --------------------------------------------------------------------------- +// Panels +// --------------------------------------------------------------------------- + +/// Entity counts panel. +fn render_stat_panel(frame: &mut Frame<'_>, counts: &EntityCounts, area: Rect) { + if area.height == 0 || area.width < 5 { + return; + } + + let title_cell = Cell { + fg: ACCENT, + ..Cell::default() + }; + let label_cell = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + let value_cell = Cell { + fg: TEXT, + ..Cell::default() + }; + + let max_x = area.x.saturating_add(area.width); + let mut y = area.y; + let x = area.x.saturating_add(1); // 1-char left padding + + // Title + frame.print_text_clipped(x, y, "Entity Counts", title_cell, max_x); + y = y.saturating_add(1); + + // Separator + render_horizontal_rule(frame, area.x, y, area.width, BORDER); + y = y.saturating_add(1); + + // Stats rows + let stats: &[(&str, String)] = &[ + ( + "Issues", + format!("{} open / {}", counts.issues_open, counts.issues_total), + ), + ( + "MRs", + format!("{} open / {}", counts.mrs_open, counts.mrs_total), + ), + ("Discussions", counts.discussions.to_string()), + ( + "Notes", + format!( + "{} ({}% system)", + counts.notes_total, counts.notes_system_pct + ), + ), + ("Documents", counts.documents.to_string()), + ("Embeddings", counts.embeddings.to_string()), + ]; + + for (label, value) in stats { + if y >= area.y.saturating_add(area.height) { + break; + } + let after_label = frame.print_text_clipped(x, y, label, label_cell, max_x); + let after_colon = 
frame.print_text_clipped(after_label, y, ": ", label_cell, max_x); + frame.print_text_clipped(after_colon, y, value, value_cell, max_x); + y = y.saturating_add(1); + } +} + +/// Per-project sync freshness list. +fn render_project_list(frame: &mut Frame<'_>, state: &DashboardState, area: Rect) { + if area.height == 0 || area.width < 5 { + return; + } + + let title_cell = Cell { + fg: ACCENT, + ..Cell::default() + }; + let label_cell = Cell { + fg: TEXT, + ..Cell::default() + }; + + let max_x = area.x.saturating_add(area.width); + let mut y = area.y; + let x = area.x.saturating_add(1); + + frame.print_text_clipped(x, y, "Projects", title_cell, max_x); + y = y.saturating_add(1); + render_horizontal_rule(frame, area.x, y, area.width, BORDER); + y = y.saturating_add(1); + + if state.projects.is_empty() { + let muted = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + frame.print_text_clipped(x, y, "No projects synced", muted, max_x); + return; + } + + for proj in &state.projects { + if y >= area.y.saturating_add(area.height) { + break; + } + + let freshness_color = staleness_color(proj.minutes_since_sync); + let freshness_cell = Cell { + fg: freshness_color, + ..Cell::default() + }; + + let indicator = staleness_indicator(proj.minutes_since_sync); + let after_dot = frame.print_text_clipped(x, y, &indicator, freshness_cell, max_x); + let after_space = frame.print_text_clipped(after_dot, y, " ", label_cell, max_x); + frame.print_text_clipped(after_space, y, &proj.path, label_cell, max_x); + y = y.saturating_add(1); + } + + // Last sync summary if available. + if let Some(ref sync) = state.last_sync + && y < area.y.saturating_add(area.height) + { + y = y.saturating_add(1); // blank line + render_sync_summary(frame, sync, x, y, max_x); + } +} + +/// Scrollable recent activity list. 
+fn render_recent_activity(frame: &mut Frame<'_>, state: &DashboardState, area: Rect) { + if area.height == 0 || area.width < 5 { + return; + } + + let title_cell = Cell { + fg: ACCENT, + ..Cell::default() + }; + + let max_x = area.x.saturating_add(area.width); + let mut y = area.y; + let x = area.x.saturating_add(1); + + frame.print_text_clipped(x, y, "Recent Activity", title_cell, max_x); + y = y.saturating_add(1); + render_horizontal_rule(frame, area.x, y, area.width, BORDER); + y = y.saturating_add(1); + + if state.recent.is_empty() { + let muted = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + frame.print_text_clipped(x, y, "No recent activity", muted, max_x); + return; + } + + let visible_rows = (area.y.saturating_add(area.height)).saturating_sub(y) as usize; + let items = &state.recent; + let start = state.scroll_offset.min(items.len().saturating_sub(1)); + let end = (start + visible_rows).min(items.len()); + + for item in &items[start..end] { + if y >= area.y.saturating_add(area.height) { + break; + } + render_activity_row(frame, item, x, y, max_x); + y = y.saturating_add(1); + } + + // Scroll indicator if there's more content. + if end < items.len() && y < area.y.saturating_add(area.height) { + let muted = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + let remaining = items.len() - end; + frame.print_text_clipped(x, y, &format!("... {remaining} more"), muted, max_x); + } +} + +// --------------------------------------------------------------------------- +// Helpers +// --------------------------------------------------------------------------- + +/// Render a single recent activity row. 
+fn render_activity_row( + frame: &mut Frame<'_>, + item: &RecentActivityItem, + x: u16, + y: u16, + max_x: u16, +) { + let type_color = if item.entity_type == "issue" { + CYAN + } else { + ACCENT + }; + let type_cell = Cell { + fg: type_color, + ..Cell::default() + }; + let text_cell = Cell { + fg: TEXT, + ..Cell::default() + }; + let muted_cell = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + + let type_label = if item.entity_type == "issue" { + format!("#{}", item.iid) + } else { + format!("!{}", item.iid) + }; + + let after_type = frame.print_text_clipped(x, y, &type_label, type_cell, max_x); + let after_space = frame.print_text_clipped(after_type, y, " ", text_cell, max_x); + + // Truncate title to leave room for time. + let time_str = format_relative_time(item.minutes_ago); + let time_width = time_str.len() as u16 + 2; // " " + time + let title_max = max_x.saturating_sub(time_width); + + let after_title = frame.print_text_clipped(after_space, y, &item.title, text_cell, title_max); + + // Right-align time string. + let time_x = max_x.saturating_sub(time_str.len() as u16 + 1); + if time_x > after_title { + frame.print_text_clipped(time_x, y, &time_str, muted_cell, max_x); + } +} + +/// Render a last-sync summary line. 
+fn render_sync_summary(frame: &mut Frame<'_>, sync: &LastSyncInfo, x: u16, y: u16, max_x: u16) { + let status_color = if sync.status == "succeeded" { + GREEN + } else { + RED + }; + let cell = Cell { + fg: status_color, + ..Cell::default() + }; + let muted = Cell { + fg: TEXT_MUTED, + ..Cell::default() + }; + + let label_end = frame.print_text_clipped(x, y, "Last sync: ", muted, max_x); + let status_end = frame.print_text_clipped(label_end, y, &sync.status, cell, max_x); + + if let Some(ref err) = sync.error { + let err_cell = Cell { + fg: RED, + ..Cell::default() + }; + let after_space = frame.print_text_clipped(status_end, y, " — ", muted, max_x); + frame.print_text_clipped(after_space, y, err, err_cell, max_x); + } +} + +/// Draw a horizontal rule across a row. +fn render_horizontal_rule(frame: &mut Frame<'_>, x: u16, y: u16, width: u16, color: PackedRgba) { + let cell = Cell { + fg: color, + ..Cell::default() + }; + let rule = "─".repeat(width as usize); + frame.print_text_clipped(x, y, &rule, cell, x.saturating_add(width)); +} + +/// Staleness color: green <60min, yellow <360min, red ≥360min (or never synced). +const fn staleness_color(minutes: u64) -> PackedRgba { + if minutes == u64::MAX { + RED // Never synced. + } else if minutes < 60 { + GREEN + } else if minutes < 360 { + YELLOW + } else { + RED + } +} + +/// Staleness dot indicator. +fn staleness_indicator(minutes: u64) -> String { + if minutes == u64::MAX { + "● never".to_string() + } else if minutes < 60 { + format!("● {minutes}m ago") + } else if minutes < 1440 { + format!("● {}h ago", minutes / 60) + } else { + format!("● {}d ago", minutes / 1440) + } +} + +/// Format relative time for activity feed. 
+fn format_relative_time(minutes: u64) -> String { + if minutes == 0 { + "just now".to_string() + } else if minutes < 60 { + format!("{minutes}m ago") + } else if minutes < 1440 { + format!("{}h ago", minutes / 60) + } else { + format!("{}d ago", minutes / 1440) + } +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use crate::state::dashboard::{DashboardData, EntityCounts, ProjectSyncInfo}; + use ftui::render::grapheme_pool::GraphemePool; + + macro_rules! with_frame { + ($width:expr, $height:expr, |$frame:ident| $body:block) => {{ + let mut pool = GraphemePool::new(); + let mut $frame = Frame::new($width, $height, &mut pool); + $body + }}; + } + + fn sample_state() -> DashboardState { + let mut state = DashboardState::default(); + state.update(DashboardData { + counts: EntityCounts { + issues_open: 42, + issues_total: 100, + mrs_open: 10, + mrs_total: 50, + discussions: 200, + notes_total: 500, + notes_system_pct: 30, + documents: 80, + embeddings: 75, + }, + projects: vec![ + ProjectSyncInfo { + path: "group/alpha".into(), + minutes_since_sync: 15, + }, + ProjectSyncInfo { + path: "group/beta".into(), + minutes_since_sync: 120, + }, + ], + recent: vec![RecentActivityItem { + entity_type: "issue".into(), + iid: 42, + title: "Fix authentication bug".into(), + state: "opened".into(), + minutes_ago: 5, + }], + last_sync: None, + }); + state + } + + #[test] + fn test_render_dashboard_wide_no_panic() { + with_frame!(140, 30, |frame| { + let state = sample_state(); + let area = Rect::new(0, 0, 140, 30); + render_dashboard(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_dashboard_medium_no_panic() { + with_frame!(100, 24, |frame| { + let state = sample_state(); + let area = Rect::new(0, 0, 100, 24); + render_dashboard(&mut frame, &state, area); + }); + } + + #[test] + fn 
test_render_dashboard_narrow_no_panic() { + with_frame!(60, 20, |frame| { + let state = sample_state(); + let area = Rect::new(0, 0, 60, 20); + render_dashboard(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_dashboard_tiny_noop() { + with_frame!(5, 1, |frame| { + let state = DashboardState::default(); + let area = Rect::new(0, 0, 5, 1); + render_dashboard(&mut frame, &state, area); + }); + } + + #[test] + fn test_render_dashboard_empty_state_no_panic() { + with_frame!(120, 24, |frame| { + let state = DashboardState::default(); + let area = Rect::new(0, 0, 120, 24); + render_dashboard(&mut frame, &state, area); + }); + } + + #[test] + fn test_staleness_color_thresholds() { + assert_eq!(staleness_color(0), GREEN); + assert_eq!(staleness_color(59), GREEN); + assert_eq!(staleness_color(60), YELLOW); + assert_eq!(staleness_color(359), YELLOW); + assert_eq!(staleness_color(360), RED); + assert_eq!(staleness_color(u64::MAX), RED); + } + + #[test] + fn test_staleness_indicator() { + assert_eq!(staleness_indicator(15), "● 15m ago"); + assert_eq!(staleness_indicator(120), "● 2h ago"); + assert_eq!(staleness_indicator(2880), "● 2d ago"); + assert_eq!(staleness_indicator(u64::MAX), "● never"); + } + + #[test] + fn test_format_relative_time() { + assert_eq!(format_relative_time(0), "just now"); + assert_eq!(format_relative_time(5), "5m ago"); + assert_eq!(format_relative_time(90), "1h ago"); + assert_eq!(format_relative_time(1500), "1d ago"); + } + + #[test] + fn test_stat_panel_renders_title() { + with_frame!(40, 10, |frame| { + let counts = EntityCounts { + issues_open: 3, + issues_total: 10, + ..Default::default() + }; + render_stat_panel(&mut frame, &counts, Rect::new(0, 0, 40, 10)); + + // Check that 'E' from "Entity Counts" is rendered at x=1, y=0. 
+ let cell = frame.buffer.get(1, 0).unwrap(); + assert_eq!(cell.content.as_char(), Some('E'), "Expected 'E' at (1,0)"); + }); + } +} diff --git a/crates/lore-tui/src/view/issue_list.rs b/crates/lore-tui/src/view/issue_list.rs new file mode 100644 index 0000000..e5fcc04 --- /dev/null +++ b/crates/lore-tui/src/view/issue_list.rs @@ -0,0 +1,353 @@ +#![allow(dead_code)] // Phase 2: consumed by view/mod.rs screen dispatch + +//! Issue list screen view. +//! +//! Composes the reusable [`EntityTable`] and [`FilterBar`] widgets +//! with issue-specific column definitions and [`TableRow`] implementation. + +use ftui::core::geometry::Rect; +use ftui::render::cell::{Cell, PackedRgba}; +use ftui::render::drawing::Draw; +use ftui::render::frame::Frame; + +use crate::state::issue_list::{IssueListRow, IssueListState, SortField, SortOrder}; +use crate::view::common::entity_table::{ + Align, ColumnDef, EntityTableState, TableColors, TableRow, render_entity_table, +}; +use crate::view::common::filter_bar::{FilterBarColors, FilterBarState, render_filter_bar}; + +// --------------------------------------------------------------------------- +// TableRow implementation for IssueListRow +// --------------------------------------------------------------------------- + +impl TableRow for IssueListRow { + fn cells(&self, col_count: usize) -> Vec { + let mut cells = Vec::with_capacity(col_count); + + // Column order must match ISSUE_COLUMNS definition. 
+ // 0: IID + cells.push(format!("#{}", self.iid)); + // 1: Title + cells.push(self.title.clone()); + // 2: State + cells.push(self.state.clone()); + // 3: Author + cells.push(self.author.clone()); + // 4: Labels + cells.push(self.labels.join(", ")); + // 5: Project + cells.push(self.project_path.clone()); + + cells.truncate(col_count); + cells + } +} + +// --------------------------------------------------------------------------- +// Column definitions +// --------------------------------------------------------------------------- + +/// Column definitions for the issue list table. +const ISSUE_COLUMNS: &[ColumnDef] = &[ + ColumnDef { + name: "IID", + min_width: 5, + flex_weight: 0, + priority: 0, + align: Align::Right, + }, + ColumnDef { + name: "Title", + min_width: 15, + flex_weight: 4, + priority: 0, + align: Align::Left, + }, + ColumnDef { + name: "State", + min_width: 7, + flex_weight: 0, + priority: 0, + align: Align::Left, + }, + ColumnDef { + name: "Author", + min_width: 8, + flex_weight: 1, + priority: 1, + align: Align::Left, + }, + ColumnDef { + name: "Labels", + min_width: 10, + flex_weight: 2, + priority: 2, + align: Align::Left, + }, + ColumnDef { + name: "Project", + min_width: 12, + flex_weight: 1, + priority: 3, + align: Align::Left, + }, +]; + +// --------------------------------------------------------------------------- +// Colors +// --------------------------------------------------------------------------- + +fn table_colors() -> TableColors { + TableColors { + header_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3), + header_bg: PackedRgba::rgb(0x34, 0x34, 0x31), + row_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3), + row_alt_bg: PackedRgba::rgb(0x1C, 0x1B, 0x1A), + selected_fg: PackedRgba::rgb(0x10, 0x0F, 0x0F), + selected_bg: PackedRgba::rgb(0xCE, 0xCD, 0xC3), + sort_indicator: PackedRgba::rgb(0x87, 0x96, 0x6B), + border: PackedRgba::rgb(0x40, 0x40, 0x3C), + } +} + +fn filter_colors() -> FilterBarColors { + FilterBarColors { + input_fg: 
PackedRgba::rgb(0xCE, 0xCD, 0xC3), + input_bg: PackedRgba::rgb(0x28, 0x28, 0x24), + cursor_fg: PackedRgba::rgb(0x00, 0x00, 0x00), + cursor_bg: PackedRgba::rgb(0xCE, 0xCD, 0xC3), + chip_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3), + chip_bg: PackedRgba::rgb(0x40, 0x40, 0x3C), + error_fg: PackedRgba::rgb(0xAF, 0x3A, 0x29), + label_fg: PackedRgba::rgb(0x87, 0x87, 0x80), + } +} + +// --------------------------------------------------------------------------- +// Render +// --------------------------------------------------------------------------- + +/// Render the full issue list screen. +/// +/// Layout: +/// ```text +/// Row 0: [Filter bar: / filter input_________] +/// Row 1: [chip1] [chip2] (if filter active) +/// Row 2: ───────────────────────────────────── +/// Row 3..N: IID Title State Author ... +/// ─────────────────────────────────────── +/// #42 Fix login bug open alice ... +/// #41 Add tests open bob ... +/// Bottom: Showing 42 of 128 issues +/// ``` +pub fn render_issue_list(frame: &mut Frame<'_>, state: &IssueListState, area: Rect) { + if area.height < 3 || area.width < 10 { + return; + } + + let mut y = area.y; + let max_x = area.x.saturating_add(area.width); + + // -- Filter bar --------------------------------------------------------- + let filter_area = Rect::new(area.x, y, area.width, 2.min(area.height)); + let fb_state = FilterBarState { + input: state.filter_input.clone(), + cursor: state.filter_input.len(), + focused: state.filter_focused, + tokens: crate::filter_dsl::parse_filter_tokens(&state.filter_input), + unknown_fields: Vec::new(), + }; + let filter_rows = render_filter_bar(frame, &fb_state, filter_area, &filter_colors()); + y = y.saturating_add(filter_rows); + + // -- Status line (total count) ------------------------------------------ + let remaining_height = area.height.saturating_sub(y - area.y); + if remaining_height < 2 { + return; + } + + // Reserve bottom row for status. 
+ let table_height = remaining_height.saturating_sub(1); + let status_y = y.saturating_add(table_height); + + // -- Entity table ------------------------------------------------------- + let sort_col = match state.sort_field { + SortField::UpdatedAt => 0, // Map to IID column (closest visual proxy) + SortField::Iid => 0, + SortField::Title => 1, + SortField::State => 2, + SortField::Author => 3, + }; + + let mut table_state = EntityTableState { + selected: state.selected_index, + scroll_offset: state.scroll_offset, + sort_column: sort_col, + sort_ascending: matches!(state.sort_order, SortOrder::Asc), + }; + + let table_area = Rect::new(area.x, y, area.width, table_height); + render_entity_table( + frame, + &state.rows, + ISSUE_COLUMNS, + &mut table_state, + table_area, + &table_colors(), + ); + + // -- Bottom status ------------------------------------------------------ + if status_y < area.y.saturating_add(area.height) { + render_status_line(frame, state, area.x, status_y, max_x); + } +} + +/// Render the bottom status line showing row count and pagination info. +fn render_status_line(frame: &mut Frame<'_>, state: &IssueListState, x: u16, y: u16, max_x: u16) { + let muted = Cell { + fg: PackedRgba::rgb(0x87, 0x87, 0x80), + ..Cell::default() + }; + + let status = if state.rows.is_empty() { + "No issues found".to_string() + } else { + let showing = state.rows.len(); + let total = state.total_count; + if state.next_cursor.is_some() { + format!("Showing {showing} of {total} issues (more available)") + } else { + format!("Showing {showing} of {total} issues") + } + }; + + frame.print_text_clipped(x, y, &status, muted, max_x); +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use ftui::render::grapheme_pool::GraphemePool; + + macro_rules! 
with_frame { + ($width:expr, $height:expr, |$frame:ident| $body:block) => {{ + let mut pool = GraphemePool::new(); + let mut $frame = Frame::new($width, $height, &mut pool); + $body + }}; + } + + fn sample_state(row_count: usize) -> IssueListState { + let rows: Vec = (0..row_count) + .map(|i| IssueListRow { + project_path: "group/project".into(), + iid: (i + 1) as i64, + title: format!("Issue {}", i + 1), + state: if i % 2 == 0 { "opened" } else { "closed" }.into(), + author: "taylor".into(), + labels: if i == 0 { + vec!["bug".into(), "critical".into()] + } else { + vec![] + }, + updated_at: 1_700_000_000_000 - (i as i64 * 60_000), + }) + .collect(); + + IssueListState { + total_count: row_count as u64, + rows, + ..Default::default() + } + } + + #[test] + fn test_render_issue_list_no_panic() { + with_frame!(120, 30, |frame| { + let state = sample_state(10); + render_issue_list(&mut frame, &state, Rect::new(0, 0, 120, 30)); + }); + } + + #[test] + fn test_render_issue_list_empty_no_panic() { + with_frame!(80, 20, |frame| { + let state = IssueListState::default(); + render_issue_list(&mut frame, &state, Rect::new(0, 0, 80, 20)); + }); + } + + #[test] + fn test_render_issue_list_tiny_noop() { + with_frame!(5, 2, |frame| { + let state = sample_state(5); + render_issue_list(&mut frame, &state, Rect::new(0, 0, 5, 2)); + // Should not panic with too-small area. 
+ }); + } + + #[test] + fn test_render_issue_list_narrow_no_panic() { + with_frame!(40, 15, |frame| { + let state = sample_state(5); + render_issue_list(&mut frame, &state, Rect::new(0, 0, 40, 15)); + }); + } + + #[test] + fn test_render_issue_list_with_filter_no_panic() { + with_frame!(100, 25, |frame| { + let mut state = sample_state(5); + state.filter_input = "state:opened".into(); + state.filter_focused = true; + render_issue_list(&mut frame, &state, Rect::new(0, 0, 100, 25)); + }); + } + + #[test] + fn test_issue_list_row_cells() { + let row = IssueListRow { + project_path: "group/proj".into(), + iid: 42, + title: "Fix bug".into(), + state: "opened".into(), + author: "alice".into(), + labels: vec!["bug".into(), "urgent".into()], + updated_at: 1_700_000_000_000, + }; + + let cells = row.cells(6); + assert_eq!(cells[0], "#42"); + assert_eq!(cells[1], "Fix bug"); + assert_eq!(cells[2], "opened"); + assert_eq!(cells[3], "alice"); + assert_eq!(cells[4], "bug, urgent"); + assert_eq!(cells[5], "group/proj"); + } + + #[test] + fn test_issue_list_row_cells_truncated() { + let row = IssueListRow { + project_path: "g/p".into(), + iid: 1, + title: "t".into(), + state: "opened".into(), + author: "a".into(), + labels: vec![], + updated_at: 0, + }; + + // Request fewer columns than available. + let cells = row.cells(3); + assert_eq!(cells.len(), 3); + } + + #[test] + fn test_column_count() { + assert_eq!(ISSUE_COLUMNS.len(), 6); + } +} diff --git a/crates/lore-tui/src/view/mod.rs b/crates/lore-tui/src/view/mod.rs index db18d5a..f2c1839 100644 --- a/crates/lore-tui/src/view/mod.rs +++ b/crates/lore-tui/src/view/mod.rs @@ -7,16 +7,23 @@ //! bar, and optional overlays (help, error toast). 
pub mod common; +pub mod dashboard; +pub mod issue_list; +pub mod mr_list; use ftui::layout::{Constraint, Flex}; use ftui::render::cell::PackedRgba; use ftui::render::frame::Frame; use crate::app::LoreApp; +use crate::message::Screen; use common::{ render_breadcrumb, render_error_toast, render_help_overlay, render_loading, render_status_bar, }; +use dashboard::render_dashboard; +use issue_list::render_issue_list; +use mr_list::render_mr_list; // --------------------------------------------------------------------------- // Colors (hardcoded Flexoki palette — will use Theme in Phase 2) @@ -79,12 +86,14 @@ pub fn render_screen(frame: &mut Frame<'_>, app: &LoreApp) { // tick=0 placeholder — animation wired up when Msg::Tick increments a counter. render_loading(frame, content_area, load_state, TEXT, TEXT_MUTED, 0); - // Per-screen content dispatch (Phase 2+). - // match screen { - // Screen::Dashboard => ..., - // Screen::IssueList => ..., - // ... - // } + // Per-screen content dispatch (other screens wired in later phases). + if screen == &Screen::Dashboard { + render_dashboard(frame, &app.state.dashboard, content_area); + } else if screen == &Screen::IssueList { + render_issue_list(frame, &app.state.issue_list, content_area); + } else if screen == &Screen::MrList { + render_mr_list(frame, &app.state.mr_list, content_area); + } // --- Status bar --- render_status_bar( diff --git a/crates/lore-tui/src/view/mr_list.rs b/crates/lore-tui/src/view/mr_list.rs new file mode 100644 index 0000000..721f8f4 --- /dev/null +++ b/crates/lore-tui/src/view/mr_list.rs @@ -0,0 +1,390 @@ +#![allow(dead_code)] // Phase 2: consumed by view/mod.rs screen dispatch + +//! MR list screen view. +//! +//! Composes the reusable [`EntityTable`] and [`FilterBar`] widgets +//! with MR-specific column definitions and [`TableRow`] implementation. 
+ +use ftui::core::geometry::Rect; +use ftui::render::cell::{Cell, PackedRgba}; +use ftui::render::drawing::Draw; +use ftui::render::frame::Frame; + +use crate::state::mr_list::{MrListRow, MrListState, MrSortField, MrSortOrder}; +use crate::view::common::entity_table::{ + Align, ColumnDef, EntityTableState, TableColors, TableRow, render_entity_table, +}; +use crate::view::common::filter_bar::{FilterBarColors, FilterBarState, render_filter_bar}; + +// --------------------------------------------------------------------------- +// TableRow implementation for MrListRow +// --------------------------------------------------------------------------- + +impl TableRow for MrListRow { + fn cells(&self, col_count: usize) -> Vec { + let mut cells = Vec::with_capacity(col_count); + + // Column order must match MR_COLUMNS definition. + // 0: IID (with draft indicator) + let iid_text = if self.draft { + format!("!{} [WIP]", self.iid) + } else { + format!("!{}", self.iid) + }; + cells.push(iid_text); + // 1: Title + cells.push(self.title.clone()); + // 2: State + cells.push(self.state.clone()); + // 3: Author + cells.push(self.author.clone()); + // 4: Target Branch + cells.push(self.target_branch.clone()); + // 5: Labels + cells.push(self.labels.join(", ")); + // 6: Project + cells.push(self.project_path.clone()); + + cells.truncate(col_count); + cells + } +} + +// --------------------------------------------------------------------------- +// Column definitions +// --------------------------------------------------------------------------- + +/// Column definitions for the MR list table. 
+const MR_COLUMNS: &[ColumnDef] = &[ + ColumnDef { + name: "IID", + min_width: 6, + flex_weight: 0, + priority: 0, + align: Align::Right, + }, + ColumnDef { + name: "Title", + min_width: 15, + flex_weight: 4, + priority: 0, + align: Align::Left, + }, + ColumnDef { + name: "State", + min_width: 7, + flex_weight: 0, + priority: 0, + align: Align::Left, + }, + ColumnDef { + name: "Author", + min_width: 8, + flex_weight: 1, + priority: 1, + align: Align::Left, + }, + ColumnDef { + name: "Target", + min_width: 8, + flex_weight: 1, + priority: 1, + align: Align::Left, + }, + ColumnDef { + name: "Labels", + min_width: 10, + flex_weight: 2, + priority: 2, + align: Align::Left, + }, + ColumnDef { + name: "Project", + min_width: 12, + flex_weight: 1, + priority: 3, + align: Align::Left, + }, +]; + +// --------------------------------------------------------------------------- +// Colors +// --------------------------------------------------------------------------- + +fn table_colors() -> TableColors { + TableColors { + header_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3), + header_bg: PackedRgba::rgb(0x34, 0x34, 0x31), + row_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3), + row_alt_bg: PackedRgba::rgb(0x1C, 0x1B, 0x1A), + selected_fg: PackedRgba::rgb(0x10, 0x0F, 0x0F), + selected_bg: PackedRgba::rgb(0xCE, 0xCD, 0xC3), + sort_indicator: PackedRgba::rgb(0x87, 0x96, 0x6B), + border: PackedRgba::rgb(0x40, 0x40, 0x3C), + } +} + +fn filter_colors() -> FilterBarColors { + FilterBarColors { + input_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3), + input_bg: PackedRgba::rgb(0x28, 0x28, 0x24), + cursor_fg: PackedRgba::rgb(0x00, 0x00, 0x00), + cursor_bg: PackedRgba::rgb(0xCE, 0xCD, 0xC3), + chip_fg: PackedRgba::rgb(0xCE, 0xCD, 0xC3), + chip_bg: PackedRgba::rgb(0x40, 0x40, 0x3C), + error_fg: PackedRgba::rgb(0xAF, 0x3A, 0x29), + label_fg: PackedRgba::rgb(0x87, 0x87, 0x80), + } +} + +// --------------------------------------------------------------------------- +// Render +// 
--------------------------------------------------------------------------- + +/// Render the full MR list screen. +/// +/// Layout: +/// ```text +/// Row 0: [Filter bar: / filter input_________] +/// Row 1: [chip1] [chip2] (if filter active) +/// Row 2: ----------------------------------------- +/// Row 3..N: IID Title State Author ... +/// ----------------------------------------- +/// !42 Fix pipeline opened alice ... +/// !41 Add CI config merged bob ... +/// Bottom: Showing 42 of 128 merge requests +/// ``` +pub fn render_mr_list(frame: &mut Frame<'_>, state: &MrListState, area: Rect) { + if area.height < 3 || area.width < 10 { + return; + } + + let mut y = area.y; + let max_x = area.x.saturating_add(area.width); + + // -- Filter bar --------------------------------------------------------- + let filter_area = Rect::new(area.x, y, area.width, 2.min(area.height)); + let fb_state = FilterBarState { + input: state.filter_input.clone(), + cursor: state.filter_input.len(), + focused: state.filter_focused, + tokens: crate::filter_dsl::parse_filter_tokens(&state.filter_input), + unknown_fields: Vec::new(), + }; + let filter_rows = render_filter_bar(frame, &fb_state, filter_area, &filter_colors()); + y = y.saturating_add(filter_rows); + + // -- Status line (total count) ------------------------------------------ + let remaining_height = area.height.saturating_sub(y - area.y); + if remaining_height < 2 { + return; + } + + // Reserve bottom row for status. 
+ let table_height = remaining_height.saturating_sub(1); + let status_y = y.saturating_add(table_height); + + // -- Entity table ------------------------------------------------------- + let sort_col = match state.sort_field { + MrSortField::UpdatedAt | MrSortField::Iid => 0, + MrSortField::Title => 1, + MrSortField::State => 2, + MrSortField::Author => 3, + MrSortField::TargetBranch => 4, + }; + + let mut table_state = EntityTableState { + selected: state.selected_index, + scroll_offset: state.scroll_offset, + sort_column: sort_col, + sort_ascending: matches!(state.sort_order, MrSortOrder::Asc), + }; + + let table_area = Rect::new(area.x, y, area.width, table_height); + render_entity_table( + frame, + &state.rows, + MR_COLUMNS, + &mut table_state, + table_area, + &table_colors(), + ); + + // -- Bottom status ------------------------------------------------------ + if status_y < area.y.saturating_add(area.height) { + render_status_line(frame, state, area.x, status_y, max_x); + } +} + +/// Render the bottom status line showing row count and pagination info. +fn render_status_line(frame: &mut Frame<'_>, state: &MrListState, x: u16, y: u16, max_x: u16) { + let muted = Cell { + fg: PackedRgba::rgb(0x87, 0x87, 0x80), + ..Cell::default() + }; + + let status = if state.rows.is_empty() { + "No merge requests found".to_string() + } else { + let showing = state.rows.len(); + let total = state.total_count; + if state.next_cursor.is_some() { + format!("Showing {showing} of {total} merge requests (more available)") + } else { + format!("Showing {showing} of {total} merge requests") + } + }; + + frame.print_text_clipped(x, y, &status, muted, max_x); +} + +// --------------------------------------------------------------------------- +// Tests +// --------------------------------------------------------------------------- + +#[cfg(test)] +mod tests { + use super::*; + use ftui::render::grapheme_pool::GraphemePool; + + macro_rules! 
with_frame { + ($width:expr, $height:expr, |$frame:ident| $body:block) => {{ + let mut pool = GraphemePool::new(); + let mut $frame = Frame::new($width, $height, &mut pool); + $body + }}; + } + + fn sample_state(row_count: usize) -> MrListState { + let rows: Vec = (0..row_count) + .map(|i| MrListRow { + project_path: "group/project".into(), + iid: (i + 1) as i64, + title: format!("MR {}", i + 1), + state: if i % 2 == 0 { "opened" } else { "merged" }.into(), + author: "taylor".into(), + target_branch: "main".into(), + labels: if i == 0 { + vec!["backend".into(), "urgent".into()] + } else { + vec![] + }, + updated_at: 1_700_000_000_000 - (i as i64 * 60_000), + draft: i % 3 == 0, + }) + .collect(); + + MrListState { + total_count: row_count as u64, + rows, + ..Default::default() + } + } + + #[test] + fn test_render_mr_list_no_panic() { + with_frame!(120, 30, |frame| { + let state = sample_state(10); + render_mr_list(&mut frame, &state, Rect::new(0, 0, 120, 30)); + }); + } + + #[test] + fn test_render_mr_list_empty_no_panic() { + with_frame!(80, 20, |frame| { + let state = MrListState::default(); + render_mr_list(&mut frame, &state, Rect::new(0, 0, 80, 20)); + }); + } + + #[test] + fn test_render_mr_list_tiny_noop() { + with_frame!(5, 2, |frame| { + let state = sample_state(5); + render_mr_list(&mut frame, &state, Rect::new(0, 0, 5, 2)); + }); + } + + #[test] + fn test_render_mr_list_narrow_no_panic() { + with_frame!(40, 15, |frame| { + let state = sample_state(5); + render_mr_list(&mut frame, &state, Rect::new(0, 0, 40, 15)); + }); + } + + #[test] + fn test_render_mr_list_with_filter_no_panic() { + with_frame!(100, 25, |frame| { + let mut state = sample_state(5); + state.filter_input = "state:opened".into(); + state.filter_focused = true; + render_mr_list(&mut frame, &state, Rect::new(0, 0, 100, 25)); + }); + } + + #[test] + fn test_mr_list_row_cells() { + let row = MrListRow { + project_path: "group/proj".into(), + iid: 42, + title: "Fix pipeline".into(), + state: 
"opened".into(), + author: "alice".into(), + target_branch: "main".into(), + labels: vec!["backend".into(), "urgent".into()], + updated_at: 1_700_000_000_000, + draft: false, + }; + + let cells = row.cells(7); + assert_eq!(cells[0], "!42"); + assert_eq!(cells[1], "Fix pipeline"); + assert_eq!(cells[2], "opened"); + assert_eq!(cells[3], "alice"); + assert_eq!(cells[4], "main"); + assert_eq!(cells[5], "backend, urgent"); + assert_eq!(cells[6], "group/proj"); + } + + #[test] + fn test_mr_list_row_cells_draft() { + let row = MrListRow { + project_path: "g/p".into(), + iid: 7, + title: "WIP MR".into(), + state: "opened".into(), + author: "bob".into(), + target_branch: "develop".into(), + labels: vec![], + updated_at: 0, + draft: true, + }; + + let cells = row.cells(7); + assert_eq!(cells[0], "!7 [WIP]"); + } + + #[test] + fn test_mr_list_row_cells_truncated() { + let row = MrListRow { + project_path: "g/p".into(), + iid: 1, + title: "t".into(), + state: "opened".into(), + author: "a".into(), + target_branch: "main".into(), + labels: vec![], + updated_at: 0, + draft: false, + }; + + let cells = row.cells(3); + assert_eq!(cells.len(), 3); + } + + #[test] + fn test_column_count() { + assert_eq!(MR_COLUMNS.len(), 7); + } +} diff --git a/migrations/027_tui_list_indexes.sql b/migrations/027_tui_list_indexes.sql new file mode 100644 index 0000000..1229fb4 --- /dev/null +++ b/migrations/027_tui_list_indexes.sql @@ -0,0 +1,41 @@ +-- Covering indexes for TUI list screen keyset pagination. +-- These supplement existing indexes from earlier migrations to +-- enable efficient ORDER BY ... LIMIT queries without temp B-tree sorts. + +-- Issue list: default sort (updated_at DESC, iid DESC) with state filter. +-- Covers: WHERE project_id = ? AND state = ? ORDER BY updated_at DESC, iid DESC +CREATE INDEX IF NOT EXISTS idx_issues_tui_list + ON issues(project_id, state, updated_at DESC, iid DESC); + +-- MR list: default sort (updated_at DESC, iid DESC) with state filter. 
+CREATE INDEX IF NOT EXISTS idx_mrs_tui_list + ON merge_requests(project_id, state, updated_at DESC, iid DESC); + +-- Discussion list for entity detail screens: ordered by first note timestamp. +CREATE INDEX IF NOT EXISTS idx_discussions_issue_ordered + ON discussions(issue_id, first_note_at DESC) + WHERE issue_id IS NOT NULL; + +CREATE INDEX IF NOT EXISTS idx_discussions_mr_ordered + ON discussions(merge_request_id, first_note_at DESC) + WHERE merge_request_id IS NOT NULL; + +-- Notes within a discussion: chronological order for detail views. +CREATE INDEX IF NOT EXISTS idx_notes_discussion_ordered + ON notes(discussion_id, created_at ASC); + +-- Filter-path indexes for TUI filter bar queries. +-- Issues: author filter with state (covers WHERE author_username = ? AND state = ?). +CREATE INDEX IF NOT EXISTS idx_issues_author_state + ON issues(author_username, state); + +-- MRs: author filter with state. +CREATE INDEX IF NOT EXISTS idx_mrs_author_state + ON merge_requests(author_username, state); + +-- MRs: target branch filter with state. 
+CREATE INDEX IF NOT EXISTS idx_mrs_target_branch_state + ON merge_requests(target_branch, state); + +INSERT INTO schema_version (version, applied_at, description) +VALUES (27, strftime('%s', 'now') * 1000, 'TUI list screen covering indexes'); diff --git a/src/core/config.rs b/src/core/config.rs index e69de29..eee368f 100644 --- a/src/core/config.rs +++ b/src/core/config.rs @@ -0,0 +1,789 @@ +use serde::Deserialize; +use std::fs; +use std::path::Path; + +use super::error::{LoreError, Result}; +use super::paths::get_config_path; + +#[derive(Debug, Clone, Deserialize)] +pub struct GitLabConfig { + #[serde(rename = "baseUrl")] + pub base_url: String, + + #[serde(rename = "tokenEnvVar", default = "default_token_env_var")] + pub token_env_var: String, +} + +fn default_token_env_var() -> String { + "GITLAB_TOKEN".to_string() +} + +#[derive(Debug, Clone, Deserialize)] +pub struct ProjectConfig { + pub path: String, +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(default)] +pub struct SyncConfig { + #[serde(rename = "backfillDays")] + pub backfill_days: u32, + + #[serde(rename = "staleLockMinutes")] + pub stale_lock_minutes: u32, + + #[serde(rename = "heartbeatIntervalSeconds")] + pub heartbeat_interval_seconds: u32, + + #[serde(rename = "cursorRewindSeconds")] + pub cursor_rewind_seconds: u32, + + #[serde(rename = "primaryConcurrency")] + pub primary_concurrency: u32, + + #[serde(rename = "dependentConcurrency")] + pub dependent_concurrency: u32, + + #[serde(rename = "requestsPerSecond")] + pub requests_per_second: f64, + + #[serde(rename = "fetchResourceEvents", default = "default_true")] + pub fetch_resource_events: bool, + + #[serde(rename = "fetchMrFileChanges", default = "default_true")] + pub fetch_mr_file_changes: bool, + + #[serde(rename = "fetchWorkItemStatus", default = "default_true")] + pub fetch_work_item_status: bool, +} + +fn default_true() -> bool { + true +} + +impl Default for SyncConfig { + fn default() -> Self { + Self { + backfill_days: 14, + 
stale_lock_minutes: 10, + heartbeat_interval_seconds: 30, + cursor_rewind_seconds: 2, + primary_concurrency: 4, + dependent_concurrency: 8, + requests_per_second: 30.0, + fetch_resource_events: true, + fetch_mr_file_changes: true, + fetch_work_item_status: true, + } + } +} + +#[derive(Debug, Clone, Deserialize, Default)] +#[serde(default)] +pub struct StorageConfig { + #[serde(rename = "dbPath")] + pub db_path: Option, + + #[serde(rename = "backupDir")] + pub backup_dir: Option, + + #[serde( + rename = "compressRawPayloads", + default = "default_compress_raw_payloads" + )] + pub compress_raw_payloads: bool, +} + +fn default_compress_raw_payloads() -> bool { + true +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(default)] +pub struct EmbeddingConfig { + pub provider: String, + pub model: String, + #[serde(rename = "baseUrl")] + pub base_url: String, + pub concurrency: u32, +} + +impl Default for EmbeddingConfig { + fn default() -> Self { + Self { + provider: "ollama".to_string(), + model: "nomic-embed-text".to_string(), + base_url: "http://localhost:11434".to_string(), + concurrency: 4, + } + } +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(default)] +pub struct LoggingConfig { + #[serde(rename = "logDir")] + pub log_dir: Option, + + #[serde(rename = "retentionDays", default = "default_retention_days")] + pub retention_days: u32, + + #[serde(rename = "fileLogging", default = "default_file_logging")] + pub file_logging: bool, +} + +fn default_retention_days() -> u32 { + 30 +} + +fn default_file_logging() -> bool { + true +} + +impl Default for LoggingConfig { + fn default() -> Self { + Self { + log_dir: None, + retention_days: default_retention_days(), + file_logging: default_file_logging(), + } + } +} + +#[derive(Debug, Clone, Deserialize)] +#[serde(default)] +pub struct ScoringConfig { + /// Points per MR where the user authored code touching the path. 
+ #[serde(rename = "authorWeight")] + pub author_weight: i64, + + /// Points per MR where the user reviewed code touching the path. + #[serde(rename = "reviewerWeight")] + pub reviewer_weight: i64, + + /// Bonus points per individual inline review comment (DiffNote). + #[serde(rename = "noteBonus")] + pub note_bonus: i64, + + /// Points per MR where the user was assigned as a reviewer. + #[serde(rename = "reviewerAssignmentWeight")] + pub reviewer_assignment_weight: i64, + + /// Half-life in days for author contribution decay. + #[serde(rename = "authorHalfLifeDays")] + pub author_half_life_days: u32, + + /// Half-life in days for reviewer contribution decay. + #[serde(rename = "reviewerHalfLifeDays")] + pub reviewer_half_life_days: u32, + + /// Half-life in days for reviewer assignment decay. + #[serde(rename = "reviewerAssignmentHalfLifeDays")] + pub reviewer_assignment_half_life_days: u32, + + /// Half-life in days for note/comment contribution decay. + #[serde(rename = "noteHalfLifeDays")] + pub note_half_life_days: u32, + + /// Multiplier applied to scores from closed (not merged) MRs. + #[serde(rename = "closedMrMultiplier")] + pub closed_mr_multiplier: f64, + + /// Minimum character count for a review note to earn note_bonus. + #[serde(rename = "reviewerMinNoteChars")] + pub reviewer_min_note_chars: u32, + + /// Usernames excluded from expert/scoring results. 
+ #[serde(rename = "excludedUsernames")] + pub excluded_usernames: Vec, +} + +impl Default for ScoringConfig { + fn default() -> Self { + Self { + author_weight: 25, + reviewer_weight: 10, + note_bonus: 1, + reviewer_assignment_weight: 3, + author_half_life_days: 180, + reviewer_half_life_days: 90, + reviewer_assignment_half_life_days: 45, + note_half_life_days: 45, + closed_mr_multiplier: 0.5, + reviewer_min_note_chars: 20, + excluded_usernames: vec![], + } + } +} + +#[derive(Debug, Clone, Deserialize)] +pub struct Config { + pub gitlab: GitLabConfig, + pub projects: Vec, + + #[serde(rename = "defaultProject")] + pub default_project: Option, + + #[serde(default)] + pub sync: SyncConfig, + + #[serde(default)] + pub storage: StorageConfig, + + #[serde(default)] + pub embedding: EmbeddingConfig, + + #[serde(default)] + pub logging: LoggingConfig, + + #[serde(default)] + pub scoring: ScoringConfig, +} + +impl Config { + pub fn load(cli_override: Option<&str>) -> Result { + let config_path = get_config_path(cli_override); + + if !config_path.exists() { + return Err(LoreError::ConfigNotFound { + path: config_path.display().to_string(), + }); + } + + Self::load_from_path(&config_path) + } + + pub fn load_from_path(path: &Path) -> Result { + let content = fs::read_to_string(path).map_err(|e| LoreError::ConfigInvalid { + details: format!("Failed to read config file: {e}"), + })?; + + let config: Config = + serde_json::from_str(&content).map_err(|e| LoreError::ConfigInvalid { + details: format!("Invalid JSON: {e}"), + })?; + + if config.projects.is_empty() { + return Err(LoreError::ConfigInvalid { + details: "At least one project is required".to_string(), + }); + } + + for project in &config.projects { + if project.path.is_empty() { + return Err(LoreError::ConfigInvalid { + details: "Project path cannot be empty".to_string(), + }); + } + } + + if url::Url::parse(&config.gitlab.base_url).is_err() { + return Err(LoreError::ConfigInvalid { + details: format!("Invalid GitLab 
URL: {}", config.gitlab.base_url), + }); + } + + if let Some(ref dp) = config.default_project { + let matched = config.projects.iter().any(|p| { + p.path.eq_ignore_ascii_case(dp) + || p.path + .to_ascii_lowercase() + .ends_with(&format!("/{}", dp.to_ascii_lowercase())) + }); + if !matched { + return Err(LoreError::ConfigInvalid { + details: format!( + "defaultProject '{}' does not match any configured project path", + dp + ), + }); + } + } + + validate_scoring(&config.scoring)?; + + Ok(config) + } + + /// Return the effective project filter: CLI flag wins, then config default. + pub fn effective_project<'a>(&'a self, cli_project: Option<&'a str>) -> Option<&'a str> { + cli_project.or(self.default_project.as_deref()) + } +} + +fn validate_scoring(scoring: &ScoringConfig) -> Result<()> { + if scoring.author_weight < 0 { + return Err(LoreError::ConfigInvalid { + details: "scoring.authorWeight must be >= 0".to_string(), + }); + } + if scoring.reviewer_weight < 0 { + return Err(LoreError::ConfigInvalid { + details: "scoring.reviewerWeight must be >= 0".to_string(), + }); + } + if scoring.note_bonus < 0 { + return Err(LoreError::ConfigInvalid { + details: "scoring.noteBonus must be >= 0".to_string(), + }); + } + if scoring.reviewer_assignment_weight < 0 { + return Err(LoreError::ConfigInvalid { + details: "scoring.reviewerAssignmentWeight must be >= 0".to_string(), + }); + } + if scoring.author_half_life_days == 0 || scoring.author_half_life_days > 3650 { + return Err(LoreError::ConfigInvalid { + details: "scoring.authorHalfLifeDays must be in 1..=3650".to_string(), + }); + } + if scoring.reviewer_half_life_days == 0 || scoring.reviewer_half_life_days > 3650 { + return Err(LoreError::ConfigInvalid { + details: "scoring.reviewerHalfLifeDays must be in 1..=3650".to_string(), + }); + } + if scoring.reviewer_assignment_half_life_days == 0 + || scoring.reviewer_assignment_half_life_days > 3650 + { + return Err(LoreError::ConfigInvalid { + details: 
"scoring.reviewerAssignmentHalfLifeDays must be in 1..=3650".to_string(), + }); + } + if scoring.note_half_life_days == 0 || scoring.note_half_life_days > 3650 { + return Err(LoreError::ConfigInvalid { + details: "scoring.noteHalfLifeDays must be in 1..=3650".to_string(), + }); + } + if !scoring.closed_mr_multiplier.is_finite() + || scoring.closed_mr_multiplier <= 0.0 + || scoring.closed_mr_multiplier > 1.0 + { + return Err(LoreError::ConfigInvalid { + details: "scoring.closedMrMultiplier must be finite and in (0.0, 1.0]".to_string(), + }); + } + if scoring.reviewer_min_note_chars > 4096 { + return Err(LoreError::ConfigInvalid { + details: "scoring.reviewerMinNoteChars must be <= 4096".to_string(), + }); + } + if scoring + .excluded_usernames + .iter() + .any(|u| u.trim().is_empty()) + { + return Err(LoreError::ConfigInvalid { + details: "scoring.excludedUsernames entries must be non-empty".to_string(), + }); + } + Ok(()) +} + +#[derive(Debug, serde::Serialize)] +pub struct MinimalConfig { + pub gitlab: MinimalGitLabConfig, + pub projects: Vec, + #[serde(rename = "defaultProject", skip_serializing_if = "Option::is_none")] + pub default_project: Option, +} + +#[derive(Debug, serde::Serialize)] +pub struct MinimalGitLabConfig { + #[serde(rename = "baseUrl")] + pub base_url: String, + #[serde(rename = "tokenEnvVar")] + pub token_env_var: String, +} + +impl serde::Serialize for ProjectConfig { + fn serialize(&self, serializer: S) -> std::result::Result + where + S: serde::Serializer, + { + use serde::ser::SerializeStruct; + let mut state = serializer.serialize_struct("ProjectConfig", 1)?; + state.serialize_field("path", &self.path)?; + state.end() + } +} + +#[cfg(test)] +mod tests { + use super::*; + use tempfile::TempDir; + + fn write_config(dir: &TempDir, scoring_json: &str) -> std::path::PathBuf { + let path = dir.path().join("config.json"); + let config = format!( + r#"{{ + "gitlab": {{ + "baseUrl": "https://gitlab.example.com", + "tokenEnvVar": "GITLAB_TOKEN" + 
}}, + "projects": [ + {{ "path": "group/project" }} + ], + "scoring": {scoring_json} +}}"# + ); + fs::write(&path, config).unwrap(); + path + } + + fn write_config_with_default_project( + dir: &TempDir, + default_project: Option<&str>, + ) -> std::path::PathBuf { + let path = dir.path().join("config.json"); + let dp_field = match default_project { + Some(dp) => format!(r#","defaultProject": "{dp}""#), + None => String::new(), + }; + let config = format!( + r#"{{ + "gitlab": {{ + "baseUrl": "https://gitlab.example.com", + "tokenEnvVar": "GITLAB_TOKEN" + }}, + "projects": [ + {{ "path": "group/project" }}, + {{ "path": "other/repo" }} + ]{dp_field} +}}"# + ); + fs::write(&path, config).unwrap(); + path + } + + #[test] + fn test_load_rejects_negative_author_weight() { + let dir = TempDir::new().unwrap(); + let path = write_config( + &dir, + r#"{ + "authorWeight": -1, + "reviewerWeight": 10, + "noteBonus": 1 +}"#, + ); + let err = Config::load_from_path(&path).unwrap_err(); + let msg = err.to_string(); + assert!( + msg.contains("scoring.authorWeight"), + "unexpected error: {msg}" + ); + } + + #[test] + fn test_load_rejects_negative_reviewer_weight() { + let dir = TempDir::new().unwrap(); + let path = write_config( + &dir, + r#"{ + "authorWeight": 25, + "reviewerWeight": -1, + "noteBonus": 1 +}"#, + ); + let err = Config::load_from_path(&path).unwrap_err(); + let msg = err.to_string(); + assert!( + msg.contains("scoring.reviewerWeight"), + "unexpected error: {msg}" + ); + } + + #[test] + fn test_config_fetch_work_item_status_default_true() { + let config = SyncConfig::default(); + assert!(config.fetch_work_item_status); + } + + #[test] + fn test_config_deserialize_without_key() { + let json = r#"{}"#; + let config: SyncConfig = serde_json::from_str(json).unwrap(); + assert!( + config.fetch_work_item_status, + "Missing key should default to true" + ); + } + + #[test] + fn test_load_rejects_negative_note_bonus() { + let dir = TempDir::new().unwrap(); + let path = 
write_config( + &dir, + r#"{ + "authorWeight": 25, + "reviewerWeight": 10, + "noteBonus": -1 +}"#, + ); + let err = Config::load_from_path(&path).unwrap_err(); + let msg = err.to_string(); + assert!(msg.contains("scoring.noteBonus"), "unexpected error: {msg}"); + } + + #[test] + fn test_effective_project_cli_overrides_default() { + let config = Config { + gitlab: GitLabConfig { + base_url: "https://gitlab.example.com".to_string(), + token_env_var: "GITLAB_TOKEN".to_string(), + }, + projects: vec![ProjectConfig { + path: "group/project".to_string(), + }], + default_project: Some("group/project".to_string()), + sync: SyncConfig::default(), + storage: StorageConfig::default(), + embedding: EmbeddingConfig::default(), + logging: LoggingConfig::default(), + scoring: ScoringConfig::default(), + }; + assert_eq!( + config.effective_project(Some("other/repo")), + Some("other/repo") + ); + } + + #[test] + fn test_effective_project_falls_back_to_default() { + let config = Config { + gitlab: GitLabConfig { + base_url: "https://gitlab.example.com".to_string(), + token_env_var: "GITLAB_TOKEN".to_string(), + }, + projects: vec![ProjectConfig { + path: "group/project".to_string(), + }], + default_project: Some("group/project".to_string()), + sync: SyncConfig::default(), + storage: StorageConfig::default(), + embedding: EmbeddingConfig::default(), + logging: LoggingConfig::default(), + scoring: ScoringConfig::default(), + }; + assert_eq!(config.effective_project(None), Some("group/project")); + } + + #[test] + fn test_effective_project_none_when_both_absent() { + let config = Config { + gitlab: GitLabConfig { + base_url: "https://gitlab.example.com".to_string(), + token_env_var: "GITLAB_TOKEN".to_string(), + }, + projects: vec![ProjectConfig { + path: "group/project".to_string(), + }], + default_project: None, + sync: SyncConfig::default(), + storage: StorageConfig::default(), + embedding: EmbeddingConfig::default(), + logging: LoggingConfig::default(), + scoring: 
ScoringConfig::default(), + }; + assert_eq!(config.effective_project(None), None); + } + + #[test] + fn test_load_with_valid_default_project() { + let dir = TempDir::new().unwrap(); + let path = write_config_with_default_project(&dir, Some("group/project")); + let config = Config::load_from_path(&path).unwrap(); + assert_eq!(config.default_project.as_deref(), Some("group/project")); + } + + #[test] + fn test_load_rejects_invalid_default_project() { + let dir = TempDir::new().unwrap(); + let path = write_config_with_default_project(&dir, Some("nonexistent/project")); + let err = Config::load_from_path(&path).unwrap_err(); + let msg = err.to_string(); + assert!(msg.contains("defaultProject"), "unexpected error: {msg}"); + } + + #[test] + fn test_load_default_project_suffix_match() { + let dir = TempDir::new().unwrap(); + let path = write_config_with_default_project(&dir, Some("project")); + let config = Config::load_from_path(&path).unwrap(); + assert_eq!(config.default_project.as_deref(), Some("project")); + } + + #[test] + fn test_minimal_config_omits_null_default_project() { + let config = MinimalConfig { + gitlab: MinimalGitLabConfig { + base_url: "https://gitlab.example.com".to_string(), + token_env_var: "GITLAB_TOKEN".to_string(), + }, + projects: vec![ProjectConfig { + path: "group/project".to_string(), + }], + default_project: None, + }; + let json = serde_json::to_string(&config).unwrap(); + assert!( + !json.contains("defaultProject"), + "null default_project should be omitted: {json}" + ); + } + + #[test] + fn test_minimal_config_includes_default_project_when_set() { + let config = MinimalConfig { + gitlab: MinimalGitLabConfig { + base_url: "https://gitlab.example.com".to_string(), + token_env_var: "GITLAB_TOKEN".to_string(), + }, + projects: vec![ProjectConfig { + path: "group/project".to_string(), + }], + default_project: Some("group/project".to_string()), + }; + let json = serde_json::to_string(&config).unwrap(); + assert!( + 
json.contains("defaultProject"), + "set default_project should be present: {json}" + ); + } + + #[test] + fn test_config_validation_rejects_zero_half_life() { + let scoring = ScoringConfig { + author_half_life_days: 0, + ..Default::default() + }; + let err = validate_scoring(&scoring).unwrap_err(); + let msg = err.to_string(); + assert!( + msg.contains("authorHalfLifeDays"), + "unexpected error: {msg}" + ); + } + + #[test] + fn test_config_validation_rejects_absurd_half_life() { + let scoring = ScoringConfig { + author_half_life_days: 5000, + ..Default::default() + }; + let err = validate_scoring(&scoring).unwrap_err(); + let msg = err.to_string(); + assert!( + msg.contains("authorHalfLifeDays"), + "unexpected error: {msg}" + ); + } + + #[test] + fn test_config_validation_rejects_nan_multiplier() { + let scoring = ScoringConfig { + closed_mr_multiplier: f64::NAN, + ..Default::default() + }; + let err = validate_scoring(&scoring).unwrap_err(); + let msg = err.to_string(); + assert!( + msg.contains("closedMrMultiplier"), + "unexpected error: {msg}" + ); + } + + #[test] + fn test_config_validation_rejects_zero_multiplier() { + let scoring = ScoringConfig { + closed_mr_multiplier: 0.0, + ..Default::default() + }; + let err = validate_scoring(&scoring).unwrap_err(); + let msg = err.to_string(); + assert!( + msg.contains("closedMrMultiplier"), + "unexpected error: {msg}" + ); + } + + #[test] + fn test_config_validation_rejects_negative_reviewer_assignment_weight() { + let scoring = ScoringConfig { + reviewer_assignment_weight: -1, + ..Default::default() + }; + let err = validate_scoring(&scoring).unwrap_err(); + let msg = err.to_string(); + assert!( + msg.contains("reviewerAssignmentWeight"), + "unexpected error: {msg}" + ); + } + + #[test] + fn test_config_validation_rejects_oversized_min_note_chars() { + let scoring = ScoringConfig { + reviewer_min_note_chars: 5000, + ..Default::default() + }; + let err = validate_scoring(&scoring).unwrap_err(); + let msg = 
err.to_string(); + assert!( + msg.contains("reviewerMinNoteChars"), + "unexpected error: {msg}" + ); + } + + #[test] + fn test_config_validation_rejects_empty_excluded_username() { + let scoring = ScoringConfig { + excluded_usernames: vec!["valid".to_string(), " ".to_string()], + ..Default::default() + }; + let err = validate_scoring(&scoring).unwrap_err(); + let msg = err.to_string(); + assert!(msg.contains("excludedUsernames"), "unexpected error: {msg}"); + } + + #[test] + fn test_config_validation_accepts_valid_new_fields() { + let scoring = ScoringConfig { + author_half_life_days: 365, + reviewer_half_life_days: 180, + reviewer_assignment_half_life_days: 90, + note_half_life_days: 60, + closed_mr_multiplier: 0.5, + reviewer_min_note_chars: 20, + reviewer_assignment_weight: 3, + excluded_usernames: vec!["bot-user".to_string()], + ..Default::default() + }; + validate_scoring(&scoring).unwrap(); + } + + #[test] + fn test_config_validation_accepts_boundary_half_life() { + // 1 and 3650 are both valid boundaries + let scoring_min = ScoringConfig { + author_half_life_days: 1, + ..Default::default() + }; + validate_scoring(&scoring_min).unwrap(); + + let scoring_max = ScoringConfig { + author_half_life_days: 3650, + ..Default::default() + }; + validate_scoring(&scoring_max).unwrap(); + } + + #[test] + fn test_config_validation_accepts_multiplier_at_one() { + let scoring = ScoringConfig { + closed_mr_multiplier: 1.0, + ..Default::default() + }; + validate_scoring(&scoring).unwrap(); + } +} diff --git a/src/core/db.rs b/src/core/db.rs index 78af367..d07ffe3 100644 --- a/src/core/db.rs +++ b/src/core/db.rs @@ -89,6 +89,10 @@ const MIGRATIONS: &[(&str, &str)] = &[ "026", include_str!("../../migrations/026_scoring_indexes.sql"), ), + ( + "027", + include_str!("../../migrations/027_tui_list_indexes.sql"), + ), ]; pub fn create_connection(db_path: &Path) -> Result {