From 597095a283cd354389ab11c99a8f94a2b3959d18 Mon Sep 17 00:00:00 2001 From: teernisse Date: Fri, 20 Feb 2026 14:25:20 -0500 Subject: [PATCH] chore: update beads tracker state Sync beads issue database to JSONL for version control tracking. --- .beads/issues.jsonl | 22 ++++++++++++++++++++++ .beads/last-touched | 2 +- 2 files changed, 23 insertions(+), 1 deletion(-) diff --git a/.beads/issues.jsonl b/.beads/issues.jsonl index aa977f2..e3d34c5 100644 --- a/.beads/issues.jsonl +++ b/.beads/issues.jsonl @@ -33,7 +33,9 @@ {"id":"bd-1df9","title":"Epic: TUI Phase 4 — Operations","description":"## Background\nPhase 4 adds operational screens: Sync (real-time progress + post-sync summary), Doctor/Stats (health checks), and CLI integration (lore tui command for binary delegation). The Sync screen is the most complex — it needs real-time streaming progress with backpressure handling.\n\n## Acceptance Criteria\n- [ ] Sync screen shows real-time progress during sync with per-lane indicators\n- [ ] Sync summary shows exact changed entities after completion\n- [ ] Doctor screen shows environment health checks\n- [ ] Stats screen shows database statistics\n- [ ] CLI integration: lore tui launches lore-tui binary via runtime delegation","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:01:44.603447Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.361318Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1df9","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T18:11:51.361296Z","created_by":"tayloreernisse"}]} {"id":"bd-1elx","title":"Implement run_embed_for_document_ids scoped embedding","description":"## Background\n\nCurrently `embed_documents()` in `src/embedding/pipeline.rs` uses `find_pending_documents()` to discover ALL documents that need embedding (no existing embedding, changed content_hash, or model mismatch). 
The surgical sync pipeline needs a scoped variant that only embeds specific document IDs — the ones returned by the scoped doc regeneration step (bd-hs6j).\n\nThe existing `embed_page()` private function handles the actual embedding work for a batch of `PendingDocument` structs. It calls `split_into_chunks`, sends batches to the OllamaClient, and writes embeddings + metadata to the DB. The scoped function can reuse this by constructing `PendingDocument` structs from the provided document IDs.\n\nKey types:\n- `PendingDocument { document_id: i64, content_text: String, content_hash: String }` (from `change_detector.rs`)\n- `EmbedResult { chunks_embedded, docs_embedded, failed, skipped }` (pipeline.rs:21)\n- `OllamaClient` for the actual embedding API calls\n- `ShutdownSignal` for cancellation support\n\n## Approach\n\nAdd `embed_documents_by_ids()` to `src/embedding/pipeline.rs`:\n\n```rust\npub struct EmbedForIdsResult {\n pub chunks_embedded: usize,\n pub docs_embedded: usize,\n pub failed: usize,\n pub skipped: usize,\n}\n\npub async fn embed_documents_by_ids(\n conn: &Connection,\n client: &OllamaClient,\n model_name: &str,\n concurrency: usize,\n document_ids: &[i64],\n signal: &ShutdownSignal,\n) -> Result\n```\n\nImplementation:\n1. If `document_ids` is empty, return immediately with zero counts.\n2. Load `PendingDocument` structs for the specified IDs. Query: `SELECT id, content_text, content_hash FROM documents WHERE id IN (...)`. Filter out documents that already have current embeddings (same content_hash, model, dims, chunk_max_bytes) — reuse the LEFT JOIN logic from `find_pending_documents` but with `WHERE d.id IN (?)` instead of `WHERE d.id > ?`.\n3. If no documents need embedding after filtering, return with skipped=len.\n4. Chunk into pages of `DB_PAGE_SIZE` (500).\n5. For each page, call `embed_page()` (reuse existing private function) within a SAVEPOINT.\n6. 
Handle cancellation via `signal.is_cancelled()` between pages.\n\nAlternative simpler approach: load all specified doc IDs into a temp table or use a parameterized IN clause, then let `embed_page` process them. Since the list is typically small (1-5 documents for surgical sync), a single page call suffices.\n\nExport from `src/embedding/mod.rs` if not already pub.\n\n## Acceptance Criteria\n\n- [ ] `embed_documents_by_ids` only embeds the specified document IDs, not all pending documents\n- [ ] Documents already embedded with current content_hash + model are skipped (not re-embedded)\n- [ ] Empty document_ids input returns immediately with zero counts\n- [ ] Cancellation via ShutdownSignal is respected between pages\n- [ ] SAVEPOINT/ROLLBACK semantics match existing `embed_documents` for data integrity\n- [ ] Ollama errors for individual documents are counted as failed, not fatal\n- [ ] Function is pub for use by orchestration (bd-1i4i)\n\n## Files\n\n- `src/embedding/pipeline.rs` (add new function + result struct)\n- `src/embedding/mod.rs` (export if needed)\n\n## TDD Anchor\n\nTests in `src/embedding/pipeline_tests.rs` (or new `src/embedding/scoped_embed_tests.rs`):\n\n```rust\n#[tokio::test]\nasync fn test_embed_by_ids_only_embeds_specified_docs() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n setup_ollama_mock(&mock).await;\n let client = OllamaClient::new(&mock.uri());\n\n // Insert 2 documents: A (id=1) and B (id=2)\n insert_test_document(&conn, 1, \"Content A\", \"hash_a\");\n insert_test_document(&conn, 2, \"Content B\", \"hash_b\");\n\n let signal = ShutdownSignal::new();\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1,\n &[1], // Only embed doc 1\n &signal,\n ).await.unwrap();\n\n assert_eq!(result.docs_embedded, 1);\n // Verify doc 1 has embeddings\n let count: i64 = conn.query_row(\n \"SELECT COUNT(*) FROM embedding_metadata WHERE document_id = 1\",\n [], |r| r.get(0),\n 
).unwrap();\n assert!(count > 0);\n // Verify doc 2 has NO embeddings\n let count_b: i64 = conn.query_row(\n \"SELECT COUNT(*) FROM embedding_metadata WHERE document_id = 2\",\n [], |r| r.get(0),\n ).unwrap();\n assert_eq!(count_b, 0);\n}\n\n#[tokio::test]\nasync fn test_embed_by_ids_skips_already_embedded() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n setup_ollama_mock(&mock).await;\n let client = OllamaClient::new(&mock.uri());\n\n insert_test_document(&conn, 1, \"Content A\", \"hash_a\");\n let signal = ShutdownSignal::new();\n\n // Embed once\n embed_documents_by_ids(&conn, &client, \"nomic-embed-text\", 1, &[1], &signal).await.unwrap();\n // Embed again with same hash — should skip\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1, &[1], &signal,\n ).await.unwrap();\n assert_eq!(result.docs_embedded, 0);\n assert_eq!(result.skipped, 1);\n}\n\n#[tokio::test]\nasync fn test_embed_by_ids_empty_input() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n let client = OllamaClient::new(&mock.uri());\n let signal = ShutdownSignal::new();\n\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1, &[], &signal,\n ).await.unwrap();\n assert_eq!(result.docs_embedded, 0);\n assert_eq!(result.chunks_embedded, 0);\n}\n\n#[tokio::test]\nasync fn test_embed_by_ids_respects_cancellation() {\n let conn = setup_test_db_with_documents();\n let mock = MockServer::start().await;\n // Use delayed response to allow cancellation\n setup_slow_ollama_mock(&mock).await;\n let client = OllamaClient::new(&mock.uri());\n\n insert_test_document(&conn, 1, \"Content A\", \"hash_a\");\n let signal = ShutdownSignal::new();\n signal.cancel(); // Pre-cancel\n\n let result = embed_documents_by_ids(\n &conn, &client, \"nomic-embed-text\", 1, &[1], &signal,\n ).await.unwrap();\n assert_eq!(result.docs_embedded, 0);\n}\n```\n\n## Edge Cases\n\n- Document ID that does 
not exist in the documents table: query returns no rows, skipped silently.\n- Document with empty `content_text`: `split_into_chunks` may return 0 chunks, counted as skipped.\n- Ollama server unreachable: returns `OllamaUnavailable` error. Must not leave partial embeddings (SAVEPOINT rollback).\n- Very long document (>1500 bytes): gets chunked into multiple chunks by `split_into_chunks`. All chunks for one document must be embedded atomically.\n- Document already has embeddings but with different model: content_hash check passes but model mismatch detected — should re-embed.\n- Concurrent calls with overlapping document_ids: SAVEPOINT isolation prevents conflicts, last writer wins on embedding_metadata upsert.\n\n## Dependency Context\n\n- **Blocked by bd-hs6j**: Gets `document_ids` from scoped doc regeneration output\n- **Blocks bd-1i4i**: Orchestration function calls this as the final step of surgical sync\n- **Blocks bd-3jqx**: Integration tests verify embed isolation (only surgical docs get embedded)\n- **Uses existing internals**: `embed_page`, `PendingDocument`, `split_into_chunks`, `OllamaClient`, `ShutdownSignal`","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-17T19:16:43.680009Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:04:58.570513Z","closed_at":"2026-02-18T21:04:58.570467Z","close_reason":"Completed: all implementation work done, code reviewed, tests passing","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-1elx","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-17T19:19:25.025604Z","created_by":"tayloreernisse"}]} {"id":"bd-1ep","title":"Wire resource event fetching into sync pipeline","description":"## Background\nAfter issue/MR primary ingestion and discussion fetch, changed entities need resource_events jobs enqueued and drained. 
This is the integration point that connects the queue (bd-tir), API client (bd-sqw), DB upserts (bd-1uc), and config flag (bd-2e8).\n\n## Approach\nModify the sync pipeline to add two new phases after discussion sync:\n\n**Phase 1 — Enqueue during ingestion:**\nIn src/ingestion/orchestrator.rs, after each entity upsert (issue or MR), call:\n```rust\nif config.sync.fetch_resource_events {\n enqueue_job(conn, project_id, \"issue\", iid, local_id, \"resource_events\", None)?;\n}\n// For MRs, also enqueue mr_closes_issues (always) and mr_diffs (when fetchMrFileChanges)\n```\n\nThe \"changed entity\" detection uses the existing dirty tracker: if an entity was inserted or updated during this sync run, it gets enqueued. On --full sync, all entities are enqueued.\n\n**Phase 2 — Drain dependent queue:**\nAdd a new drain step in src/cli/commands/sync.rs (or new src/core/drain.rs), called after discussion sync:\n```rust\npub async fn drain_dependent_queue(\n conn: &Connection,\n client: &GitLabClient,\n config: &Config,\n progress: Option,\n) -> Result\n```\n\nFlow:\n1. reclaim_stale_locks(conn, config.sync.stale_lock_minutes)\n2. Loop: claim_jobs(conn, \"resource_events\", batch_size=10)\n3. For each job:\n a. Fetch 3 event types via client (fetch_issue_state_events etc.)\n b. Store via upsert functions (upsert_state_events etc.)\n c. complete_job(conn, job.id) on success\n d. fail_job(conn, job.id, error_msg) on failure\n4. Report progress: \"Fetching resource events... [N/M]\"\n5. 
Repeat until no more claimable jobs\n\n**Progress reporting:**\nAdd new ProgressEvent variants:\n```rust\nResourceEventsFetchStart { total: usize },\nResourceEventsFetchProgress { completed: usize, total: usize },\nResourceEventsFetchComplete { fetched: usize, failed: usize },\n```\n\n## Acceptance Criteria\n- [ ] Full sync enqueues resource_events jobs for all issues and MRs\n- [ ] Incremental sync only enqueues for entities changed since last sync\n- [ ] --no-events prevents enqueueing resource_events jobs\n- [ ] Drain step fetches all 3 event types per entity\n- [ ] Successful fetches stored and job completed\n- [ ] Failed fetches recorded with error, job retried on next sync\n- [ ] Stale locks reclaimed at drain start\n- [ ] Progress displayed: \"Fetching resource events... [N/M]\"\n- [ ] Robot mode progress suppressed (quiet mode)\n\n## Files\n- src/ingestion/orchestrator.rs (add enqueue calls during upsert)\n- src/cli/commands/sync.rs (add drain step after discussions)\n- src/core/drain.rs (new, optional — or inline in sync.rs)\n\n## TDD Loop\nRED: tests/sync_pipeline_tests.rs (or extend existing):\n- `test_sync_enqueues_resource_events_for_changed_entities` - mock sync, verify jobs enqueued\n- `test_sync_no_events_flag_skips_enqueue` - verify no jobs when flag false\n- `test_drain_completes_jobs_on_success` - mock API responses, verify jobs deleted\n- `test_drain_fails_jobs_on_error` - mock API failure, verify job attempts incremented\n\nNote: Full pipeline integration tests may need mock HTTP server. 
Start with unit tests on enqueue/drain logic using the real DB with mock API responses.\n\nGREEN: Implement enqueue hooks + drain step\n\nVERIFY: `cargo test sync -- --nocapture && cargo build`\n\n## Edge Cases\n- Entity deleted between enqueue and drain: API returns 404, fail_job with \"entity not found\" (retry won't help but backoff caps it)\n- Rate limiting during drain: GitLabRateLimited error should fail_job with retry (transient)\n- Network error during drain: GitLabNetworkError should fail_job with retry\n- Multiple sync runs competing: locked_at prevents double-processing; stale lock reclaim handles crashes\n- Drain should have a max iterations guard to prevent infinite loop if jobs keep failing and being retried within the same run","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:31:57.334527Z","created_by":"tayloreernisse","updated_at":"2026-02-03T17:46:51.336138Z","closed_at":"2026-02-03T17:46:51.336077Z","close_reason":"Implemented: enqueue + drain resource events in orchestrator, wired counts through ingest→sync pipeline, added progress events, 4 new tests, all 209 tests pass","compaction_level":0,"original_size":0,"labels":["gate-1","phase-b","pipeline"],"dependencies":[{"issue_id":"bd-1ep","depends_on_id":"bd-1uc","type":"blocks","created_at":"2026-02-02T21:32:06.225837Z","created_by":"tayloreernisse"},{"issue_id":"bd-1ep","depends_on_id":"bd-2e8","type":"blocks","created_at":"2026-02-02T21:32:06.142442Z","created_by":"tayloreernisse"},{"issue_id":"bd-1ep","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-02T21:31:57.335847Z","created_by":"tayloreernisse"},{"issue_id":"bd-1ep","depends_on_id":"bd-sqw","type":"blocks","created_at":"2026-02-02T21:32:06.183287Z","created_by":"tayloreernisse"},{"issue_id":"bd-1ep","depends_on_id":"bd-tir","type":"blocks","created_at":"2026-02-02T21:32:06.267800Z","created_by":"tayloreernisse"}]} +{"id":"bd-1f1f","title":"Implement username resolution 
function","description":"## Background\n`lore me` needs deterministic username resolution before any DB query executes. Source precedence is fixed (`--user` > `config.gitlab.username`), and missing username is a usage failure (exit 2), not an internal error.\n\nThis bead also defines the reusable runtime usage-error pathway for me-related commands.\n\n## Approach\n### 1. Add explicit usage error plumbing\nIn `src/core/error.rs`:\n- Add `ErrorCode::UsageError`.\n- Display string: `USAGE_ERROR`.\n- Exit code mapping: `2`.\n- Add `LoreError::UsageError(String)`.\n- Map `LoreError::UsageError(_)` to `ErrorCode::UsageError` in `code()`.\n\n### 2. Implement resolver\nIn `src/cli/commands/me/mod.rs`:\n```rust\npub fn resolve_username(cli_user: Option<&str>, config: &Config) -> Result\n```\nRules:\n- Trim leading/trailing whitespace before validation and return.\n- CLI value wins if trimmed value is non-empty.\n- Otherwise use trimmed `config.gitlab.username` if non-empty.\n- Otherwise return `LoreError::UsageError` with actionable remediation text.\n\n### 3. 
Preserve case\nDo not lowercase or case-normalize username; only trim surrounding whitespace.\n\n## Acceptance Criteria\n- [ ] `ErrorCode::UsageError` exists and renders as `USAGE_ERROR`\n- [ ] `ErrorCode::UsageError` maps to exit code `2`\n- [ ] `LoreError::UsageError(String)` exists\n- [ ] `LoreError::UsageError(_)` maps to `ErrorCode::UsageError`\n- [ ] `resolve_username(Some(\"cli_user\"), config)` returns `\"cli_user\"` regardless of config\n- [ ] `resolve_username(None, config_with_username)` returns config username\n- [ ] `resolve_username(None, config_without_username)` returns `LoreError::UsageError`\n- [ ] `resolve_username(Some(\" jdoe \"), config)` returns `\"jdoe\"`\n- [ ] Empty/whitespace-only CLI value falls through to config\n- [ ] Empty/whitespace-only config username is treated as missing\n- [ ] Error text includes both config and CLI remediation paths\n\n## Files\n- MODIFY: `src/core/error.rs`\n- MODIFY: `src/cli/commands/me/mod.rs`\n\n## TDD Anchor\nRED:\n- `test_username_cli_overrides_config`\n- `test_username_from_config_when_cli_missing`\n- `test_username_missing_returns_usage_error`\n- `test_username_whitespace_trimmed`\n- `test_usage_error_code_is_exit_2`\n\nGREEN:\n- Add usage-error plumbing.\n- Implement resolver with trimmed, case-preserving precedence.\n\nVERIFY:\n- `cargo test resolve_username`\n- `cargo test usage_error_code`\n\n## Edge Cases\n- Input containing only whitespace (`\" \"`) is treated as missing.\n- Mixed-case usernames remain unchanged after resolution.\n\n## Dependency Context\nConsumes config field from `bd-qpk3` and provides error semantics used by:\n- `bd-a7ba` (scope usage errors)\n- `bd-1vv8` (invalid `--since` usage errors)\n- `bd-32aw` (error-path compliance)\n\nDependencies:\n -> bd-qpk3 (blocks) - Add gitlab.username config field\n\nDependents:\n <- bd-utt4 (blocks) - Define MeArgs struct and register me 
subcommand","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:35:16.892995Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.045072Z","closed_at":"2026-02-20T16:09:13.045024Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1f1f","depends_on_id":"bd-qpk3","type":"blocks","created_at":"2026-02-19T19:41:08.235391Z","created_by":"tayloreernisse"}]} {"id":"bd-1f5b","title":"Extract query functions from CLI to shared pub API","description":"## Background\nThe TUI's action.rs bridges to existing CLI query functions. To avoid code duplication, the existing query_* functions in cli/commands/*.rs need to be made pub so action.rs can call them. This is the minimal refactoring approach — no new domain query layer, just visibility changes.\n\n## Approach\nModify existing CLI command files to extract and expose query functions:\n- src/cli/commands/list.rs: make query_issues(), query_mrs() pub\n- src/cli/commands/show.rs: make query_issue_detail(), query_mr_detail() pub\n- src/cli/commands/who.rs: make query_experts(), query_workload(), query_reviews(), query_active(), query_overlap() pub\n- src/cli/commands/search.rs: make run_search_query() pub\n\nThese functions should take Connection + parameters and return Result. Any CLI-specific formatting logic stays in the CLI; only the pure query logic is extracted.\n\nIf a function mixes query + format logic, split it:\n1. query_X() -> Result, LoreError> (pure query, made pub)\n2. 
format_X(data: &[T]) -> String (CLI-only formatting, stays private)\n\n## Acceptance Criteria\n- [ ] query_issues() is pub and callable from outside cli module\n- [ ] query_mrs() is pub and callable\n- [ ] query_issue_detail() and query_mr_detail() are pub\n- [ ] query_experts() and other who functions are pub\n- [ ] run_search_query() is pub\n- [ ] Existing CLI behavior unchanged (no functional changes)\n- [ ] All extracted functions take Connection + params, return Result\n- [ ] cargo test passes (no regressions)\n\n## Files\n- MODIFY: src/cli/commands/list.rs (make query functions pub)\n- MODIFY: src/cli/commands/show.rs (make query functions pub)\n- MODIFY: src/cli/commands/who.rs (make query functions pub)\n- MODIFY: src/cli/commands/search.rs (make search query pub)\n\n## TDD Anchor\nRED: Write test in lore-tui action.rs that calls crate::cli::commands::list::query_issues() and asserts it compiles.\nGREEN: Make query_issues pub.\nVERIFY: cargo test --all-targets\n\n## Edge Cases\n- Some query functions may have Config dependencies — extract only the Connection-dependent parts\n- Visibility changes may expose functions that weren't designed for external use — review signatures\n- This is a non-breaking change (additive pub visibility)\n\n## Dependency Context\nThis modifies the main lore crate (stable Rust).\nRequired by all TUI action.rs query bridge functions.\nMust be completed before TUI can fetch real data.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:06:25.285403Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.713834Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1f5b","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T18:11:34.713807Z","created_by":"tayloreernisse"}]} +{"id":"bd-1fgr","title":"Implement reviewing MRs query for me command","description":"## Background\nThe third dashboard section shows MRs where the user is a reviewer. 
Reviewers tracked in `mr_reviewers(merge_request_id, username)`. Labels in `mr_labels` junction table. This section shows MRs by OTHER people that the user needs to review, so it includes the MR author's username.\n\n## Approach\nAdd to `src/cli/commands/me/mrs.rs` (same file as authored MRs):\n```rust\npub fn fetch_my_reviewing_mrs(\n conn: &Connection,\n username: &str,\n scope: &ProjectScope,\n) -> Result> {\n let project_filter = match scope {\n ProjectScope::Single(id) => format!(\"AND p.id = {id}\"),\n ProjectScope::All => String::new(),\n };\n let sql = format!(r#\"\n SELECT p.path_with_namespace, mr.iid, mr.title, mr.state,\n mr.author_username, mr.draft, mr.updated_at,\n mr.web_url,\n (SELECT GROUP_CONCAT(l.name, ',')\n FROM mr_labels ml\n JOIN labels l ON l.id = ml.label_id\n WHERE ml.merge_request_id = mr.id) AS label_names\n FROM merge_requests mr\n JOIN mr_reviewers mrr ON mrr.merge_request_id = mr.id\n JOIN projects p ON p.id = mr.project_id\n WHERE mrr.username = ?1\n AND mr.state = 'opened'\n {project_filter}\n ORDER BY mr.updated_at DESC\n \"#);\n // ... map rows to MeMrReviewing (same pattern as authored)\n}\n```\n\nKey difference from authored: `author_username` included (who wrote the MR), joins through `mr_reviewers` not `author_username`.\n\n## Acceptance Criteria\n- [ ] Returns Vec for MRs where `mr_reviewers.username` matches (AC-3.3)\n- [ ] Only returns `state = 'opened'` MRs (AC-5.3)\n- [ ] Includes `author_username` (who wrote the MR) (AC-7.5)\n- [ ] Includes `draft` as bool\n- [ ] Labels via GROUP_CONCAT on `mr_labels` + `labels`\n- [ ] Respects ProjectScope filtering\n- [ ] User both author AND reviewer of same MR → appears in BOTH sections (valid)\n- [ ] No limit, no truncation\n\n## Files\n- MODIFY: src/cli/commands/me/mrs.rs (add alongside authored query)\n\n## TDD Anchor\nRED: Write `test_fetch_reviewing_mrs` using in-memory DB. Insert project, MR by \"alice\", mr_reviewers row for \"jdoe\". 
Assert returns 1 MR with author_username=\"alice\".\nGREEN: Implement the SQL query.\nVERIFY: `cargo test fetch_my_reviewing_mrs`\n\nAdditional tests:\n- test_reviewing_mrs_excludes_non_reviewer (no mr_reviewers row → not returned)\n- test_user_both_author_and_reviewer (insert MR where user is author + add reviewer row → appears)\n\n## Edge Cases\n- `mr_reviewers` has no timestamp — can't tell when review was requested from this table\n- Multiple reviewers → our user is one of N → MR appears once (JOIN, PK is unique)\n\n## Dependency Context\nUses `MeMrReviewing` and `ProjectScope` from bd-3bwh and bd-a7ba.\nSame file as bd-1obt (authored MRs). Uses `ms_to_iso` from `src/core/time.rs`.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:37:09.743899Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.052896Z","closed_at":"2026-02-20T16:09:13.052861Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1fgr","depends_on_id":"bd-3bwh","type":"blocks","created_at":"2026-02-19T19:41:17.639607Z","created_by":"tayloreernisse"},{"issue_id":"bd-1fgr","depends_on_id":"bd-a7ba","type":"blocks","created_at":"2026-02-19T19:41:17.722182Z","created_by":"tayloreernisse"}]} {"id":"bd-1fn","title":"[CP1] Integration tests for discussion watermark","description":"Integration tests verifying discussion sync watermark behavior.\n\n## Tests (tests/discussion_watermark_tests.rs)\n\n- skips_discussion_fetch_when_updated_at_unchanged\n- fetches_discussions_when_updated_at_advanced\n- updates_watermark_after_successful_discussion_sync\n- does_not_update_watermark_on_discussion_sync_failure\n\n## Test Scenario\n1. Ingest issue with updated_at = T1\n2. Verify discussions_synced_for_updated_at = T1\n3. Re-run ingest with same issue (updated_at = T1)\n4. Verify NO discussion API calls made (watermark prevents)\n5. Simulate issue update (updated_at = T2)\n6. Re-run ingest\n7. 
Verify discussion API calls made for T2\n8. Verify watermark updated to T2\n\n## Why This Matters\nDiscussion API is expensive (1 call per issue). Watermark ensures\nwe only refetch when issue actually changed, even with cursor rewind.\n\nFiles: tests/discussion_watermark_tests.rs\nDone when: Watermark correctly prevents redundant discussion refetch","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:59:11.362495Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:02.086158Z","deleted_at":"2026-01-25T17:02:02.086154Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-1gu","title":"[CP0] gi auth-test command","description":"## Background\n\nauth-test is a quick diagnostic command to verify GitLab connectivity. Used for troubleshooting and CI pipelines. Simpler than doctor because it only checks auth, not full system health.\n\nReference: docs/prd/checkpoint-0.md section \"gi auth-test\"\n\n## Approach\n\n**src/cli/commands/auth-test.ts:**\n```typescript\nimport { Command } from 'commander';\nimport { loadConfig } from '../../core/config';\nimport { GitLabClient } from '../../gitlab/client';\nimport { TokenNotSetError } from '../../core/errors';\n\nexport const authTestCommand = new Command('auth-test')\n .description('Verify GitLab authentication')\n .action(async (options, command) => {\n const globalOpts = command.optsWithGlobals();\n \n // 1. Load config\n const config = loadConfig(globalOpts.config);\n \n // 2. Get token from environment\n const token = process.env[config.gitlab.tokenEnvVar];\n if (!token) {\n throw new TokenNotSetError(config.gitlab.tokenEnvVar);\n }\n \n // 3. Create client and test auth\n const client = new GitLabClient({\n baseUrl: config.gitlab.baseUrl,\n token,\n });\n \n // 4. Get current user\n const user = await client.getCurrentUser();\n \n // 5. 
Output success\n console.log(`Authenticated as @${user.username} (${user.name})`);\n console.log(`GitLab: ${config.gitlab.baseUrl}`);\n });\n```\n\n**Output format:**\n```\nAuthenticated as @johndoe (John Doe)\nGitLab: https://gitlab.example.com\n```\n\n## Acceptance Criteria\n\n- [ ] Loads config from default or --config path\n- [ ] Gets token from configured env var (default GITLAB_TOKEN)\n- [ ] Throws TokenNotSetError if env var not set\n- [ ] Calls GET /api/v4/user to verify auth\n- [ ] Prints username and display name on success\n- [ ] Exit 0 on success\n- [ ] Exit 1 on auth failure (GitLabAuthError)\n- [ ] Exit 1 if config not found (ConfigNotFoundError)\n\n## Files\n\nCREATE:\n- src/cli/commands/auth-test.ts\n\n## TDD Loop\n\nN/A - simple command, verify manually and with integration test in init.test.ts\n\n```bash\n# Manual verification\nexport GITLAB_TOKEN=\"valid-token\"\ngi auth-test\n\n# With invalid token\nexport GITLAB_TOKEN=\"invalid\"\ngi auth-test # should exit 1\n```\n\n## Edge Cases\n\n- Config exists but token env var not set - clear error message\n- Token exists but wrong scopes - GitLabAuthError (401)\n- Network unreachable - GitLabNetworkError\n- Token with extra whitespace - should trim","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:51.135580Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:28:16.369542Z","closed_at":"2026-01-25T03:28:16.369481Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1gu","depends_on_id":"bd-13b","type":"blocks","created_at":"2026-01-24T16:13:10.058655Z","created_by":"tayloreernisse"},{"issue_id":"bd-1gu","depends_on_id":"bd-1l1","type":"blocks","created_at":"2026-01-24T16:13:10.077581Z","created_by":"tayloreernisse"}]} {"id":"bd-1gvg","title":"Implement status fetcher with adaptive paging and pagination guard","description":"## Background\nWith the GraphQL client in place, we need a status-specific fetcher that paginates 
through all issues in a project, extracts status widgets via __typename matching, and handles edge cases like complexity errors and cursor stalls.\n\n## Approach\nAll code goes in src/gitlab/graphql.rs alongside GraphqlClient. The fetcher uses the workItems(types:[ISSUE]) resolver (NOT project.issues which returns the old Issue type without status widgets). Widget matching uses __typename == \"WorkItemWidgetStatus\" for deterministic identification.\n\n## Files\n- src/gitlab/graphql.rs (add to existing file created by bd-2dlt)\n\n## Implementation\n\nConstants:\n ISSUE_STATUS_QUERY: GraphQL query string with $projectPath, $after, $first variables\n PAGE_SIZES: &[u32] = &[100, 50, 25, 10]\n\nPrivate deserialization types:\n WorkItemsResponse { project: Option }\n ProjectNode { work_items: Option } (serde rename workItems)\n WorkItemConnection { nodes: Vec, page_info: PageInfo } (serde rename pageInfo)\n WorkItemNode { iid: String, widgets: Vec }\n PageInfo { end_cursor: Option, has_next_page: bool } (serde renames)\n StatusWidget { status: Option }\n\nPublic types:\n UnsupportedReason enum: GraphqlEndpointMissing, AuthForbidden (Debug, Clone)\n FetchStatusResult struct:\n statuses: HashMap\n all_fetched_iids: HashSet\n unsupported_reason: Option\n partial_error_count: usize\n first_partial_error: Option\n\nis_complexity_or_timeout_error(msg) -> bool: lowercase contains \"complexity\" or \"timeout\"\n\nfetch_issue_statuses(client, project_path) -> Result:\n Pagination loop:\n 1. Build variables with current page_size from PAGE_SIZES[page_size_idx]\n 2. Call client.query() — match errors:\n - GitLabNotFound -> Ok(empty + GraphqlEndpointMissing) + warn\n - GitLabAuthFailed -> Ok(empty + AuthForbidden) + warn \n - Other with complexity/timeout msg -> reduce page_size_idx, continue (retry same cursor)\n - Other with smallest page size exhausted -> return Err\n - Other -> return Err\n 3. Track partial errors from GraphqlQueryResult\n 4. 
Parse response into WorkItemsResponse\n 5. For each node: parse iid to i64, add to all_fetched_iids, check widgets for __typename == \"WorkItemWidgetStatus\", insert status into map\n 6. Reset page_size_idx to 0 after successful page\n 7. Pagination guard: if has_next_page but new cursor == old cursor or is None, warn + break\n 8. Update cursor, continue loop\n\n## Acceptance Criteria\n- [ ] Paginates: 2-page mock returns all statuses + all IIDs\n- [ ] No status widget: IID in all_fetched_iids but not in statuses\n- [ ] Status widget with null status: IID in all_fetched_iids but not in statuses\n- [ ] 404 -> Ok(empty, unsupported_reason: GraphqlEndpointMissing)\n- [ ] 403 -> Ok(empty, unsupported_reason: AuthForbidden)\n- [ ] Success -> unsupported_reason: None\n- [ ] __typename != \"WorkItemWidgetStatus\" -> ignored, no error\n- [ ] Cursor stall (same endCursor twice) -> aborts, returns partial result\n- [ ] Complexity error at first=100 -> retries at 50, succeeds\n- [ ] Timeout error -> reduces page size\n- [ ] All page sizes fail -> returns Err\n- [ ] After successful page, next page starts at first=100 again\n- [ ] Partial-data pages -> partial_error_count incremented, first_partial_error captured\n\n## TDD Loop\nRED: test_fetch_statuses_pagination, test_fetch_statuses_no_status_widget, test_fetch_statuses_404_graceful, test_fetch_statuses_403_graceful, test_typename_matching_ignores_non_status_widgets, test_fetch_statuses_cursor_stall_aborts, test_fetch_statuses_complexity_error_reduces_page_size, test_fetch_statuses_timeout_error_reduces_page_size, test_fetch_statuses_smallest_page_still_fails, test_fetch_statuses_page_size_resets_after_success, test_fetch_statuses_unsupported_reason_none_on_success, test_fetch_statuses_partial_errors_tracked\n Adaptive tests: mock must inspect $first variable in request body to return different responses per page size\nGREEN: Implement all types + fetch_issue_statuses function\nVERIFY: cargo test fetch_statuses && cargo test 
typename\n\n## Edge Cases\n- GraphQL returns iid as String — parse to i64\n- widgets is Vec — match __typename field, then deserialize matching widgets\n- let-chain syntax: if is_status_widget && let Ok(sw) = serde_json::from_value::(...)\n- Pagination guard: new_cursor.is_none() || new_cursor == cursor\n- Page size resets to 0 (index into PAGE_SIZES) after each successful page\n- FetchStatusResult is NOT Clone — test fields individually","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:42:00.388137Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.418490Z","closed_at":"2026-02-11T07:21:33.418451Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1gvg","depends_on_id":"bd-2dlt","type":"blocks","created_at":"2026-02-11T06:42:41.801667Z","created_by":"tayloreernisse"},{"issue_id":"bd-1gvg","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-11T06:42:00.389311Z","created_by":"tayloreernisse"}]} @@ -63,6 +65,7 @@ {"id":"bd-1nsl","title":"Epic: Surgical Per-IID Sync","description":"## Background\n\nSurgical Per-IID Sync adds `lore sync --issue --mr -p ` for on-demand sync of specific entities without running the full project-wide pipeline. This is critical for agent workflows: when an agent needs fresh data for a specific issue or MR, waiting for a full sync (minutes) is unacceptable. 
Surgical sync completes in seconds by fetching only the targeted entities, their discussions, resource events, and dependent data, then scoping doc regeneration and embedding to only the affected documents.\n\n## Architecture\n\nThe pipeline stages mirror full sync but scoped to individual entities:\n\n```\nPREFLIGHT -> TOCTOU CHECK -> INGEST -> DEPENDENTS -> DOCS -> EMBED -> FINALIZE\n```\n\n- **Preflight**: Fetch entity from GitLab API by IID, confirm existence\n- **TOCTOU check**: Compare payload `updated_at` with DB — skip if already current\n- **Ingest**: Upsert entity via existing `process_single_issue`/`process_single_mr`\n- **Dependents**: Inline fetch of discussions, resource events, MR diffs, closes_issues\n- **Docs**: Scoped `regenerate_dirty_documents_for_sources()` — only affected source keys\n- **Embed**: Scoped `embed_documents_by_ids()` — only regenerated document IDs\n- **Finalize**: SyncRunRecorder with surgical mode columns\n\n## Children (Execution Order)\n\n### Foundation (no blockers, can parallelize)\n1. **bd-tiux** — Migration 027: surgical mode columns on sync_runs\n2. **bd-1sc6** — Error variant + pub(crate) visibility changes\n3. **bd-159p** — GitLab client get_by_iid methods\n4. **bd-1lja** — CLI flags + SyncOptions extensions\n\n### Core (blocked by foundation)\n5. **bd-wcja** — SyncResult surgical fields (blocked by bd-3sez)\n6. **bd-arka** — SyncRunRecorder surgical lifecycle (blocked by bd-tiux)\n7. **bd-3sez** — surgical.rs core module + tests (blocked by bd-159p, bd-1sc6)\n8. **bd-hs6j** — Scoped doc regeneration (no blockers)\n9. **bd-1elx** — Scoped embedding (no blockers)\n\n### Orchestration (blocked by core)\n10. **bd-kanh** — Per-entity dependent helpers (blocked by bd-3sez)\n11. **bd-1i4i** — Orchestrator function (blocked by all core beads)\n\n### Wiring + Validation\n12. **bd-3bec** — Wire dispatch in run_sync + robot-docs (blocked by bd-1i4i)\n13. 
**bd-3jqx** — Integration tests (blocked by bd-1i4i + core beads)\n\n## Completion Criteria\n\n- [ ] `lore sync --issue 7 -p group/project` fetches, ingests, and reports for issue 7 only\n- [ ] `lore sync --mr 101 --mr 102 -p proj` handles multiple MRs\n- [ ] `lore sync --preflight-only --issue 7 -p proj` validates without DB writes\n- [ ] Robot mode JSON includes `surgical_mode`, `surgical_iids`, `entity_results`\n- [ ] TOCTOU: already-current entities are skipped (not re-ingested)\n- [ ] Scoped docs + embed: only affected documents are regenerated and embedded\n- [ ] Cancellation at any stage stops gracefully with partial results\n- [ ] `lore robot-docs` documents all surgical flags and response schemas\n- [ ] All existing full-sync tests pass unchanged\n- [ ] Integration test suite (bd-3jqx) passes","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-02-17T19:11:34.020453Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:38:02.294242Z","closed_at":"2026-02-18T21:38:02.294190Z","close_reason":"All children shipped. Surgical per-IID sync landed in 9ec1344.","compaction_level":0,"original_size":0,"labels":["surgical-sync"]} {"id":"bd-1o1","title":"OBSERV: Add -v/--verbose and --log-format CLI flags","description":"## Background\nUsers and agents need CLI-controlled verbosity without knowing RUST_LOG syntax. The -v flag convention (cargo, curl, ssh) is universally understood. 
--log-format json enables lore sync 2>&1 | jq workflows without reading log files.\n\n## Approach\nAdd two new global flags to the Cli struct in src/cli/mod.rs (insert after the quiet field at line ~37):\n\n```rust\n/// Increase log verbosity (-v, -vv, -vvv)\n#[arg(short = 'v', long = \"verbose\", action = clap::ArgAction::Count, global = true)]\npub verbose: u8,\n\n/// Log format for stderr output: text (default) or json\n#[arg(long = \"log-format\", global = true, value_parser = [\"text\", \"json\"], default_value = \"text\")]\npub log_format: String,\n```\n\nThe existing Cli struct (src/cli/mod.rs:13-42) has these global flags: config, robot, json, color, quiet. The new flags follow the same pattern.\n\nNote: clap::ArgAction::Count allows -v, -vv, -vvv as a single flag with increasing count (0, 1, 2, 3).\n\n## Acceptance Criteria\n- [ ] lore -v sync parses without error (verbose=1)\n- [ ] lore -vv sync parses (verbose=2)\n- [ ] lore -vvv sync parses (verbose=3)\n- [ ] lore --log-format json sync parses (log_format=\"json\")\n- [ ] lore --log-format text sync parses (default)\n- [ ] lore --log-format xml sync errors (invalid value)\n- [ ] Existing commands unaffected (verbose defaults to 0, log_format to \"text\")\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/cli/mod.rs (modify Cli struct, lines 13-42)\n\n## TDD Loop\nRED: Write test that parses Cli with -v flag and asserts verbose=1\nGREEN: Add the two fields to Cli struct\nVERIFY: cargo test -p lore && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- -v and -q together: both parse fine; conflict resolution happens in subscriber setup (bd-2rr), not here\n- -v flag must be global=true so it works before and after subcommands: lore -v sync AND lore sync -v\n- --log-format is a string, not enum, to keep Cli struct 
simple","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:53:55.421339Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:10:22.585947Z","closed_at":"2026-02-04T17:10:22.585905Z","close_reason":"Added -v/--verbose (count) and --log-format (text|json) global CLI flags","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-1o1","depends_on_id":"bd-2nx","type":"parent-child","created_at":"2026-02-04T15:53:55.422103Z","created_by":"tayloreernisse"}]} {"id":"bd-1o4h","title":"OBSERV: Define StageTiming struct in src/core/metrics.rs","description":"## Background\nStageTiming is the materialized view of span timing data. It's the data structure that flows through robot JSON output, sync_runs.metrics_json, and the human-readable timing summary. Defined in a new file because it's genuinely new functionality that doesn't fit existing modules.\n\n## Approach\nCreate src/core/metrics.rs:\n\n```rust\nuse serde::Serialize;\n\nfn is_zero(v: &usize) -> bool { *v == 0 }\n\n#[derive(Debug, Clone, Serialize)]\npub struct StageTiming {\n pub name: String,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub project: Option,\n pub elapsed_ms: u64,\n pub items_processed: usize,\n #[serde(skip_serializing_if = \"is_zero\")]\n pub items_skipped: usize,\n #[serde(skip_serializing_if = \"is_zero\")]\n pub errors: usize,\n #[serde(skip_serializing_if = \"Vec::is_empty\")]\n pub sub_stages: Vec,\n}\n```\n\nRegister module in src/core/mod.rs (line ~11, add):\n```rust\npub mod metrics;\n```\n\nThe is_zero helper is a private function used by serde's skip_serializing_if. 
It must take &usize (reference) and return bool.\n\n## Acceptance Criteria\n- [ ] StageTiming serializes to JSON matching PRD Section 4.6.2 example\n- [ ] items_skipped omitted when 0\n- [ ] errors omitted when 0\n- [ ] sub_stages omitted when empty vec\n- [ ] project omitted when None\n- [ ] name, elapsed_ms, items_processed always present\n- [ ] Struct is Debug + Clone + Serialize\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/core/metrics.rs (new file)\n- src/core/mod.rs (register module, add line after existing pub mod declarations)\n\n## TDD Loop\nRED:\n - test_stage_timing_serialization: create StageTiming with sub_stages, serialize, assert JSON structure\n - test_stage_timing_zero_fields_omitted: errors=0, items_skipped=0, assert no \"errors\" or \"items_skipped\" keys\n - test_stage_timing_empty_sub_stages: sub_stages=vec![], assert no \"sub_stages\" key\nGREEN: Create metrics.rs with StageTiming struct and is_zero helper\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- is_zero must be a function, not a closure (serde skip_serializing_if requires a function path)\n- Vec::is_empty is a method on Vec, and serde accepts \"Vec::is_empty\" as a path for skip_serializing_if\n- Recursive StageTiming (sub_stages contains StageTiming): serde handles this naturally, no special handling needed","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:54:31.907234Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:21:40.915842Z","closed_at":"2026-02-04T17:21:40.915794Z","close_reason":"Created src/core/metrics.rs with StageTiming struct, serde skip_serializing_if for zero/empty fields, 5 tests","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-1o4h","depends_on_id":"bd-3er","type":"parent-child","created_at":"2026-02-04T15:54:31.910015Z","created_by":"tayloreernisse"}]} +{"id":"bd-1obt","title":"Implement authored MRs query for 
me command","description":"## Background\nThe second dashboard section shows merge requests authored by the current user in opened state. MR authorship is stored directly as `merge_requests.author_username`. Labels are in `mr_labels(merge_request_id, label_id)` joined to `labels(id, name, ...)` — NOT a column on merge_requests.\n\n## Approach\nCreate in `src/cli/commands/me/mrs.rs`:\n```rust\nuse rusqlite::Connection;\nuse crate::core::error::Result;\nuse super::types::{MeMrAuthored, ProjectScope, AttentionState};\n\npub fn fetch_my_authored_mrs(\n conn: &Connection,\n username: &str,\n scope: &ProjectScope,\n) -> Result> {\n let project_filter = match scope {\n ProjectScope::Single(id) => format!(\"AND p.id = {id}\"),\n ProjectScope::All => String::new(),\n };\n let sql = format!(r#\"\n SELECT p.path_with_namespace, mr.iid, mr.title, mr.state,\n mr.draft, mr.detailed_merge_status, mr.updated_at,\n mr.web_url,\n (SELECT GROUP_CONCAT(l.name, ',')\n FROM mr_labels ml\n JOIN labels l ON l.id = ml.label_id\n WHERE ml.merge_request_id = mr.id) AS label_names\n FROM merge_requests mr\n JOIN projects p ON p.id = mr.project_id\n WHERE mr.author_username = ?1\n AND mr.state = 'opened'\n {project_filter}\n ORDER BY mr.updated_at DESC\n \"#);\n let mut stmt = conn.prepare(&sql)?;\n let rows = stmt.query_map([username], |row| {\n let label_str: Option = row.get(8)?;\n Ok(MeMrAuthored {\n project_path: row.get(0)?,\n iid: row.get(1)?,\n title: row.get(2)?,\n state: row.get(3)?,\n draft: row.get::<_, i64>(4)? 
!= 0, // SQLite stores bool as integer\n detailed_merge_status: row.get(5)?,\n attention_state: AttentionState::NotStarted, // enriched later\n labels: label_str\n .map(|s| s.split(',').map(String::from).collect())\n .unwrap_or_default(),\n updated_at_iso: crate::core::time::ms_to_iso(row.get::<_, i64>(6)?),\n web_url: row.get(7)?,\n })\n })?;\n rows.collect::, _>>()\n .map_err(Into::into)\n}\n```\n\nKey details:\n- `draft` stored as INTEGER in SQLite (0/1) — convert to bool\n- `detailed_merge_status` is nullable TEXT\n- Labels via GROUP_CONCAT on `mr_labels` + `labels`\n- MR assignees NOT used (AC-3.5) — no join on mr_assignees\n\n## Acceptance Criteria\n- [ ] Returns Vec for MRs where `author_username` matches (AC-3.2)\n- [ ] Only returns `state = 'opened'` MRs (AC-5.2)\n- [ ] Includes `draft` as bool (AC-7.5) — converted from SQLite integer\n- [ ] Includes `detailed_merge_status` as Option (AC-7.5)\n- [ ] Labels fetched via GROUP_CONCAT on `mr_labels` JOIN `labels`\n- [ ] Respects ProjectScope filtering\n- [ ] No limit, no truncation\n- [ ] Does NOT join `mr_assignees` (AC-3.5)\n\n## Files\n- CREATE: src/cli/commands/me/mrs.rs\n- MODIFY: src/cli/commands/me/mod.rs (add `pub mod mrs;`)\n\n## TDD Anchor\nRED: Write `test_fetch_authored_mrs_returns_authored` using in-memory DB. Insert project + MR with author_username matching, state='opened'. 
Assert returns 1 MR.\nGREEN: Implement the SQL query.\nVERIFY: `cargo test fetch_my_authored_mrs`\n\nAdditional tests:\n- test_fetch_authored_mrs_excludes_merged (state='merged' not returned)\n- test_fetch_authored_mrs_excludes_other_authors\n- test_fetch_authored_mrs_draft_flag (insert with draft=1, verify draft=true)\n- test_fetch_authored_mrs_includes_labels\n\n## Edge Cases\n- `draft` column is INTEGER in SQLite — must cast to bool (`!= 0`)\n- `detailed_merge_status` may be NULL for older MRs or pre-enrichment\n- draft=1 MRs still appear (they're flagged, not filtered)\n\n## Dependency Context\nUses `MeMrAuthored` and `ProjectScope` from bd-3bwh and bd-a7ba.\nUses `ms_to_iso` from `src/core/time.rs`.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:36:57.295429Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.052019Z","closed_at":"2026-02-20T16:09:13.051982Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1obt","depends_on_id":"bd-3bwh","type":"blocks","created_at":"2026-02-19T19:41:17.479892Z","created_by":"tayloreernisse"},{"issue_id":"bd-1obt","depends_on_id":"bd-a7ba","type":"blocks","created_at":"2026-02-19T19:41:17.560435Z","created_by":"tayloreernisse"}]} {"id":"bd-1oi7","title":"NOTE-2A: Schema migration for note documents (migration 024)","description":"## Background\nThe documents and dirty_sources tables have CHECK constraints limiting source_type to ('issue', 'merge_request', 'discussion'). Need to add 'note' as valid source_type. SQLite doesn't support ALTER CONSTRAINT, so use the table-rebuild pattern. Uses migration slot 024 (022 = query indexes, 023 = issue_detail_fields already exists).\n\n## Approach\nCreate migrations/024_note_documents.sql:\n\n1. Rebuild dirty_sources: CREATE dirty_sources_new with CHECK adding 'note', INSERT SELECT, DROP old, RENAME.\n2. 
Rebuild documents (complex — must preserve FTS consistency):\n - Save junction table data (_doc_labels_backup, _doc_paths_backup)\n - Drop FTS triggers (documents_ai, documents_ad, documents_au — defined in migration 008_fts5.sql)\n - Drop junction tables (document_labels, document_paths — defined in migration 007_documents.sql)\n - Create documents_new with updated CHECK adding 'note'\n - INSERT INTO documents_new SELECT * FROM documents (preserves rowids for FTS)\n - Drop documents, rename new\n - Recreate all indexes (idx_documents_project_updated, idx_documents_author, idx_documents_source, idx_documents_content_hash — see migration 007_documents.sql for definitions)\n - Recreate junction tables + restore data from backups\n - Recreate FTS triggers (see migration 008_fts5.sql for trigger SQL)\n - INSERT INTO documents_fts(documents_fts) VALUES('rebuild')\n3. Defense-in-depth triggers:\n - notes_ad_cleanup: AFTER DELETE ON notes WHEN old.is_system = 0 → delete doc + dirty_sources for source_type='note', source_id=old.id\n - notes_au_system_cleanup: AFTER UPDATE OF is_system ON notes WHEN NEW.is_system = 1 AND OLD.is_system = 0 → delete doc + dirty_sources\n4. Drop temp backup tables\n\nRegister as (\"024\", include_str!(\"../../migrations/024_note_documents.sql\")) in MIGRATIONS array in src/core/db.rs. 
Position AFTER the \"023\" entry.\n\n## Files\n- CREATE: migrations/024_note_documents.sql\n- MODIFY: src/core/db.rs (add (\"024\", include_str!(...)) to MIGRATIONS array, after line 75)\n\n## TDD Anchor\nRED: test_migration_024_allows_note_source_type — INSERT with source_type='note' should succeed in both documents and dirty_sources.\nGREEN: Implement the table rebuild migration.\nVERIFY: cargo test migration_024 -- --nocapture\nTests: test_migration_024_preserves_existing_data, test_migration_024_fts_triggers_intact, test_migration_024_row_counts_preserved, test_migration_024_integrity_checks_pass, test_migration_024_fts_rebuild_consistent, test_migration_024_note_delete_trigger_cleans_document, test_migration_024_note_system_flip_trigger_cleans_document, test_migration_024_system_note_delete_trigger_does_not_fire\n\n## Acceptance Criteria\n- [ ] INSERT source_type='note' succeeds in documents and dirty_sources\n- [ ] All existing data preserved through table rebuild (row counts match before/after)\n- [ ] FTS triggers fire correctly after rebuild (insert a doc, verify FTS entry exists)\n- [ ] documents_fts row count == documents row count after rebuild\n- [ ] PRAGMA foreign_key_check returns no violations\n- [ ] notes_ad_cleanup trigger fires on note deletion (deletes document + dirty_sources)\n- [ ] notes_au_system_cleanup trigger fires when is_system flips 0→1\n- [ ] System note deletion does NOT trigger notes_ad_cleanup (is_system = 1 guard)\n- [ ] All 9 tests pass\n\n## Edge Cases\n- Rowid preservation: INSERT INTO documents_new SELECT * preserves id column = rowid for FTS consistency\n- CRITICAL: Must save/restore junction table data (ON DELETE CASCADE on document_labels/document_paths would delete them when documents table is dropped)\n- The FTS rebuild at end is a safety net for any rowid drift\n- Empty database: migration is a no-op (all SELECTs return 0 rows, tables rebuilt with new 
CHECK)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:35.164340Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:24.078558Z","closed_at":"2026-02-12T18:13:24.078512Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"],"dependencies":[{"issue_id":"bd-1oi7","depends_on_id":"bd-18bf","type":"blocks","created_at":"2026-02-12T17:04:47.854894Z","created_by":"tayloreernisse"},{"issue_id":"bd-1oi7","depends_on_id":"bd-22ai","type":"blocks","created_at":"2026-02-12T17:04:49.940178Z","created_by":"tayloreernisse"},{"issue_id":"bd-1oi7","depends_on_id":"bd-ef0u","type":"blocks","created_at":"2026-02-12T17:04:49.301709Z","created_by":"tayloreernisse"}]} {"id":"bd-1oo","title":"Register migration 015 in db.rs and create migration 016 for mr_file_changes","description":"## Background\n\nThis bead creates the `mr_file_changes` table that stores which files each MR touched, enabling Gate 4 (file-history) and Gate 5 (trace). 
It maps MRs to the file paths they modify.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 4.1 (Schema).\n\n## Codebase Context — CRITICAL Migration Numbering\n\n- **LATEST_SCHEMA_VERSION = 14** (MIGRATIONS array in db.rs includes 001-014)\n- **Migration 015 exists on disk** (`migrations/015_commit_shas_and_closes_watermark.sql`) but is **NOT registered** in `src/core/db.rs` MIGRATIONS array\n- `merge_commit_sha` and `squash_commit_sha` are already on merge_requests (added by 015 SQL) and already used in `src/ingestion/merge_requests.rs`\n- `closes_issues_synced_for_updated_at` also added by 015 and used in orchestrator.rs\n- **This bead must FIRST register migration 015 in db.rs**, then create migration 016 for mr_file_changes\n- pending_dependent_fetches already has `job_type='mr_diffs'` in CHECK constraint (migration 011)\n- Schema version auto-computes: `LATEST_SCHEMA_VERSION = MIGRATIONS.len() as i32`\n\n## Approach\n\n### Step 1: Register existing migration 015 in db.rs\n\nAdd to MIGRATIONS array in `src/core/db.rs` (after the \"014\" entry):\n\n```rust\n(\n \"015\",\n include_str!(\"../../migrations/015_commit_shas_and_closes_watermark.sql\"),\n),\n```\n\nThis makes LATEST_SCHEMA_VERSION = 15.\n\n### Step 2: Create migration 016 for mr_file_changes\n\nCreate `migrations/016_mr_file_changes.sql`:\n\n```sql\n-- Migration 016: MR file changes table\n-- Powers file-history and trace commands (Gates 4-5)\n\nCREATE TABLE mr_file_changes (\n id INTEGER PRIMARY KEY,\n merge_request_id INTEGER NOT NULL REFERENCES merge_requests(id) ON DELETE CASCADE,\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n old_path TEXT,\n new_path TEXT NOT NULL,\n change_type TEXT NOT NULL CHECK (change_type IN ('added', 'modified', 'renamed', 'deleted')),\n UNIQUE(merge_request_id, new_path)\n);\n\nCREATE INDEX idx_mfc_project_path ON mr_file_changes(project_id, new_path);\nCREATE INDEX idx_mfc_project_old_path ON mr_file_changes(project_id, 
old_path) WHERE old_path IS NOT NULL;\nCREATE INDEX idx_mfc_mr ON mr_file_changes(merge_request_id);\nCREATE INDEX idx_mfc_renamed ON mr_file_changes(project_id, change_type) WHERE change_type = 'renamed';\n\nINSERT INTO schema_version (version, applied_at, description)\nVALUES (16, strftime('%s', 'now') * 1000, 'MR file changes table');\n```\n\n### Step 3: Register migration 016 in db.rs\n\n```rust\n(\n \"016\",\n include_str!(\"../../migrations/016_mr_file_changes.sql\"),\n),\n```\n\nLATEST_SCHEMA_VERSION will auto-compute to 16.\n\n## Acceptance Criteria\n\n- [ ] Migration 015 registered in MIGRATIONS array in src/core/db.rs\n- [ ] Migration file exists at `migrations/016_mr_file_changes.sql`\n- [ ] `mr_file_changes` table has columns: id, merge_request_id, project_id, old_path, new_path, change_type\n- [ ] UNIQUE constraint on (merge_request_id, new_path)\n- [ ] CHECK constraint on change_type: added, modified, renamed, deleted\n- [ ] 4 indexes: project+new_path, project+old_path (partial), mr_id, project+renamed (partial)\n- [ ] Migration 016 registered in MIGRATIONS array\n- [ ] LATEST_SCHEMA_VERSION auto-computes to 16\n- [ ] `lore migrate` applies both 015 and 016 successfully on a v14 database\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/core/db.rs` (register migrations 015 AND 016 in MIGRATIONS array)\n- `migrations/016_mr_file_changes.sql` (NEW)\n\n## TDD Loop\n\nRED: `lore migrate` on v14 database says \"already up to date\" (015 not registered)\n\nGREEN: Register 015 in db.rs, create 016 file, register 016 in db.rs. 
`lore migrate` applies both.\n\nVERIFY:\n```bash\ncargo check --all-targets\nlore --robot migrate\nsqlite3 ~/.local/share/lore/lore.db '.schema mr_file_changes'\nsqlite3 ~/.local/share/lore/lore.db \"SELECT version FROM schema_version ORDER BY version DESC LIMIT 1\"\n```\n\n## Edge Cases\n\n- Databases already at v15 via manual migration: 015 will be skipped, only 016 applied\n- old_path is NULL for added files, populated for renamed/deleted\n- No lines_added/lines_removed columns (spec does not require them; removed to match spec exactly)\n- Partial indexes only index relevant rows for rename chain BFS performance\n","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:08.837816Z","created_by":"tayloreernisse","updated_at":"2026-02-05T21:40:46.766136Z","closed_at":"2026-02-05T21:40:46.766074Z","close_reason":"Completed: registered migration 015 in db.rs MIGRATIONS array, created migration 016 (mr_file_changes table with 4 indexes, CHECK constraint, UNIQUE constraint), registered 016 in db.rs. LATEST_SCHEMA_VERSION auto-computes to 16. cargo check, clippy, and fmt all pass.","compaction_level":0,"original_size":0,"labels":["gate-4","phase-b","schema"],"dependencies":[{"issue_id":"bd-1oo","depends_on_id":"bd-14q","type":"parent-child","created_at":"2026-02-02T21:34:08.843541Z","created_by":"tayloreernisse"},{"issue_id":"bd-1oo","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-02T21:34:16.505965Z","created_by":"tayloreernisse"}]} {"id":"bd-1oyf","title":"NOTE-1D: robot-docs integration for notes command","description":"## Background\nAdd the notes command to the robot-docs manifest so agents can discover it. Also forward-prep SearchArgs --type to accept \"note\"/\"notes\" (duplicates work in NOTE-2F but is safe to do early).\n\n## Approach\n1. Robot-docs manifest is in src/main.rs, function handle_robot_docs() starting at line 2087. The commands JSON is built at line 2090 with serde_json::json!. 
Add a \"notes\" entry following the pattern of \"issues\" (line 2107 area) and \"mrs\" entries:\n\n \"notes\": {\n \"description\": \"List notes from discussions with rich filtering\",\n \"flags\": [\"--limit/-n \", \"--author/-a \", \"--note-type \", \"--contains \", \"--for-issue \", \"--for-mr \", \"-p/--project \", \"--since \", \"--until \", \"--path \", \"--resolution \", \"--sort \", \"--asc\", \"--include-system\", \"--note-id \", \"--gitlab-note-id \", \"--discussion-id \", \"--format \", \"--fields \", \"--open\"],\n \"robot_flags\": [\"--format json\", \"--fields minimal\"],\n \"example\": \"lore --robot notes --author jdefting --since 1y --format json --fields minimal\",\n \"response_schema\": {\n \"ok\": \"bool\",\n \"data\": {\"notes\": \"[NoteListRowJson]\", \"total_count\": \"int\", \"showing\": \"int\"},\n \"meta\": {\"elapsed_ms\": \"int\"}\n }\n }\n\n2. Update SearchArgs.source_type value_parser in src/cli/mod.rs (line 560) to include \"note\":\n value_parser = [\"issue\", \"mr\", \"discussion\", \"note\"]\n (This is also done in NOTE-2F but is safe to do in either order — value_parser is additive)\n\n3. 
Add \"notes\" to the command list in handle_robot_docs (line 662 area where command names are listed).\n\n## Files\n- MODIFY: src/main.rs (add notes to robot-docs commands JSON at line 2090 area, add to command list at line 662)\n- MODIFY: src/cli/mod.rs (add \"note\" to SearchArgs source_type value_parser at line 560)\n\n## TDD Anchor\nSmoke test: cargo run -- --robot robot-docs | jq '.data.commands.notes' should return the notes command entry.\nVERIFY: cargo test -- --nocapture (no dedicated test needed — robot-docs is a static JSON generator)\n\n## Acceptance Criteria\n- [ ] lore robot-docs output includes notes command with all flags\n- [ ] notes command has response_schema, example, and robot_flags\n- [ ] SearchArgs accepts --type note\n- [ ] All existing tests still pass\n\n## Dependency Context\n- Depends on NOTE-1A (bd-20p9), NOTE-1B (bd-3iod), NOTE-1C (bd-25hb): command must be fully wired before documenting (the manifest should describe actual working behavior)\n\n## Edge Cases\n- robot-docs --brief mode: notes command should still appear in brief output\n- Value parser order doesn't matter — \"note\" can be added at any position in the array","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T17:01:04.191582Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.359505Z","closed_at":"2026-02-12T18:13:15.359457Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["cli","per-note","search"]} @@ -80,15 +83,20 @@ {"id":"bd-1soz","title":"Add half_life_decay() pure function","description":"## Background\nThe decay function is the mathematical core of the scoring model. 
It must be correct, tested first (TDD RED), and verified independently of any DB or SQL changes.\n\n## Approach\nAdd to who.rs as a private function near the top of the module (before query_expert):\n\n```rust\n/// Exponential half-life decay: R = 2^(-t/h)\n/// Returns 1.0 at elapsed=0, 0.5 at elapsed=half_life, 0.0 if half_life=0.\nfn half_life_decay(elapsed_ms: i64, half_life_days: u32) -> f64 {\n let days = (elapsed_ms as f64 / 86_400_000.0).max(0.0);\n let hl = f64::from(half_life_days);\n if hl <= 0.0 { return 0.0; }\n 2.0_f64.powf(-days / hl)\n}\n```\n\n## TDD Loop\n\n### RED (write first):\n```rust\n#[test]\nfn test_half_life_decay_math() {\n let hl_180 = 180;\n // At t=0, full retention\n assert!((half_life_decay(0, hl_180) - 1.0).abs() < f64::EPSILON);\n // At t=half_life, exactly 0.5\n let one_hl_ms = 180 * 86_400_000_i64;\n assert!((half_life_decay(one_hl_ms, hl_180) - 0.5).abs() < 1e-10);\n // At t=2*half_life, exactly 0.25\n assert!((half_life_decay(2 * one_hl_ms, hl_180) - 0.25).abs() < 1e-10);\n // Negative elapsed clamped to 0 -> 1.0\n assert!((half_life_decay(-1000, hl_180) - 1.0).abs() < f64::EPSILON);\n // Zero half-life -> 0.0 (div-by-zero guard)\n assert!((half_life_decay(86_400_000, 0)).abs() < f64::EPSILON);\n}\n\n#[test]\nfn test_score_monotonicity_by_age() {\n // For any half-life, older timestamps must never produce higher decay than newer ones.\n // Use deterministic LCG PRNG (no rand dependency).\n let mut seed: u64 = 42;\n let hl = 90_u32;\n for _ in 0..50 {\n seed = seed.wrapping_mul(6364136223846793005).wrapping_add(1);\n let newer_ms = (seed % 100_000_000) as i64; // 0-100M ms (~1.15 days max)\n seed = seed.wrapping_mul(6364136223846793005).wrapping_add(1);\n let older_ms = newer_ms + (seed % 500_000_000) as i64; // always >= newer\n assert!(\n half_life_decay(older_ms, hl) <= half_life_decay(newer_ms, hl),\n \"Monotonicity violated: decay({older_ms}) > decay({newer_ms})\"\n );\n }\n}\n```\n\n### GREEN: Add the half_life_decay 
function (3 lines of math).\n### VERIFY: `cargo test -p lore -- test_half_life_decay_math test_score_monotonicity`\n\n## Acceptance Criteria\n- [ ] test_half_life_decay_math passes (4 boundary cases + div-by-zero guard)\n- [ ] test_score_monotonicity_by_age passes (50 random pairs, deterministic seed)\n- [ ] Function is `fn` not `pub fn` (module-private)\n- [ ] No DB dependency — pure function\n\n## Files\n- src/cli/commands/who.rs (function near top, tests in test module)\n\n## Edge Cases\n- Negative elapsed_ms: clamped to 0 via .max(0.0) -> returns 1.0\n- half_life_days = 0: returns 0.0, not NaN/Inf\n- Very large elapsed (10 years): returns very small positive f64, never negative","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T16:59:22.913281Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:43:04.404986Z","closed_at":"2026-02-12T20:43:04.404933Z","close_reason":"Implemented by time-decay swarm: 3 agents, 12 tasks, 621 tests passing, all quality gates green","compaction_level":0,"original_size":0,"labels":["scoring"]} {"id":"bd-1t4","title":"Epic: CP2 Gate C - Dependent Discussion Sync","description":"## Background\nGate C validates the dependent discussion sync with DiffNote position capture. 
This is critical for code review context preservation - without DiffNote positions, we lose the file/line context for review comments.\n\n## Acceptance Criteria (Pass/Fail)\n- [ ] Discussions fetched for MRs with updated_at > discussions_synced_for_updated_at\n- [ ] `SELECT COUNT(*) FROM discussions WHERE merge_request_id IS NOT NULL` > 0\n- [ ] DiffNotes have `position_new_path` populated (file path)\n- [ ] DiffNotes have `position_new_line` populated (line number)\n- [ ] DiffNotes have `position_type` populated (text/image/file)\n- [ ] DiffNotes have SHA triplet: `position_base_sha`, `position_start_sha`, `position_head_sha`\n- [ ] Multi-line DiffNotes have `position_line_range_start` and `position_line_range_end`\n- [ ] Unchanged MRs skip discussion refetch (watermark comparison works)\n- [ ] Watermark NOT advanced on HTTP error mid-pagination\n- [ ] Watermark NOT advanced on note timestamp parse failure\n- [ ] `gi show mr ` displays DiffNote with file context `[path:line]`\n\n## Validation Script\n```bash\n#!/bin/bash\nset -e\n\nDB_PATH=\"${XDG_DATA_HOME:-$HOME/.local/share}/gitlab-inbox/db.sqlite3\"\n\necho \"=== Gate C: Dependent Discussion Sync ===\"\n\n# 1. Check discussion count for MRs\necho \"Step 1: Check MR discussion count...\"\nMR_DISC_COUNT=$(sqlite3 \"$DB_PATH\" \"SELECT COUNT(*) FROM discussions WHERE merge_request_id IS NOT NULL;\")\necho \" MR discussions: $MR_DISC_COUNT\"\n[ \"$MR_DISC_COUNT\" -gt 0 ] || { echo \"FAIL: No MR discussions found\"; exit 1; }\n\n# 2. Check note count\necho \"Step 2: Check note count...\"\nNOTE_COUNT=$(sqlite3 \"$DB_PATH\" \"\n SELECT COUNT(*) FROM notes n\n JOIN discussions d ON d.id = n.discussion_id\n WHERE d.merge_request_id IS NOT NULL;\n\")\necho \" MR notes: $NOTE_COUNT\"\n\n# 3. 
Check DiffNote position data\necho \"Step 3: Check DiffNote positions...\"\nDIFFNOTE_COUNT=$(sqlite3 \"$DB_PATH\" \"SELECT COUNT(*) FROM notes WHERE position_new_path IS NOT NULL;\")\necho \" DiffNotes with position: $DIFFNOTE_COUNT\"\n\n# 4. Sample DiffNote data\necho \"Step 4: Sample DiffNote data...\"\nsqlite3 \"$DB_PATH\" \"\n SELECT \n n.gitlab_id,\n n.position_new_path,\n n.position_new_line,\n n.position_type,\n SUBSTR(n.position_head_sha, 1, 7) as head_sha\n FROM notes n\n WHERE n.position_new_path IS NOT NULL\n LIMIT 5;\n\"\n\n# 5. Check multi-line DiffNotes\necho \"Step 5: Check multi-line DiffNotes...\"\nMULTILINE_COUNT=$(sqlite3 \"$DB_PATH\" \"\n SELECT COUNT(*) FROM notes \n WHERE position_line_range_start IS NOT NULL \n AND position_line_range_end IS NOT NULL\n AND position_line_range_start != position_line_range_end;\n\")\necho \" Multi-line DiffNotes: $MULTILINE_COUNT\"\n\n# 6. Check watermarks set\necho \"Step 6: Check watermarks...\"\nWATERMARKED=$(sqlite3 \"$DB_PATH\" \"\n SELECT COUNT(*) FROM merge_requests \n WHERE discussions_synced_for_updated_at IS NOT NULL;\n\")\necho \" MRs with watermark set: $WATERMARKED\"\n\n# 7. Check last_seen_at for sweep pattern\necho \"Step 7: Check last_seen_at (sweep pattern)...\"\nsqlite3 \"$DB_PATH\" \"\n SELECT \n MIN(last_seen_at) as oldest,\n MAX(last_seen_at) as newest\n FROM discussions \n WHERE merge_request_id IS NOT NULL;\n\"\n\n# 8. Test show command with DiffNote\necho \"Step 8: Find MR with DiffNotes for show test...\"\nMR_IID=$(sqlite3 \"$DB_PATH\" \"\n SELECT DISTINCT m.iid\n FROM merge_requests m\n JOIN discussions d ON d.merge_request_id = m.id\n JOIN notes n ON n.discussion_id = d.id\n WHERE n.position_new_path IS NOT NULL\n LIMIT 1;\n\")\nif [ -n \"$MR_IID\" ]; then\n echo \" Testing: gi show mr $MR_IID\"\n gi show mr \"$MR_IID\" | head -50\nfi\n\n# 9. 
Re-run and verify skip count\necho \"Step 9: Re-run ingest (should skip unchanged MRs)...\"\ngi ingest --type=merge_requests\n# Should report \"Skipped discussion sync for N unchanged MRs\"\n\necho \"\"\necho \"=== Gate C: PASSED ===\"\n```\n\n## Atomicity Test (Manual - Kill Test)\n```bash\n# This tests that partial failure preserves data\n\n# 1. Get an MR with discussions\nMR_ID=$(sqlite3 \"$DB_PATH\" \"\n SELECT m.id FROM merge_requests m\n JOIN discussions d ON d.merge_request_id = m.id\n LIMIT 1;\n\")\n\n# 2. Note current note count\nBEFORE=$(sqlite3 \"$DB_PATH\" \"\n SELECT COUNT(*) FROM notes n\n JOIN discussions d ON d.id = n.discussion_id\n WHERE d.merge_request_id = $MR_ID;\n\")\necho \"Notes before: $BEFORE\"\n\n# 3. Note watermark\nWATERMARK_BEFORE=$(sqlite3 \"$DB_PATH\" \"\n SELECT discussions_synced_for_updated_at FROM merge_requests WHERE id = $MR_ID;\n\")\necho \"Watermark before: $WATERMARK_BEFORE\"\n\n# 4. Force full sync and kill mid-run\ngi ingest --type=merge_requests --full &\nPID=$!\nsleep 3 && kill -9 $PID 2>/dev/null || true\nwait $PID 2>/dev/null || true\n\n# 5. Verify notes preserved (should be same or more, never less)\nAFTER=$(sqlite3 \"$DB_PATH\" \"\n SELECT COUNT(*) FROM notes n\n JOIN discussions d ON d.id = n.discussion_id\n WHERE d.merge_request_id = $MR_ID;\n\")\necho \"Notes after kill: $AFTER\"\n[ \"$AFTER\" -ge \"$BEFORE\" ] || echo \"WARNING: Notes decreased - atomicity may be broken\"\n\n# 6. 
Note watermark should NOT have advanced if killed mid-pagination\nWATERMARK_AFTER=$(sqlite3 \"$DB_PATH\" \"\n SELECT discussions_synced_for_updated_at FROM merge_requests WHERE id = $MR_ID;\n\")\necho \"Watermark after: $WATERMARK_AFTER\"\n```\n\n## Test Commands (Quick Verification)\n```bash\n# Check DiffNote data:\nsqlite3 ~/.local/share/gitlab-inbox/db.sqlite3 \"\n SELECT \n (SELECT COUNT(*) FROM discussions WHERE merge_request_id IS NOT NULL) as mr_discussions,\n (SELECT COUNT(*) FROM notes WHERE position_new_path IS NOT NULL) as diffnotes,\n (SELECT COUNT(*) FROM merge_requests WHERE discussions_synced_for_updated_at IS NOT NULL) as watermarked;\n\"\n\n# Find MR with DiffNotes and show it:\ngi show mr $(sqlite3 ~/.local/share/gitlab-inbox/db.sqlite3 \"\n SELECT DISTINCT m.iid FROM merge_requests m\n JOIN discussions d ON d.merge_request_id = m.id\n JOIN notes n ON n.discussion_id = d.id\n WHERE n.position_new_path IS NOT NULL LIMIT 1;\n\")\n```\n\n## Dependencies\nThis gate requires:\n- bd-3j6 (Discussion transformer with DiffNote position extraction)\n- bd-20h (MR discussion ingestion with atomicity guarantees)\n- bd-iba (Client pagination for MR discussions)\n- Gates A and B must pass first\n\n## Edge Cases\n- MRs without discussions: should sync successfully, just with 0 discussions\n- Discussions without DiffNotes: regular comments have NULL position fields\n- Deleted discussions in GitLab: sweep pattern should remove them locally\n- Invalid note timestamps: should NOT advance watermark, should log warning","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-26T22:06:01.769694Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:48:21.060017Z","closed_at":"2026-01-27T00:48:21.059974Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1t4","depends_on_id":"bd-20h","type":"blocks","created_at":"2026-01-26T22:08:55.778989Z","created_by":"tayloreernisse"}]} {"id":"bd-1ta","title":"[CP1] 
Integration tests for pagination","description":"Integration tests for GitLab pagination with wiremock.\n\n## Tests (tests/pagination_tests.rs)\n\n### Page Navigation\n- fetches_all_pages_when_multiple_exist\n- respects_per_page_parameter\n- follows_x_next_page_header_until_empty\n- falls_back_to_empty_page_stop_if_headers_missing\n\n### Cursor Behavior\n- applies_cursor_rewind_for_tuple_semantics\n- clamps_negative_rewind_to_zero\n\n## Test Setup\n- Use wiremock::MockServer\n- Set up handlers for /api/v4/projects/:id/issues\n- Return x-next-page headers\n- Verify request params (updated_after, per_page)\n\nFiles: tests/pagination_tests.rs\nDone when: All pagination tests pass with mocked server","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:59:07.806593Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:02.038945Z","deleted_at":"2026-01-25T17:02:02.038939Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} +{"id":"bd-1tv8","title":"Epic: lore me — Personal Work Dashboard","description":"## Background\n`lore me` is the self-service command that answers: \"what needs my attention right now?\" for the configured GitLab user. It must work without local per-item state and without manual triage steps by deriving attention directly from synced GitLab data.\n\nThis epic exists to deliver a complete personal dashboard in both human and robot mode with deterministic behavior across section filtering, project scoping, sorting, and error handling.\n\nThe command is intentionally separate from `lore who`:\n- `lore who` = inspect other people or team-level activity.\n- `lore me` = high-signal personal dashboard with attention scoring.\n\n## Approach\nImplement the command in vertical slices, then wire them together in a single handler:\n\n1. 
Input and identity resolution:\n- Add optional `gitlab.username` config field.\n- Resolve username with strict precedence: `--user` > `config.gitlab.username` > usage error (exit 2).\n- Add `me` command args and section flags (`--issues`, `--mrs`, `--activity`) with combinable behavior.\n\n2. Scope and filtering:\n- Resolve project scope from `--project`, `--all`, and `config.default_project`.\n- Treat `--project` + `--all` as a usage error.\n- Parse `--since` (default `30d`) for activity feed cutoff only.\n\n3. Data model and queries:\n- Introduce typed dashboard structs (`MeDashboard`, per-section item structs, summary struct).\n- Implement open issues / authored MRs / reviewing MRs queries using current-association semantics.\n- Compute attention state from non-system notes with deterministic priority ordering.\n- Build activity feed from 5 sources (notes, state events, label events, milestone events, assignment system-note patterns) and merge via `UNION ALL` sorted by timestamp desc.\n\n4. Rendering:\n- Human mode: summary + legend + section renderers with existing theme/icons behavior and single-project path suppression.\n- Robot mode: `{ok,data,meta}` envelope with stable field names and `--fields minimal` support.\n\n5. 
Integration and errors:\n- Wire a single `handle_me(...)` flow in CLI dispatch.\n- Enforce required errors and exit codes (no username, no synced data, invalid flag combos, invalid since).\n- Keep empty-result dashboards non-error with explicit zero counts.\n\n## Acceptance Criteria\n- [ ] `lore me` command is available with flags: `--issues`, `--mrs`, `--activity`, `--since`, `--project`, `--all`, `--user`, `--fields`.\n- [ ] Username precedence is exact: CLI flag > config field > exit 2 usage error.\n- [ ] `--project` and `--all` together return usage error (exit 2).\n- [ ] Default scope uses `config.default_project` when present; otherwise all synced projects.\n- [ ] Work item sections include only open items and current association definitions:\n - Issues assigned to me.\n - MRs authored by me.\n - MRs where I am a reviewer.\n- [ ] Activity feed includes the 5 required sources and remains restricted to currently associated items.\n- [ ] Attention state is computed from non-system notes with stable priority order and no local cursor/high-water state.\n- [ ] Work items are sorted by attention priority then updated time descending.\n- [ ] Activity is sorted newest-first and includes own-action marker (`is_own` / `(you)`).\n- [ ] Human output includes summary counts, attention legend, event badges, and single-project path suppression.\n- [ ] Robot output uses `{ok,data,meta}` and supports `--fields minimal` preset for me-specific lists.\n- [ ] Empty data renders valid zero-state dashboard (not an error).\n- [ ] No synced data returns exit 17 with actionable suggestion.\n- [ ] All me-related logic is covered by tests and passes project quality gates.\n\n## Files\nConfirmed existing integration points:\n- MODIFY: `src/main.rs` (CLI args + dispatch)\n- MODIFY: `src/cli/commands/mod.rs` (module exports)\n- MODIFY: `src/core/config.rs` (`gitlab.username`)\n- MODIFY: `src/core/error.rs` (usage-path mapping)\n- MODIFY: `src/cli/robot.rs` (me field preset + field filtering 
integration)\n\nExpected new command surface:\n- CREATE: `src/cli/commands/me/mod.rs`\n\nLikely supporting files (if split by concern instead of single-file module):\n- CREATE: `src/cli/commands/me/types.rs`\n- CREATE: `src/cli/commands/me/queries.rs`\n- CREATE: `src/cli/commands/me/render_human.rs`\n- CREATE: `src/cli/commands/me/render_robot.rs`\n\n## TDD Anchor\nRED:\n- Add tests for username resolution precedence and missing-username error.\n- Add tests for scope resolution (`--project`, `--all`, default project, conflict).\n- Add query tests for each section and attention-state computation edge ordering.\n- Add activity-feed tests validating all 5 sources and timestamp sort order.\n- Add renderer tests for zero-state, icon tier fallback, and project-path suppression.\n- Add robot tests for envelope shape and `--fields minimal` behavior.\n\nGREEN:\n- Implement minimal behavior to satisfy one failing test class at a time in this order:\n 1) identity/scope,\n 2) data structs,\n 3) section queries,\n 4) attention,\n 5) activity feed,\n 6) human renderer,\n 7) robot renderer,\n 8) handler wiring + errors.\n\nVERIFY:\n- `cargo check --all-targets`\n- `cargo clippy --all-targets -- -D warnings`\n- `cargo fmt --check`\n- `cargo test`\n\n## Edge Cases\n- User has synced projects but zero matching issues/MRs/activity: must render valid dashboard with zeros.\n- `--since` affects only activity feed; it must not filter issue/MR sections.\n- Stale vs not-started distinction: items with zero non-system notes are `not_started`, not `stale`.\n- MR `not_ready` applies only when `draft=1` and reviewer count is zero.\n- Activity should still include closed items when they are currently associated with the user.\n\n## Dependency Context\nThis epic is implemented through the following child beads; each one is required because it provides a concrete subsystem consumed by the handler:\n\n- `bd-qpk3`: adds `gitlab.username` config source consumed by username resolution.\n- `bd-1f1f`: 
username resolver and usage-error path consumed by command entrypoint.\n- `bd-utt4`: CLI argument struct + subcommand registration consumed by dispatch.\n- `bd-a7ba`: project scope resolver consumed by section query filters.\n- `bd-1vai`: `AttentionState` enum + ordering consumed by sorting and serialization.\n- `bd-3bwh`: dashboard data structs consumed by queries and renderers.\n- `bd-joja`: open-issues query feeding issues section.\n- `bd-1obt`: authored-MR query feeding authored section.\n- `bd-1fgr`: reviewing-MR query feeding reviewing section.\n- `bd-1xuf`: attention SQL enrichment feeding all work-item sections.\n- `bd-b3r3`: core activity-source queries (notes/state/label/milestone).\n- `bd-2nl3`: assignment/review-request extraction from system notes (5th activity source).\n- `bd-2tl5`: `UNION ALL` activity assembly consumed by dashboard activity section.\n- `bd-1vxq`: summary + attention legend renderer used at top of human output.\n- `bd-2fuw`: open-issues human renderer.\n- `bd-9cob`: authored/reviewing MRs human renderer.\n- `bd-e48d`: activity human renderer with event badges.\n- `bd-2ilv`: robot serializer/envelope for me dashboard.\n- `bd-3jiq`: me-specific `--fields minimal` projection behavior.\n- `bd-1vv8`: integration handler that orchestrates all previous slices.\n- `bd-32aw`: final error-path hardening and exit-code compliance.","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-02-19T19:34:50.419598Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:17.575066Z","closed_at":"2026-02-20T16:09:17.575014Z","close_reason":"All 21 child beads implemented by lore-me agent swarm. lore me command fully operational.","compaction_level":0,"original_size":0} {"id":"bd-1u1","title":"Implement document regenerator","description":"## Background\nThe document regenerator drains the dirty_sources queue, regenerating documents for each entry. 
It uses per-item transactions for crash safety, a triple-hash fast path to skip unchanged documents entirely (no writes at all), and a bounded batch loop that drains completely. Error recording includes backoff computation.\n\n## Approach\nCreate `src/documents/regenerator.rs` per PRD Section 6.3.\n\n**Core function:**\n```rust\npub fn regenerate_dirty_documents(conn: &Connection) -> Result\n```\n\n**RegenerateResult:** { regenerated, unchanged, errored }\n\n**Algorithm (per PRD):**\n1. Loop: get_dirty_sources(conn) -> Vec<(SourceType, i64)>\n2. If empty, break (queue fully drained)\n3. For each (source_type, source_id):\n a. Begin transaction\n b. Call regenerate_one_tx(&tx, source_type, source_id) -> Result\n c. If Ok(changed): clear_dirty_tx, commit, count regenerated or unchanged\n d. If Err: record_dirty_error_tx (with backoff), commit, count errored\n\n**regenerate_one_tx (per PRD):**\n1. Extract document via extract_{type}_document(conn, source_id)\n2. If None (deleted): delete_document, return Ok(true)\n3. If Some(doc): call get_existing_hash() to check current state\n4. **If ALL THREE hashes match: return Ok(false) — skip ALL writes** (fast path)\n5. Otherwise: upsert_document with conditional label/path relinking\n6. 
Return Ok(content changed)\n\n**Helper functions (PRD-exact):**\n\n`get_existing_hash` — uses `optional()` to distinguish missing rows from DB errors:\n```rust\nfn get_existing_hash(\n conn: &Connection,\n source_type: SourceType,\n source_id: i64,\n) -> Result> {\n use rusqlite::OptionalExtension;\n let hash: Option = stmt\n .query_row(params, |row| row.get(0))\n .optional()?; // IMPORTANT: Not .ok() — .ok() would hide real DB errors\n Ok(hash)\n}\n```\n\n`get_document_id` — resolve document ID after upsert:\n```rust\nfn get_document_id(conn: &Connection, source_type: SourceType, source_id: i64) -> Result\n```\n\n`upsert_document` — checks existing triple hash before writing:\n```rust\nfn upsert_document(conn: &Connection, doc: &DocumentData) -> Result<()> {\n // 1. Query existing (id, content_hash, labels_hash, paths_hash) via OptionalExtension\n // 2. Triple-hash fast path: all match -> return Ok(())\n // 3. Upsert document row (ON CONFLICT DO UPDATE)\n // 4. Get doc_id (from existing or query after insert)\n // 5. Only delete+reinsert labels if labels_hash changed\n // 6. 
Only delete+reinsert paths if paths_hash changed\n}\n```\n\n**Key PRD detail — triple-hash fast path:**\n```rust\nif old_content_hash == &doc.content_hash\n && old_labels_hash == &doc.labels_hash\n && old_paths_hash == &doc.paths_hash\n{ return Ok(()); } // Skip ALL writes — prevents WAL churn\n```\n\n**Error recording with backoff:**\nrecord_dirty_error_tx reads current attempt_count from DB, computes next_attempt_at via shared backoff utility:\n```rust\nlet next_attempt_at = crate::core::backoff::compute_next_attempt_at(now, attempt_count + 1);\n```\n\n**All internal functions use _tx suffix** (take &Transaction) for atomicity.\n\n## Acceptance Criteria\n- [ ] Queue fully drained (bounded batch loop until empty)\n- [ ] Per-item transactions (crash loses at most 1 doc)\n- [ ] Triple-hash fast path: ALL THREE hashes match -> skip ALL writes (return Ok(false))\n- [ ] Content change: upsert document, update labels/paths\n- [ ] Labels-only change: relabels but skips path writes (paths_hash unchanged)\n- [ ] Deleted entity: delete document (cascade handles FTS/labels/paths/embeddings)\n- [ ] get_existing_hash uses `.optional()` (not `.ok()`) to preserve DB errors\n- [ ] get_document_id resolves document ID after upsert\n- [ ] Error recording: increment attempt_count, compute next_attempt_at via backoff\n- [ ] FTS triggers fire on insert/update/delete (verified by trigger, not regenerator)\n- [ ] RegenerateResult counts accurate (regenerated, unchanged, errored)\n- [ ] Errors do not abort batch (log, increment, continue)\n- [ ] `cargo test regenerator` passes\n\n## Files\n- `src/documents/regenerator.rs` — new file\n- `src/documents/mod.rs` — add `pub use regenerator::regenerate_dirty_documents;`\n\n## TDD Loop\nRED: Tests requiring DB:\n- `test_creates_new_document` — dirty source -> document created\n- `test_skips_unchanged_triple_hash` — all 3 hashes match -> unchanged count incremented, no DB writes\n- `test_updates_changed_content` — content_hash mismatch -> 
updated\n- `test_updates_changed_labels_only` — content same but labels_hash different -> updated\n- `test_updates_changed_paths_only` — content same but paths_hash different -> updated\n- `test_deletes_missing_source` — source deleted -> document deleted\n- `test_drains_queue` — queue empty after regeneration\n- `test_error_records_backoff` — error -> attempt_count incremented, next_attempt_at set\n- `test_get_existing_hash_not_found` — returns Ok(None) for missing document\nGREEN: Implement regenerate_dirty_documents + all helpers\nVERIFY: `cargo test regenerator`\n\n## Edge Cases\n- Empty queue: return immediately with all-zero counts\n- Extractor error for one item: record_dirty_error_tx, commit, continue\n- Triple-hash prevents WAL churn on incremental syncs (most entities unchanged)\n- Labels change but content does not: labels_hash mismatch triggers upsert with label relinking\n- get_existing_hash on missing document: returns Ok(None) via .optional() (not DB error)\n- get_existing_hash on corrupt DB: propagates real DB error (not masked by .ok())","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:25:55.178825Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:41:29.942386Z","closed_at":"2026-01-30T17:41:29.942324Z","close_reason":"Implemented document regenerator with triple-hash fast path, queue draining, fail-soft error handling + 5 tests","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1u1","depends_on_id":"bd-1yz","type":"blocks","created_at":"2026-01-30T15:29:16.020686Z","created_by":"tayloreernisse"},{"issue_id":"bd-1u1","depends_on_id":"bd-247","type":"blocks","created_at":"2026-01-30T15:29:15.982772Z","created_by":"tayloreernisse"},{"issue_id":"bd-1u1","depends_on_id":"bd-2fp","type":"blocks","created_at":"2026-01-30T15:29:16.055043Z","created_by":"tayloreernisse"}]} {"id":"bd-1uc","title":"Implement DB upsert functions for resource events","description":"## Background\nNeed to store 
fetched resource events into the three event tables created by migration 011. The existing DB pattern uses rusqlite prepared statements with named parameters. Timestamps from GitLab are ISO 8601 strings that need conversion to ms epoch UTC (matching the existing time.rs parse_datetime_to_ms function).\n\n## Approach\nCreate src/core/events_db.rs (new module) with three upsert functions:\n\n```rust\nuse rusqlite::Connection;\nuse super::error::Result;\n\n/// Upsert state events for an entity.\n/// Uses INSERT OR REPLACE keyed on UNIQUE(gitlab_id, project_id).\npub fn upsert_state_events(\n conn: &Connection,\n project_id: i64, // local DB project id\n entity_type: &str, // \"issue\" | \"merge_request\"\n entity_local_id: i64, // local DB id of the issue/MR\n events: &[GitLabStateEvent],\n) -> Result\n\n/// Upsert label events for an entity.\npub fn upsert_label_events(\n conn: &Connection,\n project_id: i64,\n entity_type: &str,\n entity_local_id: i64,\n events: &[GitLabLabelEvent],\n) -> Result\n\n/// Upsert milestone events for an entity.\npub fn upsert_milestone_events(\n conn: &Connection,\n project_id: i64,\n entity_type: &str,\n entity_local_id: i64,\n events: &[GitLabMilestoneEvent],\n) -> Result\n```\n\nEach function:\n1. Prepares INSERT OR REPLACE statement\n2. For each event, maps GitLab types to DB columns:\n - `actor_gitlab_id` = event.user.map(|u| u.id)\n - `actor_username` = event.user.map(|u| u.username.clone())\n - `created_at` = parse_datetime_to_ms(&event.created_at)?\n - Set issue_id or merge_request_id based on entity_type\n3. Returns count of upserted rows\n4. 
Wraps in a savepoint for atomicity per entity\n\nRegister module in src/core/mod.rs:\n```rust\npub mod events_db;\n```\n\n## Acceptance Criteria\n- [ ] All three upsert functions compile and handle all event fields\n- [ ] Upserts are idempotent (re-inserting same event doesn't duplicate)\n- [ ] Timestamps converted to ms epoch UTC via parse_datetime_to_ms\n- [ ] actor_gitlab_id and actor_username populated from event.user (handles None)\n- [ ] entity_type correctly maps to issue_id/merge_request_id (other is NULL)\n- [ ] source_merge_request_id populated for state events (iid from source_merge_request)\n- [ ] source_commit populated for state events\n- [ ] label_name populated for label events\n- [ ] milestone_title and milestone_id populated for milestone events\n- [ ] Returns upserted count\n\n## Files\n- src/core/events_db.rs (new)\n- src/core/mod.rs (add `pub mod events_db;`)\n\n## TDD Loop\nRED: tests/events_db_tests.rs (new):\n- `test_upsert_state_events_basic` - insert 3 events, verify count and data\n- `test_upsert_state_events_idempotent` - insert same events twice, verify no duplicates\n- `test_upsert_label_events_with_actor` - verify actor fields populated\n- `test_upsert_milestone_events_null_user` - verify user: null doesn't crash\n- `test_upsert_state_events_entity_exclusivity` - verify only one of issue_id/merge_request_id set\n\nSetup: create_test_db() helper that applies migrations 001-011, inserts a test project + issue + MR.\n\nGREEN: Implement the three functions\n\nVERIFY: `cargo test events_db -- --nocapture`\n\n## Edge Cases\n- parse_datetime_to_ms must handle GitLab's format: \"2024-03-15T10:30:00.000Z\" and \"2024-03-15T10:30:00.000+00:00\"\n- INSERT OR REPLACE will fire CASCADE deletes if there are FK references to these rows — currently no other table references event rows, so this is safe\n- entity_type must be validated (\"issue\" or \"merge_request\") — panic or error on invalid\n- source_merge_request field contains an MR ref object, 
not an ID — extract .iid for DB column","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:31:57.242549Z","created_by":"tayloreernisse","updated_at":"2026-02-03T16:19:14.169437Z","closed_at":"2026-02-03T16:19:14.169233Z","close_reason":"Implemented upsert_state_events, upsert_label_events, upsert_milestone_events, count_events in src/core/events_db.rs. Uses savepoints for atomicity, LoreError::Database via ? operator for clean error handling.","compaction_level":0,"original_size":0,"labels":["db","gate-1","phase-b"],"dependencies":[{"issue_id":"bd-1uc","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-02T21:31:57.246078Z","created_by":"tayloreernisse"},{"issue_id":"bd-1uc","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-02T21:31:57.247258Z","created_by":"tayloreernisse"}]} {"id":"bd-1ut","title":"[CP0] Final validation - tests, lint, typecheck","description":"## Background\n\nFinal validation ensures everything works together before marking CP0 complete. This is the integration gate - all unit tests, integration tests, lint, and type checking must pass. 
Manual smoke tests verify the full user experience.\n\nReference: docs/prd/checkpoint-0.md sections \"Definition of Done\", \"Manual Smoke Tests\"\n\n## Approach\n\n**Automated checks:**\n```bash\n# All tests pass\nnpm run test\n\n# TypeScript strict mode\nnpm run build # or: npx tsc --noEmit\n\n# ESLint with no errors\nnpm run lint\n```\n\n**Manual smoke tests (from PRD table):**\n\n| Command | Expected | Pass Criteria |\n|---------|----------|---------------|\n| `gi --help` | Command list | Shows all commands |\n| `gi version` | Version number | Shows installed version |\n| `gi init` | Interactive prompts | Creates valid config |\n| `gi init` (config exists) | Confirmation prompt | Warns before overwriting |\n| `gi init --force` | No prompt | Overwrites without asking |\n| `gi auth-test` | `Authenticated as @username` | Shows GitLab username |\n| `GITLAB_TOKEN=invalid gi auth-test` | Error message | Non-zero exit, clear error |\n| `gi doctor` | Status table | All required checks pass |\n| `gi doctor --json` | JSON object | Valid JSON, `success: true` |\n| `gi backup` | Backup path | Creates timestamped backup |\n| `gi sync-status` | No runs message | Stub output works |\n\n**Definition of Done gate items:**\n- [ ] `gi init` writes config to XDG path and validates projects against GitLab\n- [ ] `gi auth-test` succeeds with real PAT\n- [ ] `gi doctor` reports DB ok + GitLab ok\n- [ ] DB migrations apply; WAL + FK enabled; busy_timeout + synchronous set\n- [ ] App lock mechanism works (concurrent runs blocked)\n- [ ] All unit tests pass\n- [ ] All integration tests pass (mocked)\n- [ ] ESLint passes with no errors\n- [ ] TypeScript compiles with strict mode\n\n## Acceptance Criteria\n\n- [ ] `npm run test` exits 0 (all tests pass)\n- [ ] `npm run build` exits 0 (TypeScript compiles)\n- [ ] `npm run lint` exits 0 (no ESLint errors)\n- [ ] All 11 manual smoke tests pass\n- [ ] All 9 Definition of Done gate items verified\n\n## Files\n\nNo new files created. 
This bead verifies existing work.\n\n## TDD Loop\n\nThis IS the final verification step:\n\n```bash\n# Automated\nnpm run test\nnpm run build\nnpm run lint\n\n# Manual (requires GITLAB_TOKEN set with valid token)\ngi --help\ngi version\ngi init # go through setup\ngi auth-test\ngi doctor\ngi doctor --json | jq .success # should output true\ngi backup\ngi sync-status\ngi reset --confirm\ngi init # re-setup\n```\n\n## Edge Cases\n\n- Test coverage should be reasonable (aim for 80%+ on core modules)\n- Integration tests may flake on CI - check MSW setup\n- Manual tests require real GitLab token - document in README\n- ESLint may warn vs error - only errors block\n- TypeScript noImplicitAny catches missed types","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:52.078907Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:37:51.858558Z","closed_at":"2026-01-25T03:37:51.858474Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1ut","depends_on_id":"bd-1cb","type":"blocks","created_at":"2026-01-24T16:13:11.184261Z","created_by":"tayloreernisse"},{"issue_id":"bd-1ut","depends_on_id":"bd-1gu","type":"blocks","created_at":"2026-01-24T16:13:11.168637Z","created_by":"tayloreernisse"},{"issue_id":"bd-1ut","depends_on_id":"bd-1kh","type":"blocks","created_at":"2026-01-24T16:13:11.219042Z","created_by":"tayloreernisse"},{"issue_id":"bd-1ut","depends_on_id":"bd-38e","type":"blocks","created_at":"2026-01-24T16:13:11.150286Z","created_by":"tayloreernisse"},{"issue_id":"bd-1ut","depends_on_id":"bd-3kj","type":"blocks","created_at":"2026-01-24T16:13:11.200998Z","created_by":"tayloreernisse"}]} {"id":"bd-1v8","title":"Update robot-docs manifest with Phase B commands","description":"## Background\n\nThe robot-docs manifest is the agent self-discovery mechanism. 
It must include all Phase B commands so agents can discover temporal intelligence features.\n\n## Codebase Context\n\n- handle_robot_docs() in src/main.rs (line ~1646) returns JSON with commands, exit_codes, workflows, aliases, clap_error_codes\n- Currently 18 commands documented in the manifest\n- VALID_COMMANDS array in src/main.rs (line ~448): [\"issues\", \"mrs\", \"search\", \"sync\", \"ingest\", \"count\", \"status\", \"auth\", \"doctor\", \"version\", \"init\", \"stats\", \"generate-docs\", \"embed\", \"migrate\", \"health\", \"robot-docs\", \"completions\"]\n- Phase B adds 3 new commands: timeline, file-history, trace\n- count gains new entity: \"references\" (bd-2ez)\n- Existing workflows: first_setup, daily_sync, search, pre_flight\n\n## Approach\n\n### 1. Add commands to handle_robot_docs() JSON:\n\n```json\n\"timeline\": {\n \"description\": \"Chronological timeline of events matching a keyword query\",\n \"flags\": [\"\", \"-p \", \"--since \", \"--depth \", \"--expand-mentions\", \"-n \"],\n \"example\": \"lore --robot timeline 'authentication' --since 30d\"\n},\n\"file-history\": {\n \"description\": \"Which MRs touched a file, with rename chain resolution\",\n \"flags\": [\"\", \"-p \", \"--discussions\", \"--no-follow-renames\", \"--merged\", \"-n \"],\n \"example\": \"lore --robot file-history src/auth/oauth.rs\"\n},\n\"trace\": {\n \"description\": \"Trace file -> MR -> issue -> discussions decision chain\",\n \"flags\": [\"\", \"-p \", \"--discussions\", \"--no-follow-renames\", \"-n \"],\n \"example\": \"lore --robot trace src/auth/oauth.rs\"\n}\n```\n\n### 2. Update count command to mention \"references\" entity\n\n### 3. 
Add temporal_intelligence workflow:\n```json\n\"temporal_intelligence\": {\n \"description\": \"Query temporal data about project history\",\n \"steps\": [\n \"lore sync (ensure events fetched with fetchResourceEvents=true)\",\n \"lore timeline '' for chronological event history\",\n \"lore file-history for file-level MR history\",\n \"lore trace for file -> MR -> issue -> discussion chain\"\n ]\n}\n```\n\n### 4. Add timeline, file-history, trace to VALID_COMMANDS array\n\n## Acceptance Criteria\n\n- [ ] robot-docs includes timeline, file-history, trace commands\n- [ ] count references documented\n- [ ] temporal_intelligence workflow present\n- [ ] VALID_COMMANDS includes all 3 new commands\n- [ ] Examples are valid, runnable commands\n- [ ] cargo check --all-targets passes\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n\n- src/main.rs (update handle_robot_docs + VALID_COMMANDS array)\n\n## TDD Loop\n\nVERIFY: lore robot-docs | jq '.data.commands.timeline'\nVERIFY: lore robot-docs | jq '.data.workflows.temporal_intelligence'","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-02T22:43:07.859092Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:51:58.389215Z","closed_at":"2026-02-18T21:51:58.389154Z","close_reason":"Robot-docs already includes all 26 commands (timeline, file-history, trace, drift, cron, token, etc.)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1v8","depends_on_id":"bd-1ht","type":"parent-child","created_at":"2026-02-02T22:43:40.760196Z","created_by":"tayloreernisse"},{"issue_id":"bd-1v8","depends_on_id":"bd-2n4","type":"blocks","created_at":"2026-02-02T22:43:33.937157Z","created_by":"tayloreernisse"}]} {"id":"bd-1v8t","title":"Add WorkItemStatus type and SyncConfig toggle","description":"## Background\nThe GraphQL status response returns name, category, color, and iconName fields. We need a Rust struct that deserializes this directly. 
Category is stored as raw Option (not an enum) because GitLab 18.5+ supports custom statuses with arbitrary category values. We also need a config toggle so users can disable status enrichment.\n\n## Approach\nAdd WorkItemStatus to the existing types module. Add fetch_work_item_status to the existing SyncConfig with default_true() helper. Also add WorkItemStatus to pub use re-exports in src/gitlab/mod.rs.\n\n## Files\n- src/gitlab/types.rs (add struct after GitLabMergeRequest, before #[cfg(test)])\n- src/core/config.rs (add field to SyncConfig struct + Default impl)\n- src/gitlab/mod.rs (add WorkItemStatus to pub use)\n\n## Implementation\n\nIn src/gitlab/types.rs (needs Serialize, Deserialize derives already in scope):\n #[derive(Debug, Clone, Serialize, Deserialize)]\n pub struct WorkItemStatus {\n pub name: String,\n pub category: Option,\n pub color: Option,\n #[serde(rename = \"iconName\")]\n pub icon_name: Option,\n }\n\nIn src/core/config.rs SyncConfig struct (after fetch_mr_file_changes):\n #[serde(rename = \"fetchWorkItemStatus\", default = \"default_true\")]\n pub fetch_work_item_status: bool,\n\nIn impl Default for SyncConfig (after fetch_mr_file_changes: true):\n fetch_work_item_status: true,\n\n## Acceptance Criteria\n- [ ] WorkItemStatus deserializes: {\"name\":\"In progress\",\"category\":\"IN_PROGRESS\",\"color\":\"#1f75cb\",\"iconName\":\"status-in-progress\"}\n- [ ] Optional fields: {\"name\":\"To do\"} -> category/color/icon_name are None\n- [ ] Unknown category: {\"name\":\"Custom\",\"category\":\"SOME_FUTURE_VALUE\"} -> Ok\n- [ ] Null category: {\"name\":\"In progress\",\"category\":null} -> None\n- [ ] SyncConfig::default().fetch_work_item_status == true\n- [ ] JSON without fetchWorkItemStatus key -> defaults true\n- [ ] cargo check --all-targets passes\n\n## TDD Loop\nRED: test_work_item_status_deserialize, test_work_item_status_optional_fields, test_work_item_status_unknown_category, test_work_item_status_null_category, 
test_config_fetch_work_item_status_default_true, test_config_deserialize_without_key\nGREEN: Add struct + config field\nVERIFY: cargo test test_work_item_status && cargo test test_config\n\n## Edge Cases\n- serde rename \"iconName\" -> icon_name (camelCase in GraphQL)\n- Category is Option, NOT an enum\n- Config key is camelCase \"fetchWorkItemStatus\" matching existing convention","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:41:42.790001Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.416990Z","closed_at":"2026-02-11T07:21:33.416950Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1v8t","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-11T06:41:42.791014Z","created_by":"tayloreernisse"}]} {"id":"bd-1v9m","title":"Implement AppState composition + LoadState + ScreenIntent","description":"## Background\nAppState is the top-level state composition — each field corresponds to one screen. State is preserved when navigating away (never cleared on pop). LoadState enables stale-while-revalidate: screens show last data during refresh with a spinner. 
ScreenIntent is the pure return type from state handlers — they never launch async tasks directly.\n\n## Approach\nCreate crates/lore-tui/src/state/mod.rs:\n- AppState struct: dashboard (DashboardState), issue_list (IssueListState), issue_detail (IssueDetailState), mr_list (MrListState), mr_detail (MrDetailState), search (SearchState), timeline (TimelineState), who (WhoState), sync (SyncState), command_palette (CommandPaletteState), global_scope (ScopeContext), load_state (ScreenLoadStateMap), error_toast (Option), show_help (bool), terminal_size ((u16, u16))\n- LoadState enum: Idle, LoadingInitial, Refreshing, Error(String)\n- ScreenLoadStateMap: wraps HashMap, get()/set()/any_loading()\n- AppState methods: set_loading(), set_error(), clear_error(), has_text_focus(), blur_text_focus(), delegate_text_event(), interpret_screen_key(), handle_screen_msg()\n- ScreenIntent enum: None, Navigate(Screen), RequeryNeeded(Screen)\n- handle_screen_msg() matches Msg variants and returns ScreenIntent (NEVER Cmd::task)\n\nCreate stub per-screen state files (just Default-derivable structs):\n- state/dashboard.rs, issue_list.rs, issue_detail.rs, mr_list.rs, mr_detail.rs, search.rs, timeline.rs, who.rs, sync.rs, command_palette.rs\n\n## Acceptance Criteria\n- [ ] AppState derives Default and compiles with all screen state fields\n- [ ] LoadState has Idle, LoadingInitial, Refreshing, Error variants\n- [ ] ScreenLoadStateMap::get() returns Idle for untracked screens\n- [ ] ScreenLoadStateMap::any_loading() returns true when any screen is loading\n- [ ] has_text_focus() checks all filter/query focused flags\n- [ ] blur_text_focus() resets all focus flags\n- [ ] handle_screen_msg() returns ScreenIntent, never Cmd::task\n- [ ] ScreenIntent::RequeryNeeded signals that LoreApp should dispatch supervised query\n\n## Files\n- CREATE: crates/lore-tui/src/state/mod.rs\n- CREATE: crates/lore-tui/src/state/dashboard.rs (stub)\n- CREATE: crates/lore-tui/src/state/issue_list.rs (stub)\n- CREATE: 
crates/lore-tui/src/state/issue_detail.rs (stub)\n- CREATE: crates/lore-tui/src/state/mr_list.rs (stub)\n- CREATE: crates/lore-tui/src/state/mr_detail.rs (stub)\n- CREATE: crates/lore-tui/src/state/search.rs (stub)\n- CREATE: crates/lore-tui/src/state/timeline.rs (stub)\n- CREATE: crates/lore-tui/src/state/who.rs (stub)\n- CREATE: crates/lore-tui/src/state/sync.rs (stub)\n- CREATE: crates/lore-tui/src/state/command_palette.rs (stub)\n\n## TDD Anchor\nRED: Write test_load_state_default_idle that creates ScreenLoadStateMap, asserts get(&Screen::Dashboard) returns Idle.\nGREEN: Implement ScreenLoadStateMap with HashMap defaulting to Idle.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_load_state\n\n## Edge Cases\n- LoadState::set() removes Idle entries from the map to prevent unbounded growth\n- Screen::IssueDetail(key) comparison for HashMap: requires Screen to impl Hash+Eq or use ScreenKind discriminant\n- has_text_focus() must be kept in sync as new screens add text inputs","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T16:56:42.023482Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:25.732861Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1v9m","depends_on_id":"bd-2tr4","type":"blocks","created_at":"2026-02-12T18:11:25.732834Z","created_by":"tayloreernisse"},{"issue_id":"bd-1v9m","depends_on_id":"bd-c9gk","type":"blocks","created_at":"2026-02-12T17:09:39.276847Z","created_by":"tayloreernisse"}]} +{"id":"bd-1vai","title":"Define AttentionState enum with sort ordering","description":"## Background\nThe attention state model (AC-4) classifies each work item into one of 5 states based on comment activity. These states have a defined sort priority (AC-9.2). The enum is used across queries, rendering, and JSON serialization. 
NotReady is MR-only and has no attention icon (just draft indicator).\n\n## Approach\nCreate `src/cli/commands/me/types.rs`:\n```rust\nuse serde::Serialize;\n\n#[derive(Debug, Clone, Copy, PartialEq, Eq, Serialize)]\n#[serde(rename_all = \"snake_case\")]\npub enum AttentionState {\n NeedsAttention, // others commented after me (or I haven't engaged)\n NotStarted, // zero non-system notes from anyone\n AwaitingResponse, // my latest >= others' latest\n Stale, // had activity, but latest note > 30 days old\n NotReady, // MR-only: draft=1 AND no reviewers\n}\n\nimpl AttentionState {\n /// Sort priority — lower = higher urgency (shows first in dashboard)\n #[must_use]\n pub const fn sort_priority(self) -> u8 {\n match self {\n Self::NeedsAttention => 0,\n Self::NotStarted => 1,\n Self::AwaitingResponse => 2,\n Self::Stale => 3,\n Self::NotReady => 4,\n }\n }\n}\n\nimpl Ord for AttentionState {\n fn cmp(&self, other: &Self) -> std::cmp::Ordering {\n self.sort_priority().cmp(&other.sort_priority())\n }\n}\n\nimpl PartialOrd for AttentionState {\n fn partial_cmp(&self, other: &Self) -> Option {\n Some(self.cmp(other))\n }\n}\n\nimpl std::fmt::Display for AttentionState {\n fn fmt(&self, f: &mut std::fmt::Formatter<'_>) -> std::fmt::Result {\n match self {\n Self::NeedsAttention => write!(f, \"needs_attention\"),\n Self::NotStarted => write!(f, \"not_started\"),\n Self::AwaitingResponse => write!(f, \"awaiting_response\"),\n Self::Stale => write!(f, \"stale\"),\n Self::NotReady => write!(f, \"not_ready\"),\n }\n }\n}\n\nimpl AttentionState {\n pub fn from_sql_str(s: &str) -> Self {\n match s {\n \"needs_attention\" => Self::NeedsAttention,\n \"not_started\" => Self::NotStarted,\n \"awaiting_response\" => Self::AwaitingResponse,\n \"stale\" => Self::Stale,\n \"not_ready\" => Self::NotReady,\n _ => Self::NotStarted, // fallback\n }\n }\n}\n```\n\n## Acceptance Criteria\n- [ ] 5 variants: NeedsAttention, NotStarted, AwaitingResponse, Stale, NotReady\n- [ ] Serializes to 
snake_case: \"needs_attention\", \"not_started\", etc. (AC-7.3)\n- [ ] Implements Ord: NeedsAttention < NotStarted < AwaitingResponse < Stale < NotReady (AC-9.2)\n- [ ] sort_priority() is const fn returning u8\n- [ ] Display shows snake_case name\n- [ ] from_sql_str() parses SQL CASE output strings back to enum\n- [ ] Derives: Debug, Clone, Copy, PartialEq, Eq, Serialize\n\n## Files\n- CREATE: src/cli/commands/me/types.rs\n- MODIFY: src/cli/commands/me/mod.rs (add `pub mod types;`)\n\n## TDD Anchor\nRED: Write `test_attention_state_sort_order`:\n```rust\nlet mut states = [AttentionState::NotReady, AttentionState::Stale,\n AttentionState::NeedsAttention, AttentionState::AwaitingResponse,\n AttentionState::NotStarted];\nstates.sort();\nassert_eq!(states[0], AttentionState::NeedsAttention);\nassert_eq!(states[4], AttentionState::NotReady);\n```\nGREEN: Implement the enum with Ord.\nVERIFY: `cargo test attention_state`\n\nAdditional tests:\n- test_attention_state_serializes_snake_case (serde_json::to_string → \"\\\"needs_attention\\\"\")\n- test_attention_state_from_sql_str (round-trip each variant)\n- test_attention_state_display\n\n## Edge Cases\n- Using array `states.sort()` (not `vec![]`) avoids clippy pedantic lint\n- `from_sql_str` has a fallback to NotStarted for unknown strings — safe default\n- NotReady is MR-only but the enum doesn't enforce this — enforcement at query level (bd-1xuf)\n\n## Dependency Context\nNo upstream dependencies beyond the me/ module existing (bd-utt4).\nConsumed by all query beads and rendering beads.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:36:07.010461Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.048789Z","closed_at":"2026-02-20T16:09:13.048750Z","close_reason":"Implemented by lore-me agent 
swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1vai","depends_on_id":"bd-utt4","type":"blocks","created_at":"2026-02-19T19:41:08.513929Z","created_by":"tayloreernisse"}]} {"id":"bd-1vti","title":"Write decay and scoring example-based tests (TDD)","description":"## Background\nAll implementation beads (bd-1soz through bd-11mg) include their own inline TDD tests. This bead is the integration verification: run the full test suite and confirm everything works together with no regressions.\n\n## Approach\nRun cargo test and verify:\n1. All NEW tests pass (31 tests across implementation beads)\n2. All EXISTING tests pass unchanged (existing who tests, config tests, etc.)\n3. No test interference (--test-threads=1 mode)\n4. All tests in who.rs test module compile and run cleanly\n\nTest count by bead:\n- bd-1soz: 2 (test_half_life_decay_math, test_score_monotonicity_by_age)\n- bd-2w1p: 3 (test_config_validation_rejects_zero_half_life, _absurd_half_life, _nan_multiplier)\n- bd-18dn: 2 (test_path_normalization_handles_dot_and_double_slash, _preserves_prefix_semantics)\n- bd-1hoq: 1 (test_expert_sql_returns_expected_signal_rows)\n- bd-1h3f: 2 (test_old_path_probe_exact_and_prefix, test_suffix_probe_uses_old_path_sources)\n- bd-13q8: 13 (decay integration + invariant tests)\n- bd-11mg: 8 (CLI flag tests: explain_score, as_of, excluded_usernames, etc.)\nTotal: 2+3+2+1+2+13+8 = 31 new tests\n\nThis is NOT a code-writing bead — it is a verification checkpoint.\n\n## Acceptance Criteria\n- [ ] cargo test -p lore passes (all tests green)\n- [ ] cargo test -p lore -- --test-threads=1 passes (no test interference)\n- [ ] No existing test assertions were changed (only callsite signatures updated in bd-13q8 and ScoringConfig literals in bd-1b50)\n- [ ] Total test count: existing + 31 new = all pass\n\n## TDD Loop\nN/A — this bead verifies, does not write code.\nVERIFY: cargo test -p lore\n\n## Files\nNone modified — read-only 
verification.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T17:00:29.453420Z","created_by":"tayloreernisse","updated_at":"2026-02-12T20:43:04.414775Z","closed_at":"2026-02-12T20:43:04.414735Z","close_reason":"Implemented by time-decay swarm: 3 agents, 12 tasks, 621 tests passing, all quality gates green","compaction_level":0,"original_size":0,"labels":["scoring","test"],"dependencies":[{"issue_id":"bd-1vti","depends_on_id":"bd-11mg","type":"blocks","created_at":"2026-02-09T17:01:11.458083Z","created_by":"tayloreernisse"},{"issue_id":"bd-1vti","depends_on_id":"bd-18dn","type":"blocks","created_at":"2026-02-12T19:34:52.168390Z","created_by":"tayloreernisse"},{"issue_id":"bd-1vti","depends_on_id":"bd-1b50","type":"blocks","created_at":"2026-02-09T17:16:54.911778Z","created_by":"tayloreernisse"},{"issue_id":"bd-1vti","depends_on_id":"bd-1h3f","type":"blocks","created_at":"2026-02-09T17:01:11.505050Z","created_by":"tayloreernisse"},{"issue_id":"bd-1vti","depends_on_id":"bd-1soz","type":"blocks","created_at":"2026-02-09T17:16:54.816724Z","created_by":"tayloreernisse"},{"issue_id":"bd-1vti","depends_on_id":"bd-2w1p","type":"blocks","created_at":"2026-02-09T17:16:54.864235Z","created_by":"tayloreernisse"},{"issue_id":"bd-1vti","depends_on_id":"bd-2yu5","type":"blocks","created_at":"2026-02-09T17:01:11.409428Z","created_by":"tayloreernisse"}]} +{"id":"bd-1vv8","title":"Implement me command handler: wire queries to renderers","description":"## Background\nThis is the integration bead for `lore me`: resolve inputs, fetch data, enrich attention, build dashboard DTO, and dispatch to human or robot rendering. It should mirror the command-handler style used by `handle_who`, but with me-specific section filtering and output shaping.\n\n## Approach\nImplement `handle_me(config_override, args, robot_mode)` in `src/cli/commands/me/mod.rs` with deterministic flow:\n\n1. 
**Bootstrap**\n- load config\n- resolve username (`bd-1f1f`)\n- open DB connection\n- validate synced data exists (`SELECT COUNT(*) FROM projects` > 0)\n\n2. **Scope + temporal inputs**\n- resolve `ProjectScope` (`bd-a7ba`)\n- parse `--since` via `parse_since(&args.since)` into `since_ms`\n- compute `since_iso` from `since_ms` using existing ms->ISO helper\n\n3. **Section selection**\n- no section flags => all sections\n- `--issues`, `--mrs`, `--activity` combinable\n- `--since` affects activity query only\n\n4. **Fetch + enrich**\n- fetch selected sections (`bd-joja`, `bd-1obt`, `bd-1fgr`, `bd-2tl5`)\n- run attention enrichment on work-item collections (`bd-1xuf`)\n- sort each work-item list by attention then `updated_at_iso` desc\n\n5. **Assemble dashboard DTO**\n- compute summary counts from full selected data\n- include `username`, `since_iso`, `summary`, and all four arrays (empty when not selected)\n\n6. **Render/emit**\n- robot: call me robot renderer with `elapsed_ms` and optional `args.fields`\n- human: render summary first, then selected sections in order: issues, authored MRs, reviewing MRs, activity\n\n## Acceptance Criteria\n- [ ] No section flags => full dashboard\n- [ ] `--issues` => only issues section\n- [ ] `--mrs` => both MR sections (authored + reviewing)\n- [ ] `--activity` => only activity section\n- [ ] Section flags combine without conflict (`--issues --mrs` excludes activity)\n- [ ] `--since` is parsed once and only used by activity query\n- [ ] Invalid `--since` returns usage error (exit 2 path)\n- [ ] Work-item sorting is attention-priority then recency within state\n- [ ] Summary `needs_attention_count` aggregates across issues + authored + reviewing lists\n- [ ] Empty result sets still produce valid dashboard with zero counts\n- [ ] Robot output path receives `args.fields` for `--fields minimal` behavior\n- [ ] Human output suppresses project path lines when scope is single-project\n\n## Files\n- MODIFY: 
`src/cli/commands/me/mod.rs`\n\n## TDD Anchor\nRED:\n- `test_handler_section_filter_matrix`\n- `test_handler_since_only_filters_activity`\n- `test_handler_passes_fields_to_robot_renderer`\n- `test_handler_empty_results_zero_summary`\n- `test_handler_sort_order_attention_then_updated`\n\nGREEN:\n- Implement handler flow end-to-end with minimal branching.\n\nVERIFY:\n- `cargo test handle_me`\n- `cargo test me_handler`\n\n## Edge Cases\n- If only one section is selected, non-selected arrays should still serialize as `[]` in robot output.\n- `since_iso` must reflect parsed cutoff timestamp, not current time.\n- `single_project` detection should come from resolved scope, not from item count.\n\n## Dependency Context\nThis bead consumes all me slice outputs and is blocked by:\n- `bd-3jiq`, `bd-e48d`, `bd-9cob`, `bd-2fuw`, `bd-2tl5`, `bd-1xuf`.\nIts output/error behavior is finalized by `bd-32aw`.\n\nDependencies:\n -> bd-3jiq (blocks) - Implement --fields minimal preset for me robot mode\n -> bd-e48d (blocks) - Render activity feed with event badges in human mode\n -> bd-9cob (blocks) - Render MR sections (authored + reviewing) in human mode\n -> bd-2fuw (blocks) - Render open issues section in human mode\n -> bd-2tl5 (blocks) - Implement activity feed UNION ALL assembly\n -> bd-1xuf (blocks) - Implement attention state SQL computation\n\nDependents:\n <- bd-32aw (blocks) - Implement error handling paths for me command","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:40:42.375637Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.065642Z","closed_at":"2026-02-20T16:09:13.065606Z","close_reason":"Implemented by lore-me agent 
swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1vv8","depends_on_id":"bd-1xuf","type":"blocks","created_at":"2026-02-19T19:41:28.339630Z","created_by":"tayloreernisse"},{"issue_id":"bd-1vv8","depends_on_id":"bd-2fuw","type":"blocks","created_at":"2026-02-19T19:41:28.498817Z","created_by":"tayloreernisse"},{"issue_id":"bd-1vv8","depends_on_id":"bd-2tl5","type":"blocks","created_at":"2026-02-19T19:41:28.419850Z","created_by":"tayloreernisse"},{"issue_id":"bd-1vv8","depends_on_id":"bd-3jiq","type":"blocks","created_at":"2026-02-19T19:41:28.731942Z","created_by":"tayloreernisse"},{"issue_id":"bd-1vv8","depends_on_id":"bd-9cob","type":"blocks","created_at":"2026-02-19T19:41:28.577238Z","created_by":"tayloreernisse"},{"issue_id":"bd-1vv8","depends_on_id":"bd-e48d","type":"blocks","created_at":"2026-02-19T19:41:28.653603Z","created_by":"tayloreernisse"}]} +{"id":"bd-1vxq","title":"Render summary header and attention legend","description":"## Background\nThe dashboard starts with a summary header and attention legend (AC-5.5, AC-6.1, AC-6.2). Attention icons go in the existing `Icons` struct in `src/cli/render.rs` (user decision). The `not_ready` state uses no attention icon — just the draft indicator.\n\n## Approach\n\n### 1. 
Add attention icon methods to Icons struct\nIn `src/cli/render.rs`, add to the `Icons` impl block (around line 91-231):\n```rust\npub fn attention_needs(&self) -> &'static str {\n match self.mode {\n GlyphMode::Nerd => \"\\u{f0f3}\", // nf-fa-bell\n GlyphMode::Unicode => \"\\u{25c6}\", // ◆ (black diamond)\n GlyphMode::Ascii => \"[!]\",\n }\n}\npub fn attention_not_started(&self) -> &'static str {\n match self.mode {\n GlyphMode::Nerd => \"\\u{f005}\", // nf-fa-star\n GlyphMode::Unicode => \"\\u{2605}\", // ★ (black star)\n GlyphMode::Ascii => \"[*]\",\n }\n}\npub fn attention_awaiting(&self) -> &'static str {\n match self.mode {\n GlyphMode::Nerd => \"\\u{f017}\", // nf-fa-clock_o\n GlyphMode::Unicode => \"\\u{25f7}\", // ◷ (white circle with upper right quadrant)\n GlyphMode::Ascii => \"[~]\",\n }\n}\npub fn attention_stale(&self) -> &'static str {\n match self.mode {\n GlyphMode::Nerd => \"\\u{f54c}\", // nf-mdi-skull\n GlyphMode::Unicode => \"\\u{2620}\", // ☠ (skull and crossbones)\n GlyphMode::Ascii => \"[x]\",\n }\n}\n```\n\n### 2. 
Summary header renderer\nCreate `src/cli/commands/me/render_human.rs`:\n```rust\nuse crate::cli::render::{section_divider, format_relative_time, Theme, Icons, LoreRenderer};\nuse super::types::{MeDashboard, AttentionState};\n\npub fn render_summary(dashboard: &MeDashboard, single_project: bool) -> String {\n let mut out = String::new();\n // Header\n let title = if single_project {\n format!(\"My Dashboard\")\n } else {\n format!(\"My Dashboard ({} projects)\", dashboard.summary.project_count)\n };\n out.push_str(&section_divider(&title));\n // Counts line\n out.push_str(&format!(\"\\n {} issues · {} authored MRs · {} reviewing MRs\",\n dashboard.summary.open_issues,\n dashboard.summary.authored_mrs,\n dashboard.summary.reviewing_mrs));\n if dashboard.summary.needs_attention_count > 0 {\n out.push_str(&format!(\" · {} need attention\",\n Theme::warning().render(dashboard.summary.needs_attention_count)));\n }\n // Legend\n let icons = LoreRenderer::icons();\n out.push_str(\"\\n\\n \");\n out.push_str(&format!(\"{} Needs attention \", Theme::warning().render(icons.attention_needs())));\n out.push_str(&format!(\"{} Not started \", Theme::info().render(icons.attention_not_started())));\n out.push_str(&format!(\"{} Awaiting response \", Theme::dim().render(icons.attention_awaiting())));\n out.push_str(&format!(\"{} Stale\", Theme::dim().render(icons.attention_stale())));\n out\n}\n```\n\n### 3. 
Attention icon helper\nShared function for rendering an attention icon by state:\n```rust\npub fn attention_icon(state: &AttentionState) -> String {\n let icons = LoreRenderer::icons();\n match state {\n AttentionState::NeedsAttention => Theme::warning().render(icons.attention_needs()),\n AttentionState::NotStarted => Theme::info().render(icons.attention_not_started()),\n AttentionState::AwaitingResponse => Theme::dim().render(icons.attention_awaiting()),\n AttentionState::Stale => Theme::dim().render(icons.attention_stale()),\n AttentionState::NotReady => String::new(), // no icon — draft indicator suffices\n }\n}\n```\n\n## Acceptance Criteria\n- [ ] Four new methods on Icons: attention_needs, attention_not_started, attention_awaiting, attention_stale\n- [ ] Nerd Font icons: bell, star, clock, skull (AC-6.2)\n- [ ] Unicode fallback: diamond, star, clock, skull+crossbones (AC-6.2)\n- [ ] ASCII fallback: [!], [*], [~], [x] (AC-6.2)\n- [ ] Summary header shows project count, issue count, authored MRs, reviewing MRs (AC-5.5)\n- [ ] needs_attention count highlighted in amber/warning (AC-5.5)\n- [ ] Attention legend shows all 4 icons with labels and correct colors\n- [ ] NotReady renders no icon (empty string) — draft indicator only\n- [ ] Uses existing `section_divider()` for header\n- [ ] Uses existing `Theme::warning()`, `Theme::info()`, `Theme::dim()` for colors\n\n## Files\n- MODIFY: src/cli/render.rs (add 4 attention icon methods to Icons impl)\n- CREATE: src/cli/commands/me/render_human.rs (render_summary + attention_icon helper)\n- MODIFY: src/cli/commands/me/mod.rs (add `pub mod render_human;`)\n\n## TDD Anchor\nRED: Write `test_attention_icon_needs_attention_not_empty` that calls `attention_icon(&AttentionState::NeedsAttention)` and asserts result is non-empty.\nGREEN: Implement the icon methods and helper.\nVERIFY: `cargo test attention_icon`\n\nAdditional tests:\n- test_attention_icon_not_ready_is_empty\n- test_summary_contains_counts (create dashboard with 
known counts, verify output)\n\n## Edge Cases\n- Icons struct accessed via `LoreRenderer::icons()` — must be initialized first (in tests, call `LoreRenderer::init()`)\n- The render functions return String (not print directly) — enables testing\n- Pad attention icons to consistent width? Nerd icons are typically 1-2 chars, ASCII is 3 chars — may need padding for alignment\n\n## Dependency Context\nUses `MeDashboard`, `MeSummary`, `AttentionState` from bd-3bwh.\nModifies `Icons` struct in `src/cli/render.rs` (existing file).\nPattern: `section_divider()` at render.rs:600, `Theme` at render.rs:291.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:38:51.330729Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.058934Z","closed_at":"2026-02-20T16:09:13.058888Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1vxq","depends_on_id":"bd-3bwh","type":"blocks","created_at":"2026-02-19T19:41:27.834945Z","created_by":"tayloreernisse"}]} {"id":"bd-1wa2","title":"Design Actionable Insights panel (heuristic queries TBD)","description":"## Background\nThe PRD specifies an Actionable Insights panel on the Dashboard that surfaces heuristic signals: stale P1 issues, blocked MRs awaiting review, velocity spikes/dips, and assignee workload imbalance. This requires heuristic query functions that do NOT currently exist in the lore codebase.\n\nSince the TUI work is purely UI built on existing code, the Actionable Insights panel is deferred to a later phase when the heuristic queries are implemented. This bead tracks the design and eventual implementation.\n\n## Approach\nWhen ready to implement:\n1. Define InsightKind enum: StaleHighPriority, BlockedMR, VelocityAnomaly, WorkloadImbalance\n2. Define Insight struct: kind, severity (Info/Warning/Critical), title, description, entity_refs (Vec)\n3. Implement heuristic query functions in lore core (NOT in TUI crate)\n4. 
Wire insights into DashboardState as Optional>\n5. Render as a scrollable panel with severity-colored icons\n\n## Acceptance Criteria\n- [ ] InsightKind and Insight types defined\n- [ ] At least 2 heuristic queries implemented (stale P1, blocked MR)\n- [ ] Dashboard renders insights panel when data available\n- [ ] Insights panel is scrollable with j/k\n- [ ] Enter on insight navigates to related entity\n- [ ] Empty insights shows \"No insights\" or hides panel entirely\n\n## Status\nBLOCKED: Requires heuristic query functions that don't exist yet. This is NOT a TUI-only task — it requires backend query work first.\n\n## Files\n- CREATE: src/core/insights.rs (heuristic query functions — in main crate, not TUI)\n- MODIFY: crates/lore-tui/src/state/dashboard.rs (add insights field)\n- MODIFY: crates/lore-tui/src/view/dashboard.rs (add insights panel)\n\n## Edge Cases\n- Insights depend on data freshness: stale DB = stale insights. Show \"last updated\" timestamp.\n- Heuristic thresholds should be configurable (e.g., \"stale\" = P1 untouched for 7 days)\n- Large number of insights: cap at 20, show \"N more...\" link","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-12T18:08:15.172539Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:50.980246Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-1wa2","depends_on_id":"bd-35g5","type":"blocks","created_at":"2026-02-12T18:11:50.980054Z","created_by":"tayloreernisse"}]} {"id":"bd-1x6","title":"Implement lore sync CLI command","description":"## Background\nThe sync command is the unified orchestrator for the full pipeline: ingest -> generate-docs -> embed. It replaces the need to run three separate commands. It acquires a lock, runs each stage sequentially, and reports combined results. Individual stages can be skipped via flags (--no-embed, --no-docs). The command is designed for cron/scheduled execution. 
Individual commands (`lore generate-docs`, `lore embed`) still exist for manual recovery and debugging.\n\n## Approach\nCreate `src/cli/commands/sync.rs` per PRD Section 6.4.\n\n**IMPORTANT: run_sync is async** (embed_documents and search_hybrid are async).\n\n**Key types (PRD-exact):**\n```rust\n#[derive(Debug, Serialize)]\npub struct SyncResult {\n pub issues_updated: usize,\n pub mrs_updated: usize,\n pub discussions_fetched: usize,\n pub documents_regenerated: usize,\n pub documents_embedded: usize,\n}\n\n#[derive(Debug, Default)]\npub struct SyncOptions {\n pub full: bool, // Reset cursors, fetch everything\n pub force: bool, // Override stale lock\n pub no_embed: bool, // Skip embedding step\n pub no_docs: bool, // Skip document regeneration\n}\n```\n\n**Core function (async, PRD-exact):**\n```rust\npub async fn run_sync(config: &Config, options: SyncOptions) -> Result\n```\n\n**Pipeline (sequential steps per PRD):**\n1. Acquire app lock with heartbeat (via existing `src/core/lock.rs`)\n2. Ingest delta: fetch issues + MRs via cursor-based sync (calls existing ingestion orchestrator)\n - Each upserted entity marked dirty via `mark_dirty_tx(&tx)` inside ingestion transaction\n3. Process `pending_discussion_fetches` queue (bounded)\n - Discussion sweep uses CTE to capture stale IDs, then cascading deletes\n4. Regenerate documents from `dirty_sources` queue (unless --no-docs)\n5. Embed documents with changed content_hash (unless --no-embed; skipped gracefully if Ollama unavailable)\n6. Release lock, record sync_run\n\n**NOTE (PRD):** Rolling backfill window removed — the existing cursor + watermark design handles old issues with resumed activity. 
GitLab updates `updated_at` when new comments are added, so the cursor naturally picks up old issues that receive new activity.\n\n**CLI args (PRD-exact):**\n```rust\n#[derive(Args)]\npub struct SyncArgs {\n /// Reset cursors, fetch everything\n #[arg(long)]\n full: bool,\n /// Override stale lock\n #[arg(long)]\n force: bool,\n /// Skip embedding step\n #[arg(long)]\n no_embed: bool,\n /// Skip document regeneration\n #[arg(long)]\n no_docs: bool,\n}\n```\n\n**Human output:**\n```\nSync complete:\n Issues updated: 42\n MRs updated: 18\n Discussions fetched: 56\n Documents regenerated: 38\n Documents embedded: 38\n Elapsed: 2m 15s\n```\n\n**JSON output:**\n```json\n{\"ok\": true, \"data\": {...}, \"meta\": {\"elapsed_ms\": 135000}}\n```\n\n## Acceptance Criteria\n- [ ] Function is `async fn run_sync`\n- [ ] Takes `SyncOptions` struct (not separate params)\n- [ ] Returns `SyncResult` with flat fields (not nested sub-structs)\n- [ ] Full pipeline orchestrated: ingest -> discussion queue -> docs -> embed\n- [ ] --full resets cursors (passes through to ingest)\n- [ ] --force overrides stale sync lock\n- [ ] --no-embed skips embedding stage (Ollama not needed)\n- [ ] --no-docs skips document regeneration stage\n- [ ] Discussion queue processing bounded per run\n- [ ] Dirty sources marked inside ingestion transactions (via mark_dirty_tx)\n- [ ] Progress reporting: stage names + elapsed time\n- [ ] Lock acquired with heartbeat at start, released at end (even on error)\n- [ ] Embedding skipped gracefully if Ollama unavailable (warning, not error)\n- [ ] JSON summary in robot mode\n- [ ] Human-readable summary with elapsed time\n- [ ] `cargo build` succeeds\n\n## Files\n- `src/cli/commands/sync.rs` — new file\n- `src/cli/commands/mod.rs` — add `pub mod sync;`\n- `src/cli/mod.rs` — add SyncArgs, wire up sync subcommand\n- `src/main.rs` — add sync command handler (async dispatch)\n\n## TDD Loop\nRED: Integration test requiring full pipeline\nGREEN: Implement run_sync 
orchestration (async)\nVERIFY: `cargo build && cargo test sync`\n\n## Edge Cases\n- Ollama unavailable + --no-embed not set: sync should NOT fail — embed stage logs warning, returns 0 embedded\n- Lock already held: error unless --force (and lock is stale)\n- No dirty sources after ingest: regeneration stage returns 0 (not error)\n- --full with large dataset: keyset pagination prevents OFFSET degradation","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:27:09.577782Z","created_by":"tayloreernisse","updated_at":"2026-01-30T18:05:34.676100Z","closed_at":"2026-01-30T18:05:34.676035Z","close_reason":"Sync CLI: async run_sync orchestrator with 4-stage pipeline (ingest issues, ingest MRs, generate-docs, embed), SyncOptions/SyncResult, --full/--force/--no-embed/--no-docs flags, graceful Ollama degradation, human+JSON output, clean build, all tests pass","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1x6","depends_on_id":"bd-1i2","type":"blocks","created_at":"2026-01-30T15:29:35.287132Z","created_by":"tayloreernisse"},{"issue_id":"bd-1x6","depends_on_id":"bd-1je","type":"blocks","created_at":"2026-01-30T15:29:35.250622Z","created_by":"tayloreernisse"},{"issue_id":"bd-1x6","depends_on_id":"bd-2sx","type":"blocks","created_at":"2026-01-30T15:29:35.179059Z","created_by":"tayloreernisse"},{"issue_id":"bd-1x6","depends_on_id":"bd-38q","type":"blocks","created_at":"2026-01-30T15:29:35.213566Z","created_by":"tayloreernisse"},{"issue_id":"bd-1x6","depends_on_id":"bd-3qs","type":"blocks","created_at":"2026-01-30T15:29:35.144296Z","created_by":"tayloreernisse"}]} +{"id":"bd-1xuf","title":"Implement attention state SQL computation","description":"## Background\nThe attention state model (AC-4) is the core intelligence of the dashboard. It classifies each work item based on comment activity: who commented last, and when. 
The computation uses CTEs and a CASE expression with 5 branches, evaluated in priority order.\n\n## Approach\nCreate `src/cli/commands/me/attention.rs`:\n```rust\nuse rusqlite::Connection;\nuse crate::core::error::Result;\nuse super::types::{AttentionState, MeIssue, MeMrAuthored, MeMrReviewing};\n\npub fn enrich_attention_states(\n conn: &Connection,\n username: &str,\n issues: &mut [MeIssue],\n authored_mrs: &mut [MeMrAuthored],\n reviewing_mrs: &mut [MeMrReviewing],\n) -> Result<()>\n```\n\nThis runs AFTER the raw items are fetched. For each work item type, execute a single SQL query that computes the attention state for all items at once, then update the `attention_state` field on each struct.\n\n### Schema context for the SQL\n\n**notes** (`src/core/db.rs` migration 002): `id, gitlab_id, discussion_id (FK→discussions), project_id, note_type, is_system (INTEGER 0/1), author_username, body, created_at (ms epoch), updated_at, ...`\n\n**discussions** (migration 002): `id, gitlab_discussion_id, project_id, issue_id (FK→issues, nullable), merge_request_id (nullable), noteable_type ('Issue'|'MergeRequest'), ...`\n- CHECK constraint: exactly one of `issue_id`/`merge_request_id` is non-NULL based on `noteable_type`\n\n**mr_reviewers** (migration 006): `merge_request_id (FK), username TEXT, PRIMARY KEY(merge_request_id, username)`\n\n**merge_requests** (migration 006): `draft INTEGER NOT NULL DEFAULT 0` (0/1 boolean)\n\n### Attention SQL pattern\n\nFor issues, the query computes per-issue attention state:\n```sql\nWITH my_latest AS (\n SELECT d.issue_id,\n MAX(n.created_at) AS ts\n FROM notes n\n JOIN discussions d ON n.discussion_id = d.id\n WHERE n.is_system = 0\n AND n.author_username = ?username\n AND d.issue_id IS NOT NULL\n GROUP BY d.issue_id\n),\nothers_latest AS (\n SELECT d.issue_id,\n MAX(n.created_at) AS ts\n FROM notes n\n JOIN discussions d ON n.discussion_id = d.id\n WHERE n.is_system = 0\n AND n.author_username != ?username\n AND d.issue_id IS NOT NULL\n 
GROUP BY d.issue_id\n),\nany_latest AS (\n SELECT d.issue_id,\n MAX(n.created_at) AS ts\n FROM notes n\n JOIN discussions d ON n.discussion_id = d.id\n WHERE n.is_system = 0\n AND d.issue_id IS NOT NULL\n GROUP BY d.issue_id\n)\nSELECT i.id,\n CASE\n -- 2. needs_attention: others commented AND (I haven't OR others' latest > mine)\n WHEN ol.ts IS NOT NULL AND (ml.ts IS NULL OR ol.ts > ml.ts) THEN 'needs_attention'\n -- 3. stale: any notes exist AND latest > 30 days old\n WHEN al.ts IS NOT NULL AND al.ts < (?now_ms - 30 * 86400 * 1000) THEN 'stale'\n -- 4. awaiting_response: my latest >= others' latest\n WHEN ml.ts IS NOT NULL AND (ol.ts IS NULL OR ml.ts >= ol.ts) THEN 'awaiting_response'\n -- 5. not_started: zero non-system notes\n ELSE 'not_started'\n END AS attention\nFROM issues i\nLEFT JOIN my_latest ml ON ml.issue_id = i.id\nLEFT JOIN others_latest ol ON ol.issue_id = i.id\nLEFT JOIN any_latest al ON al.issue_id = i.id\nWHERE i.id IN (...)\n```\n\nFor MRs, prepend a `not_ready` check before the CASE:\n```sql\nWHEN mr.draft = 1 AND NOT EXISTS (\n SELECT 1 FROM mr_reviewers mrr WHERE mrr.merge_request_id = mr.id\n) THEN 'not_ready'\n```\n\n### Implementation strategy\n\n1. Collect all internal DB `id` values from the fetched issue/MR vecs\n2. Run the CTE query with those IDs as parameters (use `rusqlite::params_from_iter` or IN-list)\n3. Build a `HashMap` from results\n4. Iterate over the mutable slices, setting `attention_state` from the map\n5. Default to `NotStarted` for any ID not found (shouldn't happen but safe fallback)\n\nUse `AttentionState::from_sql_str(s)` (from bd-1vai) to convert the SQL CASE string to the enum.\n\nNote: The fetched structs (`MeIssue`, `MeMrAuthored`, `MeMrReviewing`) will need an internal `id: i64` field (the DB primary key) for this lookup. This field should NOT serialize to JSON (use `#[serde(skip)]`). 
Ensure bd-3bwh includes this.\n\n## Acceptance Criteria\n- [ ] not_ready: MR with draft=1 AND zero mr_reviewers → NotReady (AC-4.4.1)\n- [ ] not_ready: MR with draft=1 BUT has reviewers → falls through to other states\n- [ ] needs_attention: others commented, I haven't → NeedsAttention (AC-4.4.2)\n- [ ] needs_attention: others' latest > my latest → NeedsAttention\n- [ ] stale: notes exist but latest is >30 days old → Stale (AC-4.4.3)\n- [ ] stale: zero notes → NOT stale (falls to not_started)\n- [ ] not_started: zero non-system notes from anyone → NotStarted (AC-4.4.4)\n- [ ] awaiting_response: my latest >= others' latest → AwaitingResponse (AC-4.4.5)\n- [ ] awaiting_response: only my notes, no others → AwaitingResponse\n- [ ] Applied to issues, authored MRs, and reviewing MRs (AC-4.5)\n- [ ] Only considers non-system notes (is_system = 0) (AC-4.2)\n- [ ] Uses AttentionState::from_sql_str() for enum conversion\n\n## Files\n- CREATE: src/cli/commands/me/attention.rs\n- MODIFY: src/cli/commands/me/mod.rs (add `pub mod attention;`)\n\n## TDD Anchor\nRED: Write `test_needs_attention_when_others_commented_after_me` using in-memory DB (`create_connection(Path::new(\":memory:\"))` + `run_migrations(&conn)`). Insert a project, issue, issue_assignee, discussion (noteable_type='Issue', issue_id set), two notes: mine at created_at=100000, theirs at created_at=200000. Call `enrich_attention_states`. 
Assert issue's attention_state = NeedsAttention.\n\nGREEN: Implement the SQL CTE query.\nVERIFY: `cargo test attention`\n\nAdditional tests:\n- test_needs_attention_others_commented_i_havent\n- test_awaiting_response_i_commented_last\n- test_awaiting_response_only_my_notes\n- test_not_started_zero_notes\n- test_stale_old_notes (set any_latest.ts to > 30 days ago using a fixed \"now\" param)\n- test_stale_not_applied_to_zero_notes\n- test_not_ready_draft_no_reviewers (insert MR with draft=1, no mr_reviewers rows)\n- test_not_ready_draft_with_reviewers_falls_through\n- test_system_notes_excluded (insert is_system=1 note, verify not counted)\n\n## Edge Cases\n- notes join through discussions: `notes.discussion_id → discussions.id`, then `discussions.issue_id / discussions.merge_request_id`\n- \"30 days\" stale threshold: use current time (ms) minus `30 * 86_400 * 1_000` ms. Pass `now_ms` as a parameter for testability (don't call `SystemTime::now()` inside SQL).\n- An item with ONLY system notes → zero non-system notes → `not_started`\n- NULL handling: LEFT JOINs mean `ml.ts`, `ol.ts`, `al.ts` can all be NULL — the CASE handles this with IS NOT NULL / IS NULL checks\n- The `id IN (...)` list may be empty (no issues fetched) — skip the query entirely for empty slices\n\n## Dependency Context\nUses `AttentionState` enum and its `from_sql_str()` from bd-1vai (same `types.rs` file).\nUses `MeIssue`, `MeMrAuthored`, `MeMrReviewing` structs from bd-3bwh — these need an internal `id: i64` field with `#[serde(skip)]`.\nRuns after the fetch queries from bd-joja (issues), bd-1obt (authored MRs), bd-1fgr (reviewing MRs).\nCalled by the handler in bd-1vv8.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:37:34.521556Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.054002Z","closed_at":"2026-02-20T16:09:13.053951Z","close_reason":"Implemented by lore-me agent 
swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-1xuf","depends_on_id":"bd-1fgr","type":"blocks","created_at":"2026-02-19T19:41:17.966110Z","created_by":"tayloreernisse"},{"issue_id":"bd-1xuf","depends_on_id":"bd-1obt","type":"blocks","created_at":"2026-02-19T19:41:17.883403Z","created_by":"tayloreernisse"},{"issue_id":"bd-1xuf","depends_on_id":"bd-joja","type":"blocks","created_at":"2026-02-19T19:41:17.805459Z","created_by":"tayloreernisse"}]} {"id":"bd-1y7q","title":"Write invariant tests for ranking system","description":"## Background\nInvariant tests catch subtle ranking regressions that example-based tests miss. These test properties that must hold for ANY input, not specific values.\n\n## Approach\n\n### test_score_monotonicity_by_age:\nGenerate 50 random (age_ms, half_life_days) pairs using a simple LCG PRNG (deterministic seed for reproducibility). Assert decay(older) <= decay(newer) for all pairs where older > newer. Tests the pure half_life_decay() function only.\n\n### test_row_order_independence:\nInsert the same 5 signals in two orderings (forward and reverse). Run query_expert on both -> assert identical username ordering and identical scores (f64 bit-equal). Use a deterministic dataset with varied timestamps.\n\n### test_reviewer_split_is_exhaustive:\nSet up 3 reviewers on the same MR:\n1. Reviewer with substantive DiffNotes (>= 20 chars) -> must appear in participated ONLY\n2. Reviewer with no DiffNotes -> must appear in assigned-only ONLY\n3. Reviewer with trivial note (< 20 chars) -> must appear in assigned-only ONLY\nUse --explain-score to verify each reviewer's components: participated reviewer has reviewer_participated > 0 and reviewer_assigned == 0; others have reviewer_assigned > 0 and reviewer_participated == 0.\n\n### test_deterministic_accumulation_order:\nInsert signals for one user with 15 MRs at varied timestamps. Run query_expert 100 times in a loop. 
Assert all 100 runs produce the exact same f64 score (use == not approx, to verify bit-identical results from sorted accumulation).\n\n## Acceptance Criteria\n- [ ] All 4 tests pass\n- [ ] No flakiness across 10 consecutive cargo test runs\n- [ ] test_score_monotonicity covers at least 50 random pairs\n- [ ] test_deterministic_accumulation runs at least 100 iterations\n\n## Files\n- src/cli/commands/who.rs (test module)\n\n## Edge Cases\n- LCG PRNG for monotonicity test: use fixed seed, not rand crate (avoid dependency)\n- Bit-identical f64: use assert_eq!(a, b) not approx — the deterministic ordering guarantees this\n- Row order test: must insert in genuinely different orders, not just shuffled within same transaction","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-09T17:00:35.774542Z","created_by":"tayloreernisse","updated_at":"2026-02-09T17:17:18.920235Z","closed_at":"2026-02-09T17:17:18.920188Z","close_reason":"Tests distributed to implementation beads: monotonicity->bd-1soz, row_order+split+deterministic->bd-13q8","compaction_level":0,"original_size":0,"labels":["scoring","test"]} {"id":"bd-1y8","title":"Implement chunk ID encoding module","description":"## Background\nsqlite-vec uses a single integer rowid for embeddings. To store multiple chunks per document, we encode (document_id, chunk_index) into a single rowid using a multiplier. This module is shared between the embedding pipeline (encode on write) and vector search (decode on read). 
The encoding scheme supports up to 1000 chunks per document.\n\n## Approach\nCreate `src/embedding/chunk_ids.rs`:\n\n```rust\n/// Multiplier for encoding (document_id, chunk_index) into a single rowid.\n/// Supports up to 1000 chunks per document (32M chars at 32k/chunk).\npub const CHUNK_ROWID_MULTIPLIER: i64 = 1000;\n\n/// Encode (document_id, chunk_index) into a sqlite-vec rowid.\n///\n/// rowid = document_id * CHUNK_ROWID_MULTIPLIER + chunk_index\npub fn encode_rowid(document_id: i64, chunk_index: i64) -> i64 {\n document_id * CHUNK_ROWID_MULTIPLIER + chunk_index\n}\n\n/// Decode a sqlite-vec rowid back into (document_id, chunk_index).\npub fn decode_rowid(rowid: i64) -> (i64, i64) {\n let document_id = rowid / CHUNK_ROWID_MULTIPLIER;\n let chunk_index = rowid % CHUNK_ROWID_MULTIPLIER;\n (document_id, chunk_index)\n}\n```\n\nAlso create the parent module `src/embedding/mod.rs`:\n```rust\npub mod chunk_ids;\n// Later beads add: pub mod ollama; pub mod pipeline;\n```\n\nUpdate `src/lib.rs`: add `pub mod embedding;`\n\n## Acceptance Criteria\n- [ ] `encode_rowid(42, 0)` == 42000\n- [ ] `encode_rowid(42, 5)` == 42005\n- [ ] `decode_rowid(42005)` == (42, 5)\n- [ ] Roundtrip: decode(encode(doc_id, chunk_idx)) == (doc_id, chunk_idx) for all valid inputs\n- [ ] CHUNK_ROWID_MULTIPLIER is 1000\n- [ ] `cargo test chunk_ids` passes\n\n## Files\n- `src/embedding/chunk_ids.rs` — new file\n- `src/embedding/mod.rs` — new file (module root)\n- `src/lib.rs` — add `pub mod embedding;`\n\n## TDD Loop\nRED: Tests in `#[cfg(test)] mod tests`:\n- `test_encode_single_chunk` — encode(1, 0) == 1000\n- `test_encode_multi_chunk` — encode(1, 5) == 1005\n- `test_decode_roundtrip` — property test over range of doc_ids and chunk_indices\n- `test_decode_zero_chunk` — decode(42000) == (42, 0)\n- `test_multiplier_value` — assert CHUNK_ROWID_MULTIPLIER == 1000\nGREEN: Implement encode_rowid, decode_rowid\nVERIFY: `cargo test chunk_ids`\n\n## Edge Cases\n- chunk_index >= 1000: not expected 
(documents that large would be pathological), but no runtime panic — just incorrect decode. The embedding pipeline caps chunks well below this.\n- document_id = 0: valid (encode returns chunk_index directly)","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-30T15:26:34.060769Z","created_by":"tayloreernisse","updated_at":"2026-01-30T16:51:59.048910Z","closed_at":"2026-01-30T16:51:59.048843Z","close_reason":"Completed: chunk_ids module with encode_rowid/decode_rowid, CHUNK_ROWID_MULTIPLIER=1000, 6 tests pass","compaction_level":0,"original_size":0} {"id":"bd-1yu","title":"[CP1] GitLab types for issues, discussions, notes","description":"Add TypeScript interfaces for GitLab API responses.\n\nTypes to add to src/gitlab/types.ts:\n- GitLabIssue: id, iid, project_id, title, description, state, timestamps, author, labels[], labels_details?, web_url\n- GitLabDiscussion: id (string), individual_note, notes[]\n- GitLabNote: id, type, body, author, timestamps, system, resolvable, resolved, resolved_by, resolved_at, position?\n\nFiles: src/gitlab/types.ts\nDone when: Types compile and match GitLab API documentation","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:19:00.558718Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.153996Z","deleted_at":"2026-01-25T15:21:35.153993Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} @@ -136,9 +144,11 @@ {"id":"bd-2fm","title":"Add GitLab Resource Event serde types","description":"## Background\nNeed Rust types for deserializing GitLab Resource Events API responses. These map directly to the API JSON shape from three endpoints: resource_state_events, resource_label_events, resource_milestone_events.\n\nExisting pattern: types.rs uses #[derive(Debug, Clone, Deserialize)] with Option for nullable fields. GitLabAuthor is already defined (id, username, name). 
Tests in tests/gitlab_types_tests.rs use serde_json::from_str with sample payloads.\n\n## Approach\nAdd to src/gitlab/types.rs (after existing types):\n\n```rust\n/// Reference to an MR in state event's source_merge_request field\n#[derive(Debug, Clone, Deserialize, Serialize)]\npub struct GitLabMergeRequestRef {\n pub iid: i64,\n pub title: Option,\n pub web_url: Option,\n}\n\n/// Reference to a label in label event's label field\n#[derive(Debug, Clone, Deserialize, Serialize)]\npub struct GitLabLabelRef {\n pub id: i64,\n pub name: String,\n pub color: Option,\n pub description: Option,\n}\n\n/// Reference to a milestone in milestone event's milestone field\n#[derive(Debug, Clone, Deserialize, Serialize)]\npub struct GitLabMilestoneRef {\n pub id: i64,\n pub iid: i64,\n pub title: String,\n}\n\n#[derive(Debug, Clone, Deserialize, Serialize)]\npub struct GitLabStateEvent {\n pub id: i64,\n pub user: Option,\n pub created_at: String,\n pub resource_type: String, // \"Issue\" | \"MergeRequest\"\n pub resource_id: i64,\n pub state: String, // \"opened\" | \"closed\" | \"reopened\" | \"merged\" | \"locked\"\n pub source_commit: Option,\n pub source_merge_request: Option,\n}\n\n#[derive(Debug, Clone, Deserialize, Serialize)]\npub struct GitLabLabelEvent {\n pub id: i64,\n pub user: Option,\n pub created_at: String,\n pub resource_type: String,\n pub resource_id: i64,\n pub label: GitLabLabelRef,\n pub action: String, // \"add\" | \"remove\"\n}\n\n#[derive(Debug, Clone, Deserialize, Serialize)]\npub struct GitLabMilestoneEvent {\n pub id: i64,\n pub user: Option,\n pub created_at: String,\n pub resource_type: String,\n pub resource_id: i64,\n pub milestone: GitLabMilestoneRef,\n pub action: String, // \"add\" | \"remove\"\n}\n```\n\nAlso export from src/gitlab/mod.rs if needed.\n\n## Acceptance Criteria\n- [ ] All 6 types (3 events + 3 refs) compile\n- [ ] GitLabStateEvent deserializes from real GitLab API JSON (with and without source_merge_request)\n- [ ] 
GitLabLabelEvent deserializes with nested label object\n- [ ] GitLabMilestoneEvent deserializes with nested milestone object\n- [ ] All Optional fields handle null/missing correctly\n- [ ] Types exported from lore::gitlab::types\n\n## Files\n- src/gitlab/types.rs (add 6 new types)\n- tests/gitlab_types_tests.rs (add deserialization tests)\n\n## TDD Loop\nRED: Add to tests/gitlab_types_tests.rs:\n- `test_deserialize_state_event_closed_by_mr` - JSON with source_merge_request present\n- `test_deserialize_state_event_simple` - JSON with source_merge_request null, user null\n- `test_deserialize_label_event_add` - label add with full label object\n- `test_deserialize_label_event_remove` - label remove\n- `test_deserialize_milestone_event` - milestone add with nested milestone\nImport new types: `use lore::gitlab::types::{GitLabStateEvent, GitLabLabelEvent, GitLabMilestoneEvent, GitLabMergeRequestRef, GitLabLabelRef, GitLabMilestoneRef};`\n\nGREEN: Add the type definitions to types.rs\n\nVERIFY: `cargo test gitlab_types_tests -- --nocapture`\n\n## Edge Cases\n- GitLab sometimes returns user: null for system-generated events (e.g., auto-close on merge) — user must be Option\n- source_merge_request can be null even when state is \"closed\" (manually closed, not by MR)\n- label.color may be null for labels created via API without color\n- The resource_type field uses PascalCase (\"MergeRequest\" not \"merge_request\") — don't confuse with DB entity_type","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:31:24.081234Z","created_by":"tayloreernisse","updated_at":"2026-02-03T16:10:20.253407Z","closed_at":"2026-02-03T16:10:20.253344Z","close_reason":"Completed: Added 6 new types (GitLabMergeRequestRef, GitLabLabelRef, GitLabMilestoneRef, GitLabStateEvent, GitLabLabelEvent, GitLabMilestoneEvent) to types.rs with exports and 8 passing 
tests","compaction_level":0,"original_size":0,"labels":["gate-1","phase-b","types"],"dependencies":[{"issue_id":"bd-2fm","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-02T21:31:24.085809Z","created_by":"tayloreernisse"}]} {"id":"bd-2fp","title":"Implement discussion document extraction","description":"## Background\nDiscussion documents are the most complex extraction — they involve querying discussions + notes + parent entity (issue or MR) + parent labels + DiffNote file paths. The output includes a threaded conversation format with author/date prefixes per note. System notes (bot-generated) are excluded. DiffNote paths are extracted for the --path search filter.\n\n## Approach\nImplement `extract_discussion_document()` in `src/documents/extractor.rs`:\n\n```rust\n/// Extract a searchable document from a discussion thread.\n/// Returns None if the discussion or its parent has been deleted.\npub fn extract_discussion_document(conn: &Connection, discussion_id: i64) -> Result>\n```\n\n**SQL queries (from PRD Section 2.2):**\n```sql\n-- Discussion metadata\nSELECT d.id, d.noteable_type, d.issue_id, d.merge_request_id,\n p.path_with_namespace, p.id AS project_id\nFROM discussions d\nJOIN projects p ON p.id = d.project_id\nWHERE d.id = ?\n\n-- Parent entity (conditional on noteable_type)\n-- If Issue: SELECT i.iid, i.title, i.web_url FROM issues i WHERE i.id = ?\n-- If MR: SELECT m.iid, m.title, m.web_url FROM merge_requests m WHERE m.id = ?\n\n-- Parent labels (via issue_labels or mr_labels junction)\n\n-- Non-system notes in thread order\nSELECT n.author_username, n.body, n.created_at, n.gitlab_id,\n n.note_type, n.position_old_path, n.position_new_path\nFROM notes n\nWHERE n.discussion_id = ? 
AND n.is_system = 0\nORDER BY n.created_at ASC, n.id ASC\n```\n\n**Document format:**\n```\n[[Discussion]] Issue #234: Authentication redesign\nProject: group/project-one\nURL: https://gitlab.example.com/group/project-one/-/issues/234#note_12345\nLabels: [\"bug\", \"auth\"]\nFiles: [\"src/auth/login.ts\"]\n\n--- Thread ---\n\n@johndoe (2024-03-15):\nI think we should move to JWT-based auth...\n\n@janedoe (2024-03-15):\nAgreed. What about refresh token strategy?\n```\n\n**Implementation steps:**\n1. Query discussion row — if not found, return Ok(None)\n2. Determine parent type (Issue or MR) from noteable_type\n3. Query parent entity for iid, title, web_url — if not found, return Ok(None)\n4. Query parent labels via appropriate junction table\n5. Query non-system notes ordered by created_at ASC, id ASC\n6. Extract DiffNote paths: collect position_old_path and position_new_path, dedup\n7. Construct URL: `{parent_web_url}#note_{first_note_gitlab_id}`\n8. Format header with [[Discussion]] prefix\n9. Format thread body: `@author (YYYY-MM-DD):\\nbody\\n\\n` per note\n10. Apply discussion truncation via `truncate_discussion()` if needed\n11. Author = first non-system note's author_username\n12. 
Compute hashes, return DocumentData\n\n## Acceptance Criteria\n- [ ] System notes (is_system=1) excluded from content\n- [ ] DiffNote paths extracted from position_old_path and position_new_path\n- [ ] Paths deduplicated and sorted\n- [ ] URL constructed as `parent_web_url#note_GITLAB_ID`\n- [ ] Header uses parent entity type: \"Issue #N\" or \"MR !N\"\n- [ ] Parent title included in header\n- [ ] Labels come from PARENT entity (not the discussion itself)\n- [ ] First non-system note author used as document author\n- [ ] Thread formatted with `@author (date):` per note\n- [ ] Truncation applied for long threads via truncate_discussion()\n- [ ] `cargo test extract_discussion` passes\n\n## Files\n- `src/documents/extractor.rs` — implement `extract_discussion_document()`\n\n## TDD Loop\nRED: Tests in `#[cfg(test)] mod tests`:\n- `test_discussion_document_format` — verify header + thread format\n- `test_discussion_not_found` — returns Ok(None)\n- `test_discussion_parent_deleted` — returns Ok(None) when parent issue/MR missing\n- `test_discussion_system_notes_excluded` — system notes not in content\n- `test_discussion_diffnote_paths` — old_path + new_path extracted and deduped\n- `test_discussion_url_construction` — URL has #note_GITLAB_ID anchor\n- `test_discussion_uses_parent_labels` — labels from parent entity, not discussion\nGREEN: Implement extract_discussion_document\nVERIFY: `cargo test extract_discussion`\n\n## Edge Cases\n- Discussion with all system notes: no non-system notes -> return empty thread (or skip document entirely?)\n- Discussion with NULL parent (orphaned): return Ok(None)\n- DiffNote with same old_path and new_path: dedup produces single entry\n- Notes with NULL body: skip or use empty string\n- Discussion on MR: header shows \"MR !N\" (not \"MergeRequest 
!N\")","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-30T15:25:45.549099Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:34:43.597398Z","closed_at":"2026-01-30T17:34:43.597339Z","close_reason":"Implemented extract_discussion_document() with parent entity lookup, DiffNote paths, system note exclusion, URL construction + 9 tests","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2fp","depends_on_id":"bd-18t","type":"blocks","created_at":"2026-01-30T15:29:15.914098Z","created_by":"tayloreernisse"},{"issue_id":"bd-2fp","depends_on_id":"bd-36p","type":"blocks","created_at":"2026-01-30T15:29:15.847680Z","created_by":"tayloreernisse"},{"issue_id":"bd-2fp","depends_on_id":"bd-hrs","type":"blocks","created_at":"2026-01-30T15:29:15.880008Z","created_by":"tayloreernisse"}]} {"id":"bd-2fr7","title":"Implement crash_context ring buffer + panic hook","description":"## Background\ncrash_context.rs provides a ring buffer of the last 2000 app events (key presses, messages, state transitions) plus a panic hook that dumps this buffer to a crash file for post-mortem diagnostics. 
This is critical for debugging TUI issues that only reproduce under specific interaction sequences.\n\n## Approach\nCreate `crates/lore-tui/src/crash_context.rs`:\n\n**CrashContext struct:**\n- events: VecDeque (capacity 2000)\n- push(event: CrashEvent) — append, auto-evict oldest when full\n- dump_to_file(path: &Path) -> io::Result<()> — write all events as newline-delimited JSON\n- install_panic_hook() — set_hook that calls dump_to_file to ~/.local/share/lore/crash-.json\n\n**CrashEvent enum:**\n- KeyPress { key: String, mode: InputMode, screen: Screen }\n- MsgDispatched { msg_name: String, screen: Screen }\n- StateTransition { from: Screen, to: Screen }\n- Error { message: String }\n- Custom { tag: String, detail: String }\n\n**Retention policy:** Keep last 5 crash files, delete older ones on startup.\n\n**Integration:** LoreApp.update() calls crash_context.push() for every Msg before dispatch. The crash_context is a field on LoreApp.\n\n## Acceptance Criteria\n- [ ] CrashContext stores up to 2000 events in ring buffer\n- [ ] push() evicts oldest event when buffer is full\n- [ ] dump_to_file() writes all events as newline-delimited JSON\n- [ ] Panic hook installed via std::panic::set_hook\n- [ ] Crash file written to ~/.local/share/lore/ with timestamp in filename\n- [ ] Retention: only last 5 crash files kept, older deleted on startup\n- [ ] Unit test: push 2500 events, assert only last 2000 retained\n- [ ] Unit test: dump_to_file writes valid NDJSON\n\n## Files\n- CREATE: crates/lore-tui/src/crash_context.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add pub mod crash_context)\n- MODIFY: crates/lore-tui/src/app.rs (add crash_context field, push in update(), install hook in init())\n\n## TDD Anchor\nRED: Write test_ring_buffer_evicts_oldest that creates CrashContext, pushes 2500 events, asserts len()==2000 and first event is event #501.\nGREEN: Implement CrashContext with VecDeque and capacity check.\nVERIFY: cargo test -p lore-tui crash_context\n\n## Edge Cases\n- 
Crash file directory doesn't exist: create it with fs::create_dir_all\n- Disk full during dump: best-effort, don't panic in the panic hook\n- Concurrent access: CrashContext is only accessed from the main update() thread, no sync needed\n- Event serialization: use serde_json::to_string, fallback to debug format if serialization fails\n\n## Dependency Context\nUsed by LoreApp (bd-6pmy) — added as a field and called in update(). Uses Screen and InputMode from core types (bd-c9gk).","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T18:08:10.416241Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:26.066867Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2fr7","depends_on_id":"bd-2tr4","type":"blocks","created_at":"2026-02-12T18:11:26.066842Z","created_by":"tayloreernisse"}]} +{"id":"bd-2fuw","title":"Render open issues section in human mode","description":"## Background\nRender the Open Issues section for `lore me` human output. 
It must show assigned issues with attention signal, status, and recency in a compact two-line layout.\n\n## Approach\nImplement in `src/cli/commands/me/render_human.rs`:\n```rust\npub fn render_issues_section(issues: &[MeIssue], single_project: bool) -> String\n```\n\nLayout per item:\n- Line 1: attention icon + `#iid` + title + optional `[status_name]` + relative time.\n- Line 2: dimmed `project_path` unless `single_project == true`.\n\nUse existing render helpers:\n- `section_divider` for header\n- `Theme::issue_ref`, `Theme::dim`, `Theme::warning`, `Theme::info`, `Theme::muted`\n- icon helpers from `bd-1vxq`\n- `format_relative_time` for recency\n\nTimestamp conversion rule:\n- Convert `updated_at_iso` to ms with `crate::core::time::iso_to_ms` before `format_relative_time`.\n- If parse fails, render `updated_at_iso` date text as fallback (no panic).\n\nAttention mapping:\n- `NeedsAttention` -> warning icon\n- `NotStarted` -> info/muted icon\n- `AwaitingResponse` -> info/dim icon\n- `Stale` -> dim icon\n- `NotReady` -> empty icon for issues\n\n## Acceptance Criteria\n- [ ] Header is `Open Issues (N)` via `section_divider`\n- [ ] Each row shows icon, `#iid`, title, optional status, and relative time\n- [ ] Project path renders on second line and is hidden when `single_project=true`\n- [ ] Icon tier follows renderer icon mode (Nerd/Unicode/ASCII)\n- [ ] `NotReady` renders no icon for issues\n- [ ] `status_name=None` renders no empty brackets\n- [ ] Empty list renders `No open issues`\n- [ ] Titles are not truncated\n- [ ] Invalid `updated_at_iso` does not panic and still renders timestamp fallback text\n\n## Files\n- MODIFY: `src/cli/commands/me/render_human.rs`\n\n## TDD Anchor\nRED:\n- `test_issues_section_renders_iid`\n- `test_issues_section_empty_shows_message`\n- `test_issues_section_hides_project_when_single`\n- `test_issues_section_shows_status_name`\n- `test_issues_section_omits_status_when_none`\n- `test_issues_section_invalid_iso_fallback`\n\nGREEN:\n- 
Implement renderer with safe ISO->ms conversion and fallback.\n\nVERIFY:\n- `cargo test issues_section`\n\n## Edge Cases\n- Very long titles remain full-width (no truncation).\n- Icon glyph widths differ across font tiers; keep one separating space after icon to maintain readable alignment.\n- Sorting is upstream in handler; renderer preserves input order.\n\n## Dependency Context\nConsumes `MeIssue` from `bd-3bwh` and icon helpers from `bd-1vxq`.\nCalled by `bd-1vv8` handler.\n\nDependencies:\n -> bd-1vxq (blocks) - Render summary header and attention legend\n\nDependents:\n <- bd-1vv8 (blocks) - Implement me command handler: wire queries to renderers","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:39:08.891551Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.060071Z","closed_at":"2026-02-20T16:09:13.060033Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2fuw","depends_on_id":"bd-1vxq","type":"blocks","created_at":"2026-02-19T19:41:27.920474Z","created_by":"tayloreernisse"}]} {"id":"bd-2g50","title":"Audit and fill data gaps: lore detail view vs glab","description":"## Background\nFor lore to be the definitive read path, its single-entity detail view must return everything glab returns PLUS lore-exclusive enrichments.\n\n## Current Issue Detail Output (lore -J issues N)\nFields returned: assignees, author_username, closing_merge_requests, created_at, description, discussions, due_date, id, iid, labels, milestone, project_path, state, status_color, status_icon_name, status_name, status_synced_at, title, updated_at, web_url\n\n## Gap Analysis (Verified 2026-02-12)\n\n### Raw Payload Audit\nIssue raw_payloads store exactly 15 fields: assignees, author, closed_at, created_at, description, due_date, id, iid, labels, milestone, project_id, state, title, updated_at, web_url.\n\nFields NOT in raw payloads (require ingestion pipeline update to 
capture from GitLab API):\n- closed_by, confidential, upvotes, downvotes, weight, issue_type, time_stats, health_status, references\n\n### Phase 1 — Computed fields (NO schema change, NO ingestion change)\nThese can be derived from existing data:\n1. `references_full`: format!(\"{path_with_namespace}#{iid}\") — project_path already in show.rs:IssueDetail\n2. `user_notes_count`: SELECT COUNT(*) FROM notes n JOIN discussions d ON n.discussion_id = d.id WHERE d.noteable_type = 'Issue' AND d.noteable_id = ? AND n.is_system = 0\n3. `merge_requests_count`: COUNT from closing_merge_requests vec already loaded in show.rs (just .len())\n\n### Phase 2 — Extract from existing raw payloads (schema change, NO ingestion change)\n`closed_at` IS in raw_payloads for closed issues. Can be backfilled:\n1. Add `closed_at TEXT` column to issues table (migration 023)\n2. Backfill: UPDATE issues SET closed_at = json_extract((SELECT payload FROM raw_payloads WHERE id = issues.raw_payload_id), '$.closed_at') WHERE state = 'closed'\n3. Capture during ingestion going forward\n\n### Phase 3 — Requires ingestion pipeline update (schema change + API capture)\nThese fields are in the GitLab Issues API response but NOT captured by lore's ingestion:\n1. `closed_by` (object with username) — add closed_by_username TEXT to issues\n2. `confidential` (boolean) — add confidential INTEGER DEFAULT 0 to issues\n3. Both require updating src/ingestion/ to extract these fields during sync\n\n### Phase 4 — Same audit for MR detail view\nMR detail (src/cli/commands/show.rs MrDetail struct lines 14-33) already includes: closed_at, merged_at, draft, source/target branch, reviewers. 
Missing: approvers_count, pipeline_status.\n\n## Implementation: show.rs Modifications\n\n### IssueDetail struct (src/cli/commands/show.rs:69-91)\nAdd fields:\n```rust\npub references_full: String, // Phase 1: computed\npub user_notes_count: i64, // Phase 1: computed\npub merge_requests_count: usize, // Phase 1: computed (closing_merge_requests.len())\npub closed_at: Option, // Phase 2: from DB after migration\npub confidential: bool, // Phase 3: from DB after ingestion update\n```\n\n### SQL for computed fields\n```sql\n-- user_notes_count\nSELECT COUNT(*) FROM notes n\nJOIN discussions d ON n.discussion_id = d.id\nWHERE d.noteable_type = 'Issue' AND d.noteable_id = ?1 AND n.is_system = 0\n\n-- references_full (in Rust)\nformat!(\"{}#{}\", project_path, iid)\n\n-- merge_requests_count (in Rust)\nclosing_merge_requests.len()\n```\n\n## Migration 023 (after bd-2l3s takes 022)\n```sql\n-- migrations/023_issue_detail_fields.sql\nALTER TABLE issues ADD COLUMN closed_at TEXT;\nALTER TABLE issues ADD COLUMN confidential INTEGER NOT NULL DEFAULT 0;\n\n-- Backfill closed_at from raw_payloads\nUPDATE issues SET closed_at = (\n SELECT json_extract(rp.payload, '$.closed_at')\n FROM raw_payloads rp\n WHERE rp.id = issues.raw_payload_id\n) WHERE state = 'closed' AND raw_payload_id IS NOT NULL;\n\nINSERT INTO schema_version (version, applied_at, description)\nVALUES (23, strftime('%s', 'now') * 1000, 'Issue detail fields: closed_at, confidential');\n```\n\nNOTE: raw_payload_id column on issues — verify this exists. 
If issues don't have a direct FK to raw_payloads, the backfill SQL needs adjustment (may need to join through another path).\n\n## TDD Loop\nRED: Tests in src/cli/commands/show.rs:\n- test_show_issue_has_references_full: insert issue with known project_path, assert JSON output contains \"project/path#123\"\n- test_show_issue_has_notes_count: insert issue + 3 user notes + 1 system note, assert user_notes_count = 3\n- test_show_issue_closed_has_closed_at: insert closed issue with closed_at in raw_payload, run migration, verify closed_at appears\n\nGREEN: Add computed fields to IssueDetail, add migration 023 for closed_at + confidential columns\n\nVERIFY:\n```bash\ncargo test show:: && cargo clippy --all-targets -- -D warnings\ncargo run --release -- -J issues 3864 | jq '{references_full, user_notes_count, merge_requests_count}'\n```\n\n## Acceptance Criteria\n- [ ] lore -J issues N includes references_full (string, e.g., \"vs/typescript-code#3864\")\n- [ ] lore -J issues N includes user_notes_count (integer, excludes system notes)\n- [ ] lore -J issues N includes merge_requests_count (integer)\n- [ ] lore -J issues N includes closed_at (ISO string for closed issues, null for open)\n- [ ] lore -J issues N includes confidential (boolean, after Phase 3)\n- [ ] --fields minimal preset updated to include references_full\n- [ ] Migration 023 adds closed_at and confidential columns to issues table\n- [ ] Backfill SQL populates closed_at from existing raw_payloads\n- [ ] cargo test passes with new show:: tests\n\n## Edge Cases\n- Issue with zero notes: user_notes_count = 0 (not null)\n- Issue with no closing MRs: merge_requests_count = 0\n- Open issue: closed_at = null (serialized as JSON null, not omitted)\n- confidential before Phase 3: default false (safe default)\n- MR detail: different computed fields (approvers_count, pipeline_status if available)\n- Raw payload missing for very old issues (raw_payload_id = NULL): closed_at stays NULL\n- raw_payload_id column: verify 
it exists on the issues table before writing backfill SQL\n\n## Files to Modify\n- src/cli/commands/show.rs (IssueDetail struct + query logic)\n- src/core/db.rs (migration 023: wire into MIGRATIONS array)\n- NEW: migrations/023_issue_detail_fields.sql\n- src/ingestion/ (Phase 3: capture closed_by, confidential during sync — specify exact file after reviewing ingestion pipeline)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T15:45:16.512418Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:49:01.580183Z","closed_at":"2026-02-12T16:49:01.580133Z","close_reason":"Data gaps filled: references_full, user_notes_count, merge_requests_count, closed_at, confidential via migration 023","compaction_level":0,"original_size":0,"labels":["cli","cli-imp","robot-mode"],"dependencies":[{"issue_id":"bd-2g50","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-02-12T15:45:16.514148Z","created_by":"tayloreernisse"}]} {"id":"bd-2h0","title":"[CP1] gi list issues command","description":"List issues from the database.\n\n## Module\nsrc/cli/commands/list.rs\n\n## Clap Definition\nList {\n #[arg(value_parser = [\"issues\", \"mrs\"])]\n entity: String,\n \n #[arg(long, default_value = \"20\")]\n limit: usize,\n \n #[arg(long)]\n project: Option,\n \n #[arg(long, value_parser = [\"opened\", \"closed\", \"all\"])]\n state: Option,\n}\n\n## Output Format\nIssues (showing 20 of 3,801)\n\n #1234 Authentication redesign opened @johndoe 3 days ago\n #1233 Fix memory leak in cache closed @janedoe 5 days ago\n #1232 Add dark mode support opened @bobsmith 1 week ago\n ...\n\n## Implementation\n- Query issues table with filters\n- Join with projects table for display\n- Format updated_at as relative time (\"3 days ago\")\n- Truncate title if too long\n\nFiles: src/cli/commands/list.rs\nDone when: List displays issues with proper filtering and 
formatting","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:58:23.809829Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.898106Z","deleted_at":"2026-01-25T17:02:01.898102Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-2i10","title":"OBSERV: Add log file diagnostics to lore doctor","description":"## Background\nlore doctor is the diagnostic entry point. Adding log file info lets users verify logging is working and check disk usage. The existing DoctorChecks struct (src/cli/commands/doctor.rs:43-51) has checks for config, database, gitlab, projects, ollama.\n\n## Approach\nAdd a new LoggingCheck struct and field to DoctorChecks:\n\n```rust\n#[derive(Debug, Serialize)]\npub struct LoggingCheck {\n pub result: CheckResult,\n pub log_dir: String,\n pub file_count: usize,\n pub total_bytes: u64,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub oldest_file: Option,\n}\n```\n\nAdd to DoctorChecks (src/cli/commands/doctor.rs:43-51):\n```rust\npub logging: LoggingCheck,\n```\n\nImplement check_logging() function:\n```rust\nfn check_logging() -> LoggingCheck {\n let log_dir = get_log_dir(None); // TODO: accept config override\n let mut file_count = 0;\n let mut total_bytes = 0u64;\n let mut oldest: Option = None;\n\n if let Ok(entries) = std::fs::read_dir(&log_dir) {\n for entry in entries.flatten() {\n let name = entry.file_name().to_string_lossy().to_string();\n if name.starts_with(\"lore.\") && name.ends_with(\".log\") {\n file_count += 1;\n if let Ok(meta) = entry.metadata() {\n total_bytes += meta.len();\n }\n if oldest.as_ref().map_or(true, |o| name < *o) {\n oldest = Some(name);\n }\n }\n }\n }\n\n LoggingCheck {\n result: CheckResult { status: CheckStatus::Ok, message: None },\n log_dir: log_dir.display().to_string(),\n file_count,\n total_bytes,\n oldest_file: oldest,\n }\n}\n```\n\nCall from 
run_doctor() (src/cli/commands/doctor.rs:91-126) and add to DoctorChecks construction.\n\nFor interactive output in print_doctor_results(), add a section:\n```\nLogging\n Log directory: ~/.local/share/lore/logs/\n Log files: 7 (2.3 MB)\n Oldest: lore.2026-01-28.log\n```\n\n## Acceptance Criteria\n- [ ] lore doctor shows log directory path, file count, total size\n- [ ] lore --robot doctor JSON includes logging field with log_dir, file_count, total_bytes, oldest_file\n- [ ] When no log files exist: file_count=0, total_bytes=0, oldest_file=null\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/cli/commands/doctor.rs (add LoggingCheck struct, check_logging fn, wire into DoctorChecks)\n\n## TDD Loop\nRED: test_check_logging_with_files, test_check_logging_empty_dir\nGREEN: Implement LoggingCheck struct and check_logging function\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- Log directory doesn't exist yet (first run before any sync): report file_count=0, status Ok\n- Permission errors on read_dir: report status Warning with message","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:53:55.682986Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:15:04.520915Z","closed_at":"2026-02-04T17:15:04.520868Z","close_reason":"Added LoggingCheck to DoctorChecks with log_dir, file_count, total_bytes; shows in both interactive and robot output","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-2i10","depends_on_id":"bd-1k4","type":"blocks","created_at":"2026-02-04T15:55:19.686771Z","created_by":"tayloreernisse"},{"issue_id":"bd-2i10","depends_on_id":"bd-2nx","type":"parent-child","created_at":"2026-02-04T15:53:55.683866Z","created_by":"tayloreernisse"}]} +{"id":"bd-2ilv","title":"Implement robot JSON output for me command","description":"## Background\n`lore me` robot output must follow the existing lore envelope pattern and expose a 
stable machine contract. This bead is the canonical serializer for that contract.\n\nCritical schema decision: use `project_path` consistently across work items and activity payloads (matching `Me*` structs), not mixed `project`/`project_path` naming.\n\n## Approach\nImplement `render_robot` in `src/cli/commands/me/render_robot.rs` with a typed envelope:\n\n```rust\n#[derive(Serialize)]\nstruct MeJsonEnvelope {\n ok: bool,\n data: MeDashboard,\n meta: RobotMeta,\n}\n\npub fn render_robot(dashboard: &MeDashboard, elapsed_ms: u64) -> Result\n```\n\nRules:\n- Serialize `MeDashboard` directly under `data`.\n- Keep all section arrays present, including empty arrays.\n- Keep nullable fields as JSON `null`.\n- Emit compact JSON (single-line) for parity with existing robot commands.\n\nDo not apply `--fields` filtering in this bead; filtering behavior is layered by `bd-3jiq`.\n\n## Acceptance Criteria\n- [ ] Output is `{ok,data,meta}` envelope with `ok=true`\n- [ ] `meta.elapsed_ms` is present and numeric\n- [ ] `data` contains: `username`, `since_iso`, `summary`, `open_issues`, `open_mrs_authored`, `reviewing_mrs`, `activity`\n- [ ] Work item payloads use `project_path` field name (no `project` alias)\n- [ ] Activity payload uses `project_path` field name\n- [ ] `attention_state` serializes in snake_case\n- [ ] Issues include nullable `status_name`\n- [ ] Authored MRs include nullable `detailed_merge_status` and `draft`\n- [ ] Reviewing MRs include `author_username` and `draft`\n- [ ] Empty arrays serialize as `[]` (not omitted)\n- [ ] Serializer never panics in normal flow (returns Result and caller handles)\n\n## Files\n- CREATE: `src/cli/commands/me/render_robot.rs`\n- MODIFY: `src/cli/commands/me/mod.rs` (wire renderer)\n\n## TDD Anchor\nRED:\n- `test_robot_envelope_structure`\n- `test_robot_uses_project_path_field`\n- `test_robot_attention_state_snake_case`\n- `test_robot_empty_arrays_present`\n- `test_robot_nullable_fields_serialize_null`\n\nGREEN:\n- Implement 
typed envelope serializer and wire into handler.\n\nVERIFY:\n- `cargo test robot_envelope`\n- `cargo test me_robot`\n\n## Edge Cases\n- If JSON serialization fails unexpectedly, propagate error into command error path rather than falling back to ad-hoc JSON strings.\n- Keep key order deterministic only where serde guarantees struct order; tests should assert presence/shape, not strict key ordering.\n\n## Dependency Context\nConsumes data structs from `bd-3bwh` and `RobotMeta` from `src/cli/robot.rs`.\nFeeds `bd-3jiq` which applies section-array field projection on top of this full payload.\n\nDependencies:\n -> bd-3bwh (blocks) - Define dashboard data structs for me command\n\nDependents:\n <- bd-3jiq (blocks) - Implement --fields minimal preset for me robot mode","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:40:07.681875Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.063421Z","closed_at":"2026-02-20T16:09:13.063381Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2ilv","depends_on_id":"bd-3bwh","type":"blocks","created_at":"2026-02-19T19:41:28.176610Z","created_by":"tayloreernisse"}]} {"id":"bd-2iq","title":"[CP1] Database migration 002_issues.sql","description":"## Background\n\nThe 002_issues.sql migration creates tables for issues, labels, issue_labels, discussions, and notes. 
This is the data foundation for Checkpoint 1, enabling issue ingestion with cursor-based sync, label tracking, and discussion storage.\n\n## Approach\n\nCreate `migrations/002_issues.sql` with complete SQL statements.\n\n### Full Migration SQL\n\n```sql\n-- Migration 002: Issue Ingestion Tables\n-- Applies on top of 001_initial.sql\n\n-- Issues table\nCREATE TABLE issues (\n id INTEGER PRIMARY KEY,\n gitlab_id INTEGER UNIQUE NOT NULL,\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n iid INTEGER NOT NULL,\n title TEXT,\n description TEXT,\n state TEXT NOT NULL CHECK (state IN ('opened', 'closed')),\n author_username TEXT,\n created_at INTEGER NOT NULL, -- ms epoch UTC\n updated_at INTEGER NOT NULL, -- ms epoch UTC\n last_seen_at INTEGER NOT NULL, -- updated on every upsert\n discussions_synced_for_updated_at INTEGER, -- watermark for dependent sync\n web_url TEXT,\n raw_payload_id INTEGER REFERENCES raw_payloads(id)\n);\n\nCREATE INDEX idx_issues_project_updated ON issues(project_id, updated_at);\nCREATE INDEX idx_issues_author ON issues(author_username);\nCREATE UNIQUE INDEX uq_issues_project_iid ON issues(project_id, iid);\n\n-- Labels table (name-only for CP1)\nCREATE TABLE labels (\n id INTEGER PRIMARY KEY,\n gitlab_id INTEGER, -- optional, for future Labels API\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n name TEXT NOT NULL,\n color TEXT,\n description TEXT\n);\n\nCREATE UNIQUE INDEX uq_labels_project_name ON labels(project_id, name);\nCREATE INDEX idx_labels_name ON labels(name);\n\n-- Issue-label junction (DELETE before INSERT for stale removal)\nCREATE TABLE issue_labels (\n issue_id INTEGER NOT NULL REFERENCES issues(id) ON DELETE CASCADE,\n label_id INTEGER NOT NULL REFERENCES labels(id) ON DELETE CASCADE,\n PRIMARY KEY(issue_id, label_id)\n);\n\nCREATE INDEX idx_issue_labels_label ON issue_labels(label_id);\n\n-- Discussion threads for issues (MR discussions added in CP2)\nCREATE TABLE discussions 
(\n id INTEGER PRIMARY KEY,\n gitlab_discussion_id TEXT NOT NULL, -- GitLab string ID (e.g., \"6a9c1750b37d...\")\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n issue_id INTEGER REFERENCES issues(id) ON DELETE CASCADE,\n merge_request_id INTEGER, -- FK added in CP2 via ALTER TABLE\n noteable_type TEXT NOT NULL CHECK (noteable_type IN ('Issue', 'MergeRequest')),\n individual_note INTEGER NOT NULL DEFAULT 0, -- 0=threaded, 1=standalone\n first_note_at INTEGER, -- min(note.created_at) for ordering\n last_note_at INTEGER, -- max(note.created_at) for \"recently active\"\n last_seen_at INTEGER NOT NULL, -- updated on every upsert\n resolvable INTEGER NOT NULL DEFAULT 0, -- MR discussions can be resolved\n resolved INTEGER NOT NULL DEFAULT 0,\n CHECK (\n (noteable_type = 'Issue' AND issue_id IS NOT NULL AND merge_request_id IS NULL) OR\n (noteable_type = 'MergeRequest' AND merge_request_id IS NOT NULL AND issue_id IS NULL)\n )\n);\n\nCREATE UNIQUE INDEX uq_discussions_project_discussion_id ON discussions(project_id, gitlab_discussion_id);\nCREATE INDEX idx_discussions_issue ON discussions(issue_id);\nCREATE INDEX idx_discussions_mr ON discussions(merge_request_id);\nCREATE INDEX idx_discussions_last_note ON discussions(last_note_at);\n\n-- Notes belong to discussions\nCREATE TABLE notes (\n id INTEGER PRIMARY KEY,\n gitlab_id INTEGER UNIQUE NOT NULL,\n discussion_id INTEGER NOT NULL REFERENCES discussions(id) ON DELETE CASCADE,\n project_id INTEGER NOT NULL REFERENCES projects(id) ON DELETE CASCADE,\n note_type TEXT, -- 'DiscussionNote' | 'DiffNote' | null\n is_system INTEGER NOT NULL DEFAULT 0, -- 1 for system-generated notes\n author_username TEXT,\n body TEXT,\n created_at INTEGER NOT NULL, -- ms epoch\n updated_at INTEGER NOT NULL, -- ms epoch\n last_seen_at INTEGER NOT NULL, -- updated on every upsert\n position INTEGER, -- 0-indexed array order from API\n resolvable INTEGER NOT NULL DEFAULT 0,\n resolved INTEGER NOT NULL DEFAULT 0,\n 
resolved_by TEXT,\n resolved_at INTEGER,\n -- DiffNote position metadata (populated for MR DiffNotes in CP2)\n position_old_path TEXT,\n position_new_path TEXT,\n position_old_line INTEGER,\n position_new_line INTEGER,\n raw_payload_id INTEGER REFERENCES raw_payloads(id)\n);\n\nCREATE INDEX idx_notes_discussion ON notes(discussion_id);\nCREATE INDEX idx_notes_author ON notes(author_username);\nCREATE INDEX idx_notes_system ON notes(is_system);\n\n-- Update schema version\nINSERT INTO schema_version (version, applied_at, description)\nVALUES (2, strftime('%s', 'now') * 1000, 'Issue ingestion tables');\n```\n\n## Acceptance Criteria\n\n- [ ] Migration file exists at `migrations/002_issues.sql`\n- [ ] All tables created: issues, labels, issue_labels, discussions, notes\n- [ ] All indexes created as specified\n- [ ] CHECK constraints on state and noteable_type work correctly\n- [ ] CASCADE deletes work (project deletion cascades)\n- [ ] Migration applies cleanly on fresh DB after 001_initial.sql\n- [ ] schema_version updated to 2 after migration\n- [ ] `gi doctor` shows schema_version = 2\n\n## Files\n\n- migrations/002_issues.sql (create)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/migration_tests.rs\n#[test] fn migration_002_creates_issues_table()\n#[test] fn migration_002_creates_labels_table()\n#[test] fn migration_002_creates_discussions_table()\n#[test] fn migration_002_creates_notes_table()\n#[test] fn migration_002_enforces_state_check()\n#[test] fn migration_002_enforces_noteable_type_check()\n#[test] fn migration_002_cascades_on_project_delete()\n```\n\nGREEN: Create migration file with all SQL\n\nVERIFY:\n```bash\n# Apply migration to test DB\nsqlite3 :memory: < migrations/001_initial.sql\nsqlite3 :memory: < migrations/002_issues.sql\n\n# Verify schema_version\nsqlite3 test.db \"SELECT version FROM schema_version ORDER BY version DESC LIMIT 1\"\n# Expected: 2\n\ncargo test migration_002\n```\n\n## Edge Cases\n\n- Applying twice - should fail on UNIQUE 
constraint (idempotency via version check)\n- Missing 001 - foreign key to projects fails\n- Long label names - TEXT handles any length\n- NULL description - allowed by schema\n- Empty discussions_synced_for_updated_at - NULL means never synced","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.128594Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:25:10.309900Z","closed_at":"2026-01-25T22:25:10.309852Z","close_reason":"Created 002_issues.sql with issues/labels/issue_labels/discussions/notes tables, 8 passing tests verify schema, constraints, and cascades","compaction_level":0,"original_size":0} {"id":"bd-2iqk","title":"Implement Doctor + Stats screens","description":"## Background\nDoctor shows environment health checks (config, auth, DB, Ollama). Stats shows database statistics (entity counts, index sizes, FTS coverage). Both are informational screens using ftui JsonView or simple table layouts.\n\n## Approach\nState:\n- DoctorState: checks (Vec), overall_status (Healthy|Warning|Error)\n- StatsState: entity_stats (EntityStats), index_stats (IndexStats), fts_stats (FtsStats)\n\nAction:\n- run_doctor(config, conn) -> Vec: reuses existing lore doctor logic\n- fetch_stats(conn) -> StatsData: reuses existing lore stats logic\n\nView:\n- Doctor: vertical list of health checks with pass/fail/warn indicators\n- Stats: table of entity counts, index sizes, FTS document count, embedding coverage\n\n## Acceptance Criteria\n- [ ] Doctor shows config, auth, DB, and Ollama health status\n- [ ] Stats shows entity counts matching lore --robot stats output\n- [ ] Both screens accessible via navigation (gd for Doctor)\n- [ ] Health check results color-coded: green pass, yellow warn, red fail\n\n## Files\n- CREATE: crates/lore-tui/src/state/doctor.rs\n- CREATE: crates/lore-tui/src/state/stats.rs\n- CREATE: crates/lore-tui/src/view/doctor.rs\n- CREATE: crates/lore-tui/src/view/stats.rs\n- MODIFY: crates/lore-tui/src/action.rs (add 
run_doctor, fetch_stats)\n\n## TDD Anchor\nRED: Write test_fetch_stats_counts that creates DB with known data, asserts fetch_stats returns correct counts.\nGREEN: Implement fetch_stats with COUNT queries.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_stats\n\n## Edge Cases\n- Ollama not running: Doctor shows warning, not error (optional dependency)\n- Very large databases: stats queries should be fast (use shadow tables for FTS count)\n\n## Dependency Context\nUses existing doctor and stats logic from lore CLI commands.\nUses DbManager from \"Implement DbManager\" task.","status":"open","priority":3,"issue_type":"task","created_at":"2026-02-12T17:02:21.744226Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.357165Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2iqk","depends_on_id":"bd-1df9","type":"blocks","created_at":"2026-02-12T18:11:34.357108Z","created_by":"tayloreernisse"},{"issue_id":"bd-2iqk","depends_on_id":"bd-2x2h","type":"blocks","created_at":"2026-02-12T17:10:02.871533Z","created_by":"tayloreernisse"}]} {"id":"bd-2jzn","title":"Migration 021: Add status columns to issues table","description":"## Background\nGitLab issues have work item status (To do, In progress, Done, Won't do, Duplicate) only available via GraphQL. We need 5 nullable columns on the issues table to store this data after enrichment. The status_synced_at column tracks when enrichment last wrote/cleared each row (ms epoch UTC).\n\n## Approach\nCreate a new SQL migration file and register it in the MIGRATIONS array. SQLite ALTER TABLE ADD COLUMN is non-destructive — existing rows get NULL defaults. 
Add a compound index for --status filter performance.\n\n## Files\n- migrations/021_work_item_status.sql (NEW)\n- src/core/db.rs (add entry to MIGRATIONS array)\n\n## Implementation\n\nmigrations/021_work_item_status.sql:\n ALTER TABLE issues ADD COLUMN status_name TEXT;\n ALTER TABLE issues ADD COLUMN status_category TEXT;\n ALTER TABLE issues ADD COLUMN status_color TEXT;\n ALTER TABLE issues ADD COLUMN status_icon_name TEXT;\n ALTER TABLE issues ADD COLUMN status_synced_at INTEGER;\n CREATE INDEX IF NOT EXISTS idx_issues_project_status_name ON issues(project_id, status_name);\n\nIn src/core/db.rs, add as last entry in MIGRATIONS array:\n (\"021\", include_str!(\"../../migrations/021_work_item_status.sql\")),\nLATEST_SCHEMA_VERSION is computed as MIGRATIONS.len() as i32 — auto-becomes 21.\n\n## Acceptance Criteria\n- [ ] Migration file exists at migrations/021_work_item_status.sql\n- [ ] MIGRATIONS array has 21 entries ending with (\"021\", ...)\n- [ ] In-memory DB: PRAGMA table_info(issues) includes all 5 new columns\n- [ ] In-memory DB: PRAGMA index_list(issues) includes idx_issues_project_status_name\n- [ ] Existing rows have NULL for all 5 new columns\n- [ ] cargo check --all-targets passes\n\n## TDD Loop\nRED: test_migration_021_adds_columns, test_migration_021_adds_index\n Pattern: create_connection(Path::new(\":memory:\")) + run_migrations(&conn), then PRAGMA queries\nGREEN: Create SQL file + register in MIGRATIONS\nVERIFY: cargo test test_migration_021\n\n## Edge Cases\n- Migration has 5 columns (including status_synced_at INTEGER), not 4\n- Test project insert uses gitlab_project_id, path_with_namespace, web_url (no name/last_seen_at)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:41:40.806320Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.414434Z","closed_at":"2026-02-11T07:21:33.414387Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 
failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2jzn","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-11T06:41:40.807222Z","created_by":"tayloreernisse"}]} @@ -153,6 +163,7 @@ {"id":"bd-2nb","title":"[CP1] Issue ingestion module","description":"Fetch and store issues with cursor-based incremental sync.\n\nImplement ingestIssues(options) → { fetched, upserted, labelsCreated }\n\nLogic:\n1. Get current cursor from sync_cursors\n2. Paginate through issues updated after cursor\n3. Apply local filtering for tuple cursor semantics\n4. For each issue:\n - Store raw payload (compressed)\n - Upsert issue record\n - Extract and upsert labels\n - Link issue to labels via junction\n5. Update cursor after each page commit\n\nFiles: src/ingestion/issues.ts\nTests: tests/integration/issue-ingestion.test.ts\nDone when: Issues, labels, issue_labels populated correctly with resumable cursor","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:19:50.701180Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.154318Z","deleted_at":"2026-01-25T15:21:35.154316Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-2nfs","title":"Implement snapshot test infrastructure + terminal compat matrix","description":"## Background\nSnapshot tests ensure deterministic rendering using FakeClock and ftui's test backend. They capture rendered TUI output as styled text and compare against golden files, catching visual regressions without a real terminal. The terminal compatibility matrix is a separate documentation artifact, not an automated test.\n\n## Approach\n\n### Snapshot Infrastructure\n\n**Test Backend**: Use `ftui_harness::TestBackend` (or equivalent from ftui-harness crate) which captures rendered output as a Buffer without needing a real terminal. 
If ftui-harness is not available, create a minimal TestBackend that implements ftui's backend trait and stores cells in a `Vec>`.\n\n**Deterministic Rendering**:\n- Inject FakeClock (from bd-2lg6) to freeze all relative time computations (\"2 hours ago\" always renders the same)\n- Fix terminal size to 120x40 for all snapshot tests\n- Use synthetic DB fixture with known data (same fixture pattern as parity tests)\n\n**Snapshot Capture Flow**:\n```rust\nfn capture_snapshot(app: &LoreApp, size: (u16, u16)) -> String {\n let backend = TestBackend::new(size.0, size.1);\n // Render app.view() to backend\n // Convert buffer cells to plain text with ANSI annotations\n // Return as String\n}\n```\n\n**Golden File Management**:\n- Golden files stored in `crates/lore-tui/tests/snapshots/` as `.snap` files\n- Naming: `{test_name}.snap` (e.g., `dashboard_default.snap`)\n- Update mode: set env var `UPDATE_SNAPSHOTS=1` to overwrite golden files instead of comparing\n- Use `insta` crate (or manual file comparison) for snapshot assertion\n\n**Fixture Data** (synthetic, deterministic):\n- 50 issues (mix of opened/closed/locked states, various labels)\n- 25 MRs (mix of opened/merged/closed/draft)\n- 100 discussions with notes\n- Known timestamps relative to FakeClock's frozen time\n\n### Snapshot Tests\n\nEach test:\n1. Creates in-memory DB with fixture data\n2. Creates LoreApp with FakeClock frozen at 2026-01-15T12:00:00Z\n3. Sets initial screen state\n4. Renders via TestBackend at 120x40\n5. 
Compares output against golden file\n\nTests to implement:\n- `test_dashboard_snapshot`: Dashboard screen with fixture counts and recent activity\n- `test_issue_list_snapshot`: Issue list with default sort, showing state badges and relative times\n- `test_issue_detail_snapshot`: Single issue detail with description and discussion thread\n- `test_mr_list_snapshot`: MR list showing draft indicators and review status\n- `test_search_results_snapshot`: Search results with highlighted matches\n- `test_empty_state_snapshot`: Dashboard with empty DB (zero issues/MRs)\n\n### Terminal Compatibility Matrix (Documentation)\n\nThis is a manual verification checklist, NOT an automated test. Document results in `crates/lore-tui/TERMINAL_COMPAT.md`:\n\n| Feature | iTerm2 | tmux | Alacritty | kitty |\n|---------|--------|------|-----------|-------|\n| True color (RGB) | | | | |\n| Unicode width (CJK) | | | | |\n| Box-drawing chars | | | | |\n| Bold/italic/underline | | | | |\n| Mouse events | | | | |\n| Resize handling | | | | |\n| Alt screen | | | | |\n\nFill in during manual QA, not during automated test implementation.\n\n## Acceptance Criteria\n- [ ] At least 6 snapshot tests pass with golden files committed to repo\n- [ ] All snapshots use FakeClock frozen at 2026-01-15T12:00:00Z\n- [ ] All snapshots render at fixed 120x40 terminal size\n- [ ] Dashboard snapshot matches golden file (deterministic)\n- [ ] Issue list snapshot matches golden file (deterministic)\n- [ ] Empty state snapshot matches golden file\n- [ ] UPDATE_SNAPSHOTS=1 env var overwrites golden files for updates\n- [ ] Golden files are plain text (diffable in version control)\n- [ ] TERMINAL_COMPAT.md template created (to be filled during manual QA)\n\n## Files\n- CREATE: crates/lore-tui/tests/snapshot_tests.rs\n- CREATE: crates/lore-tui/tests/snapshots/ (directory for golden files)\n- CREATE: crates/lore-tui/tests/snapshots/dashboard_default.snap\n- CREATE: 
crates/lore-tui/tests/snapshots/issue_list_default.snap\n- CREATE: crates/lore-tui/tests/snapshots/issue_detail.snap\n- CREATE: crates/lore-tui/tests/snapshots/mr_list_default.snap\n- CREATE: crates/lore-tui/tests/snapshots/search_results.snap\n- CREATE: crates/lore-tui/tests/snapshots/empty_state.snap\n- CREATE: crates/lore-tui/TERMINAL_COMPAT.md (template)\n\n## TDD Anchor\nRED: Write `test_dashboard_snapshot` that creates LoreApp with FakeClock and fixture DB, renders Dashboard at 120x40, asserts output matches `snapshots/dashboard_default.snap`. Fails because golden file does not exist yet.\nGREEN: Render the Dashboard, run with UPDATE_SNAPSHOTS=1 to generate golden file, then run normally to verify match.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml snapshot\n\n## Edge Cases\n- Golden file encoding: always UTF-8, normalize line endings to LF\n- FakeClock must be injected into all components that compute relative time (e.g., \"2 hours ago\")\n- Snapshot diffs on CI: print a clear diff showing expected vs actual when mismatch occurs\n- Fixture data must NOT include non-deterministic values (random IDs, current timestamps)\n- If ftui-harness API changes, TestBackend shim may need updating\n\n## Dependency Context\n- Uses FakeClock from bd-2lg6 (Implement Clock trait)\n- Uses all screen views from Phase 2 (Dashboard, Issue List, MR List, Detail views)\n- Uses TestBackend from ftui-harness crate (or custom implementation)\n- Depends on bd-3h00 (session persistence) per phase ordering — screens must be complete before snapshotting\n- Downstream: bd-nu0d (fuzz tests) and bd-3fjk (race tests) depend on this 
infrastructure","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:03:54.220114Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:38.126586Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2nfs","depends_on_id":"bd-1b6k","type":"blocks","created_at":"2026-02-12T18:11:38.126505Z","created_by":"tayloreernisse"},{"issue_id":"bd-2nfs","depends_on_id":"bd-3h00","type":"blocks","created_at":"2026-02-12T17:10:02.925537Z","created_by":"tayloreernisse"}]} {"id":"bd-2ni","title":"OBSERV Epic: Phase 2 - Spans + Correlation IDs","description":"Add tracing spans to all sync stages and generate UUID-based run_id for correlation. Every log line within a sync run includes run_id in JSON span context. Nested spans produce correct parent-child chains.\n\nDepends on: Phase 1 (subscriber must support span recording)\nUnblocks: Phase 3 (metrics), Phase 5 (rate limit logging)\n\nFiles: src/cli/commands/sync.rs, src/cli/commands/ingest.rs, src/ingestion/orchestrator.rs, src/documents/regenerator.rs, src/embedding/pipeline.rs, src/main.rs\n\nAcceptance criteria (PRD Section 6.2):\n- Every log line includes run_id in JSON span context\n- Nested spans produce chain: fetch_pages includes parent ingest_issues span\n- run_id is 8-char hex (truncated UUIDv4)\n- Spans visible in -vv stderr output","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-02-04T15:53:08.935218Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:19:38.721297Z","closed_at":"2026-02-04T17:19:38.721241Z","close_reason":"Phase 2 complete: run_id correlation IDs generated at sync/ingest entry, root spans with .instrument() for async, #[instrument] on 5 key pipeline functions","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-2ni","depends_on_id":"bd-2nx","type":"blocks","created_at":"2026-02-04T15:55:19.044453Z","created_by":"tayloreernisse"}]} 
+{"id":"bd-2nl3","title":"Implement activity feed: assignment detection from system notes","description":"## Background\n`lore me` activity must surface assignment/reviewer events from system notes (`is_system=1`) as the fifth activity source. The main risk is false-positive username matching (`@ann` matching `@anna`) if naive `LIKE '%@ann%'` is used.\n\n## Approach\nImplement assignment-system-note extraction in `src/cli/commands/me/activity.rs` with explicit token-boundary matching.\n\n### 1. Normalize note body for stable matching\nCreate a normalized expression used in predicates:\n- lowercase body\n- replace common punctuation/newlines with spaces\n- pad leading/trailing spaces\n\nExample SQL expression:\n```sql\n' ' || LOWER(\n REPLACE(REPLACE(REPLACE(REPLACE(REPLACE(n.body, '\\n', ' '), ',', ' '), '.', ' '), ';', ' '), ':', ' ')\n) || ' '\n```\n\nCall this `body_norm` (inline or CTE).\n\n### 2. Event detection predicates (word-boundary safe)\nUse patterns with trailing-space boundary:\n- assign: `body_norm LIKE '% assigned to @' || LOWER(?username) || ' %'`\n- unassign: `body_norm LIKE '% unassigned @' || LOWER(?username) || ' %'`\n- review request: `body_norm LIKE '% requested review from @' || LOWER(?username) || ' %'`\n\nThis avoids matching `@jdoe` inside `@jdoe2`.\n\n### 3. 
Column contract\nReturn identical columns as other activity sources:\n`timestamp, event_type, entity_type, entity_iid, project_path, actor, is_own, summary, body_preview`\n\nRules:\n- `event_type`: `assign` | `unassign` | `review_request`\n- `actor`: `COALESCE(n.author_username, 'system')`\n- `is_own`: `(LOWER(COALESCE(n.author_username, '')) = LOWER(?username))`\n- `body_preview`: `NULL`\n- include `since` and `ProjectScope` filters\n- include current-association `my_items` filter\n\n## Acceptance Criteria\n- [ ] Detects assign events from `assigned to @username`\n- [ ] Detects unassign events from `unassigned @username`\n- [ ] Detects review-request events from `requested review from @username`\n- [ ] Username matching is boundary-safe (`@user` does not match `@user2`)\n- [ ] Matching is case-insensitive\n- [ ] Only system notes (`is_system = 1`) are considered\n- [ ] Result shape matches other activity-source queries exactly\n- [ ] `actor` is populated from note author with `'system'` fallback\n- [ ] `is_own` compares actor vs dashboard username\n- [ ] Respects `--since` and `ProjectScope`\n\n## Files\n- MODIFY: `src/cli/commands/me/activity.rs`\n\n## TDD Anchor\nRED:\n- `test_assignment_detection_assign`\n- `test_assignment_detection_unassign`\n- `test_assignment_detection_review_request`\n- `test_assignment_detection_case_insensitive`\n- `test_assignment_detection_username_boundary_no_false_match`\n- `test_assignment_detection_ignores_non_system_notes`\n\nGREEN:\n- Add normalized-body boundary-safe predicates and mapping.\n\nVERIFY:\n- `cargo test assignment_detection`\n- `cargo test me_activity`\n\n## Edge Cases\n- Multi-target notes (`assigned to @alice and @jdoe`) should still match the target username.\n- Punctuation adjacency (`@jdoe,`) is handled by punctuation-to-space normalization.\n- Unexpected note text variants should fail closed (not misclassified).\n\n## Dependency Context\nExtends activity query layer from `bd-b3r3` and is consumed by `bd-2tl5` 
UNION assembly.\n\nDependencies:\n -> bd-b3r3 (blocks) - Implement activity feed: note and resource event queries\n\nDependents:\n <- bd-2tl5 (blocks) - Implement activity feed UNION ALL assembly","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:38:15.157407Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.056445Z","closed_at":"2026-02-20T16:09:13.056408Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2nl3","depends_on_id":"bd-b3r3","type":"blocks","created_at":"2026-02-19T19:41:18.207350Z","created_by":"tayloreernisse"}]} {"id":"bd-2no","title":"Write integration tests","description":"## Background\nIntegration tests verify that modules work together with a real SQLite database. They test FTS search (stemming, empty results), embedding storage (sqlite-vec ops), hybrid search (combined retrieval), and sync orchestration (full pipeline). Each test creates a fresh in-memory DB with migrations applied.\n\n## Approach\nCreate integration test files in `tests/`:\n\n**1. tests/fts_search.rs:**\n- Create DB, apply migrations 001-008\n- Insert test documents via SQL\n- Verify FTS5 triggers fired (documents_fts has matching count)\n- Search with various queries: stemming, prefix, empty, special chars\n- Verify result ranking (BM25 ordering)\n- Verify snippet generation\n\n**2. tests/embedding.rs:**\n- Create DB, apply migrations 001-009 (requires sqlite-vec)\n- Insert test documents + embeddings with known vectors\n- Verify KNN search returns nearest neighbors\n- Verify chunk deduplication\n- Verify orphan cleanup trigger (delete document -> embeddings gone)\n\n**3. 
tests/hybrid_search.rs:**\n- Create DB, apply all migrations\n- Insert documents + embeddings\n- Test all three modes: lexical, semantic, hybrid\n- Verify RRF ranking produces expected order\n- Test graceful degradation (no embeddings -> FTS fallback)\n- Test adaptive recall with filters\n\n**4. tests/sync.rs:**\n- Test sync orchestration with mock/stub GitLab responses\n- Verify pipeline stages execute in order\n- Verify lock acquisition/release\n- Verify --no-embed and --no-docs flags\n\n**Test fixtures:**\n- Deterministic embedding vectors (no Ollama required): e.g., [1.0, 0.0, 0.0, ...] for doc1, [0.0, 1.0, 0.0, ...] for doc2\n- Known documents with predictable search results\n- Fixed timestamps for reproducibility\n\n## Acceptance Criteria\n- [ ] FTS search tests pass (stemming, prefix, empty, special chars)\n- [ ] Embedding tests pass (KNN, dedup, orphan cleanup)\n- [ ] Hybrid search tests pass (all 3 modes, graceful degradation)\n- [ ] Sync tests pass (pipeline orchestration)\n- [ ] All tests use in-memory DB (no file I/O)\n- [ ] No external dependencies (no Ollama, no GitLab) — use fixtures/stubs\n- [ ] `cargo test --test fts_search --test embedding --test hybrid_search --test sync` passes\n\n## Files\n- `tests/fts_search.rs` — new file\n- `tests/embedding.rs` — new file\n- `tests/hybrid_search.rs` — new file\n- `tests/sync.rs` — new file\n- `tests/fixtures/` — optional: test helper functions (shared DB setup)\n\n## TDD Loop\nThese ARE integration tests — they verify the combined behavior of multiple beads.\nVERIFY: `cargo test --test fts_search && cargo test --test embedding && cargo test --test hybrid_search && cargo test --test sync`\n\n## Edge Cases\n- sqlite-vec not available: embedding tests should skip gracefully (or require feature flag)\n- In-memory DB with WAL mode: may behave differently than file DB — test both if critical\n- Concurrent test execution: each test creates its own DB (no shared 
state)","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-30T15:27:21.751019Z","created_by":"tayloreernisse","updated_at":"2026-01-30T18:11:12.432092Z","closed_at":"2026-01-30T18:11:12.432036Z","close_reason":"Integration tests: 10 FTS search tests (stemming, empty, special chars, ordering, triggers, null title), 5 embedding tests (KNN, limit, dedup, orphan trigger, empty DB), 6 hybrid search tests (lexical mode, FTS-only, graceful degradation, RRF ranking, filters, mode variants). 310 total tests pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2no","depends_on_id":"bd-1x6","type":"blocks","created_at":"2026-01-30T15:29:35.607603Z","created_by":"tayloreernisse"},{"issue_id":"bd-2no","depends_on_id":"bd-3eu","type":"blocks","created_at":"2026-01-30T15:29:35.572825Z","created_by":"tayloreernisse"},{"issue_id":"bd-2no","depends_on_id":"bd-3lu","type":"blocks","created_at":"2026-01-30T15:29:35.499831Z","created_by":"tayloreernisse"},{"issue_id":"bd-2no","depends_on_id":"bd-am7","type":"blocks","created_at":"2026-01-30T15:29:35.535320Z","created_by":"tayloreernisse"}]} {"id":"bd-2nx","title":"OBSERV Epic: Phase 1 - Verbosity Flags + Structured File Logging","description":"Foundation layer for observability. 
Add -v/-vv/-vvv CLI flags, dual-layer tracing subscriber (stderr + file), daily log rotation via tracing-appender, log retention cleanup, --log-format json flag, and LoggingConfig.\n\nDepends on: nothing (first phase)\nUnblocks: Phase 2, and transitively all other phases\n\nFiles: Cargo.toml, src/cli/mod.rs, src/main.rs, src/core/config.rs, src/core/paths.rs, src/cli/commands/doctor.rs\n\nAcceptance criteria (PRD Section 6.1):\n- JSON log files written to ~/.local/share/lore/logs/ with zero config\n- -v/-vv/-vvv control stderr verbosity per table in PRD 4.3\n- RUST_LOG overrides -v for both layers\n- --log-format json emits JSON on stderr\n- Daily rotation, retention cleanup on startup\n- --quiet suppresses stderr, does NOT affect file layer\n- lore doctor reports log directory info","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-02-04T15:53:00.987774Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:15:09.465732Z","closed_at":"2026-02-04T17:15:09.465684Z","close_reason":"Phase 1 complete: dual-layer subscriber, -v/--verbose flags, --log-format json, LoggingConfig, get_log_dir(), log retention, doctor diagnostics","compaction_level":0,"original_size":0,"labels":["observability"]} {"id":"bd-2o49","title":"Epic: TUI Phase 5.6 — CLI/TUI Parity Pack","description":"## Background\nPhase 5.6 ensures the TUI displays the same data as the CLI robot mode, preventing drift between interfaces. 
Tests compare TUI query results against CLI --robot output for counts, list data, detail data, and search results.\n\n## Acceptance Criteria\n- [ ] Dashboard counts match lore --robot count output\n- [ ] Issue/MR list data matches lore --robot issues/mrs output\n- [ ] Issue/MR detail data matches lore --robot issues/mrs output\n- [ ] Search results identity (same IDs, same order) matches lore --robot search output\n- [ ] Terminal safety sanitization applied consistently in TUI and CLI","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T17:05:36.087371Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.586917Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2o49","depends_on_id":"bd-1b6k","type":"blocks","created_at":"2026-02-12T18:11:51.586894Z","created_by":"tayloreernisse"}]} @@ -163,6 +174,7 @@ {"id":"bd-2rr","title":"OBSERV: Replace subscriber init with dual-layer setup","description":"## Background\nThis is the core infrastructure bead for Phase 1. It replaces the single-layer subscriber (src/main.rs:44-58) with a dual-layer registry that separates stderr and file concerns. The file layer provides always-on post-mortem data; the stderr layer respects -v flags.\n\n## Approach\nReplace src/main.rs lines 44-58 with a function (e.g., init_tracing()) that:\n\n1. Build stderr filter from -v count (or RUST_LOG override):\n```rust\nfn build_stderr_filter(verbose: u8, quiet: bool) -> EnvFilter {\n if let Ok(rust_log) = std::env::var(\"RUST_LOG\") {\n return EnvFilter::new(rust_log);\n }\n if quiet {\n return EnvFilter::new(\"lore=warn,error\");\n }\n match verbose {\n 0 => EnvFilter::new(\"lore=info,warn\"),\n 1 => EnvFilter::new(\"lore=debug,warn\"),\n 2 => EnvFilter::new(\"lore=debug,info\"),\n _ => EnvFilter::new(\"trace,debug\"),\n }\n}\n```\n\n2. 
Build file filter (always lore=debug,warn unless RUST_LOG set):\n```rust\nfn build_file_filter() -> EnvFilter {\n if let Ok(rust_log) = std::env::var(\"RUST_LOG\") {\n return EnvFilter::new(rust_log);\n }\n EnvFilter::new(\"lore=debug,warn\")\n}\n```\n\n3. Assemble the registry:\n```rust\nlet stderr_layer = fmt::layer()\n .with_target(false)\n .with_writer(SuspendingWriter);\n// Conditionally add .json() based on log_format\n\nlet file_appender = tracing_appender::rolling::daily(log_dir, \"lore\");\nlet (non_blocking, _guard) = tracing_appender::non_blocking(file_appender);\nlet file_layer = fmt::layer()\n .json()\n .with_writer(non_blocking);\n\ntracing_subscriber::registry()\n .with(stderr_layer.with_filter(build_stderr_filter(cli.verbose, cli.quiet)))\n .with(file_layer.with_filter(build_file_filter()))\n .init();\n```\n\nCRITICAL: The non_blocking _guard must be held for the program's lifetime. Store it in main() scope, NOT in the init function. If the guard drops, the file writer thread stops and buffered logs are lost.\n\nCRITICAL: Per-layer filtering requires each .with_filter() to produce a Filtered type. The two layers will have different concrete types (one with json, one without). This is fine -- the registry accepts heterogeneous layers via .with().\n\nWhen --log-format json: wrap stderr_layer with .json() too. This requires conditional construction. Two approaches:\n A) Use Box> for dynamic dispatch (simpler, tiny perf hit)\n B) Use an enum wrapper (zero cost but more code)\nRecommend approach A for simplicity. 
The overhead is one vtable indirection per log event, dwarfed by I/O.\n\nWhen file_logging is false (LoggingConfig.file_logging == false): skip adding the file layer entirely.\n\n## Acceptance Criteria\n- [ ] lore sync writes JSON log lines to ~/.local/share/lore/logs/lore.YYYY-MM-DD.log\n- [ ] lore -v sync shows DEBUG lore::* on stderr, deps at WARN\n- [ ] lore -vv sync shows DEBUG lore::* + INFO deps on stderr\n- [ ] lore -vvv sync shows TRACE everything on stderr\n- [ ] RUST_LOG=lore::gitlab=trace overrides -v for both layers\n- [ ] lore --log-format json sync emits JSON on stderr\n- [ ] -q + -v: -q wins (stderr at WARN+)\n- [ ] -q does NOT affect file layer (still DEBUG+)\n- [ ] File layer does NOT use SuspendingWriter\n- [ ] Non-blocking guard kept alive for program duration\n- [ ] Existing behavior unchanged when no new flags passed\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/main.rs (replace lines 44-58, add init_tracing function or inline)\n\n## TDD Loop\nRED:\n - test_verbosity_filter_construction: assert filter directives for verbose=0,1,2,3\n - test_rust_log_overrides_verbose: set env, assert TRACE not DEBUG\n - test_quiet_overrides_verbose: -q + -v => WARN+\n - test_json_log_output_format: capture file output, parse as JSON\n - test_suspending_writer_dual_layer: no garbled stderr with progress bars\nGREEN: Implement build_stderr_filter, build_file_filter, assemble registry\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- _guard lifetime: if guard is dropped early, buffered log lines are lost. MUST hold in main() scope.\n- Type erasure: stderr layer with/without .json() produces different types. Use Box> or separate init paths.\n- Empty RUST_LOG string: env::var returns Ok(\"\"), which EnvFilter::new(\"\") defaults to TRACE. May want to check is_empty().\n- File I/O error on log dir: tracing-appender handles this gracefully (no panic), but logs will be silently lost. 
The doctor command (bd-2i10) can diagnose this.","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:53:55.577025Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:15:04.384114Z","closed_at":"2026-02-04T17:15:04.384062Z","close_reason":"Replaced single-layer subscriber with dual-layer setup: stderr (human/json, -v controlled) + file (always-on JSON, daily rotation via tracing-appender)","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-2rr","depends_on_id":"bd-17n","type":"blocks","created_at":"2026-02-04T15:55:19.397949Z","created_by":"tayloreernisse"},{"issue_id":"bd-2rr","depends_on_id":"bd-1k4","type":"blocks","created_at":"2026-02-04T15:55:19.461728Z","created_by":"tayloreernisse"},{"issue_id":"bd-2rr","depends_on_id":"bd-1o1","type":"blocks","created_at":"2026-02-04T15:55:19.327157Z","created_by":"tayloreernisse"},{"issue_id":"bd-2rr","depends_on_id":"bd-2nx","type":"parent-child","created_at":"2026-02-04T15:53:55.577882Z","created_by":"tayloreernisse"},{"issue_id":"bd-2rr","depends_on_id":"bd-gba","type":"blocks","created_at":"2026-02-04T15:55:19.262870Z","created_by":"tayloreernisse"}]} {"id":"bd-2sr2","title":"Robot sync envelope: status enrichment metadata","description":"## Background\nAgents need machine-readable status enrichment metadata in the robot sync output to detect issues like unsupported GraphQL, partial errors, or enrichment failures. Without this, enrichment problems are invisible to automation.\n\n## Approach\nWire IngestProjectResult status fields into the per-project robot sync JSON. 
Add aggregate error count to top-level summary.\n\n## Files\n- Wherever robot sync output JSON is constructed (likely src/cli/commands/ingest.rs or the sync output serialization path — search for IngestProjectResult -> JSON conversion)\n\n## Implementation\n\nPer-project status_enrichment object in robot sync JSON:\n{\n \"mode\": \"fetched\" | \"unsupported\" | \"skipped\",\n \"reason\": null | \"graphql_endpoint_missing\" | \"auth_forbidden\",\n \"seen\": N,\n \"enriched\": N,\n \"cleared\": N,\n \"without_widget\": N,\n \"partial_errors\": N,\n \"first_partial_error\": null | \"message\",\n \"error\": null | \"message\"\n}\n\nSource fields from IngestProjectResult:\n mode <- status_enrichment_mode\n reason <- status_unsupported_reason\n seen <- statuses_seen\n enriched <- statuses_enriched\n cleared <- statuses_cleared\n without_widget <- statuses_without_widget\n partial_errors <- partial_error_count\n first_partial_error <- first_partial_error\n error <- status_enrichment_error\n\nTop-level sync summary: add status_enrichment_errors: N (count of projects where error is Some)\n\nField semantics:\n mode \"fetched\": enrichment ran (even if 0 statuses or error occurred)\n mode \"unsupported\": 404/403 from GraphQL\n mode \"skipped\": config toggle off\n seen > 0 + enriched == 0: project has issues but none with status\n partial_errors > 0: some pages returned incomplete data\n\n## Acceptance Criteria\n- [ ] Robot sync JSON includes per-project status_enrichment object\n- [ ] All 9 fields present with correct types\n- [ ] mode reflects actual enrichment outcome (fetched/unsupported/skipped)\n- [ ] Top-level status_enrichment_errors count present\n- [ ] Test: full robot sync output validates structure\n\n## TDD Loop\nRED: test_robot_sync_includes_status_enrichment\nGREEN: Wire fields into JSON serialization\nVERIFY: cargo test robot_sync\n\n## Edge Cases\n- Find the exact location where IngestProjectResult is serialized to JSON — may be in a Serialize impl or manual 
json! macro\n- All numeric fields default to 0, all Option fields default to null in JSON\n- mode is always present (never null)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:42:29.127412Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.422233Z","closed_at":"2026-02-11T07:21:33.422193Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2sr2","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-11T06:42:29.130750Z","created_by":"tayloreernisse"},{"issue_id":"bd-2sr2","depends_on_id":"bd-3dum","type":"blocks","created_at":"2026-02-11T06:42:45.995816Z","created_by":"tayloreernisse"}]} {"id":"bd-2sx","title":"Implement lore embed CLI command","description":"## Background\nThe embed CLI command is the user-facing wrapper for the embedding pipeline. It runs Ollama health checks, selects documents to embed (pending or failed), shows progress, and reports results. This is the standalone command for building embeddings outside of the sync orchestrator.\n\n## Approach\nCreate `src/cli/commands/embed.rs` per PRD Section 4.4.\n\n**IMPORTANT: The embed command is async.** The underlying `embed_documents()` function is `async fn` (uses `FuturesUnordered` for concurrent HTTP to Ollama). The CLI runner must use tokio runtime.\n\n**Core function (async):**\n```rust\npub async fn run_embed(\n config: &Config,\n retry_failed: bool,\n) -> Result\n```\n\n**Pipeline:**\n1. Create OllamaClient from config.embedding (base_url, model, timeout_secs)\n2. Run `client.health_check().await` — fail early with clear error if Ollama unavailable or model missing\n3. Determine selection: `EmbedSelection::RetryFailed` if --retry-failed, else `EmbedSelection::Pending`\n4. 
Call `embed_documents(conn, &client, selection, concurrency, progress_callback).await`\n - `concurrency` param controls max in-flight HTTP requests to Ollama\n - `progress_callback` drives indicatif progress bar\n5. Show progress bar (indicatif) during embedding\n6. Return EmbedResult with counts\n\n**CLI args:**\n```rust\n#[derive(Args)]\npub struct EmbedArgs {\n #[arg(long)]\n retry_failed: bool,\n}\n```\n\n**Output:**\n- Human: \"Embedded 42 documents (15 chunks), 2 errors, 5 skipped (unchanged)\"\n- JSON: `{\"ok\": true, \"data\": {\"embedded\": 42, \"chunks\": 15, \"errors\": 2, \"skipped\": 5}}`\n\n**Tokio integration note:**\nThe embed command runs async code. Either:\n- Use `#[tokio::main]` on main and propagate async through CLI dispatch\n- Or use `tokio::runtime::Runtime::new()` in the embed command handler\n\n## Acceptance Criteria\n- [ ] Command is async (embed_documents is async, health_check is async)\n- [ ] OllamaClient created from config.embedding settings\n- [ ] Health check runs first — clear error if Ollama down (exit code 14)\n- [ ] Clear error if model not found: \"Pull the model: ollama pull nomic-embed-text\" (exit code 15)\n- [ ] Embeds pending documents (no existing embeddings or stale content_hash)\n- [ ] --retry-failed re-attempts documents with last_error\n- [ ] Progress bar shows during embedding (indicatif)\n- [ ] embed_documents called with concurrency parameter\n- [ ] embed_documents called with progress_callback for progress bar\n- [ ] Human + JSON output\n- [ ] `cargo build` succeeds\n\n## Files\n- `src/cli/commands/embed.rs` — new file\n- `src/cli/commands/mod.rs` — add `pub mod embed;`\n- `src/cli/mod.rs` — add EmbedArgs, wire up embed subcommand\n- `src/main.rs` — add embed command handler (async dispatch)\n\n## TDD Loop\nRED: Integration test needing Ollama\nGREEN: Implement run_embed (async)\nVERIFY: `cargo build && cargo test embed`\n\n## Edge Cases\n- No documents in DB: \"No documents to embed\" (not error)\n- All 
documents already embedded and unchanged: \"0 documents to embed (all up to date)\"\n- Ollama goes down mid-embedding: pipeline records errors for remaining docs, returns partial result\n- --retry-failed with no failed docs: \"No failed documents to retry\"","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:34.126482Z","created_by":"tayloreernisse","updated_at":"2026-01-30T18:02:38.633115Z","closed_at":"2026-01-30T18:02:38.633055Z","close_reason":"Embed CLI command fully wired: EmbedArgs, Commands::Embed variant, handle_embed handler, clean build, all tests pass","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2sx","depends_on_id":"bd-am7","type":"blocks","created_at":"2026-01-30T15:29:24.766104Z","created_by":"tayloreernisse"}]} +{"id":"bd-2tl5","title":"Implement activity feed UNION ALL assembly","description":"## Background\nThe activity feed combines 5 event sources (human comments, state events, label events, milestone events, assignment system notes) into a single chronologically sorted feed. 
This bead assembles the UNION ALL query, executes it, and returns the final `Vec`.\n\n## Approach\nAdd the top-level activity fetch function in `src/cli/commands/me/activity.rs` (same file as bd-b3r3 and bd-2nl3):\n\n```rust\nuse rusqlite::{Connection, params};\nuse crate::core::error::Result;\nuse super::types::MeActivityItem;\n// ProjectScope from bd-a7ba\n\npub fn fetch_my_activity(\n conn: &Connection,\n username: &str,\n scope: &ProjectScope,\n since_ms: i64,\n) -> Result>\n```\n\n### Assembly\n\nCombine the 5 SQL source fragments from bd-b3r3 and bd-2nl3:\n```sql\nSELECT * FROM (\n {notes_sql}\n UNION ALL\n {state_events_sql}\n UNION ALL\n {label_events_sql}\n UNION ALL\n {milestone_events_sql}\n UNION ALL\n {assignment_notes_sql}\n) ORDER BY timestamp DESC\n```\n\n### Row mapping\n\nMap each row to `MeActivityItem`:\n```rust\nMeActivityItem {\n timestamp_iso: ms_to_iso(row.get::<_, i64>(\"timestamp\")?),\n event_type: row.get(\"event_type\")?,\n entity_type: row.get(\"entity_type\")?,\n entity_iid: row.get(\"entity_iid\")?,\n project_path: row.get(\"project_path\")?,\n actor: row.get::<_, Option>(\"actor\")?.unwrap_or_default(),\n is_own: row.get::<_, i64>(\"is_own\")? != 0,\n summary: row.get(\"summary\")?,\n body_preview: row.get(\"body_preview\")?,\n}\n```\n\nUse `crate::core::time::ms_to_iso(ms)` to convert ms epoch → ISO string.\n\n### Parameter binding\n\nAll 5 subqueries share the same parameters: `?username`, `?since_ms`, and optionally `?project_id`. Use named parameters (`?1`, `?2`, etc.) or build the SQL with the same positional params repeated. Since rusqlite doesn't support named params in UNION ALL easily, the cleanest approach is to use `format!()` to build the SQL string, then bind shared params. 
Be careful with SQL injection — the `username` param must ALWAYS be bound, never interpolated.\n\n## Acceptance Criteria\n- [ ] Combines all 5 activity sources into one result set via UNION ALL\n- [ ] Sorted by timestamp DESC — newest first (AC-9.3)\n- [ ] is_own flag correctly set: `row[\"is_own\"] != 0` (SQLite returns 0/1 for boolean expressions)\n- [ ] Only includes activity on items the user is CURRENTLY associated with (AC-3.6)\n- [ ] Respects --since window (only activity within the window)\n- [ ] Respects ProjectScope filtering\n- [ ] Includes activity on closed items (AC-5.4)\n- [ ] timestamp converted to ISO string via `ms_to_iso()`\n- [ ] Returns `Vec` with all fields populated\n- [ ] actor NULL handled gracefully (COALESCE or unwrap_or_default)\n\n## Files\n- MODIFY: src/cli/commands/me/activity.rs (add top-level assembly function)\n\n## TDD Anchor\nRED: Write `test_activity_feed_combines_all_sources` using in-memory DB. Insert:\n- A project, issue, issue_assignee(username=\"jdoe\")\n- A discussion + human note (event_type=\"note\")\n- A resource_state_event (event_type=\"status\")\n- A system note with \"assigned to @jdoe\" (event_type=\"assign\")\nCall `fetch_my_activity`. Assert result contains all 3 event types, sorted newest first.\n\nGREEN: Implement the UNION ALL assembly.\nVERIFY: `cargo test activity_feed_combines`\n\nAdditional tests:\n- test_activity_feed_sorted_newest_first (3 events at different timestamps, verify order)\n- test_activity_feed_respects_since (event before since_ms excluded)\n- test_activity_feed_only_current_associations (event on unassigned issue excluded)\n- test_activity_feed_includes_closed_items (closed issue with assignee still shows activity)\n- test_activity_feed_is_own_flag (actor=me → is_own=true, actor=other → is_own=false)\n\n## Edge Cases\n- Large activity feeds: the UNION ALL has no LIMIT — returns everything within the since window. 
SQLite handles this fine for typical use.\n- No duplication between sources: notes source uses is_system=0, assignment source uses is_system=1, resource events are separate tables entirely — no overlap.\n- The \"current association\" filter is critical: if user was unassigned from an issue, activity on it should NOT appear even if it was recent.\n- Empty result (no activity) → return empty Vec, not an error (AC-10.3).\n- `actor_username` can be NULL on resource events — use `unwrap_or_default()` to get empty string.\n\n## Dependency Context\nCombines SQL fragments from bd-b3r3 (note + resource event queries) and bd-2nl3 (assignment detection).\nUses `MeActivityItem` from bd-3bwh, `ProjectScope` from bd-a7ba, `ms_to_iso` from `crate::core::time`.\nCalled by the handler in bd-1vv8.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:38:33.907533Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.057699Z","closed_at":"2026-02-20T16:09:13.057656Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-2tl5","depends_on_id":"bd-2nl3","type":"blocks","created_at":"2026-02-19T19:41:18.365868Z","created_by":"tayloreernisse"},{"issue_id":"bd-2tl5","depends_on_id":"bd-b3r3","type":"blocks","created_at":"2026-02-19T19:41:18.288289Z","created_by":"tayloreernisse"}]} {"id":"bd-2tr4","title":"Epic: TUI Phase 1 — Foundation","description":"## Background\nPhase 1 builds the foundational infrastructure that all screens depend on: the full LoreApp Model implementation with key dispatch, navigation stack, task supervisor for async work management, theme configuration, common widgets, and the state/action architecture. 
Phase 1 deliverables are the skeleton that Phase 2 screens plug into.\n\n## Acceptance Criteria\n- [ ] LoreApp update() dispatches all Msg variants through 5-stage key pipeline\n- [ ] NavigationStack supports push/pop/forward/jump with state preservation\n- [ ] TaskSupervisor manages background tasks with dedup, cancellation, and generation IDs\n- [ ] Theme renders correctly with adaptive light/dark colors\n- [ ] Status bar, breadcrumb, loading, error toast, and help overlay widgets render\n- [ ] CommandRegistry is the single source of truth for keybindings/help/palette\n- [ ] AppState composition with per-screen states and LoadState map\n\n## Scope\nBlocked by Phase 0 (Toolchain Gate). Blocks Phase 2 (Core Screens).","status":"open","priority":1,"issue_type":"epic","created_at":"2026-02-12T16:55:02.650495Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:51.059729Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-2tr4","depends_on_id":"bd-1cj0","type":"blocks","created_at":"2026-02-12T18:11:51.059704Z","created_by":"tayloreernisse"}]} {"id":"bd-2ug","title":"[CP1] gi ingest --type=issues command","description":"CLI command to orchestrate issue ingestion.\n\n## Module\nsrc/cli/commands/ingest.rs\n\n## Clap Definition\n#[derive(Subcommand)]\npub enum Commands {\n Ingest {\n #[arg(long, value_parser = [\"issues\", \"merge_requests\"])]\n r#type: String,\n \n #[arg(long)]\n project: Option,\n \n #[arg(long)]\n force: bool,\n },\n}\n\n## Implementation\n1. Acquire app lock with heartbeat (respect --force for stale lock)\n2. Create sync_run record (status='running')\n3. For each configured project (or filtered --project):\n - Call orchestrator to ingest issues and discussions\n - Show progress (spinner or progress bar)\n4. Update sync_run (status='succeeded', metrics_json with counts)\n5. 
Release lock\n\n## Output Format\nIngesting issues...\n\n group/project-one: 1,234 issues fetched, 45 new labels\n\nFetching discussions (312 issues with updates)...\n\n group/project-one: 312 issues → 1,234 discussions, 5,678 notes\n\nTotal: 1,234 issues, 1,234 discussions, 5,678 notes (excluding 1,234 system notes)\nSkipped discussion sync for 922 unchanged issues.\n\n## Error Handling\n- Lock acquisition failure: exit with DatabaseLockError message\n- Network errors: show GitLabNetworkError, exit non-zero\n- Rate limiting: respect backoff, show progress\n\nFiles: src/cli/commands/ingest.rs, src/cli/commands/mod.rs\nTests: tests/integration/sync_runs_tests.rs\nDone when: Full issue + discussion ingestion works end-to-end","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T16:57:58.552504Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.875613Z","deleted_at":"2026-01-25T17:02:01.875607Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-2um","title":"[CP1] Epic: Issue Ingestion","description":"Ingest all issues, labels, and issue discussions from configured GitLab repositories with resumable cursor-based incremental sync. 
This checkpoint establishes the core data ingestion pattern that will be reused for MRs in Checkpoint 2.\n\n## Success Criteria\n- gi ingest --type=issues fetches all issues (count matches GitLab UI)\n- Labels extracted from issue payloads (name-only)\n- Label linkage reflects current GitLab state (removed labels unlinked on re-sync)\n- Issue discussions fetched per-issue (dependent sync)\n- Cursor-based sync is resumable (re-running fetches 0 new items)\n- Discussion sync skips unchanged issues (per-issue watermark)\n- Sync tracking records all runs (sync_runs table)\n- Single-flight lock prevents concurrent runs\n\n## Internal Gates\n- **Gate A**: Issues only - cursor + upsert + raw payloads + list/count/show working\n- **Gate B**: Labels correct - stale-link removal verified; label count matches GitLab\n- **Gate C**: Dependent discussion sync - watermark prevents redundant refetch; concurrency bounded\n- **Gate D**: Resumability proof - kill mid-run, rerun; bounded redo and no redundant discussion refetch\n\n## Reference\ndocs/prd/checkpoint-1.md","status":"closed","priority":1,"issue_type":"epic","created_at":"2026-01-25T17:02:38.075224Z","created_by":"tayloreernisse","updated_at":"2026-01-25T23:27:15.347364Z","closed_at":"2026-01-25T23:27:15.347317Z","close_reason":"CP1 Issue Ingestion complete: all sub-tasks done, 71 tests pass, CLI commands working","compaction_level":0,"original_size":0} @@ -181,6 +193,7 @@ {"id":"bd-31i","title":"Epic: CP2 Gate B - Labels + Assignees + Reviewers","description":"## Background\nGate B validates junction tables for labels, assignees, and reviewers. Ensures relationships are tracked correctly and stale links are removed on resync. 
This is critical for filtering (`--reviewer=alice`) and display.\n\n## Acceptance Criteria (Pass/Fail)\n- [ ] `mr_labels` table has rows for MRs with labels\n- [ ] Label count per MR matches GitLab UI (spot check 3 MRs)\n- [ ] `mr_assignees` table has rows for MRs with assignees\n- [ ] Assignee usernames match GitLab UI (spot check 3 MRs)\n- [ ] `mr_reviewers` table has rows for MRs with reviewers\n- [ ] Reviewer usernames match GitLab UI (spot check 3 MRs)\n- [ ] Remove label in GitLab -> resync -> link removed from mr_labels\n- [ ] Add reviewer in GitLab -> resync -> link added to mr_reviewers\n- [ ] `gi list mrs --label=bugfix` filters correctly\n- [ ] `gi list mrs --reviewer=alice` filters correctly\n\n## Validation Script\n```bash\n#!/bin/bash\nset -e\n\nDB_PATH=\"${XDG_DATA_HOME:-$HOME/.local/share}/gitlab-inbox/db.sqlite3\"\n\necho \"=== Gate B: Labels + Assignees + Reviewers ===\"\n\n# 1. Check label linkage exists\necho \"Step 1: Check label linkage...\"\nLABEL_LINKS=$(sqlite3 \"$DB_PATH\" \"SELECT COUNT(*) FROM mr_labels;\")\necho \" Total label links: $LABEL_LINKS\"\n\n# 2. Show sample label linkage\necho \"Step 2: Sample label linkage...\"\nsqlite3 \"$DB_PATH\" \"\n SELECT m.iid, GROUP_CONCAT(l.name, ', ') as labels\n FROM merge_requests m\n JOIN mr_labels ml ON ml.merge_request_id = m.id\n JOIN labels l ON l.id = ml.label_id\n GROUP BY m.id\n LIMIT 5;\n\"\n\n# 3. Check assignee linkage\necho \"Step 3: Check assignee linkage...\"\nASSIGNEE_LINKS=$(sqlite3 \"$DB_PATH\" \"SELECT COUNT(*) FROM mr_assignees;\")\necho \" Total assignee links: $ASSIGNEE_LINKS\"\n\n# 4. Show sample assignee linkage\necho \"Step 4: Sample assignee linkage...\"\nsqlite3 \"$DB_PATH\" \"\n SELECT m.iid, GROUP_CONCAT(a.username, ', ') as assignees\n FROM merge_requests m\n JOIN mr_assignees a ON a.merge_request_id = m.id\n GROUP BY m.id\n LIMIT 5;\n\"\n\n# 5. 
Check reviewer linkage\necho \"Step 5: Check reviewer linkage...\"\nREVIEWER_LINKS=$(sqlite3 \"$DB_PATH\" \"SELECT COUNT(*) FROM mr_reviewers;\")\necho \" Total reviewer links: $REVIEWER_LINKS\"\n\n# 6. Show sample reviewer linkage\necho \"Step 6: Sample reviewer linkage...\"\nsqlite3 \"$DB_PATH\" \"\n SELECT m.iid, GROUP_CONCAT(r.username, ', ') as reviewers\n FROM merge_requests m\n JOIN mr_reviewers r ON r.merge_request_id = m.id\n GROUP BY m.id\n LIMIT 5;\n\"\n\n# 7. Test filter commands\necho \"Step 7: Test filter commands...\"\n# Get a label that exists\nLABEL=$(sqlite3 \"$DB_PATH\" \"SELECT name FROM labels LIMIT 1;\")\nif [ -n \"$LABEL\" ]; then\n echo \" Testing --label=$LABEL\"\n gi list mrs --label=\"$LABEL\" --limit=3\nfi\n\n# Get a reviewer that exists\nREVIEWER=$(sqlite3 \"$DB_PATH\" \"SELECT username FROM mr_reviewers LIMIT 1;\")\nif [ -n \"$REVIEWER\" ]; then\n echo \" Testing --reviewer=$REVIEWER\"\n gi list mrs --reviewer=\"$REVIEWER\" --limit=3\nfi\n\necho \"\"\necho \"=== Gate B: PASSED ===\"\n```\n\n## Stale Link Removal Test (Manual)\n```bash\n# 1. Pick an MR with labels in GitLab UI\nMR_IID=123\n\n# 2. Note current label count\nsqlite3 \"$DB_PATH\" \"\n SELECT COUNT(*) FROM mr_labels ml\n JOIN merge_requests m ON m.id = ml.merge_request_id\n WHERE m.iid = $MR_IID;\n\"\n# Example: 3 labels\n\n# 3. Remove a label in GitLab UI (manually)\n\n# 4. Resync\ngi ingest --type=merge_requests\n\n# 5. 
Verify label removed\nsqlite3 \"$DB_PATH\" \"\n SELECT COUNT(*) FROM mr_labels ml\n JOIN merge_requests m ON m.id = ml.merge_request_id\n WHERE m.iid = $MR_IID;\n\"\n# Should be: 2 labels (one less)\n```\n\n## Test Commands (Quick Verification)\n```bash\n# Check counts:\nsqlite3 ~/.local/share/gitlab-inbox/db.sqlite3 \"\n SELECT \n (SELECT COUNT(*) FROM mr_labels) as label_links,\n (SELECT COUNT(*) FROM mr_assignees) as assignee_links,\n (SELECT COUNT(*) FROM mr_reviewers) as reviewer_links;\n\"\n\n# Test filtering:\ngi list mrs --label=enhancement --limit=5\ngi list mrs --reviewer=alice --limit=5\ngi list mrs --assignee=bob --limit=5\n```\n\n## Dependencies\nThis gate requires:\n- bd-ser (MR ingestion with label/assignee/reviewer linking via clear-and-relink pattern)\n- Gate A must pass first\n\n## Edge Cases\n- MRs with no labels/assignees/reviewers: junction tables should have no rows for that MR\n- Labels shared across issues and MRs: labels table is shared, only junction differs\n- Usernames are case-sensitive: `Alice` != `alice`","status":"closed","priority":3,"issue_type":"task","created_at":"2026-01-26T22:06:01.292318Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:48:21.059422Z","closed_at":"2026-01-27T00:48:21.059378Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-31i","depends_on_id":"bd-ser","type":"blocks","created_at":"2026-01-26T22:08:55.684769Z","created_by":"tayloreernisse"}]} {"id":"bd-31m","title":"[CP1] Test fixtures for mocked GitLab responses","description":"Create mock response files for integration tests.\n\nFixtures to create:\n- gitlab-issue.json (single issue with labels)\n- gitlab-issues-page.json (paginated list)\n- gitlab-discussion.json (single discussion with notes)\n- gitlab-discussions-page.json (paginated list)\n\nInclude edge cases:\n- Issue with labels_details\n- Issue with no labels\n- Discussion with individual_note=true\n- System notes with system=true\n\nFiles: 
tests/fixtures/mock-responses/gitlab-issue*.json, gitlab-discussion*.json\nDone when: MSW handlers can use fixtures for deterministic tests","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T15:20:43.781288Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.155480Z","deleted_at":"2026-01-25T15:21:35.155478Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-327","title":"[CP0] Project scaffold","description":"## Background\n\nThis is the foundational scaffold for the GitLab Inbox CLI tool. Every subsequent bead depends on having the correct project structure, TypeScript configuration, and tooling in place. The configuration choices here (ESM modules, strict TypeScript, Vitest for testing) set constraints for all future code.\n\n## Approach\n\nCreate a Node.js 20+ ESM project with TypeScript strict mode. Use flat ESLint config (v9+) with TypeScript plugin. Configure Vitest with coverage. 
Create the directory structure matching the PRD exactly.\n\n**package.json essentials:**\n- `\"type\": \"module\"` for ESM\n- `\"bin\": { \"gi\": \"./dist/cli/index.js\" }` for CLI entry point\n- Runtime deps: better-sqlite3, sqlite-vec, commander, zod, pino, pino-pretty, ora, chalk, cli-table3, inquirer\n- Dev deps: typescript, @types/better-sqlite3, @types/node, vitest, msw, eslint, @typescript-eslint/*\n\n**tsconfig.json:**\n- `target: ES2022`, `module: Node16`, `moduleResolution: Node16`\n- `strict: true`, `noImplicitAny: true`, `strictNullChecks: true`\n- `outDir: ./dist`, `rootDir: ./src`\n\n**vitest.config.ts:**\n- Exclude `tests/live/**` unless `GITLAB_LIVE_TESTS=1`\n- Coverage with v8 provider\n\n## Acceptance Criteria\n\n- [ ] `npm install` completes without errors\n- [ ] `npm run build` compiles TypeScript to dist/\n- [ ] `npm run test` runs vitest (0 tests is fine at this stage)\n- [ ] `npm run lint` runs ESLint with no config errors\n- [ ] All directories exist: src/cli/commands/, src/core/, src/gitlab/, src/types/, tests/unit/, tests/integration/, tests/live/, tests/fixtures/mock-responses/, migrations/\n\n## Files\n\nCREATE:\n- package.json\n- tsconfig.json\n- vitest.config.ts\n- eslint.config.js\n- .gitignore\n- src/cli/index.ts (empty placeholder with shebang)\n- src/cli/commands/.gitkeep\n- src/core/.gitkeep\n- src/gitlab/.gitkeep\n- src/types/index.ts (empty)\n- tests/unit/.gitkeep\n- tests/integration/.gitkeep\n- tests/live/.gitkeep\n- tests/fixtures/mock-responses/.gitkeep\n- migrations/.gitkeep\n\n## TDD Loop\n\nN/A - scaffold only. 
Verify with:\n\n```bash\nnpm install\nnpm run build\nnpm run lint\nnpm run test\n```\n\n## Edge Cases\n\n- Node.js version < 20 will fail on ESM features - add `engines` field\n- better-sqlite3 requires native compilation - may need python/build-essential\n- sqlite-vec installation can fail on some platforms - document fallback","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:47.955044Z","created_by":"tayloreernisse","updated_at":"2026-01-25T02:51:25.347932Z","closed_at":"2026-01-25T02:51:25.347799Z","compaction_level":0,"original_size":0} +{"id":"bd-32aw","title":"Implement error handling paths for me command","description":"## Background\n`lore me` has several expected user-facing failure modes that are usage/data conditions, not internal faults. This bead ensures those paths return correct exit codes and structured error output in both human and robot modes.\n\nThis spec assumes runtime usage errors are represented by `LoreError::UsageError` mapped to `ErrorCode::UsageError` (exit code 2, robot code `USAGE_ERROR`) from `bd-1f1f`.\n\n## Approach\nValidate and harden these paths in the handler and shared error formatting:\n\n1. **No username configured**\n- source: username resolver\n- error: `LoreError::UsageError`\n- exit: `2`\n- message includes: configure `gitlab.username` or pass `--user`\n\n2. **No synced data**\n- source: `projects` table count is zero\n- error: `LoreError::NotFound(\"No synced data. Run 'lore sync' first.\")`\n- exit: `17`\n\n3. **Invalid scope flags (`--project` + `--all`)**\n- source: scope resolver\n- error: `LoreError::UsageError`\n- exit: `2`\n\n4. **Invalid `--since` value**\n- source: parse failure in handler\n- error: `LoreError::UsageError`\n- exit: `2`\n\n5. 
**Empty results**\n- not an error\n- render zero-state dashboard normally\n\nRobot mode must emit error envelope on stderr with deterministic code/message/suggestion, including `USAGE_ERROR` for usage failures.\n\n## Acceptance Criteria\n- [ ] No username path returns exit 2 with actionable usage message\n- [ ] `--project` + `--all` returns exit 2\n- [ ] Invalid `--since` returns exit 2\n- [ ] No synced data returns exit 17 and sync suggestion\n- [ ] Empty results render dashboard (not error)\n- [ ] Robot stderr envelope includes correct `error.code` (`USAGE_ERROR` or `NOT_FOUND`)\n- [ ] Human-mode stderr is readable and actionable\n- [ ] No usage-path error falls through to internal error exit 1\n\n## Files\n- MODIFY: `src/cli/commands/me/mod.rs`\n- MODIFY: `src/core/error.rs`\n- MODIFY: `src/main.rs` (if command-level error routing needs explicit usage handling)\n- MODIFY: `src/cli/robot.rs` (only if code mapping/suggestion formatting requires extension)\n\n## TDD Anchor\nRED:\n- `test_no_username_exits_2`\n- `test_project_all_conflict_exits_2`\n- `test_invalid_since_exits_2`\n- `test_no_synced_data_exits_17`\n- `test_empty_results_not_error`\n- `test_robot_usage_error_envelope`\n\nGREEN:\n- Wire usage/data errors to explicit `LoreError` variants.\n- Ensure formatter emits consistent code and suggestion.\n\nVERIFY:\n- `cargo test me_error_paths`\n\n## Edge Cases\n- A DB with projects but no matching items is not \"no synced data\"; it is a valid empty dashboard.\n- Error messages should not leak SQL internals to end users.\n\n## Dependency Context\nConsumes handler wiring from `bd-1vv8` and error type from `bd-1f1f`.\nFinalizes AC-10 compliance for `lore me`.\n\nDependencies:\n -> bd-1vv8 (blocks) - Implement me command handler: wire queries to 
renderers","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:40:59.654948Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.066507Z","closed_at":"2026-02-20T16:09:13.066472Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-32aw","depends_on_id":"bd-1vv8","type":"blocks","created_at":"2026-02-19T19:41:28.811394Z","created_by":"tayloreernisse"}]} {"id":"bd-32mc","title":"OBSERV: Implement log retention cleanup at startup","description":"## Background\nLog files accumulate at ~1-10 MB/day. Without cleanup, they grow unbounded. Retention runs BEFORE subscriber init so deleted file handles aren't held open by the appender.\n\n## Approach\nAdd a cleanup function, called from main.rs before the subscriber is initialized (before current line 44):\n\n```rust\n/// Delete log files older than retention_days.\n/// Matches files named lore.YYYY-MM-DD.log in the log directory.\npub fn cleanup_old_logs(log_dir: &Path, retention_days: u32) -> std::io::Result {\n if retention_days == 0 {\n return Ok(0); // 0 means file logging disabled, don't delete\n }\n let cutoff = SystemTime::now() - Duration::from_secs(u64::from(retention_days) * 86400);\n let mut deleted = 0;\n\n for entry in std::fs::read_dir(log_dir)? {\n let entry = entry?;\n let name = entry.file_name();\n let name_str = name.to_string_lossy();\n\n // Only match lore.YYYY-MM-DD.log pattern\n if !name_str.starts_with(\"lore.\") || !name_str.ends_with(\".log\") {\n continue;\n }\n\n if let Ok(metadata) = entry.metadata() {\n if let Ok(modified) = metadata.modified() {\n if modified < cutoff {\n std::fs::remove_file(entry.path())?;\n deleted += 1;\n }\n }\n }\n }\n Ok(deleted)\n}\n```\n\nPlace this function in src/core/paths.rs (next to get_log_dir) or a new src/core/log_retention.rs. 
Prefer paths.rs since it's small and related.\n\nCall from main.rs:\n```rust\nlet log_dir = get_log_dir(config.logging.log_dir.as_deref());\nlet _ = cleanup_old_logs(&log_dir, config.logging.retention_days);\n// THEN init subscriber\n```\n\nNote: Config must be loaded before cleanup runs. Current main.rs parses Cli at line 60, but config loading happens inside command handlers. This means we need to either:\n A) Load config early in main() before subscriber init (preferred)\n B) Defer cleanup to after config load\n\nSince the subscriber must also know log_dir, approach A is natural: load config -> cleanup -> init subscriber -> dispatch command.\n\n## Acceptance Criteria\n- [ ] Files matching lore.*.log older than retention_days are deleted\n- [ ] Files matching lore.*.log within retention_days are preserved\n- [ ] Non-matching files (e.g., other.txt) are never deleted\n- [ ] retention_days=0 skips cleanup entirely (no files deleted)\n- [ ] Errors on individual files don't prevent cleanup of remaining files\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/core/paths.rs (add cleanup_old_logs function)\n- src/main.rs (call cleanup before subscriber init)\n\n## TDD Loop\nRED:\n - test_log_retention_cleanup: create tempdir with lore.2026-01-01.log through lore.2026-02-04.log, run with retention_days=7, assert old deleted, recent preserved\n - test_log_retention_ignores_non_log_files: create other.txt alongside old log files, assert other.txt untouched\n - test_log_retention_zero_days: retention_days=0, assert nothing deleted\nGREEN: Implement cleanup_old_logs\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- SystemTime::now() precision varies by OS; use file modified time, not name parsing (simpler and more reliable)\n- read_dir on non-existent directory: get_log_dir creates it first, so this shouldn't happen. 
But handle gracefully.\n- Permissions error on individual file: log a warning, continue with remaining files (don't propagate)\n- Race condition: another process creates a file during cleanup. Not a concern -- we only delete old files.","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:53:55.627901Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:15:04.452086Z","closed_at":"2026-02-04T17:15:04.452039Z","close_reason":"Implemented cleanup_old_logs() with date-pattern matching and retention_days config, runs at startup before subscriber init","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-32mc","depends_on_id":"bd-17n","type":"blocks","created_at":"2026-02-04T15:55:19.523048Z","created_by":"tayloreernisse"},{"issue_id":"bd-32mc","depends_on_id":"bd-1k4","type":"blocks","created_at":"2026-02-04T15:55:19.583155Z","created_by":"tayloreernisse"},{"issue_id":"bd-32mc","depends_on_id":"bd-2nx","type":"parent-child","created_at":"2026-02-04T15:53:55.628795Z","created_by":"tayloreernisse"}]} {"id":"bd-32q","title":"Implement timeline seed phase: FTS5 keyword search to entity IDs","description":"## Background\n\nThe seed phase is steps 1-2 of the timeline pipeline (spec Section 3.2): SEED + HYDRATE. 
It converts a keyword query into entity IDs via FTS5 search and collects evidence note candidates.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 3.2 steps 1-2.\n\n## Codebase Context\n\n- FTS5 index exists: documents_fts table (migration 008)\n- documents table: id, source_type ('issue'|'merge_request'|'discussion'), source_id, project_id, created_at, content\n- discussions table: id, issue_id, merge_request_id\n- notes table: discussion_id, author_username, body, created_at, is_system, id (note_id)\n- Safe FTS query builder: src/search/fts.rs has to_fts_query(raw, FtsQueryMode::Safe) for sanitizing user input\n- projects table: path_with_namespace\n- issues/merge_requests: iid, project_id\n\n## Approach\n\nCreate `src/core/timeline_seed.rs`:\n\n```rust\nuse crate::core::timeline::{EntityRef, TimelineEvent, TimelineEventType};\nuse rusqlite::Connection;\n\npub struct SeedResult {\n pub seed_entities: Vec,\n pub evidence_notes: Vec, // NoteEvidence events\n}\n\npub fn seed_timeline(\n conn: &Connection,\n query: &str,\n project_id: Option,\n since_ms: Option,\n max_seeds: usize, // default 50\n) -> Result { ... 
}\n```\n\n### SQL for SEED + HYDRATE (entity discovery):\n```sql\nSELECT DISTINCT d.source_type, d.source_id, d.project_id,\n CASE d.source_type\n WHEN 'issue' THEN (SELECT iid FROM issues WHERE id = d.source_id)\n WHEN 'merge_request' THEN (SELECT iid FROM merge_requests WHERE id = d.source_id)\n WHEN 'discussion' THEN NULL -- discussions map to parent entity below\n END AS iid,\n CASE d.source_type\n WHEN 'issue' THEN (SELECT p.path_with_namespace FROM projects p JOIN issues i ON i.project_id = p.id WHERE i.id = d.source_id)\n WHEN 'merge_request' THEN (SELECT p.path_with_namespace FROM projects p JOIN merge_requests m ON m.project_id = p.id WHERE m.id = d.source_id)\n WHEN 'discussion' THEN NULL\n END AS project_path\nFROM documents_fts fts\nJOIN documents d ON d.id = fts.rowid\nWHERE documents_fts MATCH ?1\n AND (?2 IS NULL OR d.project_id = ?2)\nORDER BY rank\nLIMIT ?3\n```\n\nFor 'discussion' source_type: resolve to parent entity via discussions.issue_id or discussions.merge_request_id.\n\n### SQL for evidence notes (top 10 FTS5-matched notes):\n```sql\nSELECT n.id as note_id, n.body, n.created_at, n.author_username,\n disc.id as discussion_id,\n CASE WHEN disc.issue_id IS NOT NULL THEN 'issue' ELSE 'merge_request' END as parent_type,\n COALESCE(disc.issue_id, disc.merge_request_id) AS parent_entity_id\nFROM documents_fts fts\nJOIN documents d ON d.id = fts.rowid\nJOIN discussions disc ON disc.id = d.source_id AND d.source_type = 'discussion'\nJOIN notes n ON n.discussion_id = disc.id AND n.is_system = 0\nWHERE documents_fts MATCH ?1\nORDER BY rank\nLIMIT 10\n```\n\nEvidence notes become TimelineEvent with:\n- event_type: NoteEvidence { note_id, snippet (first 200 chars), discussion_id }\n- Use to_fts_query(query, FtsQueryMode::Safe) to sanitize user input before MATCH\n\nRegister in `src/core/mod.rs`: `pub mod timeline_seed;`\n\n## Acceptance Criteria\n\n- [ ] seed_timeline() returns entities from FTS5 search\n- [ ] Entities deduplicated (same entity from 
multiple docs appears once)\n- [ ] Discussion documents resolved to parent entity (issue or MR)\n- [ ] Evidence notes capped at 10\n- [ ] Evidence note snippets truncated to 200 chars (safe UTF-8 boundary)\n- [ ] Uses to_fts_query(query, FtsQueryMode::Safe) for input sanitization\n- [ ] --since filter works\n- [ ] -p filter works\n- [ ] Empty result for zero-match queries (not error)\n- [ ] Module registered in src/core/mod.rs\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n\n## Files\n\n- `src/core/timeline_seed.rs` (NEW)\n- `src/core/mod.rs` (add `pub mod timeline_seed;`)\n\n## TDD Loop\n\nRED:\n- `test_seed_deduplicates_entities`\n- `test_seed_resolves_discussion_to_parent`\n- `test_seed_empty_query_returns_empty`\n- `test_seed_evidence_capped_at_10`\n- `test_seed_evidence_snippet_truncated`\n- `test_seed_respects_since_filter`\n\nTests need in-memory DB with migrations 001-014 + documents/FTS test data.\n\nGREEN: Implement FTS5 queries and deduplication.\n\nVERIFY: `cargo test --lib -- timeline_seed`\n\n## Edge Cases\n\n- FTS5 MATCH invalid syntax: to_fts_query(query, FtsQueryMode::Safe) sanitizes\n- Discussion orphans: LEFT JOIN handles deleted notes\n- UTF-8 truncation: use char_indices() to find safe 200-char boundary\n- Discussion source resolving to both issue_id and merge_request_id: prefer issue_id (shouldn't happen but be defensive)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:33:08.615908Z","created_by":"tayloreernisse","updated_at":"2026-02-05T21:47:07.966488Z","closed_at":"2026-02-05T21:47:07.966437Z","close_reason":"Completed: Created src/core/timeline_seed.rs with seed_timeline() function. FTS5 search to entity IDs with discussion-to-parent resolution, entity deduplication, evidence note extraction (capped, snippet-truncated). 12 tests pass. 
All quality gates pass.","compaction_level":0,"original_size":0,"labels":["gate-3","phase-b","query"],"dependencies":[{"issue_id":"bd-32q","depends_on_id":"bd-20e","type":"blocks","created_at":"2026-02-02T21:33:37.368005Z","created_by":"tayloreernisse"},{"issue_id":"bd-32q","depends_on_id":"bd-ike","type":"parent-child","created_at":"2026-02-02T21:33:08.617483Z","created_by":"tayloreernisse"}]} {"id":"bd-335","title":"Implement Ollama API client","description":"## Background\nThe Ollama API client provides the HTTP interface to the local Ollama embedding server. It handles health checks (is Ollama running? does the model exist?), batch embedding requests (up to 32 texts per call), and error translation to LoreError variants. This is the lowest-level embedding component — the pipeline (bd-am7) builds on top of it.\n\n## Approach\nCreate \\`src/embedding/ollama.rs\\` per PRD Section 4.2. **Uses async reqwest (not blocking).**\n\n```rust\nuse reqwest::Client; // NOTE: async Client, not reqwest::blocking\nuse serde::{Deserialize, Serialize};\nuse crate::core::error::{LoreError, Result};\n\npub struct OllamaConfig {\n pub base_url: String, // default \\\"http://localhost:11434\\\"\n pub model: String, // default \\\"nomic-embed-text\\\"\n pub timeout_secs: u64, // default 60\n}\n\nimpl Default for OllamaConfig { /* PRD defaults */ }\n\npub struct OllamaClient {\n client: Client, // async reqwest::Client\n config: OllamaConfig,\n}\n\n#[derive(Serialize)]\nstruct EmbedRequest { model: String, input: Vec }\n\n#[derive(Deserialize)]\nstruct EmbedResponse { model: String, embeddings: Vec> }\n\n#[derive(Deserialize)]\nstruct TagsResponse { models: Vec }\n\n#[derive(Deserialize)]\nstruct ModelInfo { name: String }\n\nimpl OllamaClient {\n pub fn new(config: OllamaConfig) -> Self;\n\n /// Async health check: GET /api/tags\n /// Model matched via starts_with (\\\"nomic-embed-text\\\" matches \\\"nomic-embed-text:latest\\\")\n pub async fn health_check(&self) -> Result<()>;\n\n 
/// Async batch embedding: POST /api/embed\n /// Input: Vec of texts, Response: Vec> of 768-dim embeddings\n pub async fn embed_batch(&self, texts: Vec) -> Result>>;\n}\n\n/// Quick health check without full client (async).\npub async fn check_ollama_health(base_url: &str) -> bool;\n```\n\n**Error mapping (per PRD):**\n- Connection refused/timeout -> LoreError::OllamaUnavailable { base_url, source: Some(e) }\n- Model not in /api/tags -> LoreError::OllamaModelNotFound { model }\n- Non-200 from /api/embed -> LoreError::EmbeddingFailed { document_id: 0, reason: format!(\\\"HTTP {}: {}\\\", status, body) }\n\n**Key PRD detail:** Model matching uses \\`starts_with\\` (not exact match) so \\\"nomic-embed-text\\\" matches \\\"nomic-embed-text:latest\\\".\n\n## Acceptance Criteria\n- [ ] Uses async reqwest::Client (not blocking)\n- [ ] health_check() is async, detects server availability and model presence\n- [ ] Model matched via starts_with (handles \\\":latest\\\" suffix)\n- [ ] embed_batch() is async, sends POST /api/embed\n- [ ] Batch size up to 32 texts\n- [ ] Returns Vec> with 768 dimensions each\n- [ ] OllamaUnavailable error includes base_url and source error\n- [ ] OllamaModelNotFound error includes model name\n- [ ] Non-200 response mapped to EmbeddingFailed with status + body\n- [ ] Timeout: 60 seconds default (configurable via OllamaConfig)\n- [ ] \\`cargo build\\` succeeds\n\n## Files\n- \\`src/embedding/ollama.rs\\` — new file\n- \\`src/embedding/mod.rs\\` — add \\`pub mod ollama;\\` and re-exports\n\n## TDD Loop\nRED: Tests (unit tests with mock, integration needs Ollama):\n- \\`test_config_defaults\\` — verify default base_url, model, timeout\n- \\`test_health_check_model_starts_with\\` — \\\"nomic-embed-text\\\" matches \\\"nomic-embed-text:latest\\\"\n- \\`test_embed_batch_parse\\` — mock response parsed correctly\n- \\`test_connection_error_maps_to_ollama_unavailable\\`\nGREEN: Implement OllamaClient\nVERIFY: \\`cargo test ollama\\`\n\n## Edge Cases\n- 
Ollama returns model name with version tag (\\\"nomic-embed-text:latest\\\"): starts_with handles this\n- Empty texts array: send empty batch, Ollama returns empty embeddings\n- Ollama returns wrong number of embeddings (2 texts, 1 embedding): caller (pipeline) validates\n- Non-JSON response: reqwest deserialization error -> wrap appropriately","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:34.025099Z","created_by":"tayloreernisse","updated_at":"2026-01-30T16:58:17.546852Z","closed_at":"2026-01-30T16:58:17.546794Z","close_reason":"Completed: OllamaClient with async health_check (starts_with model matching), embed_batch, error mapping to LoreError variants, check_ollama_health helper, 4 tests pass","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-335","depends_on_id":"bd-ljf","type":"blocks","created_at":"2026-01-30T15:29:24.627951Z","created_by":"tayloreernisse"}]} @@ -204,6 +217,7 @@ {"id":"bd-3bec","title":"Wire surgical dispatch in run_sync and update robot-docs","description":"## Background\n\nThe existing `run_sync` function (lines 63-360 of `src/cli/commands/sync.rs`) handles the normal full-sync pipeline. Once `run_sync_surgical` (bd-1i4i) is implemented, this bead wires the dispatch: when `SyncOptions` contains issue or MR IIDs, route to the surgical path instead of the normal path. This also requires updating `handle_sync_cmd` (line 2120 of `src/main.rs`) to pass through the new CLI fields (bd-1lja), and updating the robot-docs schema to document the new surgical response fields.\n\n## Approach\n\nThree changes:\n\n**1. 
Dispatch in `run_sync` (src/cli/commands/sync.rs)**\n\nAdd an early check at the top of `run_sync` (after line 68):\n\n```rust\npub async fn run_sync(\n config: &Config,\n options: SyncOptions,\n run_id: Option<&str>,\n signal: &ShutdownSignal,\n) -> Result {\n // Surgical dispatch: if any IIDs specified, route to surgical pipeline\n if options.is_surgical() {\n return run_sync_surgical(config, options, run_id, signal).await;\n }\n\n // ... existing normal sync pipeline unchanged ...\n}\n```\n\n**2. Update `handle_sync_cmd` (src/main.rs line 2120)**\n\nPass new fields from `SyncArgs` into `SyncOptions`:\n\n```rust\nlet options = SyncOptions {\n full: args.full && !args.no_full,\n force: args.force && !args.no_force,\n no_embed: args.no_embed,\n no_docs: args.no_docs,\n no_events: args.no_events,\n robot_mode,\n dry_run,\n // New surgical fields (from bd-1lja)\n issue_iids: args.issue.clone(),\n mr_iids: args.mr.clone(),\n project: args.project.clone(),\n preflight_only: args.preflight_only,\n};\n```\n\nAlso: when surgical mode is detected (issues/MRs non-empty), skip the normal SyncRunRecorder setup in `handle_sync_cmd` since `run_sync_surgical` manages its own recorder.\n\n**3. Update robot-docs (src/main.rs handle_robot_docs)**\n\nAdd documentation for the surgical sync response format. The robot-docs output should include:\n- New CLI flags: `--issue`, `--mr`, `-p`/`--project`, `--preflight-only`\n- Surgical response fields: `surgical_mode`, `surgical_iids`, `entity_results`, `preflight_only`\n- `EntitySyncResult` schema: `entity_type`, `iid`, `outcome`, `error`, `toctou_reason`\n- Exit codes for surgical-specific errors\n\n## Acceptance Criteria\n\n1. `lore sync --issue 7 -p group/project` dispatches to `run_sync_surgical`, not normal sync\n2. `lore sync` (no IIDs) follows the existing normal pipeline unchanged\n3. `handle_sync_cmd` passes `issues`, `merge_requests`, `project`, `preflight_only` from args to options\n4. 
`lore robot-docs` output includes surgical sync documentation\n5. All existing sync tests pass without modification\n6. Robot mode JSON output for surgical sync matches documented schema\n\n## Files\n\n- `src/cli/commands/sync.rs` — add dispatch check at top of `run_sync`, add `use super::sync_surgical::run_sync_surgical`\n- `src/main.rs` — update `handle_sync_cmd` to pass new fields, update robot-docs text\n- `src/cli/commands/mod.rs` — ensure `sync_surgical` module is public (may already be done by bd-1i4i)\n\n## TDD Anchor\n\nTests in `src/cli/commands/sync.rs` or a companion test file:\n\n```rust\n#[cfg(test)]\nmod dispatch_tests {\n use super::*;\n\n #[test]\n fn sync_options_with_issues_is_surgical() {\n let options = SyncOptions {\n issue_iids: vec![7],\n ..SyncOptions::default()\n };\n assert!(options.is_surgical());\n }\n\n #[test]\n fn sync_options_without_iids_is_normal() {\n let options = SyncOptions::default();\n assert!(!options.is_surgical());\n }\n\n #[test]\n fn sync_options_with_mrs_is_surgical() {\n let options = SyncOptions {\n mr_iids: vec![10, 20],\n ..SyncOptions::default()\n };\n assert!(options.is_surgical());\n }\n\n #[tokio::test]\n async fn dispatch_routes_to_surgical_when_issues_present() {\n // Integration-level test: verify run_sync with IIDs calls surgical path.\n // This test uses wiremock to mock the surgical path's GitLab calls.\n // The key assertion: when options.issue_iids is non-empty, the function\n // does NOT attempt the normal ingest flow (no project cursor queries).\n let server = wiremock::MockServer::start().await;\n wiremock::Mock::given(wiremock::matchers::method(\"GET\"))\n .and(wiremock::matchers::path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(wiremock::ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([{\n \"id\": 100, \"iid\": 7, \"project_id\": 1, \"title\": \"Test\",\n \"state\": \"opened\", \"created_at\": \"2026-01-01T00:00:00Z\",\n \"updated_at\": \"2026-02-17T00:00:00Z\",\n 
\"author\": {\"id\": 1, \"username\": \"dev\", \"name\": \"Dev\"},\n \"web_url\": \"https://gitlab.example.com/group/project/-/issues/7\"\n }])))\n .mount(&server).await;\n\n let mut config = Config::default();\n config.gitlab.url = server.uri();\n config.gitlab.token = \"test-token\".to_string();\n let options = SyncOptions {\n issue_iids: vec![7],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n let result = run_sync(&config, options, Some(\"dispatch-test\"), &signal).await;\n\n // Should succeed via surgical path (or at least not panic from normal path)\n assert!(result.is_ok());\n let r = result.unwrap();\n assert_eq!(r.surgical_mode, Some(true));\n }\n\n #[test]\n fn robot_docs_includes_surgical_sync() {\n // Verify the robot-docs string contains surgical sync documentation\n // This tests the static text, not runtime behavior\n let docs = include_str!(\"../../../src/main.rs\");\n // The robot-docs handler should mention surgical sync\n // (Actual assertion depends on how robot-docs are generated)\n }\n}\n```\n\n## Edge Cases\n\n- **Dry-run + surgical**: `handle_sync_cmd` currently short-circuits dry-run before SyncRunRecorder setup (line 2149). Surgical dry-run should also short-circuit, but preflight-only is the surgical equivalent. Clarify: `--dry-run --issue 7` should be treated as `--preflight-only --issue 7`.\n- **Normal sync recorder vs surgical recorder**: `handle_sync_cmd` creates a `SyncRunRecorder` for normal sync (line 2159). When dispatching to surgical, skip this since `run_sync_surgical` creates its own. Use `!options.is_surgical()` to decide.\n- **Robot-docs backward compatibility**: New fields are additive. 
Existing robot-docs consumers that ignore unknown fields are unaffected.\n- **No project specified with IIDs**: If `--issue 7` is passed without `-p project`, the dispatch should fail with a clear usage error (validation in bd-1lja).\n\n## Dependency Context\n\n- **Depends on (upstream)**: bd-1i4i (the `run_sync_surgical` function to call), bd-1lja (SyncOptions extensions with `issues`, `merge_requests`, `project`, `preflight_only` fields), bd-wcja (SyncResult surgical fields for assertion)\n- **No downstream dependents** — this is the final wiring bead for the main code path.\n- Must NOT modify the normal sync pipeline behavior. The dispatch is a pure conditional branch at function entry.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-17T19:18:10.648172Z","created_by":"tayloreernisse","updated_at":"2026-02-18T20:36:35.149830Z","closed_at":"2026-02-18T20:36:35.149779Z","close_reason":"Surgical dispatch wired: run_sync routes to run_sync_surgical when is_surgical(), handle_sync_cmd skips recorder for surgical mode, dry-run+surgical→preflight-only, removed wrong embed validation, robot-docs updated with surgical schema","compaction_level":0,"original_size":0,"labels":["surgical-sync"]} {"id":"bd-3bo","title":"[CP1] gi count issues/discussions/notes commands","description":"Count entities in the database.\n\nCommands:\n- gi count issues → 'Issues: N'\n- gi count discussions --type=issue → 'Issue Discussions: N'\n- gi count notes --type=issue → 'Issue Notes: N (excluding M system)'\n\nFiles: src/cli/commands/count.ts\nDone when: Counts match expected values from GitLab","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T15:20:16.190875Z","created_by":"tayloreernisse","updated_at":"2026-01-25T15:21:35.156293Z","deleted_at":"2026-01-25T15:21:35.156290Z","deleted_by":"tayloreernisse","delete_reason":"delete","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-3bpk","title":"NOTE-0A: Upsert/sweep 
for issue discussion notes","description":"## Background\nIssue discussion note ingestion uses a delete/reinsert pattern (DELETE FROM notes WHERE discussion_id = ? at line 132-135 of src/ingestion/discussions.rs then re-insert). This makes notes.id unstable across syncs. MR discussion notes already use upsert (ON CONFLICT(gitlab_id) DO UPDATE at line 470-536 of src/ingestion/mr_discussions.rs) producing stable IDs. Phase 2 depends on stable notes.id as source_id for note documents.\n\n## Approach\nRefactor src/ingestion/discussions.rs to match the MR pattern in src/ingestion/mr_discussions.rs:\n\n1. Create shared NoteUpsertOutcome struct (in src/ingestion/discussions.rs, also used by mr_discussions.rs):\n pub struct NoteUpsertOutcome { pub local_note_id: i64, pub changed_semantics: bool }\n\n2. Replace insert_note() (line 201-233) with upsert_note_for_issue(). Current signature is:\n fn insert_note(conn: &Connection, discussion_id: i64, note: &NormalizedNote, payload_id: Option) -> Result<()>\n New signature:\n fn upsert_note_for_issue(conn: &Connection, discussion_id: i64, note: &NormalizedNote, last_seen_at: i64, payload_id: Option) -> Result\n\n Use ON CONFLICT(gitlab_id) DO UPDATE SET body, note_type, updated_at, last_seen_at, resolvable, resolved, resolved_by, resolved_at, position_old_path, position_new_path, position_old_line, position_new_line, position_type, position_line_range_start, position_line_range_end, position_base_sha, position_start_sha, position_head_sha\n\n IMPORTANT: The current issue insert_note() only populates: gitlab_id, discussion_id, project_id, note_type, is_system, author_username, body, created_at, updated_at, last_seen_at, position (integer array order), resolvable, resolved, resolved_by, resolved_at, raw_payload_id. It does NOT populate the decomposed position columns (position_new_path, etc.). The MR upsert_note() at line 470 DOES populate all decomposed position columns. Your upsert must include ALL columns from the MR pattern. 
The NormalizedNote struct (from src/gitlab/transformers.rs) has all position fields.\n\n3. Change detection via pre-read: SELECT existing note before upsert, compare semantic fields (body, note_type, resolved, resolved_by, positions). Exclude updated_at/last_seen_at from semantic comparison. Use IS NOT for NULL-safe comparison.\n\n4. Add sweep_stale_issue_notes(conn, discussion_id, last_seen_at) — DELETE FROM notes WHERE discussion_id = ? AND last_seen_at < ?\n\n5. Replace the delete-reinsert loop (lines 132-139) with:\n for note in notes { let outcome = upsert_note_for_issue(&tx, local_discussion_id, ¬e, last_seen_at, None)?; }\n sweep_stale_issue_notes(&tx, local_discussion_id, last_seen_at)?;\n\n6. Update upsert_note() in mr_discussions.rs (line 470) to return NoteUpsertOutcome with same semantic change detection. Current signature returns Result<()>.\n\nReference files:\n- src/ingestion/mr_discussions.rs: upsert_note() line 470, sweep_stale_notes() line 551\n- src/ingestion/discussions.rs: insert_note() line 201, delete pattern line 132-135\n- src/gitlab/transformers.rs: NormalizedNote struct definition\n\n## Files\n- MODIFY: src/ingestion/discussions.rs (refactor insert_note -> upsert + sweep, lines 132-233)\n- MODIFY: src/ingestion/mr_discussions.rs (return NoteUpsertOutcome from upsert_note at line 470)\n\n## TDD Anchor\nRED: test_issue_note_upsert_stable_id — insert 2 notes, record IDs, re-sync same gitlab_ids, assert IDs unchanged.\nGREEN: Implement upsert_note_for_issue with ON CONFLICT.\nVERIFY: cargo test upsert_stable_id -- --nocapture\nTests: test_issue_note_upsert_detects_body_change, test_issue_note_upsert_unchanged_returns_false, test_issue_note_upsert_updated_at_only_does_not_mark_semantic_change, test_issue_note_sweep_removes_stale, test_issue_note_upsert_returns_local_id\n\n## Acceptance Criteria\n- [ ] upsert_note_for_issue() uses ON CONFLICT(gitlab_id) DO UPDATE\n- [ ] Local note IDs stable across re-syncs of identical data\n- [ ] 
changed_semantics = true only for body/note_type/resolved/position changes\n- [ ] changed_semantics = false for updated_at-only changes\n- [ ] sweep removes notes with stale last_seen_at\n- [ ] MR upsert_note() returns NoteUpsertOutcome\n- [ ] Issue upsert populates ALL position columns (matching MR pattern)\n- [ ] All 6 tests pass, clippy clean\n\n## Edge Cases\n- NULL body: IS NOT comparison handles NULLs correctly\n- UNIQUE(gitlab_id) already exists on notes table (migration 002)\n- last_seen_at prevents stale-sweep of notes currently being ingested\n- Issue notes currently don't populate position_new_path etc. — the new upsert must extract these from NormalizedNote (check that the transformer populates them for issue DiffNotes)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:59:14.783336Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:24.151831Z","closed_at":"2026-02-12T18:13:24.151781Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"],"dependencies":[{"issue_id":"bd-3bpk","depends_on_id":"bd-18bf","type":"blocks","created_at":"2026-02-12T17:04:47.776788Z","created_by":"tayloreernisse"},{"issue_id":"bd-3bpk","depends_on_id":"bd-2b28","type":"blocks","created_at":"2026-02-12T17:04:47.932914Z","created_by":"tayloreernisse"},{"issue_id":"bd-3bpk","depends_on_id":"bd-2ezb","type":"blocks","created_at":"2026-02-12T17:04:49.450541Z","created_by":"tayloreernisse"},{"issue_id":"bd-3bpk","depends_on_id":"bd-jbfw","type":"blocks","created_at":"2026-02-12T17:04:48.008740Z","created_by":"tayloreernisse"}]} +{"id":"bd-3bwh","title":"Define dashboard data structs for me command","description":"## Background\nThe me command needs data structs for all dashboard sections. These are used by both renderers and must serialize cleanly to JSON. Labels are stored as Vec (populated from junction tables via GROUP_CONCAT in queries). 
All work item structs share a common shape with AttentionState.\n\n## Approach\nAdd to `src/cli/commands/me/types.rs` (alongside AttentionState):\n\n```rust\nuse serde::Serialize;\n\n#[derive(Debug, Serialize)]\npub struct MeIssue {\n pub project_path: String,\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub status_name: Option, // work item status (AC-7.4)\n pub attention_state: AttentionState,\n pub labels: Vec, // from issue_labels junction table\n pub updated_at_iso: String,\n pub web_url: String,\n}\n\n#[derive(Debug, Serialize)]\npub struct MeMrAuthored {\n pub project_path: String,\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub draft: bool, // AC-7.5\n pub detailed_merge_status: Option, // AC-7.5\n pub attention_state: AttentionState,\n pub labels: Vec, // from mr_labels junction table\n pub updated_at_iso: String,\n pub web_url: String,\n}\n\n#[derive(Debug, Serialize)]\npub struct MeMrReviewing {\n pub project_path: String,\n pub iid: i64,\n pub title: String,\n pub state: String,\n pub author_username: String, // who wrote the MR (AC-7.5)\n pub draft: bool,\n pub attention_state: AttentionState,\n pub labels: Vec,\n pub updated_at_iso: String,\n pub web_url: String,\n}\n\n#[derive(Debug, Serialize)]\npub struct MeActivityItem {\n pub timestamp_iso: String,\n pub event_type: String, // note, status, label, assign, unassign, review_request, milestone\n pub entity_type: String, // issue, merge_request\n pub entity_iid: i64,\n pub project_path: String,\n pub actor: String,\n pub is_own: bool,\n pub summary: String,\n pub body_preview: Option, // notes only, up to 200 chars (AC-7.6)\n}\n\n#[derive(Debug, Serialize)]\npub struct MeSummary {\n pub project_count: usize,\n pub open_issues: usize,\n pub authored_mrs: usize,\n pub reviewing_mrs: usize,\n pub needs_attention_count: usize, // AC-5.5\n}\n\n#[derive(Debug, Serialize)]\npub struct MeDashboard {\n pub username: String,\n pub since_iso: String,\n pub summary: MeSummary,\n 
pub open_issues: Vec,\n pub open_mrs_authored: Vec,\n pub reviewing_mrs: Vec,\n pub activity: Vec,\n}\n```\n\nNote: MeMrReviewing does NOT include `detailed_merge_status` (less relevant for reviewers — the spec says \"draft indicator, attention state\" for reviewing, and `detailed_merge_status` only for authored per AC-5.2 vs AC-5.3).\n\n## Acceptance Criteria\n- [ ] MeIssue with: project_path, iid, title, state, status_name (Option), attention_state, labels, updated_at_iso, web_url\n- [ ] MeMrAuthored with: + draft (bool), detailed_merge_status (Option)\n- [ ] MeMrReviewing with: + author_username, draft (bool) — NO detailed_merge_status\n- [ ] MeActivityItem with: timestamp_iso, event_type, entity_type, entity_iid, project_path, actor, is_own, summary, body_preview (Option)\n- [ ] MeSummary with: project_count, open_issues, authored_mrs, reviewing_mrs, needs_attention_count\n- [ ] MeDashboard with: username, since_iso, summary, and 4 section Vecs\n- [ ] All structs derive Debug + Serialize\n- [ ] Labels stored as Vec (populated by queries from junction tables)\n- [ ] Empty Vecs serialize as `[]` in JSON\n\n## Files\n- MODIFY: src/cli/commands/me/types.rs (add all structs)\n\n## TDD Anchor\nRED: Write `test_me_dashboard_serializes`:\n```rust\nlet dashboard = MeDashboard { username: \"test\".into(), since_iso: \"...\".into(),\n summary: MeSummary { project_count: 1, open_issues: 0, authored_mrs: 0, reviewing_mrs: 0, needs_attention_count: 0 },\n open_issues: vec![], open_mrs_authored: vec![], reviewing_mrs: vec![], activity: vec![] };\nlet json = serde_json::to_value(&dashboard).unwrap();\nassert_eq!(json[\"username\"], \"test\");\nassert!(json[\"open_issues\"].is_array());\n```\nGREEN: Define structs with Serialize.\nVERIFY: `cargo test me_dashboard_serializes`\n\n## Edge Cases\n- body_preview is Option: None for non-note events, Some for notes (truncated to 200 chars in query)\n- status_name is Option: None when issue hasn't had work item status enriched\n- 
detailed_merge_status is Option: None for older MRs\n- labels Vec is empty when no labels assigned (not Option — always a Vec)\n\n## Dependency Context\nUses AttentionState from bd-1vai (same file). Consumed by all query beads and rendering beads.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:36:27.297611Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.049912Z","closed_at":"2026-02-20T16:09:13.049871Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3bwh","depends_on_id":"bd-1vai","type":"blocks","created_at":"2026-02-19T19:41:08.595368Z","created_by":"tayloreernisse"}]} {"id":"bd-3cjp","title":"NOTE-2I: Batch parent metadata cache for note regeneration","description":"## Background\nextract_note_document() (from NOTE-2C) fetches parent entity metadata per note via SQL queries. During initial backfill of ~8K notes, this creates N+1 amplification — 50 notes on same MR = 50 identical parent lookups. This is a performance optimization for batch regeneration only.\n\n## Approach\n1. Add ParentMetadataCache struct in src/documents/extractor.rs:\n pub struct ParentMetadataCache {\n cache: HashMap<(String, i64), ParentMetadata>,\n }\n Key: (noteable_type: String, parent_local_id: i64)\n ParentMetadata struct: { iid: i64, title: String, web_url: String, labels: Vec, project_path: String }\n\n Methods:\n - pub fn new() -> Self\n - pub fn get_or_fetch(&mut self, conn: &Connection, noteable_type: &str, parent_id: i64) -> Result>\n get_or_fetch uses HashMap entry API: on miss, fetches from DB (same queries as extract_note_document), caches, returns ref.\n\n2. Add pub fn extract_note_document_cached(conn: &Connection, note_id: i64, cache: &mut ParentMetadataCache) -> Result>:\n Same logic as extract_note_document but calls cache.get_or_fetch() instead of inline parent queries. The uncached version remains for single-note use.\n\n3. 
Update batch regeneration loop in src/documents/regenerator.rs. The main regeneration loop is in regenerate_dirty_documents() (top of file, ~line 20). It processes dirty entries one at a time via regenerate_one() (line 86). For batch cache to work:\n - Create ParentMetadataCache before the loop\n - In the SourceType::Note arm of regenerate_one, pass the cache through\n - This requires either making regenerate_one() take an optional cache parameter, or restructuring to handle Note specially in the loop body.\n\n Cleanest approach: Add cache: &mut Option parameter to regenerate_one(). Initialize as Some(ParentMetadataCache::new()) before the loop. Only SourceType::Note uses it. Other types ignore it.\n\n Cache is created fresh per regenerate_dirty_documents() call — no cross-invocation persistence.\n\n## Files\n- MODIFY: src/documents/extractor.rs (add ParentMetadataCache struct + extract_note_document_cached)\n- MODIFY: src/documents/regenerator.rs (add cache parameter to regenerate_one, use in batch loop)\n- MODIFY: src/documents/mod.rs (export ParentMetadataCache if needed externally)\n\n## TDD Anchor\nRED: test_note_regeneration_batch_uses_cache — insert project, issue, 10 notes on same issue, mark all dirty, regenerate all, assert all 10 documents created correctly.\nGREEN: Implement ParentMetadataCache and extract_note_document_cached.\nVERIFY: cargo test note_regeneration_batch -- --nocapture\nTests: test_note_regeneration_cache_consistent_with_direct_extraction (cached output == uncached output), test_note_regeneration_cache_invalidates_across_parents (notes from different parents get correct metadata)\n\n## Acceptance Criteria\n- [ ] ParentMetadataCache reduces DB queries during batch regeneration (10 notes on 1 parent = 1 parent fetch, not 10)\n- [ ] Cached extraction produces identical DocumentData output to uncached\n- [ ] Cache keyed per (noteable_type, parent_id) — no cross-parent leakage\n- [ ] Cache scoped to single regenerate_dirty_documents call — 
no persistence or invalidation complexity\n- [ ] All 3 tests pass\n\n## Dependency Context\n- Depends on NOTE-2C (bd-18yh): extract_note_document function must exist to create the cached variant\n\n## Edge Cases\n- Parent deleted between cache creation and lookup: get_or_fetch returns None, extract_note_document_cached returns None (same as uncached)\n- Very large batch (10K+ notes): cache grows but is bounded by number of unique parents (typically <100 issues/MRs)\n- Cache miss for orphaned discussion: cached None result prevents repeated failed lookups","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-12T17:03:00.515490Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.870738Z","closed_at":"2026-02-12T18:13:15.870693Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"]} {"id":"bd-3ddw","title":"Create lore-tui crate scaffold","description":"## Background\nThe TUI is implemented as a separate binary crate (crates/lore-tui/) that uses nightly Rust for FrankenTUI. It is EXCLUDED from the root workspace to keep nightly-only deps isolated. The lore CLI spawns lore-tui at runtime via binary delegation (PATH lookup) — zero compile-time dependency from lore to lore-tui. lore-tui depends on lore as a library (src/lib.rs exists and exports all modules).\n\nFrankenTUI is published on crates.io as ftui (0.1.1), ftui-core, ftui-runtime, ftui-render, ftui-style. Use crates.io versions. 
Local clone exists at ~/projects/FrankenTUI/ for reference.\n\n## Approach\nCreate the crate directory structure:\n- crates/lore-tui/Cargo.toml with dependencies:\n - ftui = \"0.1.1\" (crates.io) and related ftui-* crates\n - lore = { path = \"../..\" } (library dependency for Config, db, ingestion, etc.)\n - clap, anyhow, chrono, dirs, rusqlite (bundled), crossterm\n- crates/lore-tui/rust-toolchain.toml pinning nightly-2026-02-08\n- crates/lore-tui/src/main.rs — binary entry point with TuiCli struct (clap Parser) supporting --config, --sync, --fresh, --render-mode, --ascii, --no-alt-screen\n- crates/lore-tui/src/lib.rs — public API: launch_tui(), launch_sync_tui(), LaunchOptions struct, module declarations\n- Root Cargo.toml: verify lore-tui is NOT in [workspace] members\n\n## Acceptance Criteria\n- [ ] crates/lore-tui/Cargo.toml exists with ftui (crates.io) and lore (path dep) dependencies\n- [ ] crates/lore-tui/rust-toolchain.toml pins nightly-2026-02-08\n- [ ] crates/lore-tui/src/main.rs compiles with clap CLI args\n- [ ] crates/lore-tui/src/lib.rs declares all module stubs and exports LaunchOptions, launch_tui, launch_sync_tui\n- [ ] cargo +stable check --workspace --all-targets passes (lore-tui excluded)\n- [ ] cargo +nightly check --manifest-path crates/lore-tui/Cargo.toml --all-targets passes\n- [ ] Root Cargo.toml does NOT include lore-tui in workspace members\n\n## Files\n- CREATE: crates/lore-tui/Cargo.toml\n- CREATE: crates/lore-tui/rust-toolchain.toml\n- CREATE: crates/lore-tui/src/main.rs\n- CREATE: crates/lore-tui/src/lib.rs\n- VERIFY: Cargo.toml (root — confirm lore-tui NOT in members)\n\n## TDD Anchor\nRED: Write a shell test that runs cargo +nightly check --manifest-path crates/lore-tui/Cargo.toml and asserts exit 0.\nGREEN: Create the full crate scaffold with all deps.\nVERIFY: cargo +stable check --workspace --all-targets && cargo +nightly check --manifest-path crates/lore-tui/Cargo.toml\n\n## Edge Cases\n- ftui crates may require specific 
nightly features — pin exact nightly date\n- Path dependency to lore means lore-tui sees lore's edition 2024 — verify compat\n- rusqlite bundled feature pulls in cc build — may need nightly-compatible cc version\n- If ftui 0.1.1 has breaking changes vs PRD assumptions, check ~/projects/FrankenTUI/ for latest API\n\n## Dependency Context\nRoot task — no dependencies. All other Phase 0 tasks depend on this scaffold existing.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T16:53:10.859837Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:21.782753Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3ddw","depends_on_id":"bd-1cj0","type":"blocks","created_at":"2026-02-12T18:11:21.782657Z","created_by":"tayloreernisse"}]} {"id":"bd-3dum","title":"Orchestrator: status enrichment phase with transactional writes","description":"## Background\nThe orchestrator controls the sync pipeline. Status enrichment is a new Phase 1.5 that runs after issue ingestion but before discussion sync. It must be non-fatal — errors skip enrichment but don't crash the sync.\n\n## Approach\nAdd enrichment phase to ingest_project_issues_with_progress. Use client.graphql_client() factory. Look up project path from DB via .optional()? for non-fatal failure. 
Transactional writes via enrich_issue_statuses_txn() with two phases: clear stale, then apply new.\n\n## Files\n- src/ingestion/orchestrator.rs (enrichment phase + txn helper + IngestProjectResult fields + ProgressEvent variants)\n- src/cli/commands/ingest.rs (add match arms for new ProgressEvent variants)\n\n## Implementation\n\nIngestProjectResult new fields:\n statuses_enriched: usize, statuses_cleared: usize, statuses_seen: usize,\n statuses_without_widget: usize, partial_error_count: usize,\n first_partial_error: Option, status_enrichment_error: Option,\n status_enrichment_mode: String, status_unsupported_reason: Option\n Default: all 0/None/\"\" as appropriate\n\nProgressEvent new variants:\n StatusEnrichmentComplete { enriched: usize, cleared: usize }\n StatusEnrichmentSkipped\n\nPhase 1.5 logic (after ingest_issues, before discussion sync):\n 1. Check config.sync.fetch_work_item_status && !signal.is_cancelled()\n 2. If false: set mode=\"skipped\", emit StatusEnrichmentSkipped\n 3. Look up project path: conn.query_row(\"SELECT path_with_namespace FROM projects WHERE id = ?1\", [project_id], |r| r.get(0)).optional()?\n 4. If None: warn, set status_enrichment_error=\"project_path_missing\", emit StatusEnrichmentComplete{0,0}\n 5. Create graphql_client via client.graphql_client()\n 6. Call fetch_issue_statuses(&graphql_client, &project_path).await\n 7. On Ok: map unsupported_reason to mode/reason, call enrich_issue_statuses_txn(), set counters\n 8. On Err: warn, set status_enrichment_error, mode=\"fetched\"\n 9. Emit StatusEnrichmentComplete\n\nenrich_issue_statuses_txn(conn, project_id, statuses, all_fetched_iids, now_ms) -> Result<(usize, usize)>:\n Uses conn.unchecked_transaction() (conn is &Connection not &mut)\n Phase 1 (clear): UPDATE issues SET status_*=NULL, status_synced_at=now_ms WHERE project_id=? AND iid=? 
AND status_name IS NOT NULL — for IIDs in all_fetched_iids but NOT in statuses\n Phase 2 (apply): UPDATE issues SET status_name=?, status_category=?, status_color=?, status_icon_name=?, status_synced_at=now_ms WHERE project_id=? AND iid=?\n tx.commit(), return (enriched, cleared)\n\nIn src/cli/commands/ingest.rs progress callback, add arms:\n ProgressEvent::StatusEnrichmentComplete { enriched, cleared } => { ... }\n ProgressEvent::StatusEnrichmentSkipped => { ... }\n\n## Acceptance Criteria\n- [ ] Enrichment runs after ingest_issues, before discussion sync\n- [ ] Gated by config.sync.fetch_work_item_status\n- [ ] Project path missing -> skipped with error=\"project_path_missing\", sync continues\n- [ ] enrich_issue_statuses_txn correctly UPDATEs status columns + status_synced_at\n- [ ] Stale status cleared: issue in all_fetched_iids but not statuses -> NULL + synced_at set\n- [ ] Transaction rollback on failure: no partial updates\n- [ ] Idempotent: running twice with same data produces same result\n- [ ] GraphQL error: logged, enrichment_error captured, sync continues\n- [ ] ingest.rs compiles with new ProgressEvent arms\n- [ ] cargo check --all-targets passes\n\n## TDD Loop\nRED: test_enrich_issue_statuses_txn, test_enrich_skips_unknown_iids, test_enrich_clears_removed_status, test_enrich_transaction_rolls_back_on_failure, test_enrich_idempotent_across_two_runs, test_enrich_sets_synced_at_on_clear, test_enrichment_error_captured_in_result, test_project_path_missing_skips_enrichment\n Tests use in-memory DB with migration 021 applied\nGREEN: Implement enrichment phase + txn helper + result fields + progress arms\nVERIFY: cargo test enrich && cargo test orchestrator\n\n## Edge Cases\n- unchecked_transaction() needed because conn is &Connection not &mut Connection\n- .optional()? 
requires use rusqlite::OptionalExtension\n- status_synced_at is set on BOTH clear and apply operations (not NULL on clear)\n- Clear SQL has WHERE status_name IS NOT NULL to avoid counting already-cleared rows\n- Progress callback match must be updated in SAME batch as enum change (compile error otherwise)\n- status_enrichment_mode must be set in ALL code paths (fetched/unsupported/skipped)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-11T06:42:11.254917Z","created_by":"tayloreernisse","updated_at":"2026-02-11T07:21:33.419310Z","closed_at":"2026-02-11T07:21:33.419268Z","close_reason":"Implemented by agent swarm — all quality gates pass (595 tests, 0 failures)","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3dum","depends_on_id":"bd-1gvg","type":"blocks","created_at":"2026-02-11T06:42:43.501683Z","created_by":"tayloreernisse"},{"issue_id":"bd-3dum","depends_on_id":"bd-2jzn","type":"blocks","created_at":"2026-02-11T06:42:43.553793Z","created_by":"tayloreernisse"},{"issue_id":"bd-3dum","depends_on_id":"bd-2y79","type":"parent-child","created_at":"2026-02-11T06:42:11.257123Z","created_by":"tayloreernisse"}]} @@ -221,6 +235,7 @@ {"id":"bd-3ir","title":"Add database migration 006_merge_requests.sql","description":"## Background\nFoundation for all CP2 MR features. This migration defines the schema that all other MR components depend on. Must complete BEFORE any other CP2 work can proceed.\n\n## Approach\nCreate migration file that adds:\n1. `merge_requests` table with all CP2 fields\n2. `mr_labels`, `mr_assignees`, `mr_reviewers` junction tables\n3. Indexes on discussions for MR queries\n4. 
DiffNote position columns on notes table\n\n## Files\n- `migrations/006_merge_requests.sql` - New migration file\n- `src/core/db.rs` - Update MIGRATIONS const to include version 6\n\n## Acceptance Criteria\n- [ ] Migration file exists at `migrations/006_merge_requests.sql`\n- [ ] `merge_requests` table has columns: id, gitlab_id, project_id, iid, title, description, state, draft, author_username, source_branch, target_branch, head_sha, references_short, references_full, detailed_merge_status, merge_user_username, created_at, updated_at, merged_at, closed_at, last_seen_at, discussions_synced_for_updated_at, discussions_sync_last_attempt_at, discussions_sync_attempts, discussions_sync_last_error, web_url, raw_payload_id\n- [ ] `mr_labels` junction table exists with (merge_request_id, label_id) PK\n- [ ] `mr_assignees` junction table exists with (merge_request_id, username) PK\n- [ ] `mr_reviewers` junction table exists with (merge_request_id, username) PK\n- [ ] `idx_discussions_mr_id` and `idx_discussions_mr_resolved` indexes exist\n- [ ] `notes` table has new columns: position_type, position_line_range_start, position_line_range_end, position_base_sha, position_start_sha, position_head_sha\n- [ ] `gi doctor` runs without migration errors\n- [ ] `cargo test` passes\n\n## TDD Loop\nRED: Cannot open DB with version 6 schema\nGREEN: Add migration file with full SQL\nVERIFY: `cargo run -- doctor` shows healthy DB\n\n## SQL Reference (from PRD)\n```sql\n-- Merge requests table\nCREATE TABLE merge_requests (\n id INTEGER PRIMARY KEY,\n gitlab_id INTEGER UNIQUE NOT NULL,\n project_id INTEGER NOT NULL REFERENCES projects(id),\n iid INTEGER NOT NULL,\n title TEXT,\n description TEXT,\n state TEXT, -- opened | merged | closed | locked\n draft INTEGER NOT NULL DEFAULT 0, -- SQLite boolean\n author_username TEXT,\n source_branch TEXT,\n target_branch TEXT,\n head_sha TEXT,\n references_short TEXT,\n references_full TEXT,\n detailed_merge_status TEXT,\n merge_user_username 
TEXT,\n created_at INTEGER, -- ms epoch UTC\n updated_at INTEGER,\n merged_at INTEGER,\n closed_at INTEGER,\n last_seen_at INTEGER NOT NULL,\n discussions_synced_for_updated_at INTEGER,\n discussions_sync_last_attempt_at INTEGER,\n discussions_sync_attempts INTEGER DEFAULT 0,\n discussions_sync_last_error TEXT,\n web_url TEXT,\n raw_payload_id INTEGER REFERENCES raw_payloads(id)\n);\nCREATE INDEX idx_mrs_project_updated ON merge_requests(project_id, updated_at);\nCREATE UNIQUE INDEX uq_mrs_project_iid ON merge_requests(project_id, iid);\n-- ... (see PRD for full index list)\n\n-- Junction tables\nCREATE TABLE mr_labels (\n merge_request_id INTEGER REFERENCES merge_requests(id) ON DELETE CASCADE,\n label_id INTEGER REFERENCES labels(id) ON DELETE CASCADE,\n PRIMARY KEY(merge_request_id, label_id)\n);\n\nCREATE TABLE mr_assignees (\n merge_request_id INTEGER REFERENCES merge_requests(id) ON DELETE CASCADE,\n username TEXT NOT NULL,\n PRIMARY KEY(merge_request_id, username)\n);\n\nCREATE TABLE mr_reviewers (\n merge_request_id INTEGER REFERENCES merge_requests(id) ON DELETE CASCADE,\n username TEXT NOT NULL,\n PRIMARY KEY(merge_request_id, username)\n);\n\n-- DiffNote position columns (ALTER TABLE)\nALTER TABLE notes ADD COLUMN position_type TEXT;\nALTER TABLE notes ADD COLUMN position_line_range_start INTEGER;\nALTER TABLE notes ADD COLUMN position_line_range_end INTEGER;\nALTER TABLE notes ADD COLUMN position_base_sha TEXT;\nALTER TABLE notes ADD COLUMN position_start_sha TEXT;\nALTER TABLE notes ADD COLUMN position_head_sha TEXT;\n\nINSERT INTO schema_version (version, applied_at, description)\nVALUES (6, strftime('%s', 'now') * 1000, 'Merge requests, MR labels, assignees, reviewers');\n```\n\n## Edge Cases\n- SQLite does not support ADD CONSTRAINT - FK defined as nullable in CP1\n- `locked` state is transitional (merge-in-progress) - store as first-class\n- discussions_synced_for_updated_at prevents redundant discussion 
refetch","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:40.101470Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:06:43.899079Z","closed_at":"2026-01-27T00:06:43.898875Z","close_reason":"Migration 006_merge_requests.sql created and verified. Schema v6 applied successfully with all tables, indexes, and position columns.","compaction_level":0,"original_size":0} {"id":"bd-3ir1","title":"Implement terminal safety module (sanitize + URL policy + redact)","description":"## Background\nGitLab content (issue descriptions, comments, MR descriptions) can contain arbitrary text including ANSI escape sequences, bidirectional text overrides, OSC hyperlinks, and C1 control codes. Displaying unsanitized content in a terminal can hijack cursor position, inject fake UI elements, or cause rendering corruption. This module provides a sanitization layer that strips dangerous sequences while preserving a safe ANSI subset for readability.\n\n## Approach\nCreate `crates/lore-tui/src/safety.rs` with:\n- `sanitize_for_terminal(input: &str) -> String` — the main entry point\n- Strip C1 control codes (0x80-0x9F)\n- Strip OSC sequences (ESC ] ... ST)\n- Strip cursor movement (CSI A/B/C/D/E/F/G/H/J/K)\n- Strip bidi overrides (U+202A-U+202E, U+2066-U+2069)\n- **PRESERVE safe ANSI subset**: SGR sequences for bold (1), italic (3), underline (4), reset (0), and standard foreground/background colors (30-37, 40-47, 90-97, 100-107). 
These improve readability of formatted GitLab content.\n- `UrlPolicy` enum: `Strip`, `Footnote`, `Passthrough` — controls how OSC 8 hyperlinks are handled\n- `RedactPattern` for optional PII/secret redaction (email, token patterns)\n- All functions are pure (no I/O), fully testable\n\nReference existing terminal safety patterns in ftui-core if available.\n\n## Acceptance Criteria\n- [ ] sanitize_for_terminal strips C1, OSC, cursor movement, bidi overrides\n- [ ] sanitize_for_terminal preserves bold, italic, underline, reset, and standard color SGR sequences\n- [ ] UrlPolicy::Strip removes OSC 8 hyperlinks entirely\n- [ ] UrlPolicy::Footnote converts OSC 8 hyperlinks to numbered footnotes [1] with URL list at end\n- [ ] RedactPattern matches common secret patterns (tokens, emails) and replaces with [REDACTED]\n- [ ] No unsafe code\n- [ ] Unit tests cover each dangerous sequence type AND verify safe sequences are preserved\n- [ ] Fuzz test with 1000 random byte sequences: no panic\n\n## Files\n- CREATE: crates/lore-tui/src/safety.rs\n- MODIFY: crates/lore-tui/src/lib.rs (add pub mod safety)\n\n## TDD Anchor\nRED: Write `test_strips_cursor_movement` that asserts CSI sequences for cursor up/down/left/right are removed from input while bold SGR is preserved.\nGREEN: Implement the sanitizer state machine that categorizes and filters escape sequences.\nVERIFY: cargo test -p lore-tui safety -- --nocapture\n\nAdditional tests:\n- test_strips_c1_control_codes\n- test_strips_bidi_overrides\n- test_strips_osc_sequences\n- test_preserves_bold_italic_underline_reset\n- test_preserves_standard_colors\n- test_url_policy_strip\n- test_url_policy_footnote\n- test_redact_patterns\n- test_fuzz_no_panic\n\n## Edge Cases\n- Malformed/truncated escape sequences (ESC without closing) — must not consume following text\n- Nested SGR sequences (e.g., bold+color combined in single CSI) — preserve entire sequence if all parameters are safe\n- UTF-8 multibyte chars adjacent to escape sequences 
— must not corrupt char boundaries\n- Empty input returns empty string\n- Input with only safe content passes through unchanged\n\n## Dependency Context\nDepends on bd-3ddw (scaffold) for the crate structure to exist. No other dependencies — this is a pure utility module.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T16:54:30.165761Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:21.987998Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-3ir1","depends_on_id":"bd-1cj0","type":"blocks","created_at":"2026-02-12T18:11:21.987966Z","created_by":"tayloreernisse"},{"issue_id":"bd-3ir1","depends_on_id":"bd-3ddw","type":"blocks","created_at":"2026-02-12T17:09:28.594948Z","created_by":"tayloreernisse"}]} {"id":"bd-3j6","title":"Add transform_mr_discussion and transform_notes_with_diff_position","description":"## Background\nExtends discussion transformer for MR context. MR discussions can contain DiffNotes with file position metadata. This is critical for code review context in CP3 document generation.\n\n## Approach\nAdd two new functions to existing `src/gitlab/transformers/discussion.rs`:\n1. `transform_mr_discussion()` - Transform discussion with MR reference\n2. 
`transform_notes_with_diff_position()` - Extract DiffNote position metadata\n\nCP1 already has the polymorphic `NormalizedDiscussion` with `NoteableRef` enum - reuse that pattern.\n\n## Files\n- `src/gitlab/transformers/discussion.rs` - Add new functions\n- `tests/diffnote_tests.rs` - DiffNote position extraction tests\n- `tests/mr_discussion_tests.rs` - MR discussion transform tests\n\n## Acceptance Criteria\n- [ ] `transform_mr_discussion()` returns `NormalizedDiscussion` with `merge_request_id: Some(local_mr_id)`\n- [ ] `transform_notes_with_diff_position()` returns `Result, String>`\n- [ ] DiffNote position fields extracted: `position_old_path`, `position_new_path`, `position_old_line`, `position_new_line`\n- [ ] Extended position fields extracted: `position_type`, `position_line_range_start`, `position_line_range_end`\n- [ ] SHA triplet extracted: `position_base_sha`, `position_start_sha`, `position_head_sha`\n- [ ] Strict timestamp parsing - returns `Err` on invalid timestamps (no `unwrap_or(0)`)\n- [ ] `cargo test diffnote` passes\n- [ ] `cargo test mr_discussion` passes\n\n## TDD Loop\nRED: `cargo test diffnote_position` -> test fails\nGREEN: Add position extraction logic\nVERIFY: `cargo test diffnote`\n\n## Function Signatures\n```rust\n/// Transform GitLab discussion for MR context.\n/// Reuses existing transform_discussion logic, just with MR reference.\npub fn transform_mr_discussion(\n gitlab_discussion: &GitLabDiscussion,\n local_project_id: i64,\n local_mr_id: i64,\n) -> NormalizedDiscussion {\n // Use existing transform_discussion with NoteableRef::MergeRequest(local_mr_id)\n transform_discussion(\n gitlab_discussion,\n local_project_id,\n NoteableRef::MergeRequest(local_mr_id),\n )\n}\n\n/// Transform notes with DiffNote position extraction.\n/// Returns Result to enforce strict timestamp parsing.\npub fn transform_notes_with_diff_position(\n gitlab_discussion: &GitLabDiscussion,\n local_project_id: i64,\n) -> Result, String>\n```\n\n## DiffNote 
Position Extraction\n```rust\n// Extract position metadata if present\nlet (old_path, new_path, old_line, new_line, position_type, lr_start, lr_end, base_sha, start_sha, head_sha) = note\n .position\n .as_ref()\n .map(|pos| (\n pos.old_path.clone(),\n pos.new_path.clone(),\n pos.old_line,\n pos.new_line,\n pos.position_type.clone(), // \"text\" | \"image\" | \"file\"\n pos.line_range.as_ref().map(|r| r.start_line),\n pos.line_range.as_ref().map(|r| r.end_line),\n pos.base_sha.clone(),\n pos.start_sha.clone(),\n pos.head_sha.clone(),\n ))\n .unwrap_or((None, None, None, None, None, None, None, None, None, None));\n```\n\n## Strict Timestamp Parsing\n```rust\n// CRITICAL: Return error on invalid timestamps, never zero\nlet created_at = iso_to_ms(¬e.created_at)\n .ok_or_else(|| format\\!(\n \"Invalid note.created_at for note {}: {}\",\n note.id, note.created_at\n ))?;\n```\n\n## NormalizedNote Fields for DiffNotes\n```rust\nNormalizedNote {\n // ... existing fields ...\n // DiffNote position metadata\n position_old_path: old_path,\n position_new_path: new_path,\n position_old_line: old_line,\n position_new_line: new_line,\n // Extended position\n position_type,\n position_line_range_start: lr_start,\n position_line_range_end: lr_end,\n // SHA triplet\n position_base_sha: base_sha,\n position_start_sha: start_sha,\n position_head_sha: head_sha,\n}\n```\n\n## Edge Cases\n- Notes without position should have all position fields as None\n- Invalid timestamp should fail the entire discussion (no partial results)\n- File renames: `old_path \\!= new_path` indicates a renamed file\n- Multi-line comments: `line_range` present means comment spans lines 45-48","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:41.208380Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:20:13.473091Z","closed_at":"2026-01-27T00:20:13.473031Z","close_reason":"Implemented transform_mr_discussion() and transform_notes_with_diff_position() with full DiffNote 
position extraction:\n- Extended NormalizedNote with 10 DiffNote position fields (path, line, type, line_range, SHA triplet)\n- Added strict timestamp parsing that returns Err on invalid timestamps\n- Created 13 diffnote_position_tests covering all extraction paths and error cases\n- Created 6 mr_discussion_tests verifying MR reference handling\n- All 161 tests passing","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3j6","depends_on_id":"bd-3ir","type":"blocks","created_at":"2026-01-26T22:08:54.207801Z","created_by":"tayloreernisse"},{"issue_id":"bd-3j6","depends_on_id":"bd-5ta","type":"blocks","created_at":"2026-01-26T22:08:54.244201Z","created_by":"tayloreernisse"}]} +{"id":"bd-3jiq","title":"Implement --fields minimal preset for me robot mode","description":"## Background\n`--fields` already exists in lore robot mode and `minimal` presets are entity-specific in `src/cli/robot.rs`. `lore me` needs equivalent behavior without breaking existing commands.\n\nThis bead defines how one requested field set is projected across four me arrays (`open_issues`, `open_mrs_authored`, `reviewing_mrs`, `activity`) while preserving non-array data (`username`, `since_iso`, `summary`, `meta`).\n\n## Approach\n### 1. Extend preset expansion in shared robot utility\nIn `src/cli/robot.rs`, add entity keys to `expand_fields_preset`:\n- `me_work_items` => `[\"iid\", \"title\", \"attention_state\", \"updated_at_iso\"]`\n- `me_activity` => `[\"timestamp_iso\", \"event_type\", \"entity_iid\", \"actor\"]`\n\n### 2. 
Apply filtering in me robot renderer path\nAfter full envelope is built (from `bd-2ilv`), apply filtering only to arrays:\n- `open_issues` uses expanded fields from `me_work_items`.\n- `open_mrs_authored` uses expanded fields from `me_work_items`.\n- `reviewing_mrs` uses expanded fields from `me_work_items`.\n- `activity` uses expanded fields from `me_activity`.\n\nFor non-`minimal` custom lists, pass through requested fields unchanged for all four arrays.\n\n### 3. Keep top-level and summary stable\nNever filter:\n- `data.username`\n- `data.since_iso`\n- `data.summary`\n- `meta`\n\n## Acceptance Criteria\n- [ ] `--fields minimal` projects work-item arrays to exactly: `iid`, `title`, `attention_state`, `updated_at_iso`\n- [ ] `--fields minimal` projects activity array to exactly: `timestamp_iso`, `event_type`, `entity_iid`, `actor`\n- [ ] `summary`, `username`, and `since_iso` remain present under `--fields minimal`\n- [ ] No `--fields` flag returns full unfiltered payload\n- [ ] Custom lists (e.g. `--fields iid,title,labels`) are applied to all 4 arrays\n- [ ] Unknown fields do not error; they yield empty objects where no keys match (existing behavior)\n- [ ] Existing non-me presets in `expand_fields_preset` remain unchanged\n\n## Files\n- MODIFY: `src/cli/robot.rs`\n- MODIFY: `src/cli/commands/me/render_robot.rs`\n- MODIFY: `src/cli/commands/me/mod.rs` (pass through `args.fields`)\n\n## TDD Anchor\nRED:\n- `test_me_fields_minimal_projects_work_item_arrays`\n- `test_me_fields_minimal_projects_activity_array`\n- `test_me_fields_minimal_preserves_summary_and_identity`\n- `test_me_fields_custom_list_passthrough`\n- `test_me_fields_unknown_names_do_not_error`\n\nGREEN:\n- Add me presets and wire array-level filtering in me renderer.\n\nVERIFY:\n- `cargo test fields_minimal`\n- `cargo test me_robot_fields`\n\n## Edge Cases\n- Applying custom fields that only exist on some arrays (e.g. 
`draft`) should keep matching keys where present and produce empty objects elsewhere.\n- Empty arrays should remain `[]` after filtering.\n\n## Dependency Context\nBuilds on `bd-2ilv` full me envelope and shared filtering helpers in `src/cli/robot.rs`.\nProvides final robot token-shaping behavior consumed by `bd-1vv8` command handler.\n\nDependencies:\n -> bd-2ilv (blocks) - Implement robot JSON output for me command\n\nDependents:\n <- bd-1vv8 (blocks) - Implement me command handler: wire queries to renderers","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:40:21.642386Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.064705Z","closed_at":"2026-02-20T16:09:13.064666Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3jiq","depends_on_id":"bd-2ilv","type":"blocks","created_at":"2026-02-19T19:41:28.258058Z","created_by":"tayloreernisse"}]} {"id":"bd-3jqx","title":"Implement async integration tests: cancellation, timeout, embed isolation, payload integrity","description":"## Background\n\nThe surgical sync pipeline involves async operations, cancellation signals, timeouts, scoped embedding, and multi-entity coordination. Unit tests in individual beads cover their own logic, but integration tests are needed to verify the full pipeline under realistic conditions: cancellation at different stages, timeout behavior with continuation, embedding scope isolation (only affected documents get embedded), and payload integrity (project_id mismatches rejected). These tests use wiremock for HTTP mocking and tokio for async runtime.\n\n## Approach\n\nCreate `tests/surgical_integration.rs` as an integration test file (Rust convention: `tests/` directory for integration tests). Six test functions covering the critical behavioral properties of the surgical pipeline:\n\n1. **Cancellation before preflight**: Signal cancelled before any HTTP call. 
Verify: recorder marked failed, no GitLab requests made, result has zero updates.\n2. **Cancellation during dependent stage**: Signal cancelled after preflight succeeds but during discussion fetch. Verify: partial results recorded, recorder marked failed, entities processed before cancellation have outcomes.\n3. **Per-entity timeout with continuation**: One entity's GitLab endpoint is slow (wiremock delay). Verify: that entity gets `failed` outcome with timeout error, remaining entities continue and succeed.\n4. **Embed scope isolation**: Sync two issues. Verify: only documents generated from those two issues are embedded, not the entire corpus. Assert by checking document IDs passed to embed function.\n5. **Payload project_id mismatch rejection**: Preflight returns an issue with `project_id` different from the resolved project. Verify: that entity gets `failed` outcome with clear error, other entities unaffected.\n6. **Successful full pipeline**: Sync one issue end-to-end through all stages. Verify: SyncResult has correct counts, entity_results has `synced` outcome, documents regenerated, embeddings created.\n\nAll tests use in-memory SQLite (`create_connection(Path::new(\":memory:\"))` + `run_migrations`) and wiremock `MockServer`.\n\n## Acceptance Criteria\n\n1. All 6 tests compile and pass\n2. Tests are isolated (each creates its own DB and mock server)\n3. Cancellation tests verify recorder state (failed status in sync_runs table)\n4. Timeout test uses wiremock delay, not `tokio::time::sleep` on the test side\n5. Embed isolation test verifies document-level scoping, not just function call\n6. 
Tests run in CI without flakiness (no real network, no real Ollama)\n\n## Files\n\n- `tests/surgical_integration.rs` — all 6 integration tests\n\n## TDD Anchor\n\n```rust\n// tests/surgical_integration.rs\n\nuse lore::cli::commands::sync::{SyncOptions, SyncResult};\nuse lore::core::db::{create_connection, run_migrations};\nuse lore::core::shutdown::ShutdownSignal;\nuse lore::Config;\nuse std::path::Path;\nuse std::time::Duration;\nuse wiremock::{Mock, MockServer, ResponseTemplate};\nuse wiremock::matchers::{method, path_regex};\n\nfn test_config(mock_url: &str) -> Config {\n let mut config = Config::default();\n config.gitlab.url = mock_url.to_string();\n config.gitlab.token = \"test-token\".to_string();\n config\n}\n\nfn setup_db() -> rusqlite::Connection {\n let conn = create_connection(Path::new(\":memory:\")).unwrap();\n run_migrations(&conn).unwrap();\n conn.execute(\n \"INSERT INTO projects (gitlab_project_id, path_with_namespace, web_url)\n VALUES (1, 'group/project', 'https://gitlab.example.com/group/project')\",\n [],\n ).unwrap();\n conn\n}\n\nfn mock_issue_json(iid: u64) -> serde_json::Value {\n serde_json::json!({\n \"id\": 100 + iid, \"iid\": iid, \"project_id\": 1, \"title\": format!(\"Issue {}\", iid),\n \"state\": \"opened\", \"created_at\": \"2026-01-01T00:00:00Z\",\n \"updated_at\": \"2026-02-17T00:00:00Z\",\n \"author\": {\"id\": 1, \"username\": \"dev\", \"name\": \"Dev\"},\n \"web_url\": format!(\"https://gitlab.example.com/group/project/-/issues/{}\", iid)\n })\n}\n\n#[tokio::test]\nasync fn cancellation_before_preflight() {\n let server = MockServer::start().await;\n // No mocks mounted — if any request is made, wiremock will return 404\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issue_iids: vec![7],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n signal.cancel(); // Cancel before anything starts\n\n let result = 
lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"cancel-pre\"), &signal,\n ).await.unwrap();\n\n assert_eq!(result.issues_updated, 0);\n assert_eq!(result.mrs_updated, 0);\n // Verify no HTTP requests were made\n assert_eq!(server.received_requests().await.unwrap().len(), 0);\n}\n\n#[tokio::test]\nasync fn cancellation_during_dependent_stage() {\n let server = MockServer::start().await;\n // Mock issue fetch (preflight succeeds)\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([mock_issue_json(7)])))\n .mount(&server).await;\n // Mock discussion fetch with delay (gives time to cancel)\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues/7/discussions\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([]))\n .set_body_delay(Duration::from_secs(2)))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issue_iids: vec![7],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n\n // Cancel after a short delay (after preflight, during dependents)\n let signal_clone = signal.clone();\n tokio::spawn(async move {\n tokio::time::sleep(Duration::from_millis(200)).await;\n signal_clone.cancel();\n });\n\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"cancel-dep\"), &signal,\n ).await.unwrap();\n\n // Preflight should have run, but ingest may be partial\n assert!(result.surgical_mode == Some(true));\n}\n\n#[tokio::test]\nasync fn per_entity_timeout_with_continuation() {\n let server = MockServer::start().await;\n // Issue 7: slow response (simulates timeout)\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\\?.*iids\\[\\]=7\"))\n .respond_with(ResponseTemplate::new(200)\n 
.set_body_json(serde_json::json!([mock_issue_json(7)]))\n .set_body_delay(Duration::from_secs(30)))\n .mount(&server).await;\n // Issue 42: fast response\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\\?.*iids\\[\\]=42\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([mock_issue_json(42)])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issue_iids: vec![7, 42],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n\n // With a per-entity timeout, issue 7 should fail, issue 42 should succeed\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"timeout-test\"), &signal,\n ).await.unwrap();\n\n let entities = result.entity_results.as_ref().unwrap();\n // One should be failed (timeout), one should be synced\n let failed = entities.iter().filter(|e| e.outcome == \"failed\").count();\n let synced = entities.iter().filter(|e| e.outcome == \"synced\").count();\n assert!(failed >= 1 || synced >= 1, \"Expected mixed outcomes\");\n}\n\n#[tokio::test]\nasync fn embed_scope_isolation() {\n let server = MockServer::start().await;\n // Mock two issues\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([\n mock_issue_json(7), mock_issue_json(42)\n ])))\n .mount(&server).await;\n // Mock empty discussions for both\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues/\\d+/discussions\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issue_iids: vec![7, 42],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n no_embed: false,\n ..SyncOptions::default()\n 
};\n let signal = ShutdownSignal::new();\n\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"embed-iso\"), &signal,\n ).await.unwrap();\n\n // Embedding should only have processed documents from issues 7 and 42\n // Not the full corpus. Verify via document counts.\n assert!(result.documents_embedded <= 2,\n \"Expected at most 2 documents embedded (one per issue), got {}\",\n result.documents_embedded);\n}\n\n#[tokio::test]\nasync fn payload_project_id_mismatch_rejection() {\n let server = MockServer::start().await;\n // Return issue with project_id=999 (doesn't match resolved project_id=1)\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200)\n .set_body_json(serde_json::json!([{\n \"id\": 200, \"iid\": 7, \"project_id\": 999, \"title\": \"Wrong Project\",\n \"state\": \"opened\", \"created_at\": \"2026-01-01T00:00:00Z\",\n \"updated_at\": \"2026-02-17T00:00:00Z\",\n \"author\": {\"id\": 1, \"username\": \"dev\", \"name\": \"Dev\"},\n \"web_url\": \"https://gitlab.example.com/other/project/-/issues/7\"\n }])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issue_iids: vec![7],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"mismatch\"), &signal,\n ).await.unwrap();\n\n let entities = result.entity_results.as_ref().unwrap();\n assert_eq!(entities.len(), 1);\n assert_eq!(entities[0].outcome, \"failed\");\n assert!(entities[0].error.as_ref().unwrap().contains(\"project_id\"));\n}\n\n#[tokio::test]\nasync fn successful_full_pipeline() {\n let server = MockServer::start().await;\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues\"))\n .respond_with(ResponseTemplate::new(200)\n 
.set_body_json(serde_json::json!([mock_issue_json(7)])))\n .mount(&server).await;\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues/7/discussions\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([])))\n .mount(&server).await;\n // Mock any resource event endpoints\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/1/issues/7/resource_\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(serde_json::json!([])))\n .mount(&server).await;\n\n let config = test_config(&server.uri());\n let options = SyncOptions {\n issue_iids: vec![7],\n project: Some(\"group/project\".to_string()),\n robot_mode: true,\n no_embed: true, // Skip embed to avoid Ollama dependency\n ..SyncOptions::default()\n };\n let signal = ShutdownSignal::new();\n\n let result = lore::cli::commands::sync_surgical::run_sync_surgical(\n &config, options, Some(\"full-pipe\"), &signal,\n ).await.unwrap();\n\n assert_eq!(result.surgical_mode, Some(true));\n assert_eq!(result.surgical_iids.as_ref().unwrap().issues, vec![7]);\n assert_eq!(result.preflight_only, Some(false));\n\n let entities = result.entity_results.as_ref().unwrap();\n assert_eq!(entities.len(), 1);\n assert_eq!(entities[0].entity_type, \"issue\");\n assert_eq!(entities[0].iid, 7);\n assert_eq!(entities[0].outcome, \"synced\");\n assert!(entities[0].error.is_none());\n\n assert!(result.issues_updated >= 1);\n assert!(result.documents_regenerated >= 1);\n}\n```\n\n## Edge Cases\n\n- **Wiremock delay vs tokio timeout**: Use `set_body_delay` on wiremock, not `tokio::time::sleep` in tests. The per-entity timeout in the orchestrator (bd-1i4i) should use `tokio::time::timeout` around the HTTP call.\n- **Embed isolation without Ollama**: Tests that verify embed scoping should either mock Ollama or use `no_embed: true` and verify the document ID list passed to the embed function. 
The `successful_full_pipeline` test uses `no_embed: true` to avoid requiring a running Ollama server in CI.\n- **Test isolation**: Each test creates its own `MockServer`, in-memory DB, and `ShutdownSignal`. No shared state between tests.\n- **Flakiness prevention**: Cancellation timing tests (test 2) use deterministic delays (cancel after 200ms, response delayed 2s). If flaky, increase the gap between cancel time and response delay.\n- **CI compatibility**: No real GitLab, no real Ollama, no real filesystem locks (in-memory DB means AppLock may need adaptation for tests — consider a test-only lock bypass or use a temp file DB for lock tests).\n\n## Dependency Context\n\n- **Depends on (upstream)**: bd-1i4i (the `run_sync_surgical` function under test), bd-wcja (SyncResult surgical fields to assert), bd-1lja (SyncOptions extensions), bd-3sez (surgical ingest for TOCTOU test), bd-arka (SyncRunRecorder for recorder state assertions), bd-1elx (scoped embed for isolation test), bd-kanh (per-entity helpers)\n- **No downstream dependents** — this is a terminal test-only bead.\n- These tests validate the behavioral contracts that all upstream beads promise. They are the acceptance gate for the surgical sync feature.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-17T19:18:46.182356Z","created_by":"tayloreernisse","updated_at":"2026-02-18T19:24:23.804017Z","compaction_level":0,"original_size":0,"labels":["surgical-sync"]} {"id":"bd-3js","title":"Implement MR CLI commands (list, show, count)","description":"## Background\nCLI commands for viewing and filtering merge requests. Includes list, show, and count commands with MR-specific filters.\n\n## Approach\nUpdate existing CLI command files:\n1. `list.rs` - Add MR listing with filters\n2. `show.rs` - Add MR detail view with discussions\n3. 
`count.rs` - Add MR counting with state breakdown\n\n## Files\n- `src/cli/commands/list.rs` - Add MR subcommand\n- `src/cli/commands/show.rs` - Add MR detail view\n- `src/cli/commands/count.rs` - Add MR counting\n\n## Acceptance Criteria\n- [ ] `gi list mrs` shows MR table with iid, title, state, author, branches\n- [ ] `gi list mrs --state=merged` filters by state\n- [ ] `gi list mrs --state=locked` filters locally (not server-side)\n- [ ] `gi list mrs --draft` shows only draft MRs\n- [ ] `gi list mrs --no-draft` excludes draft MRs\n- [ ] `gi list mrs --reviewer=username` filters by reviewer\n- [ ] `gi list mrs --target-branch=main` filters by target branch\n- [ ] `gi list mrs --source-branch=feature/x` filters by source branch\n- [ ] Draft MRs show `[DRAFT]` prefix in title\n- [ ] `gi show mr ` displays full detail including discussions\n- [ ] DiffNote shows file context: `[src/file.ts:45]`\n- [ ] Multi-line DiffNote shows: `[src/file.ts:45-48]`\n- [ ] `gi show mr` shows `detailed_merge_status`\n- [ ] `gi count mrs` shows total with state breakdown\n- [ ] `gi sync-status` shows MR cursor positions\n- [ ] `cargo test cli_commands` passes\n\n## TDD Loop\nRED: `cargo test list_mrs` -> command not found\nGREEN: Add MR subcommand\nVERIFY: `gi list mrs --help`\n\n## gi list mrs Output\n```\nMerge Requests (showing 20 of 1,234)\n\n !847 Refactor auth to use JWT tokens merged @johndoe main <- feature/jwt 3 days ago\n !846 Fix memory leak in websocket handler opened @janedoe main <- fix/websocket 5 days ago\n !845 [DRAFT] Add dark mode CSS variables opened @bobsmith main <- ui/dark-mode 1 week ago\n```\n\n## SQL for MR Listing\n```sql\nSELECT \n m.iid, m.title, m.state, m.draft, m.author_username,\n m.target_branch, m.source_branch, m.updated_at\nFROM merge_requests m\nWHERE m.project_id = ?\n AND (? IS NULL OR m.state = ?) -- state filter\n AND (? IS NULL OR m.draft = ?) -- draft filter\n AND (? IS NULL OR m.author_username = ?) -- author filter\n AND (? 
IS NULL OR m.target_branch = ?) -- target-branch filter\n AND (? IS NULL OR m.source_branch = ?) -- source-branch filter\n AND (? IS NULL OR EXISTS ( -- reviewer filter\n SELECT 1 FROM mr_reviewers r \n WHERE r.merge_request_id = m.id AND r.username = ?\n ))\nORDER BY m.updated_at DESC\nLIMIT ?\n```\n\n## gi show mr Output\n```\nMerge Request !847: Refactor auth to use JWT tokens\n================================================================================\n\nProject: group/project-one\nState: merged\nDraft: No\nAuthor: @johndoe\nAssignees: @janedoe, @bobsmith\nReviewers: @alice, @charlie\nSource: feature/jwt\nTarget: main\nMerge Status: mergeable\nMerged By: @alice\nMerged At: 2024-03-20 14:30:00\nLabels: enhancement, auth, reviewed\n\nDescription:\n Moving away from session cookies to JWT-based authentication...\n\nDiscussions (8):\n\n @janedoe (2024-03-16) [src/auth/jwt.ts:45]:\n Should we use a separate signing key for refresh tokens?\n\n @johndoe (2024-03-16):\n Good point. I'll add a separate key with rotation support.\n\n @alice (2024-03-18) [RESOLVED]:\n Looks good! 
Just one nit about the token expiry constant.\n```\n\n## DiffNote File Context Display\n```rust\n// Build file context string\nlet file_context = match (note.position_new_path, note.position_new_line, note.position_line_range_end) {\n (Some(path), Some(line), Some(end_line)) if line != end_line => {\n format!(\"[{}:{}-{}]\", path, line, end_line)\n }\n (Some(path), Some(line), _) => {\n format!(\"[{}:{}]\", path, line)\n }\n _ => String::new(),\n};\n```\n\n## gi count mrs Output\n```\nMerge Requests: 1,234\n opened: 89\n merged: 1,045\n closed: 100\n```\n\n## Filter Arguments (clap)\n```rust\n#[derive(Parser)]\nstruct ListMrsArgs {\n #[arg(long)]\n state: Option, // opened|merged|closed|locked|all\n #[arg(long)]\n draft: bool,\n #[arg(long)]\n no_draft: bool,\n #[arg(long)]\n author: Option,\n #[arg(long)]\n assignee: Option,\n #[arg(long)]\n reviewer: Option,\n #[arg(long)]\n target_branch: Option,\n #[arg(long)]\n source_branch: Option,\n #[arg(long)]\n label: Vec,\n #[arg(long)]\n project: Option,\n #[arg(long, default_value = \"20\")]\n limit: u32,\n}\n```\n\n## Edge Cases\n- `--state=locked` must filter locally (GitLab API doesn't support it)\n- Ambiguous MR iid across projects: prompt for `--project`\n- Empty discussions: show \"No discussions\" message\n- Multi-line DiffNotes: show line range in context","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:43.354939Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:37:31.792569Z","closed_at":"2026-01-27T00:37:31.792504Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3js","depends_on_id":"bd-20h","type":"blocks","created_at":"2026-01-26T22:08:55.209249Z","created_by":"tayloreernisse"},{"issue_id":"bd-3js","depends_on_id":"bd-ser","type":"blocks","created_at":"2026-01-26T22:08:55.117728Z","created_by":"tayloreernisse"}]} {"id":"bd-3kj","title":"[CP0] gi version, backup, reset, sync-status commands","description":"## 
Background\n\nThese are the remaining utility commands for CP0. version is trivial. backup creates safety copies before destructive operations. reset provides clean-slate capability. sync-status is a stub for CP0 that will be implemented in CP1.\n\nReference: docs/prd/checkpoint-0.md sections \"gi version\", \"gi backup\", \"gi reset\", \"gi sync-status\"\n\n## Approach\n\n**src/cli/commands/version.ts:**\n```typescript\nimport { Command } from 'commander';\nimport { version } from '../../../package.json' with { type: 'json' };\n\nexport const versionCommand = new Command('version')\n .description('Show version information')\n .action(() => {\n console.log(\\`gi version \\${version}\\`);\n });\n```\n\n**src/cli/commands/backup.ts:**\n```typescript\nimport { Command } from 'commander';\nimport { copyFileSync, mkdirSync } from 'node:fs';\nimport { loadConfig } from '../../core/config';\nimport { getDbPath, getBackupDir } from '../../core/paths';\n\nexport const backupCommand = new Command('backup')\n .description('Create timestamped database backup')\n .action(async (options, command) => {\n const globalOpts = command.optsWithGlobals();\n const config = loadConfig(globalOpts.config);\n \n const dbPath = getDbPath(config.storage?.dbPath);\n const backupDir = getBackupDir(config.storage?.backupDir);\n \n mkdirSync(backupDir, { recursive: true });\n \n // Format: data-2026-01-24T10-30-00.db (colons replaced for Windows compat)\n const timestamp = new Date().toISOString().replace(/:/g, '-').replace(/\\\\..*/, '');\n const backupPath = \\`\\${backupDir}/data-\\${timestamp}.db\\`;\n \n copyFileSync(dbPath, backupPath);\n console.log(\\`Created backup: \\${backupPath}\\`);\n });\n```\n\n**src/cli/commands/reset.ts:**\n```typescript\nimport { Command } from 'commander';\nimport { unlinkSync, existsSync } from 'node:fs';\nimport { createInterface } from 'node:readline';\nimport { loadConfig } from '../../core/config';\nimport { getDbPath } from '../../core/paths';\n\nexport 
const resetCommand = new Command('reset')\n .description('Delete database and reset all state')\n .option('--confirm', 'Skip confirmation prompt')\n .action(async (options, command) => {\n const globalOpts = command.optsWithGlobals();\n const config = loadConfig(globalOpts.config);\n const dbPath = getDbPath(config.storage?.dbPath);\n \n if (!existsSync(dbPath)) {\n console.log('No database to reset.');\n return;\n }\n \n if (!options.confirm) {\n console.log(\\`This will delete:\\n - Database: \\${dbPath}\\n - All sync cursors\\n - All cached data\\n\\`);\n // Prompt for 'yes' confirmation\n // If not 'yes', exit 2\n }\n \n unlinkSync(dbPath);\n // Also delete WAL and SHM files if they exist\n if (existsSync(\\`\\${dbPath}-wal\\`)) unlinkSync(\\`\\${dbPath}-wal\\`);\n if (existsSync(\\`\\${dbPath}-shm\\`)) unlinkSync(\\`\\${dbPath}-shm\\`);\n \n console.log(\"Database reset. Run 'gi sync' to repopulate.\");\n });\n```\n\n**src/cli/commands/sync-status.ts:**\n```typescript\n// CP0 stub - full implementation in CP1\nexport const syncStatusCommand = new Command('sync-status')\n .description('Show sync state')\n .action(() => {\n console.log(\"No sync runs yet. 
Run 'gi sync' to start.\");\n });\n```\n\n## Acceptance Criteria\n\n- [ ] `gi version` outputs \"gi version X.Y.Z\"\n- [ ] `gi backup` creates timestamped copy of database\n- [ ] Backup filename is Windows-compatible (no colons)\n- [ ] Backup directory created if missing\n- [ ] `gi reset` prompts for 'yes' confirmation\n- [ ] `gi reset --confirm` skips prompt\n- [ ] Reset deletes .db, .db-wal, and .db-shm files\n- [ ] Reset exits 2 if user doesn't type 'yes'\n- [ ] `gi sync-status` outputs stub message\n\n## Files\n\nCREATE:\n- src/cli/commands/version.ts\n- src/cli/commands/backup.ts\n- src/cli/commands/reset.ts\n- src/cli/commands/sync-status.ts\n\n## TDD Loop\n\nN/A - simple commands, verify manually:\n\n```bash\ngi version\ngi backup\nls ~/.local/share/gi/backups/\ngi reset # type 'no'\ngi reset --confirm\nls ~/.local/share/gi/data.db # should not exist\ngi sync-status\n```\n\n## Edge Cases\n\n- Backup when database doesn't exist - show clear error\n- Reset when database doesn't exist - show \"No database to reset\"\n- WAL/SHM files may not exist - check before unlinking\n- Timestamp with milliseconds could cause very long filename\n- readline prompt in non-interactive terminal - handle SIGINT","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:51.774210Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:31:46.227285Z","closed_at":"2026-01-25T03:31:46.227220Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-3kj","depends_on_id":"bd-13b","type":"blocks","created_at":"2026-01-24T16:13:10.810953Z","created_by":"tayloreernisse"},{"issue_id":"bd-3kj","depends_on_id":"bd-3ng","type":"blocks","created_at":"2026-01-24T16:13:10.827689Z","created_by":"tayloreernisse"}]} @@ -259,12 +274,15 @@ {"id":"bd-8t4","title":"Extract cross-references from resource_state_events","description":"## Background\nresource_state_events includes source_merge_request (with iid) for 'closed by MR' events. 
After state events are stored (Gate 1), post-processing extracts these into entity_references for the cross-reference graph.\n\n## Approach\nCreate src/core/references.rs (new module) or add to events_db.rs:\n\n```rust\n/// Extract cross-references from stored state events and insert into entity_references.\n/// Looks for state events with source_merge_request_id IS NOT NULL (meaning \"closed by MR\").\n/// \n/// Directionality: source = MR (that caused the close), target = issue (that was closed)\npub fn extract_refs_from_state_events(\n conn: &Connection,\n project_id: i64,\n) -> Result // returns count of new references inserted\n```\n\nSQL logic:\n```sql\nINSERT OR IGNORE INTO entity_references (\n source_entity_type, source_entity_id,\n target_entity_type, target_entity_id,\n reference_type, source_method, created_at\n)\nSELECT\n 'merge_request',\n mr.id,\n 'issue',\n rse.issue_id,\n 'closes',\n 'api_state_event',\n rse.created_at\nFROM resource_state_events rse\nJOIN merge_requests mr ON mr.project_id = rse.project_id AND mr.iid = rse.source_merge_request_id\nWHERE rse.source_merge_request_id IS NOT NULL\n AND rse.issue_id IS NOT NULL\n AND rse.project_id = ?1;\n```\n\nKey: source_merge_request_id stores the MR iid, so we JOIN on merge_requests.iid to get the local DB id.\n\nRegister in src/core/mod.rs: `pub mod references;`\n\nCall this after drain_dependent_queue in the sync pipeline (after all state events are stored).\n\n## Acceptance Criteria\n- [ ] State events with source_merge_request_id produce 'closes' references\n- [ ] Source = MR (resolved by iid), target = issue\n- [ ] source_method = 'api_state_event'\n- [ ] INSERT OR IGNORE prevents duplicates with api_closes_issues data\n- [ ] Returns count of newly inserted references\n- [ ] No-op when no state events have source_merge_request_id\n\n## Files\n- src/core/references.rs (new)\n- src/core/mod.rs (add `pub mod references;`)\n- src/cli/commands/sync.rs (call after drain step)\n\n## TDD Loop\nRED: 
tests/references_tests.rs:\n- `test_extract_refs_from_state_events_basic` - seed a \"closed\" state event with source_merge_request_id, verify entity_reference created\n- `test_extract_refs_dedup_with_closes_issues` - insert ref from closes_issues API first, verify state event extraction doesn't duplicate\n- `test_extract_refs_no_source_mr` - state events without source_merge_request_id produce no refs\n\nSetup: create_test_db with migrations 001-011, seed project + issue + MR + state events.\n\nGREEN: Implement extract_refs_from_state_events\n\nVERIFY: `cargo test references -- --nocapture`\n\n## Edge Cases\n- source_merge_request_id may reference an MR not synced locally (cross-project close) — the JOIN will produce no match, which is correct behavior (ref simply not created)\n- Multiple state events can reference the same MR for the same issue (reopen + re-close) — INSERT OR IGNORE handles dedup\n- The merge_requests table might not have the MR yet if sync is still running — call this after all dependent fetches complete","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:32:33.619606Z","created_by":"tayloreernisse","updated_at":"2026-02-04T20:13:28.219791Z","closed_at":"2026-02-04T20:13:28.219633Z","compaction_level":0,"original_size":0,"labels":["extraction","gate-2","phase-b"],"dependencies":[{"issue_id":"bd-8t4","depends_on_id":"bd-1ep","type":"blocks","created_at":"2026-02-02T21:32:42.945176Z","created_by":"tayloreernisse"},{"issue_id":"bd-8t4","depends_on_id":"bd-1se","type":"parent-child","created_at":"2026-02-02T21:32:33.621025Z","created_by":"tayloreernisse"},{"issue_id":"bd-8t4","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-02T22:41:50.562935Z","created_by":"tayloreernisse"}]} {"id":"bd-91j1","title":"Comprehensive robot-docs as agent bootstrap","description":"## Background\nAgents reach for glab because they already know it from training data. 
lore robot-docs exists but is not comprehensive enough to serve as a zero-training bootstrap. An agent encountering lore for the first time should be able to use any command correctly after reading robot-docs output alone.\n\n## Current State (Verified 2026-02-12)\n- `handle_robot_docs()` at src/main.rs:2069\n- Called at no-args in robot mode (main.rs:165) and via Commands::RobotDocs { brief } (main.rs:229)\n- Current output top-level keys: name, version, description, activation, commands, aliases, exit_codes, clap_error_codes, error_format, workflows\n- Missing: response_schema per command, example_output per command, quick_start section, glab equivalence table\n- --brief flag exists but returns shorter version of same structure\n- main.rs is 2579 lines total\n\n## Current robot-docs Output Structure\n```json\n{\n \"name\": \"lore\",\n \"version\": \"0.6.1\",\n \"description\": \"...\",\n \"activation\": { \"flags\": [\"--robot\", \"-J\"], \"env\": \"LORE_ROBOT=1\", \"auto_detect\": \"non-TTY\" },\n \"commands\": [{ \"name\": \"...\", \"description\": \"...\", \"flags\": [...], \"example\": \"...\" }],\n \"aliases\": { ... },\n \"exit_codes\": { ... },\n \"clap_error_codes\": { ... },\n \"error_format\": { ... },\n \"workflows\": { ... }\n}\n```\n\n## Approach\n\n### 1. 
Add quick_start section\nTop-level key with glab-to-lore translation and lore-exclusive feature summary:\n```json\n\"quick_start\": {\n \"glab_equivalents\": [\n { \"glab\": \"glab issue list\", \"lore\": \"lore -J issues -n 50\", \"note\": \"Richer: includes labels, status, closing MRs\" },\n { \"glab\": \"glab issue view 123\", \"lore\": \"lore -J issues 123\", \"note\": \"Includes discussions, work-item status\" },\n { \"glab\": \"glab mr list\", \"lore\": \"lore -J mrs\", \"note\": \"Includes draft status, reviewers\" },\n { \"glab\": \"glab mr view 456\", \"lore\": \"lore -J mrs 456\", \"note\": \"Includes discussions, file changes\" },\n { \"glab\": \"glab api '/projects/:id/issues'\", \"lore\": \"lore -J issues -p project\", \"note\": \"Fuzzy project matching\" }\n ],\n \"lore_exclusive\": [\n \"search: FTS5 + vector hybrid search across all entities\",\n \"who: Expert/workload/reviews analysis per file path or person\",\n \"timeline: Chronological event reconstruction across entities\",\n \"stats: Database statistics with document/note/discussion counts\",\n \"count: Entity counts with state breakdowns\"\n ]\n}\n```\n\n### 2. Add response_schema per command\nFor each command in the commands array, add a `response_schema` field showing the JSON shape:\n```json\n{\n \"name\": \"issues\",\n \"response_schema\": {\n \"ok\": \"boolean\",\n \"data\": { \"type\": \"array|object\", \"fields\": [\"iid\", \"title\", \"state\", \"...\"] },\n \"meta\": { \"elapsed_ms\": \"integer\" }\n }\n}\n```\nCommands with multiple output shapes (list vs detail) need both documented.\n\n### 3. Add example_output per command\nRealistic truncated JSON for each command. Keep each example under 500 bytes.\n\n### 4. Token budget enforcement\n- --brief mode: ONLY quick_start + command names + invocation syntax. Target <4000 tokens (~16000 bytes).\n- Full mode: everything. 
Target <12000 tokens (~48000 bytes).\n- Measure with: `cargo run --release -- --robot robot-docs --brief | wc -c`\n\n## TDD Loop\nRED: Tests in src/main.rs or new src/cli/commands/robot_docs.rs:\n- test_robot_docs_has_quick_start: parse output JSON, assert quick_start.glab_equivalents array has >= 5 entries\n- test_robot_docs_brief_size: --brief output < 16000 bytes\n- test_robot_docs_full_size: full output < 48000 bytes\n- test_robot_docs_has_response_schemas: every command entry has response_schema key\n- test_robot_docs_commands_complete: assert all registered commands appear (issues, mrs, search, who, timeline, count, stats, sync, embed, doctor, health, ingest, generate-docs, show)\n\nGREEN: Add quick_start, response_schema, example_output to robot-docs output\n\nVERIFY:\n```bash\ncargo test robot_docs && cargo clippy --all-targets -- -D warnings\ncargo run --release -- --robot robot-docs | jq '.quick_start.glab_equivalents | length'\n# Should return >= 5\ncargo run --release -- --robot robot-docs --brief | wc -c\n# Should be < 16000\n```\n\n## Acceptance Criteria\n- [ ] robot-docs JSON has quick_start.glab_equivalents array with >= 5 entries\n- [ ] robot-docs JSON has quick_start.lore_exclusive array\n- [ ] Every command entry has response_schema showing the JSON shape\n- [ ] Every command entry has example_output with realistic truncated data\n- [ ] --brief output is under 16000 bytes (~4000 tokens)\n- [ ] Full output is under 48000 bytes (~12000 tokens)\n- [ ] An agent reading ONLY robot-docs can correctly invoke any lore command\n- [ ] cargo test passes with new robot_docs tests\n\n## Edge Cases\n- Commands with multiple output shapes (e.g., issues list vs issues detail via iid) need both schemas documented\n- --fields flag changes output shape -- document the effect in the response_schema\n- robot-docs output must be stable across versions (agents may cache it)\n- Version field should match Cargo.toml version\n\n## Files to Modify\n- src/main.rs fn 
handle_robot_docs() (~line 2069) — add quick_start section, response_schema, example_output\n- Consider extracting to src/cli/commands/robot_docs.rs if the function exceeds 200 lines","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-12T15:44:40.495479Z","created_by":"tayloreernisse","updated_at":"2026-02-12T16:49:01.043915Z","closed_at":"2026-02-12T16:49:01.043832Z","close_reason":"Robot-docs enhanced with quick_start (glab equivalents, lore exclusives, read/write split) and example_output for issues/mrs/search/who","compaction_level":0,"original_size":0,"labels":["cli","cli-imp","robot-mode"],"dependencies":[{"issue_id":"bd-91j1","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-02-12T15:44:40.497236Z","created_by":"tayloreernisse"}]} {"id":"bd-9av","title":"[CP1] gi sync-status enhancement","description":"Enhance sync-status from CP0 stub to show issue cursors.\n\n## Changes to src/cli/commands/sync_status.rs\n\nUpdate the existing stub to show:\n- Last run timestamp and duration\n- Cursor positions per project (issues resource_type)\n- Entity counts (issues, discussions, notes)\n\n## Output Format\nLast sync: 2026-01-25 10:30:00 (succeeded, 45s)\n\nCursors:\n group/project-one\n issues: 2026-01-25T10:25:00Z (gitlab_id: 12345678)\n\nCounts:\n Issues: 1,234\n Discussions: 5,678\n Notes: 23,456 (4,567 system)\n\nFiles: src/cli/commands/sync_status.rs\nDone when: Shows cursor positions and counts after ingestion","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:58:27.246825Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.968507Z","deleted_at":"2026-01-25T17:02:01.968503Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} +{"id":"bd-9cob","title":"Render MR sections (authored + reviewing) in human mode","description":"## Background\nRender the two MR sections in `lore me` human mode: 
authored MRs and reviewing MRs. Both share layout primitives but differ in section-specific fields.\n\n## Approach\nImplement in `src/cli/commands/me/render_human.rs`:\n```rust\npub fn render_authored_mrs_section(mrs: &[MeMrAuthored], single_project: bool) -> String\npub fn render_reviewing_mrs_section(mrs: &[MeMrReviewing], single_project: bool) -> String\n```\n\nShared row layout:\n- Line 1: attention icon + `!iid` + title + section-specific fields + relative time.\n- Line 2: dimmed `project_path` unless `single_project == true`.\n\nSection-specific fields:\n- Authored: optional `detailed_merge_status` and optional `DRAFT` indicator.\n- Reviewing: `@author_username` and optional `DRAFT` indicator.\n\nStyling:\n- `Theme::mr_ref()` for `!iid`\n- `Theme::username()` for `@author`\n- `Theme::state_draft()` for `DRAFT`\n- `Theme::dim()` for recency and project path\n- `section_divider()` for headers\n\nTimestamp conversion rule:\n- Convert `updated_at_iso` via `crate::core::time::iso_to_ms` before `format_relative_time`.\n- On parse failure, render `updated_at_iso` text fallback; do not panic.\n\nAttention rule:\n- `NotReady` renders no attention icon; `DRAFT` remains visible.\n\n## Acceptance Criteria\n- [ ] Headers are `Authored MRs (N)` and `Reviewing MRs (N)` via `section_divider`\n- [ ] `!iid` uses MR ref styling\n- [ ] `DRAFT` appears only when `draft=true`\n- [ ] Authored rows show `detailed_merge_status` only when `Some`\n- [ ] Reviewing rows show `@author_username`\n- [ ] Attention icon rendering matches state/tier rules\n- [ ] `NotReady` shows no icon but keeps draft indicator\n- [ ] Project path hidden when `single_project=true`\n- [ ] Empty authored section -> `No authored MRs`\n- [ ] Empty reviewing section -> `No MRs to review`\n- [ ] Invalid `updated_at_iso` does not panic and renders fallback text\n\n## Files\n- MODIFY: `src/cli/commands/me/render_human.rs`\n\n## TDD Anchor\nRED:\n- `test_authored_mr_shows_merge_status`\n- 
`test_authored_mr_shows_draft_indicator`\n- `test_authored_mr_hides_draft_when_false`\n- `test_reviewing_mr_shows_author_username`\n- `test_reviewing_mr_empty_section`\n- `test_authored_mr_omits_status_when_none`\n- `test_mr_section_invalid_iso_fallback`\n\nGREEN:\n- Implement both section renderers with shared helper + safe timestamp conversion.\n\nVERIFY:\n- `cargo test mr_section`\n\n## Edge Cases\n- Same MR can appear in both sections (author and reviewer); render independently.\n- `detailed_merge_status` may be absent; omit cleanly.\n- Sorting remains handler-owned; renderer does no resorting.\n\n## Dependency Context\nConsumes `MeMrAuthored`/`MeMrReviewing` from `bd-3bwh`, icon/summary context from `bd-1vxq`, and is called by `bd-1vv8`.\n\nDependencies:\n -> bd-1vxq (blocks) - Render summary header and attention legend\n\nDependents:\n <- bd-1vv8 (blocks) - Implement me command handler: wire queries to renderers","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:39:25.770680Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.061026Z","closed_at":"2026-02-20T16:09:13.060991Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-9cob","depends_on_id":"bd-1vxq","type":"blocks","created_at":"2026-02-19T19:41:27.999746Z","created_by":"tayloreernisse"}]} {"id":"bd-9dd","title":"Implement 'lore trace' command with human and robot output","description":"## Background\n\nThe trace command is Gate 5's capstone CLI. It answers 'Why was this code introduced?' 
by building file -> MR -> issue -> discussion chains.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 5.3.\n\n## Codebase Context\n\n- CLI pattern: same as file-history (Commands enum, handler in main.rs)\n- trace.rs (bd-2n4): run_trace() returns TraceResult with chains\n- Path parsing: support 'src/foo.rs:45' syntax (line number for future Tier 2)\n- merge_requests.merged_at exists (migration 006) — use COALESCE(merged_at, updated_at) for ordering\n\n## Approach\n\n### 1. TraceArgs (`src/cli/mod.rs`):\n```rust\n#[derive(Parser)]\npub struct TraceArgs {\n pub path: String, // supports :line suffix\n #[arg(short = 'p', long)] pub project: Option,\n #[arg(long)] pub discussions: bool,\n #[arg(long = \"no-follow-renames\")] pub no_follow_renames: bool,\n #[arg(short = 'n', long = \"limit\", default_value = \"20\")] pub limit: usize,\n}\n```\n\n### 2. Path parsing:\n```rust\nfn parse_trace_path(input: &str) -> (String, Option) {\n if let Some((path, line)) = input.rsplit_once(':') {\n if let Ok(n) = line.parse::() { return (path.to_string(), Some(n)); }\n }\n (input.to_string(), None)\n}\n```\nIf line present: warn 'Line-level tracing requires Tier 2. Showing file-level results.'\n\n### 3. Human output shows chains with MR -> issue -> discussion context\n\n### 4. 
Robot JSON:\n```json\n{\"ok\": true, \"data\": {\"path\": \"...\", \"resolved_paths\": [...], \"trace_chains\": [...]}, \"meta\": {\"tier\": \"api_only\", \"line_requested\": null}}\n```\n\n## Acceptance Criteria\n\n- [ ] `lore trace src/foo.rs` with human output\n- [ ] `lore --robot trace src/foo.rs` with JSON\n- [ ] :line suffix parses and emits Tier 2 warning\n- [ ] -p, --discussions, --no-follow-renames, -n all work\n- [ ] Rename-aware via resolve_rename_chain\n- [ ] meta.tier = 'api_only'\n- [ ] Added to VALID_COMMANDS and robot-docs\n- [ ] `cargo check --all-targets` passes\n\n## Files\n\n- `src/cli/mod.rs` (TraceArgs + Commands::Trace)\n- `src/cli/commands/trace.rs` (NEW)\n- `src/cli/commands/mod.rs` (re-export)\n- `src/main.rs` (handler + VALID_COMMANDS + robot-docs)\n\n## TDD Loop\n\nRED:\n- `test_parse_trace_path_simple` - \"src/foo.rs\" -> (path, None)\n- `test_parse_trace_path_with_line` - \"src/foo.rs:42\" -> (path, Some(42))\n- `test_parse_trace_path_windows` - \"C:/foo.rs\" -> (path, None) — don't misparse drive letter\n\nGREEN: Implement CLI wiring and handlers.\n\nVERIFY: `cargo check --all-targets`\n\n## Edge Cases\n\n- Windows paths: don't misparse C: as line number\n- No MR data: friendly message with suggestion to sync\n- Very deep rename chain: bounded by resolve_rename_chain","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:34:32.788530Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:10:55.708488Z","closed_at":"2026-02-18T21:10:55.708445Z","close_reason":"Trace CLI implemented","compaction_level":0,"original_size":0,"labels":["cli","gate-5","phase-b"],"dependencies":[{"issue_id":"bd-9dd","depends_on_id":"bd-1ht","type":"parent-child","created_at":"2026-02-02T21:34:32.789920Z","created_by":"tayloreernisse"},{"issue_id":"bd-9dd","depends_on_id":"bd-2n4","type":"blocks","created_at":"2026-02-02T21:34:37.941327Z","created_by":"tayloreernisse"}]} {"id":"bd-9lbr","title":"lore explain: auto-generate 
issue/MR narrative","description":"## Background\nGiven an issue or MR, auto-generate a structured narrative of what happened: who was involved, what decisions were made, what changed, and what is unresolved. Template-based v1 (no LLM dependency), deterministic and reproducible.\n\n## Current Infrastructure (Verified 2026-02-12)\n- show.rs: IssueDetail (line 69) and MrDetail (line 14) — entity detail with discussions\n- timeline.rs: 5-stage pipeline SHIPPED — chronological event reconstruction\n- notes table: 282K rows with body, author, created_at, is_system, discussion_id\n- discussions table: links notes to parent entity (noteable_type, noteable_id), has resolved flag\n- resource_state_events table: state changes with created_at, user_username (src/core/events_db.rs)\n- resource_label_events table: label add/remove with created_at, user_username\n- entity_references table (src/core/references.rs): cross-references between entities (closing MRs, related issues). Column names: `source_entity_type`, `source_entity_id`, `target_entity_type`, `target_entity_id`, `target_project_path`, `target_entity_iid`, `reference_type`, `source_method`\n\n## Approach\nNew command: `lore explain issues N` / `lore explain mrs N`\n\n### Data Assembly (reuse existing internals as library calls)\n1. Entity detail: reuse show.rs query logic for IssueDetail/MrDetail\n2. Timeline events: reuse timeline pipeline with entity-scoped seed\n3. Discussion notes:\n```sql\nSELECT n.id, n.body, n.author_username, n.created_at\nFROM notes n\nJOIN discussions d ON n.discussion_id = d.id\nWHERE d.noteable_type = ? AND d.noteable_id = ?\n AND n.is_system = 0\nORDER BY n.created_at\n```\n4. 
Cross-references:\n```sql\nSELECT target_entity_type, target_entity_id, target_project_path,\n target_entity_iid, reference_type, source_method\nFROM entity_references\nWHERE (source_entity_type = ?1 AND source_entity_id = ?2)\nUNION ALL\nSELECT source_entity_type, source_entity_id, NULL,\n NULL, reference_type, source_method\nFROM entity_references\nWHERE (target_entity_type = ?1 AND target_entity_id = ?2)\n```\n\n### Key Decisions Heuristic\nNotes from assignees/author that follow state or label changes within 1 hour:\n```rust\nstruct StateOrLabelEvent {\n created_at: i64, // ms epoch\n user: String,\n description: String, // e.g. \"state: opened -> closed\" or \"label: +bug\"\n}\n\nfn extract_key_decisions(\n state_events: &[ResourceStateEvent],\n label_events: &[ResourceLabelEvent],\n notes: &[Note],\n) -> Vec {\n // Merge both event types into a unified chronological list\n let mut events: Vec = Vec::new();\n for e in state_events {\n events.push(StateOrLabelEvent {\n created_at: e.created_at,\n user: e.user_username.clone(),\n description: format!(\"state: {} -> {}\", e.from_state.as_deref().unwrap_or(\"?\"), e.to_state),\n });\n }\n for e in label_events {\n let action = if e.action == \"add\" { \"+\" } else { \"-\" };\n events.push(StateOrLabelEvent {\n created_at: e.created_at,\n user: e.user_username.clone(),\n description: format!(\"label: {}{}\", action, e.label_name.as_deref().unwrap_or(\"?\")),\n });\n }\n events.sort_by_key(|e| e.created_at);\n\n let mut decisions = Vec::new();\n let one_hour_ms: i64 = 60 * 60 * 1000;\n\n for event in &events {\n // Find notes by same actor within 60 min after the event\n for note in notes {\n if note.author_username == event.user\n && note.created_at >= event.created_at\n && note.created_at <= event.created_at + one_hour_ms\n {\n decisions.push(KeyDecision {\n timestamp: event.created_at,\n actor: event.user.clone(),\n action: event.description.clone(),\n context_note: truncate(¬e.body, 500),\n });\n break; // one 
note per event\n }\n }\n }\n decisions.truncate(10); // Cap at 10 key decisions\n decisions\n}\n```\n\n### Narrative Sections\n1. **Header**: title, author, opened date, state, assignees, labels, status_name\n2. **Description excerpt**: first 500 chars of description (or full if shorter)\n3. **Key decisions**: notes correlated with state/label changes (heuristic above)\n4. **Activity summary**: counts of state changes, label changes, notes, time range\n5. **Open threads**: discussions WHERE resolved = false\n6. **Related entities**: closing MRs (with state), related issues from entity_references\n7. **Timeline excerpt**: first 20 events from timeline pipeline\n\n## Robot Mode Output Schema\n```json\n{\n \"ok\": true,\n \"data\": {\n \"entity\": {\n \"type\": \"issue\", \"iid\": 3864, \"title\": \"...\", \"state\": \"opened\",\n \"author\": \"teernisse\", \"assignees\": [\"teernisse\"],\n \"labels\": [\"customer:BNSF\"], \"created_at\": \"...\", \"updated_at\": \"...\",\n \"url\": \"...\", \"status_name\": \"In progress\"\n },\n \"description_excerpt\": \"First 500 chars of description...\",\n \"key_decisions\": [{\n \"timestamp\": \"2026-01-15T...\",\n \"actor\": \"teernisse\",\n \"action\": \"state: opened -> in_progress\",\n \"context_note\": \"Starting work on the BNSF throw time integration...\"\n }],\n \"activity\": {\n \"state_changes\": 3, \"label_changes\": 5, \"notes\": 42,\n \"first_event\": \"2026-01-10T...\", \"last_event\": \"2026-02-12T...\"\n },\n \"open_threads\": [{\n \"discussion_id\": \"abc123\",\n \"started_by\": \"cseiber\",\n \"started_at\": \"2026-02-01T...\",\n \"note_count\": 5,\n \"last_note_at\": \"2026-02-10T...\"\n }],\n \"related\": {\n \"closing_mrs\": [{ \"iid\": 200, \"title\": \"...\", \"state\": \"merged\" }],\n \"related_issues\": [{ \"iid\": 3800, \"title\": \"Rail Break Card\", \"relation\": \"related\" }]\n },\n \"timeline_excerpt\": [{ \"timestamp\": \"...\", \"event_type\": \"...\", \"actor\": \"...\", \"summary\": \"...\" 
}]\n },\n \"meta\": { \"elapsed_ms\": 350 }\n}\n```\n\n## Clap Registration\n```rust\n// In src/main.rs Commands enum, add:\nExplain {\n /// Entity type: \"issues\" or \"mrs\"\n entity_type: String,\n /// Entity IID\n iid: i64,\n /// Scope to project (fuzzy match)\n #[arg(short, long)]\n project: Option,\n},\n```\n\n## TDD Loop\nRED: Tests in src/cli/commands/explain.rs:\n- test_explain_issue_basic: insert issue + notes + state events, run explain, assert all sections present (entity, description_excerpt, key_decisions, activity, open_threads, related, timeline_excerpt)\n- test_explain_key_decision_heuristic: insert state change event + note by same author within 30 min, assert note appears in key_decisions\n- test_explain_key_decision_ignores_unrelated_notes: insert note by different author, assert it does NOT appear in key_decisions\n- test_explain_open_threads: insert 2 discussions (1 resolved, 1 unresolved), assert only unresolved in open_threads\n- test_explain_no_notes: issue with zero notes produces header + description + empty sections\n- test_explain_mr: insert MR with merged_at, assert entity includes type=\"merge_request\"\n- test_explain_activity_counts: insert 3 state events + 2 label events + 10 notes, assert counts match\n\nGREEN: Implement explain command with section assembly\n\nVERIFY:\n```bash\ncargo test explain:: && cargo clippy --all-targets -- -D warnings\ncargo run --release -- -J explain issues 3864 | jq '.data | keys'\n# Should include: entity, description_excerpt, key_decisions, activity, open_threads, related, timeline_excerpt\n```\n\n## Acceptance Criteria\n- [ ] lore explain issues N produces structured output for any synced issue\n- [ ] lore explain mrs N produces structured output for any synced MR\n- [ ] Robot mode returns all 7 sections\n- [ ] Human mode renders readable narrative with headers and indentation\n- [ ] Key decisions heuristic: captures notes within 60 min of state/label changes by same actor\n- [ ] Works fully offline 
(no API calls, no LLM)\n- [ ] Performance: <500ms for issue with 50 notes\n- [ ] Command registered in main.rs and robot-docs\n- [ ] key_decisions capped at 10, timeline_excerpt capped at 20 events\n\n## Edge Cases\n- Issue with empty description: description_excerpt = \"(no description)\"\n- Issue with 500+ notes: timeline_excerpt capped at 20, key_decisions capped at 10\n- Issue not found in local DB: exit code 17 with suggestion to sync\n- Ambiguous project: exit code 18 with suggestion to use -p flag\n- MR with no review activity: activity section shows zeros\n- Cross-project references: show as unresolved with project path hint\n- Notes that are pure code blocks: include in key_decisions if correlated with events (they may contain implementation decisions)\n- ResourceStateEvent/ResourceLabelEvent field names: check src/core/events_db.rs for exact struct definitions before implementing\n\n## Dependency Context\n- **bd-2g50 (data gaps)**: BLOCKER. Provides `closed_at` field on IssueDetail for the header section. 
Without it, explain can still show state=\"closed\" but won't have the exact close timestamp.\n\n## Files to Create/Modify\n- NEW: src/cli/commands/explain.rs\n- src/cli/commands/mod.rs (add pub mod explain; re-export)\n- src/main.rs (register Explain subcommand in Commands enum, add handle_explain fn)\n- Reuse: show.rs queries, timeline pipeline, notes/discussions/resource_events queries from src/core/events_db.rs","status":"open","priority":2,"issue_type":"feature","created_at":"2026-02-12T15:46:41.386454Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:44:58.542217Z","compaction_level":0,"original_size":0,"labels":["cli-imp","intelligence"],"dependencies":[{"issue_id":"bd-9lbr","depends_on_id":"bd-13lp","type":"parent-child","created_at":"2026-02-12T15:46:41.389472Z","created_by":"tayloreernisse"}]} {"id":"bd-9wl5","title":"NOTE-2G: Parent metadata change propagation to note documents","description":"## Background\nNote documents inherit labels and title from parent issue/MR. When parent metadata changes, note documents become stale. The existing pipeline already marks discussion documents dirty on parent changes — note documents need the same treatment.\n\n## Approach\nFind where ingestion detects parent entity changes and marks discussion documents dirty. The dirty marking for discussions happens in:\n- src/ingestion/discussions.rs line 127: mark_dirty_tx(&tx, SourceType::Discussion, local_discussion_id)\n- src/ingestion/mr_discussions.rs line 162 and 362: mark_dirty_tx(&tx, SourceType::Discussion, local_discussion_id)\n\nThese fire when a discussion is upserted (which happens when parent entity is re-ingested). For note documents, we need to additionally mark all non-system notes of that discussion as dirty:\n\nAfter each mark_dirty_tx for Discussion, add:\n // Mark child note documents dirty (they inherit parent metadata)\n let note_ids: Vec = tx.prepare(\"SELECT id FROM notes WHERE discussion_id = ? 
AND is_system = 0\")?\n .query_map([local_discussion_id], |r| r.get(0))?\n .collect::, _>>()?;\n for note_id in note_ids {\n dirty_tracker::mark_dirty_tx(&tx, SourceType::Note, note_id)?;\n }\n\nAlternative (more efficient, set-based):\n INSERT INTO dirty_sources (source_type, source_id, queued_at)\n SELECT 'note', n.id, ?1\n FROM notes n\n WHERE n.discussion_id = ?2 AND n.is_system = 0\n ON CONFLICT(source_type, source_id) DO UPDATE SET queued_at = excluded.queued_at, attempt_count = 0\n\nUse the set-based approach for better performance with large discussions.\n\n## Files\n- MODIFY: src/ingestion/discussions.rs (add note dirty marking after line 127)\n- MODIFY: src/ingestion/mr_discussions.rs (add note dirty marking after lines 162 and 362)\n\n## TDD Anchor\nRED: test_parent_title_change_marks_notes_dirty — change issue title, re-ingest discussions, assert note documents appear in dirty_sources.\nGREEN: Add set-based INSERT INTO dirty_sources after discussion dirty marking.\nVERIFY: cargo test parent_title_change_marks_notes -- --nocapture\nTests: test_parent_label_change_marks_notes_dirty (modify issue labels, re-ingest, check dirty queue)\n\n## Acceptance Criteria\n- [ ] Discussion upsert for issue marks child non-system note documents dirty\n- [ ] Discussion upsert for MR marks child non-system note documents dirty (both call sites)\n- [ ] Only non-system notes marked dirty (is_system = 0 filter)\n- [ ] Set-based SQL (not per-note loop) for performance\n- [ ] Both tests pass\n\n## Dependency Context\n- Depends on NOTE-2D (bd-2ezb): dirty tracking infrastructure for notes must exist (dirty_sources accepts source_type='note', regenerator handles it)\n\n## Edge Cases\n- Discussion with 0 non-system notes: set-based INSERT is a no-op\n- Discussion with 100+ notes: set-based approach handles efficiently in one SQL statement\n- Concurrent discussion ingestion: ON CONFLICT DO UPDATE handles race 
safely","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:02:40.292874Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.717576Z","closed_at":"2026-02-12T18:13:15.717528Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"]} +{"id":"bd-a7ba","title":"Implement project scope resolution for me command","description":"## Background\nThe `lore me` command needs to resolve which project(s) to query. There are three modes: single project (--project), all projects (--all), or default (use default_project if set, else all). The existing `Config.effective_project()` method already handles CLI flag > default_project fallback, and `resolve_project()` in `src/core/project.rs` handles fuzzy matching.\n\n## Approach\nCreate in `src/cli/commands/me/mod.rs` (or a submodule):\n```rust\npub enum ProjectScope {\n Single(i64), // internal DB project.id (NOT gitlab_project_id)\n All,\n}\n\npub fn resolve_project_scope(\n project_flag: Option<&str>,\n all_flag: bool,\n config: &Config,\n conn: &Connection,\n) -> Result {\n // AC-8.4: mutual exclusivity\n if project_flag.is_some() && all_flag {\n return Err(LoreError::UsageError(\n \"Cannot use --project and --all together.\".to_string()\n ));\n }\n // AC-8.2: --all overrides everything\n if all_flag {\n return Ok(ProjectScope::All);\n }\n // Use effective_project: CLI flag > config.default_project\n let effective = project_flag.or(config.default_project.as_deref());\n match effective {\n Some(p) => Ok(ProjectScope::Single(resolve_project(conn, p)?)),\n None => Ok(ProjectScope::All), // AC-8.1: no default → all\n }\n}\n```\n\nImports:\n```rust\nuse crate::Config;\nuse crate::core::error::{LoreError, Result};\nuse crate::core::project::resolve_project;\nuse rusqlite::Connection;\n```\n\nNote: `resolve_project()` returns the internal DB `id` (not `gitlab_project_id`). Match strategy: exact → case-insensitive → suffix → substring. 
Returns `LoreError::Ambiguous` (exit 18) on multiple matches, `LoreError::Other` on no match.\n\n## Acceptance Criteria\n- [ ] `--project` and `--all` both passed → `LoreError::UsageError` with exit code 2 (AC-8.4)\n- [ ] `--project \"repo\"` → resolves via `resolve_project()` → `Single(id)` (AC-8.3)\n- [ ] `--all` → `All` (AC-8.2)\n- [ ] No flags + `config.default_project` set → resolve it → `Single(id)` (AC-8.1)\n- [ ] No flags + no default_project → `All` (AC-8.1)\n- [ ] Unknown --project → `LoreError::Other` (from resolve_project)\n- [ ] Ambiguous --project → `LoreError::Ambiguous` exit 18 (from resolve_project)\n\n## Files\n- MODIFY: src/cli/commands/me/mod.rs (ProjectScope enum + resolve function)\n\n## TDD Anchor\nRED: Write `test_project_and_all_mutually_exclusive` in an in-memory DB test. Pass both flags, assert error matches `LoreError::UsageError`.\nGREEN: Implement resolve_project_scope.\nVERIFY: `cargo test project_scope`\n\nAdditional tests:\n- test_project_flag_resolves (insert project, pass --project with matching path)\n- test_all_flag_returns_all\n- test_default_project_used (no CLI flag, config has default_project)\n- test_no_default_no_flags_returns_all\n\n## Edge Cases\n- `resolve_project` returns internal DB `id`, not `gitlab_project_id` — use the returned id for WHERE clauses on `project_id`\n- default_project value in config might not exist in DB (deleted/unsynced) — resolve_project will error\n\n## Dependency Context\nUses `LoreError::UsageError` variant from bd-1f1f.\nUses `resolve_project` from `src/core/project.rs` (existing, no bead needed).\nUses `Config.default_project` field from `src/core/config.rs`.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:35:50.328852Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.047676Z","closed_at":"2026-02-20T16:09:13.047627Z","close_reason":"Implemented by lore-me agent 
swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-a7ba","depends_on_id":"bd-utt4","type":"blocks","created_at":"2026-02-19T19:41:08.429644Z","created_by":"tayloreernisse"}]} {"id":"bd-am7","title":"Implement embedding pipeline with chunking","description":"## Background\nThe embedding pipeline takes documents, chunks them (paragraph-boundary splitting with overlap), sends chunks to Ollama for embedding via async HTTP, and stores vectors in sqlite-vec + metadata. It uses keyset pagination, concurrent HTTP requests via FuturesUnordered, per-batch transactions, and dimension validation.\n\n## Approach\nCreate \\`src/embedding/pipeline.rs\\` per PRD Section 4.4. **The pipeline is async.**\n\n**Constants (per PRD):**\n```rust\nconst BATCH_SIZE: usize = 32; // texts per Ollama API call\nconst DB_PAGE_SIZE: usize = 500; // keyset pagination page size\nconst EXPECTED_DIMS: usize = 768; // nomic-embed-text dimensions\nconst CHUNK_MAX_CHARS: usize = 32_000; // max chars per chunk\nconst CHUNK_OVERLAP_CHARS: usize = 500; // overlap between chunks\n```\n\n**Core async function:**\n```rust\npub async fn embed_documents(\n conn: &Connection,\n client: &OllamaClient,\n selection: EmbedSelection,\n concurrency: usize, // max in-flight HTTP requests\n progress_callback: Option>,\n) -> Result\n```\n\n**EmbedSelection:** Pending | RetryFailed\n**EmbedResult:** { embedded, failed, skipped }\n\n**Algorithm (per PRD):**\n1. count_pending_documents(conn, selection) for progress total\n2. Keyset pagination loop: find_pending_documents(conn, DB_PAGE_SIZE, last_id, selection)\n3. For each page:\n a. Begin transaction\n b. For each doc: clear_document_embeddings(&tx, doc.id), split_into_chunks(&doc.content)\n c. Build ChunkWork items with doc_hash + chunk_hash\n d. Commit clearing transaction\n4. Batch ChunkWork texts into Ollama calls (BATCH_SIZE=32)\n5. Use **FuturesUnordered** for concurrent HTTP, cap at \\`concurrency\\`\n6. 
collect_writes() in per-batch transactions: validate dims (768), store LE bytes, write metadata\n7. On error: record_embedding_error per chunk (not abort)\n8. Advance keyset cursor\n\n**ChunkWork struct:**\n```rust\nstruct ChunkWork {\n doc_id: i64,\n chunk_index: usize,\n doc_hash: String, // SHA-256 of FULL document (staleness detection)\n chunk_hash: String, // SHA-256 of THIS chunk (provenance)\n text: String,\n}\n```\n\n**Splitting:** split_into_chunks(content) -> Vec<(usize, String)>\n- Documents <= CHUNK_MAX_CHARS: single chunk (index 0)\n- Longer: split at paragraph boundaries (\\\\n\\\\n), fallback to sentence/word, with CHUNK_OVERLAP_CHARS overlap\n\n**Storage:** embeddings as raw LE bytes, rowid = encode_rowid(doc_id, chunk_idx)\n**Staleness detection:** uses document_hash (not chunk_hash) because it's document-level\n\nAlso create \\`src/embedding/change_detector.rs\\` (referenced in PRD module structure):\n```rust\npub fn detect_embedding_changes(conn: &Connection) -> Result>;\n```\n\n## Acceptance Criteria\n- [ ] Pipeline is async (uses FuturesUnordered for concurrent HTTP)\n- [ ] concurrency parameter caps in-flight HTTP requests\n- [ ] progress_callback reports (processed, total)\n- [ ] New documents embedded, changed re-embedded, unchanged skipped\n- [ ] clear_document_embeddings before re-embedding (range delete vec0 + metadata)\n- [ ] Chunking at paragraph boundaries with 500-char overlap\n- [ ] Short documents (<32k chars) produce exactly 1 chunk\n- [ ] Embeddings stored as raw LE bytes in vec0\n- [ ] Rowids encoded via encode_rowid(doc_id, chunk_index)\n- [ ] Dimension validation: 768 floats per embedding (mismatch -> record error, not store)\n- [ ] Per-batch transactions for writes\n- [ ] Errors recorded in embedding_metadata per chunk (last_error, attempt_count)\n- [ ] Keyset pagination (d.id > last_id, not OFFSET)\n- [ ] Pending detection uses document_hash (not chunk_hash)\n- [ ] \\`cargo build\\` succeeds\n\n## Files\n- 
\\`src/embedding/pipeline.rs\\` — new file (async)\n- \\`src/embedding/change_detector.rs\\` — new file\n- \\`src/embedding/mod.rs\\` — add \\`pub mod pipeline; pub mod change_detector;\\` + re-exports\n\n## TDD Loop\nRED: Unit tests for chunking:\n- \\`test_short_document_single_chunk\\` — <32k produces [(0, full_content)]\n- \\`test_long_document_multiple_chunks\\` — >32k splits at paragraph boundaries\n- \\`test_chunk_overlap\\` — adjacent chunks share 500-char overlap\n- \\`test_no_paragraph_boundary\\` — falls back to char boundary\nIntegration tests need Ollama or mock.\nGREEN: Implement split_into_chunks, embed_documents (async)\nVERIFY: \\`cargo test pipeline\\`\n\n## Edge Cases\n- Empty document content_text: skip (don't embed)\n- No paragraph boundaries: split at CHUNK_MAX_CHARS with overlap\n- Ollama error for one batch: record error per chunk, continue with next batch\n- Dimension mismatch (model returns 512 instead of 768): record error, don't store corrupt data\n- Document deleted between pagination and embedding: skip gracefully","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:34.093701Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:58:58.908585Z","closed_at":"2026-01-30T17:58:58.908525Z","close_reason":"Implemented embedding pipeline: chunking at paragraph boundaries with 500-char overlap, change detector (keyset pagination, hash-based staleness), async embed via Ollama with batch processing, dimension validation, per-chunk error recording, LE byte vector storage. 7 chunking tests pass. 
289 total tests.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-am7","depends_on_id":"bd-1y8","type":"blocks","created_at":"2026-01-30T15:29:24.697418Z","created_by":"tayloreernisse"},{"issue_id":"bd-am7","depends_on_id":"bd-2ac","type":"blocks","created_at":"2026-01-30T15:29:24.732567Z","created_by":"tayloreernisse"},{"issue_id":"bd-am7","depends_on_id":"bd-335","type":"blocks","created_at":"2026-01-30T15:29:24.660199Z","created_by":"tayloreernisse"}]} {"id":"bd-apmo","title":"OBSERV: Create migration 014 for sync_runs enrichment","description":"## Background\nThe sync_runs table (created in migration 001) has columns id, started_at, heartbeat_at, finished_at, status, command, error, metrics_json but NOTHING writes to it. This migration adds columns for the observability correlation ID and aggregate counts, enabling queryable sync history.\n\n## Approach\nCreate migrations/014_sync_runs_enrichment.sql:\n\n```sql\n-- Migration 014: sync_runs enrichment for observability\n-- Adds correlation ID and aggregate counts for queryable sync history\n\nALTER TABLE sync_runs ADD COLUMN run_id TEXT;\nALTER TABLE sync_runs ADD COLUMN total_items_processed INTEGER DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN total_errors INTEGER DEFAULT 0;\n\n-- Index for correlation queries (find run by run_id from logs)\nCREATE INDEX IF NOT EXISTS idx_sync_runs_run_id ON sync_runs(run_id);\n```\n\nMigration naming convention: check migrations/ directory. Current latest is 013_resource_event_watermarks.sql. Next is 014.\n\nNote: SQLite ALTER TABLE ADD COLUMN is always safe -- it sets NULL for existing rows. 
DEFAULT 0 applies to new INSERTs only.\n\n## Acceptance Criteria\n- [ ] Migration 014 applies cleanly on a fresh DB (all migrations 001-014)\n- [ ] Migration 014 applies cleanly on existing DB with 001-013 already applied\n- [ ] sync_runs table has run_id TEXT column\n- [ ] sync_runs table has total_items_processed INTEGER DEFAULT 0 column\n- [ ] sync_runs table has total_errors INTEGER DEFAULT 0 column\n- [ ] idx_sync_runs_run_id index exists\n- [ ] Existing sync_runs rows (if any) have NULL run_id, 0 for counts\n- [ ] cargo clippy --all-targets -- -D warnings passes (no code changes, but verify migration is picked up)\n\n## Files\n- migrations/014_sync_runs_enrichment.sql (new file)\n\n## TDD Loop\nRED:\n - test_migration_014_applies: apply all migrations on fresh in-memory DB, query sync_runs schema\n - test_migration_014_idempotent: CREATE INDEX IF NOT EXISTS makes re-run safe; ALTER TABLE ADD COLUMN is NOT idempotent in SQLite (will error). Consider: skip this test or use IF NOT EXISTS workaround\nGREEN: Create migration file\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- ALTER TABLE ADD COLUMN in SQLite: NOT idempotent. Running migration twice will error \"duplicate column name.\" The migration system should prevent re-runs, but IF NOT EXISTS is not available for ALTER TABLE in SQLite. Rely on migration tracking.\n- Migration numbering conflict: if another PR adds 014 first, renumber to 015. Check before merging.\n- metrics_json already exists (from migration 001): we don't touch it. 
The new columns supplement it with queryable aggregates.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-04T15:54:51.311879Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:34:05.309761Z","closed_at":"2026-02-04T17:34:05.309714Z","close_reason":"Created migration 014 adding run_id TEXT, total_items_processed INTEGER, total_errors INTEGER to sync_runs, with idx_sync_runs_run_id index","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-apmo","depends_on_id":"bd-3pz","type":"parent-child","created_at":"2026-02-04T15:54:51.314770Z","created_by":"tayloreernisse"}]} {"id":"bd-arka","title":"Extend SyncRunRecorder with surgical mode lifecycle methods","description":"## Background\nThe existing `SyncRunRecorder` in `src/core/sync_run.rs` manages sync run lifecycle with three methods: `start()` (creates row, returns Self), `succeed(self, ...)` (consumes self, sets succeeded), and `fail(self, ...)` (consumes self, sets failed). Both `succeed()` and `fail()` take ownership of `self` — this is intentional to prevent double-finalization.\n\nSurgical sync needs additional lifecycle methods to:\n1. Set surgical-specific metadata (mode, phase, IIDs JSON) after `start()`\n2. Record per-entity results (increment counters, store entity-level outcomes)\n3. Cancel a run (distinct from failure — user-initiated or timeout)\n4. 
Update phase progression during the surgical pipeline\n\nThese methods operate on the columns added by migration 027 (bead bd-tiux).\n\n## Approach\n\n### Step 1: Add `set_surgical_metadata` method\n\nCalled once after `start()` to set the surgical mode columns:\n\n```rust\npub fn set_surgical_metadata(\n &self,\n conn: &Connection,\n mode: &str,\n phase: &str,\n iids_json: &str,\n) -> Result<()> {\n conn.execute(\n \"UPDATE sync_runs SET mode = ?1, phase = ?2, surgical_iids_json = ?3 WHERE id = ?4\",\n rusqlite::params![mode, phase, iids_json, self.row_id],\n )?;\n Ok(())\n}\n```\n\nTakes `&self` (not `self`) because the recorder continues to be used after metadata is set.\n\n### Step 2: Add `update_phase` method\n\nCalled as the surgical pipeline progresses through phases:\n\n```rust\npub fn update_phase(&self, conn: &Connection, phase: &str) -> Result<()> {\n conn.execute(\n \"UPDATE sync_runs SET phase = ?1, heartbeat_at = ?2 WHERE id = ?3\",\n rusqlite::params![phase, now_ms(), self.row_id],\n )?;\n Ok(())\n}\n```\n\n### Step 3: Add `record_entity_result` method\n\nCalled after each entity (issue or MR) is processed to increment counters:\n\n```rust\npub fn record_entity_result(\n &self,\n conn: &Connection,\n entity_type: &str,\n stage: &str,\n) -> Result<()> {\n let column = match (entity_type, stage) {\n (\"issue\", \"fetched\") => \"issues_fetched\",\n (\"issue\", \"ingested\") => \"issues_ingested\",\n (\"mr\", \"fetched\") => \"mrs_fetched\",\n (\"mr\", \"ingested\") => \"mrs_ingested\",\n (\"issue\" | \"mr\", \"skipped_stale\") => \"skipped_stale\",\n (\"doc\", \"regenerated\") => \"docs_regenerated\",\n (\"doc\", \"embedded\") => \"docs_embedded\",\n (_, \"warning\") => \"warnings_count\",\n _ => return Ok(()), // Unknown combinations are silently ignored\n };\n conn.execute(\n &format!(\"UPDATE sync_runs SET {column} = {column} + 1 WHERE id = ?1\"),\n rusqlite::params![self.row_id],\n )?;\n Ok(())\n}\n```\n\nNote: The column name comes from a 
hardcoded match, NOT from user input — no SQL injection risk.\n\n### Step 4: Add `cancel` method\n\nConsumes self (like succeed/fail) to finalize the run as cancelled:\n\n```rust\npub fn cancel(self, conn: &Connection, reason: &str) -> Result<()> {\n let now = now_ms();\n conn.execute(\n \"UPDATE sync_runs SET finished_at = ?1, cancelled_at = ?2, status = 'cancelled', error = ?3 WHERE id = ?4\",\n rusqlite::params![now, now, reason, self.row_id],\n )?;\n Ok(())\n}\n```\n\nTakes `self` (ownership) like `succeed()` and `fail()` — prevents further use after cancellation.\n\n### Step 5: Expose `row_id` getter\n\nThe orchestrator (bd-1i4i) may need the row_id for logging/tracing:\n\n```rust\npub fn row_id(&self) -> i64 {\n self.row_id\n}\n```\n\n## Acceptance Criteria\n- [ ] `set_surgical_metadata(&self, conn, mode, phase, iids_json)` writes mode/phase/surgical_iids_json columns\n- [ ] `update_phase(&self, conn, phase)` updates phase and heartbeat_at\n- [ ] `record_entity_result(&self, conn, entity_type, stage)` increments the correct counter column\n- [ ] `record_entity_result` silently ignores unknown entity_type/stage combinations\n- [ ] `cancel(self, conn, reason)` consumes self, sets status='cancelled', finished_at, cancelled_at, error\n- [ ] `row_id()` returns the internal row_id\n- [ ] `succeed(self, ...)` still works after `set_surgical_metadata` + `record_entity_result` calls\n- [ ] `fail(self, ...)` still works after `set_surgical_metadata` + `update_phase` calls\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo clippy --all-targets -- -D warnings` passes\n- [ ] All existing sync_run tests continue to pass\n\n## Files\n- MODIFY: src/core/sync_run.rs (add methods to SyncRunRecorder impl block)\n- MODIFY: src/core/sync_run_tests.rs (add new tests)\n\n## TDD Anchor\nRED: Write tests in `src/core/sync_run_tests.rs`:\n\n```rust\n#[test]\nfn surgical_lifecycle_start_metadata_succeed() {\n let conn = setup_test_db();\n let recorder = 
SyncRunRecorder::start(&conn, \"sync\", \"surg001\").unwrap();\n let row_id = recorder.row_id();\n\n recorder.set_surgical_metadata(\n &conn, \"surgical\", \"preflight\", r#\"{\"issues\":[7,8],\"mrs\":[101]}\"#,\n ).unwrap();\n\n recorder.update_phase(&conn, \"ingest\").unwrap();\n recorder.record_entity_result(&conn, \"issue\", \"fetched\").unwrap();\n recorder.record_entity_result(&conn, \"issue\", \"fetched\").unwrap();\n recorder.record_entity_result(&conn, \"issue\", \"ingested\").unwrap();\n recorder.record_entity_result(&conn, \"mr\", \"fetched\").unwrap();\n recorder.record_entity_result(&conn, \"mr\", \"ingested\").unwrap();\n\n recorder.succeed(&conn, &[], 3, 0).unwrap();\n\n let (mode, phase, iids, issues_fetched, mrs_fetched, issues_ingested, mrs_ingested, status): (\n String, String, String, i64, i64, i64, i64, String,\n ) = conn.query_row(\n \"SELECT mode, phase, surgical_iids_json, issues_fetched, mrs_fetched, issues_ingested, mrs_ingested, status\n FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?, r.get(3)?, r.get(4)?, r.get(5)?, r.get(6)?, r.get(7)?)),\n ).unwrap();\n\n assert_eq!(mode, \"surgical\");\n assert_eq!(phase, \"ingest\"); // Last phase set before succeed\n assert!(iids.contains(\"101\"));\n assert_eq!(issues_fetched, 2);\n assert_eq!(mrs_fetched, 1);\n assert_eq!(issues_ingested, 1);\n assert_eq!(mrs_ingested, 1);\n assert_eq!(status, \"succeeded\");\n}\n\n#[test]\nfn surgical_lifecycle_cancel() {\n let conn = setup_test_db();\n let recorder = SyncRunRecorder::start(&conn, \"sync\", \"cancel01\").unwrap();\n let row_id = recorder.row_id();\n\n recorder.set_surgical_metadata(&conn, \"surgical\", \"preflight\", \"{}\").unwrap();\n recorder.cancel(&conn, \"User requested cancellation\").unwrap();\n\n let (status, error, cancelled_at, finished_at): (String, Option, Option, Option) = conn.query_row(\n \"SELECT status, error, cancelled_at, finished_at FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| 
Ok((r.get(0)?, r.get(1)?, r.get(2)?, r.get(3)?)),\n ).unwrap();\n\n assert_eq!(status, \"cancelled\");\n assert_eq!(error.as_deref(), Some(\"User requested cancellation\"));\n assert!(cancelled_at.is_some());\n assert!(finished_at.is_some());\n}\n\n#[test]\nfn record_entity_result_ignores_unknown() {\n let conn = setup_test_db();\n let recorder = SyncRunRecorder::start(&conn, \"sync\", \"unk001\").unwrap();\n // Should not panic or error on unknown combinations\n recorder.record_entity_result(&conn, \"widget\", \"exploded\").unwrap();\n}\n\n#[test]\nfn record_entity_result_json_counters() {\n let conn = setup_test_db();\n let recorder = SyncRunRecorder::start(&conn, \"sync\", \"cnt001\").unwrap();\n let row_id = recorder.row_id();\n\n recorder.record_entity_result(&conn, \"doc\", \"regenerated\").unwrap();\n recorder.record_entity_result(&conn, \"doc\", \"regenerated\").unwrap();\n recorder.record_entity_result(&conn, \"doc\", \"embedded\").unwrap();\n recorder.record_entity_result(&conn, \"issue\", \"skipped_stale\").unwrap();\n\n let (docs_regen, docs_embed, skipped): (i64, i64, i64) = conn.query_row(\n \"SELECT docs_regenerated, docs_embedded, skipped_stale FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?)),\n ).unwrap();\n\n assert_eq!(docs_regen, 2);\n assert_eq!(docs_embed, 1);\n assert_eq!(skipped, 1);\n}\n```\n\nGREEN: Add all methods to `SyncRunRecorder`.\nVERIFY: `cargo test surgical_lifecycle && cargo test record_entity_result`\n\n## Edge Cases\n- `succeed()` and `fail()` consume `self` — the compiler enforces that no methods are called after finalization. `cancel()` also consumes self for the same reason.\n- `set_surgical_metadata`, `update_phase`, and `record_entity_result` take `&self` — they can be called multiple times before finalization.\n- The `record_entity_result` match uses a hardcoded column name derived from known string constants, not user input. 
The `format!` is safe because the column name is always one of the hardcoded strings.\n- `record_entity_result` silently returns Ok(()) for unknown entity_type/stage combos rather than erroring — this avoids breaking the pipeline for non-critical telemetry.\n- Phase is NOT overwritten by `succeed()`/`fail()`/`cancel()` — the last phase set via `update_phase()` is preserved as the \"phase at completion\" for observability.\n\n## Dependency Context\nDepends on bd-tiux (migration 027) for the surgical columns to exist. Downstream beads bd-1i4i (orchestrator) and bd-3jqx (integration tests) use these methods.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-17T19:13:50.827946Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:04:58.568108Z","closed_at":"2026-02-18T21:04:58.568067Z","close_reason":"Completed: all implementation work done, code reviewed, tests passing","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-arka","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-17T19:19:24.596403Z","created_by":"tayloreernisse"}]} +{"id":"bd-b3r3","title":"Implement activity feed: note and resource event queries","description":"## Background\nThe activity feed (AC-5.4) aggregates events from multiple sources. This bead covers the first 4 sources: human comments (notes.is_system=0), state events, label events, and milestone events. Each source is a SQL subquery that returns a unified column set for UNION ALL assembly.\n\n## Approach\nCreate `src/cli/commands/me/activity.rs`. 
Define individual query-building functions that return SQL strings or execute directly and return `Vec`.\n\n### Schema context\n\n**notes** (migration 002): `id, discussion_id (FK→discussions), is_system (0/1), author_username, body, created_at (ms epoch)`\n\n**discussions** (migration 002): `id, project_id, issue_id (nullable FK→issues), merge_request_id (nullable), noteable_type ('Issue'|'MergeRequest')`\n- CHECK constraint: exactly one of issue_id/merge_request_id is non-NULL\n\n**resource_state_events** (migration 011): `id, gitlab_id, project_id, issue_id (nullable), merge_request_id (nullable), state TEXT NOT NULL, actor_username TEXT, created_at (ms epoch)`\n- CHECK constraint: exactly one of issue_id/merge_request_id is non-NULL\n\n**resource_label_events** (migration 011): `id, gitlab_id, project_id, issue_id (nullable), merge_request_id (nullable), action TEXT ('add'|'remove'), label_name TEXT NOT NULL, actor_username TEXT, created_at (ms epoch)`\n- CHECK: one of issue_id/merge_request_id non-NULL. Note: `label_name` is NOT NULL in migration 011 but was made NULLABLE in migration 012.\n\n**resource_milestone_events** (migration 011): `id, gitlab_id, project_id, issue_id (nullable), merge_request_id (nullable), action TEXT ('add'|'remove'), milestone_title TEXT NOT NULL, actor_username TEXT, created_at (ms epoch)`\n- CHECK: one of issue_id/merge_request_id non-NULL. Note: `milestone_title` is NOT NULL in 011, NULLABLE after 012.\n\n**issue_assignees**: `issue_id, username` (PK both). 
Index: `idx_issue_assignees_username(username, issue_id)`.\n\n**mr_reviewers**: `merge_request_id, username` (PK both).\n\n### Unified output columns (all 4 sources must match)\n\n```\ntimestamp (ms epoch) | event_type | entity_type | entity_iid | project_path | actor | is_own | summary | body_preview\n```\n\n### Source 1: Human comments (notes with is_system=0)\n```sql\nSELECT n.created_at AS timestamp,\n 'note' AS event_type,\n CASE WHEN d.issue_id IS NOT NULL THEN 'issue' ELSE 'merge_request' END AS entity_type,\n COALESCE(i.iid, mr.iid) AS entity_iid,\n p.path_with_namespace AS project_path,\n n.author_username AS actor,\n (n.author_username = ?username) AS is_own,\n 'commented' AS summary,\n SUBSTR(n.body, 1, 200) AS body_preview\nFROM notes n\nJOIN discussions d ON n.discussion_id = d.id\nLEFT JOIN issues i ON d.issue_id = i.id\nLEFT JOIN merge_requests mr ON d.merge_request_id = mr.id\nJOIN projects p ON p.id = COALESCE(i.project_id, mr.project_id)\nWHERE n.is_system = 0\n AND n.created_at >= ?since_ms\n AND (my_items_filter)\n AND (project_scope_filter)\n```\n\n### Source 2: State events\n```sql\nSELECT rse.created_at AS timestamp,\n 'status' AS event_type,\n CASE WHEN rse.issue_id IS NOT NULL THEN 'issue' ELSE 'merge_request' END AS entity_type,\n COALESCE(i.iid, mr.iid) AS entity_iid,\n p.path_with_namespace AS project_path,\n rse.actor_username AS actor,\n (rse.actor_username = ?username) AS is_own,\n rse.state || ' ' || CASE WHEN rse.issue_id IS NOT NULL THEN 'issue' ELSE 'MR' END AS summary,\n NULL AS body_preview\nFROM resource_state_events rse\nLEFT JOIN issues i ON rse.issue_id = i.id\nLEFT JOIN merge_requests mr ON rse.merge_request_id = mr.id\nJOIN projects p ON p.id = rse.project_id\nWHERE rse.created_at >= ?since_ms\n AND (my_items_filter)\n AND (project_scope_filter)\n```\n\n### Source 3: Label events\n```sql\nSELECT rle.created_at AS timestamp,\n 'label' AS event_type,\n CASE WHEN rle.issue_id IS NOT NULL THEN 'issue' ELSE 'merge_request' END 
AS entity_type,\n COALESCE(i.iid, mr.iid) AS entity_iid,\n p.path_with_namespace AS project_path,\n rle.actor_username AS actor,\n (rle.actor_username = ?username) AS is_own,\n rle.action || ' label ' || COALESCE(rle.label_name, '?') AS summary,\n NULL AS body_preview\nFROM resource_label_events rle\nLEFT JOIN issues i ON rle.issue_id = i.id\nLEFT JOIN merge_requests mr ON rle.merge_request_id = mr.id\nJOIN projects p ON p.id = rle.project_id\nWHERE rle.created_at >= ?since_ms\n AND (my_items_filter)\n AND (project_scope_filter)\n```\n\n### Source 4: Milestone events\n```sql\nSELECT rme.created_at AS timestamp,\n 'milestone' AS event_type,\n CASE WHEN rme.issue_id IS NOT NULL THEN 'issue' ELSE 'merge_request' END AS entity_type,\n COALESCE(i.iid, mr.iid) AS entity_iid,\n p.path_with_namespace AS project_path,\n rme.actor_username AS actor,\n (rme.actor_username = ?username) AS is_own,\n rme.action || ' milestone ' || COALESCE(rme.milestone_title, '?') AS summary,\n NULL AS body_preview\nFROM resource_milestone_events rme\nLEFT JOIN issues i ON rme.issue_id = i.id\nLEFT JOIN merge_requests mr ON rme.merge_request_id = mr.id\nJOIN projects p ON p.id = rme.project_id\nWHERE rme.created_at >= ?since_ms\n AND (my_items_filter)\n AND (project_scope_filter)\n```\n\n### \"My items\" filter (AC-3.6 — current associations)\n```sql\n(\n (d.issue_id IS NOT NULL AND EXISTS (\n SELECT 1 FROM issue_assignees ia WHERE ia.issue_id = d.issue_id AND ia.username = ?username\n ))\n OR\n (d.merge_request_id IS NOT NULL AND (\n EXISTS (SELECT 1 FROM merge_requests m2 WHERE m2.id = d.merge_request_id AND m2.author_username = ?username)\n OR EXISTS (SELECT 1 FROM mr_reviewers mrr WHERE mrr.merge_request_id = d.merge_request_id AND mrr.username = ?username)\n ))\n)\n```\nFor resource event tables, use `rse.issue_id`/`rse.merge_request_id` instead of `d.issue_id`/`d.merge_request_id`.\n\n### ProjectScope filter\nUse the `ProjectScope` enum from bd-a7ba:\n- `All` → no filter\n- 
`Single(project_id)` → `AND p.id = ?project_id`\n\n### Function signature\n```rust\npub fn build_notes_activity_sql(scope: &ProjectScope) -> String\npub fn build_state_events_sql(scope: &ProjectScope) -> String\npub fn build_label_events_sql(scope: &ProjectScope) -> String\npub fn build_milestone_events_sql(scope: &ProjectScope) -> String\n```\n\nOr a single function that returns all 4 SQL fragments for UNION ALL assembly.\n\n## Acceptance Criteria\n- [ ] Notes query returns human comments (is_system=0) on my items within --since window\n- [ ] State events query returns state changes on my items within --since window\n- [ ] Label events query returns label changes on my items within --since window\n- [ ] Milestone events query returns milestone changes on my items within --since window\n- [ ] All 4 queries return identical column shape: timestamp, event_type, entity_type, entity_iid, project_path, actor, is_own, summary, body_preview\n- [ ] body_preview: SUBSTR(body, 1, 200) for notes (AC-7.6), NULL for events\n- [ ] is_own flag: `(actor_username = ?username)` returns 0/1 (AC-5.4)\n- [ ] \"My items\" uses CURRENT association only (AC-3.6): assignee, MR author, or MR reviewer\n- [ ] Includes activity on items regardless of open/closed state (AC-5.4)\n- [ ] Respects ProjectScope filtering\n- [ ] label_name and milestone_title handled as potentially NULL (COALESCE with '?')\n\n## Files\n- CREATE: src/cli/commands/me/activity.rs\n- MODIFY: src/cli/commands/me/mod.rs (add `pub mod activity;`)\n\n## TDD Anchor\nRED: Write `test_note_activity_returns_human_comments` using in-memory DB. Insert: project, issue, issue_assignee(username=me), discussion(issue_id=issue, noteable_type='Issue'), note(is_system=0, created_at within since window). Execute notes query. 
Assert 1 result with event_type=\"note\".\n\nGREEN: Implement the notes query.\nVERIFY: `cargo test note_activity`\n\nAdditional tests:\n- test_note_activity_excludes_system_notes (insert is_system=1 note, assert 0 results)\n- test_note_activity_respects_since_window (insert note before since_ms, assert 0 results)\n- test_state_event_appears_in_activity (insert resource_state_event with issue_id set)\n- test_label_event_appears_in_activity (insert resource_label_event)\n- test_milestone_event_appears_in_activity (insert resource_milestone_event)\n- test_activity_only_for_currently_associated_items (insert note on unassigned issue, assert 0)\n- test_is_own_flag_correct (two notes: one by me, one by other; check is_own on each)\n\n## Edge Cases\n- resource event tables use CHECK constraint: exactly one of issue_id/merge_request_id is non-NULL — LEFT JOINs handle this\n- `label_name` is NULLABLE after migration 012 — use `COALESCE(rle.label_name, '?')` in summary\n- `milestone_title` is NULLABLE after migration 012 — same COALESCE\n- `actor_username` on resource events can be NULL (rare: API returns null actor) — handle with COALESCE in actor column\n- The \"my items\" filter joins differently for notes (through discussions) vs resource events (direct issue_id/merge_request_id columns)\n- Activity on closed items SHOULD appear — no `state = 'opened'` filter\n\n## Dependency Context\nUses `MeActivityItem` struct from bd-3bwh. 
Uses `ProjectScope` enum from bd-a7ba.\nConsumed by bd-2tl5 (UNION ALL assembly) and bd-2nl3 (assignment detection adds a 5th source).","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:37:56.988620Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.055105Z","closed_at":"2026-02-20T16:09:13.055064Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-b3r3","depends_on_id":"bd-3bwh","type":"blocks","created_at":"2026-02-19T19:41:18.048767Z","created_by":"tayloreernisse"},{"issue_id":"bd-b3r3","depends_on_id":"bd-a7ba","type":"blocks","created_at":"2026-02-19T19:41:18.129824Z","created_by":"tayloreernisse"}]} {"id":"bd-b51e","title":"WHO: Overlap mode query (query_overlap)","description":"## Background\n\nOverlap mode answers \"Who else has MRs/notes touching my files?\" — helps identify potential reviewers, collaborators, or conflicting work at a path. Tracks author and reviewer roles separately for richer signal.\n\n## Approach\n\n### SQL: two static variants (prefix/exact) with reviewer + author UNION ALL\n\nBoth branches return: username, role, touch_count (COUNT DISTINCT m.id), last_seen_at, mr_refs (GROUP_CONCAT of project-qualified refs).\n\nKey differences from Expert:\n- No scoring formula — just touch_count ranking\n- mr_refs collected for actionable output (group/project!iid format)\n- Rust-side merge needed (can't fully aggregate in SQL due to HashSet dedup of mr_refs across branches)\n\n### Reviewer branch includes:\n- Self-review exclusion: `n.author_username != m.author_username`\n- MR state filter: `m.state IN ('opened','merged')`\n- Project-qualified refs: `GROUP_CONCAT(DISTINCT (p.path_with_namespace || '!' 
|| m.iid))`\n\n### Rust accumulator pattern:\n```rust\nstruct OverlapAcc {\n username: String,\n author_touch_count: u32,\n review_touch_count: u32,\n touch_count: u32,\n last_seen_at: i64,\n mr_refs: HashSet, // O(1) dedup from the start\n}\n// Build HashMap from rows\n// Convert to Vec, sort, bound mr_refs\n```\n\n### Bounded mr_refs:\n```rust\nconst MAX_MR_REFS_PER_USER: usize = 50;\nlet mr_refs_total = mr_refs.len() as u32;\nlet mr_refs_truncated = mr_refs.len() > MAX_MR_REFS_PER_USER;\n```\n\n### Deterministic sort: touch_count DESC, last_seen_at DESC, username ASC\n\n### format_overlap_role():\n```rust\nfn format_overlap_role(user: &OverlapUser) -> &'static str {\n match (user.author_touch_count > 0, user.review_touch_count > 0) {\n (true, true) => \"A+R\", (true, false) => \"A\",\n (false, true) => \"R\", (false, false) => \"-\",\n }\n}\n```\n\n### OverlapResult/OverlapUser structs include path_match (\"exact\"/\"prefix\"), truncated bool, per-user mr_refs_total + mr_refs_truncated\n\n## Files\n\n- `src/cli/commands/who.rs`\n\n## TDD Loop\n\nRED:\n```\ntest_overlap_dual_roles — user is author of MR 1 and reviewer of MR 2 at same path; verify A+R role, both touch counts > 0, mr_refs contain \"team/backend!\"\ntest_overlap_multi_project_mr_refs — same iid 100 in two projects; verify both \"team/backend!100\" and \"team/frontend!100\" present\ntest_overlap_excludes_self_review_notes — author comments on own MR; review_touch_count must be 0\n```\n\nGREEN: Implement query_overlap with both SQL variants + accumulator\nVERIFY: `cargo test -- overlap`\n\n## Acceptance Criteria\n\n- [ ] test_overlap_dual_roles passes (A+R role detection)\n- [ ] test_overlap_multi_project_mr_refs passes (project-qualified refs unique)\n- [ ] test_overlap_excludes_self_review_notes passes\n- [ ] Default since window: 30d\n- [ ] mr_refs sorted alphabetically for deterministic output\n- [ ] touch_count uses coherent units (COUNT DISTINCT m.id on BOTH branches)\n\n## Edge Cases\n\n- Both 
branches count MRs (not DiffNotes) for coherent touch_count — mixing units produces misleading totals\n- mr_refs from GROUP_CONCAT may contain duplicates across branches — HashSet handles dedup\n- Project scoping on n.project_id (not m.project_id) for index alignment\n- mr_refs sorted before output (HashSet iteration is nondeterministic)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:40:46.729921Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.598708Z","closed_at":"2026-02-08T04:10:29.598673Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-b51e","depends_on_id":"bd-2ldg","type":"blocks","created_at":"2026-02-08T02:43:37.563924Z","created_by":"tayloreernisse"},{"issue_id":"bd-b51e","depends_on_id":"bd-34rr","type":"blocks","created_at":"2026-02-08T02:43:37.618217Z","created_by":"tayloreernisse"}]} {"id":"bd-bcte","title":"Implement filter DSL parser state machine","description":"## Background\n\nThe Issue List and MR List filter bars accept typed filter expressions (e.g., `state:opened author:@asmith label:\"high priority\" -milestone:v2.0`). The PRD Appendix B defines a full state machine: Inactive -> Active -> FieldSelect/FreeText -> ValueInput. The parser needs to handle field:value pairs, negation prefix (`-`), quoted values with spaces, bare text as free-text search, and inline error diagnostics when an unrecognized field name is typed. This is a substantial subsystem that the entity table filter bar widget (bd-18qs) depends on for its core functionality.\n\n## Approach\n\nCreate a `filter_dsl.rs` module with:\n\n1. **FilterToken enum** — `Field { name: String, value: String, negated: bool }` | `FreeText(String)` | `Error { position: usize, message: String }`\n2. 
**`parse_filter(input: &str) -> Vec`** — Tokenizer that handles:\n - `field:value` — recognized fields: state, author, assignee, label, milestone, since, project (issue); + reviewer, draft, target, source (MR)\n - `-field:value` — negation prefix strips the `-` and sets `negated: true`\n - `field:\"quoted value\"` — double-quoted values preserve spaces\n - bare words — collected as `FreeText` tokens\n - unrecognized field names — produce `Error` token with position and message\n3. **FilterBarState** state machine:\n - `Inactive` — filter bar not focused\n - `Active(Typing)` — user typing, no suggestion yet\n - `Active(Suggesting)` — 200ms pause triggers field name suggestions\n - `FieldSelect` — dropdown showing recognized field names after `:`\n - `ValueInput` — context-dependent completions (e.g., state values: opened/closed/all)\n4. **`apply_issue_filter(tokens: &[FilterToken]) -> IssueFilterParams`** — converts tokens to query parameters\n5. **`apply_mr_filter(tokens: &[FilterToken]) -> MrFilterParams`** — MR variant with reviewer, draft, target/source fields\n\n## Acceptance Criteria\n- [ ] `parse_filter(\"state:opened\")` returns one Field token with name=\"state\", value=\"opened\", negated=false\n- [ ] `parse_filter(\"-label:bug\")` returns one Field with negated=true\n- [ ] `parse_filter('author:\"Jane Doe\"')` returns one Field with value=\"Jane Doe\" (quotes stripped)\n- [ ] `parse_filter(\"foo:bar\")` where \"foo\" is not a recognized field returns Error token with position\n- [ ] `parse_filter(\"state:opened some text\")` returns Field + FreeText tokens\n- [ ] `parse_filter(\"\")` returns empty vec\n- [ ] FilterBarState transitions match the Appendix B state machine diagram\n- [ ] apply_issue_filter correctly maps all 7 issue fields (state, author, assignee, label, milestone, since, project)\n- [ ] apply_mr_filter correctly maps additional MR fields (reviewer, draft, target, source)\n- [ ] Inline error diagnostics include the character position of the 
unrecognized field\n\n## Files\n- CREATE: crates/lore-tui/src/widgets/filter_dsl.rs\n- MODIFY: crates/lore-tui/src/widgets/mod.rs (add `pub mod filter_dsl;`)\n\n## TDD Anchor\nRED: Write `test_parse_simple_field_value` that asserts `parse_filter(\"state:opened\")` returns `[Field { name: \"state\", value: \"opened\", negated: false }]`.\nGREEN: Implement the tokenizer for the simplest case.\nVERIFY: cargo test -p lore-tui parse_simple\n\nAdditional tests:\n- test_parse_negation\n- test_parse_quoted_value\n- test_parse_unrecognized_field_produces_error\n- test_parse_mixed_tokens\n- test_parse_empty_input\n- test_apply_issue_filter_maps_all_fields\n- test_apply_mr_filter_maps_additional_fields\n- test_filter_bar_state_transitions\n\n## Edge Cases\n- Unclosed quote (`author:\"Jane`) — treat rest of input as the value, produce warning token\n- Empty value (`state:`) — produce Error token, not a Field with empty value\n- Multiple colons (`field:val:ue`) — first colon splits, rest is part of value\n- Unicode in field values (`author:@rene`) — must handle multi-byte chars correctly\n- Very long filter strings (>1000 chars) — must not allocate unbounded; truncate with error\n\n## Dependency Context\n- Depends on bd-18qs (entity table + filter bar widgets) which provides the TextInput widget and filter bar rendering. 
This bead provides the PARSER that bd-18qs's filter bar CALLS.\n- Consumed by bd-3ei1 (Issue List) and bd-2kr0 (MR List) for converting user filter input into query parameters.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T19:29:37.516695Z","created_by":"tayloreernisse","updated_at":"2026-02-12T19:29:47.312394Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-bcte","depends_on_id":"bd-18qs","type":"blocks","created_at":"2026-02-12T19:29:47.312364Z","created_by":"tayloreernisse"}]} {"id":"bd-bjo","title":"Implement vector search function","description":"## Background\nVector search queries the sqlite-vec virtual table for nearest-neighbor documents. Because documents may have multiple chunks, the raw KNN results need deduplication by document_id (keeping the best/lowest distance per document). The function over-fetches 3x to ensure enough unique documents after dedup.\n\n## Approach\nCreate `src/search/vector.rs`:\n\n```rust\npub struct VectorResult {\n pub document_id: i64,\n pub distance: f64, // Lower = closer match\n}\n\n/// Search documents using sqlite-vec KNN query.\n/// Over-fetches 3x limit to handle chunk dedup.\npub fn search_vector(\n conn: &Connection,\n query_embedding: &[f32], // 768-dim embedding of search query\n limit: usize,\n) -> Result>\n```\n\n**SQL (KNN query):**\n```sql\nSELECT rowid, distance\nFROM embeddings\nWHERE embedding MATCH ?\n AND k = ?\nORDER BY distance\n```\n\n**Algorithm:**\n1. Convert query_embedding to raw LE bytes\n2. Execute KNN with k = limit * 3 (over-fetch for dedup)\n3. Decode each rowid via decode_rowid() -> (document_id, chunk_index)\n4. Group by document_id, keep minimum distance (best chunk)\n5. Sort by distance ascending\n6. 
Take first `limit` results\n\n## Acceptance Criteria\n- [ ] Returns deduplicated document-level results (not chunk-level)\n- [ ] Best chunk distance kept per document (lowest distance wins)\n- [ ] KNN with k parameter (3x limit)\n- [ ] Query embedding passed as raw LE bytes\n- [ ] Results sorted by distance ascending (closest first)\n- [ ] Returns at most `limit` results\n- [ ] Empty embeddings table returns empty Vec\n- [ ] `cargo build` succeeds\n\n## Files\n- `src/search/vector.rs` — new file\n- `src/search/mod.rs` — add `pub use vector::{search_vector, VectorResult};`\n\n## TDD Loop\nRED: Integration tests need sqlite-vec + seeded embeddings:\n- `test_vector_search_basic` — finds nearest document\n- `test_vector_search_dedup` — multi-chunk doc returns once with best distance\n- `test_vector_search_empty` — empty table returns empty\n- `test_vector_search_limit` — respects limit parameter\nGREEN: Implement search_vector\nVERIFY: `cargo test vector`\n\n## Edge Cases\n- All chunks belong to same document: returns single result\n- Query embedding wrong dimension: sqlite-vec may error — handle gracefully\n- Over-fetch returns fewer than limit unique docs: return what we have\n- Distance = 0.0: exact match (valid result)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:50.270357Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:44:56.233611Z","closed_at":"2026-01-30T17:44:56.233512Z","close_reason":"Implemented search_vector with KNN query, 3x over-fetch, chunk dedup. 
3 tests pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-bjo","depends_on_id":"bd-1y8","type":"blocks","created_at":"2026-01-30T15:29:24.842469Z","created_by":"tayloreernisse"},{"issue_id":"bd-bjo","depends_on_id":"bd-2ac","type":"blocks","created_at":"2026-01-30T15:29:24.878048Z","created_by":"tayloreernisse"}]} @@ -273,6 +291,7 @@ {"id":"bd-cq2","title":"[CP1] Integration tests for label linkage","description":"Integration tests verifying label linkage and stale removal.\n\n## Tests (tests/label_linkage_tests.rs)\n\n- clears_existing_labels_before_linking_new_set\n- removes_stale_label_links_on_issue_update\n- handles_issue_with_all_labels_removed\n- preserves_labels_that_still_exist\n\n## Test Scenario\n1. Create issue with labels [A, B]\n2. Verify issue_labels has links to A and B\n3. Update issue with labels [B, C]\n4. Verify A link removed, B preserved, C added\n\n## Why This Matters\nThe clear-and-relink pattern ensures GitLab reality is reflected locally.\nIf we only INSERT, removed labels would persist incorrectly.\n\nFiles: tests/label_linkage_tests.rs\nDone when: Stale label links correctly removed on resync","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:59:10.665771Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:02.062192Z","deleted_at":"2026-01-25T17:02:02.062188Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-czk","title":"Add entity_references table to migration 010","description":"## Background\nThe entity_references table is now part of migration 011 (combined with resource event tables and dependent fetch queue). This bead is satisfied by bd-hu3 since the entity_references table schema is included in the same migration.\n\n## Approach\nThis bead's work is folded into bd-hu3 (Write migration 011). 
The entity_references table from Phase B spec §2.2 is included in migrations/011_resource_events.sql alongside the event tables and queue.\n\nThe entity_references schema includes:\n- source/target entity type + id with reference_type and source_method\n- Unresolved reference support (target_entity_id NULL with target_project_path + target_entity_iid)\n- UNIQUE constraint using COALESCE for nullable columns\n- Partial indexes for source, target (where not null), and unresolved refs\n\nNo separate migration file needed — this is in 011.\n\n## Acceptance Criteria\n- [ ] entity_references table exists in migration 011 (verified by bd-hu3)\n- [ ] UNIQUE constraint handles NULL columns via COALESCE\n- [ ] Indexes created: source composite, target composite (partial), unresolved (partial)\n- [ ] reference_type CHECK includes 'closes', 'mentioned', 'related'\n- [ ] source_method CHECK includes 'api_closes_issues', 'api_state_event', 'system_note_parse'\n\n## Files\n- migrations/011_resource_events.sql (part of bd-hu3)\n\n## TDD Loop\nCovered by bd-hu3's test_migration_011_entity_references_dedup test.\n\nVERIFY: `cargo test migration_tests -- --nocapture`\n\n## Edge Cases\n- Same as bd-hu3's entity_references edge cases","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:32:33.506883Z","created_by":"tayloreernisse","updated_at":"2026-02-02T22:42:06.104237Z","closed_at":"2026-02-02T22:42:06.104190Z","close_reason":"Work folded into bd-hu3 (migration 011 includes entity_references table)","compaction_level":0,"original_size":0,"labels":["gate-2","phase-b","schema"]} {"id":"bd-dty","title":"Implement timeline robot mode JSON output","description":"## Background\n\nRobot mode JSON for timeline follows the {ok, data, meta} envelope pattern. 
The JSON schema MUST match spec Section 3.5 exactly — this is the contract for AI agent consumers.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 3.5 (Robot Mode JSON).\n\n## Codebase Context\n\n- Robot mode pattern: all commands use {ok: true, data: {...}, meta: {...}} envelope\n- Timestamps: internal ms epoch UTC -> output ISO 8601 via core::time::ms_to_iso()\n- source_method values in DB: 'api', 'note_parse', 'description_parse' (NOT spec's api_closes_issues etc.)\n- Serde rename: use #[serde(rename = \"type\")] for entity objects per spec\n\n## Approach\n\nCreate `print_timeline_json()` in `src/cli/commands/timeline.rs`:\n\n### Key JSON structure (spec Section 3.5):\n- data.seed_entities: [{type, iid, project}] — note \"type\" not \"entity_type\", \"project\" not \"project_path\"\n- data.expanded_entities: [{type, iid, project, depth, via: {from: {type,iid,project}, reference_type, source_method}}]\n- data.unresolved_references: [{source: {type,iid,project}, target_project, target_type, target_iid, reference_type}]\n- data.events: [{timestamp (ISO 8601), entity_type, entity_iid, project, event_type, summary, actor, url, is_seed, details}]\n- meta: {search_mode: \"lexical\", expansion_depth, expand_mentions, total_entities, total_events, evidence_notes_included, unresolved_references, showing}\n\n### Details object per event type:\n- created: {labels: [...]}\n- note_evidence: {note_id, snippet}\n- state_changed: {state}\n- label_added: {label}\n\n### Rust JSON Structs\n\n```rust\n#[derive(Serialize)]\nstruct TimelineJson {\n ok: bool,\n data: TimelineDataJson,\n meta: TimelineMetaJson,\n}\n\n#[derive(Serialize)]\nstruct TimelineDataJson {\n query: String,\n event_count: usize,\n seed_entities: Vec,\n expanded_entities: Vec,\n unresolved_references: Vec,\n events: Vec,\n}\n\n#[derive(Serialize)]\nstruct EntityJson {\n #[serde(rename = \"type\")]\n entity_type: String,\n iid: i64,\n project: String,\n}\n\n#[derive(Serialize)]\nstruct 
TimelineMetaJson {\n search_mode: String, // always \"lexical\"\n expansion_depth: u32,\n expand_mentions: bool,\n total_entities: usize,\n total_events: usize, // before limit\n evidence_notes_included: usize,\n unresolved_references: usize,\n showing: usize, // after limit\n}\n```\n\n### source_method values: use CODEBASE values (api/note_parse/description_parse), not spec values\n\n## Acceptance Criteria\n\n- [ ] Valid JSON to stdout\n- [ ] {ok, data, meta} envelope\n- [ ] ISO 8601 timestamps\n- [ ] Entity objects use \"type\" and \"project\" keys per spec\n- [ ] Nested \"via\" object on expanded entities per spec\n- [ ] Events include url and details fields\n- [ ] meta.total_events before limit; meta.showing after limit\n- [ ] source_method uses codebase values\n- [ ] `cargo check --all-targets` passes\n\n## Files\n\n- `src/cli/commands/timeline.rs` (add print_timeline_json + JSON structs)\n- `src/cli/commands/mod.rs` (re-export)\n\n## TDD Loop\n\nVerify: `lore --robot timeline \"test\" | jq '.data.expanded_entities[0].via.from'`\n\n## Edge Cases\n\n- Empty results: events=[], meta.showing=0\n- Null actor/url: serialize as null (not omitted)\n- source_method: use actual DB values, not spec originals","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:33:28.374690Z","created_by":"tayloreernisse","updated_at":"2026-02-06T13:49:12.653118Z","closed_at":"2026-02-06T13:49:12.653067Z","close_reason":"Implemented print_timeline_json_with_meta() robot JSON output in src/cli/commands/timeline.rs with {ok,data,meta} envelope, ISO timestamps, entity/expanded/unresolved JSON structs, event details per 
type","compaction_level":0,"original_size":0,"labels":["cli","gate-3","phase-b","robot-mode"],"dependencies":[{"issue_id":"bd-dty","depends_on_id":"bd-3as","type":"blocks","created_at":"2026-02-02T21:33:37.703617Z","created_by":"tayloreernisse"},{"issue_id":"bd-dty","depends_on_id":"bd-ike","type":"parent-child","created_at":"2026-02-02T21:33:28.377349Z","created_by":"tayloreernisse"}]} +{"id":"bd-e48d","title":"Render activity feed with event badges in human mode","description":"## Background\nHuman-mode activity output needs fast scanning: event badge, actor, entity reference, summary, time, optional preview, and optional project path. Badge color semantics must match AC-6.4 while covering all event types emitted by me activity queries (`note`, `status`, `label`, `milestone`, `assign`, `unassign`, `review_request`).\n\n## Approach\nImplement activity rendering in `src/cli/commands/me/render_human.rs`.\n\n### 1. Badge renderer\nAdd helper:\n```rust\nfn render_event_badge(event_type: &str) -> String\n```\n\nMapping:\n- `note` -> cyan\n- `status` -> amber\n- `label` -> purple\n- `assign` / `unassign` / `review_request` -> green\n- `milestone` -> magenta\n- fallback -> dim bracket label\n\nMode behavior:\n- Color-capable mode: background pill style (fg/bg contrast).\n- ASCII/no-color mode: bracketed text label using foreground color only (`[note]`, `[status]`, etc).\n\n### 2. Row renderer\nFor each activity item:\n- line 1: badge + `@actor` + entity ref (`#iid`/`!iid`) + summary + optional `(you)` + relative timestamp\n- line 2: quoted `body_preview` (only for `note`, newline-collapsed)\n- line 3: `project_path` dimmed unless single-project scope\n\n### 3. 
Own-action styling\nIf `is_own`:\n- append `(you)`\n- dim the full first line to reduce visual priority\n\n## Acceptance Criteria\n- [ ] Badge colors: note=cyan, status=amber, label=purple, assign/unassign/review_request=green, milestone=magenta\n- [ ] ASCII fallback renders bracket labels with colored foreground text\n- [ ] `@actor` rendered with username color\n- [ ] Issue refs (`#iid`) and MR refs (`!iid`) use existing ref palette\n- [ ] Own actions include `(you)` and dimmed emphasis\n- [ ] `body_preview` only appears for `note` events\n- [ ] `body_preview` newlines are replaced with spaces\n- [ ] Project path line suppressed for single-project scope\n- [ ] Section header is `Activity (N)` via `section_divider`\n- [ ] Empty state renders `No recent activity`\n\n## Files\n- MODIFY: `src/cli/commands/me/render_human.rs`\n\n## TDD Anchor\nRED:\n- `test_event_badge_color_mapping_includes_unassign`\n- `test_activity_own_action_includes_you_suffix`\n- `test_activity_preview_only_for_note`\n- `test_activity_project_path_suppressed_single_project`\n- `test_activity_ascii_badges`\n\nGREEN:\n- Implement badge + row rendering with event coverage parity.\n\nVERIFY:\n- `cargo test me_render_activity`\n\n## Edge Cases\n- Unknown event types should still render safely with a neutral badge.\n- `actor` may be empty/system fallback; renderer must not panic.\n- `review_request` labels are long; spacing should remain readable without fixed-width assumptions.\n\n## Dependency Context\nConsumes `MeActivityItem` from `bd-3bwh` and event types produced by `bd-b3r3` + `bd-2nl3`.\nIntegrated by `bd-1vv8` handler.\n\nDependencies:\n -> bd-1vxq (blocks) - Render summary header and attention legend\n\nDependents:\n <- bd-1vv8 (blocks) - Implement me command handler: wire queries to 
renderers","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:39:46.381793Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.062218Z","closed_at":"2026-02-20T16:09:13.062179Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-e48d","depends_on_id":"bd-1vxq","type":"blocks","created_at":"2026-02-19T19:41:28.089645Z","created_by":"tayloreernisse"}]} {"id":"bd-ef0u","title":"NOTE-2B: SourceType enum extension for notes","description":"## Background\nThe SourceType enum in src/documents/extractor.rs (line 15-19) needs a Note variant for the document pipeline to handle note-type documents.\n\n## Approach\nIn src/documents/extractor.rs:\n1. Add Note variant to SourceType enum (line 15-19, after Discussion):\n pub enum SourceType { Issue, MergeRequest, Discussion, Note }\n\n2. Add match arm to as_str() (line 22-28): Self::Note => \"note\"\n\n3. Add parse aliases (line 30-37): \"note\" | \"notes\" => Some(Self::Note)\n\n4. Display impl (line 40-43) already delegates to as_str() — no change needed.\n\n5. IMPORTANT: Also update seed_dirty() in src/cli/commands/generate_docs.rs (line 66-70) which has a match on SourceType that maps to table names. SourceType::Note should NOT be added to this match — notes are seeded differently (by querying the notes table, not by table name pattern). 
This is handled by NOTE-2E.\n\n## Files\n- MODIFY: src/documents/extractor.rs (SourceType enum at line 15, as_str at line 22, parse at line 30)\n\n## TDD Anchor\nRED: test_source_type_parse_note — assert SourceType::parse(\"note\") == Some(SourceType::Note)\nGREEN: Add Note variant and match arms.\nVERIFY: cargo test source_type_parse_note -- --nocapture\nTests: test_source_type_note_as_str (assert as_str() == \"note\"), test_source_type_note_display (assert format!(\"{}\", SourceType::Note) == \"note\"), test_source_type_parse_notes_alias (assert parse(\"notes\") works)\n\n## Acceptance Criteria\n- [ ] SourceType::Note variant exists\n- [ ] as_str() returns \"note\"\n- [ ] parse() accepts \"note\", \"notes\" (case-insensitive via to_lowercase)\n- [ ] Display trait works via as_str delegation\n- [ ] No change to seed_dirty() match — that's a separate bead (NOTE-2E)\n- [ ] All 4 tests pass, clippy clean\n- [ ] CRITICAL: regenerate_one() in src/documents/regenerator.rs (line 86-91) has exhaustive match on SourceType — adding Note variant will cause a compile error until NOTE-2D adds the match arm. Either add a temporary todo!() or coordinate with NOTE-2D.\n\n## Dependency Context\n- Depends on NOTE-2A (bd-1oi7): migration 024 must exist so test DBs accept source_type='note' in documents/dirty_sources tables\n\n## Edge Cases\n- Exhaustive match: Adding the variant breaks regenerate_one() (line 86-91) and seed_dirty() (line 66-70) until downstream beads handle it. 
Agent should add temporary unreachable!() arms with comments referencing the downstream bead IDs.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:45.555568Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:24.004157Z","closed_at":"2026-02-12T18:13:24.004106Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"],"dependencies":[{"issue_id":"bd-ef0u","depends_on_id":"bd-18yh","type":"blocks","created_at":"2026-02-12T17:04:49.376312Z","created_by":"tayloreernisse"},{"issue_id":"bd-ef0u","depends_on_id":"bd-2ezb","type":"blocks","created_at":"2026-02-12T17:04:49.521665Z","created_by":"tayloreernisse"}]} {"id":"bd-epj","title":"[CP0] Config loading with Zod validation","description":"## Background\n\nConfig loading is critical infrastructure - every CLI command needs the config. Uses Zod for schema validation with sensible defaults. Must handle missing files gracefully with typed errors.\n\nReference: docs/prd/checkpoint-0.md sections \"Configuration Schema\", \"Config Resolution Order\"\n\n## Approach\n\n**src/core/config.ts:**\n```typescript\nimport { z } from 'zod';\nimport { readFileSync } from 'node:fs';\nimport { ConfigNotFoundError, ConfigValidationError } from './errors';\nimport { getConfigPath } from './paths';\n\nexport const ConfigSchema = z.object({\n gitlab: z.object({\n baseUrl: z.string().url(),\n tokenEnvVar: z.string().default('GITLAB_TOKEN'),\n }),\n projects: z.array(z.object({\n path: z.string().min(1),\n })).min(1),\n sync: z.object({\n backfillDays: z.number().int().positive().default(14),\n staleLockMinutes: z.number().int().positive().default(10),\n heartbeatIntervalSeconds: z.number().int().positive().default(30),\n cursorRewindSeconds: z.number().int().nonnegative().default(2),\n primaryConcurrency: z.number().int().positive().default(4),\n dependentConcurrency: z.number().int().positive().default(2),\n }).default({}),\n storage: 
z.object({\n dbPath: z.string().optional(),\n backupDir: z.string().optional(),\n compressRawPayloads: z.boolean().default(true),\n }).default({}),\n embedding: z.object({\n provider: z.literal('ollama').default('ollama'),\n model: z.string().default('nomic-embed-text'),\n baseUrl: z.string().url().default('http://localhost:11434'),\n concurrency: z.number().int().positive().default(4),\n }).default({}),\n});\n\nexport type Config = z.infer;\n\nexport function loadConfig(cliOverride?: string): Config {\n const path = getConfigPath(cliOverride);\n // throws ConfigNotFoundError if missing\n // throws ConfigValidationError if invalid\n}\n```\n\n## Acceptance Criteria\n\n- [ ] `loadConfig()` returns validated Config object\n- [ ] `loadConfig()` throws ConfigNotFoundError if file missing\n- [ ] `loadConfig()` throws ConfigValidationError with Zod errors if invalid\n- [ ] Empty optional fields get default values\n- [ ] projects array must have at least 1 item\n- [ ] gitlab.baseUrl must be valid URL\n- [ ] All number fields must be positive integers\n- [ ] tests/unit/config.test.ts passes (8 tests)\n\n## Files\n\nCREATE:\n- src/core/config.ts\n- tests/unit/config.test.ts\n- tests/fixtures/mock-responses/valid-config.json\n- tests/fixtures/mock-responses/invalid-config.json\n\n## TDD Loop\n\nRED:\n```typescript\n// tests/unit/config.test.ts\ndescribe('Config', () => {\n it('loads config from file path')\n it('throws ConfigNotFoundError if file missing')\n it('throws ConfigValidationError if required fields missing')\n it('validates project paths are non-empty strings')\n it('applies default values for optional fields')\n it('loads from XDG path by default')\n it('respects GI_CONFIG_PATH override')\n it('respects --config flag override')\n})\n```\n\nGREEN: Implement loadConfig() function\n\nVERIFY: `npm run test -- tests/unit/config.test.ts`\n\n## Edge Cases\n\n- JSON parse error should wrap in ConfigValidationError\n- Zod error messages should be human-readable\n- File 
exists but empty → ConfigValidationError\n- File has extra fields → should pass (Zod strips by default)","status":"closed","priority":1,"issue_type":"task","created_at":"2026-01-24T16:09:49.091078Z","created_by":"tayloreernisse","updated_at":"2026-01-25T03:04:32.592139Z","closed_at":"2026-01-25T03:04:32.592003Z","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-epj","depends_on_id":"bd-gg1","type":"blocks","created_at":"2026-01-24T16:13:07.835800Z","created_by":"tayloreernisse"}]} {"id":"bd-flwo","title":"Interactive path selection for ambiguous matches (TTY picker)","description":"When a partial file path matches multiple files, show an interactive numbered picker in TTY mode instead of a hard error. In robot mode, return candidates as structured JSON in the error envelope. Use dialoguer crate for selection UI. The path_resolver module already detects ambiguity via SuffixResult::Ambiguous and limits to 11 candidates.","status":"open","priority":3,"issue_type":"feature","created_at":"2026-02-13T16:31:50.005222Z","created_by":"tayloreernisse","updated_at":"2026-02-13T16:31:50.007520Z","compaction_level":0,"original_size":0,"labels":["cli-ux","gate-4"]} @@ -288,6 +307,7 @@ {"id":"bd-ike","title":"Epic: Gate 3 - Decision Timeline (lore timeline)","description":"## Background\n\nGate 3 is the first user-facing temporal feature: `lore timeline `. 
It answers \"What happened with X?\" by finding matching entities via FTS5, expanding cross-references, collecting all temporal events, and rendering a chronological narrative.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Gate 3 (Sections 3.1-3.6).\n\n## Prerequisites (All Complete)\n\n- Gates 1-2 COMPLETE: resource_state_events, resource_label_events, resource_milestone_events, entity_references all populated\n- FTS5 search index (CP3): working search infrastructure for keyword matching\n- Migration 015 (commit SHAs, closes watermark) exists on disk (registered by bd-1oo)\n\n## Architecture — 5-Stage Pipeline\n\n```\n1. SEED: FTS5 keyword search -> matched document IDs (issues, MRs, notes)\n2. HYDRATE: Map document IDs -> source entities + top matched notes as evidence\n3. EXPAND: BFS over entity_references (depth-limited, edge-type filtered)\n4. COLLECT: Gather events from all tables for seed + expanded entities\n5. RENDER: Sort chronologically, format as human or robot output\n```\n\nNo new tables required. All reads are from existing tables at query time.\n\n## Children (Execution Order)\n\n1. **bd-20e** — Define TimelineEvent model and TimelineEventType enum (types first)\n2. **bd-32q** — Implement timeline seed phase: FTS5 keyword search to entity IDs\n3. **bd-ypa** — Implement timeline expand phase: BFS cross-reference expansion\n4. **bd-3as** — Implement timeline event collection and chronological interleaving\n5. **bd-1nf** — Register lore timeline command with all flags (CLI wiring)\n6. **bd-2f2** — Implement timeline human output renderer\n7. 
**bd-dty** — Implement timeline robot mode JSON output\n\n## Gate Completion Criteria\n\n- [ ] `lore timeline ` returns chronologically ordered events\n- [ ] Seed entities found via FTS5 keyword search (issues, MRs, and notes)\n- [ ] State, label, and milestone events interleaved from resource event tables\n- [ ] Entity creation and merge events included\n- [ ] Evidence-bearing notes included as note_evidence events (top FTS5 matches, bounded default 10)\n- [ ] Cross-reference expansion follows entity_references to configurable depth\n- [ ] Default: follows closes + related edges; --expand-mentions adds mentioned\n- [ ] --depth 0 disables expansion\n- [ ] --since filters by event timestamp\n- [ ] -p scopes to project\n- [ ] Human output is colored and readable\n- [ ] Robot mode returns structured JSON with expansion provenance\n- [ ] Unresolved (external) references included in JSON output\n","status":"closed","priority":1,"issue_type":"feature","created_at":"2026-02-02T21:31:01.036474Z","created_by":"tayloreernisse","updated_at":"2026-02-06T13:49:21.285350Z","closed_at":"2026-02-06T13:49:21.285302Z","close_reason":"Gate 3 complete: all 7 children closed. Timeline pipeline fully implemented with SEED->HYDRATE->EXPAND->COLLECT->RENDER stages, human+robot renderers, CLI wiring with 9 flags, robot-docs manifest entry","compaction_level":0,"original_size":0,"labels":["epic","gate-3","phase-b"],"dependencies":[{"issue_id":"bd-ike","depends_on_id":"bd-1se","type":"blocks","created_at":"2026-02-02T21:33:37.875622Z","created_by":"tayloreernisse"},{"issue_id":"bd-ike","depends_on_id":"bd-2zl","type":"blocks","created_at":"2026-02-02T21:33:37.831914Z","created_by":"tayloreernisse"}]} {"id":"bd-jbfw","title":"NOTE-0D: Capture immutable author_id in note upserts","description":"## Background\nCore use case is year-scale reviewer profiling. GitLab usernames are mutable — a user can change username mid-year, fragmenting author queries. 
GitLab note payloads include note.author.id (immutable integer). Capturing this provides a stable identity anchor for longitudinal analysis.\n\n## Approach\n1. The author_id column and index are added by migration 022 (NOTE-1E, bd-296a). This bead only handles the ingestion code changes.\n\n2. Populate author_id during upsert: In both upsert_note_for_issue() (from NOTE-0A) and upsert_note() in mr_discussions.rs, add author_id to INSERT column list and ON CONFLICT DO UPDATE SET clause. The value comes from the GitLab API note.author.id field.\n\n3. Check NormalizedNote in src/gitlab/transformers.rs — if it doesn't have an author_id field yet, add it there. The GitLab REST API returns notes with: { \"author\": { \"id\": 12345, \"username\": \"jdefting\", ... } }. Extract author.id in the transformer.\n\n4. Semantic change detection: author_id changes do NOT trigger changed_semantics = true. It's an identity anchor, not content. Do not include author_id in the pre-read comparison fields.\n\n## Files\n- MODIFY: src/ingestion/discussions.rs (add author_id to upsert_note_for_issue INSERT and ON CONFLICT SET)\n- MODIFY: src/ingestion/mr_discussions.rs (add author_id to upsert_note INSERT and ON CONFLICT SET, line 470)\n- MODIFY: src/gitlab/transformers.rs (add author_id: Option to NormalizedNote if missing, extract from API note.author.id)\n\n## TDD Anchor\nRED: test_issue_note_upsert_captures_author_id — insert note with author_id=12345, assert stored correctly.\nGREEN: Add author_id to INSERT/UPDATE clauses and transformer.\nVERIFY: cargo test captures_author_id -- --nocapture\nTests: test_mr_note_upsert_captures_author_id, test_note_upsert_author_id_nullable (old API responses without author.id), test_note_author_id_survives_username_change\n\n## Acceptance Criteria\n- [ ] author_id populated in upsert_note_for_issue INSERT and ON CONFLICT SET\n- [ ] author_id populated in MR upsert_note INSERT and ON CONFLICT SET\n- [ ] NormalizedNote has author_id: Option field\n- [ 
] Transformer extracts author.id from GitLab API note payload\n- [ ] author_id = None handled gracefully (older API responses)\n- [ ] author_id change does NOT trigger changed_semantics\n- [ ] All 4 tests pass\n\n## Dependency Context\n- Depends on NOTE-0A (bd-3bpk): modifies upsert functions created in NOTE-0A\n- Depends on NOTE-1E (bd-296a): migration 022 adds the author_id column + index to the notes table. Column must exist before ingestion code can write to it.\n\n## Edge Cases\n- Old GitLab instances: author.id may be missing from API response — use None\n- Self-hosted GitLab: some versions may not include author block — handle gracefully\n- author_id is nullable INTEGER — no NOT NULL constraint","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:59:55.097158Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:13:15.254328Z","closed_at":"2026-02-12T18:13:15.254247Z","close_reason":"Implemented by agent swarm","compaction_level":0,"original_size":0,"labels":["per-note","search"]} {"id":"bd-jec","title":"Add fetchMrFileChanges config flag","description":"## Background\n\nConfig flag controlling whether MR diff fetching is enabled, following the fetchResourceEvents pattern.\n\n**Spec reference:** `docs/phase-b-temporal-intelligence.md` Section 4.2.\n\n## Codebase Context\n\n- src/core/config.rs has SyncConfig with fetch_resource_events: bool (serde rename 'fetchResourceEvents', default true)\n- Default impl exists for SyncConfig\n- CLI sync options in src/cli/mod.rs have --no-events flag pattern\n- Orchestrator checks config.sync.fetch_resource_events before enqueuing resource_events jobs\n\n## Approach\n\n### 1. Add to SyncConfig (`src/core/config.rs`):\n```rust\n#[serde(rename = \"fetchMrFileChanges\", default = \"default_true\")]\npub fetch_mr_file_changes: bool,\n```\n\nUpdate Default impl to include fetch_mr_file_changes: true.\n\n### 2. 
CLI override (`src/cli/mod.rs`):\n```rust\n#[arg(long = \"no-file-changes\")]\npub no_file_changes: bool,\n```\n\n### 3. Apply in main.rs:\n```rust\nif args.no_file_changes { config.sync.fetch_mr_file_changes = false; }\n```\n\n### 4. Guard in orchestrator:\n```rust\nif config.sync.fetch_mr_file_changes { enqueue mr_diffs jobs }\n```\n\n## Acceptance Criteria\n\n- [ ] fetchMrFileChanges in SyncConfig, default true\n- [ ] Config without field defaults to true\n- [ ] --no-file-changes disables diff fetching\n- [ ] Orchestrator skips mr_diffs when false\n- [ ] `cargo check --all-targets` passes\n\n## Files\n\n- `src/core/config.rs` (add field + Default)\n- `src/cli/mod.rs` (add --no-file-changes)\n- `src/main.rs` (apply override)\n- `src/ingestion/orchestrator.rs` (guard enqueue)\n\n## TDD Loop\n\nRED:\n- `test_config_default_fetch_mr_file_changes` - default is true\n- `test_config_deserialize_false` - JSON with false\n\nGREEN: Add field, default, serde attribute.\n\nVERIFY: `cargo test --lib -- config`\n\n## Edge Cases\n\n- Config missing fetchMrFileChanges key entirely: serde default_true fills in true\n- Config explicitly set to false: no mr_diffs jobs enqueued, mr_file_changes table empty\n- --no-file-changes with --full sync: overrides config, no diffs fetched even on full resync\n- sync.fetchMrFileChanges = false in config + no --no-file-changes flag: respects config (no override)","status":"closed","priority":3,"issue_type":"task","created_at":"2026-02-02T21:34:08.892666Z","created_by":"tayloreernisse","updated_at":"2026-02-08T18:18:36.409511Z","closed_at":"2026-02-08T18:18:36.409467Z","close_reason":"Added fetch_mr_file_changes to SyncConfig (default true, serde rename fetchMrFileChanges), --no-file-changes CLI flag in SyncArgs, override in main.rs. 
Orchestrator guard deferred to bd-2yo which implements the actual drain.","compaction_level":0,"original_size":0,"labels":["config","gate-4","phase-b"],"dependencies":[{"issue_id":"bd-jec","depends_on_id":"bd-14q","type":"parent-child","created_at":"2026-02-02T21:34:08.895167Z","created_by":"tayloreernisse"}]} +{"id":"bd-joja","title":"Implement open issues query for me command","description":"## Background\nThe first dashboard section shows issues assigned to the current user in opened state. Issues are linked to users via `issue_assignees(issue_id, username)` junction table. Labels are in `issue_labels(issue_id, label_id)` joined to `labels(id, name, ...)` — NOT a column on issues.\n\n## Approach\nCreate `src/cli/commands/me/issues.rs`:\n```rust\nuse rusqlite::Connection;\nuse crate::core::error::Result;\nuse super::types::{MeIssue, ProjectScope, AttentionState};\n\npub fn fetch_my_issues(\n conn: &Connection,\n username: &str,\n scope: &ProjectScope,\n) -> Result> {\n let project_filter = match scope {\n ProjectScope::Single(id) => format!(\"AND p.id = {id}\"),\n ProjectScope::All => String::new(),\n };\n let sql = format!(r#\"\n SELECT p.path_with_namespace, i.iid, i.title, i.state,\n i.status_name, i.updated_at, i.web_url,\n (SELECT GROUP_CONCAT(l.name, ',')\n FROM issue_labels il\n JOIN labels l ON l.id = il.label_id\n WHERE il.issue_id = i.id) AS label_names\n FROM issues i\n JOIN issue_assignees ia ON ia.issue_id = i.id\n JOIN projects p ON p.id = i.project_id\n WHERE ia.username = ?1\n AND i.state = 'opened'\n {project_filter}\n ORDER BY i.updated_at DESC\n \"#);\n let mut stmt = conn.prepare(&sql)?;\n let rows = stmt.query_map([username], |row| {\n let label_str: Option = row.get(7)?;\n Ok(MeIssue {\n project_path: row.get(0)?,\n iid: row.get(1)?,\n title: row.get(2)?,\n state: row.get(3)?,\n status_name: row.get(4)?,\n attention_state: AttentionState::NotStarted, // placeholder — enriched later\n labels: label_str\n .map(|s| 
s.split(',').map(String::from).collect())\n .unwrap_or_default(),\n updated_at_iso: crate::core::time::ms_to_iso(row.get::<_, i64>(5)?),\n web_url: row.get(6)?,\n })\n })?;\n rows.collect::, _>>()\n .map_err(Into::into)\n}\n```\n\nKey details:\n- Labels via GROUP_CONCAT subquery on `issue_labels` + `labels` tables (NOT a column on issues)\n- `updated_at` stored as epoch ms — convert to ISO with `crate::core::time::ms_to_iso()`\n- `attention_state` set to placeholder — enriched by separate attention bead (bd-1xuf)\n- ProjectScope::Single uses `p.id` (internal DB id from resolve_project)\n\n## Acceptance Criteria\n- [ ] Returns Vec for issues where `issue_assignees.username` matches (AC-3.1)\n- [ ] Only returns `state = 'opened'` issues (AC-5.1)\n- [ ] Includes `project_path` from `projects.path_with_namespace` (AC-8.5)\n- [ ] Includes `status_name` (nullable) from issues table (AC-7.4)\n- [ ] Labels fetched via GROUP_CONCAT on `issue_labels` JOIN `labels`\n- [ ] Respects `ProjectScope::Single(id)` — filters by `p.id`\n- [ ] Respects `ProjectScope::All` — no project filter\n- [ ] `updated_at` converted to ISO string via `ms_to_iso()`\n- [ ] No limit, no truncation (AC-5.1)\n- [ ] Issues where user is author but NOT assignee do NOT appear (AC-3.1)\n\n## Files\n- CREATE: src/cli/commands/me/issues.rs\n- MODIFY: src/cli/commands/me/mod.rs (add `pub mod issues;`)\n\n## TDD Anchor\nRED: Write `test_fetch_my_issues_returns_assigned` using in-memory DB:\n1. `run_migrations(&conn)`\n2. Insert project (gitlab_project_id, path_with_namespace, web_url)\n3. Insert issue (project_id, iid=42, state='opened', title, updated_at, web_url, last_seen_at)\n4. Insert issue_assignee (issue_id, username=\"jdoe\")\n5. Call `fetch_my_issues(&conn, \"jdoe\", &ProjectScope::All)`\n6. 
Assert returns 1 issue with iid=42\n\nGREEN: Implement the SQL query.\nVERIFY: `cargo test fetch_my_issues`\n\nAdditional tests:\n- test_fetch_my_issues_excludes_closed (state='closed' → not returned)\n- test_fetch_my_issues_excludes_unassigned (no assignee row → not returned)\n- test_fetch_my_issues_filters_by_project (Single scope)\n- test_fetch_my_issues_includes_labels (insert label + issue_label, verify labels vec)\n\n## Edge Cases\n- Issues with zero labels → labels vec is empty (GROUP_CONCAT returns NULL → unwrap_or_default)\n- Multiple assignees on same issue → user is one of them → appears once (JOIN, not DISTINCT needed since PK is unique)\n- `status_name` may be NULL (not all issues have work item status enrichment)\n\n## Dependency Context\nUses `MeIssue` and `ProjectScope` from bd-3bwh and bd-a7ba.\nUses `ms_to_iso` from `src/core/time.rs`.\nIn-memory DB test pattern: `create_connection(Path::new(\":memory:\"))` + `run_migrations(&conn)` from `src/core/db.rs`.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:36:46.162335Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.050929Z","closed_at":"2026-02-20T16:09:13.050889Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-joja","depends_on_id":"bd-3bwh","type":"blocks","created_at":"2026-02-19T19:41:17.321242Z","created_by":"tayloreernisse"},{"issue_id":"bd-joja","depends_on_id":"bd-a7ba","type":"blocks","created_at":"2026-02-19T19:41:17.403830Z","created_by":"tayloreernisse"}]} {"id":"bd-jov","title":"[CP1] Discussion and note transformers","description":"Transform GitLab discussion/note payloads to normalized database schema.\n\n## Module\nsrc/gitlab/transformers/discussion.rs\n\n## Structs\n\n### NormalizedDiscussion\n- gitlab_discussion_id: String\n- project_id: i64\n- issue_id: i64\n- noteable_type: String (\"Issue\")\n- individual_note: bool\n- first_note_at, last_note_at: 
Option\n- last_seen_at: i64\n- resolvable, resolved: bool\n\n### NormalizedNote\n- gitlab_id: i64\n- project_id: i64\n- note_type: Option\n- is_system: bool\n- author_username: String\n- body: String\n- created_at, updated_at, last_seen_at: i64\n- position: i32 (array index in notes[])\n- resolvable, resolved: bool\n- resolved_by: Option\n- resolved_at: Option\n\n## Functions\n\n### transform_discussion(gitlab_discussion, local_project_id, local_issue_id) -> NormalizedDiscussion\n- Compute first_note_at/last_note_at from notes array min/max created_at\n- Compute resolvable (any note resolvable)\n- Compute resolved (resolvable AND all resolvable notes resolved)\n\n### transform_notes(gitlab_discussion, local_project_id) -> Vec\n- Enumerate notes to get position (array index)\n- Set is_system from note.system\n- Convert timestamps to ms epoch\n\nFiles: src/gitlab/transformers/discussion.rs\nTests: tests/discussion_transformer_tests.rs\nDone when: Unit tests pass for discussion/note transformation with system note flagging","status":"tombstone","priority":2,"issue_type":"task","created_at":"2026-01-25T15:43:04.481361Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.759691Z","deleted_at":"2026-01-25T17:02:01.759684Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-k7b","title":"[CP1] gi show issue command","description":"Show issue details with discussions.\n\n## Module\nsrc/cli/commands/show.rs\n\n## Clap Definition\nShow {\n #[arg(value_parser = [\"issue\", \"mr\"])]\n entity: String,\n \n iid: i64,\n \n #[arg(long)]\n project: Option,\n}\n\n## Output Format\nIssue #1234: Authentication redesign\n━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━\n\nProject: group/project-one\nState: opened\nAuthor: @johndoe\nCreated: 2024-01-15\nUpdated: 2024-03-20\nLabels: enhancement, auth\nURL: 
https://gitlab.example.com/group/project-one/-/issues/1234\n\nDescription:\n We need to redesign the authentication flow to support...\n\nDiscussions (5):\n\n @janedoe (2024-01-16):\n I agree we should move to JWT-based auth...\n\n @johndoe (2024-01-16):\n What about refresh token strategy?\n\n @bobsmith (2024-01-17):\n Have we considered OAuth2?\n\n## Ambiguity Handling\nIf multiple projects have same iid, either:\n- Prompt for --project flag\n- Show error listing which projects have that iid\n\nFiles: src/cli/commands/show.rs\nDone when: Issue detail view displays all fields including threaded discussions","status":"tombstone","priority":3,"issue_type":"task","created_at":"2026-01-25T16:58:26.904813Z","created_by":"tayloreernisse","updated_at":"2026-01-25T17:02:01.944183Z","deleted_at":"2026-01-25T17:02:01.944179Z","deleted_by":"tayloreernisse","delete_reason":"recreating with correct deps","original_type":"task","compaction_level":0,"original_size":0} {"id":"bd-kanh","title":"Extract orchestrator per-entity logic and implement inline dependent helpers","description":"## Background\n\nThe orchestrator's drain functions (`drain_resource_events` at line 932, `drain_mr_closes_issues` at line 1254, `drain_mr_diffs` at line 1514) are private and tightly coupled to the job queue system (`pending_dependent_fetches`, `claim_jobs`, `complete_job`). They batch-process all entities for a project, not individual ones. 
Surgical sync needs per-entity versions of these operations.\n\nThe underlying storage functions already exist and are usable:\n- `store_resource_events(conn, project_id, entity_type, entity_local_id, state_events, label_events, milestone_events)` (orchestrator.rs:1100) — calls `upsert_state_events`, `upsert_label_events`, `upsert_milestone_events`\n- `store_closes_issues_refs(conn, project_id, mr_local_id, closes_issues)` (orchestrator.rs:1409) — inserts entity references\n- `upsert_mr_file_changes(conn, project_id, mr_local_id, diffs)` (mr_diffs.rs:26) — already pub\n\nThe GitLabClient methods for fetching are also already pub:\n- `fetch_all_resource_events(gitlab_project_id, entity_type, iid)` -> (state, label, milestone) events\n- `fetch_mr_closes_issues(gitlab_project_id, iid)` -> Vec\n- `fetch_mr_diffs(gitlab_project_id, iid)` -> Vec\n\nThe gap: no standalone per-entity functions that fetch + store for a single entity without the job queue machinery.\n\n## Approach\n\nCreate standalone helper functions in `src/ingestion/surgical.rs` (or a new `src/ingestion/surgical_dependents.rs` sub-module) that surgical.rs calls after ingesting each entity:\n\n1. **`fetch_and_store_resource_events_for_entity`** (async): Takes `client`, `conn`, `project_id`, `gitlab_project_id`, `entity_type` (\"issue\"|\"merge_request\"), `entity_iid`, `entity_local_id`. Calls `client.fetch_all_resource_events()`, then `store_resource_events()` (needs `pub(crate)` visibility, currently private in orchestrator.rs). Updates the watermark column (`resource_events_synced_for_updated_at`).\n\n2. **`fetch_and_store_discussions_for_entity`** (async): For issues, calls existing `ingest_issue_discussions()`. For MRs, calls `ingest_mr_discussions()`. Both are already pub. This is a thin routing wrapper.\n\n3. **`fetch_and_store_closes_issues_for_entity`** (async, MR-only): Calls `client.fetch_mr_closes_issues()`, then `store_closes_issues_refs()` (needs `pub(crate)`). Updates watermark.\n\n4. 
**`fetch_and_store_file_changes_for_entity`** (async, MR-only): Calls `client.fetch_mr_diffs()`, then `upsert_mr_file_changes()` (already pub). Updates watermark.\n\nVisibility changes needed in orchestrator.rs (part of bd-1sc6):\n- `store_resource_events` -> `pub(crate)`\n- `store_closes_issues_refs` -> `pub(crate)`\n- `update_resource_event_watermark_tx` -> `pub(crate)` (or inline the SQL)\n- `update_closes_issues_watermark_tx` -> `pub(crate)` (or inline)\n\n## Acceptance Criteria\n\n- [ ] `fetch_and_store_resource_events_for_entity` fetches all 3 event types and stores them in one transaction\n- [ ] `fetch_and_store_discussions_for_entity` routes to correct discussion ingest function by entity type\n- [ ] `fetch_and_store_closes_issues_for_entity` fetches and stores closes_issues refs for MRs\n- [ ] `fetch_and_store_file_changes_for_entity` fetches and stores MR diffs\n- [ ] Each helper updates the appropriate watermark column after successful store\n- [ ] Each helper returns a result struct with counts (fetched, stored, skipped)\n- [ ] All helpers are `pub(crate)` for use by the orchestration function (bd-1i4i)\n- [ ] Config-gated: resource events only fetched if `config.sync.fetch_resource_events == true`, file changes only if `config.sync.fetch_mr_file_changes == true`\n\n## Files\n\n- `src/ingestion/surgical.rs` (add helper functions, or create `surgical_dependents.rs` sub-module)\n- `src/ingestion/orchestrator.rs` (change `store_resource_events`, `store_closes_issues_refs`, watermark functions to `pub(crate)` — via bd-1sc6)\n\n## TDD Anchor\n\nTests in `src/ingestion/surgical_tests.rs` (bd-x8oq):\n\n```rust\n#[tokio::test]\nasync fn test_fetch_and_store_resource_events_for_issue() {\n let conn = setup_db();\n let mock = MockServer::start().await;\n // Mock state/label/milestone event endpoints\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/\\d+/issues/\\d+/resource_state_events\"))\n 
.respond_with(ResponseTemplate::new(200).set_body_json(json!([])))\n .mount(&mock).await;\n // ... similar for label and milestone\n let client = make_test_client(&mock);\n let result = fetch_and_store_resource_events_for_entity(\n &client, &conn, /*project_id=*/1, /*gitlab_project_id=*/100,\n \"issue\", /*iid=*/42, /*local_id=*/1,\n ).await.unwrap();\n assert_eq!(result.fetched, 0); // empty events\n // Verify watermark updated\n let watermark: Option = conn.query_row(\n \"SELECT resource_events_synced_for_updated_at FROM issues WHERE id = 1\",\n [], |r| r.get(0),\n ).unwrap();\n assert!(watermark.is_some());\n}\n\n#[tokio::test]\nasync fn test_fetch_and_store_closes_issues_for_mr() {\n let conn = setup_db();\n let mock = MockServer::start().await;\n Mock::given(method(\"GET\"))\n .and(path_regex(r\"/api/v4/projects/\\d+/merge_requests/\\d+/closes_issues\"))\n .respond_with(ResponseTemplate::new(200).set_body_json(json!([\n {\"iid\": 10, \"project_id\": 100}\n ])))\n .mount(&mock).await;\n let client = make_test_client(&mock);\n let result = fetch_and_store_closes_issues_for_entity(\n &client, &conn, 1, 100, /*mr_iid=*/5, /*mr_local_id=*/1,\n ).await.unwrap();\n assert_eq!(result.stored, 1);\n}\n\n#[tokio::test]\nasync fn test_fetch_and_store_file_changes_for_mr() {\n // Similar: mock /diffs endpoint, verify upsert_mr_file_changes called\n}\n\n#[tokio::test]\nasync fn test_resource_events_skipped_when_config_disabled() {\n // config.sync.fetch_resource_events = false -> returns Ok with 0 counts\n}\n```\n\n## Edge Cases\n\n- `fetch_all_resource_events` returns 3 separate Results (state, label, milestone). If one fails (e.g., 403 on milestone events), the others should still be stored. 
Partial success handling.\n- `fetch_mr_closes_issues` on a deleted MR returns 404: `coalesce_not_found` already handles this in the client, returning empty vec.\n- Watermark update must happen AFTER successful store, not before, to avoid marking as synced when store failed.\n- Discussion ingest for MRs uses `prefetch_mr_discussions` (async) + `write_prefetched_mr_discussions` (sync) two-phase pattern. The helper must handle both phases.\n- If `config.sync.fetch_resource_events` is false, skip resource event fetch entirely (return empty result).\n- If `config.sync.fetch_mr_file_changes` is false, skip file changes fetch entirely.\n\n## Dependency Context\n\n- **Blocked by bd-3sez**: surgical.rs must exist before adding helpers to it\n- **Blocked by bd-1sc6 (indirectly via bd-3sez)**: `store_resource_events` and `store_closes_issues_refs` need `pub(crate)` visibility\n- **Blocks bd-1i4i**: Orchestration function calls these helpers after each entity ingest\n- **Blocks bd-3jqx**: Integration tests exercise the full surgical pipeline including these helpers\n- **Uses existing pub APIs**: `GitLabClient::fetch_all_resource_events`, `fetch_mr_closes_issues`, `fetch_mr_diffs`, `upsert_mr_file_changes`, `ingest_issue_discussions`, `ingest_mr_discussions`","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-17T19:15:42.863072Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:04:58.569185Z","closed_at":"2026-02-18T21:04:58.569141Z","close_reason":"Completed: all implementation work done, code reviewed, tests passing","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-kanh","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-17T19:19:24.814888Z","created_by":"tayloreernisse"}]} @@ -304,6 +324,7 @@ {"id":"bd-ozy","title":"[CP1] Ingestion orchestrator","description":"## Background\n\nThe ingestion orchestrator coordinates issue sync followed by dependent discussion sync. 
It implements the CP1 canonical pattern: fetch issues, identify which need discussion sync (updated_at advanced), then execute discussion sync with bounded concurrency.\n\n## Approach\n\n### Module: src/ingestion/orchestrator.rs\n\n### Main Function\n\n```rust\npub async fn ingest_project_issues(\n conn: &Connection,\n client: &GitLabClient,\n config: &Config,\n project_id: i64, // Local DB project ID\n gitlab_project_id: i64,\n) -> Result\n\n#[derive(Debug, Default)]\npub struct IngestProjectResult {\n pub issues_fetched: usize,\n pub issues_upserted: usize,\n pub labels_created: usize,\n pub discussions_fetched: usize,\n pub notes_fetched: usize,\n pub system_notes_count: usize,\n pub issues_skipped_discussion_sync: usize,\n}\n```\n\n### Orchestration Steps\n\n1. **Call issue ingestion**: `ingest_issues(conn, client, config, project_id, gitlab_project_id)`\n2. **Get issues needing discussion sync**: From IngestIssuesResult.issues_needing_discussion_sync\n3. **Execute bounded discussion sync**:\n - Use `tokio::task::LocalSet` for single-threaded runtime\n - Respect `config.sync.dependent_concurrency` (default: 5)\n - For each IssueForDiscussionSync:\n - Call `ingest_issue_discussions(...)`\n - Aggregate results\n4. 
**Calculate skipped count**: total_issues - issues_needing_discussion_sync.len()\n\n### Bounded Concurrency Pattern\n\n```rust\nuse futures::stream::{self, StreamExt};\n\nlet local_set = LocalSet::new();\nlocal_set.run_until(async {\n stream::iter(issues_needing_sync)\n .map(|issue| async {\n ingest_issue_discussions(\n conn, client, config,\n project_id, gitlab_project_id,\n issue.iid, issue.local_issue_id, issue.updated_at,\n ).await\n })\n .buffer_unordered(config.sync.dependent_concurrency)\n .try_collect::>()\n .await\n}).await\n```\n\nNote: Single-threaded runtime means concurrency is I/O-bound, not parallel execution.\n\n## Acceptance Criteria\n\n- [ ] Orchestrator calls issue ingestion first\n- [ ] Only issues with updated_at > discussions_synced_for_updated_at get discussion sync\n- [ ] Bounded concurrency respects dependent_concurrency config\n- [ ] Results aggregated from both issue and discussion ingestion\n- [ ] issues_skipped_discussion_sync accurately reflects unchanged issues\n\n## Files\n\n- src/ingestion/mod.rs (add `pub mod orchestrator;`)\n- src/ingestion/orchestrator.rs (create)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/orchestrator_tests.rs\n#[tokio::test] async fn orchestrates_issue_then_discussion_sync()\n#[tokio::test] async fn skips_discussion_sync_for_unchanged_issues()\n#[tokio::test] async fn respects_bounded_concurrency()\n#[tokio::test] async fn aggregates_results_correctly()\n```\n\nGREEN: Implement orchestrator with bounded concurrency\n\nVERIFY: `cargo test orchestrator`\n\n## Edge Cases\n\n- All issues unchanged - no discussion sync calls\n- All issues new - all get discussion sync\n- dependent_concurrency=1 - sequential discussion fetches\n- Issue ingestion fails - orchestrator returns error, no discussion 
sync","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.289941Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:54:07.447647Z","closed_at":"2026-01-25T22:54:07.447577Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-ozy","depends_on_id":"bd-208","type":"blocks","created_at":"2026-01-25T17:04:05.583955Z","created_by":"tayloreernisse"},{"issue_id":"bd-ozy","depends_on_id":"bd-hbo","type":"blocks","created_at":"2026-01-25T17:04:05.605851Z","created_by":"tayloreernisse"}]} {"id":"bd-pgdw","title":"OBSERV: Add root tracing span with run_id to sync and ingest","description":"## Background\nA root tracing span per command invocation provides the top of the span hierarchy. All child spans (ingest_issues, fetch_pages, etc.) inherit the run_id field, making every log line within a run filterable by jq.\n\n## Approach\nIn run_sync() (src/cli/commands/sync.rs:54), after generating run_id, create a root span:\n\n```rust\npub async fn run_sync(config: &Config, options: SyncOptions) -> Result {\n let run_id = &uuid::Uuid::new_v4().to_string()[..8];\n let _root = tracing::info_span!(\"sync\", %run_id).entered();\n // ... existing sync pipeline code\n}\n```\n\nIn run_ingest() (src/cli/commands/ingest.rs:107), same pattern:\n\n```rust\npub async fn run_ingest(...) -> Result {\n let run_id = &uuid::Uuid::new_v4().to_string()[..8];\n let _root = tracing::info_span!(\"ingest\", %run_id, resource_type).entered();\n // ... existing ingest code\n}\n```\n\nCRITICAL: The _root guard must live for the entire function scope. If it drops early (e.g., shadowed or moved into a block), child spans lose their parent context. Use let _root (underscore prefix) to signal intentional unused binding that's kept alive for its Drop impl.\n\nFor async functions, use .entered() NOT .enter(). In async Rust, Span::enter() returns a guard that is NOT Send, which prevents the future from being sent across threads. 
However, .entered() on an info_span! creates an Entered which is also !Send. For async, prefer:\n\n```rust\nlet root_span = tracing::info_span!(\"sync\", %run_id);\nasync move {\n // ... body\n}.instrument(root_span).await\n```\n\nOr use #[instrument] on the function itself with the run_id field.\n\n## Acceptance Criteria\n- [ ] Root span established for every sync and ingest invocation\n- [ ] run_id appears in span context of all child log lines\n- [ ] jq 'select(.spans[]? | .run_id)' can extract all lines from a run\n- [ ] Span is active for entire function duration (not dropped early)\n- [ ] Works correctly with async/await (span propagated across .await points)\n- [ ] cargo clippy --all-targets -- -D warnings passes\n\n## Files\n- src/cli/commands/sync.rs (add root span in run_sync, line ~54)\n- src/cli/commands/ingest.rs (add root span in run_ingest, line ~107)\n\n## TDD Loop\nRED: test_root_span_propagates_run_id (capture JSON log output, verify run_id in span context)\nGREEN: Add root spans to run_sync and run_ingest\nVERIFY: cargo test && cargo clippy --all-targets -- -D warnings\n\n## Edge Cases\n- Async span propagation: .entered() is !Send. For async functions, use .instrument() or #[instrument]. The run_sync function is async (line 54: pub async fn run_sync).\n- Nested command calls: run_sync calls run_ingest internally. If both create root spans, we get a nested hierarchy: sync > ingest. This is correct behavior -- the ingest span becomes a child of sync.\n- Span storage: tracing-subscriber registry handles span storage automatically. 
No manual setup needed beyond adding the layer.","status":"closed","priority":1,"issue_type":"task","created_at":"2026-02-04T15:54:07.771605Z","created_by":"tayloreernisse","updated_at":"2026-02-04T17:19:33.006274Z","closed_at":"2026-02-04T17:19:33.006227Z","close_reason":"Added root tracing spans with run_id to run_sync() and run_ingest() using .instrument() pattern for async compatibility","compaction_level":0,"original_size":0,"labels":["observability"],"dependencies":[{"issue_id":"bd-pgdw","depends_on_id":"bd-2ni","type":"parent-child","created_at":"2026-02-04T15:54:07.772319Z","created_by":"tayloreernisse"},{"issue_id":"bd-pgdw","depends_on_id":"bd-37qw","type":"blocks","created_at":"2026-02-04T15:55:19.742022Z","created_by":"tayloreernisse"}]} {"id":"bd-pr1","title":"Implement lore stats CLI command","description":"## Background\nThe stats command provides visibility into the document/search/embedding pipeline health. It reports counts (DocumentStats, EmbeddingStats, FtsStats, QueueStats), verifies consistency between tables (--check), and repairs inconsistencies (--repair). 
This is essential for diagnosing sync issues and validating Gate A/B/C correctness.\n\n## Approach\nCreate `src/cli/commands/stats.rs` per PRD Section 4.6.\n\n**Stats structs (PRD-exact):**\n```rust\n#[derive(Debug, Serialize)]\npub struct Stats {\n pub documents: DocumentStats,\n pub embeddings: EmbeddingStats,\n pub fts: FtsStats,\n pub queues: QueueStats,\n}\n\n#[derive(Debug, Serialize)]\npub struct DocumentStats {\n pub issues: usize,\n pub mrs: usize,\n pub discussions: usize,\n pub total: usize,\n pub truncated: usize,\n}\n\n#[derive(Debug, Serialize)]\npub struct EmbeddingStats {\n /// Documents with at least one embedding (chunk_index=0 exists in embedding_metadata)\n pub embedded: usize,\n pub pending: usize,\n pub failed: usize,\n /// embedded / total_documents * 100 (document-level, not chunk-level)\n pub coverage_pct: f64,\n /// Total chunks across all embedded documents\n pub total_chunks: usize,\n}\n\n#[derive(Debug, Serialize)]\npub struct FtsStats { pub indexed: usize }\n\n#[derive(Debug, Serialize)]\npub struct QueueStats {\n pub dirty_sources: usize,\n pub dirty_sources_failed: usize,\n pub pending_discussion_fetches: usize,\n pub pending_discussion_fetches_failed: usize,\n}\n```\n\n**IntegrityCheck struct (PRD-exact):**\n```rust\n#[derive(Debug, Serialize)]\npub struct IntegrityCheck {\n pub documents_count: usize,\n pub fts_count: usize,\n pub embeddings_count: usize,\n pub metadata_count: usize,\n pub orphaned_embeddings: usize,\n pub hash_mismatches: usize,\n pub ok: bool,\n}\n```\n\n**RepairResult struct (PRD-exact):**\n```rust\n#[derive(Debug, Serialize)]\npub struct RepairResult {\n pub orphaned_embeddings_deleted: usize,\n pub stale_embeddings_cleared: usize,\n pub missing_fts_repopulated: usize,\n}\n```\n\n**Core functions:**\n- `run_stats(config) -> Result` — gather all stats\n- `run_integrity_check(config) -> Result` — verify consistency\n- `run_repair(config) -> Result` — fix issues\n\n**Integrity checks (per PRD):**\n1. 
documents count == documents_fts count\n2. All `embeddings.rowid / 1000` map to valid `documents.id` (orphan detection)\n3. `embedding_metadata.document_hash == documents.content_hash` for chunk_index=0 rows (staleness uses `document_hash`, NOT `chunk_hash`)\n\n**Repair operations (PRD-exact):**\n1. Delete orphaned embedding_metadata (document_id NOT IN documents)\n2. Delete orphaned vec0 rows: `DELETE FROM embeddings WHERE rowid / 1000 NOT IN (SELECT id FROM documents)` — uses `rowid / 1000` for chunked scheme\n3. Clear stale embeddings: find documents where `embedding_metadata.document_hash != documents.content_hash` (chunk_index=0 comparison), delete ALL chunks for those docs (range-based: `rowid >= doc_id * 1000 AND rowid < (doc_id + 1) * 1000`)\n4. FTS rebuild: `INSERT INTO documents_fts(documents_fts) VALUES('rebuild')` — full rebuild, NOT optimize. PRD note: partial fix is fragile with external-content FTS; rebuild is guaranteed correct.\n\n**CLI args (PRD-exact):**\n```rust\n#[derive(Args)]\npub struct StatsArgs {\n #[arg(long)]\n check: bool,\n #[arg(long, requires = \"check\")]\n repair: bool, // --repair requires --check\n}\n```\n\n## Acceptance Criteria\n- [ ] Document counts by type: issues, mrs, discussions, total, truncated\n- [ ] Embedding coverage is document-level (not chunk-level): `embedded / total * 100`\n- [ ] Embedding stats include total_chunks count\n- [ ] FTS indexed count reported\n- [ ] Queue stats: dirty_sources + dirty_sources_failed, pending_discussion_fetches + pending_discussion_fetches_failed\n- [ ] --check verifies: FTS count == documents count, orphan embeddings, hash mismatches\n- [ ] Orphan detection uses `rowid / 1000` for chunked embedding scheme\n- [ ] Hash mismatch uses `document_hash` (not `chunk_hash`) for document-level staleness\n- [ ] --repair deletes orphaned embeddings (range-based for chunks)\n- [ ] --repair clears stale metadata (document_hash != content_hash at chunk_index=0)\n- [ ] --repair uses FTS `rebuild` 
(not `optimize`) for correct-by-construction repair\n- [ ] --repair requires --check (Clap `requires` attribute)\n- [ ] Human output: formatted with aligned columns\n- [ ] JSON output: `{\"ok\": true, \"data\": stats}`\n- [ ] `cargo build` succeeds\n\n## Files\n- `src/cli/commands/stats.rs` — new file\n- `src/cli/commands/mod.rs` — add `pub mod stats;`\n- `src/cli/mod.rs` — add StatsArgs, wire up stats subcommand\n- `src/main.rs` — add stats command handler\n\n## TDD Loop\nRED: Integration tests:\n- `test_stats_empty_db` — all counts 0, coverage 0%\n- `test_stats_with_documents` — correct counts by type\n- `test_integrity_check_healthy` — ok=true when consistent\n- `test_integrity_check_fts_mismatch` — detects FTS/doc count divergence\n- `test_integrity_check_orphan_embeddings` — detects orphaned rowids\n- `test_repair_rebuilds_fts` — FTS count matches after repair\n- `test_repair_cleans_orphans` — orphaned embeddings deleted\n- `test_repair_clears_stale` — stale metadata cleared (doc_hash mismatch)\nGREEN: Implement stats, integrity check, repair\nVERIFY: `cargo build && cargo test stats`\n\n## Edge Cases\n- Empty database: all counts 0, coverage 0%, no integrity issues\n- Gate A only (no embeddings table): skip embedding stats gracefully\n- --repair on healthy DB: no-op, reports \"no issues found\" / zero counts\n- FTS rebuild on large DB: may be slow\n- --repair without --check: Clap rejects (requires attribute enforces dependency)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-30T15:26:50.232629Z","created_by":"tayloreernisse","updated_at":"2026-01-30T17:54:31.065586Z","closed_at":"2026-01-30T17:54:31.065501Z","close_reason":"Implemented stats CLI with document counts by type, embedding coverage, FTS index count, queue stats, --check integrity (FTS mismatch, orphan embeddings, stale metadata), --repair (rebuild FTS, delete orphans, clear stale). Human + JSON output. 
Builds clean.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-pr1","depends_on_id":"bd-3qs","type":"blocks","created_at":"2026-01-30T15:29:24.806108Z","created_by":"tayloreernisse"}]} +{"id":"bd-qpk3","title":"Add gitlab.username config field","description":"## Background\nThe `lore me` command needs to know the current user's GitLab username to query for assigned issues, authored MRs, and reviewing MRs. Currently `GitLabConfig` in `src/core/config.rs` has `base_url`, `token_env_var`, and optional `token` — but no username field.\n\n## Approach\nAdd an optional `username` field to `GitLabConfig` at `src/core/config.rs:8-19`:\n```rust\n#[derive(Debug, Clone, Deserialize)]\npub struct GitLabConfig {\n #[serde(rename = \"baseUrl\")]\n pub base_url: String,\n\n #[serde(rename = \"tokenEnvVar\", default = \"default_token_env_var\")]\n pub token_env_var: String,\n\n #[serde(default)]\n pub token: Option,\n\n #[serde(default)]\n pub username: Option, // NEW — AC-1.1\n}\n```\n\nThe field is single-word so it needs NO `serde(rename)` — just `#[serde(default)]` so existing configs without it parse cleanly (same pattern as `token`). 
Username is case-sensitive (AC-1.3) — store as-is, no normalization.\n\n## Acceptance Criteria\n- [ ] `GitLabConfig` has `pub username: Option` field\n- [ ] Field uses `#[serde(default)]` (like `token` does on line 17)\n- [ ] Field deserializes from `gitlab.username` in config.json\n- [ ] Field is optional — existing configs without it still parse correctly (Option + default)\n- [ ] Username stored as-is (case-sensitive, no lowercasing)\n- [ ] cargo test passes (no existing tests should break)\n\n## Files\n- MODIFY: src/core/config.rs (add field to GitLabConfig struct around line 18)\n\n## TDD Anchor\nRED: Write `test_config_parses_username` in `src/core/config.rs` tests:\n```rust\nlet json = r#\"{\"gitlab\":{\"baseUrl\":\"https://gitlab.com\",\"username\":\"jdoe\"},\"projects\":[]}\"#;\nlet config: Config = serde_json::from_str(json).unwrap();\nassert_eq!(config.gitlab.username, Some(\"jdoe\".to_string()));\n```\nAlso test missing field:\n```rust\nlet json = r#\"{\"gitlab\":{\"baseUrl\":\"https://gitlab.com\"},\"projects\":[]}\"#;\nlet config: Config = serde_json::from_str(json).unwrap();\nassert_eq!(config.gitlab.username, None);\n```\nGREEN: Add the field to the struct.\nVERIFY: `cargo test config_parses_username`\n\n## Edge Cases\n- Existing config files without the field must not break (Option + #[serde(default)] handles this)\n- Empty string `\"\"` → Some(\"\") — let the resolution function (bd-1f1f) handle treating it as None\n- The `ScoringConfig.excluded_usernames: Vec` is unrelated — different purpose\n\n## Dependency Context\nNo upstream dependencies. 
Consumed by bd-1f1f (username resolution reads this field).","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:35:03.806072Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.039716Z","closed_at":"2026-02-20T16:09:13.039655Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0} {"id":"bd-r3wm","title":"Description","description":"Another test","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:52:04.745618Z","updated_at":"2026-02-12T16:52:10.757707Z","closed_at":"2026-02-12T16:52:10.757667Z","close_reason":"test artifacts","compaction_level":0,"original_size":0} {"id":"bd-s3rc","title":"WHO: Workload mode query (query_workload)","description":"## Background\n\nWorkload mode answers \"What is person X working on?\" — a four-section snapshot of a user's active work items: assigned issues, authored MRs, MRs they're reviewing, and unresolved discussions they participate in.\n\n## Approach\n\nFour independent SQL queries, all using the same parameter pattern: `rusqlite::params![username, project_id, since_ms, limit_plus_one]`\n\n### Key design decisions:\n- **since_ms is Option**: unlike other modes, Workload has NO default time window. Unresolved discussions and open issues are relevant regardless of age. When --since is explicitly provided, (?3 IS NULL OR ...) activates filtering.\n- **Canonical refs**: SQL computes project-qualified references directly:\n - Issues: `p.path_with_namespace || '#' || i.iid` -> \"group/project#42\"\n - MRs: `p.path_with_namespace || '!' 
|| m.iid` -> \"group/project!100\"\n- **Discussions**: use EXISTS subquery to check user participation, CASE for ref separator (# vs !)\n\n### Query 1: Open issues assigned to user\n```sql\nSELECT i.iid, (p.path_with_namespace || '#' || i.iid) AS ref,\n i.title, p.path_with_namespace, i.updated_at\nFROM issues i\nJOIN issue_assignees ia ON ia.issue_id = i.id\nJOIN projects p ON i.project_id = p.id\nWHERE ia.username = ?1 AND i.state = 'opened'\n AND (?2 IS NULL OR i.project_id = ?2)\n AND (?3 IS NULL OR i.updated_at >= ?3)\nORDER BY i.updated_at DESC LIMIT ?4\n```\n\n### Query 2: Open MRs authored (similar pattern, m.author_username = ?1)\n### Query 3: Open MRs where user is reviewer (JOIN mr_reviewers, includes m.author_username in output)\n### Query 4: Unresolved discussions where user participated (EXISTS notes subquery)\n\n### Per-section truncation:\n```rust\nlet assigned_issues_truncated = assigned_issues.len() > limit;\nlet assigned_issues = assigned_issues.into_iter().take(limit).collect();\n// ... 
same for all 4 sections\n```\n\n### WorkloadResult struct:\n```rust\npub struct WorkloadResult {\n pub username: String,\n pub assigned_issues: Vec,\n pub authored_mrs: Vec,\n pub reviewing_mrs: Vec,\n pub unresolved_discussions: Vec,\n pub assigned_issues_truncated: bool,\n pub authored_mrs_truncated: bool,\n pub reviewing_mrs_truncated: bool,\n pub unresolved_discussions_truncated: bool,\n}\n```\n\n## Files\n\n- `src/cli/commands/who.rs`\n\n## TDD Loop\n\nRED: `test_workload_query` — insert project, issue+assignee, MR; verify assigned_issues.len()=1, authored_mrs.len()=1\nGREEN: Implement all 4 queries with prepare_cached()\nVERIFY: `cargo test -- workload`\n\n## Acceptance Criteria\n\n- [ ] test_workload_query passes\n- [ ] Canonical refs contain project path (group/project#iid format)\n- [ ] since_ms=None means no time filtering (all open items returned)\n- [ ] All 4 sections have independent truncation flags\n\n## Edge Cases\n\n- since_ms is Option (not i64) — Workload is the only mode with optional time window\n- Discussions: --since filters on d.last_note_at (recent activity), not creation time\n- Reviewing MRs: include m.author_username in output (who wrote the MR being reviewed)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-08T02:40:27.800169Z","created_by":"tayloreernisse","updated_at":"2026-02-08T04:10:29.597273Z","closed_at":"2026-02-08T04:10:29.597228Z","close_reason":"Implemented by agent team: migration 017, CLI skeleton, all 5 query modes, human+robot output, 20 tests. 
All quality gates pass.","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-s3rc","depends_on_id":"bd-2ldg","type":"blocks","created_at":"2026-02-08T02:43:36.958720Z","created_by":"tayloreernisse"},{"issue_id":"bd-s3rc","depends_on_id":"bd-34rr","type":"blocks","created_at":"2026-02-08T02:43:37.097732Z","created_by":"tayloreernisse"}]} {"id":"bd-ser","title":"Implement MR ingestion module","description":"## Background\nMR ingestion module with cursor-based sync. Follows the same pattern as issue ingestion from CP1. Discussion sync eligibility is determined via DB query AFTER ingestion (not in-memory collection) to avoid memory growth on large projects.\n\n## Approach\nCreate `src/ingestion/merge_requests.rs` with:\n1. `IngestMergeRequestsResult` - Aggregated stats\n2. `ingest_merge_requests()` - Main ingestion function\n3. `upsert_merge_request()` - Single MR upsert\n4. Helper functions for labels, assignees, reviewers, cursor management\n\n## Files\n- `src/ingestion/merge_requests.rs` - New module\n- `src/ingestion/mod.rs` - Export new module\n- `tests/mr_ingestion_tests.rs` - Integration tests\n\n## Acceptance Criteria\n- [ ] `IngestMergeRequestsResult` has: fetched, upserted, labels_created, assignees_linked, reviewers_linked\n- [ ] `ingest_merge_requests()` returns `Result`\n- [ ] Page-boundary cursor updates (not item-count modulo)\n- [ ] Tuple-based cursor filtering: `(updated_at, gitlab_id)`\n- [ ] Transaction per MR for atomicity\n- [ ] Raw payload stored for each MR\n- [ ] Labels: clear-and-relink pattern (removes stale)\n- [ ] Assignees: clear-and-relink pattern\n- [ ] Reviewers: clear-and-relink pattern\n- [ ] `reset_discussion_watermarks()` for --full sync\n- [ ] `cargo test mr_ingestion` passes\n\n## TDD Loop\nRED: `cargo test ingest_mr` -> module not found\nGREEN: Add ingestion module with full logic\nVERIFY: `cargo test mr_ingestion`\n\n## Main Function Signature\n```rust\npub async fn ingest_merge_requests(\n conn: &Connection,\n 
client: &GitLabClient,\n config: &Config,\n project_id: i64, // Local DB project ID\n gitlab_project_id: i64, // GitLab project ID\n full_sync: bool, // Reset cursor if true\n) -> Result\n```\n\n## Ingestion Loop (page-based)\n```rust\nlet mut page = 1u32;\nloop {\n let page_result = client.fetch_merge_requests_page(...).await?;\n \n for mr in &page_result.items {\n // Tuple cursor filtering\n if let (Some(cursor_ts), Some(cursor_id)) = (cursor_updated_at, cursor_gitlab_id) {\n if mr_updated_at < cursor_ts { continue; }\n if mr_updated_at == cursor_ts && mr.id <= cursor_id { continue; }\n }\n \n // Begin transaction\n let tx = conn.unchecked_transaction()?;\n \n // Store raw payload\n let payload_id = store_payload(&tx, ...)?;\n \n // Transform and upsert\n let transformed = transform_merge_request(&mr, project_id)?;\n let upsert_result = upsert_merge_request(&tx, &transformed.merge_request, payload_id)?;\n \n // Clear-and-relink labels\n clear_mr_labels(&tx, local_mr_id)?;\n for label in &labels { ... }\n \n // Clear-and-relink assignees\n clear_mr_assignees(&tx, local_mr_id)?;\n for username in &transformed.assignee_usernames { ... }\n \n // Clear-and-relink reviewers\n clear_mr_reviewers(&tx, local_mr_id)?;\n for username in &transformed.reviewer_usernames { ... 
}\n \n tx.commit()?;\n \n // Track for cursor\n last_updated_at = Some(mr_updated_at);\n last_gitlab_id = Some(mr.id);\n }\n \n // Page-boundary cursor flush\n if let (Some(updated_at), Some(gitlab_id)) = (last_updated_at, last_gitlab_id) {\n update_cursor(conn, project_id, \"merge_requests\", updated_at, gitlab_id)?;\n }\n \n if page_result.is_last_page { break; }\n page = page_result.next_page.unwrap_or(page + 1);\n}\n```\n\n## Full Sync Watermark Reset\n```rust\nfn reset_discussion_watermarks(conn: &Connection, project_id: i64) -> Result<()> {\n conn.execute(\n \"UPDATE merge_requests\n SET discussions_synced_for_updated_at = NULL,\n discussions_sync_attempts = 0,\n discussions_sync_last_error = NULL\n WHERE project_id = ?\",\n [project_id],\n )?;\n Ok(())\n}\n```\n\n## DB Helper Functions\n- `get_cursor(conn, project_id) -> (Option, Option)` - Get (updated_at, gitlab_id)\n- `update_cursor(conn, project_id, resource_type, updated_at, gitlab_id)`\n- `reset_cursor(conn, project_id, resource_type)`\n- `upsert_merge_request(conn, mr, payload_id) -> Result`\n- `clear_mr_labels(conn, mr_id)`\n- `link_mr_label(conn, mr_id, label_id)`\n- `clear_mr_assignees(conn, mr_id)`\n- `upsert_mr_assignee(conn, mr_id, username)`\n- `clear_mr_reviewers(conn, mr_id)`\n- `upsert_mr_reviewer(conn, mr_id, username)`\n\n## Edge Cases\n- Cursor rewind may cause refetch of already-seen MRs (tuple filtering handles this)\n- Large projects: 10k+ MRs - page-based cursor prevents massive refetch on crash\n- Labels/assignees/reviewers may change - clear-and-relink ensures 
correctness","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-26T22:06:41.967459Z","created_by":"tayloreernisse","updated_at":"2026-01-27T00:15:24.526208Z","closed_at":"2026-01-27T00:15:24.526142Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-ser","depends_on_id":"bd-34o","type":"blocks","created_at":"2026-01-26T22:08:54.519486Z","created_by":"tayloreernisse"},{"issue_id":"bd-ser","depends_on_id":"bd-3ir","type":"blocks","created_at":"2026-01-26T22:08:54.440174Z","created_by":"tayloreernisse"},{"issue_id":"bd-ser","depends_on_id":"bd-iba","type":"blocks","created_at":"2026-01-26T22:08:54.593550Z","created_by":"tayloreernisse"}]} @@ -312,6 +333,7 @@ {"id":"bd-tir","title":"Implement generic dependent fetch queue (enqueue + drain)","description":"## Background\nThe pending_dependent_fetches table (migration 011) provides a generic job queue for all dependent resource fetches across Gates 1, 2, and 4. This module implements the queue operations: enqueue, claim, complete, fail, and stale lock reclamation. It generalizes the existing discussion_queue.rs pattern.\n\n## Approach\nCreate src/core/dependent_queue.rs with:\n\n```rust\nuse rusqlite::Connection;\nuse super::error::Result;\n\n/// A pending job from the dependent fetch queue.\npub struct PendingJob {\n pub id: i64,\n pub project_id: i64,\n pub entity_type: String, // \"issue\" | \"merge_request\"\n pub entity_iid: i64,\n pub entity_local_id: i64,\n pub job_type: String, // \"resource_events\" | \"mr_closes_issues\" | \"mr_diffs\"\n pub payload_json: Option,\n pub attempts: i32,\n}\n\n/// Enqueue a dependent fetch job. 
Idempotent via UNIQUE constraint (INSERT OR IGNORE).\npub fn enqueue_job(\n conn: &Connection,\n project_id: i64,\n entity_type: &str,\n entity_iid: i64,\n entity_local_id: i64,\n job_type: &str,\n payload_json: Option<&str>,\n) -> Result // returns true if actually inserted (not deduped)\n\n/// Claim a batch of jobs for processing. Atomically sets locked_at.\n/// Only claims jobs where locked_at IS NULL AND (next_retry_at IS NULL OR next_retry_at <= now).\npub fn claim_jobs(\n conn: &Connection,\n job_type: &str,\n batch_size: usize,\n) -> Result>\n\n/// Mark a job as complete (DELETE the row).\npub fn complete_job(conn: &Connection, job_id: i64) -> Result<()>\n\n/// Mark a job as failed. Increment attempts, set next_retry_at with exponential backoff, clear locked_at.\n/// Backoff: 30s * 2^(attempts-1), capped at 480s.\npub fn fail_job(conn: &Connection, job_id: i64, error: &str) -> Result<()>\n\n/// Reclaim stale locks (locked_at older than threshold).\n/// Returns count of reclaimed jobs.\npub fn reclaim_stale_locks(conn: &Connection, stale_threshold_minutes: u32) -> Result\n\n/// Count pending jobs by job_type (for stats/progress).\npub fn count_pending_jobs(conn: &Connection) -> Result>\n```\n\nRegister in src/core/mod.rs: `pub mod dependent_queue;`\n\n**Key implementation details:**\n- claim_jobs uses a two-step approach: SELECT ids WHERE available, then UPDATE SET locked_at for those ids. 
Use a single transaction.\n- enqueued_at = current time in ms epoch UTC\n- locked_at = current time in ms epoch UTC when claimed\n- Backoff formula: next_retry_at = now + min(30_000 * 2^(attempts-1), 480_000) ms\n\n## Acceptance Criteria\n- [ ] enqueue_job is idempotent (INSERT OR IGNORE on UNIQUE constraint)\n- [ ] enqueue_job returns true on insert, false on dedup\n- [ ] claim_jobs only claims unlocked, non-retrying jobs\n- [ ] claim_jobs respects batch_size limit\n- [ ] complete_job DELETEs the row\n- [ ] fail_job increments attempts, sets next_retry_at, clears locked_at, records last_error\n- [ ] Backoff: 30s, 60s, 120s, 240s, 480s (capped)\n- [ ] reclaim_stale_locks clears locked_at for jobs older than threshold\n- [ ] count_pending_jobs returns accurate counts by job_type\n\n## Files\n- src/core/dependent_queue.rs (new)\n- src/core/mod.rs (add `pub mod dependent_queue;`)\n\n## TDD Loop\nRED: tests/dependent_queue_tests.rs (new):\n- `test_enqueue_job_basic` - enqueue a job, verify it exists\n- `test_enqueue_job_idempotent` - enqueue same job twice, verify single row\n- `test_claim_jobs_batch` - enqueue 5, claim 3, verify 3 returned and locked\n- `test_claim_jobs_skips_locked` - lock a job, claim again, verify it's skipped\n- `test_claim_jobs_respects_retry_at` - set next_retry_at in future, verify skipped\n- `test_claim_jobs_includes_retryable` - set next_retry_at in past, verify claimed\n- `test_complete_job_deletes` - complete a job, verify gone\n- `test_fail_job_backoff` - fail 3 times, verify exponential next_retry_at values\n- `test_reclaim_stale_locks` - set old locked_at, reclaim, verify cleared\n\nSetup: create_test_db() with migrations 001-011, seed project + issue.\n\nGREEN: Implement all functions\n\nVERIFY: `cargo test dependent_queue -- --nocapture`\n\n## Edge Cases\n- claim_jobs with batch_size=0 should return empty vec (not error)\n- enqueue_job with invalid job_type will be rejected by CHECK constraint — map rusqlite error to LoreError\n- 
fail_job on a non-existent job_id should be a no-op (job may have been completed by another path)\n- reclaim_stale_locks with 0 threshold would reclaim everything — ensure threshold is reasonable (minimum 1 min)\n- Timestamps must use consistent ms epoch UTC (not seconds)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-02T21:31:57.290181Z","created_by":"tayloreernisse","updated_at":"2026-02-03T16:19:14.222626Z","closed_at":"2026-02-03T16:19:14.222579Z","close_reason":"Implemented PendingJob struct, enqueue_job, claim_jobs, complete_job, fail_job (with exponential backoff), reclaim_stale_locks, count_pending_jobs in src/core/dependent_queue.rs.","compaction_level":0,"original_size":0,"labels":["gate-1","phase-b","queue"],"dependencies":[{"issue_id":"bd-tir","depends_on_id":"bd-2zl","type":"parent-child","created_at":"2026-02-02T21:31:57.291894Z","created_by":"tayloreernisse"},{"issue_id":"bd-tir","depends_on_id":"bd-hu3","type":"blocks","created_at":"2026-02-02T21:31:57.292472Z","created_by":"tayloreernisse"}]} {"id":"bd-tiux","title":"Add sync_runs migration 027 for surgical mode columns","description":"## Background\nThe `sync_runs` table (created in migration 001, enriched in 014) tracks sync run lifecycle for observability and crash recovery. Surgical sync needs additional columns to track its distinct mode, phase progression, IID targeting, and per-stage counters. This is a schema-only change — no Rust struct changes beyond registering the migration SQL file.\n\nThe migration system uses a `MIGRATIONS` array in `src/core/db.rs`. Each entry is a `(version, sql_file_name)` tuple. SQL files live in `src/core/migrations/`. The current latest migration is 026 (`026_scoring_indexes.sql`), so this will be migration 027. 
`LATEST_SCHEMA_VERSION` is computed as `MIGRATIONS.len() as i32` and automatically becomes 27.\n\n## Approach\n\n### Step 1: Create migration SQL file: `src/core/migrations/027_surgical_sync_runs.sql`\n\n```sql\n-- Migration 027: Extend sync_runs for surgical sync observability\n-- Adds mode/phase tracking and surgical-specific counters.\n\nALTER TABLE sync_runs ADD COLUMN mode TEXT;\nALTER TABLE sync_runs ADD COLUMN phase TEXT;\nALTER TABLE sync_runs ADD COLUMN surgical_iids_json TEXT;\nALTER TABLE sync_runs ADD COLUMN issues_fetched INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN mrs_fetched INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN issues_ingested INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN mrs_ingested INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN skipped_stale INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN docs_regenerated INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN docs_embedded INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN warnings_count INTEGER NOT NULL DEFAULT 0;\nALTER TABLE sync_runs ADD COLUMN cancelled_at INTEGER;\n\nCREATE INDEX IF NOT EXISTS idx_sync_runs_mode_started\n ON sync_runs(mode, started_at DESC);\nCREATE INDEX IF NOT EXISTS idx_sync_runs_status_phase_started\n ON sync_runs(status, phase, started_at DESC);\n```\n\n**Column semantics:**\n- `mode`: \"standard\" or \"surgical\" (NULL for pre-migration rows)\n- `phase`: preflight, ingest, dependents, docs, embed, done, failed, cancelled\n- `surgical_iids_json`: JSON like `{\"issues\":[7,8],\"mrs\":[101]}`\n- Counter columns: integers with DEFAULT 0 for backward compat\n- `cancelled_at`: ms-epoch timestamp, NULL unless cancelled\n\n### Step 2: Register in MIGRATIONS array (src/core/db.rs)\n\nAdd to the `MIGRATIONS` array (currently 26 entries ending with `026_scoring_indexes.sql`):\n\n```rust\n(27, include_str!(\"migrations/027_surgical_sync_runs.sql\")),\n```\n\n## Acceptance 
Criteria\n- [ ] File `src/core/migrations/027_surgical_sync_runs.sql` exists with all ALTER TABLE and CREATE INDEX statements\n- [ ] Migration 027 is registered in MIGRATIONS array in `src/core/db.rs`\n- [ ] `LATEST_SCHEMA_VERSION` evaluates to 27\n- [ ] Migration runs successfully on fresh databases (in-memory test)\n- [ ] Pre-existing sync_runs rows are unaffected (NULL mode/phase, 0 counters)\n- [ ] New columns accept expected values via INSERT and SELECT round-trip\n- [ ] NULL defaults work for mode, phase, surgical_iids_json, cancelled_at\n- [ ] DEFAULT 0 works for all counter columns\n- [ ] `cargo check --all-targets` passes\n- [ ] `cargo test` passes (all migration tests use in-memory DB)\n\n## Files\n- CREATE: src/core/migrations/027_surgical_sync_runs.sql\n- MODIFY: src/core/db.rs (add entry to MIGRATIONS array)\n\n## TDD Anchor\nRED: Write tests in `src/core/sync_run_tests.rs` (which is already `#[path]`-included from `sync_run.rs`):\n\n```rust\n#[test]\nfn sync_run_surgical_columns_exist() {\n let conn = setup_test_db();\n conn.execute(\n \"INSERT INTO sync_runs (started_at, heartbeat_at, status, command, mode, phase, surgical_iids_json)\n VALUES (1000, 1000, 'running', 'sync', 'surgical', 'preflight', '{\\\"issues\\\":[7],\\\"mrs\\\":[]}')\",\n [],\n ).unwrap();\n let (mode, phase, iids_json): (String, String, String) = conn.query_row(\n \"SELECT mode, phase, surgical_iids_json FROM sync_runs WHERE mode = 'surgical'\",\n [],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?)),\n ).unwrap();\n assert_eq!(mode, \"surgical\");\n assert_eq!(phase, \"preflight\");\n assert!(iids_json.contains(\"7\"));\n}\n\n#[test]\nfn sync_run_counter_defaults_are_zero() {\n let conn = setup_test_db();\n conn.execute(\n \"INSERT INTO sync_runs (started_at, heartbeat_at, status, command)\n VALUES (2000, 2000, 'running', 'sync')\",\n [],\n ).unwrap();\n let row_id = conn.last_insert_rowid();\n let (issues_fetched, mrs_fetched, docs_regenerated, warnings_count): (i64, i64, i64, i64) = 
conn.query_row(\n \"SELECT issues_fetched, mrs_fetched, docs_regenerated, warnings_count FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?, r.get(3)?)),\n ).unwrap();\n assert_eq!(issues_fetched, 0);\n assert_eq!(mrs_fetched, 0);\n assert_eq!(docs_regenerated, 0);\n assert_eq!(warnings_count, 0);\n}\n\n#[test]\nfn sync_run_nullable_columns_default_to_null() {\n let conn = setup_test_db();\n conn.execute(\n \"INSERT INTO sync_runs (started_at, heartbeat_at, status, command)\n VALUES (3000, 3000, 'running', 'sync')\",\n [],\n ).unwrap();\n let row_id = conn.last_insert_rowid();\n let (mode, phase, cancelled_at): (Option, Option, Option) = conn.query_row(\n \"SELECT mode, phase, cancelled_at FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?)),\n ).unwrap();\n assert!(mode.is_none());\n assert!(phase.is_none());\n assert!(cancelled_at.is_none());\n}\n\n#[test]\nfn sync_run_counter_round_trip() {\n let conn = setup_test_db();\n conn.execute(\n \"INSERT INTO sync_runs (started_at, heartbeat_at, status, command, mode, issues_fetched, mrs_ingested, docs_embedded)\n VALUES (4000, 4000, 'succeeded', 'sync', 'surgical', 3, 2, 5)\",\n [],\n ).unwrap();\n let row_id = conn.last_insert_rowid();\n let (issues_fetched, mrs_ingested, docs_embedded): (i64, i64, i64) = conn.query_row(\n \"SELECT issues_fetched, mrs_ingested, docs_embedded FROM sync_runs WHERE id = ?1\",\n [row_id],\n |r| Ok((r.get(0)?, r.get(1)?, r.get(2)?)),\n ).unwrap();\n assert_eq!(issues_fetched, 3);\n assert_eq!(mrs_ingested, 2);\n assert_eq!(docs_embedded, 5);\n}\n```\n\nGREEN: Create the SQL file and register the migration.\nVERIFY: `cargo test sync_run_surgical && cargo test sync_run_counter && cargo test sync_run_nullable`\n\n## Edge Cases\n- SQLite ALTER TABLE ADD COLUMN requires DEFAULT for NOT NULL columns. 
All counter columns use `DEFAULT 0`.\n- mode/phase/surgical_iids_json/cancelled_at are nullable TEXT/INTEGER — no DEFAULT needed.\n- Pre-migration rows get NULL for new nullable columns and 0 for counter columns — backward compatible.\n- The indexes (`idx_sync_runs_mode_started`, `idx_sync_runs_status_phase_started`) use `IF NOT EXISTS` for idempotency.\n\n## Dependency Context\nThis is a leaf/foundation bead with no upstream dependencies. Downstream bead bd-arka (SyncRunRecorder extensions) depends on these columns existing to write surgical mode lifecycle data.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-17T19:13:19.914672Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:04:58.565943Z","closed_at":"2026-02-18T21:04:58.565894Z","close_reason":"Completed: all implementation work done, code reviewed, tests passing","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-tiux","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-17T19:19:24.494031Z","created_by":"tayloreernisse"}]} {"id":"bd-u7se","title":"Implement Who screen (5 modes: expert/workload/reviews/active/overlap)","description":"## Background\nThe Who screen is the people explorer, showing contributor expertise and workload across 5 modes. 
Each mode renders differently: Expert shows file-path expertise scores, Workload shows issue/MR assignment counts, Reviews shows review activity, Active shows recent contributors, Overlap shows shared file knowledge.\n\n## Approach\nState (state/who.rs):\n- WhoState: mode (WhoMode), results (WhoResult), path (String), path_input (TextInput), path_focused (bool), selected_index (usize)\n- WhoMode: Expert, Workload, Reviews, Active, Overlap\n- WhoResult: variant per mode with different data shapes\n\nAction (action.rs):\n- fetch_who(conn, mode, path, limit) -> Result: dispatches to existing who query functions in lore CLI (query_experts, query_workload, etc.)\n\nView (view/who.rs):\n- Mode tabs at top: E(xpert) | W(orkload) | R(eviews) | A(ctive) | O(verlap)\n- Expert: path input + sorted table of authors by expertise score + bar chart\n- Workload: stacked bar chart of open issues/MRs per person\n- Reviews: table of review counts (given/received) per person\n- Active: time-sorted list of recent contributors\n- Overlap: matrix or pair-wise table showing shared file knowledge\n- Keyboard: 1-5 or Tab to switch modes, j/k scroll, / focus path input\n\n## Acceptance Criteria\n- [ ] 5 modes switchable via Tab or number keys\n- [ ] Expert mode: path input filters by file path, shows expertise scores\n- [ ] Workload mode: shows assignment counts per person\n- [ ] Reviews mode: shows review activity counts\n- [ ] Active mode: shows recent contributors sorted by activity\n- [ ] Overlap mode: shows shared knowledge between contributors\n- [ ] Each mode renders appropriate visualization\n- [ ] Enter on a person navigates to their issues (scoped issue list)\n\n## Files\n- MODIFY: crates/lore-tui/src/state/who.rs (expand from stub)\n- MODIFY: crates/lore-tui/src/action.rs (add fetch_who)\n- CREATE: crates/lore-tui/src/view/who.rs\n\n## TDD Anchor\nRED: Write test_fetch_who_expert that creates notes with diff paths, calls fetch_who(Expert, \"src/\"), asserts authors sorted by 
expertise score.\nGREEN: Implement fetch_who dispatching to existing who queries.\nVERIFY: cargo test --manifest-path crates/lore-tui/Cargo.toml test_fetch_who\n\n## Edge Cases\n- Empty results for a mode: show \"No data for this mode\" message\n- Expert mode with no diff notes: explain that expert data requires diff notes to be synced\n- Very long file paths in Expert mode: truncate from left (show ...path/to/file.rs)\n\n## Dependency Context\nUses existing who query functions from src/cli/commands/who.rs (made pub).\nUses WhoState from \"Implement AppState composition\" task.","status":"open","priority":2,"issue_type":"task","created_at":"2026-02-12T17:01:22.734056Z","created_by":"tayloreernisse","updated_at":"2026-02-12T18:11:34.085483Z","compaction_level":0,"original_size":0,"labels":["TUI"],"dependencies":[{"issue_id":"bd-u7se","depends_on_id":"bd-29qw","type":"blocks","created_at":"2026-02-12T17:10:02.843151Z","created_by":"tayloreernisse"},{"issue_id":"bd-u7se","depends_on_id":"bd-nwux","type":"blocks","created_at":"2026-02-12T18:11:34.085447Z","created_by":"tayloreernisse"}]} +{"id":"bd-utt4","title":"Define MeArgs struct and register me subcommand","description":"## Background\nThe `lore me` command needs a CLI argument struct and handler wiring. The existing pattern is `WhoArgs` in `src/cli/mod.rs` (line ~964) and `Commands::Who(WhoArgs)` variant. The handler in `src/main.rs` follows: `Some(Commands::Who(args)) => handle_who(cli.config.as_deref(), args, robot_mode)`.\n\n## Approach\n1. 
Define `MeArgs` in `src/cli/mod.rs` (alongside WhoArgs):\n```rust\n#[derive(Args, Debug)]\npub struct MeArgs {\n /// Show only issues section\n #[arg(long)]\n pub issues: bool,\n /// Show only MRs section (authored + reviewing)\n #[arg(long)]\n pub mrs: bool,\n /// Show only activity feed\n #[arg(long)]\n pub activity: bool,\n /// Activity window (e.g., \"30d\", \"7d\") — default 30d\n #[arg(long, default_value = \"30d\")]\n pub since: String,\n /// Scope to one project (fuzzy match)\n #[arg(short, long)]\n pub project: Option,\n /// Override configured username\n #[arg(long)]\n pub user: Option,\n /// Show all synced projects (overrides default_project)\n #[arg(long)]\n pub all: bool,\n /// Select output fields (preset: \"minimal\", or comma-separated)\n #[arg(long, value_delimiter = ',')]\n pub fields: Option>,\n}\n```\n\n2. Add variant to Commands enum (around line 114):\n```rust\n/// Personal work dashboard — my issues, MRs, and activity\nMe(MeArgs),\n```\n\n3. Create `src/cli/commands/me/mod.rs` with stub:\n```rust\nuse crate::Config;\nuse crate::cli::MeArgs;\nuse crate::core::error::Result;\n\npub fn handle_me(\n config_override: Option<&str>,\n args: MeArgs,\n robot_mode: bool,\n) -> std::result::Result<(), Box> {\n eprintln!(\"lore me: not yet implemented\");\n std::process::exit(1);\n}\n```\n\n4. Wire in `src/main.rs` — add match arm (near line ~3164 where handle_who is):\n```rust\nSome(Commands::Me(args)) => handle_me(cli.config.as_deref(), args, robot_mode),\n```\n\n5. 
Add `pub mod me;` to `src/cli/commands/mod.rs`.\n\n## Acceptance Criteria\n- [ ] `MeArgs` struct defined with all flags: --issues, --mrs, --activity, --since, --project, --user, --all, --fields\n- [ ] --since has default_value \"30d\"\n- [ ] --fields uses value_delimiter=',' for comma-separated list\n- [ ] `Me(MeArgs)` variant in Commands enum\n- [ ] `src/cli/commands/me/mod.rs` exists with stub handler\n- [ ] Handler wired in main.rs match arm\n- [ ] `lore me --help` shows all flags with descriptions\n- [ ] `lore me` runs without panic (stub prints \"not yet implemented\" and exits)\n- [ ] Standard global flags (--robot/-J, --color, --icons) inherited from Cli struct\n- [ ] --project and --all are separate flags (mutual exclusivity is runtime, not clap)\n\n## Files\n- MODIFY: src/cli/mod.rs (MeArgs struct + Commands::Me variant)\n- CREATE: src/cli/commands/me/mod.rs (stub handler)\n- MODIFY: src/cli/commands/mod.rs (add `pub mod me;`)\n- MODIFY: src/main.rs (add match arm calling handle_me)\n\n## TDD Anchor\nRED: Write `test_me_args_parse` that parses `[\"lore\", \"me\", \"--issues\", \"--since\", \"7d\", \"--user\", \"jdoe\"]` via `Cli::try_parse_from` and asserts `issues=true, since=\"7d\", user=Some(\"jdoe\")`.\nGREEN: Define MeArgs struct and Commands variant.\nVERIFY: `cargo test me_args`\n\n## Edge Cases\n- `--since` default must be \"30d\" when not provided (test by parsing without --since)\n- `--fields` with no value should be None, `--fields minimal` should be Some(vec![\"minimal\"])\n- The handler signature matches the who pattern: `(config_override, args, robot_mode)`\n\n## Dependency Context\nUses resolve_username from bd-1f1f (called within the handler when fully implemented).\nPattern reference: WhoArgs at `src/cli/mod.rs:964`, handle_who at 
`src/main.rs:3164`.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-19T19:35:34.340060Z","created_by":"tayloreernisse","updated_at":"2026-02-20T16:09:13.046435Z","closed_at":"2026-02-20T16:09:13.046391Z","close_reason":"Implemented by lore-me agent swarm","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-utt4","depends_on_id":"bd-1f1f","type":"blocks","created_at":"2026-02-19T19:41:08.328646Z","created_by":"tayloreernisse"}]} {"id":"bd-v6i","title":"[CP1] gi ingest --type=issues command","description":"## Background\n\nThe `gi ingest --type=issues` command is the main entry point for issue ingestion. It acquires a single-flight lock, calls the orchestrator for each configured project, and outputs progress/summary to the user.\n\n## Approach\n\n### Module: src/cli/commands/ingest.rs\n\n### Clap Definition\n\n```rust\n#[derive(Args)]\npub struct IngestArgs {\n /// Resource type to ingest\n #[arg(long, value_parser = [\"issues\", \"merge_requests\"])]\n pub r#type: String,\n\n /// Filter to single project\n #[arg(long)]\n pub project: Option,\n\n /// Override stale sync lock\n #[arg(long)]\n pub force: bool,\n}\n```\n\n### Handler Function\n\n```rust\npub async fn handle_ingest(args: IngestArgs, config: &Config) -> Result<()>\n```\n\n### Logic\n\n1. **Acquire single-flight lock**: `acquire_sync_lock(conn, args.force)?`\n2. **Get projects to sync**:\n - If `args.project` specified, filter to that one\n - Otherwise, get all configured projects from DB\n3. **For each project**:\n - Print \"Ingesting issues for {project_path}...\"\n - Call `ingest_project_issues(conn, client, config, project_id, gitlab_project_id)`\n - Print \"{N} issues fetched, {M} new labels\"\n4. **Print discussion sync summary**:\n - \"Fetching discussions ({N} issues with updates)...\"\n - \"{N} discussions, {M} notes (excluding {K} system notes)\"\n - \"Skipped discussion sync for {N} unchanged issues.\"\n5. 
**Release lock**: Lock auto-released when handler returns\n\n### Output Format (matches PRD)\n\n```\nIngesting issues...\n\n group/project-one: 1,234 issues fetched, 45 new labels\n\nFetching discussions (312 issues with updates)...\n\n group/project-one: 312 issues → 1,234 discussions, 5,678 notes\n\nTotal: 1,234 issues, 1,234 discussions, 5,678 notes (excluding 1,234 system notes)\nSkipped discussion sync for 922 unchanged issues.\n```\n\n## Acceptance Criteria\n\n- [ ] Clap args parse --type, --project, --force correctly\n- [ ] Single-flight lock acquired before sync starts\n- [ ] Lock error message is clear if concurrent run attempted\n- [ ] Progress output shows per-project counts\n- [ ] Summary includes unchanged issues skipped count\n- [ ] --force flag allows overriding stale lock\n\n## Files\n\n- src/cli/commands/mod.rs (add `pub mod ingest;`)\n- src/cli/commands/ingest.rs (create)\n- src/cli/mod.rs (add Ingest variant to Commands enum)\n\n## TDD Loop\n\nRED:\n```rust\n// tests/cli_ingest_tests.rs\n#[tokio::test] async fn ingest_issues_acquires_lock()\n#[tokio::test] async fn ingest_issues_fails_on_concurrent_run()\n#[tokio::test] async fn ingest_issues_respects_project_filter()\n#[tokio::test] async fn ingest_issues_force_overrides_stale_lock()\n```\n\nGREEN: Implement handler with lock and orchestrator calls\n\nVERIFY: `cargo test cli_ingest`\n\n## Edge Cases\n\n- No projects configured - return early with helpful message\n- Project filter matches nothing - error with \"project not found\"\n- Lock already held - clear error \"Sync already in progress\"\n- Ctrl-C during sync - lock should be released (via Drop or SIGINT 
handler)","status":"closed","priority":2,"issue_type":"task","created_at":"2026-01-25T17:02:38.312565Z","created_by":"tayloreernisse","updated_at":"2026-01-25T22:56:44.090142Z","closed_at":"2026-01-25T22:56:44.090086Z","close_reason":"done","compaction_level":0,"original_size":0,"dependencies":[{"issue_id":"bd-v6i","depends_on_id":"bd-ozy","type":"blocks","created_at":"2026-01-25T17:04:05.629772Z","created_by":"tayloreernisse"}]} {"id":"bd-v6tc","title":"Description","description":"This is a test","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-12T16:52:04.745618Z","updated_at":"2026-02-12T16:52:10.755235Z","closed_at":"2026-02-12T16:52:10.755188Z","close_reason":"test artifacts","compaction_level":0,"original_size":0} {"id":"bd-wcja","title":"Extend SyncResult with surgical mode fields for robot output","description":"## Background\n\nRobot mode (`--robot`) serializes `SyncResult` as JSON for machine consumers. Currently `SyncResult` (lines 31-52 of `src/cli/commands/sync.rs`) only has fields for normal full sync. Surgical sync needs additional metadata in the JSON response: whether surgical mode was active, which IIDs were requested, per-entity outcomes, and whether it was a preflight-only run. 
These must be `Option` fields so normal sync serialization is unchanged (serde `skip_serializing_if = \"Option::is_none\"`).\n\n## Approach\n\nAdd four `Option` fields to the existing `SyncResult` struct:\n\n```rust\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub surgical_mode: Option,\n\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub surgical_iids: Option,\n\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub entity_results: Option>,\n\n#[serde(skip_serializing_if = \"Option::is_none\")]\npub preflight_only: Option,\n```\n\nDefine two new supporting structs in the same file:\n\n```rust\n#[derive(Debug, Default, Serialize)]\npub struct SurgicalIids {\n pub issues: Vec,\n pub merge_requests: Vec,\n}\n\n#[derive(Debug, Serialize)]\npub struct EntitySyncResult {\n pub entity_type: String, // \"issue\" or \"merge_request\"\n pub iid: u64,\n pub outcome: String, // \"synced\", \"skipped_toctou\", \"failed\", \"not_found\"\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub error: Option,\n #[serde(skip_serializing_if = \"Option::is_none\")]\n pub toctou_reason: Option,\n}\n```\n\nBecause `SyncResult` derives `Default`, the new `Option` fields default to `None` automatically. Non-surgical callers need zero changes.\n\n## Acceptance Criteria\n\n1. `SyncResult` compiles with all four new `Option` fields\n2. `SurgicalIids` and `EntitySyncResult` are defined with `Serialize` derive\n3. Serializing a `SyncResult` with surgical fields set produces JSON with `surgical_mode`, `surgical_iids`, `entity_results`, `preflight_only` keys\n4. Serializing a default `SyncResult` (all `None`) produces JSON identical to current output (no surgical keys)\n5. `SyncResult::default()` still works without specifying new fields\n6. 
All existing tests pass unchanged\n\n## Files\n\n- `src/cli/commands/sync.rs` — add fields to `SyncResult`, define `SurgicalIids` and `EntitySyncResult`\n\n## TDD Anchor\n\nAdd a test module or extend the existing one in `src/cli/commands/sync.rs` (or a new `sync_tests.rs` file):\n\n```rust\n#[cfg(test)]\nmod surgical_result_tests {\n use super::*;\n\n #[test]\n fn sync_result_default_omits_surgical_fields() {\n let result = SyncResult::default();\n let json = serde_json::to_value(&result).unwrap();\n assert!(json.get(\"surgical_mode\").is_none());\n assert!(json.get(\"surgical_iids\").is_none());\n assert!(json.get(\"entity_results\").is_none());\n assert!(json.get(\"preflight_only\").is_none());\n }\n\n #[test]\n fn sync_result_with_surgical_fields_serializes_correctly() {\n let result = SyncResult {\n surgical_mode: Some(true),\n surgical_iids: Some(SurgicalIids {\n issues: vec![7, 42],\n merge_requests: vec![10],\n }),\n entity_results: Some(vec![\n EntitySyncResult {\n entity_type: \"issue\".to_string(),\n iid: 7,\n outcome: \"synced\".to_string(),\n error: None,\n toctou_reason: None,\n },\n EntitySyncResult {\n entity_type: \"issue\".to_string(),\n iid: 42,\n outcome: \"skipped_toctou\".to_string(),\n error: None,\n toctou_reason: Some(\"updated_at changed\".to_string()),\n },\n ]),\n preflight_only: Some(false),\n ..SyncResult::default()\n };\n let json = serde_json::to_value(&result).unwrap();\n assert_eq!(json[\"surgical_mode\"], true);\n assert_eq!(json[\"surgical_iids\"][\"issues\"], serde_json::json!([7, 42]));\n assert_eq!(json[\"entity_results\"].as_array().unwrap().len(), 2);\n assert_eq!(json[\"entity_results\"][1][\"outcome\"], \"skipped_toctou\");\n assert_eq!(json[\"preflight_only\"], false);\n }\n\n #[test]\n fn entity_sync_result_omits_none_fields() {\n let entity = EntitySyncResult {\n entity_type: \"merge_request\".to_string(),\n iid: 10,\n outcome: \"synced\".to_string(),\n error: None,\n toctou_reason: None,\n };\n let json = 
serde_json::to_value(&entity).unwrap();\n assert!(json.get(\"error\").is_none());\n assert!(json.get(\"toctou_reason\").is_none());\n assert!(json.get(\"entity_type\").is_some());\n }\n}\n```\n\n## Edge Cases\n\n- `entity_results: Some(vec![])` — empty vec serializes as `[]`, not omitted. This is correct for \"surgical mode ran but had no entities to process.\"\n- `surgical_iids` with empty vecs — valid for edge case where user passes `--issue` but all IIDs are filtered out before sync.\n- Ensure `EntitySyncResult.outcome` uses a fixed set of string values. Consider a future enum, but `String` is fine for initial implementation to keep serialization simple.\n\n## Dependency Context\n\n- **No upstream dependencies** — this bead only adds struct fields, no behavioral changes.\n- **Downstream**: bd-1i4i (orchestrator) populates these fields. bd-3bec (wiring) passes them through.\n- The `#[derive(Default)]` on `SyncResult` means all `Option` fields are `None` by default, so this is a fully additive change.","status":"closed","priority":2,"issue_type":"task","created_at":"2026-02-17T19:17:03.915330Z","created_by":"tayloreernisse","updated_at":"2026-02-18T21:03:46.649727Z","closed_at":"2026-02-18T21:03:46.649679Z","close_reason":"Completed: SyncResult extended with surgical_mode, surgical_iids, entity_results fields","compaction_level":0,"original_size":0,"labels":["surgical-sync"],"dependencies":[{"issue_id":"bd-wcja","depends_on_id":"bd-1i4i","type":"blocks","created_at":"2026-02-17T19:19:25.150441Z","created_by":"tayloreernisse"}]} diff --git a/.beads/last-touched b/.beads/last-touched index 417fea9..ec8bd5b 100644 --- a/.beads/last-touched +++ b/.beads/last-touched @@ -1 +1 @@ -bd-2fc +bd-1tv8