feat(tui): Phase 3 power features — Who, Search, Timeline, Trace, File History screens

Complete TUI Phase 3 implementation with all 5 power feature screens:

- Who screen: 5 modes (expert/workload/reviews/active/overlap) with
  mode tabs, input bar, result rendering, and hint bar
- Search screen: full-text search with result list and scoring display
- Timeline screen: chronological event feed with time-relative display
- Trace screen: file provenance chains with expand/collapse, rename
  tracking, and linked issues/discussions
- File History screen: per-file MR timeline with rename chain display
  and discussion snippets

Also includes:
- Command palette overlay (fuzzy search)
- Bootstrap screen (initial sync flow)
- Action layer split from monolithic action.rs to per-screen modules
- Entity and render cache infrastructure
- Shared who_types module in core crate
- All screens wired into view/mod.rs dispatch
- 597 tests passing, clippy clean (pedantic + nursery), fmt clean
This commit is contained in:
teernisse
2026-02-18 22:56:24 -05:00
parent f8d6180f06
commit fb40fdc677
44 changed files with 14650 additions and 2905 deletions

View File

@@ -0,0 +1,298 @@
#![allow(dead_code)]
use anyhow::{Context, Result};
use rusqlite::Connection;
use crate::state::bootstrap::{DataReadiness, SchemaCheck};
/// Minimum schema version required by this TUI version.
pub const MINIMUM_SCHEMA_VERSION: i32 = 20;

/// Check the schema version of the database.
///
/// Returns [`SchemaCheck::NoDB`] if the `schema_version` table doesn't exist,
/// [`SchemaCheck::Incompatible`] if the version is below the minimum,
/// or [`SchemaCheck::Compatible`] if all is well.
pub fn check_schema_version(conn: &Connection, minimum: i32) -> SchemaCheck {
    // A database without a `schema_version` table was never initialized.
    let has_version_table = conn
        .query_row(
            "SELECT COUNT(*) FROM sqlite_master WHERE type='table' AND name='schema_version'",
            [],
            |r| r.get::<_, i64>(0),
        )
        .is_ok_and(|count| count > 0);
    if !has_version_table {
        return SchemaCheck::NoDB;
    }

    // Read the stored version; an empty table (query error) also reads as NoDB.
    let stored = conn.query_row("SELECT version FROM schema_version LIMIT 1", [], |r| {
        r.get::<_, i32>(0)
    });
    match stored {
        Ok(version) => {
            if version >= minimum {
                SchemaCheck::Compatible { version }
            } else {
                SchemaCheck::Incompatible {
                    found: version,
                    minimum,
                }
            }
        }
        Err(_) => SchemaCheck::NoDB,
    }
}
/// Check whether the database has enough data to skip the bootstrap screen.
///
/// Counts issues, merge requests, and search documents. The `documents` table
/// may not exist on older schemas, so its absence is treated as "no documents."
pub fn check_data_readiness(conn: &Connection) -> Result<DataReadiness> {
    // Probe helper for core-schema tables: a failure here is a real error.
    let probe = |sql: &str, what: &'static str| -> Result<bool> {
        conn.query_row(sql, [], |r| r.get(0)).context(what)
    };

    let has_issues = probe("SELECT EXISTS(SELECT 1 FROM issues LIMIT 1)", "checking issues")?;
    let has_mrs = probe(
        "SELECT EXISTS(SELECT 1 FROM merge_requests LIMIT 1)",
        "checking merge requests",
    )?;

    // documents table may not exist yet (created by generate-docs), so a
    // failed probe is treated as "no documents" rather than an error.
    let has_documents = conn
        .query_row("SELECT EXISTS(SELECT 1 FROM documents LIMIT 1)", [], |r| {
            r.get(0)
        })
        .unwrap_or(false);

    // Missing or empty schema_version reads as version 0.
    let schema_version = conn
        .query_row("SELECT version FROM schema_version LIMIT 1", [], |r| {
            r.get::<_, i32>(0)
        })
        .unwrap_or(0);

    Ok(DataReadiness {
        has_issues,
        has_mrs,
        has_documents,
        schema_version,
    })
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Create the minimal schema needed for bootstrap / data-readiness queries.
    ///
    /// Mirrors the production tables (projects, issues, merge_requests,
    /// discussions, notes, documents, embedding_metadata, sync_runs) with only
    /// the columns the queries under test actually touch.
    fn create_dashboard_schema(conn: &Connection) {
        conn.execute_batch(
            "
            CREATE TABLE projects (
                id INTEGER PRIMARY KEY,
                gitlab_project_id INTEGER UNIQUE NOT NULL,
                path_with_namespace TEXT NOT NULL
            );
            CREATE TABLE issues (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT,
                state TEXT NOT NULL,
                author_username TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE merge_requests (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT,
                state TEXT,
                author_username TEXT,
                created_at INTEGER,
                updated_at INTEGER,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE discussions (
                id INTEGER PRIMARY KEY,
                gitlab_discussion_id TEXT NOT NULL,
                project_id INTEGER NOT NULL,
                noteable_type TEXT NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE notes (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                discussion_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                is_system INTEGER NOT NULL DEFAULT 0,
                author_username TEXT,
                body TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE documents (
                id INTEGER PRIMARY KEY,
                source_type TEXT NOT NULL,
                source_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                content_text TEXT NOT NULL,
                content_hash TEXT NOT NULL
            );
            CREATE TABLE embedding_metadata (
                document_id INTEGER NOT NULL,
                chunk_index INTEGER NOT NULL DEFAULT 0,
                model TEXT NOT NULL,
                dims INTEGER NOT NULL,
                document_hash TEXT NOT NULL,
                chunk_hash TEXT NOT NULL,
                created_at INTEGER NOT NULL,
                PRIMARY KEY(document_id, chunk_index)
            );
            CREATE TABLE sync_runs (
                id INTEGER PRIMARY KEY,
                started_at INTEGER NOT NULL,
                heartbeat_at INTEGER NOT NULL,
                finished_at INTEGER,
                status TEXT NOT NULL,
                command TEXT NOT NULL,
                error TEXT
            );
            ",
        )
        .expect("create dashboard schema");
    }

    /// Insert a test issue for project 1; `updated_at` doubles as
    /// `created_at` and `last_seen_at` (the readiness checks don't care).
    fn insert_issue(conn: &Connection, iid: i64, state: &str, updated_at: i64) {
        conn.execute(
            "INSERT INTO issues (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at)
             VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)",
            rusqlite::params![iid * 100, iid, format!("Issue {iid}"), state, updated_at],
        )
        .expect("insert issue");
    }

    /// Insert a test MR for project 1; gitlab_id is offset by 50 so it never
    /// collides with issue gitlab_ids in mixed fixtures.
    fn insert_mr(conn: &Connection, iid: i64, state: &str, updated_at: i64) {
        conn.execute(
            "INSERT INTO merge_requests (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at)
             VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)",
            rusqlite::params![iid * 100 + 50, iid, format!("MR {iid}"), state, updated_at],
        )
        .expect("insert mr");
    }

    /// TDD anchor test from bead spec.
    #[test]
    fn test_schema_preflight_rejects_old() {
        let conn = Connection::open_in_memory().unwrap();
        conn.execute_batch(
            "CREATE TABLE schema_version (version INTEGER);
             INSERT INTO schema_version (version) VALUES (1);",
        )
        .unwrap();
        let result = check_schema_version(&conn, 20);
        // Version 1 < minimum 20 must report both values back to the caller.
        assert!(matches!(
            result,
            SchemaCheck::Incompatible {
                found: 1,
                minimum: 20
            }
        ));
    }

    #[test]
    fn test_schema_preflight_accepts_compatible() {
        let conn = Connection::open_in_memory().unwrap();
        conn.execute_batch(
            "CREATE TABLE schema_version (version INTEGER);
             INSERT INTO schema_version (version) VALUES (26);",
        )
        .unwrap();
        let result = check_schema_version(&conn, 20);
        assert!(matches!(result, SchemaCheck::Compatible { version: 26 }));
    }

    // Boundary case: version exactly equal to the minimum is compatible.
    #[test]
    fn test_schema_preflight_exact_minimum() {
        let conn = Connection::open_in_memory().unwrap();
        conn.execute_batch(
            "CREATE TABLE schema_version (version INTEGER);
             INSERT INTO schema_version (version) VALUES (20);",
        )
        .unwrap();
        let result = check_schema_version(&conn, 20);
        assert!(matches!(result, SchemaCheck::Compatible { version: 20 }));
    }

    // No schema_version table at all -> treated as "no database".
    #[test]
    fn test_schema_preflight_no_db() {
        let conn = Connection::open_in_memory().unwrap();
        let result = check_schema_version(&conn, 20);
        assert!(matches!(result, SchemaCheck::NoDB));
    }

    // Table exists but has no rows -> also treated as "no database".
    #[test]
    fn test_schema_preflight_empty_schema_version_table() {
        let conn = Connection::open_in_memory().unwrap();
        conn.execute_batch("CREATE TABLE schema_version (version INTEGER);")
            .unwrap();
        let result = check_schema_version(&conn, 20);
        assert!(matches!(result, SchemaCheck::NoDB));
    }

    #[test]
    fn test_check_data_readiness_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);
        conn.execute_batch(
            "CREATE TABLE schema_version (version INTEGER);
             INSERT INTO schema_version (version) VALUES (26);",
        )
        .unwrap();
        let readiness = check_data_readiness(&conn).unwrap();
        assert!(!readiness.has_issues);
        assert!(!readiness.has_mrs);
        assert!(!readiness.has_documents);
        assert_eq!(readiness.schema_version, 26);
        assert!(!readiness.has_any_data());
    }

    #[test]
    fn test_check_data_readiness_with_data() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);
        conn.execute_batch(
            "CREATE TABLE schema_version (version INTEGER);
             INSERT INTO schema_version (version) VALUES (26);",
        )
        .unwrap();
        insert_issue(&conn, 1, "opened", 1_700_000_000_000);
        insert_mr(&conn, 1, "merged", 1_700_000_000_000);
        let readiness = check_data_readiness(&conn).unwrap();
        assert!(readiness.has_issues);
        assert!(readiness.has_mrs);
        assert!(!readiness.has_documents);
        assert_eq!(readiness.schema_version, 26);
        assert!(readiness.has_any_data());
    }

    #[test]
    fn test_check_data_readiness_documents_table_missing() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);
        // No documents table — should still work.
        let readiness = check_data_readiness(&conn).unwrap();
        assert!(!readiness.has_documents);
    }
}

View File

@@ -0,0 +1,485 @@
#![allow(dead_code)]
use anyhow::{Context, Result};
use rusqlite::Connection;
use crate::clock::Clock;
use crate::state::dashboard::{
DashboardData, EntityCounts, LastSyncInfo, ProjectSyncInfo, RecentActivityItem,
};
/// Fetch all data for the dashboard screen.
///
/// Runs aggregation queries for entity counts, per-project sync freshness,
/// recent activity, and the last sync run summary. Any failing sub-query
/// aborts the whole fetch.
pub fn fetch_dashboard(conn: &Connection, clock: &dyn Clock) -> Result<DashboardData> {
    // Struct fields are evaluated in declaration order, matching the
    // original sequential query order.
    Ok(DashboardData {
        counts: fetch_entity_counts(conn)?,
        projects: fetch_project_sync_info(conn, clock)?,
        recent: fetch_recent_activity(conn, clock)?,
        last_sync: fetch_last_sync(conn)?,
    })
}
/// Count all entities in the database.
///
/// All tables queried here are part of the core schema, so a failed query is
/// a real error and is propagated with context rather than defaulted.
// The allow sits on the function: attributes on tail *expressions* need the
// unstable `stmt_expr_attributes` feature and are fragile across toolchains.
#[allow(clippy::cast_sign_loss)] // SQL COUNT(*) is always >= 0
fn fetch_entity_counts(conn: &Connection) -> Result<EntityCounts> {
    // Single-value COUNT(*) helper; `what` feeds the error context.
    let count = |sql: &str, what: &'static str| -> Result<i64> {
        conn.query_row(sql, [], |r| r.get(0)).context(what)
    };

    let issues_total = count("SELECT COUNT(*) FROM issues", "counting issues")?;
    let issues_open = count(
        "SELECT COUNT(*) FROM issues WHERE state = 'opened'",
        "counting open issues",
    )?;
    let mrs_total = count("SELECT COUNT(*) FROM merge_requests", "counting merge requests")?;
    let mrs_open = count(
        "SELECT COUNT(*) FROM merge_requests WHERE state = 'opened'",
        "counting open merge requests",
    )?;
    let discussions = count("SELECT COUNT(*) FROM discussions", "counting discussions")?;
    let notes_total = count("SELECT COUNT(*) FROM notes", "counting notes")?;
    let notes_system = count(
        "SELECT COUNT(*) FROM notes WHERE is_system = 1",
        "counting system notes",
    )?;

    // Integer percentage of system notes; 0 when there are no notes at all.
    // The try_from can only fail if notes_system > notes_total (shouldn't
    // happen); clamp to 100 in that case.
    let notes_system_pct = if notes_total > 0 {
        u8::try_from(notes_system * 100 / notes_total).unwrap_or(100)
    } else {
        0
    };

    let documents = count("SELECT COUNT(*) FROM documents", "counting documents")?;
    let embeddings = count("SELECT COUNT(*) FROM embedding_metadata", "counting embeddings")?;

    Ok(EntityCounts {
        issues_open: issues_open as u64,
        issues_total: issues_total as u64,
        mrs_open: mrs_open as u64,
        mrs_total: mrs_total as u64,
        discussions: discussions as u64,
        notes_total: notes_total as u64,
        notes_system_pct,
        documents: documents as u64,
        embeddings: embeddings as u64,
    })
}
/// Per-project sync freshness based on the most recent sync_runs entry.
///
/// NOTE(review): the LEFT JOIN is deliberately uncorrelated — `sync_runs`
/// has no project column, so every project reports the same global
/// last-successful-sync time (the tests document this as intended).
fn fetch_project_sync_info(conn: &Connection, clock: &dyn Clock) -> Result<Vec<ProjectSyncInfo>> {
    let now_ms = clock.now_ms();
    let mut stmt = conn
        .prepare(
            "SELECT p.path_with_namespace,
                    MAX(sr.finished_at) as last_sync_ms
             FROM projects p
             LEFT JOIN sync_runs sr ON sr.status = 'succeeded'
                 AND sr.finished_at IS NOT NULL
             GROUP BY p.id
             ORDER BY p.path_with_namespace",
        )
        .context("preparing project sync query")?;
    let rows = stmt
        .query_map([], |row| {
            Ok((row.get::<_, String>(0)?, row.get::<_, Option<i64>>(1)?))
        })
        .context("querying project sync info")?;

    let mut infos = Vec::new();
    for row in rows {
        let (path, last_sync_ms) = row.context("reading project sync row")?;
        // u64::MAX is the sentinel for "never synced" so the UI can render
        // it specially.
        let minutes_since_sync = last_sync_ms.map_or(u64::MAX, |ms| {
            u64::try_from(now_ms.saturating_sub(ms) / 60_000).unwrap_or(u64::MAX)
        });
        infos.push(ProjectSyncInfo {
            path,
            minutes_since_sync,
        });
    }
    Ok(infos)
}
/// Recent activity: the 20 most recently updated issues and MRs.
///
/// Issues and merge requests are merged into one feed (tagged by
/// `entity_type`) and sorted newest-first.
fn fetch_recent_activity(conn: &Connection, clock: &dyn Clock) -> Result<Vec<RecentActivityItem>> {
    let now_ms = clock.now_ms();
    let mut stmt = conn
        .prepare(
            "SELECT entity_type, iid, title, state, updated_at FROM (
                SELECT 'issue' AS entity_type, iid, title, state, updated_at
                FROM issues
                UNION ALL
                SELECT 'mr' AS entity_type, iid, title, state, updated_at
                FROM merge_requests
            )
            ORDER BY updated_at DESC
            LIMIT 20",
        )
        .context("preparing recent activity query")?;
    let rows = stmt
        .query_map([], |row| {
            // title/state are nullable in the MR schema; render NULL as "".
            Ok((
                row.get::<_, String>(0)?,
                row.get::<_, i64>(1)?,
                row.get::<_, Option<String>>(2)?.unwrap_or_default(),
                row.get::<_, Option<String>>(3)?.unwrap_or_default(),
                row.get::<_, i64>(4)?,
            ))
        })
        .context("querying recent activity")?;

    let mut items = Vec::new();
    for row in rows {
        let (entity_type, iid, title, state, updated_at) =
            row.context("reading recent activity row")?;
        let minutes_ago =
            u64::try_from(now_ms.saturating_sub(updated_at) / 60_000).unwrap_or(u64::MAX);
        items.push(RecentActivityItem {
            entity_type,
            iid: iid as u64,
            title,
            state,
            minutes_ago,
        });
    }
    Ok(items)
}
/// The most recent sync run summary, or `None` when sync has never run.
fn fetch_last_sync(conn: &Connection) -> Result<Option<LastSyncInfo>> {
    let query = conn.query_row(
        "SELECT status, finished_at, command, error
         FROM sync_runs
         ORDER BY id DESC
         LIMIT 1",
        [],
        |row| {
            Ok(LastSyncInfo {
                status: row.get(0)?,
                finished_at: row.get(1)?,
                command: row.get(2)?,
                error: row.get(3)?,
            })
        },
    );
    match query {
        Ok(info) => Ok(Some(info)),
        // An empty sync_runs table is normal on first launch, not an error.
        Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
        Err(e) => Err(e).context("querying last sync run"),
    }
}
#[cfg(test)]
mod tests {
    use super::*;
    use crate::clock::FakeClock;

    /// Create the minimal schema needed for dashboard queries.
    ///
    /// Mirrors the production tables with only the columns the dashboard
    /// queries actually read.
    fn create_dashboard_schema(conn: &Connection) {
        conn.execute_batch(
            "
            CREATE TABLE projects (
                id INTEGER PRIMARY KEY,
                gitlab_project_id INTEGER UNIQUE NOT NULL,
                path_with_namespace TEXT NOT NULL
            );
            CREATE TABLE issues (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT,
                state TEXT NOT NULL,
                author_username TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE merge_requests (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT,
                state TEXT,
                author_username TEXT,
                created_at INTEGER,
                updated_at INTEGER,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE discussions (
                id INTEGER PRIMARY KEY,
                gitlab_discussion_id TEXT NOT NULL,
                project_id INTEGER NOT NULL,
                noteable_type TEXT NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE notes (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                discussion_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                is_system INTEGER NOT NULL DEFAULT 0,
                author_username TEXT,
                body TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE documents (
                id INTEGER PRIMARY KEY,
                source_type TEXT NOT NULL,
                source_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                content_text TEXT NOT NULL,
                content_hash TEXT NOT NULL
            );
            CREATE TABLE embedding_metadata (
                document_id INTEGER NOT NULL,
                chunk_index INTEGER NOT NULL DEFAULT 0,
                model TEXT NOT NULL,
                dims INTEGER NOT NULL,
                document_hash TEXT NOT NULL,
                chunk_hash TEXT NOT NULL,
                created_at INTEGER NOT NULL,
                PRIMARY KEY(document_id, chunk_index)
            );
            CREATE TABLE sync_runs (
                id INTEGER PRIMARY KEY,
                started_at INTEGER NOT NULL,
                heartbeat_at INTEGER NOT NULL,
                finished_at INTEGER,
                status TEXT NOT NULL,
                command TEXT NOT NULL,
                error TEXT
            );
            ",
        )
        .expect("create dashboard schema");
    }

    /// Insert a test issue.
    fn insert_issue(conn: &Connection, iid: i64, state: &str, updated_at: i64) {
        conn.execute(
            "INSERT INTO issues (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at)
             VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)",
            rusqlite::params![iid * 100, iid, format!("Issue {iid}"), state, updated_at],
        )
        .expect("insert issue");
    }

    /// Insert a test MR.
    fn insert_mr(conn: &Connection, iid: i64, state: &str, updated_at: i64) {
        conn.execute(
            "INSERT INTO merge_requests (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at)
             VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)",
            rusqlite::params![iid * 100 + 50, iid, format!("MR {iid}"), state, updated_at],
        )
        .expect("insert mr");
    }

    #[test]
    fn test_fetch_dashboard_counts() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);
        // 5 issues: 3 open, 2 closed.
        let now_ms = 1_700_000_000_000_i64;
        insert_issue(&conn, 1, "opened", now_ms - 10_000);
        insert_issue(&conn, 2, "opened", now_ms - 20_000);
        insert_issue(&conn, 3, "opened", now_ms - 30_000);
        insert_issue(&conn, 4, "closed", now_ms - 40_000);
        insert_issue(&conn, 5, "closed", now_ms - 50_000);
        let clock = FakeClock::from_ms(now_ms);
        let data = fetch_dashboard(&conn, &clock).unwrap();
        assert_eq!(data.counts.issues_open, 3);
        assert_eq!(data.counts.issues_total, 5);
    }

    #[test]
    fn test_fetch_dashboard_mr_counts() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);
        let now_ms = 1_700_000_000_000_i64;
        insert_mr(&conn, 1, "opened", now_ms);
        insert_mr(&conn, 2, "merged", now_ms);
        insert_mr(&conn, 3, "opened", now_ms);
        insert_mr(&conn, 4, "closed", now_ms);
        let clock = FakeClock::from_ms(now_ms);
        let data = fetch_dashboard(&conn, &clock).unwrap();
        // Only 'opened' counts as open; merged/closed do not.
        assert_eq!(data.counts.mrs_open, 2);
        assert_eq!(data.counts.mrs_total, 4);
    }

    // Empty DB: all counts zero, no divide-by-zero in the pct calculation.
    #[test]
    fn test_fetch_dashboard_empty_database() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);
        let clock = FakeClock::from_ms(1_700_000_000_000);
        let data = fetch_dashboard(&conn, &clock).unwrap();
        assert_eq!(data.counts.issues_open, 0);
        assert_eq!(data.counts.issues_total, 0);
        assert_eq!(data.counts.mrs_open, 0);
        assert_eq!(data.counts.mrs_total, 0);
        assert_eq!(data.counts.notes_system_pct, 0);
        assert!(data.projects.is_empty());
        assert!(data.recent.is_empty());
        assert!(data.last_sync.is_none());
    }

    #[test]
    fn test_fetch_dashboard_notes_system_pct() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);
        // 4 notes: 1 system, 3 user -> 25% system.
        for i in 0..4 {
            conn.execute(
                "INSERT INTO notes (gitlab_id, discussion_id, project_id, is_system, created_at, updated_at, last_seen_at)
                 VALUES (?1, 1, 1, ?2, 1000, 1000, 1000)",
                rusqlite::params![i, if i == 0 { 1 } else { 0 }],
            )
            .unwrap();
        }
        let clock = FakeClock::from_ms(1_700_000_000_000);
        let data = fetch_dashboard(&conn, &clock).unwrap();
        assert_eq!(data.counts.notes_total, 4);
        assert_eq!(data.counts.notes_system_pct, 25);
    }

    #[test]
    fn test_fetch_dashboard_project_sync_info() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);
        conn.execute(
            "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'group/alpha')",
            [],
        )
        .unwrap();
        conn.execute(
            "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (2, 'group/beta')",
            [],
        )
        .unwrap();
        // Sync ran 30 minutes ago. sync_runs is global (no project_id),
        // so all projects see the same last-sync time.
        let now_ms = 1_700_000_000_000_i64;
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command)
             VALUES (?1, ?1, ?2, 'succeeded', 'sync')",
            [now_ms - 30 * 60_000, now_ms - 30 * 60_000],
        )
        .unwrap();
        let clock = FakeClock::from_ms(now_ms);
        let data = fetch_dashboard(&conn, &clock).unwrap();
        assert_eq!(data.projects.len(), 2);
        // Ordered by path_with_namespace.
        assert_eq!(data.projects[0].path, "group/alpha");
        assert_eq!(data.projects[0].minutes_since_sync, 30);
        assert_eq!(data.projects[1].path, "group/beta");
        assert_eq!(data.projects[1].minutes_since_sync, 30); // Same: sync_runs is global.
    }

    #[test]
    fn test_fetch_dashboard_recent_activity_ordered() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);
        let now_ms = 1_700_000_000_000_i64;
        insert_issue(&conn, 1, "opened", now_ms - 60_000); // 1 min ago
        insert_mr(&conn, 1, "merged", now_ms - 120_000); // 2 min ago
        insert_issue(&conn, 2, "closed", now_ms - 180_000); // 3 min ago
        let clock = FakeClock::from_ms(now_ms);
        let data = fetch_dashboard(&conn, &clock).unwrap();
        // Newest first, issues and MRs interleaved by updated_at.
        assert_eq!(data.recent.len(), 3);
        assert_eq!(data.recent[0].entity_type, "issue");
        assert_eq!(data.recent[0].iid, 1);
        assert_eq!(data.recent[0].minutes_ago, 1);
        assert_eq!(data.recent[1].entity_type, "mr");
        assert_eq!(data.recent[1].minutes_ago, 2);
        assert_eq!(data.recent[2].entity_type, "issue");
        assert_eq!(data.recent[2].minutes_ago, 3);
    }

    #[test]
    fn test_fetch_dashboard_last_sync() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);
        let now_ms = 1_700_000_000_000_i64;
        // Older failed run, then a newer successful one: the newest row
        // (highest id) should win.
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command, error)
             VALUES (?1, ?1, ?2, 'failed', 'sync', 'network timeout')",
            [now_ms - 60_000, now_ms - 50_000],
        )
        .unwrap();
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command)
             VALUES (?1, ?1, ?2, 'succeeded', 'sync')",
            [now_ms - 30_000, now_ms - 20_000],
        )
        .unwrap();
        let clock = FakeClock::from_ms(now_ms);
        let data = fetch_dashboard(&conn, &clock).unwrap();
        let sync = data.last_sync.unwrap();
        assert_eq!(sync.status, "succeeded");
        assert_eq!(sync.command, "sync");
        assert!(sync.error.is_none());
    }
}

View File

@@ -0,0 +1,383 @@
#![allow(dead_code)]
//! File History screen actions — query MRs that touched a file path.
//!
//! Wraps the SQL queries from `lore::cli::commands::file_history` but uses
//! an injected `Connection` (TUI manages its own DB connection).
use anyhow::Result;
use rusqlite::Connection;
use lore::core::file_history::resolve_rename_chain;
use crate::state::file_history::{FileDiscussion, FileHistoryMr, FileHistoryResult};
/// Maximum rename chain BFS depth.
const MAX_RENAME_HOPS: usize = 10;
/// Default result limit.
const DEFAULT_LIMIT: usize = 50;

/// Fetch file history: MRs that touched a file path, with optional rename resolution.
///
/// * `project_id` — scope to one project; rename resolution requires this.
/// * `follow_renames` — expand `path` into its rename chain before querying.
/// * `merged_only` — restrict to MRs in state 'merged'.
/// * `include_discussions` — also fetch DiffNote discussions for the paths.
pub fn fetch_file_history(
    conn: &Connection,
    project_id: Option<i64>,
    path: &str,
    follow_renames: bool,
    merged_only: bool,
    include_discussions: bool,
) -> Result<FileHistoryResult> {
    // Rename resolution needs a project scope; otherwise (or when disabled)
    // only the literal path is searched.
    let (all_paths, renames_followed) = match (follow_renames, project_id) {
        (true, Some(pid)) => {
            let chain = resolve_rename_chain(conn, pid, path, MAX_RENAME_HOPS)?;
            let followed = chain.len() > 1;
            (chain, followed)
        }
        _ => (vec![path.to_string()], false),
    };
    let paths_searched = all_paths.len();

    // Placeholder layout: ?1 = project id (possibly unused by the SQL),
    // ?2..?(N+1) = paths, ?(N+2) = LIMIT.
    let in_clause = (0..all_paths.len())
        .map(|i| format!("?{}", i + 2))
        .collect::<Vec<_>>()
        .join(", ");
    let merged_filter = if merged_only {
        " AND mr.state = 'merged'"
    } else {
        ""
    };
    let project_filter = if project_id.is_some() {
        "AND mfc.project_id = ?1"
    } else {
        ""
    };
    let limit_param = all_paths.len() + 2;
    let sql = format!(
        "SELECT DISTINCT \
         mr.iid, mr.title, mr.state, mr.author_username, \
         mfc.change_type, mr.merged_at, mr.updated_at, mr.merge_commit_sha \
         FROM mr_file_changes mfc \
         JOIN merge_requests mr ON mr.id = mfc.merge_request_id \
         WHERE mfc.new_path IN ({in_clause}) {project_filter} {merged_filter} \
         ORDER BY COALESCE(mr.merged_at, mr.updated_at) DESC \
         LIMIT ?{limit_param}"
    );
    let mut stmt = conn.prepare(&sql)?;

    // Build the boxed parameter list matching the placeholder layout above.
    let mut params: Vec<Box<dyn rusqlite::types::ToSql>> =
        Vec::with_capacity(all_paths.len() + 2);
    params.push(Box::new(project_id.unwrap_or(0)));
    params.extend(
        all_paths
            .iter()
            .map(|p| Box::new(p.clone()) as Box<dyn rusqlite::types::ToSql>),
    );
    params.push(Box::new(DEFAULT_LIMIT as i64));
    let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|p| p.as_ref()).collect();

    // NOTE(review): per-row mapping errors are silently dropped here (same
    // best-effort policy as the sibling fetch functions).
    let merge_requests: Vec<FileHistoryMr> = stmt
        .query_map(param_refs.as_slice(), |row| {
            Ok(FileHistoryMr {
                iid: row.get(0)?,
                title: row.get(1)?,
                state: row.get(2)?,
                author_username: row.get(3)?,
                change_type: row.get(4)?,
                merged_at_ms: row.get(5)?,
                updated_at_ms: row.get::<_, i64>(6)?,
                merge_commit_sha: row.get(7)?,
            })
        })?
        .filter_map(std::result::Result::ok)
        .collect();
    let total_mrs = merge_requests.len();

    // Optionally fetch DiffNote discussions, but only when there is at least
    // one matching MR.
    let discussions = if include_discussions && !merge_requests.is_empty() {
        fetch_file_discussions(conn, &all_paths, project_id)?
    } else {
        Vec::new()
    };

    Ok(FileHistoryResult {
        path: path.to_string(),
        rename_chain: all_paths,
        renames_followed,
        merge_requests,
        discussions,
        total_mrs,
        paths_searched,
    })
}
/// Fetch DiffNote discussions referencing the given file paths.
///
/// Returns at most 50 non-system notes, newest first. Bodies longer than
/// 200 bytes are truncated to a snippet at a UTF-8 character boundary.
fn fetch_file_discussions(
    conn: &Connection,
    paths: &[String],
    project_id: Option<i64>,
) -> Result<Vec<FileDiscussion>> {
    // Same placeholder layout as fetch_file_history: ?1 = project id,
    // ?2..?(N+1) = paths.
    let placeholders: Vec<String> = (0..paths.len()).map(|i| format!("?{}", i + 2)).collect();
    let in_clause = placeholders.join(", ");
    let project_filter = if project_id.is_some() {
        "AND d.project_id = ?1"
    } else {
        ""
    };
    let sql = format!(
        "SELECT d.gitlab_discussion_id, n.author_username, n.body, n.new_path, n.created_at \
         FROM notes n \
         JOIN discussions d ON d.id = n.discussion_id \
         WHERE n.new_path IN ({in_clause}) {project_filter} \
         AND n.is_system = 0 \
         ORDER BY n.created_at DESC \
         LIMIT 50"
    );
    let mut stmt = conn.prepare(&sql)?;
    let mut params: Vec<Box<dyn rusqlite::types::ToSql>> = Vec::new();
    params.push(Box::new(project_id.unwrap_or(0)));
    for p in paths {
        params.push(Box::new(p.clone()));
    }
    let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|p| p.as_ref()).collect();
    let discussions: Vec<FileDiscussion> = stmt
        .query_map(param_refs.as_slice(), |row| {
            let body: String = row.get(2)?;
            // Truncate at the largest char boundary <= 200 bytes so multi-byte
            // UTF-8 is never split. Uses stable `is_char_boundary` instead of
            // `floor_char_boundary`, which is a nightly-only API.
            let snippet = if body.len() > 200 {
                let mut end = 200;
                while !body.is_char_boundary(end) {
                    end -= 1;
                }
                format!("{}...", &body[..end])
            } else {
                body
            };
            Ok(FileDiscussion {
                discussion_id: row.get(0)?,
                author_username: row.get(1)?,
                body_snippet: snippet,
                path: row.get(3)?,
                created_at_ms: row.get(4)?,
            })
        })?
        .filter_map(std::result::Result::ok)
        .collect();
    Ok(discussions)
}
/// Fetch distinct file paths from mr_file_changes for autocomplete.
///
/// Capped at 5000 paths, sorted lexicographically; optionally scoped to one
/// project.
pub fn fetch_file_history_paths(conn: &Connection, project_id: Option<i64>) -> Result<Vec<String>> {
    if let Some(pid) = project_id {
        let mut stmt = conn.prepare(
            "SELECT DISTINCT new_path FROM mr_file_changes WHERE project_id = ?1 ORDER BY new_path LIMIT 5000",
        )?;
        let paths: Vec<String> = stmt
            .query_map([pid], |row| row.get(0))?
            .filter_map(std::result::Result::ok)
            .collect();
        Ok(paths)
    } else {
        let mut stmt = conn.prepare(
            "SELECT DISTINCT new_path FROM mr_file_changes ORDER BY new_path LIMIT 5000",
        )?;
        let paths: Vec<String> = stmt
            .query_map([], |row| row.get(0))?
            .filter_map(std::result::Result::ok)
            .collect();
        Ok(paths)
    }
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimal schema for file history queries.
    ///
    /// Mirrors the production projects / merge_requests / mr_file_changes /
    /// discussions / notes tables with only the columns used here.
    fn create_file_history_schema(conn: &Connection) {
        conn.execute_batch(
            "
            CREATE TABLE projects (
                id INTEGER PRIMARY KEY,
                gitlab_project_id INTEGER UNIQUE NOT NULL,
                path_with_namespace TEXT NOT NULL
            );
            CREATE TABLE merge_requests (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT,
                state TEXT,
                author_id INTEGER,
                author_username TEXT,
                draft INTEGER NOT NULL DEFAULT 0,
                created_at INTEGER,
                updated_at INTEGER,
                merged_at INTEGER,
                merge_commit_sha TEXT,
                web_url TEXT,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE mr_file_changes (
                id INTEGER PRIMARY KEY,
                merge_request_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                new_path TEXT NOT NULL,
                old_path TEXT,
                change_type TEXT NOT NULL
            );
            CREATE TABLE discussions (
                id INTEGER PRIMARY KEY,
                gitlab_discussion_id TEXT NOT NULL,
                project_id INTEGER NOT NULL,
                noteable_type TEXT NOT NULL,
                issue_id INTEGER,
                merge_request_id INTEGER,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE notes (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                discussion_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                is_system INTEGER NOT NULL DEFAULT 0,
                author_username TEXT,
                body TEXT,
                note_type TEXT,
                new_path TEXT,
                old_path TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            ",
        )
        .expect("create file history schema");
    }

    // Empty DB: no MRs, but the queried path is still echoed back.
    #[test]
    fn test_fetch_file_history_empty_db() {
        let conn = Connection::open_in_memory().unwrap();
        create_file_history_schema(&conn);
        let result = fetch_file_history(&conn, None, "src/lib.rs", false, false, false).unwrap();
        assert!(result.merge_requests.is_empty());
        assert_eq!(result.total_mrs, 0);
        assert_eq!(result.path, "src/lib.rs");
    }

    #[test]
    fn test_fetch_file_history_returns_mrs() {
        let conn = Connection::open_in_memory().unwrap();
        create_file_history_schema(&conn);
        // Insert project, MR, and file change.
        conn.execute(
            "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'grp/repo')",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state, author_username, updated_at, last_seen_at) \
             VALUES (1, 1000, 1, 42, 'Fix auth', 'merged', 'alice', 1700000000000, 1700000000000)",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, new_path, change_type) \
             VALUES (1, 1, 'src/auth.rs', 'modified')",
            [],
        )
        .unwrap();
        let result =
            fetch_file_history(&conn, Some(1), "src/auth.rs", false, false, false).unwrap();
        assert_eq!(result.merge_requests.len(), 1);
        assert_eq!(result.merge_requests[0].iid, 42);
        assert_eq!(result.merge_requests[0].title, "Fix auth");
        assert_eq!(result.merge_requests[0].change_type, "modified");
    }

    // merged_only must filter out non-merged MRs touching the same path.
    #[test]
    fn test_fetch_file_history_merged_only() {
        let conn = Connection::open_in_memory().unwrap();
        create_file_history_schema(&conn);
        conn.execute(
            "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'grp/repo')",
            [],
        ).unwrap();
        // Merged MR.
        conn.execute(
            "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state, author_username, updated_at, last_seen_at) \
             VALUES (1, 1000, 1, 42, 'Merged MR', 'merged', 'alice', 1700000000000, 1700000000000)",
            [],
        ).unwrap();
        // Open MR.
        conn.execute(
            "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state, author_username, updated_at, last_seen_at) \
             VALUES (2, 1001, 1, 43, 'Open MR', 'opened', 'bob', 1700000000000, 1700000000000)",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, new_path, change_type) VALUES (1, 1, 'src/lib.rs', 'modified')",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, new_path, change_type) VALUES (2, 1, 'src/lib.rs', 'modified')",
            [],
        ).unwrap();
        // Without merged_only: both MRs.
        let all = fetch_file_history(&conn, Some(1), "src/lib.rs", false, false, false).unwrap();
        assert_eq!(all.merge_requests.len(), 2);
        // With merged_only: only the merged one.
        let merged = fetch_file_history(&conn, Some(1), "src/lib.rs", false, true, false).unwrap();
        assert_eq!(merged.merge_requests.len(), 1);
        assert_eq!(merged.merge_requests[0].state, "merged");
    }

    #[test]
    fn test_fetch_file_history_paths_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_file_history_schema(&conn);
        let paths = fetch_file_history_paths(&conn, None).unwrap();
        assert!(paths.is_empty());
    }

    // Duplicate new_path rows collapse to one autocomplete entry, sorted.
    #[test]
    fn test_fetch_file_history_paths_returns_distinct() {
        let conn = Connection::open_in_memory().unwrap();
        create_file_history_schema(&conn);
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, new_path, change_type) VALUES (1, 1, 'src/a.rs', 'modified')",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, new_path, change_type) VALUES (2, 1, 'src/a.rs', 'modified')",
            [],
        ).unwrap();
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, new_path, change_type) VALUES (3, 1, 'src/b.rs', 'added')",
            [],
        ).unwrap();
        let paths = fetch_file_history_paths(&conn, None).unwrap();
        assert_eq!(paths, vec!["src/a.rs", "src/b.rs"]);
    }
}

View File

@@ -0,0 +1,611 @@
#![allow(dead_code)]
use anyhow::{Context, Result};
use rusqlite::Connection;
use crate::message::EntityKey;
use crate::state::issue_detail::{IssueDetailData, IssueMetadata};
use crate::view::common::cross_ref::{CrossRef, CrossRefKind};
use crate::view::common::discussion_tree::{DiscussionNode, NoteNode};
/// Fetch issue metadata and cross-references (Phase 1 load).
///
/// Runs inside a single read transaction for snapshot consistency.
/// Returns metadata + cross-refs; discussions are loaded separately.
pub fn fetch_issue_detail(conn: &Connection, key: &EntityKey) -> Result<IssueDetailData> {
    // Metadata first, then cross-refs — same sequencing as two explicit calls.
    Ok(IssueDetailData {
        metadata: fetch_issue_metadata(conn, key)?,
        cross_refs: fetch_issue_cross_refs(conn, key)?,
    })
}
/// Fetch issue metadata from the local DB.
///
/// Loads the core issue row (joined to its project for the display path),
/// then merges in assignees and labels from two follow-up queries.
///
/// # Errors
///
/// Fails if the issue does not exist locally or any query errors.
fn fetch_issue_metadata(conn: &Connection, key: &EntityKey) -> Result<IssueMetadata> {
    let row = conn
        .query_row(
            "SELECT i.iid, p.path_with_namespace, i.title,
                COALESCE(i.description, ''), i.state, i.author_username,
                COALESCE(i.milestone_title, ''),
                i.due_date, i.created_at, i.updated_at,
                COALESCE(i.web_url, ''),
                (SELECT COUNT(*) FROM discussions d
                 WHERE d.issue_id = i.id AND d.noteable_type = 'Issue')
            FROM issues i
            JOIN projects p ON p.id = i.project_id
            WHERE i.project_id = ?1 AND i.iid = ?2",
            rusqlite::params![key.project_id, key.iid],
            |row| {
                Ok(IssueMetadata {
                    iid: row.get(0)?,
                    project_path: row.get(1)?,
                    title: row.get(2)?,
                    description: row.get(3)?,
                    state: row.get(4)?,
                    // author_username may be NULL in the local DB.
                    author: row.get::<_, Option<String>>(5)?.unwrap_or_default(),
                    assignees: Vec::new(), // Fetched separately below.
                    labels: Vec::new(),    // Fetched separately below.
                    // COALESCE collapses NULL to '', so '' means "no milestone".
                    milestone: Some(row.get::<_, String>(6)?).filter(|m| !m.is_empty()),
                    due_date: row.get(7)?,
                    created_at: row.get(8)?,
                    updated_at: row.get(9)?,
                    web_url: row.get(10)?,
                    // COUNT(*) is never negative; try_from avoids the
                    // sign-losing `as usize` cast (clippy::cast_sign_loss).
                    discussion_count: usize::try_from(row.get::<_, i64>(11)?)
                        .unwrap_or_default(),
                })
            },
        )
        .context("fetching issue metadata")?;
    // Fetch assignees.
    let mut assignees_stmt = conn
        .prepare("SELECT username FROM issue_assignees WHERE issue_id = (SELECT id FROM issues WHERE project_id = ?1 AND iid = ?2)")
        .context("preparing assignees query")?;
    let assignees: Vec<String> = assignees_stmt
        .query_map(rusqlite::params![key.project_id, key.iid], |r| r.get(0))
        .context("fetching assignees")?
        .collect::<std::result::Result<Vec<_>, _>>()
        .context("reading assignee row")?;
    // Fetch labels, sorted by name for stable rendering.
    let mut labels_stmt = conn
        .prepare(
            "SELECT l.name FROM issue_labels il
             JOIN labels l ON l.id = il.label_id
             WHERE il.issue_id = (SELECT id FROM issues WHERE project_id = ?1 AND iid = ?2)
             ORDER BY l.name",
        )
        .context("preparing labels query")?;
    let labels: Vec<String> = labels_stmt
        .query_map(rusqlite::params![key.project_id, key.iid], |r| r.get(0))
        .context("fetching labels")?
        .collect::<std::result::Result<Vec<_>, _>>()
        .context("reading label row")?;
    // Merge the list fields into the base row via struct update.
    Ok(IssueMetadata {
        assignees,
        labels,
        ..row
    })
}
/// Fetch cross-references for an issue from the entity_references table.
///
/// The SQL resolves a display title and the target's local project id when
/// the target entity exists in the local DB; rows whose target was never
/// synced keep NULL ids and are rendered as non-navigable labels.
fn fetch_issue_cross_refs(conn: &Connection, key: &EntityKey) -> Result<Vec<CrossRef>> {
    let mut stmt = conn
        .prepare(
            "SELECT er.reference_type, er.target_entity_type, er.target_entity_id,
                er.target_entity_iid, er.target_project_path,
                CASE
                    WHEN er.target_entity_type = 'issue'
                        THEN (SELECT title FROM issues WHERE id = er.target_entity_id)
                    WHEN er.target_entity_type = 'merge_request'
                        THEN (SELECT title FROM merge_requests WHERE id = er.target_entity_id)
                    ELSE NULL
                END as entity_title,
                CASE
                    WHEN er.target_entity_id IS NOT NULL
                        THEN (SELECT project_id FROM issues WHERE id = er.target_entity_id
                              UNION ALL
                              SELECT project_id FROM merge_requests WHERE id = er.target_entity_id
                              LIMIT 1)
                    ELSE NULL
                END as target_project_id
            FROM entity_references er
            WHERE er.source_entity_type = 'issue'
                AND er.source_entity_id = (SELECT id FROM issues WHERE project_id = ?1 AND iid = ?2)
            ORDER BY er.reference_type, er.target_entity_iid",
        )
        .context("preparing cross-ref query")?;
    let refs = stmt
        .query_map(rusqlite::params![key.project_id, key.iid], |row| {
            let ref_type: String = row.get(0)?;
            let target_type: String = row.get(1)?;
            let target_id: Option<i64> = row.get(2)?;
            let target_iid: Option<i64> = row.get(3)?;
            let target_path: Option<String> = row.get(4)?;
            let title: Option<String> = row.get(5)?;
            let target_project_id: Option<i64> = row.get(6)?;
            // Collapse (reference_type, target_type) into the widget's kind;
            // anything unrecognized degrades to a plain "mentioned in" ref.
            let kind = match (ref_type.as_str(), target_type.as_str()) {
                ("closes", "merge_request") => CrossRefKind::ClosingMr,
                ("related", "issue") => CrossRefKind::RelatedIssue,
                _ => CrossRefKind::MentionedIn,
            };
            // NOTE(review): a missing target iid becomes 0 and a missing
            // project id falls back to the source project — confirm the
            // navigation layer treats such placeholder keys as inert
            // (navigable is false for them, see below).
            let iid = target_iid.unwrap_or(0);
            let project_id = target_project_id.unwrap_or(key.project_id);
            let entity_key = match target_type.as_str() {
                "merge_request" => EntityKey::mr(project_id, iid),
                _ => EntityKey::issue(project_id, iid),
            };
            // Prefer the synced title; otherwise synthesize a short
            // "path!iid" / "path#iid" style reference label.
            let label = title.unwrap_or_else(|| {
                let prefix = if target_type == "merge_request" {
                    "!"
                } else {
                    "#"
                };
                let path = target_path.unwrap_or_default();
                if path.is_empty() {
                    format!("{prefix}{iid}")
                } else {
                    format!("{path}{prefix}{iid}")
                }
            });
            // Only references whose target row exists locally can be opened.
            let navigable = target_id.is_some();
            Ok(CrossRef {
                kind,
                entity_key,
                label,
                navigable,
            })
        })
        .context("fetching cross-refs")?
        .collect::<std::result::Result<Vec<_>, _>>()
        .context("reading cross-ref row")?;
    Ok(refs)
}
/// Fetch discussions for an issue (Phase 2 async load).
///
/// Returns `DiscussionNode` tree suitable for the discussion tree widget.
pub fn fetch_issue_discussions(conn: &Connection, key: &EntityKey) -> Result<Vec<DiscussionNode>> {
    // Resolve the issue's local row id once; both statements key off it.
    let issue_id: i64 = conn
        .query_row(
            "SELECT id FROM issues WHERE project_id = ?1 AND iid = ?2",
            rusqlite::params![key.project_id, key.iid],
            |r| r.get(0),
        )
        .context("looking up issue id")?;
    let mut disc_stmt = conn
        .prepare(
            "SELECT d.id, d.gitlab_discussion_id, d.resolvable, d.resolved
             FROM discussions d
             WHERE d.issue_id = ?1 AND d.noteable_type = 'Issue'
             ORDER BY d.first_note_at ASC, d.id ASC",
        )
        .context("preparing discussions query")?;
    let mut note_stmt = conn
        .prepare(
            "SELECT n.author_username, n.body, n.created_at, n.is_system,
                n.note_type, n.position_new_path, n.position_new_line
             FROM notes n
             WHERE n.discussion_id = ?1
             ORDER BY n.position ASC, n.created_at ASC",
        )
        .context("preparing notes query")?;
    // Materialize the discussion headers first so `disc_stmt` is no longer
    // borrowed while `note_stmt` runs once per discussion.
    let headers: Vec<(i64, String, bool, bool)> = disc_stmt
        .query_map(rusqlite::params![issue_id], |row| {
            Ok((row.get(0)?, row.get(1)?, row.get(2)?, row.get(3)?))
        })
        .context("fetching discussions")?
        .collect::<std::result::Result<Vec<_>, _>>()
        .context("reading discussion row")?;
    let mut out = Vec::with_capacity(headers.len());
    for (db_id, discussion_id, resolvable, resolved) in headers {
        let notes = note_stmt
            .query_map(rusqlite::params![db_id], |row| {
                Ok(NoteNode {
                    author: row.get::<_, Option<String>>(0)?.unwrap_or_default(),
                    body: row.get::<_, Option<String>>(1)?.unwrap_or_default(),
                    created_at: row.get(2)?,
                    is_system: row.get(3)?,
                    // Only GitLab `DiffNote`s carry diff position info.
                    is_diff_note: row.get::<_, Option<String>>(4)?.as_deref() == Some("DiffNote"),
                    diff_file_path: row.get(5)?,
                    diff_new_line: row.get(6)?,
                })
            })
            .context("fetching notes")?
            .collect::<std::result::Result<Vec<NoteNode>, _>>()
            .context("reading note row")?;
        out.push(DiscussionNode {
            discussion_id,
            notes,
            resolvable,
            resolved,
        });
    }
    Ok(out)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Create the minimal schema needed by the issue-detail queries.
    fn create_issue_detail_schema(conn: &Connection) {
        conn.execute_batch(
            "
            CREATE TABLE projects (
                id INTEGER PRIMARY KEY,
                gitlab_project_id INTEGER UNIQUE NOT NULL,
                path_with_namespace TEXT NOT NULL
            );
            CREATE TABLE issues (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT NOT NULL,
                description TEXT,
                state TEXT NOT NULL DEFAULT 'opened',
                author_username TEXT,
                milestone_title TEXT,
                due_date TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                web_url TEXT,
                UNIQUE(project_id, iid)
            );
            CREATE TABLE issue_assignees (
                issue_id INTEGER NOT NULL,
                username TEXT NOT NULL,
                UNIQUE(issue_id, username)
            );
            CREATE TABLE labels (
                id INTEGER PRIMARY KEY,
                project_id INTEGER NOT NULL,
                name TEXT NOT NULL
            );
            CREATE TABLE issue_labels (
                issue_id INTEGER NOT NULL,
                label_id INTEGER NOT NULL,
                UNIQUE(issue_id, label_id)
            );
            CREATE TABLE discussions (
                id INTEGER PRIMARY KEY,
                gitlab_discussion_id TEXT NOT NULL,
                project_id INTEGER NOT NULL,
                issue_id INTEGER,
                merge_request_id INTEGER,
                noteable_type TEXT NOT NULL,
                resolvable INTEGER NOT NULL DEFAULT 0,
                resolved INTEGER NOT NULL DEFAULT 0,
                first_note_at INTEGER
            );
            CREATE TABLE notes (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                discussion_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                note_type TEXT,
                is_system INTEGER NOT NULL DEFAULT 0,
                author_username TEXT,
                body TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                position INTEGER,
                position_new_path TEXT,
                position_new_line INTEGER
            );
            CREATE TABLE entity_references (
                id INTEGER PRIMARY KEY,
                project_id INTEGER NOT NULL,
                source_entity_type TEXT NOT NULL,
                source_entity_id INTEGER NOT NULL,
                target_entity_type TEXT NOT NULL,
                target_entity_id INTEGER,
                target_project_path TEXT,
                target_entity_iid INTEGER,
                reference_type TEXT NOT NULL,
                source_method TEXT NOT NULL DEFAULT 'api',
                created_at INTEGER NOT NULL DEFAULT 0
            );
            CREATE TABLE merge_requests (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT NOT NULL,
                state TEXT NOT NULL DEFAULT 'opened',
                UNIQUE(project_id, iid)
            );
            ",
        )
        .unwrap();
    }

    /// Seed a project, issue #42 (two assignees, two labels), two
    /// discussions with notes, and a closing-MR cross-reference.
    fn setup_issue_detail_data(conn: &Connection) {
        for sql in [
            "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'group/project')",
            "INSERT INTO issues (id, gitlab_id, project_id, iid, title, description, state, author_username, milestone_title, due_date, created_at, updated_at, web_url)
             VALUES (1, 1000, 1, 42, 'Fix authentication flow', 'Detailed description here', 'opened', 'alice', 'v1.0', '2026-03-01', 1700000000000, 1700000060000, 'https://gitlab.com/group/project/-/issues/42')",
            "INSERT INTO issue_assignees (issue_id, username) VALUES (1, 'bob')",
            "INSERT INTO issue_assignees (issue_id, username) VALUES (1, 'charlie')",
            "INSERT INTO labels (id, project_id, name) VALUES (1, 1, 'backend')",
            "INSERT INTO labels (id, project_id, name) VALUES (2, 1, 'urgent')",
            "INSERT INTO issue_labels (issue_id, label_id) VALUES (1, 1)",
            "INSERT INTO issue_labels (issue_id, label_id) VALUES (1, 2)",
            // Human discussion with two notes.
            "INSERT INTO discussions (id, gitlab_discussion_id, project_id, issue_id, noteable_type, resolvable, resolved, first_note_at)
             VALUES (1, 'disc-aaa', 1, 1, 'Issue', 0, 0, 1700000010000)",
            "INSERT INTO notes (id, gitlab_id, discussion_id, project_id, author_username, body, created_at, updated_at, position, is_system, note_type)
             VALUES (1, 10001, 1, 1, 'alice', 'This looks good overall', 1700000010000, 1700000010000, 0, 0, 'DiscussionNote')",
            "INSERT INTO notes (id, gitlab_id, discussion_id, project_id, author_username, body, created_at, updated_at, position, is_system, note_type)
             VALUES (2, 10002, 1, 1, 'bob', 'Agreed, but see my comment below', 1700000020000, 1700000020000, 1, 0, 'DiscussionNote')",
            // System-note discussion (later first_note_at, so it sorts second).
            "INSERT INTO discussions (id, gitlab_discussion_id, project_id, issue_id, noteable_type, first_note_at)
             VALUES (2, 'disc-bbb', 1, 1, 'Issue', 1700000030000)",
            "INSERT INTO notes (id, gitlab_id, discussion_id, project_id, author_username, body, created_at, updated_at, position, is_system, note_type)
             VALUES (3, 10003, 2, 1, 'system', 'changed the description', 1700000030000, 1700000030000, 0, 1, NULL)",
            // MR targeted by a 'closes' cross-reference.
            "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state)
             VALUES (1, 2000, 1, 10, 'Fix auth MR', 'opened')",
            "INSERT INTO entity_references (project_id, source_entity_type, source_entity_id, target_entity_type, target_entity_id, target_entity_iid, reference_type)
             VALUES (1, 'issue', 1, 'merge_request', 1, 10, 'closes')",
        ] {
            conn.execute(sql, []).unwrap();
        }
    }

    /// Fresh in-memory DB with schema and seed data applied.
    fn seeded() -> Connection {
        let conn = Connection::open_in_memory().unwrap();
        create_issue_detail_schema(&conn);
        setup_issue_detail_data(&conn);
        conn
    }

    #[test]
    fn test_fetch_issue_detail_basic() {
        let data = fetch_issue_detail(&seeded(), &EntityKey::issue(1, 42)).unwrap();
        assert_eq!(data.metadata.iid, 42);
        assert_eq!(data.metadata.title, "Fix authentication flow");
        assert_eq!(data.metadata.state, "opened");
        assert_eq!(data.metadata.author, "alice");
        assert_eq!(data.metadata.project_path, "group/project");
        assert_eq!(data.metadata.milestone, Some("v1.0".to_string()));
        assert_eq!(data.metadata.due_date, Some("2026-03-01".to_string()));
        assert_eq!(
            data.metadata.web_url,
            "https://gitlab.com/group/project/-/issues/42"
        );
    }

    #[test]
    fn test_fetch_issue_detail_assignees() {
        let data = fetch_issue_detail(&seeded(), &EntityKey::issue(1, 42)).unwrap();
        assert_eq!(data.metadata.assignees.len(), 2);
        assert!(data.metadata.assignees.contains(&"bob".to_string()));
        assert!(data.metadata.assignees.contains(&"charlie".to_string()));
    }

    #[test]
    fn test_fetch_issue_detail_labels() {
        let data = fetch_issue_detail(&seeded(), &EntityKey::issue(1, 42)).unwrap();
        assert_eq!(data.metadata.labels, vec!["backend", "urgent"]);
    }

    #[test]
    fn test_fetch_issue_detail_cross_refs() {
        let data = fetch_issue_detail(&seeded(), &EntityKey::issue(1, 42)).unwrap();
        assert_eq!(data.cross_refs.len(), 1);
        assert_eq!(data.cross_refs[0].kind, CrossRefKind::ClosingMr);
        assert_eq!(data.cross_refs[0].entity_key, EntityKey::mr(1, 10));
        assert_eq!(data.cross_refs[0].label, "Fix auth MR");
        assert!(data.cross_refs[0].navigable);
    }

    #[test]
    fn test_fetch_issue_detail_discussion_count() {
        let data = fetch_issue_detail(&seeded(), &EntityKey::issue(1, 42)).unwrap();
        assert_eq!(data.metadata.discussion_count, 2);
    }

    #[test]
    fn test_fetch_issue_discussions_basic() {
        let discussions = fetch_issue_discussions(&seeded(), &EntityKey::issue(1, 42)).unwrap();
        assert_eq!(discussions.len(), 2);
    }

    #[test]
    fn test_fetch_issue_discussions_notes() {
        let discussions = fetch_issue_discussions(&seeded(), &EntityKey::issue(1, 42)).unwrap();
        // First discussion has 2 notes in position order.
        assert_eq!(discussions[0].notes.len(), 2);
        assert_eq!(discussions[0].notes[0].author, "alice");
        assert_eq!(discussions[0].notes[0].body, "This looks good overall");
        assert_eq!(discussions[0].notes[1].author, "bob");
        assert!(!discussions[0].notes[0].is_system);
    }

    #[test]
    fn test_fetch_issue_discussions_system_note() {
        let discussions = fetch_issue_discussions(&seeded(), &EntityKey::issue(1, 42)).unwrap();
        // Second discussion holds the single system note.
        assert_eq!(discussions[1].notes.len(), 1);
        assert!(discussions[1].notes[0].is_system);
        assert_eq!(discussions[1].notes[0].body, "changed the description");
    }

    #[test]
    fn test_fetch_issue_discussions_ordering() {
        let discussions = fetch_issue_discussions(&seeded(), &EntityKey::issue(1, 42)).unwrap();
        // Ordered by first_note_at.
        assert_eq!(discussions[0].discussion_id, "disc-aaa");
        assert_eq!(discussions[1].discussion_id, "disc-bbb");
    }

    #[test]
    fn test_fetch_issue_detail_not_found() {
        assert!(fetch_issue_detail(&seeded(), &EntityKey::issue(1, 999)).is_err());
    }

    #[test]
    fn test_fetch_issue_detail_no_description() {
        let conn = Connection::open_in_memory().unwrap();
        create_issue_detail_schema(&conn);
        for sql in [
            "INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'g/p')",
            // NULL description must surface as an empty string.
            "INSERT INTO issues (id, gitlab_id, project_id, iid, title, description, state, created_at, updated_at)
             VALUES (1, 1000, 1, 1, 'No desc', NULL, 'opened', 0, 0)",
        ] {
            conn.execute(sql, []).unwrap();
        }
        let data = fetch_issue_detail(&conn, &EntityKey::issue(1, 1)).unwrap();
        assert_eq!(data.metadata.description, "");
    }
}

View File

@@ -0,0 +1,532 @@
#![allow(dead_code)]
use anyhow::{Context, Result};
use rusqlite::Connection;
use crate::state::issue_list::{
IssueCursor, IssueFilter, IssueListPage, IssueListRow, SortField, SortOrder,
};
/// Page size for issue list queries.
///
/// One keyset page fetches this many rows (the query requests one extra
/// sentinel row to detect whether a next page exists).
const ISSUE_PAGE_SIZE: usize = 50;
/// Fetch a page of issues matching the given filter and sort.
///
/// Uses keyset pagination: when `cursor` is `Some`, returns rows after
/// (less-than for DESC, greater-than for ASC) the cursor boundary.
/// When `snapshot_fence` is `Some`, limits results to rows updated_at <= fence
/// to prevent newly synced items from shifting the page window.
///
/// Returns up to [`ISSUE_PAGE_SIZE`] rows, a `next_cursor` when more rows
/// exist, and the total (un-paged) count of matching issues.
pub fn fetch_issue_list(
    conn: &Connection,
    filter: &IssueFilter,
    sort_field: SortField,
    sort_order: SortOrder,
    cursor: Option<&IssueCursor>,
    snapshot_fence: Option<i64>,
) -> Result<IssueListPage> {
    // -- Build dynamic WHERE conditions and params --------------------------
    // `conditions` uses positional `?` placeholders and `params` grows in
    // lockstep, so placeholder order always matches condition order.
    let mut conditions: Vec<String> = Vec::new();
    let mut params: Vec<Box<dyn rusqlite::types::ToSql>> = Vec::new();
    // Filter: project_id
    if let Some(pid) = filter.project_id {
        conditions.push("i.project_id = ?".into());
        params.push(Box::new(pid));
    }
    // Filter: state
    if let Some(ref state) = filter.state {
        conditions.push("i.state = ?".into());
        params.push(Box::new(state.clone()));
    }
    // Filter: author
    if let Some(ref author) = filter.author {
        conditions.push("i.author_username = ?".into());
        params.push(Box::new(author.clone()));
    }
    // Filter: label (via join)
    // `fil`/`fl` aliases keep this filter join distinct from the
    // display-label LEFT JOINs in the data query below.
    let label_join = if let Some(ref label) = filter.label {
        conditions.push("fl.name = ?".into());
        params.push(Box::new(label.clone()));
        "JOIN issue_labels fil ON fil.issue_id = i.id \
         JOIN labels fl ON fl.id = fil.label_id"
    } else {
        ""
    };
    // Filter: free_text (LIKE on title)
    if let Some(ref text) = filter.free_text {
        conditions.push("i.title LIKE ?".into());
        params.push(Box::new(format!("%{text}%")));
    }
    // Snapshot fence
    if let Some(fence) = snapshot_fence {
        conditions.push("i.updated_at <= ?".into());
        params.push(Box::new(fence));
    }
    // -- Count query (before cursor filter) ---------------------------------
    // The count intentionally excludes the cursor condition so total_count
    // reflects the whole filtered set, not the remainder after this page.
    let where_clause = if conditions.is_empty() {
        String::new()
    } else {
        format!("WHERE {}", conditions.join(" AND "))
    };
    let count_sql = format!(
        "SELECT COUNT(DISTINCT i.id) FROM issues i \
         JOIN projects p ON p.id = i.project_id \
         {label_join} {where_clause}"
    );
    let count_params: Vec<&dyn rusqlite::types::ToSql> =
        params.iter().map(|b| b.as_ref()).collect();
    let total_count: i64 = conn
        .query_row(&count_sql, count_params.as_slice(), |r| r.get(0))
        .context("counting issues for list")?;
    // -- Keyset cursor condition -------------------------------------------
    let (sort_col, sort_dir) = sort_column_and_dir(sort_field, sort_order);
    let cursor_op = if sort_dir == "DESC" { "<" } else { ">" };
    // NOTE(review): the cursor always binds (updated_at, iid) regardless of
    // sort_col, so keyset paging is only coherent for SortField::UpdatedAt —
    // confirm callers never page a list sorted by another field.
    if let Some(c) = cursor {
        conditions.push(format!("({sort_col}, i.iid) {cursor_op} (?, ?)"));
        params.push(Box::new(c.updated_at));
        params.push(Box::new(c.iid));
    }
    // -- Data query ---------------------------------------------------------
    let where_clause_full = if conditions.is_empty() {
        String::new()
    } else {
        format!("WHERE {}", conditions.join(" AND "))
    };
    let data_sql = format!(
        "SELECT p.path_with_namespace, i.iid, i.title, i.state, \
         i.author_username, i.updated_at, \
         GROUP_CONCAT(DISTINCT l.name) AS label_names \
         FROM issues i \
         JOIN projects p ON p.id = i.project_id \
         {label_join} \
         LEFT JOIN issue_labels il ON il.issue_id = i.id \
         LEFT JOIN labels l ON l.id = il.label_id \
         {where_clause_full} \
         GROUP BY i.id \
         ORDER BY {sort_col} {sort_dir}, i.iid {sort_dir} \
         LIMIT ?"
    );
    // +1 to detect if there's a next page
    let fetch_limit = (ISSUE_PAGE_SIZE + 1) as i64;
    params.push(Box::new(fetch_limit));
    let all_params: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|b| b.as_ref()).collect();
    let mut stmt = conn
        .prepare(&data_sql)
        .context("preparing issue list query")?;
    let rows_result = stmt
        .query_map(all_params.as_slice(), |row| {
            let project_path: String = row.get(0)?;
            let iid: i64 = row.get(1)?;
            let title: String = row.get::<_, Option<String>>(2)?.unwrap_or_default();
            let state: String = row.get::<_, Option<String>>(3)?.unwrap_or_default();
            let author: String = row.get::<_, Option<String>>(4)?.unwrap_or_default();
            let updated_at: i64 = row.get(5)?;
            let label_names: Option<String> = row.get(6)?;
            // NOTE(review): GROUP_CONCAT output is split on ',' — a label
            // name containing a comma would be split into two labels.
            let labels = label_names
                .map(|s| s.split(',').map(String::from).collect())
                .unwrap_or_default();
            Ok(IssueListRow {
                project_path,
                iid,
                title,
                state,
                author,
                labels,
                updated_at,
            })
        })
        .context("querying issue list")?;
    let mut rows: Vec<IssueListRow> = Vec::new();
    for row in rows_result {
        rows.push(row.context("reading issue list row")?);
    }
    // Determine next cursor from the last row (if we got more than page size)
    let has_next = rows.len() > ISSUE_PAGE_SIZE;
    if has_next {
        rows.truncate(ISSUE_PAGE_SIZE);
    }
    // The next page starts strictly after the last row shown on this page.
    let next_cursor = if has_next {
        rows.last().map(|r| IssueCursor {
            updated_at: r.updated_at,
            iid: r.iid,
        })
    } else {
        None
    };
    #[allow(clippy::cast_sign_loss)]
    Ok(IssueListPage {
        rows,
        next_cursor,
        total_count: total_count as u64,
    })
}
/// Map sort field + order to SQL column name and direction keyword.
fn sort_column_and_dir(field: SortField, order: SortOrder) -> (&'static str, &'static str) {
    let dir = match order {
        SortOrder::Desc => "DESC",
        SortOrder::Asc => "ASC",
    };
    (
        match field {
            SortField::UpdatedAt => "i.updated_at",
            SortField::Iid => "i.iid",
            SortField::Title => "i.title",
            SortField::State => "i.state",
            SortField::Author => "i.author_username",
        },
        dir,
    )
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Create the minimal schema needed for issue list queries.
    fn create_issue_list_schema(conn: &Connection) {
        conn.execute_batch(
            "
            CREATE TABLE projects (
                id INTEGER PRIMARY KEY,
                gitlab_project_id INTEGER UNIQUE NOT NULL,
                path_with_namespace TEXT NOT NULL
            );
            CREATE TABLE issues (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT,
                state TEXT NOT NULL,
                author_username TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE labels (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER,
                project_id INTEGER NOT NULL,
                name TEXT NOT NULL,
                color TEXT,
                description TEXT
            );
            CREATE TABLE issue_labels (
                issue_id INTEGER NOT NULL,
                label_id INTEGER NOT NULL,
                PRIMARY KEY(issue_id, label_id)
            );
            ",
        )
        .expect("create issue list schema");
    }

    /// Insert a test issue; `updated_at` doubles as created/last-seen time.
    fn insert_issue_full(conn: &Connection, iid: i64, state: &str, author: &str, updated_at: i64) {
        conn.execute(
            "INSERT INTO issues (gitlab_id, project_id, iid, title, state, author_username, created_at, updated_at, last_seen_at)
             VALUES (?1, 1, ?2, ?3, ?4, ?5, ?6, ?6, ?6)",
            rusqlite::params![
                iid * 100,
                iid,
                format!("Issue {iid}"),
                state,
                author,
                updated_at
            ],
        )
        .expect("insert issue full");
    }

    /// Attach a label (created on demand) to the issue with `issue_iid`.
    fn attach_label(conn: &Connection, issue_iid: i64, label_name: &str) {
        let issue_id: i64 = conn
            .query_row("SELECT id FROM issues WHERE iid = ?", [issue_iid], |r| {
                r.get(0)
            })
            .expect("find issue");
        conn.execute(
            "INSERT OR IGNORE INTO labels (project_id, name) VALUES (1, ?)",
            [label_name],
        )
        .expect("insert label");
        let label_id: i64 = conn
            .query_row("SELECT id FROM labels WHERE name = ?", [label_name], |r| {
                r.get(0)
            })
            .expect("find label");
        conn.execute(
            "INSERT INTO issue_labels (issue_id, label_id) VALUES (?, ?)",
            [issue_id, label_id],
        )
        .expect("attach label");
    }

    /// Five issues with strictly descending `updated_at` (iid 1 is newest),
    /// plus a handful of labels.
    fn setup_issue_list_data(conn: &Connection) {
        let base = 1_700_000_000_000_i64;
        conn.execute(
            "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'group/project')",
            [],
        )
        .unwrap();
        for (iid, state, author) in [
            (1, "opened", "alice"),
            (2, "opened", "bob"),
            (3, "closed", "alice"),
            (4, "opened", "charlie"),
            (5, "closed", "bob"),
        ] {
            insert_issue_full(conn, iid, state, author, base - iid * 10_000);
        }
        for (iid, label) in [(1, "bug"), (1, "critical"), (2, "feature"), (4, "bug")] {
            attach_label(conn, iid, label);
        }
    }

    /// Fresh in-memory DB with schema and seed data applied.
    fn seeded() -> Connection {
        let conn = Connection::open_in_memory().unwrap();
        create_issue_list_schema(&conn);
        setup_issue_list_data(&conn);
        conn
    }

    /// Fetch the first page sorted by `updated_at` (no cursor).
    fn first_page(
        conn: &Connection,
        filter: &IssueFilter,
        order: SortOrder,
        fence: Option<i64>,
    ) -> IssueListPage {
        fetch_issue_list(conn, filter, SortField::UpdatedAt, order, None, fence).unwrap()
    }

    #[test]
    fn test_fetch_issue_list_basic() {
        let conn = seeded();
        let page = first_page(&conn, &IssueFilter::default(), SortOrder::Desc, None);
        assert_eq!(page.total_count, 5);
        assert_eq!(page.rows.len(), 5);
        // Newest first.
        assert_eq!(page.rows[0].iid, 1);
        assert_eq!(page.rows[4].iid, 5);
        assert!(page.next_cursor.is_none());
    }

    #[test]
    fn test_fetch_issue_list_filter_state() {
        let conn = seeded();
        let filter = IssueFilter {
            state: Some("opened".into()),
            ..Default::default()
        };
        let page = first_page(&conn, &filter, SortOrder::Desc, None);
        assert_eq!(page.total_count, 3);
        assert_eq!(page.rows.len(), 3);
        assert!(page.rows.iter().all(|r| r.state == "opened"));
    }

    #[test]
    fn test_fetch_issue_list_filter_author() {
        let conn = seeded();
        let filter = IssueFilter {
            author: Some("alice".into()),
            ..Default::default()
        };
        let page = first_page(&conn, &filter, SortOrder::Desc, None);
        assert_eq!(page.total_count, 2);
        assert_eq!(page.rows.len(), 2);
        assert!(page.rows.iter().all(|r| r.author == "alice"));
    }

    #[test]
    fn test_fetch_issue_list_filter_label() {
        let conn = seeded();
        let filter = IssueFilter {
            label: Some("bug".into()),
            ..Default::default()
        };
        let page = first_page(&conn, &filter, SortOrder::Desc, None);
        assert_eq!(page.total_count, 2); // issues 1 and 4
        assert_eq!(page.rows.len(), 2);
    }

    #[test]
    fn test_fetch_issue_list_labels_aggregated() {
        let conn = seeded();
        let page = first_page(&conn, &IssueFilter::default(), SortOrder::Desc, None);
        // Issue 1 has labels "bug" and "critical".
        let issue1 = page.rows.iter().find(|r| r.iid == 1).unwrap();
        assert_eq!(issue1.labels.len(), 2);
        assert!(issue1.labels.contains(&"bug".to_string()));
        assert!(issue1.labels.contains(&"critical".to_string()));
        // Issue 5 has no labels.
        let issue5 = page.rows.iter().find(|r| r.iid == 5).unwrap();
        assert!(issue5.labels.is_empty());
    }

    #[test]
    fn test_fetch_issue_list_sort_ascending() {
        let conn = seeded();
        let page = first_page(&conn, &IssueFilter::default(), SortOrder::Asc, None);
        // Oldest first.
        assert_eq!(page.rows[0].iid, 5);
        assert_eq!(page.rows[4].iid, 1);
    }

    #[test]
    fn test_fetch_issue_list_snapshot_fence() {
        let conn = seeded();
        let base = 1_700_000_000_000_i64;
        // Fence at base-25000 excludes issues 1 (base-10000) and 2 (base-20000).
        let fence = base - 25_000;
        let page = first_page(&conn, &IssueFilter::default(), SortOrder::Desc, Some(fence));
        assert_eq!(page.total_count, 3);
        assert_eq!(page.rows.len(), 3);
        assert!(page.rows.iter().all(|r| r.updated_at <= fence));
    }

    #[test]
    fn test_fetch_issue_list_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_issue_list_schema(&conn);
        conn.execute(
            "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'g/p')",
            [],
        )
        .unwrap();
        let page = first_page(&conn, &IssueFilter::default(), SortOrder::Desc, None);
        assert_eq!(page.total_count, 0);
        assert!(page.rows.is_empty());
        assert!(page.next_cursor.is_none());
    }

    #[test]
    fn test_fetch_issue_list_free_text() {
        let conn = seeded();
        let filter = IssueFilter {
            free_text: Some("Issue 3".into()),
            ..Default::default()
        };
        let page = first_page(&conn, &filter, SortOrder::Desc, None);
        assert_eq!(page.total_count, 1);
        assert_eq!(page.rows[0].iid, 3);
    }
}

View File

@@ -0,0 +1,29 @@
//! Action layer — pure data-fetching functions for TUI screens.
//!
//! Actions query the local SQLite database and return data structs.
//! They never touch terminal state, never spawn tasks, and use injected
//! [`Clock`] for time calculations (deterministic tests).

// One private module per screen/feature.
mod bootstrap;
mod dashboard;
mod file_history;
mod issue_detail;
mod issue_list;
mod mr_detail;
mod mr_list;
mod search;
mod timeline;
mod trace;
mod who;

// Re-exported flat so call sites can write `crate::action::<fn>` without
// caring which screen module owns a given fetcher.
pub use bootstrap::*;
pub use dashboard::*;
pub use file_history::*;
pub use issue_detail::*;
pub use issue_list::*;
pub use mr_detail::*;
pub use mr_list::*;
pub use search::*;
pub use timeline::*;
pub use trace::*;
pub use who::*;

View File

@@ -0,0 +1,694 @@
#![allow(dead_code)]
use anyhow::{Context, Result};
use rusqlite::Connection;
use crate::message::EntityKey;
use crate::state::mr_detail::{FileChange, FileChangeType, MrDetailData, MrMetadata};
use crate::view::common::cross_ref::{CrossRef, CrossRefKind};
use crate::view::common::discussion_tree::{DiscussionNode, NoteNode};
/// Fetch MR metadata + cross-refs + file changes (Phase 1 composite).
pub fn fetch_mr_detail(conn: &Connection, key: &EntityKey) -> Result<MrDetailData> {
    // Load the three sections in the same order as sequential calls would.
    Ok(MrDetailData {
        metadata: fetch_mr_metadata(conn, key)?,
        cross_refs: fetch_mr_cross_refs(conn, key)?,
        file_changes: fetch_mr_file_changes(conn, key)?,
    })
}
/// Fetch MR metadata from the local DB.
///
/// One composite row query supplies the scalar columns (plus discussion
/// and file-change counts via correlated subqueries); the three list
/// fields (assignees, reviewers, labels) live in side tables and are
/// fetched afterwards with a shared helper.
///
/// # Errors
/// Fails if the MR row does not exist or any query cannot be prepared/read.
fn fetch_mr_metadata(conn: &Connection, key: &EntityKey) -> Result<MrMetadata> {
    let mut metadata = conn
        .query_row(
            "SELECT m.iid, p.path_with_namespace, m.title,
COALESCE(m.description, ''), m.state, m.draft,
m.author_username, m.source_branch, m.target_branch,
COALESCE(m.detailed_merge_status, ''),
m.created_at, m.updated_at, m.merged_at,
COALESCE(m.web_url, ''),
(SELECT COUNT(*) FROM discussions d WHERE d.merge_request_id = m.id) AS disc_count,
(SELECT COUNT(*) FROM mr_file_changes fc WHERE fc.merge_request_id = m.id) AS fc_count
FROM merge_requests m
JOIN projects p ON p.id = m.project_id
WHERE m.project_id = ?1 AND m.iid = ?2",
            rusqlite::params![key.project_id, key.iid],
            |row| {
                Ok(MrMetadata {
                    iid: row.get(0)?,
                    project_path: row.get(1)?,
                    title: row.get::<_, Option<String>>(2)?.unwrap_or_default(),
                    description: row.get(3)?,
                    state: row.get::<_, Option<String>>(4)?.unwrap_or_default(),
                    draft: row.get(5)?,
                    author: row.get::<_, Option<String>>(6)?.unwrap_or_default(),
                    // Filled in below from the side tables.
                    assignees: Vec::new(),
                    reviewers: Vec::new(),
                    labels: Vec::new(),
                    source_branch: row.get::<_, Option<String>>(7)?.unwrap_or_default(),
                    target_branch: row.get::<_, Option<String>>(8)?.unwrap_or_default(),
                    merge_status: row.get(9)?,
                    created_at: row.get(10)?,
                    updated_at: row.get(11)?,
                    merged_at: row.get(12)?,
                    web_url: row.get(13)?,
                    discussion_count: row.get::<_, i64>(14)? as usize,
                    file_change_count: row.get::<_, i64>(15)? as usize,
                })
            },
        )
        .context("fetching MR metadata")?;
    // The three list queries were previously three near-identical
    // prepare/query_map/collect stanzas; dedupe them behind one helper.
    metadata.assignees = fetch_string_list(
        conn,
        key,
        "SELECT username FROM mr_assignees
WHERE merge_request_id = (
SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2
)
ORDER BY username",
        "assignees",
    )?;
    metadata.reviewers = fetch_string_list(
        conn,
        key,
        "SELECT username FROM mr_reviewers
WHERE merge_request_id = (
SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2
)
ORDER BY username",
        "reviewers",
    )?;
    metadata.labels = fetch_string_list(
        conn,
        key,
        "SELECT l.name FROM mr_labels ml
JOIN labels l ON ml.label_id = l.id
WHERE ml.merge_request_id = (
SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2
)
ORDER BY l.name",
        "labels",
    )?;
    Ok(metadata)
}

/// Run a query parameterized on `(project_id, iid)` that returns a single
/// TEXT column, collecting it into a `Vec<String>`.
///
/// `what` is a short noun used only in error context (e.g. "assignees").
fn fetch_string_list(
    conn: &Connection,
    key: &EntityKey,
    sql: &str,
    what: &str,
) -> Result<Vec<String>> {
    let mut stmt = conn
        .prepare(sql)
        .with_context(|| format!("preparing {what} query"))?;
    let values = stmt
        .query_map(rusqlite::params![key.project_id, key.iid], |row| row.get(0))
        .with_context(|| format!("fetching {what}"))?
        .collect::<std::result::Result<Vec<_>, _>>()
        .with_context(|| format!("reading {what} row"))?;
    Ok(values)
}
/// Fetch cross-references for an MR.
///
/// Reads `entity_references` rows whose source is this MR, pulling in the
/// target's title (issue or merge request) and its local project id via
/// correlated subqueries. `navigable` is true only when that project id
/// resolved — i.e. the target row actually exists in the local DB.
fn fetch_mr_cross_refs(conn: &Connection, key: &EntityKey) -> Result<Vec<CrossRef>> {
    let mut stmt = conn
        .prepare(
            "SELECT er.reference_type, er.target_entity_type,
er.target_entity_id, er.target_entity_iid,
er.target_project_path,
CASE
WHEN er.target_entity_type = 'issue'
THEN (SELECT title FROM issues WHERE id = er.target_entity_id)
WHEN er.target_entity_type = 'merge_request'
THEN (SELECT title FROM merge_requests WHERE id = er.target_entity_id)
ELSE NULL
END as entity_title,
CASE
WHEN er.target_entity_id IS NOT NULL
THEN (SELECT project_id FROM issues WHERE id = er.target_entity_id
UNION ALL
SELECT project_id FROM merge_requests WHERE id = er.target_entity_id
LIMIT 1)
ELSE NULL
END as target_project_id
FROM entity_references er
WHERE er.source_entity_type = 'merge_request'
AND er.source_entity_id = (SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2)
ORDER BY er.reference_type, er.target_entity_iid",
        )
        .context("preparing MR cross-refs query")?;
    let refs: Vec<CrossRef> = stmt
        .query_map(rusqlite::params![key.project_id, key.iid], |row| {
            let ref_type: String = row.get(0)?;
            let target_type: String = row.get(1)?;
            let _target_id: Option<i64> = row.get(2)?;
            let target_iid: Option<i64> = row.get(3)?;
            let target_path: Option<String> = row.get(4)?;
            let title: Option<String> = row.get(5)?;
            let target_project_id: Option<i64> = row.get(6)?;
            // Map (reference_type, target_type) onto the UI's cross-ref
            // kinds; anything unrecognized renders as a plain mention.
            let kind = match (ref_type.as_str(), target_type.as_str()) {
                ("closes", "issue") => CrossRefKind::ClosingMr,
                ("related", "issue") => CrossRefKind::RelatedIssue,
                _ => CrossRefKind::MentionedIn,
            };
            let iid = target_iid.unwrap_or(0);
            // Targets not synced locally fall back to the source project id;
            // `navigable` stays false for them (see struct literal below).
            let project_id = target_project_id.unwrap_or(key.project_id);
            let entity_key = match target_type.as_str() {
                "merge_request" => EntityKey::mr(project_id, iid),
                _ => EntityKey::issue(project_id, iid),
            };
            // Label: prefer the target's title; otherwise fall back to a
            // reference-style string — `path!iid` for MRs, `path#iid` for
            // issues (bare `!iid`/`#iid` when no path is recorded).
            let label = title.unwrap_or_else(|| {
                let prefix = if target_type == "merge_request" {
                    "!"
                } else {
                    "#"
                };
                let path = target_path.clone().unwrap_or_default();
                if path.is_empty() {
                    format!("{prefix}{iid}")
                } else {
                    format!("{path}{prefix}{iid}")
                }
            });
            Ok(CrossRef {
                kind,
                entity_key,
                label,
                navigable: target_project_id.is_some(),
            })
        })
        .context("fetching MR cross-refs")?
        .collect::<std::result::Result<Vec<_>, _>>()
        .context("reading cross-ref row")?;
    Ok(refs)
}
/// Fetch file changes for an MR, ordered by new path.
///
/// # Errors
/// Fails if the query cannot be prepared or a row cannot be read.
fn fetch_mr_file_changes(conn: &Connection, key: &EntityKey) -> Result<Vec<FileChange>> {
    let mut stmt = conn
        .prepare(
            "SELECT fc.old_path, fc.new_path, fc.change_type
FROM mr_file_changes fc
WHERE fc.merge_request_id = (
SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2
)
ORDER BY fc.new_path",
        )
        .context("preparing file changes query")?;
    let changes: Vec<FileChange> = stmt
        .query_map(rusqlite::params![key.project_id, key.iid], |row| {
            // `change_type` is NOT NULL in the schema; propagate a read
            // error with `?` rather than silently defaulting — the previous
            // `.unwrap_or_default()` masked type/NULL mismatches by parsing
            // an empty string instead of surfacing the DB error.
            let raw_change_type: String = row.get(2)?;
            Ok(FileChange {
                old_path: row.get(0)?,
                new_path: row.get(1)?,
                change_type: FileChangeType::parse_db(&raw_change_type),
            })
        })
        .context("fetching file changes")?
        .collect::<std::result::Result<Vec<_>, _>>()
        .context("reading file change row")?;
    Ok(changes)
}
/// Fetch discussions for an MR (Phase 2 async load).
///
/// Looks up the MR's row id once, loads the discussion headers in
/// first-note order, then fetches each discussion's notes with a reused
/// prepared statement.
pub fn fetch_mr_discussions(conn: &Connection, key: &EntityKey) -> Result<Vec<DiscussionNode>> {
    let mr_row_id: i64 = conn
        .query_row(
            "SELECT id FROM merge_requests WHERE project_id = ?1 AND iid = ?2",
            rusqlite::params![key.project_id, key.iid],
            |row| row.get(0),
        )
        .context("looking up MR id for discussions")?;

    let mut discussion_query = conn
        .prepare(
            "SELECT d.id, d.gitlab_discussion_id, d.resolvable, d.resolved
FROM discussions d
WHERE d.merge_request_id = ?1
ORDER BY d.first_note_at ASC",
        )
        .context("preparing MR discussions query")?;
    let mut notes_query = conn
        .prepare(
            "SELECT n.author_username, n.body, n.created_at, n.is_system,
n.note_type, n.position_new_path, n.position_new_line
FROM notes n
WHERE n.discussion_id = ?1
ORDER BY n.position ASC, n.created_at ASC",
        )
        .context("preparing MR notes query")?;

    // (db row id, gitlab discussion id, resolvable, resolved)
    let headers: Vec<(i64, String, bool, bool)> = discussion_query
        .query_map(rusqlite::params![mr_row_id], |row| {
            Ok((row.get(0)?, row.get(1)?, row.get(2)?, row.get(3)?))
        })
        .context("fetching MR discussions")?
        .collect::<std::result::Result<Vec<_>, _>>()
        .context("reading discussion row")?;

    let mut result = Vec::with_capacity(headers.len());
    for (row_id, discussion_id, resolvable, resolved) in headers {
        let notes: Vec<NoteNode> = notes_query
            .query_map(rusqlite::params![row_id], |row| {
                Ok(NoteNode {
                    author: row.get::<_, Option<String>>(0)?.unwrap_or_default(),
                    body: row.get::<_, Option<String>>(1)?.unwrap_or_default(),
                    created_at: row.get(2)?,
                    is_system: row.get(3)?,
                    is_diff_note: row.get::<_, Option<String>>(4)?.as_deref() == Some("DiffNote"),
                    diff_file_path: row.get(5)?,
                    diff_new_line: row.get(6)?,
                })
            })
            .context("fetching notes")?
            .collect::<std::result::Result<Vec<_>, _>>()
            .context("reading note row")?;
        result.push(DiscussionNode {
            discussion_id,
            notes,
            resolvable,
            resolved,
        });
    }
    Ok(result)
}
#[cfg(test)]
mod tests {
use super::*;
    /// Create the minimal schema the issue/MR detail queries touch:
    /// projects, issues (+assignees/labels), discussions, notes,
    /// entity_references, and a bare merge_requests table that
    /// `create_mr_detail_schema` later extends.
    fn create_issue_detail_schema(conn: &Connection) {
        conn.execute_batch(
            "
CREATE TABLE projects (
id INTEGER PRIMARY KEY,
gitlab_project_id INTEGER UNIQUE NOT NULL,
path_with_namespace TEXT NOT NULL
);
CREATE TABLE issues (
id INTEGER PRIMARY KEY,
gitlab_id INTEGER UNIQUE NOT NULL,
project_id INTEGER NOT NULL,
iid INTEGER NOT NULL,
title TEXT NOT NULL,
description TEXT,
state TEXT NOT NULL DEFAULT 'opened',
author_username TEXT,
milestone_title TEXT,
due_date TEXT,
created_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL,
web_url TEXT,
UNIQUE(project_id, iid)
);
CREATE TABLE issue_assignees (
issue_id INTEGER NOT NULL,
username TEXT NOT NULL,
UNIQUE(issue_id, username)
);
CREATE TABLE labels (
id INTEGER PRIMARY KEY,
project_id INTEGER NOT NULL,
name TEXT NOT NULL
);
CREATE TABLE issue_labels (
issue_id INTEGER NOT NULL,
label_id INTEGER NOT NULL,
UNIQUE(issue_id, label_id)
);
CREATE TABLE discussions (
id INTEGER PRIMARY KEY,
gitlab_discussion_id TEXT NOT NULL,
project_id INTEGER NOT NULL,
issue_id INTEGER,
merge_request_id INTEGER,
noteable_type TEXT NOT NULL,
resolvable INTEGER NOT NULL DEFAULT 0,
resolved INTEGER NOT NULL DEFAULT 0,
first_note_at INTEGER
);
CREATE TABLE notes (
id INTEGER PRIMARY KEY,
gitlab_id INTEGER UNIQUE NOT NULL,
discussion_id INTEGER NOT NULL,
project_id INTEGER NOT NULL,
note_type TEXT,
is_system INTEGER NOT NULL DEFAULT 0,
author_username TEXT,
body TEXT,
created_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL,
position INTEGER,
position_new_path TEXT,
position_new_line INTEGER
);
CREATE TABLE entity_references (
id INTEGER PRIMARY KEY,
project_id INTEGER NOT NULL,
source_entity_type TEXT NOT NULL,
source_entity_id INTEGER NOT NULL,
target_entity_type TEXT NOT NULL,
target_entity_id INTEGER,
target_project_path TEXT,
target_entity_iid INTEGER,
reference_type TEXT NOT NULL,
source_method TEXT NOT NULL DEFAULT 'api',
created_at INTEGER NOT NULL DEFAULT 0
);
CREATE TABLE merge_requests (
id INTEGER PRIMARY KEY,
gitlab_id INTEGER UNIQUE NOT NULL,
project_id INTEGER NOT NULL,
iid INTEGER NOT NULL,
title TEXT NOT NULL,
state TEXT NOT NULL DEFAULT 'opened',
UNIQUE(project_id, iid)
);
",
        )
        .unwrap();
    }
    /// Extend the base issue schema with the MR detail columns and the
    /// MR side tables (assignees, reviewers, labels, file changes).
    fn create_mr_detail_schema(conn: &Connection) {
        create_issue_detail_schema(conn);
        // Add MR-specific columns and tables on top of the base schema.
        conn.execute_batch(
            "
-- Add columns to merge_requests that the detail query needs.
ALTER TABLE merge_requests ADD COLUMN description TEXT;
ALTER TABLE merge_requests ADD COLUMN draft INTEGER NOT NULL DEFAULT 0;
ALTER TABLE merge_requests ADD COLUMN author_username TEXT;
ALTER TABLE merge_requests ADD COLUMN source_branch TEXT;
ALTER TABLE merge_requests ADD COLUMN target_branch TEXT;
ALTER TABLE merge_requests ADD COLUMN detailed_merge_status TEXT;
ALTER TABLE merge_requests ADD COLUMN created_at INTEGER;
ALTER TABLE merge_requests ADD COLUMN updated_at INTEGER;
ALTER TABLE merge_requests ADD COLUMN merged_at INTEGER;
ALTER TABLE merge_requests ADD COLUMN web_url TEXT;
CREATE TABLE mr_assignees (
merge_request_id INTEGER NOT NULL,
username TEXT NOT NULL,
UNIQUE(merge_request_id, username)
);
CREATE TABLE mr_reviewers (
merge_request_id INTEGER NOT NULL,
username TEXT NOT NULL,
UNIQUE(merge_request_id, username)
);
CREATE TABLE mr_labels (
merge_request_id INTEGER NOT NULL,
label_id INTEGER NOT NULL,
UNIQUE(merge_request_id, label_id)
);
CREATE TABLE mr_file_changes (
id INTEGER PRIMARY KEY,
merge_request_id INTEGER NOT NULL,
project_id INTEGER NOT NULL,
old_path TEXT,
new_path TEXT NOT NULL,
change_type TEXT NOT NULL
);
",
        )
        .unwrap();
    }
    /// Seed fixture data for MR iid 10 in project 1: one assignee (bob),
    /// one reviewer (carol), one label (backend), three file changes
    /// (modified / added / renamed), one resolvable diff discussion with
    /// a single note, and a `closes` reference to issue #5.
    fn setup_mr_detail_data(conn: &Connection) {
        // Project (if not already inserted).
        conn.execute(
            "INSERT OR IGNORE INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'group/project')",
            [],
        )
        .unwrap();
        // MR.
        conn.execute(
            "INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, description, state, draft, author_username, source_branch, target_branch, detailed_merge_status, created_at, updated_at, merged_at, web_url)
VALUES (1, 2000, 1, 10, 'Fix auth flow', 'MR description', 'opened', 0, 'alice', 'fix-auth', 'main', 'mergeable', 1700000000000, 1700000060000, NULL, 'https://gitlab.com/group/project/-/merge_requests/10')",
            [],
        )
        .unwrap();
        // Assignees.
        conn.execute(
            "INSERT INTO mr_assignees (merge_request_id, username) VALUES (1, 'bob')",
            [],
        )
        .unwrap();
        // Reviewers.
        conn.execute(
            "INSERT INTO mr_reviewers (merge_request_id, username) VALUES (1, 'carol')",
            [],
        )
        .unwrap();
        // Labels.
        conn.execute(
            "INSERT OR IGNORE INTO labels (id, project_id, name) VALUES (10, 1, 'backend')",
            [],
        )
        .unwrap();
        conn.execute(
            "INSERT INTO mr_labels (merge_request_id, label_id) VALUES (1, 10)",
            [],
        )
        .unwrap();
        // File changes.
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, old_path, new_path, change_type)
VALUES (1, 1, NULL, 'src/auth.rs', 'modified')",
            [],
        )
        .unwrap();
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, old_path, new_path, change_type)
VALUES (1, 1, NULL, 'src/lib.rs', 'added')",
            [],
        )
        .unwrap();
        conn.execute(
            "INSERT INTO mr_file_changes (merge_request_id, project_id, old_path, new_path, change_type)
VALUES (1, 1, 'src/old.rs', 'src/new.rs', 'renamed')",
            [],
        )
        .unwrap();
        // Discussion with a note.
        conn.execute(
            "INSERT INTO discussions (id, gitlab_discussion_id, project_id, merge_request_id, noteable_type, resolvable, resolved, first_note_at)
VALUES (1, 'mr_disc_1', 1, 1, 'MergeRequest', 1, 0, 1700000010000)",
            [],
        )
        .unwrap();
        conn.execute(
            "INSERT INTO notes (id, gitlab_id, discussion_id, project_id, note_type, is_system, author_username, body, created_at, updated_at, position, position_new_path, position_new_line)
VALUES (1, 5001, 1, 1, 'DiffNote', 0, 'alice', 'Please fix this', 1700000010000, 1700000010000, 0, 'src/auth.rs', 42)",
            [],
        )
        .unwrap();
        // Cross-reference (MR closes issue).
        conn.execute(
            "INSERT INTO issues (id, gitlab_id, project_id, iid, title, state, created_at, updated_at)
VALUES (1, 1000, 1, 5, 'Auth bug', 'opened', 0, 0)",
            [],
        )
        .unwrap();
        conn.execute(
            "INSERT INTO entity_references (project_id, source_entity_type, source_entity_id, target_entity_type, target_entity_id, target_project_path, target_entity_iid, reference_type, source_method)
VALUES (1, 'merge_request', 1, 'issue', 1, 'group/project', 5, 'closes', 'api')",
            [],
        )
        .unwrap();
    }
#[test]
fn test_fetch_mr_detail_basic_metadata() {
let conn = Connection::open_in_memory().unwrap();
create_mr_detail_schema(&conn);
setup_mr_detail_data(&conn);
let key = EntityKey::mr(1, 10);
let data = fetch_mr_detail(&conn, &key).unwrap();
assert_eq!(data.metadata.iid, 10);
assert_eq!(data.metadata.title, "Fix auth flow");
assert_eq!(data.metadata.description, "MR description");
assert_eq!(data.metadata.state, "opened");
assert!(!data.metadata.draft);
assert_eq!(data.metadata.author, "alice");
assert_eq!(data.metadata.source_branch, "fix-auth");
assert_eq!(data.metadata.target_branch, "main");
assert_eq!(data.metadata.merge_status, "mergeable");
assert!(data.metadata.merged_at.is_none());
assert_eq!(
data.metadata.web_url,
"https://gitlab.com/group/project/-/merge_requests/10"
);
}
#[test]
fn test_fetch_mr_detail_assignees_reviewers_labels() {
let conn = Connection::open_in_memory().unwrap();
create_mr_detail_schema(&conn);
setup_mr_detail_data(&conn);
let key = EntityKey::mr(1, 10);
let data = fetch_mr_detail(&conn, &key).unwrap();
assert_eq!(data.metadata.assignees, vec!["bob"]);
assert_eq!(data.metadata.reviewers, vec!["carol"]);
assert_eq!(data.metadata.labels, vec!["backend"]);
}
#[test]
fn test_fetch_mr_detail_file_changes() {
let conn = Connection::open_in_memory().unwrap();
create_mr_detail_schema(&conn);
setup_mr_detail_data(&conn);
let key = EntityKey::mr(1, 10);
let data = fetch_mr_detail(&conn, &key).unwrap();
assert_eq!(data.file_changes.len(), 3);
assert_eq!(data.metadata.file_change_count, 3);
// Ordered by new_path.
assert_eq!(data.file_changes[0].new_path, "src/auth.rs");
assert_eq!(data.file_changes[0].change_type, FileChangeType::Modified);
assert_eq!(data.file_changes[1].new_path, "src/lib.rs");
assert_eq!(data.file_changes[1].change_type, FileChangeType::Added);
assert_eq!(data.file_changes[2].new_path, "src/new.rs");
assert_eq!(data.file_changes[2].change_type, FileChangeType::Renamed);
assert_eq!(data.file_changes[2].old_path.as_deref(), Some("src/old.rs"));
}
#[test]
fn test_fetch_mr_detail_cross_refs() {
let conn = Connection::open_in_memory().unwrap();
create_mr_detail_schema(&conn);
setup_mr_detail_data(&conn);
let key = EntityKey::mr(1, 10);
let data = fetch_mr_detail(&conn, &key).unwrap();
assert_eq!(data.cross_refs.len(), 1);
assert_eq!(data.cross_refs[0].kind, CrossRefKind::ClosingMr);
assert_eq!(data.cross_refs[0].label, "Auth bug");
}
#[test]
fn test_fetch_mr_discussions() {
let conn = Connection::open_in_memory().unwrap();
create_mr_detail_schema(&conn);
setup_mr_detail_data(&conn);
let key = EntityKey::mr(1, 10);
let discussions = fetch_mr_discussions(&conn, &key).unwrap();
assert_eq!(discussions.len(), 1);
assert_eq!(discussions[0].discussion_id, "mr_disc_1");
assert!(discussions[0].resolvable);
assert!(!discussions[0].resolved);
assert_eq!(discussions[0].notes.len(), 1);
assert_eq!(discussions[0].notes[0].author, "alice");
assert_eq!(discussions[0].notes[0].body, "Please fix this");
assert!(discussions[0].notes[0].is_diff_note);
assert_eq!(
discussions[0].notes[0].diff_file_path.as_deref(),
Some("src/auth.rs")
);
assert_eq!(discussions[0].notes[0].diff_new_line, Some(42));
}
#[test]
fn test_fetch_mr_detail_not_found() {
let conn = Connection::open_in_memory().unwrap();
create_mr_detail_schema(&conn);
// Insert project but no MR.
conn.execute(
"INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'g/p')",
[],
)
.unwrap();
let key = EntityKey::mr(1, 99);
assert!(fetch_mr_detail(&conn, &key).is_err());
}
#[test]
fn test_fetch_mr_detail_no_file_changes() {
let conn = Connection::open_in_memory().unwrap();
create_mr_detail_schema(&conn);
conn.execute(
"INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'g/p')",
[],
)
.unwrap();
conn.execute(
"INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state, created_at, updated_at, web_url)
VALUES (1, 2000, 1, 10, 'Empty MR', 'opened', 0, 0, '')",
[],
)
.unwrap();
let key = EntityKey::mr(1, 10);
let data = fetch_mr_detail(&conn, &key).unwrap();
assert!(data.file_changes.is_empty());
assert_eq!(data.metadata.file_change_count, 0);
}
#[test]
fn test_fetch_mr_detail_draft() {
let conn = Connection::open_in_memory().unwrap();
create_mr_detail_schema(&conn);
conn.execute(
"INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'g/p')",
[],
)
.unwrap();
conn.execute(
"INSERT INTO merge_requests (id, gitlab_id, project_id, iid, title, state, draft, created_at, updated_at, web_url)
VALUES (1, 2000, 1, 10, 'Draft: WIP', 'opened', 1, 0, 0, '')",
[],
)
.unwrap();
let key = EntityKey::mr(1, 10);
let data = fetch_mr_detail(&conn, &key).unwrap();
assert!(data.metadata.draft);
}
}

View File

@@ -0,0 +1,629 @@
#![allow(dead_code)]
use anyhow::{Context, Result};
use rusqlite::Connection;
use crate::state::mr_list::{MrCursor, MrFilter, MrListPage, MrListRow, MrSortField, MrSortOrder};
/// Page size for MR list queries.
///
/// `fetch_mr_list` requests `MR_PAGE_SIZE + 1` rows so it can detect a
/// next page without issuing a second count query.
const MR_PAGE_SIZE: usize = 50;
/// Fetch a page of merge requests matching the given filter and sort.
///
/// Uses keyset pagination and snapshot fence — same pattern as issues.
///
/// Returns at most [`MR_PAGE_SIZE`] rows; `next_cursor` is `Some` when a
/// further page exists, and `total_count` is the filter's full match
/// count (computed before the cursor narrows the result).
pub fn fetch_mr_list(
    conn: &Connection,
    filter: &MrFilter,
    sort_field: MrSortField,
    sort_order: MrSortOrder,
    cursor: Option<&MrCursor>,
    snapshot_fence: Option<i64>,
) -> Result<MrListPage> {
    // -- Build dynamic WHERE conditions and params --------------------------
    // Conditions and their bound values are pushed in lockstep so the
    // positional `?` placeholders line up with `params`.
    let mut conditions: Vec<String> = Vec::new();
    let mut params: Vec<Box<dyn rusqlite::types::ToSql>> = Vec::new();
    if let Some(pid) = filter.project_id {
        conditions.push("m.project_id = ?".into());
        params.push(Box::new(pid));
    }
    if let Some(ref state) = filter.state {
        conditions.push("m.state = ?".into());
        params.push(Box::new(state.clone()));
    }
    if let Some(ref author) = filter.author {
        conditions.push("m.author_username = ?".into());
        params.push(Box::new(author.clone()));
    }
    if let Some(draft) = filter.draft {
        conditions.push("m.draft = ?".into());
        params.push(Box::new(i64::from(draft)));
    }
    if let Some(ref target) = filter.target_branch {
        conditions.push("m.target_branch = ?".into());
        params.push(Box::new(target.clone()));
    }
    if let Some(ref source) = filter.source_branch {
        conditions.push("m.source_branch = ?".into());
        params.push(Box::new(source.clone()));
    }
    // Filter: reviewer (via join on mr_reviewers)
    let reviewer_join = if let Some(ref reviewer) = filter.reviewer {
        conditions.push("rv.username = ?".into());
        params.push(Box::new(reviewer.clone()));
        "JOIN mr_reviewers rv ON rv.merge_request_id = m.id"
    } else {
        ""
    };
    // Filter: label (via join on mr_labels + labels)
    // Distinct `fil`/`fl` aliases keep this filter join from colliding
    // with the `ml`/`l` LEFT JOINs used below for label aggregation.
    let label_join = if let Some(ref label) = filter.label {
        conditions.push("fl.name = ?".into());
        params.push(Box::new(label.clone()));
        "JOIN mr_labels fil ON fil.merge_request_id = m.id \
         JOIN labels fl ON fl.id = fil.label_id"
    } else {
        ""
    };
    // Filter: free_text (LIKE on title)
    if let Some(ref text) = filter.free_text {
        conditions.push("m.title LIKE ?".into());
        params.push(Box::new(format!("%{text}%")));
    }
    // Snapshot fence
    if let Some(fence) = snapshot_fence {
        conditions.push("m.updated_at <= ?".into());
        params.push(Box::new(fence));
    }
    // -- Count query (before cursor filter) ---------------------------------
    // COUNT(DISTINCT m.id) guards against row duplication introduced by
    // the reviewer/label filter joins.
    let where_clause = if conditions.is_empty() {
        String::new()
    } else {
        format!("WHERE {}", conditions.join(" AND "))
    };
    let count_sql = format!(
        "SELECT COUNT(DISTINCT m.id) FROM merge_requests m \
         JOIN projects p ON p.id = m.project_id \
         {reviewer_join} {label_join} {where_clause}"
    );
    let count_params: Vec<&dyn rusqlite::types::ToSql> =
        params.iter().map(|b| b.as_ref()).collect();
    let total_count: i64 = conn
        .query_row(&count_sql, count_params.as_slice(), |r| r.get(0))
        .context("counting MRs for list")?;
    // -- Keyset cursor condition -------------------------------------------
    // NOTE(review): `MrCursor` always carries `(updated_at, iid)`, but the
    // row-value comparison below is built against the *active* sort column —
    // paging a Title/State/Author sort would compare that column against an
    // `updated_at` value. Presumably cursors are only issued while sorted by
    // UpdatedAt; confirm at the call sites.
    let (sort_col, sort_dir) = mr_sort_column_and_dir(sort_field, sort_order);
    let cursor_op = if sort_dir == "DESC" { "<" } else { ">" };
    if let Some(c) = cursor {
        conditions.push(format!("({sort_col}, m.iid) {cursor_op} (?, ?)"));
        params.push(Box::new(c.updated_at));
        params.push(Box::new(c.iid));
    }
    // -- Data query ---------------------------------------------------------
    let where_clause_full = if conditions.is_empty() {
        String::new()
    } else {
        format!("WHERE {}", conditions.join(" AND "))
    };
    let data_sql = format!(
        "SELECT p.path_with_namespace, m.iid, m.title, m.state, \
         m.author_username, m.target_branch, m.updated_at, m.draft, \
         GROUP_CONCAT(DISTINCT l.name) AS label_names \
         FROM merge_requests m \
         JOIN projects p ON p.id = m.project_id \
         {reviewer_join} \
         {label_join} \
         LEFT JOIN mr_labels ml ON ml.merge_request_id = m.id \
         LEFT JOIN labels l ON l.id = ml.label_id \
         {where_clause_full} \
         GROUP BY m.id \
         ORDER BY {sort_col} {sort_dir}, m.iid {sort_dir} \
         LIMIT ?"
    );
    // Fetch one extra row so `has_next` can be decided without another query.
    let fetch_limit = (MR_PAGE_SIZE + 1) as i64;
    params.push(Box::new(fetch_limit));
    let all_params: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(|b| b.as_ref()).collect();
    let mut stmt = conn.prepare(&data_sql).context("preparing MR list query")?;
    let rows_result = stmt
        .query_map(all_params.as_slice(), |row| {
            let project_path: String = row.get(0)?;
            let iid: i64 = row.get(1)?;
            let title: String = row.get::<_, Option<String>>(2)?.unwrap_or_default();
            let state: String = row.get::<_, Option<String>>(3)?.unwrap_or_default();
            let author: String = row.get::<_, Option<String>>(4)?.unwrap_or_default();
            let target_branch: String = row.get::<_, Option<String>>(5)?.unwrap_or_default();
            let updated_at: i64 = row.get(6)?;
            let draft_int: i64 = row.get(7)?;
            // GROUP_CONCAT returns NULL when the MR has no labels.
            let label_names: Option<String> = row.get(8)?;
            let labels = label_names
                .map(|s| s.split(',').map(String::from).collect())
                .unwrap_or_default();
            Ok(MrListRow {
                project_path,
                iid,
                title,
                state,
                author,
                target_branch,
                labels,
                updated_at,
                draft: draft_int != 0,
            })
        })
        .context("querying MR list")?;
    let mut rows: Vec<MrListRow> = Vec::new();
    for row in rows_result {
        rows.push(row.context("reading MR list row")?);
    }
    // The (N+1)-th row is only a next-page sentinel; drop it from the page.
    let has_next = rows.len() > MR_PAGE_SIZE;
    if has_next {
        rows.truncate(MR_PAGE_SIZE);
    }
    let next_cursor = if has_next {
        rows.last().map(|r| MrCursor {
            updated_at: r.updated_at,
            iid: r.iid,
        })
    } else {
        None
    };
    #[allow(clippy::cast_sign_loss)]
    Ok(MrListPage {
        rows,
        next_cursor,
        total_count: total_count as u64,
    })
}
/// Map MR sort field + order to the SQL column name and direction keyword
/// that get spliced into the list query's ORDER BY clause.
fn mr_sort_column_and_dir(field: MrSortField, order: MrSortOrder) -> (&'static str, &'static str) {
    let column = match field {
        MrSortField::UpdatedAt => "m.updated_at",
        MrSortField::Iid => "m.iid",
        MrSortField::Title => "m.title",
        MrSortField::State => "m.state",
        MrSortField::Author => "m.author_username",
        MrSortField::TargetBranch => "m.target_branch",
    };
    let direction = match order {
        MrSortOrder::Asc => "ASC",
        MrSortOrder::Desc => "DESC",
    };
    (column, direction)
}
#[cfg(test)]
mod tests {
use super::*;
    /// Create the schema needed for MR list tests.
    ///
    /// Only the tables and columns that `fetch_mr_list` selects, filters,
    /// or joins on are modeled here.
    fn create_mr_list_schema(conn: &Connection) {
        conn.execute_batch(
            "
CREATE TABLE projects (
id INTEGER PRIMARY KEY,
gitlab_project_id INTEGER UNIQUE NOT NULL,
path_with_namespace TEXT NOT NULL
);
CREATE TABLE merge_requests (
id INTEGER PRIMARY KEY,
gitlab_id INTEGER UNIQUE NOT NULL,
project_id INTEGER NOT NULL,
iid INTEGER NOT NULL,
title TEXT,
state TEXT,
author_username TEXT,
created_at INTEGER,
updated_at INTEGER,
last_seen_at INTEGER NOT NULL,
draft INTEGER NOT NULL DEFAULT 0,
target_branch TEXT,
source_branch TEXT
);
CREATE TABLE labels (
id INTEGER PRIMARY KEY,
gitlab_id INTEGER,
project_id INTEGER NOT NULL,
name TEXT NOT NULL,
color TEXT,
description TEXT
);
CREATE TABLE mr_labels (
merge_request_id INTEGER NOT NULL,
label_id INTEGER NOT NULL,
PRIMARY KEY(merge_request_id, label_id)
);
CREATE TABLE mr_reviewers (
merge_request_id INTEGER NOT NULL,
username TEXT NOT NULL,
PRIMARY KEY(merge_request_id, username)
);
",
        )
        .expect("create MR list schema");
    }
    /// Insert a test MR with full fields.
    ///
    /// `gitlab_id` is derived as `iid * 100 + 50` to stay unique, the title
    /// is `"MR {iid}"`, and `created_at`/`updated_at`/`last_seen_at` all
    /// share the same timestamp (the reused `?8` placeholder).
    fn insert_mr_full(
        conn: &Connection,
        iid: i64,
        state: &str,
        author: &str,
        target_branch: &str,
        draft: bool,
        updated_at: i64,
    ) {
        conn.execute(
            "INSERT INTO merge_requests \
             (gitlab_id, project_id, iid, title, state, author_username, \
             target_branch, draft, created_at, updated_at, last_seen_at) \
             VALUES (?1, 1, ?2, ?3, ?4, ?5, ?6, ?7, ?8, ?8, ?8)",
            rusqlite::params![
                iid * 100 + 50,
                iid,
                format!("MR {iid}"),
                state,
                author,
                target_branch,
                i64::from(draft),
                updated_at,
            ],
        )
        .expect("insert mr full");
    }
    /// Attach a label to an MR.
    ///
    /// Resolves the MR row by iid, inserts the label row if it doesn't
    /// already exist (INSERT OR IGNORE), then links the two.
    fn attach_mr_label(conn: &Connection, mr_iid: i64, label_name: &str) {
        let mr_id: i64 = conn
            .query_row(
                "SELECT id FROM merge_requests WHERE iid = ?",
                [mr_iid],
                |r| r.get(0),
            )
            .expect("find mr");
        conn.execute(
            "INSERT OR IGNORE INTO labels (project_id, name) VALUES (1, ?)",
            [label_name],
        )
        .expect("insert label");
        let label_id: i64 = conn
            .query_row("SELECT id FROM labels WHERE name = ?", [label_name], |r| {
                r.get(0)
            })
            .expect("find label");
        conn.execute(
            "INSERT INTO mr_labels (merge_request_id, label_id) VALUES (?, ?)",
            [mr_id, label_id],
        )
        .expect("attach mr label");
    }
/// Add a reviewer to an MR.
fn add_mr_reviewer(conn: &Connection, mr_iid: i64, username: &str) {
let mr_id: i64 = conn
.query_row(
"SELECT id FROM merge_requests WHERE iid = ?",
[mr_iid],
|r| r.get(0),
)
.expect("find mr");
conn.execute(
"INSERT INTO mr_reviewers (merge_request_id, username) VALUES (?, ?)",
rusqlite::params![mr_id, username],
)
.expect("add mr reviewer");
}
    /// Seed five MRs with a spread of states, authors, branches, drafts,
    /// labels, and reviewers:
    ///
    /// | iid | state  | author  | target  | draft | labels          | reviewers |
    /// |-----|--------|---------|---------|-------|-----------------|-----------|
    /// | 1   | opened | alice   | main    | no    | backend, urgent | diana     |
    /// | 2   | opened | bob     | main    | yes   | frontend        | diana     |
    /// | 3   | merged | alice   | develop | no    | —               | edward    |
    /// | 4   | opened | charlie | main    | yes   | backend         | —         |
    /// | 5   | closed | bob     | release | no    | —               | —         |
    ///
    /// `updated_at` decreases with iid, so UpdatedAt DESC yields 1..=5.
    fn setup_mr_list_data(conn: &Connection) {
        let base = 1_700_000_000_000_i64;
        conn.execute(
            "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'group/project')",
            [],
        )
        .unwrap();
        insert_mr_full(conn, 1, "opened", "alice", "main", false, base - 10_000);
        insert_mr_full(conn, 2, "opened", "bob", "main", true, base - 20_000);
        insert_mr_full(conn, 3, "merged", "alice", "develop", false, base - 30_000);
        insert_mr_full(conn, 4, "opened", "charlie", "main", true, base - 40_000);
        insert_mr_full(conn, 5, "closed", "bob", "release", false, base - 50_000);
        attach_mr_label(conn, 1, "backend");
        attach_mr_label(conn, 1, "urgent");
        attach_mr_label(conn, 2, "frontend");
        attach_mr_label(conn, 4, "backend");
        add_mr_reviewer(conn, 1, "diana");
        add_mr_reviewer(conn, 2, "diana");
        add_mr_reviewer(conn, 3, "edward");
    }
#[test]
fn test_fetch_mr_list_basic() {
let conn = Connection::open_in_memory().unwrap();
create_mr_list_schema(&conn);
setup_mr_list_data(&conn);
let filter = MrFilter::default();
let page = fetch_mr_list(
&conn,
&filter,
MrSortField::UpdatedAt,
MrSortOrder::Desc,
None,
None,
)
.unwrap();
assert_eq!(page.total_count, 5);
assert_eq!(page.rows.len(), 5);
assert_eq!(page.rows[0].iid, 1); // newest first
assert_eq!(page.rows[4].iid, 5);
assert!(page.next_cursor.is_none());
}
#[test]
fn test_fetch_mr_list_filter_state() {
let conn = Connection::open_in_memory().unwrap();
create_mr_list_schema(&conn);
setup_mr_list_data(&conn);
let filter = MrFilter {
state: Some("opened".into()),
..Default::default()
};
let page = fetch_mr_list(
&conn,
&filter,
MrSortField::UpdatedAt,
MrSortOrder::Desc,
None,
None,
)
.unwrap();
assert_eq!(page.total_count, 3);
assert!(page.rows.iter().all(|r| r.state == "opened"));
}
#[test]
fn test_fetch_mr_list_filter_draft() {
let conn = Connection::open_in_memory().unwrap();
create_mr_list_schema(&conn);
setup_mr_list_data(&conn);
let filter = MrFilter {
draft: Some(true),
..Default::default()
};
let page = fetch_mr_list(
&conn,
&filter,
MrSortField::UpdatedAt,
MrSortOrder::Desc,
None,
None,
)
.unwrap();
assert_eq!(page.total_count, 2); // MRs 2 and 4
assert!(page.rows.iter().all(|r| r.draft));
}
#[test]
fn test_fetch_mr_list_filter_target_branch() {
let conn = Connection::open_in_memory().unwrap();
create_mr_list_schema(&conn);
setup_mr_list_data(&conn);
let filter = MrFilter {
target_branch: Some("main".into()),
..Default::default()
};
let page = fetch_mr_list(
&conn,
&filter,
MrSortField::UpdatedAt,
MrSortOrder::Desc,
None,
None,
)
.unwrap();
assert_eq!(page.total_count, 3); // MRs 1, 2, 4
assert!(page.rows.iter().all(|r| r.target_branch == "main"));
}
#[test]
fn test_fetch_mr_list_filter_reviewer() {
let conn = Connection::open_in_memory().unwrap();
create_mr_list_schema(&conn);
setup_mr_list_data(&conn);
let filter = MrFilter {
reviewer: Some("diana".into()),
..Default::default()
};
let page = fetch_mr_list(
&conn,
&filter,
MrSortField::UpdatedAt,
MrSortOrder::Desc,
None,
None,
)
.unwrap();
assert_eq!(page.total_count, 2); // MRs 1 and 2
}
#[test]
fn test_fetch_mr_list_filter_label() {
let conn = Connection::open_in_memory().unwrap();
create_mr_list_schema(&conn);
setup_mr_list_data(&conn);
let filter = MrFilter {
label: Some("backend".into()),
..Default::default()
};
let page = fetch_mr_list(
&conn,
&filter,
MrSortField::UpdatedAt,
MrSortOrder::Desc,
None,
None,
)
.unwrap();
assert_eq!(page.total_count, 2); // MRs 1 and 4
}
#[test]
fn test_fetch_mr_list_labels_aggregated() {
    let conn = Connection::open_in_memory().unwrap();
    create_mr_list_schema(&conn);
    setup_mr_list_data(&conn);
    let listing = fetch_mr_list(
        &conn,
        &MrFilter::default(),
        MrSortField::UpdatedAt,
        MrSortOrder::Desc,
        None,
        None,
    )
    .unwrap();
    // MR 1 carries two labels, both aggregated onto a single row.
    let labeled = listing.rows.iter().find(|row| row.iid == 1).unwrap();
    assert_eq!(labeled.labels.len(), 2);
    assert!(labeled.labels.contains(&"backend".to_string()));
    assert!(labeled.labels.contains(&"urgent".to_string()));
    // MR 5 has no labels and must come back with an empty list.
    let unlabeled = listing.rows.iter().find(|row| row.iid == 5).unwrap();
    assert!(unlabeled.labels.is_empty());
}
#[test]
fn test_fetch_mr_list_sort_ascending() {
    let conn = Connection::open_in_memory().unwrap();
    create_mr_list_schema(&conn);
    setup_mr_list_data(&conn);
    let listing = fetch_mr_list(
        &conn,
        &MrFilter::default(),
        MrSortField::UpdatedAt,
        MrSortOrder::Asc,
        None,
        None,
    )
    .unwrap();
    // Ascending sort puts the oldest MR (iid 5) first and the newest last.
    assert_eq!(listing.rows[0].iid, 5);
    assert_eq!(listing.rows[4].iid, 1);
}
#[test]
fn test_fetch_mr_list_snapshot_fence() {
    let conn = Connection::open_in_memory().unwrap();
    create_mr_list_schema(&conn);
    setup_mr_list_data(&conn);
    // Fence the snapshot 25s before the base timestamp: only rows updated
    // at or before the fence may appear.
    let base = 1_700_000_000_000_i64;
    let fence = base - 25_000;
    let listing = fetch_mr_list(
        &conn,
        &MrFilter::default(),
        MrSortField::UpdatedAt,
        MrSortOrder::Desc,
        None,
        Some(fence),
    )
    .unwrap();
    assert_eq!(listing.total_count, 3);
    assert!(listing.rows.iter().all(|row| row.updated_at <= fence));
}
#[test]
fn test_fetch_mr_list_empty() {
    let conn = Connection::open_in_memory().unwrap();
    create_mr_list_schema(&conn);
    // Project exists, but it has no merge requests at all.
    conn.execute(
        "INSERT INTO projects (gitlab_project_id, path_with_namespace) VALUES (1, 'g/p')",
        [],
    )
    .unwrap();
    let listing = fetch_mr_list(
        &conn,
        &MrFilter::default(),
        MrSortField::UpdatedAt,
        MrSortOrder::Desc,
        None,
        None,
    )
    .unwrap();
    assert_eq!(listing.total_count, 0);
    assert!(listing.rows.is_empty());
    assert!(listing.next_cursor.is_none());
}
#[test]
fn test_fetch_mr_list_free_text() {
    let conn = Connection::open_in_memory().unwrap();
    create_mr_list_schema(&conn);
    setup_mr_list_data(&conn);
    // Free-text search on the title "MR 3" matches exactly one row.
    let listing = fetch_mr_list(
        &conn,
        &MrFilter {
            free_text: Some("MR 3".into()),
            ..Default::default()
        },
        MrSortField::UpdatedAt,
        MrSortOrder::Desc,
        None,
        None,
    )
    .unwrap();
    assert_eq!(listing.total_count, 1);
    assert_eq!(listing.rows[0].iid, 3);
}
}

View File

@@ -0,0 +1,361 @@
#![allow(dead_code)]
use anyhow::{Context, Result};
use rusqlite::Connection;
use crate::message::{EntityKey, EntityKind, SearchMode, SearchResult};
use crate::state::search::SearchCapabilities;
/// Probe the database to detect available search indexes.
///
/// Checks for FTS5 documents and embedding metadata. Returns capabilities
/// that the UI uses to gate available search modes. All probes are
/// best-effort: a missing table simply yields "not available" rather than
/// an error.
pub fn fetch_search_capabilities(conn: &Connection) -> Result<SearchCapabilities> {
    // FTS availability: probe the `documents_fts_docsize` shadow table,
    // which is a plain B-tree, instead of scanning the virtual table.
    let has_fts = conn
        .query_row(
            "SELECT EXISTS(SELECT 1 FROM documents_fts_docsize LIMIT 1)",
            [],
            |r| r.get::<_, bool>(0),
        )
        .unwrap_or(false);
    // Embeddings: any row in embedding_metadata means vectors exist.
    let embedding_count = conn
        .query_row("SELECT COUNT(*) FROM embedding_metadata", [], |r| {
            r.get::<_, i64>(0)
        })
        .unwrap_or(0);
    // Coverage percentage = embeddings / documents, clamped to 100%.
    let doc_count = conn
        .query_row("SELECT COUNT(*) FROM documents", [], |r| r.get::<_, i64>(0))
        .unwrap_or(0);
    let embedding_coverage_pct = if doc_count > 0 {
        ((embedding_count as f32 / doc_count as f32) * 100.0).min(100.0)
    } else {
        0.0
    };
    Ok(SearchCapabilities {
        has_fts,
        has_embeddings: embedding_count > 0,
        embedding_coverage_pct,
    })
}
/// Execute a search query against the local database.
///
/// Dispatches to the correct search backend based on mode:
/// - Lexical: FTS5 only (documents_fts)
/// - Hybrid: FTS5 + vector merge via RRF
/// - Semantic: vector cosine similarity only
///
/// Returns results sorted by score descending. A blank query short-circuits
/// to an empty result set.
pub fn execute_search(
    conn: &Connection,
    query: &str,
    mode: SearchMode,
    limit: usize,
) -> Result<Vec<SearchResult>> {
    if query.trim().is_empty() {
        return Ok(Vec::new());
    }
    // Hybrid and Semantic need the full async search pipeline from the core
    // crate (Ollama client). Until that is wired in, Hybrid degrades to plain
    // FTS and Semantic-only returns nothing.
    // TODO: Wire up async search dispatch when core search is integrated.
    match mode {
        SearchMode::Lexical | SearchMode::Hybrid => execute_fts_search(conn, query, limit),
        SearchMode::Semantic => Ok(Vec::new()),
    }
}
/// FTS5 full-text search against the documents table.
///
/// Returns one [`SearchResult`] per matching document that resolves back to
/// an issue or merge request; documents of other source types and documents
/// whose parent entity no longer exists are silently skipped.
fn execute_fts_search(conn: &Connection, query: &str, limit: usize) -> Result<Vec<SearchResult>> {
    // Sanitize the query for FTS5 (escape special chars, wrap terms in quotes).
    let safe_query = sanitize_fts_query(query);
    if safe_query.is_empty() {
        return Ok(Vec::new());
    }
    // Resolve project_path via JOIN through projects table.
    // Resolve iid via JOIN through the source entity table (issues or merge_requests).
    // snippet column 1 = content_text (column 0 is title).
    // NOTE: column order is load-bearing — the query_map closure below reads
    // values by positional index. ORDER BY score is ascending on purpose:
    // bm25() returns negative scores where more-negative = better match.
    let mut stmt = conn
        .prepare(
            "SELECT d.source_type, d.source_id, d.title, d.project_id,
                    p.path_with_namespace,
                    snippet(documents_fts, 1, '>>>', '<<<', '...', 32) AS snip,
                    bm25(documents_fts) AS score,
                    COALESCE(i.iid, mr.iid) AS entity_iid
             FROM documents_fts
             JOIN documents d ON documents_fts.rowid = d.id
             JOIN projects p ON p.id = d.project_id
             LEFT JOIN issues i ON d.source_type = 'issue' AND i.id = d.source_id
             LEFT JOIN merge_requests mr ON d.source_type = 'merge_request' AND mr.id = d.source_id
             WHERE documents_fts MATCH ?1
             ORDER BY score
             LIMIT ?2",
        )
        .context("preparing FTS search query")?;
    let rows = stmt
        .query_map(rusqlite::params![safe_query, limit as i64], |row| {
            let source_type: String = row.get(0)?;
            let _source_id: i64 = row.get(1)?;
            let title: String = row.get::<_, Option<String>>(2)?.unwrap_or_default();
            let project_id: i64 = row.get(3)?;
            let project_path: String = row.get::<_, Option<String>>(4)?.unwrap_or_default();
            let snippet: String = row.get::<_, Option<String>>(5)?.unwrap_or_default();
            let score: f64 = row.get(6)?;
            let entity_iid: Option<i64> = row.get(7)?;
            Ok((
                source_type,
                project_id,
                title,
                project_path,
                snippet,
                score,
                entity_iid,
            ))
        })
        .context("executing FTS search")?;
    let mut results = Vec::new();
    for row in rows {
        let (source_type, project_id, title, project_path, snippet, score, entity_iid) =
            row.context("reading FTS search row")?;
        let kind = match source_type.as_str() {
            "issue" => EntityKind::Issue,
            "merge_request" | "mr" => EntityKind::MergeRequest,
            _ => continue, // Skip unknown source types (discussion, note).
        };
        // Skip if we couldn't resolve the entity's iid (orphaned document).
        let Some(iid) = entity_iid else {
            continue;
        };
        let key = EntityKey {
            project_id,
            iid,
            kind,
        };
        results.push(SearchResult {
            key,
            title,
            score: score.abs(), // bm25 returns negative scores; lower = better.
            snippet,
            project_path,
        });
    }
    Ok(results)
}
/// Sanitize a user query for FTS5 MATCH syntax.
///
/// Wraps individual terms in double quotes to prevent FTS5 syntax errors
/// from user-typed operators (AND, OR, NOT, *, etc.). Any quotes already
/// present in a term are stripped before re-wrapping; terms that are empty
/// after stripping are dropped. Returns an empty string for a blank query.
fn sanitize_fts_query(query: &str) -> String {
    query
        .split_whitespace()
        // filter_map avoids allocating a throwaway empty String for
        // quote-only terms (the old map-then-filter did).
        .filter_map(|term| {
            let clean = term.replace('"', "");
            (!clean.is_empty()).then(|| format!("\"{clean}\""))
        })
        .collect::<Vec<_>>()
        .join(" ")
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Create the minimal schema needed for search queries.
    ///
    /// Plain tables only — the `documents_fts` virtual table (and its shadow
    /// tables) is deliberately NOT created here, so individual tests can
    /// exercise capability probing both with and without FTS present.
    fn create_dashboard_schema(conn: &Connection) {
        conn.execute_batch(
            "
            CREATE TABLE projects (
                id INTEGER PRIMARY KEY,
                gitlab_project_id INTEGER UNIQUE NOT NULL,
                path_with_namespace TEXT NOT NULL
            );
            CREATE TABLE issues (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT,
                state TEXT NOT NULL,
                author_username TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE merge_requests (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT,
                state TEXT,
                author_username TEXT,
                created_at INTEGER,
                updated_at INTEGER,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE discussions (
                id INTEGER PRIMARY KEY,
                gitlab_discussion_id TEXT NOT NULL,
                project_id INTEGER NOT NULL,
                noteable_type TEXT NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE notes (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                discussion_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                is_system INTEGER NOT NULL DEFAULT 0,
                author_username TEXT,
                body TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE documents (
                id INTEGER PRIMARY KEY,
                source_type TEXT NOT NULL,
                source_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                content_text TEXT NOT NULL,
                content_hash TEXT NOT NULL
            );
            CREATE TABLE embedding_metadata (
                document_id INTEGER NOT NULL,
                chunk_index INTEGER NOT NULL DEFAULT 0,
                model TEXT NOT NULL,
                dims INTEGER NOT NULL,
                document_hash TEXT NOT NULL,
                chunk_hash TEXT NOT NULL,
                created_at INTEGER NOT NULL,
                PRIMARY KEY(document_id, chunk_index)
            );
            CREATE TABLE sync_runs (
                id INTEGER PRIMARY KEY,
                started_at INTEGER NOT NULL,
                heartbeat_at INTEGER NOT NULL,
                finished_at INTEGER,
                status TEXT NOT NULL,
                command TEXT NOT NULL,
                error TEXT
            );
            ",
        )
        .expect("create dashboard schema");
    }

    #[test]
    fn test_sanitize_fts_query_wraps_terms() {
        // Each whitespace-separated term is individually double-quoted.
        let result = sanitize_fts_query("hello world");
        assert_eq!(result, r#""hello" "world""#);
    }

    #[test]
    fn test_sanitize_fts_query_strips_quotes() {
        // Pre-quoted input round-trips: quotes are stripped then re-applied,
        // so sanitizing an already-sanitized query is a no-op.
        let result = sanitize_fts_query(r#""hello" "world""#);
        assert_eq!(result, r#""hello" "world""#);
    }

    #[test]
    fn test_sanitize_fts_query_empty() {
        // Blank and whitespace-only queries sanitize to the empty string.
        assert_eq!(sanitize_fts_query(""), "");
        assert_eq!(sanitize_fts_query("   "), "");
    }

    #[test]
    fn test_sanitize_fts_query_special_chars() {
        // FTS5 operators should be safely wrapped in quotes.
        let result = sanitize_fts_query("NOT AND OR");
        assert_eq!(result, r#""NOT" "AND" "OR""#);
    }

    #[test]
    fn test_fetch_search_capabilities_no_tables() {
        // No FTS virtual table and no embeddings: every capability is off.
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);
        let caps = fetch_search_capabilities(&conn).unwrap();
        assert!(!caps.has_fts);
        assert!(!caps.has_embeddings);
        assert!(!caps.has_any_index());
    }

    #[test]
    fn test_fetch_search_capabilities_with_fts() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);
        // Create FTS table and its shadow table.
        // Inserting a row populates the _docsize shadow table the probe reads.
        conn.execute_batch(
            "CREATE VIRTUAL TABLE documents_fts USING fts5(content);
             INSERT INTO documents_fts(content) VALUES ('test document');",
        )
        .unwrap();
        let caps = fetch_search_capabilities(&conn).unwrap();
        assert!(caps.has_fts);
        assert!(!caps.has_embeddings);
    }

    #[test]
    fn test_fetch_search_capabilities_with_embeddings() {
        let conn = Connection::open_in_memory().unwrap();
        create_dashboard_schema(&conn);
        // Insert a document so coverage calculation works.
        // One embedded document out of one total => coverage > 0.
        conn.execute_batch(
            "INSERT INTO documents(id, source_type, source_id, project_id, content_text, content_hash)
             VALUES (1, 'issue', 1, 1, 'body text', 'abc');
             INSERT INTO embedding_metadata(document_id, chunk_index, model, dims, document_hash, chunk_hash, created_at)
             VALUES (1, 0, 'test', 384, 'abc', 'def', 1700000000);",
        )
        .unwrap();
        let caps = fetch_search_capabilities(&conn).unwrap();
        assert!(!caps.has_fts);
        assert!(caps.has_embeddings);
        assert!(caps.embedding_coverage_pct > 0.0);
    }

    #[test]
    fn test_execute_search_empty_query_returns_empty() {
        // Empty queries short-circuit before touching the DB, so no schema
        // is needed here.
        let conn = Connection::open_in_memory().unwrap();
        let results = execute_search(&conn, "", SearchMode::Lexical, 10).unwrap();
        assert!(results.is_empty());
    }

    #[test]
    fn test_execute_search_whitespace_only_returns_empty() {
        // Whitespace-only queries are treated the same as empty ones.
        let conn = Connection::open_in_memory().unwrap();
        let results = execute_search(&conn, "   ", SearchMode::Lexical, 10).unwrap();
        assert!(results.is_empty());
    }
}

View File

@@ -0,0 +1,845 @@
#![allow(dead_code)]
use anyhow::{Context, Result};
use rusqlite::Connection;
use crate::message::{EntityKey, EntityKind, TimelineEvent, TimelineEventKind};
use crate::state::timeline::TimelineScope;
/// Internal filter resolved from a [`TimelineScope`].
///
/// Translates the user-facing scope (which uses `EntityKey` with project_id + iid)
/// into internal DB ids for efficient querying.
enum TimelineFilter {
    /// No filtering — return all events.
    All,
    /// Filter to events for a specific issue (internal `issues.id`, not iid).
    Issue(i64),
    /// Filter to events for a specific MR (internal `merge_requests.id`, not iid).
    MergeRequest(i64),
    /// Filter to events by a specific actor username (matched against
    /// `author_username` for Created events and `actor_username` for
    /// resource events).
    Actor(String),
}
/// Resolve a [`TimelineScope`] into a concrete [`TimelineFilter`].
///
/// For entity scopes, looks up the internal DB id from (project_id, iid);
/// an unknown entity surfaces as an error with a descriptive context.
fn resolve_timeline_scope(conn: &Connection, scope: &TimelineScope) -> Result<TimelineFilter> {
    match scope {
        TimelineScope::All => Ok(TimelineFilter::All),
        TimelineScope::Author(username) => Ok(TimelineFilter::Actor(username.clone())),
        TimelineScope::Entity(key) => {
            let (table, kind_label) = match key.kind {
                EntityKind::Issue => ("issues", "issue"),
                EntityKind::MergeRequest => ("merge_requests", "merge request"),
            };
            let lookup = format!("SELECT id FROM {table} WHERE project_id = ?1 AND iid = ?2");
            let db_id: i64 = conn
                .query_row(&lookup, rusqlite::params![key.project_id, key.iid], |row| {
                    row.get(0)
                })
                .with_context(|| {
                    format!(
                        "resolving {kind_label} #{} in project {}",
                        key.iid, key.project_id
                    )
                })?;
            Ok(match key.kind {
                EntityKind::Issue => TimelineFilter::Issue(db_id),
                EntityKind::MergeRequest => TimelineFilter::MergeRequest(db_id),
            })
        }
    }
}
/// Fetch timeline events from raw resource event tables.
///
/// Gathers Created events from `issues`/`merge_requests`, plus lifecycle
/// events from `resource_state_events`, `resource_label_events`, and
/// `resource_milestone_events`. The merged list is sorted most-recent-first
/// (with a stable entity-kind/iid tiebreak) and truncated to `limit`.
pub fn fetch_timeline_events(
    conn: &Connection,
    scope: &TimelineScope,
    limit: usize,
) -> Result<Vec<TimelineEvent>> {
    let filter = resolve_timeline_scope(conn, scope)?;
    let mut collected: Vec<TimelineEvent> = Vec::new();
    collect_tl_created_events(conn, &filter, &mut collected)?;
    collect_tl_state_events(conn, &filter, &mut collected)?;
    collect_tl_label_events(conn, &filter, &mut collected)?;
    collect_tl_milestone_events(conn, &filter, &mut collected)?;
    // Newest first; ties broken deterministically by entity kind then iid.
    collected.sort_by(|lhs, rhs| {
        rhs.timestamp_ms
            .cmp(&lhs.timestamp_ms)
            .then_with(|| lhs.entity_key.kind.cmp(&rhs.entity_key.kind))
            .then_with(|| lhs.entity_key.iid.cmp(&rhs.entity_key.iid))
    });
    collected.truncate(limit);
    Ok(collected)
}
/// Collect Created events from issues and merge_requests tables.
///
/// The issue and MR halves are identical except for table/alias names,
/// EntityKey constructor, and summary wording, so both delegate to
/// [`collect_created_rows`]. A filter pinned to one entity kind skips the
/// other kind's query entirely.
fn collect_tl_created_events(
    conn: &Connection,
    filter: &TimelineFilter,
    events: &mut Vec<TimelineEvent>,
) -> Result<()> {
    // Issue created events (not wanted when filtering to a specific MR).
    if !matches!(filter, TimelineFilter::MergeRequest(_)) {
        collect_created_rows(conn, filter, events, false)?;
    }
    // MR created events (not wanted when filtering to a specific issue).
    if !matches!(filter, TimelineFilter::Issue(_)) {
        collect_created_rows(conn, filter, events, true)?;
    }
    Ok(())
}

/// Shared worker for [`collect_tl_created_events`].
///
/// Queries one entity table (`issues` when `is_mr` is false,
/// `merge_requests` otherwise) and appends one `Created` event per row.
fn collect_created_rows(
    conn: &Connection,
    filter: &TimelineFilter,
    events: &mut Vec<TimelineEvent>,
    is_mr: bool,
) -> Result<()> {
    // Table name, SQL alias, error-context label, and summary wording all
    // vary with the entity kind. `label` matches the original per-kind
    // error-context strings ("issue" / "MR").
    let (table, alias, label) = if is_mr {
        ("merge_requests", "mr", "MR")
    } else {
        ("issues", "i", "issue")
    };
    let (display, sigil) = if is_mr { ("MR", "!") } else { ("Issue", "#") };
    let (where_clause, params): (String, Vec<Box<dyn rusqlite::types::ToSql>>) = match filter {
        TimelineFilter::All => ("1=1".to_string(), Vec::new()),
        TimelineFilter::Issue(id) if !is_mr => (
            format!("{alias}.id = ?1"),
            vec![Box::new(*id) as Box<dyn rusqlite::types::ToSql>],
        ),
        TimelineFilter::MergeRequest(id) if is_mr => (
            format!("{alias}.id = ?1"),
            vec![Box::new(*id) as Box<dyn rusqlite::types::ToSql>],
        ),
        TimelineFilter::Actor(name) => (
            format!("{alias}.author_username = ?1"),
            vec![Box::new(name.clone()) as Box<dyn rusqlite::types::ToSql>],
        ),
        // The caller never dispatches a mismatched entity filter here.
        _ => unreachable!("entity-kind mismatch is filtered out by the caller"),
    };
    let sql = format!(
        "SELECT {alias}.created_at, {alias}.iid, {alias}.title, {alias}.author_username, {alias}.project_id, p.path_with_namespace
         FROM {table} {alias}
         JOIN projects p ON p.id = {alias}.project_id
         WHERE {where_clause}"
    );
    let mut stmt = conn
        .prepare(&sql)
        .with_context(|| format!("preparing {label} created query"))?;
    let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(AsRef::as_ref).collect();
    let rows = stmt
        .query_map(param_refs.as_slice(), |row| {
            Ok((
                row.get::<_, i64>(0)?,
                row.get::<_, i64>(1)?,
                row.get::<_, Option<String>>(2)?,
                row.get::<_, Option<String>>(3)?,
                row.get::<_, i64>(4)?,
                row.get::<_, String>(5)?,
            ))
        })
        .with_context(|| format!("querying {label} created events"))?;
    for row in rows {
        let (created_at, iid, title, author, project_id, project_path) =
            row.with_context(|| format!("reading {label} created row"))?;
        let title_str = title.as_deref().unwrap_or("(untitled)");
        let entity_key = if is_mr {
            EntityKey::mr(project_id, iid)
        } else {
            EntityKey::issue(project_id, iid)
        };
        events.push(TimelineEvent {
            timestamp_ms: created_at,
            entity_key,
            event_kind: TimelineEventKind::Created,
            // e.g. "Issue #7 created: …" or "MR !7 created: …".
            summary: format!("{display} {sigil}{iid} created: {title_str}"),
            detail: title,
            actor: author,
            project_path,
        });
    }
    Ok(())
}
/// Helper: build WHERE clause and params for resource event tables.
///
/// Resource event tables have `issue_id` and `merge_request_id` columns
/// (exactly one is non-NULL per row), plus `actor_username`. The returned
/// clause assumes the event table is aliased `e`.
fn resource_event_where(filter: &TimelineFilter) -> (String, Vec<Box<dyn rusqlite::types::ToSql>>) {
    let mut params: Vec<Box<dyn rusqlite::types::ToSql>> = Vec::new();
    let clause = match filter {
        TimelineFilter::All => "1=1",
        TimelineFilter::Issue(id) => {
            params.push(Box::new(*id));
            "e.issue_id = ?1"
        }
        TimelineFilter::MergeRequest(id) => {
            params.push(Box::new(*id));
            "e.merge_request_id = ?1"
        }
        TimelineFilter::Actor(name) => {
            params.push(Box::new(name.clone()));
            "e.actor_username = ?1"
        }
    };
    (clause.to_string(), params)
}
/// Resolve a resource event row's entity to an EntityKey.
///
/// Exactly one of the issue/MR iid+project pairs is expected to be
/// populated; returns `None` for orphaned events whose parent entity was
/// deleted. The first two parameters (`_issue_id`, `_mr_id`) are accepted
/// for call-site symmetry with the SQL column order but are unused —
/// underscore-prefixing replaces the old `let _ = (…)` suppression hack.
fn resolve_event_entity(
    _issue_id: Option<i64>,
    _mr_id: Option<i64>,
    issue_iid: Option<i64>,
    mr_iid: Option<i64>,
    issue_project_id: Option<i64>,
    mr_project_id: Option<i64>,
) -> Option<(EntityKey, i64)> {
    if let (Some(iid), Some(pid)) = (issue_iid, issue_project_id) {
        Some((EntityKey::issue(pid, iid), pid))
    } else if let (Some(iid), Some(pid)) = (mr_iid, mr_project_id) {
        Some((EntityKey::mr(pid, iid), pid))
    } else {
        // Orphaned event — entity was deleted.
        None
    }
}
/// Collect state change events from `resource_state_events`.
///
/// Rows are joined back to their parent issue/MR (and that entity's project)
/// to recover the public iid and project path; orphaned rows whose parent was
/// deleted resolve to no entity and are skipped. A `merged` state is surfaced
/// as [`TimelineEventKind::Merged`]; every other state becomes a generic
/// `StateChanged` event carrying the raw state string in `detail`.
fn collect_tl_state_events(
    conn: &Connection,
    filter: &TimelineFilter,
    events: &mut Vec<TimelineEvent>,
) -> Result<()> {
    let (where_clause, params) = resource_event_where(filter);
    // Column order is load-bearing: the query_map closure reads by index.
    let sql = format!(
        "SELECT e.created_at, e.state, e.actor_username,
                e.issue_id, e.merge_request_id,
                i.iid, mr.iid, i.project_id, mr.project_id,
                COALESCE(pi.path_with_namespace, pm.path_with_namespace) AS project_path
         FROM resource_state_events e
         LEFT JOIN issues i ON i.id = e.issue_id
         LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id
         LEFT JOIN projects pi ON pi.id = i.project_id
         LEFT JOIN projects pm ON pm.id = mr.project_id
         WHERE {where_clause}"
    );
    let mut stmt = conn.prepare(&sql).context("preparing state events query")?;
    let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(AsRef::as_ref).collect();
    let rows = stmt
        .query_map(param_refs.as_slice(), |row| {
            Ok((
                row.get::<_, i64>(0)?,
                row.get::<_, String>(1)?,
                row.get::<_, Option<String>>(2)?,
                row.get::<_, Option<i64>>(3)?,
                row.get::<_, Option<i64>>(4)?,
                row.get::<_, Option<i64>>(5)?,
                row.get::<_, Option<i64>>(6)?,
                row.get::<_, Option<i64>>(7)?,
                row.get::<_, Option<i64>>(8)?,
                row.get::<_, Option<String>>(9)?,
            ))
        })
        .context("querying state events")?;
    for row in rows {
        let (
            created_at,
            state,
            actor,
            issue_id,
            mr_id,
            issue_iid,
            mr_iid,
            issue_pid,
            mr_pid,
            project_path,
        ) = row.context("reading state event row")?;
        // Skip orphaned events (parent issue/MR no longer exists).
        let Some((entity_key, _pid)) =
            resolve_event_entity(issue_id, mr_id, issue_iid, mr_iid, issue_pid, mr_pid)
        else {
            continue;
        };
        let (event_kind, summary) = if state == "merged" {
            (
                TimelineEventKind::Merged,
                format!("MR !{} merged", entity_key.iid),
            )
        } else {
            (
                TimelineEventKind::StateChanged,
                format!("State changed to {state}"),
            )
        };
        events.push(TimelineEvent {
            timestamp_ms: created_at,
            entity_key,
            event_kind,
            summary,
            detail: Some(state),
            actor,
            project_path: project_path.unwrap_or_default(),
        });
    }
    Ok(())
}
/// Collect label change events from `resource_label_events`.
///
/// Each row becomes a `LabelAdded` or `LabelRemoved` event depending on its
/// `action` column; rows with any other action value, and orphaned rows whose
/// parent issue/MR was deleted, are skipped. The label name is carried in
/// both the summary and `detail`.
fn collect_tl_label_events(
    conn: &Connection,
    filter: &TimelineFilter,
    events: &mut Vec<TimelineEvent>,
) -> Result<()> {
    let (where_clause, params) = resource_event_where(filter);
    // Column order is load-bearing: the query_map closure reads by index.
    let sql = format!(
        "SELECT e.created_at, e.action, e.label_name, e.actor_username,
                e.issue_id, e.merge_request_id,
                i.iid, mr.iid, i.project_id, mr.project_id,
                COALESCE(pi.path_with_namespace, pm.path_with_namespace) AS project_path
         FROM resource_label_events e
         LEFT JOIN issues i ON i.id = e.issue_id
         LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id
         LEFT JOIN projects pi ON pi.id = i.project_id
         LEFT JOIN projects pm ON pm.id = mr.project_id
         WHERE {where_clause}"
    );
    let mut stmt = conn.prepare(&sql).context("preparing label events query")?;
    let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(AsRef::as_ref).collect();
    let rows = stmt
        .query_map(param_refs.as_slice(), |row| {
            Ok((
                row.get::<_, i64>(0)?,
                row.get::<_, String>(1)?,
                row.get::<_, String>(2)?,
                row.get::<_, Option<String>>(3)?,
                row.get::<_, Option<i64>>(4)?,
                row.get::<_, Option<i64>>(5)?,
                row.get::<_, Option<i64>>(6)?,
                row.get::<_, Option<i64>>(7)?,
                row.get::<_, Option<i64>>(8)?,
                row.get::<_, Option<i64>>(9)?,
                row.get::<_, Option<String>>(10)?,
            ))
        })
        .context("querying label events")?;
    for row in rows {
        let (
            created_at,
            action,
            label_name,
            actor,
            issue_id,
            mr_id,
            issue_iid,
            mr_iid,
            issue_pid,
            mr_pid,
            project_path,
        ) = row.context("reading label event row")?;
        // Skip orphaned events (parent issue/MR no longer exists).
        let Some((entity_key, _pid)) =
            resolve_event_entity(issue_id, mr_id, issue_iid, mr_iid, issue_pid, mr_pid)
        else {
            continue;
        };
        let (event_kind, summary) = match action.as_str() {
            "add" => (
                TimelineEventKind::LabelAdded,
                format!("Label added: {label_name}"),
            ),
            "remove" => (
                TimelineEventKind::LabelRemoved,
                format!("Label removed: {label_name}"),
            ),
            // Unknown actions are ignored rather than surfaced as errors.
            _ => continue,
        };
        events.push(TimelineEvent {
            timestamp_ms: created_at,
            entity_key,
            event_kind,
            summary,
            detail: Some(label_name),
            actor,
            project_path: project_path.unwrap_or_default(),
        });
    }
    Ok(())
}
/// Collect milestone change events from `resource_milestone_events`.
///
/// Mirrors the label collector: `add`/`remove` actions map to
/// `MilestoneSet`/`MilestoneRemoved`; any other action, and orphaned rows
/// whose parent issue/MR was deleted, are skipped. The milestone title is
/// carried in both the summary and `detail`.
fn collect_tl_milestone_events(
    conn: &Connection,
    filter: &TimelineFilter,
    events: &mut Vec<TimelineEvent>,
) -> Result<()> {
    let (where_clause, params) = resource_event_where(filter);
    // Column order is load-bearing: the query_map closure reads by index.
    let sql = format!(
        "SELECT e.created_at, e.action, e.milestone_title, e.actor_username,
                e.issue_id, e.merge_request_id,
                i.iid, mr.iid, i.project_id, mr.project_id,
                COALESCE(pi.path_with_namespace, pm.path_with_namespace) AS project_path
         FROM resource_milestone_events e
         LEFT JOIN issues i ON i.id = e.issue_id
         LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id
         LEFT JOIN projects pi ON pi.id = i.project_id
         LEFT JOIN projects pm ON pm.id = mr.project_id
         WHERE {where_clause}"
    );
    let mut stmt = conn
        .prepare(&sql)
        .context("preparing milestone events query")?;
    let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(AsRef::as_ref).collect();
    let rows = stmt
        .query_map(param_refs.as_slice(), |row| {
            Ok((
                row.get::<_, i64>(0)?,
                row.get::<_, String>(1)?,
                row.get::<_, String>(2)?,
                row.get::<_, Option<String>>(3)?,
                row.get::<_, Option<i64>>(4)?,
                row.get::<_, Option<i64>>(5)?,
                row.get::<_, Option<i64>>(6)?,
                row.get::<_, Option<i64>>(7)?,
                row.get::<_, Option<i64>>(8)?,
                row.get::<_, Option<i64>>(9)?,
                row.get::<_, Option<String>>(10)?,
            ))
        })
        .context("querying milestone events")?;
    for row in rows {
        let (
            created_at,
            action,
            milestone_title,
            actor,
            issue_id,
            mr_id,
            issue_iid,
            mr_iid,
            issue_pid,
            mr_pid,
            project_path,
        ) = row.context("reading milestone event row")?;
        // Skip orphaned events (parent issue/MR no longer exists).
        let Some((entity_key, _pid)) =
            resolve_event_entity(issue_id, mr_id, issue_iid, mr_iid, issue_pid, mr_pid)
        else {
            continue;
        };
        let (event_kind, summary) = match action.as_str() {
            "add" => (
                TimelineEventKind::MilestoneSet,
                format!("Milestone set: {milestone_title}"),
            ),
            "remove" => (
                TimelineEventKind::MilestoneRemoved,
                format!("Milestone removed: {milestone_title}"),
            ),
            // Unknown actions are ignored rather than surfaced as errors.
            _ => continue,
        };
        events.push(TimelineEvent {
            timestamp_ms: created_at,
            entity_key,
            event_kind,
            summary,
            detail: Some(milestone_title),
            actor,
            project_path: project_path.unwrap_or_default(),
        });
    }
    Ok(())
}
#[cfg(test)]
mod tests {
use super::*;
/// Create the minimal schema needed for timeline queries.
fn create_dashboard_schema(conn: &Connection) {
conn.execute_batch(
"
CREATE TABLE projects (
id INTEGER PRIMARY KEY,
gitlab_project_id INTEGER UNIQUE NOT NULL,
path_with_namespace TEXT NOT NULL
);
CREATE TABLE issues (
id INTEGER PRIMARY KEY,
gitlab_id INTEGER UNIQUE NOT NULL,
project_id INTEGER NOT NULL,
iid INTEGER NOT NULL,
title TEXT,
state TEXT NOT NULL,
author_username TEXT,
created_at INTEGER NOT NULL,
updated_at INTEGER NOT NULL,
last_seen_at INTEGER NOT NULL
);
CREATE TABLE merge_requests (
id INTEGER PRIMARY KEY,
gitlab_id INTEGER UNIQUE NOT NULL,
project_id INTEGER NOT NULL,
iid INTEGER NOT NULL,
title TEXT,
state TEXT,
author_username TEXT,
created_at INTEGER,
updated_at INTEGER,
last_seen_at INTEGER NOT NULL
);
",
)
.expect("create dashboard schema");
}
fn insert_issue(conn: &Connection, iid: i64, state: &str, updated_at: i64) {
conn.execute(
"INSERT INTO issues (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at)
VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)",
rusqlite::params![iid * 100, iid, format!("Issue {iid}"), state, updated_at],
)
.expect("insert issue");
}
fn insert_mr(conn: &Connection, iid: i64, state: &str, updated_at: i64) {
conn.execute(
"INSERT INTO merge_requests (gitlab_id, project_id, iid, title, state, created_at, updated_at, last_seen_at)
VALUES (?1, 1, ?2, ?3, ?4, ?5, ?5, ?5)",
rusqlite::params![iid * 100 + 50, iid, format!("MR {iid}"), state, updated_at],
)
.expect("insert mr");
}
/// Add resource event tables to an existing schema.
fn add_resource_event_tables(conn: &Connection) {
conn.execute_batch(
"
CREATE TABLE IF NOT EXISTS resource_state_events (
id INTEGER PRIMARY KEY,
gitlab_id INTEGER NOT NULL,
project_id INTEGER NOT NULL,
issue_id INTEGER,
merge_request_id INTEGER,
state TEXT NOT NULL,
actor_gitlab_id INTEGER,
actor_username TEXT,
created_at INTEGER NOT NULL,
source_commit TEXT,
source_merge_request_iid INTEGER
);
CREATE TABLE IF NOT EXISTS resource_label_events (
id INTEGER PRIMARY KEY,
gitlab_id INTEGER NOT NULL,
project_id INTEGER NOT NULL,
issue_id INTEGER,
merge_request_id INTEGER,
action TEXT NOT NULL,
label_name TEXT NOT NULL,
actor_gitlab_id INTEGER,
actor_username TEXT,
created_at INTEGER NOT NULL
);
CREATE TABLE IF NOT EXISTS resource_milestone_events (
id INTEGER PRIMARY KEY,
gitlab_id INTEGER NOT NULL,
project_id INTEGER NOT NULL,
issue_id INTEGER,
merge_request_id INTEGER,
action TEXT NOT NULL,
milestone_title TEXT NOT NULL,
milestone_id INTEGER,
actor_gitlab_id INTEGER,
actor_username TEXT,
created_at INTEGER NOT NULL
);
",
)
.expect("create resource event tables");
}
/// Create a full timeline test schema (dashboard schema + resource events).
fn create_timeline_schema(conn: &Connection) {
create_dashboard_schema(conn);
add_resource_event_tables(conn);
// Insert a project for test entities.
conn.execute(
"INSERT INTO projects (id, gitlab_project_id, path_with_namespace) VALUES (1, 100, 'group/project')",
[],
)
.expect("insert test project");
}
fn insert_state_event(
conn: &Connection,
gitlab_id: i64,
issue_id: Option<i64>,
mr_id: Option<i64>,
state: &str,
actor: &str,
created_at: i64,
) {
conn.execute(
"INSERT INTO resource_state_events (gitlab_id, project_id, issue_id, merge_request_id, state, actor_username, created_at)
VALUES (?1, 1, ?2, ?3, ?4, ?5, ?6)",
rusqlite::params![gitlab_id, issue_id, mr_id, state, actor, created_at],
)
.expect("insert state event");
}
#[allow(clippy::too_many_arguments)]
fn insert_label_event(
conn: &Connection,
gitlab_id: i64,
issue_id: Option<i64>,
mr_id: Option<i64>,
action: &str,
label: &str,
actor: &str,
created_at: i64,
) {
conn.execute(
"INSERT INTO resource_label_events (gitlab_id, project_id, issue_id, merge_request_id, action, label_name, actor_username, created_at)
VALUES (?1, 1, ?2, ?3, ?4, ?5, ?6, ?7)",
rusqlite::params![gitlab_id, issue_id, mr_id, action, label, actor, created_at],
)
.expect("insert label event");
}
#[test]
fn test_fetch_timeline_scoped() {
let conn = Connection::open_in_memory().unwrap();
create_timeline_schema(&conn);
// Create two issues.
let now = 1_700_000_000_000_i64;
insert_issue(&conn, 1, "opened", now - 100_000);
insert_issue(&conn, 2, "opened", now - 50_000);
// Get internal IDs.
let issue1_id: i64 = conn
.query_row("SELECT id FROM issues WHERE iid = 1", [], |r| r.get(0))
.unwrap();
let issue2_id: i64 = conn
.query_row("SELECT id FROM issues WHERE iid = 2", [], |r| r.get(0))
.unwrap();
// State events: issue 1 closed, issue 2 label added.
insert_state_event(
&conn,
1,
Some(issue1_id),
None,
"closed",
"alice",
now - 80_000,
);
insert_label_event(
&conn,
2,
Some(issue2_id),
None,
"add",
"bug",
"bob",
now - 30_000,
);
// Fetch scoped to issue 1.
let scope = TimelineScope::Entity(EntityKey::issue(1, 1));
let events = fetch_timeline_events(&conn, &scope, 100).unwrap();
// Should only have issue 1's events: Created + StateChanged.
assert_eq!(events.len(), 2);
for event in &events {
assert_eq!(event.entity_key.iid, 1, "All events should be for issue #1");
}
// Most recent first.
assert!(events[0].timestamp_ms >= events[1].timestamp_ms);
}
#[test]
fn test_fetch_timeline_all_scope() {
let conn = Connection::open_in_memory().unwrap();
create_timeline_schema(&conn);
let now = 1_700_000_000_000_i64;
insert_issue(&conn, 1, "opened", now - 100_000);
insert_issue(&conn, 2, "opened", now - 50_000);
let events = fetch_timeline_events(&conn, &TimelineScope::All, 100).unwrap();
// Should have Created events for both issues.
assert_eq!(events.len(), 2);
}
#[test]
fn test_fetch_timeline_author_scope() {
let conn = Connection::open_in_memory().unwrap();
create_timeline_schema(&conn);
let now = 1_700_000_000_000_i64;
insert_issue(&conn, 1, "opened", now - 100_000); // default: no author_username in insert_issue
let issue1_id: i64 = conn
.query_row("SELECT id FROM issues WHERE iid = 1", [], |r| r.get(0))
.unwrap();
// State events by different actors.
insert_state_event(
&conn,
1,
Some(issue1_id),
None,
"closed",
"alice",
now - 80_000,
);
insert_state_event(
&conn,
2,
Some(issue1_id),
None,
"reopened",
"bob",
now - 60_000,
);
let scope = TimelineScope::Author("alice".into());
let events = fetch_timeline_events(&conn, &scope, 100).unwrap();
// Should only get alice's state event (Created events don't have author set via insert_issue).
assert!(events.iter().all(|e| e.actor.as_deref() == Some("alice")));
}
#[test]
fn test_fetch_timeline_respects_limit() {
let conn = Connection::open_in_memory().unwrap();
create_timeline_schema(&conn);
let now = 1_700_000_000_000_i64;
for i in 1..=10 {
insert_issue(&conn, i, "opened", now - (i * 10_000));
}
let events = fetch_timeline_events(&conn, &TimelineScope::All, 3).unwrap();
assert_eq!(events.len(), 3);
}
#[test]
fn test_fetch_timeline_sorted_most_recent_first() {
    let conn = Connection::open_in_memory().unwrap();
    create_timeline_schema(&conn);
    let base = 1_700_000_000_000_i64;
    // Insert deliberately out of chronological order.
    for (iid, age) in [(1_i64, 200_000_i64), (2, 100_000), (3, 300_000)] {
        insert_issue(&conn, iid, "opened", base - age);
    }
    let events = fetch_timeline_events(&conn, &TimelineScope::All, 100).unwrap();
    // Walk the feed and verify timestamps never increase.
    let mut prev: Option<i64> = None;
    for event in &events {
        if let Some(p) = prev {
            assert!(
                p >= event.timestamp_ms,
                "Events should be sorted most-recent-first"
            );
        }
        prev = Some(event.timestamp_ms);
    }
}
#[test]
fn test_fetch_timeline_state_merged_is_merged_kind() {
    let conn = Connection::open_in_memory().unwrap();
    create_timeline_schema(&conn);
    let now = 1_700_000_000_000_i64;
    insert_mr(&conn, 1, "merged", now - 100_000);
    let mr_id: i64 = conn
        .query_row("SELECT id FROM merge_requests WHERE iid = 1", [], |r| {
            r.get(0)
        })
        .unwrap();
    insert_state_event(&conn, 1, None, Some(mr_id), "merged", "alice", now - 50_000);
    let events =
        fetch_timeline_events(&conn, &TimelineScope::Entity(EntityKey::mr(1, 1)), 100).unwrap();
    // A 'merged' state event must be classified as the Merged kind, not a
    // generic state change, and carry the MR summary line.
    let merged: Vec<_> = events
        .iter()
        .filter(|e| e.event_kind == TimelineEventKind::Merged)
        .collect();
    assert_eq!(merged.len(), 1);
    assert_eq!(merged[0].summary, "MR !1 merged");
}
#[test]
fn test_fetch_timeline_empty_db() {
    let conn = Connection::open_in_memory().unwrap();
    create_timeline_schema(&conn);
    // With no rows inserted, the feed must come back empty rather than error.
    let events = fetch_timeline_events(&conn, &TimelineScope::All, 100);
    assert!(events.unwrap().is_empty());
}
}

View File

@@ -0,0 +1,234 @@
#![allow(dead_code)]
//! Trace screen actions — fetch file provenance chains from the local database.
//!
//! Wraps `run_trace()` from `lore::core::trace` and provides an autocomplete
//! path query for the input field.
use anyhow::Result;
use rusqlite::Connection;
use lore::core::trace::{self, TraceResult};
/// Default limit for trace chain results in TUI queries.
const DEFAULT_LIMIT: usize = 50;
/// Fetch trace chains for a file path.
///
/// Thin wrapper around [`trace::run_trace()`] that applies the TUI's
/// default chain limit ([`DEFAULT_LIMIT`]).
///
/// # Errors
///
/// Propagates any error returned by [`trace::run_trace()`].
pub fn fetch_trace(
    conn: &Connection,
    project_id: Option<i64>,
    path: &str,
    follow_renames: bool,
    include_discussions: bool,
) -> Result<TraceResult> {
    let result = trace::run_trace(
        conn,
        project_id,
        path,
        follow_renames,
        include_discussions,
        DEFAULT_LIMIT,
    )?;
    Ok(result)
}
/// Fetch known file paths from `mr_file_changes` for autocomplete.
///
/// Returns distinct `new_path` values scoped to the given project (or all
/// projects if `None`), sorted alphabetically.
pub fn fetch_known_paths(conn: &Connection, project_id: Option<i64>) -> Result<Vec<String>> {
let mut paths = if let Some(pid) = project_id {
let mut stmt = conn.prepare(
"SELECT DISTINCT new_path FROM mr_file_changes WHERE project_id = ?1 ORDER BY new_path",
)?;
let rows = stmt.query_map([pid], |row| row.get::<_, String>(0))?;
rows.filter_map(Result::ok).collect::<Vec<_>>()
} else {
let mut stmt =
conn.prepare("SELECT DISTINCT new_path FROM mr_file_changes ORDER BY new_path")?;
let rows = stmt.query_map([], |row| row.get::<_, String>(0))?;
rows.filter_map(Result::ok).collect::<Vec<_>>()
};
paths.sort();
paths.dedup();
Ok(paths)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimal schema for trace queries.
    ///
    /// Trimmed copy of the production tables that `run_trace()` and
    /// `fetch_known_paths()` touch (projects, MRs, file changes, entity
    /// references, issues, discussions, notes), plus the two path-lookup
    /// indexes the trace queries use.
    fn create_trace_schema(conn: &Connection) {
        conn.execute_batch(
            "
            CREATE TABLE projects (
                id INTEGER PRIMARY KEY,
                gitlab_project_id INTEGER UNIQUE NOT NULL,
                path_with_namespace TEXT NOT NULL
            );
            CREATE TABLE merge_requests (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT,
                state TEXT,
                author_username TEXT,
                draft INTEGER NOT NULL DEFAULT 0,
                created_at INTEGER,
                updated_at INTEGER,
                merged_at INTEGER,
                closed_at INTEGER,
                web_url TEXT,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE mr_file_changes (
                id INTEGER PRIMARY KEY,
                merge_request_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                new_path TEXT NOT NULL,
                old_path TEXT,
                change_type TEXT NOT NULL
            );
            CREATE TABLE entity_references (
                id INTEGER PRIMARY KEY,
                source_entity_type TEXT NOT NULL,
                source_entity_id INTEGER NOT NULL,
                target_entity_type TEXT NOT NULL,
                target_entity_id INTEGER,
                target_iid INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                reference_type TEXT NOT NULL
            );
            CREATE TABLE issues (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT,
                state TEXT NOT NULL,
                author_username TEXT,
                web_url TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE discussions (
                id INTEGER PRIMARY KEY,
                gitlab_discussion_id TEXT NOT NULL,
                project_id INTEGER NOT NULL,
                noteable_type TEXT NOT NULL,
                issue_id INTEGER,
                merge_request_id INTEGER,
                resolvable INTEGER NOT NULL DEFAULT 0,
                resolved INTEGER NOT NULL DEFAULT 0,
                last_note_at INTEGER NOT NULL DEFAULT 0,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE notes (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                discussion_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                is_system INTEGER NOT NULL DEFAULT 0,
                author_username TEXT,
                body TEXT,
                note_type TEXT,
                position_new_path TEXT,
                position_old_path TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            CREATE INDEX idx_mfc_new_path_project_mr
                ON mr_file_changes(new_path, project_id, merge_request_id);
            CREATE INDEX idx_mfc_old_path_project_mr
                ON mr_file_changes(old_path, project_id, merge_request_id);
            ",
        )
        .expect("create trace schema");
    }

    /// An empty database yields an empty result, not an error.
    #[test]
    fn test_fetch_trace_empty_db() {
        let conn = Connection::open_in_memory().unwrap();
        create_trace_schema(&conn);
        let result = fetch_trace(&conn, None, "src/main.rs", true, true).unwrap();
        assert!(result.trace_chains.is_empty());
        assert_eq!(result.total_chains, 0);
    }

    /// One MR touching the requested path produces one trace chain carrying
    /// the MR's iid, author, and change type.
    #[test]
    fn test_fetch_trace_with_mr() {
        let conn = Connection::open_in_memory().unwrap();
        create_trace_schema(&conn);
        // Insert a project, MR, and file change.
        conn.execute_batch(
            "
            INSERT INTO projects(id, gitlab_project_id, path_with_namespace)
            VALUES (1, 100, 'group/project');
            INSERT INTO merge_requests(id, gitlab_id, project_id, iid, title, state, author_username, updated_at, last_seen_at)
            VALUES (1, 200, 1, 42, 'Add main.rs', 'merged', 'alice', 1700000000000, 1700000000000);
            INSERT INTO mr_file_changes(id, merge_request_id, project_id, new_path, change_type)
            VALUES (1, 1, 1, 'src/main.rs', 'added');
            ",
        )
        .unwrap();
        // follow_renames = true, include_discussions = false.
        let result = fetch_trace(&conn, Some(1), "src/main.rs", true, false).unwrap();
        assert_eq!(result.trace_chains.len(), 1);
        assert_eq!(result.trace_chains[0].mr_iid, 42);
        assert_eq!(result.trace_chains[0].mr_author, "alice");
        assert_eq!(result.trace_chains[0].change_type, "added");
    }

    /// No file changes recorded means no autocomplete candidates.
    #[test]
    fn test_fetch_known_paths_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_trace_schema(&conn);
        let paths = fetch_known_paths(&conn, None).unwrap();
        assert!(paths.is_empty());
    }

    /// Paths come back sorted alphabetically and deduplicated ('src/b.rs'
    /// appears in two rows but only once in the result).
    #[test]
    fn test_fetch_known_paths_with_data() {
        let conn = Connection::open_in_memory().unwrap();
        create_trace_schema(&conn);
        conn.execute_batch(
            "
            INSERT INTO mr_file_changes(id, merge_request_id, project_id, new_path, change_type)
            VALUES (1, 1, 1, 'src/b.rs', 'added'),
                   (2, 1, 1, 'src/a.rs', 'modified'),
                   (3, 2, 1, 'src/b.rs', 'modified');
            ",
        )
        .unwrap();
        let paths = fetch_known_paths(&conn, None).unwrap();
        assert_eq!(paths, vec!["src/a.rs", "src/b.rs"]);
    }

    /// Passing Some(project_id) excludes paths from other projects.
    #[test]
    fn test_fetch_known_paths_scoped_to_project() {
        let conn = Connection::open_in_memory().unwrap();
        create_trace_schema(&conn);
        conn.execute_batch(
            "
            INSERT INTO mr_file_changes(id, merge_request_id, project_id, new_path, change_type)
            VALUES (1, 1, 1, 'src/a.rs', 'added'),
                   (2, 2, 2, 'src/b.rs', 'added');
            ",
        )
        .unwrap();
        let paths = fetch_known_paths(&conn, Some(1)).unwrap();
        assert_eq!(paths, vec!["src/a.rs"]);
    }
}

View File

@@ -0,0 +1,285 @@
#![allow(dead_code)]
//! Who screen actions — fetch people-intelligence data from the local database.
//!
//! Each function wraps a `query_*` function from `lore::cli::commands::who`
//! and returns the appropriate [`WhoResult`] variant.
use anyhow::Result;
use rusqlite::Connection;
use lore::cli::commands::who;
use lore::core::config::ScoringConfig;
use lore::core::who_types::WhoResult;
/// Default limit for result rows in TUI who queries.
const DEFAULT_LIMIT: usize = 20;
/// Default time window: 6 months in milliseconds.
const SIX_MONTHS_MS: i64 = 180 * 24 * 60 * 60 * 1000;
/// Fetch expert results for a file path.
///
/// Scores contributors over the trailing six-month window ending at
/// `now_ms` and returns the top [`DEFAULT_LIMIT`] entries wrapped in
/// [`WhoResult::Expert`].
///
/// # Errors
///
/// Propagates any error from [`who::query_expert()`].
pub fn fetch_who_expert(
    conn: &Connection,
    path: &str,
    project_id: Option<i64>,
    scoring: &ScoringConfig,
    now_ms: i64,
) -> Result<WhoResult> {
    let window_start = now_ms - SIX_MONTHS_MS;
    let expert = who::query_expert(
        conn,
        path,
        project_id,
        window_start,
        now_ms,
        DEFAULT_LIMIT,
        scoring,
        false, // detail
        false, // explain_score
        false, // include_bots
    )?;
    Ok(WhoResult::Expert(expert))
}
/// Fetch workload summary for a username.
///
/// Unlike the other modes, workload deliberately passes no time window so
/// the full backlog is visible. Wraps the result in [`WhoResult::Workload`].
///
/// # Errors
///
/// Propagates any error from [`who::query_workload()`].
pub fn fetch_who_workload(
    conn: &Connection,
    username: &str,
    project_id: Option<i64>,
    include_closed: bool,
) -> Result<WhoResult> {
    who::query_workload(
        conn,
        username,
        project_id,
        None, // since_ms — show all for workload
        DEFAULT_LIMIT,
        include_closed,
    )
    .map(WhoResult::Workload)
    .map_err(Into::into)
}
/// Fetch review activity breakdown for a username.
///
/// Covers the trailing six-month window ending at `now_ms`; wraps the
/// result in [`WhoResult::Reviews`].
///
/// # Errors
///
/// Propagates any error from [`who::query_reviews()`].
pub fn fetch_who_reviews(
    conn: &Connection,
    username: &str,
    project_id: Option<i64>,
    now_ms: i64,
) -> Result<WhoResult> {
    let window_start = now_ms - SIX_MONTHS_MS;
    let reviews = who::query_reviews(conn, username, project_id, window_start)?;
    Ok(WhoResult::Reviews(reviews))
}
/// Fetch recent active (unresolved) discussions.
///
/// Active mode uses a tighter window than the other modes: the last seven
/// days ending at `now_ms`. Wraps the result in [`WhoResult::Active`].
///
/// # Errors
///
/// Propagates any error from [`who::query_active()`].
pub fn fetch_who_active(
    conn: &Connection,
    project_id: Option<i64>,
    include_closed: bool,
    now_ms: i64,
) -> Result<WhoResult> {
    // Active mode default window: 7 days.
    const SEVEN_DAYS_MS: i64 = 7 * 24 * 60 * 60 * 1000;
    let active = who::query_active(
        conn,
        project_id,
        now_ms - SEVEN_DAYS_MS,
        DEFAULT_LIMIT,
        include_closed,
    )?;
    Ok(WhoResult::Active(active))
}
/// Fetch overlap (shared file knowledge) for a path.
///
/// Covers the trailing six-month window ending at `now_ms`; wraps the
/// result in [`WhoResult::Overlap`].
///
/// # Errors
///
/// Propagates any error from [`who::query_overlap()`].
pub fn fetch_who_overlap(
    conn: &Connection,
    path: &str,
    project_id: Option<i64>,
    now_ms: i64,
) -> Result<WhoResult> {
    let window_start = now_ms - SIX_MONTHS_MS;
    who::query_overlap(conn, path, project_id, window_start, DEFAULT_LIMIT)
        .map(WhoResult::Overlap)
        .map_err(Into::into)
}
#[cfg(test)]
mod tests {
    use super::*;

    /// Minimal schema for who queries (matches the real DB schema).
    ///
    /// Trimmed copies of the tables the `who::query_*` functions read
    /// (projects, issues, MRs, assignees, reviewers, file changes,
    /// discussions, notes), plus the partial indexes on DiffNote paths and
    /// the file-change path indexes that the queries depend on.
    fn create_who_schema(conn: &Connection) {
        conn.execute_batch(
            "
            CREATE TABLE projects (
                id INTEGER PRIMARY KEY,
                gitlab_project_id INTEGER UNIQUE NOT NULL,
                path_with_namespace TEXT NOT NULL
            );
            CREATE TABLE issues (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT,
                state TEXT NOT NULL,
                author_username TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE merge_requests (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                project_id INTEGER NOT NULL,
                iid INTEGER NOT NULL,
                title TEXT,
                state TEXT,
                author_username TEXT,
                draft INTEGER NOT NULL DEFAULT 0,
                created_at INTEGER,
                updated_at INTEGER,
                merged_at INTEGER,
                closed_at INTEGER,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE issue_assignees (
                issue_id INTEGER NOT NULL,
                username TEXT NOT NULL,
                PRIMARY KEY(issue_id, username)
            );
            CREATE TABLE mr_reviewers (
                merge_request_id INTEGER NOT NULL,
                username TEXT NOT NULL,
                PRIMARY KEY(merge_request_id, username)
            );
            CREATE TABLE mr_file_changes (
                id INTEGER PRIMARY KEY,
                merge_request_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                new_path TEXT NOT NULL,
                old_path TEXT,
                change_type TEXT NOT NULL
            );
            CREATE TABLE discussions (
                id INTEGER PRIMARY KEY,
                gitlab_discussion_id TEXT NOT NULL,
                project_id INTEGER NOT NULL,
                noteable_type TEXT NOT NULL,
                issue_id INTEGER,
                merge_request_id INTEGER,
                resolvable INTEGER NOT NULL DEFAULT 0,
                resolved INTEGER NOT NULL DEFAULT 0,
                last_note_at INTEGER NOT NULL DEFAULT 0,
                last_seen_at INTEGER NOT NULL
            );
            CREATE TABLE notes (
                id INTEGER PRIMARY KEY,
                gitlab_id INTEGER UNIQUE NOT NULL,
                discussion_id INTEGER NOT NULL,
                project_id INTEGER NOT NULL,
                is_system INTEGER NOT NULL DEFAULT 0,
                author_username TEXT,
                body TEXT,
                note_type TEXT,
                position_new_path TEXT,
                position_old_path TEXT,
                created_at INTEGER NOT NULL,
                updated_at INTEGER NOT NULL,
                last_seen_at INTEGER NOT NULL
            );
            -- Indexes needed by who queries
            CREATE INDEX idx_notes_diffnote_path_created
                ON notes(position_new_path, created_at)
                WHERE note_type = 'DiffNote' AND is_system = 0;
            CREATE INDEX idx_notes_old_path_author
                ON notes(position_old_path, author_username)
                WHERE note_type = 'DiffNote' AND is_system = 0;
            CREATE INDEX idx_mfc_new_path_project_mr
                ON mr_file_changes(new_path, project_id, merge_request_id);
            CREATE INDEX idx_mfc_old_path_project_mr
                ON mr_file_changes(old_path, project_id, merge_request_id);
            ",
        )
        .expect("create who schema");
    }

    /// Default scoring weights for tests.
    fn default_scoring() -> ScoringConfig {
        ScoringConfig::default()
    }

    /// Fixed "now" so windows are deterministic across runs.
    fn now_ms() -> i64 {
        1_700_000_000_000 // Fixed timestamp for deterministic tests.
    }

    // The five tests below all exercise the empty-database path: each mode
    // should return its variant with empty collections, never an error.

    #[test]
    fn test_fetch_who_expert_empty_db_returns_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_who_schema(&conn);
        let result = fetch_who_expert(&conn, "src/", None, &default_scoring(), now_ms()).unwrap();
        match result {
            WhoResult::Expert(expert) => {
                assert!(expert.experts.is_empty());
                assert!(!expert.truncated);
            }
            _ => panic!("Expected Expert variant"),
        }
    }

    #[test]
    fn test_fetch_who_workload_empty_db_returns_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_who_schema(&conn);
        let result = fetch_who_workload(&conn, "alice", None, false).unwrap();
        match result {
            WhoResult::Workload(wl) => {
                // The requested username is echoed back even with no data.
                assert_eq!(wl.username, "alice");
                assert!(wl.assigned_issues.is_empty());
                assert!(wl.authored_mrs.is_empty());
            }
            _ => panic!("Expected Workload variant"),
        }
    }

    #[test]
    fn test_fetch_who_reviews_empty_db_returns_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_who_schema(&conn);
        let result = fetch_who_reviews(&conn, "alice", None, now_ms()).unwrap();
        match result {
            WhoResult::Reviews(rev) => {
                assert_eq!(rev.username, "alice");
                assert_eq!(rev.total_diffnotes, 0);
            }
            _ => panic!("Expected Reviews variant"),
        }
    }

    #[test]
    fn test_fetch_who_active_empty_db_returns_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_who_schema(&conn);
        let result = fetch_who_active(&conn, None, false, now_ms()).unwrap();
        match result {
            WhoResult::Active(active) => {
                assert!(active.discussions.is_empty());
                assert_eq!(active.total_unresolved_in_window, 0);
            }
            _ => panic!("Expected Active variant"),
        }
    }

    #[test]
    fn test_fetch_who_overlap_empty_db_returns_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_who_schema(&conn);
        let result = fetch_who_overlap(&conn, "src/", None, now_ms()).unwrap();
        match result {
            WhoResult::Overlap(overlap) => {
                assert!(overlap.users.is_empty());
                assert!(!overlap.truncated);
            }
            _ => panic!("Expected Overlap variant"),
        }
    }
}