feat(tui): Phase 4 completion + Phase 5 session/lock/text-width

Phase 4 (bd-1df9) — all 5 acceptance criteria met:
- Sync screen with delta ledger (bd-2x2h, bd-y095)
- Doctor screen with health checks (bd-2iqk)
- Stats screen with document counts (bd-2iqk)
- CLI integration: lore tui subcommand (bd-26lp)
- CLI integration: lore sync --tui flag (bd-3l56)

Phase 5 (bd-3h00) — session persistence + instance lock + text width:
- text_width.rs: Unicode-aware measurement, truncation, padding (16 tests)
- instance_lock.rs: Advisory PID lock with stale recovery (6 tests)
- session.rs: Atomic write + CRC32 checksum + quarantine (9 tests)

Closes: bd-26lp, bd-3h00, bd-3l56, bd-1df9, bd-y095
This commit is contained in:
teernisse
2026-02-18 23:40:30 -05:00
parent 418417b0f4
commit 146eb61623
45 changed files with 5216 additions and 207 deletions

View File

@@ -96,8 +96,7 @@ pub fn fetch_file_history(
merge_commit_sha: row.get(7)?,
})
})?
.filter_map(std::result::Result::ok)
.collect();
.collect::<std::result::Result<Vec<_>, _>>()?;
let total_mrs = merge_requests.len();
@@ -170,8 +169,7 @@ fn fetch_file_discussions(
created_at_ms: row.get(4)?,
})
})?
.filter_map(std::result::Result::ok)
.collect();
.collect::<std::result::Result<Vec<_>, _>>()?;
Ok(discussions)
}
@@ -187,12 +185,10 @@ pub fn fetch_file_history_paths(conn: &Connection, project_id: Option<i64>) -> R
let mut stmt = conn.prepare(sql)?;
let paths: Vec<String> = if let Some(pid) = project_id {
stmt.query_map([pid], |row| row.get(0))?
.filter_map(std::result::Result::ok)
.collect()
.collect::<std::result::Result<Vec<_>, _>>()?
} else {
stmt.query_map([], |row| row.get(0))?
.filter_map(std::result::Result::ok)
.collect()
.collect::<std::result::Result<Vec<_>, _>>()?
};
Ok(paths)

View File

@@ -12,6 +12,7 @@ mod issue_list;
mod mr_detail;
mod mr_list;
mod search;
mod sync;
mod timeline;
mod trace;
mod who;
@@ -24,6 +25,7 @@ pub use issue_list::*;
pub use mr_detail::*;
pub use mr_list::*;
pub use search::*;
pub use sync::*;
pub use timeline::*;
pub use trace::*;
pub use who::*;

View File

@@ -0,0 +1,587 @@
#![allow(dead_code)]
//! Sync screen actions — query sync run history and detect running syncs.
//!
//! With cron-driven syncs as the primary mechanism, the TUI's sync screen
//! acts as a status dashboard. These pure query functions read `sync_runs`
//! and `projects` to populate the screen.
use anyhow::{Context, Result};
use rusqlite::Connection;
use crate::clock::Clock;
/// How many recent runs to display in the sync history.
/// Used by [`fetch_sync_overview`] when querying `sync_runs`.
const HISTORY_LIMIT: usize = 10;

/// If a "running" sync hasn't heartbeated in this many milliseconds,
/// consider it stale (likely crashed). Compared against the clock in
/// [`detect_running_sync`].
const STALE_HEARTBEAT_MS: i64 = 120_000; // 2 minutes
// ---------------------------------------------------------------------------
// Data types
// ---------------------------------------------------------------------------
/// Overview data for the sync screen.
///
/// Produced by [`fetch_sync_overview`]; each field comes from its own
/// query helper over `sync_runs` / `projects`.
#[derive(Debug, Default)]
pub struct SyncOverview {
    /// Info about a currently running sync, if any.
    pub running: Option<RunningSyncInfo>,
    /// Most recent completed (succeeded or failed) run.
    /// Drawn from `recent_runs`, so it is `None` when history is empty
    /// or only contains running entries.
    pub last_completed: Option<SyncRunInfo>,
    /// Recent sync run history (newest first). Includes running entries;
    /// capped at `HISTORY_LIMIT`.
    pub recent_runs: Vec<SyncRunInfo>,
    /// Configured project paths (`path_with_namespace`, sorted).
    pub projects: Vec<String>,
}
/// A sync that is currently in progress.
///
/// Built by [`detect_running_sync`] from the newest `sync_runs` row with
/// `status = 'running'`.
#[derive(Debug, Clone)]
pub struct RunningSyncInfo {
    /// Row ID in sync_runs.
    pub id: i64,
    /// When this sync started (ms epoch).
    pub started_at: i64,
    /// Last heartbeat (ms epoch).
    pub heartbeat_at: i64,
    /// How long it's been running (ms), measured against the injected
    /// clock at query time.
    pub elapsed_ms: u64,
    /// Whether the heartbeat is stale (sync may have crashed); true when
    /// the heartbeat is older than [`STALE_HEARTBEAT_MS`].
    pub stale: bool,
    /// Items processed so far (`total_items_processed`, NULL treated as 0).
    pub items_processed: u64,
}
/// Summary of a single sync run.
///
/// One row of `sync_runs`, as returned by [`fetch_recent_runs`].
#[derive(Debug, Clone)]
pub struct SyncRunInfo {
    /// Row ID in sync_runs.
    pub id: i64,
    /// 'succeeded', 'failed', or 'running'.
    pub status: String,
    /// The command that was run (e.g., 'sync', 'ingest issues').
    pub command: String,
    /// When this sync started (ms epoch).
    pub started_at: i64,
    /// When this sync finished (ms epoch), if completed.
    pub finished_at: Option<i64>,
    /// Duration in ms (computed from started_at/finished_at); `None` while
    /// the run has no `finished_at`.
    pub duration_ms: Option<u64>,
    /// Total items processed (NULL in the table treated as 0).
    pub items_processed: u64,
    /// Total errors encountered (NULL in the table treated as 0).
    pub errors: u64,
    /// Error message if the run failed.
    pub error: Option<String>,
    /// Correlation ID for log matching.
    pub run_id: Option<String>,
}
// ---------------------------------------------------------------------------
// Public API
// ---------------------------------------------------------------------------
/// Fetch the complete sync overview for the sync screen.
///
/// Gathers running-sync detection, the most recent completed run, recent
/// history (capped at [`HISTORY_LIMIT`]), and configured projects into a
/// single struct.
///
/// # Errors
/// Propagates any error from the underlying queries.
pub fn fetch_sync_overview(conn: &Connection, clock: &dyn Clock) -> Result<SyncOverview> {
    let recent_runs = fetch_recent_runs(conn, HISTORY_LIMIT)?;
    // The newest finished run, if any; running entries are skipped.
    let last_completed = recent_runs
        .iter()
        .find(|run| matches!(run.status.as_str(), "succeeded" | "failed"))
        .cloned();
    Ok(SyncOverview {
        running: detect_running_sync(conn, clock)?,
        last_completed,
        recent_runs,
        projects: fetch_configured_projects(conn)?,
    })
}
/// Detect a currently running sync from the `sync_runs` table.
///
/// A sync is considered "running" if `status = 'running'`. It's marked
/// stale if the heartbeat is older than [`STALE_HEARTBEAT_MS`].
pub fn detect_running_sync(
conn: &Connection,
clock: &dyn Clock,
) -> Result<Option<RunningSyncInfo>> {
let result = conn.query_row(
"SELECT id, started_at, heartbeat_at, total_items_processed
FROM sync_runs
WHERE status = 'running'
ORDER BY id DESC
LIMIT 1",
[],
|row| {
let id: i64 = row.get(0)?;
let started_at: i64 = row.get(1)?;
let heartbeat_at: i64 = row.get(2)?;
let items: Option<i64> = row.get(3)?;
Ok((id, started_at, heartbeat_at, items.unwrap_or(0)))
},
);
match result {
Ok((id, started_at, heartbeat_at, items)) => {
let now = clock.now_ms();
let elapsed_ms = now.saturating_sub(started_at);
let stale = (now - heartbeat_at) > STALE_HEARTBEAT_MS;
#[allow(clippy::cast_sign_loss)]
Ok(Some(RunningSyncInfo {
id,
started_at,
heartbeat_at,
elapsed_ms: elapsed_ms as u64,
stale,
items_processed: items as u64,
}))
}
Err(rusqlite::Error::QueryReturnedNoRows) => Ok(None),
Err(e) => Err(e).context("detecting running sync"),
}
}
/// Fetch recent sync runs (newest first).
///
/// Returns up to `limit` rows from `sync_runs`, ordered by row id
/// descending (insertion order, so newest first). NULL counters are
/// treated as zero, and malformed rows (negative counters, or a
/// `finished_at` earlier than `started_at`) are clamped to zero instead
/// of wrapping into huge `u64` values.
///
/// # Errors
/// Returns an error if the statement cannot be prepared, the query
/// fails, or any row fails to read.
pub fn fetch_recent_runs(conn: &Connection, limit: usize) -> Result<Vec<SyncRunInfo>> {
    let mut stmt = conn
        .prepare(
            "SELECT id, status, command, started_at, finished_at,
                    total_items_processed, total_errors, error, run_id
             FROM sync_runs
             ORDER BY id DESC
             LIMIT ?1",
        )
        .context("preparing sync runs query")?;
    let rows = stmt
        .query_map([limit as i64], |row| {
            let id: i64 = row.get(0)?;
            let status: String = row.get(1)?;
            let command: String = row.get(2)?;
            let started_at: i64 = row.get(3)?;
            let finished_at: Option<i64> = row.get(4)?;
            let items: Option<i64> = row.get(5)?;
            let errors: Option<i64> = row.get(6)?;
            let error: Option<String> = row.get(7)?;
            let run_id: Option<String> = row.get(8)?;
            Ok((
                id, status, command, started_at, finished_at, items, errors, error, run_id,
            ))
        })
        .context("querying sync runs")?;
    let mut result = Vec::new();
    for row in rows {
        let (id, status, command, started_at, finished_at, items, errors, error, run_id) =
            row.context("reading sync run row")?;
        // Clamp rather than cast: `finished_at < started_at` (clock skew /
        // bad data) would otherwise wrap `as u64` into an absurd duration.
        let duration_ms = finished_at
            .map(|f| u64::try_from(f.saturating_sub(started_at)).unwrap_or(0));
        result.push(SyncRunInfo {
            id,
            status,
            command,
            started_at,
            finished_at,
            duration_ms,
            // Negative counters are invalid; clamp to 0 instead of wrapping.
            items_processed: u64::try_from(items.unwrap_or(0)).unwrap_or(0),
            errors: u64::try_from(errors.unwrap_or(0)).unwrap_or(0),
            error,
            run_id,
        });
    }
    Ok(result)
}
/// Fetch configured project paths from the `projects` table.
///
/// Returns every `path_with_namespace`, sorted alphabetically by the
/// SQL `ORDER BY`.
///
/// # Errors
/// Returns an error if the statement cannot be prepared, the query
/// fails, or any row fails to read.
pub fn fetch_configured_projects(conn: &Connection) -> Result<Vec<String>> {
    let mut stmt = conn
        .prepare("SELECT path_with_namespace FROM projects ORDER BY path_with_namespace")
        .context("preparing projects query")?;
    // Collect into Result so the first bad row aborts with context,
    // exactly as the explicit push-loop did.
    stmt.query_map([], |row| row.get::<_, String>(0))
        .context("querying projects")?
        .map(|row| row.context("reading project row"))
        .collect()
}
// ---------------------------------------------------------------------------
// Tests
// ---------------------------------------------------------------------------
#[cfg(test)]
mod tests {
    use super::*;
    use crate::clock::FakeClock;

    /// Create the minimal schema needed for sync queries.
    /// Mirrors only the columns the production queries read; extra
    /// production columns are intentionally omitted except those with
    /// NOT NULL constraints exercised here.
    fn create_sync_schema(conn: &Connection) {
        conn.execute_batch(
            "
            CREATE TABLE projects (
                id INTEGER PRIMARY KEY,
                gitlab_project_id INTEGER UNIQUE NOT NULL,
                path_with_namespace TEXT NOT NULL
            );
            CREATE TABLE sync_runs (
                id INTEGER PRIMARY KEY,
                started_at INTEGER NOT NULL,
                heartbeat_at INTEGER NOT NULL,
                finished_at INTEGER,
                status TEXT NOT NULL,
                command TEXT NOT NULL,
                error TEXT,
                metrics_json TEXT,
                run_id TEXT,
                total_items_processed INTEGER DEFAULT 0,
                total_errors INTEGER DEFAULT 0
            );
            ",
        )
        .expect("create sync schema");
    }

    /// Insert a `projects` row; `gitlab_project_id` is derived from `id`
    /// to satisfy the UNIQUE NOT NULL constraint.
    fn insert_project(conn: &Connection, id: i64, path: &str) {
        conn.execute(
            "INSERT INTO projects (id, gitlab_project_id, path_with_namespace)
             VALUES (?1, ?2, ?3)",
            rusqlite::params![id, id * 100, path],
        )
        .expect("insert project");
    }

    /// Insert a `sync_runs` row and return its rowid. The heartbeat is
    /// set to `finished_at` when present, else to `started_at`.
    fn insert_sync_run(
        conn: &Connection,
        started_at: i64,
        finished_at: Option<i64>,
        status: &str,
        command: &str,
        items: i64,
        errors: i64,
        error: Option<&str>,
    ) -> i64 {
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command,
                                    total_items_processed, total_errors, error)
             VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?8)",
            rusqlite::params![
                started_at,
                finished_at.unwrap_or(started_at),
                finished_at,
                status,
                command,
                items,
                errors,
                error,
            ],
        )
        .expect("insert sync run");
        conn.last_insert_rowid()
    }

    // -----------------------------------------------------------------------
    // detect_running_sync
    // -----------------------------------------------------------------------

    #[test]
    fn test_detect_running_sync_none_when_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let clock = FakeClock::from_ms(1_700_000_000_000);
        let result = detect_running_sync(&conn, &clock).unwrap();
        assert!(result.is_none());
    }

    #[test]
    fn test_detect_running_sync_none_when_all_completed() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        // Only 'succeeded'/'failed' rows — neither matches status = 'running'.
        insert_sync_run(&conn, now - 60_000, Some(now - 30_000), "succeeded", "sync", 100, 0, None);
        insert_sync_run(&conn, now - 120_000, Some(now - 90_000), "failed", "sync", 50, 2, Some("timeout"));
        let clock = FakeClock::from_ms(now);
        let result = detect_running_sync(&conn, &clock).unwrap();
        assert!(result.is_none());
    }

    #[test]
    fn test_detect_running_sync_found() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        let started = now - 30_000; // 30 seconds ago
        // Heartbeat at started_at (fresh since we just set it)
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, status, command, total_items_processed)
             VALUES (?1, ?2, 'running', 'sync', 42)",
            [started, now - 5_000], // heartbeat 5 seconds ago
        )
        .unwrap();
        let clock = FakeClock::from_ms(now);
        let running = detect_running_sync(&conn, &clock).unwrap().unwrap();
        assert_eq!(running.elapsed_ms, 30_000);
        assert_eq!(running.items_processed, 42);
        // Heartbeat (5 s old) is well within STALE_HEARTBEAT_MS (2 min).
        assert!(!running.stale);
    }

    #[test]
    fn test_detect_running_sync_stale_heartbeat() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        let started = now - 300_000; // 5 minutes ago
        // Heartbeat 3 minutes ago — stale (> STALE_HEARTBEAT_MS of 2 minutes)
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, status, command)
             VALUES (?1, ?2, 'running', 'sync')",
            [started, now - 180_000],
        )
        .unwrap();
        let clock = FakeClock::from_ms(now);
        let running = detect_running_sync(&conn, &clock).unwrap().unwrap();
        assert!(running.stale);
        assert_eq!(running.elapsed_ms, 300_000);
    }

    // -----------------------------------------------------------------------
    // fetch_recent_runs
    // -----------------------------------------------------------------------

    #[test]
    fn test_fetch_recent_runs_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let runs = fetch_recent_runs(&conn, 10).unwrap();
        assert!(runs.is_empty());
    }

    #[test]
    fn test_fetch_recent_runs_ordered_newest_first() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        insert_sync_run(&conn, now - 120_000, Some(now - 90_000), "succeeded", "sync", 100, 0, None);
        insert_sync_run(&conn, now - 60_000, Some(now - 30_000), "succeeded", "sync", 200, 0, None);
        let runs = fetch_recent_runs(&conn, 10).unwrap();
        assert_eq!(runs.len(), 2);
        // Newest first (higher id)
        assert_eq!(runs[0].items_processed, 200);
        assert_eq!(runs[1].items_processed, 100);
    }

    #[test]
    fn test_fetch_recent_runs_respects_limit() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        // Five runs inserted; only the newest 3 should come back.
        for i in 0..5 {
            insert_sync_run(
                &conn,
                now - (5 - i) * 60_000,
                Some(now - (5 - i) * 60_000 + 30_000),
                "succeeded",
                "sync",
                i * 10,
                0,
                None,
            );
        }
        let runs = fetch_recent_runs(&conn, 3).unwrap();
        assert_eq!(runs.len(), 3);
    }

    #[test]
    fn test_fetch_recent_runs_duration_computed() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        // started 60 s ago, finished 15 s ago => 45 s duration.
        insert_sync_run(&conn, now - 60_000, Some(now - 15_000), "succeeded", "sync", 0, 0, None);
        let runs = fetch_recent_runs(&conn, 10).unwrap();
        assert_eq!(runs[0].duration_ms, Some(45_000));
    }

    #[test]
    fn test_fetch_recent_runs_running_no_duration() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        // No finished_at => duration must stay None.
        insert_sync_run(&conn, now - 60_000, None, "running", "sync", 0, 0, None);
        let runs = fetch_recent_runs(&conn, 10).unwrap();
        assert_eq!(runs[0].status, "running");
        assert!(runs[0].duration_ms.is_none());
    }

    #[test]
    fn test_fetch_recent_runs_failed_with_error() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        insert_sync_run(
            &conn,
            now - 60_000,
            Some(now - 30_000),
            "failed",
            "sync",
            50,
            3,
            Some("network timeout"),
        );
        let runs = fetch_recent_runs(&conn, 10).unwrap();
        assert_eq!(runs[0].status, "failed");
        assert_eq!(runs[0].errors, 3);
        assert_eq!(runs[0].error.as_deref(), Some("network timeout"));
    }

    // -----------------------------------------------------------------------
    // fetch_configured_projects
    // -----------------------------------------------------------------------

    #[test]
    fn test_fetch_configured_projects_empty() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let projects = fetch_configured_projects(&conn).unwrap();
        assert!(projects.is_empty());
    }

    #[test]
    fn test_fetch_configured_projects_sorted() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        // Inserted out of order; query's ORDER BY must sort them.
        insert_project(&conn, 1, "group/beta");
        insert_project(&conn, 2, "group/alpha");
        insert_project(&conn, 3, "other/gamma");
        let projects = fetch_configured_projects(&conn).unwrap();
        assert_eq!(projects, vec!["group/alpha", "group/beta", "other/gamma"]);
    }

    // -----------------------------------------------------------------------
    // fetch_sync_overview (integration)
    // -----------------------------------------------------------------------

    #[test]
    fn test_fetch_sync_overview_empty_db() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let clock = FakeClock::from_ms(1_700_000_000_000);
        let overview = fetch_sync_overview(&conn, &clock).unwrap();
        assert!(overview.running.is_none());
        assert!(overview.last_completed.is_none());
        assert!(overview.recent_runs.is_empty());
        assert!(overview.projects.is_empty());
    }

    #[test]
    fn test_fetch_sync_overview_with_history() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        insert_project(&conn, 1, "group/repo");
        insert_sync_run(&conn, now - 120_000, Some(now - 90_000), "succeeded", "sync", 150, 0, None);
        insert_sync_run(&conn, now - 60_000, Some(now - 30_000), "failed", "sync", 50, 2, Some("db locked"));
        let clock = FakeClock::from_ms(now);
        let overview = fetch_sync_overview(&conn, &clock).unwrap();
        assert!(overview.running.is_none());
        assert_eq!(overview.recent_runs.len(), 2);
        assert_eq!(overview.projects, vec!["group/repo"]);
        // last_completed should be the newest completed run (failed, id=2)
        let last = overview.last_completed.unwrap();
        assert_eq!(last.status, "failed");
        assert_eq!(last.errors, 2);
    }

    #[test]
    fn test_fetch_sync_overview_with_running_sync() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        insert_project(&conn, 1, "group/repo");
        // A completed run.
        insert_sync_run(&conn, now - 600_000, Some(now - 570_000), "succeeded", "sync", 200, 0, None);
        // A currently running sync.
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, status, command, total_items_processed)
             VALUES (?1, ?2, 'running', 'sync', 75)",
            [now - 20_000, now - 2_000],
        )
        .unwrap();
        let clock = FakeClock::from_ms(now);
        let overview = fetch_sync_overview(&conn, &clock).unwrap();
        assert!(overview.running.is_some());
        let running = overview.running.unwrap();
        assert_eq!(running.elapsed_ms, 20_000);
        assert_eq!(running.items_processed, 75);
        assert!(!running.stale);
        // last_completed should find the succeeded run, not the running one.
        let last = overview.last_completed.unwrap();
        assert_eq!(last.status, "succeeded");
        assert_eq!(last.items_processed, 200);
    }

    #[test]
    fn test_sync_run_info_with_run_id() {
        let conn = Connection::open_in_memory().unwrap();
        create_sync_schema(&conn);
        let now = 1_700_000_000_000_i64;
        // Direct insert so run_id is populated (the helper leaves it NULL).
        conn.execute(
            "INSERT INTO sync_runs (started_at, heartbeat_at, finished_at, status, command,
                                    total_items_processed, total_errors, run_id)
             VALUES (?1, ?1, ?2, 'succeeded', 'sync', 100, 0, 'abc-123')",
            [now - 60_000, now - 30_000],
        )
        .unwrap();
        let runs = fetch_recent_runs(&conn, 10).unwrap();
        assert_eq!(runs[0].run_id.as_deref(), Some("abc-123"));
    }
}

View File

@@ -64,10 +64,12 @@ pub fn fetch_timeline_events(
let filter = resolve_timeline_scope(conn, scope)?;
let mut events = Vec::new();
collect_tl_created_events(conn, &filter, &mut events)?;
collect_tl_state_events(conn, &filter, &mut events)?;
collect_tl_label_events(conn, &filter, &mut events)?;
collect_tl_milestone_events(conn, &filter, &mut events)?;
// Each collector is given the full limit. After merge-sorting, we truncate
// to `limit`. Worst case we hold 4*limit events in memory (bounded).
collect_tl_created_events(conn, &filter, limit, &mut events)?;
collect_tl_state_events(conn, &filter, limit, &mut events)?;
collect_tl_label_events(conn, &filter, limit, &mut events)?;
collect_tl_milestone_events(conn, &filter, limit, &mut events)?;
// Sort by timestamp descending (most recent first), with stable tiebreak.
events.sort_by(|a, b| {
@@ -85,11 +87,12 @@ pub fn fetch_timeline_events(
fn collect_tl_created_events(
conn: &Connection,
filter: &TimelineFilter,
limit: usize,
events: &mut Vec<TimelineEvent>,
) -> Result<()> {
// Issue created events.
if !matches!(filter, TimelineFilter::MergeRequest(_)) {
let (where_clause, params) = match filter {
let (where_clause, mut params) = match filter {
TimelineFilter::All => (
"1=1".to_string(),
Vec::<Box<dyn rusqlite::types::ToSql>>::new(),
@@ -105,12 +108,16 @@ fn collect_tl_created_events(
TimelineFilter::MergeRequest(_) => unreachable!(),
};
let limit_param = params.len() + 1;
let sql = format!(
"SELECT i.created_at, i.iid, i.title, i.author_username, i.project_id, p.path_with_namespace
FROM issues i
JOIN projects p ON p.id = i.project_id
WHERE {where_clause}"
WHERE {where_clause}
ORDER BY i.created_at DESC
LIMIT ?{limit_param}"
);
params.push(Box::new(limit as i64));
let mut stmt = conn
.prepare(&sql)
@@ -148,7 +155,7 @@ fn collect_tl_created_events(
// MR created events.
if !matches!(filter, TimelineFilter::Issue(_)) {
let (where_clause, params) = match filter {
let (where_clause, mut params) = match filter {
TimelineFilter::All => (
"1=1".to_string(),
Vec::<Box<dyn rusqlite::types::ToSql>>::new(),
@@ -164,12 +171,16 @@ fn collect_tl_created_events(
TimelineFilter::Issue(_) => unreachable!(),
};
let limit_param = params.len() + 1;
let sql = format!(
"SELECT mr.created_at, mr.iid, mr.title, mr.author_username, mr.project_id, p.path_with_namespace
FROM merge_requests mr
JOIN projects p ON p.id = mr.project_id
WHERE {where_clause}"
WHERE {where_clause}
ORDER BY mr.created_at DESC
LIMIT ?{limit_param}"
);
params.push(Box::new(limit as i64));
let mut stmt = conn.prepare(&sql).context("preparing MR created query")?;
let param_refs: Vec<&dyn rusqlite::types::ToSql> =
@@ -252,9 +263,11 @@ fn resolve_event_entity(
fn collect_tl_state_events(
conn: &Connection,
filter: &TimelineFilter,
limit: usize,
events: &mut Vec<TimelineEvent>,
) -> Result<()> {
let (where_clause, params) = resource_event_where(filter);
let (where_clause, mut params) = resource_event_where(filter);
let limit_param = params.len() + 1;
let sql = format!(
"SELECT e.created_at, e.state, e.actor_username,
@@ -266,8 +279,11 @@ fn collect_tl_state_events(
LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id
LEFT JOIN projects pi ON pi.id = i.project_id
LEFT JOIN projects pm ON pm.id = mr.project_id
WHERE {where_clause}"
WHERE {where_clause}
ORDER BY e.created_at DESC
LIMIT ?{limit_param}"
);
params.push(Box::new(limit as i64));
let mut stmt = conn.prepare(&sql).context("preparing state events query")?;
let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(AsRef::as_ref).collect();
@@ -338,9 +354,11 @@ fn collect_tl_state_events(
fn collect_tl_label_events(
conn: &Connection,
filter: &TimelineFilter,
limit: usize,
events: &mut Vec<TimelineEvent>,
) -> Result<()> {
let (where_clause, params) = resource_event_where(filter);
let (where_clause, mut params) = resource_event_where(filter);
let limit_param = params.len() + 1;
let sql = format!(
"SELECT e.created_at, e.action, e.label_name, e.actor_username,
@@ -352,8 +370,11 @@ fn collect_tl_label_events(
LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id
LEFT JOIN projects pi ON pi.id = i.project_id
LEFT JOIN projects pm ON pm.id = mr.project_id
WHERE {where_clause}"
WHERE {where_clause}
ORDER BY e.created_at DESC
LIMIT ?{limit_param}"
);
params.push(Box::new(limit as i64));
let mut stmt = conn.prepare(&sql).context("preparing label events query")?;
let param_refs: Vec<&dyn rusqlite::types::ToSql> = params.iter().map(AsRef::as_ref).collect();
@@ -426,9 +447,11 @@ fn collect_tl_label_events(
fn collect_tl_milestone_events(
conn: &Connection,
filter: &TimelineFilter,
limit: usize,
events: &mut Vec<TimelineEvent>,
) -> Result<()> {
let (where_clause, params) = resource_event_where(filter);
let (where_clause, mut params) = resource_event_where(filter);
let limit_param = params.len() + 1;
let sql = format!(
"SELECT e.created_at, e.action, e.milestone_title, e.actor_username,
@@ -440,8 +463,11 @@ fn collect_tl_milestone_events(
LEFT JOIN merge_requests mr ON mr.id = e.merge_request_id
LEFT JOIN projects pi ON pi.id = i.project_id
LEFT JOIN projects pm ON pm.id = mr.project_id
WHERE {where_clause}"
WHERE {where_clause}
ORDER BY e.created_at DESC
LIMIT ?{limit_param}"
);
params.push(Box::new(limit as i64));
let mut stmt = conn
.prepare(&sql)

View File

@@ -38,20 +38,18 @@ pub fn fetch_trace(
/// Returns distinct `new_path` values scoped to the given project (or all
/// projects if `None`), sorted alphabetically.
pub fn fetch_known_paths(conn: &Connection, project_id: Option<i64>) -> Result<Vec<String>> {
let mut paths = if let Some(pid) = project_id {
let paths = if let Some(pid) = project_id {
let mut stmt = conn.prepare(
"SELECT DISTINCT new_path FROM mr_file_changes WHERE project_id = ?1 ORDER BY new_path",
)?;
let rows = stmt.query_map([pid], |row| row.get::<_, String>(0))?;
rows.filter_map(Result::ok).collect::<Vec<_>>()
rows.collect::<std::result::Result<Vec<_>, _>>()?
} else {
let mut stmt =
conn.prepare("SELECT DISTINCT new_path FROM mr_file_changes ORDER BY new_path")?;
let rows = stmt.query_map([], |row| row.get::<_, String>(0))?;
rows.filter_map(Result::ok).collect::<Vec<_>>()
rows.collect::<std::result::Result<Vec<_>, _>>()?
};
paths.sort();
paths.dedup();
Ok(paths)
}