- Add SourceType::Note with extract_note_document() and ParentMetadataCache
- Migration 022: composite indexes for notes queries + author_id column
- Migration 024: table rebuild adding 'note' to CHECK constraints, defense triggers
- Migration 025: backfill existing non-system notes into dirty queue
- Add lore notes CLI command with 17 filter options (author, path, resolution, etc.)
- Support table/json/jsonl/csv output formats with field selection
- Wire note dirty tracking through discussion and MR discussion ingestion
- Fix test_migration_024_preserves_existing_data off-by-one (tested wrong migration)
- Fix upsert_document_inner returning false for label/path-only changes

967 lines · 32 KiB · Rust
use rusqlite::Connection;
|
|
use sqlite_vec::sqlite3_vec_init;
|
|
use std::fs;
|
|
use std::path::Path;
|
|
use tracing::{debug, info};
|
|
|
|
use super::error::{LoreError, Result};
|
|
|
|
/// Highest schema version this binary knows about.
///
/// Derived from the length of the compiled-in migration list so the constant
/// can never drift out of sync with `MIGRATIONS`. Relies on the invariant
/// that migration versions are contiguous starting at 1.
pub const LATEST_SCHEMA_VERSION: i32 = MIGRATIONS.len() as i32;

/// Ordered `(version, sql)` pairs embedded at compile time via `include_str!`.
///
/// Invariants the rest of this module depends on:
/// - Entries are sorted ascending and versions are contiguous ("001", "002", …),
///   so `MIGRATIONS[i]` is migration version `i + 1`. The test helpers
///   `run_migrations_up_to` / `run_single_migration` index by this rule.
/// - Each version string parses as `i32` (checked at runtime in
///   `run_migrations`).
const MIGRATIONS: &[(&str, &str)] = &[
    ("001", include_str!("../../migrations/001_initial.sql")),
    ("002", include_str!("../../migrations/002_issues.sql")),
    ("003", include_str!("../../migrations/003_indexes.sql")),
    (
        "004",
        include_str!("../../migrations/004_discussions_payload.sql"),
    ),
    (
        "005",
        include_str!("../../migrations/005_assignees_milestone_duedate.sql"),
    ),
    (
        "006",
        include_str!("../../migrations/006_merge_requests.sql"),
    ),
    ("007", include_str!("../../migrations/007_documents.sql")),
    ("008", include_str!("../../migrations/008_fts5.sql")),
    ("009", include_str!("../../migrations/009_embeddings.sql")),
    ("010", include_str!("../../migrations/010_chunk_config.sql")),
    (
        "011",
        include_str!("../../migrations/011_resource_events.sql"),
    ),
    (
        "012",
        include_str!("../../migrations/012_nullable_label_milestone.sql"),
    ),
    (
        "013",
        include_str!("../../migrations/013_resource_event_watermarks.sql"),
    ),
    (
        "014",
        include_str!("../../migrations/014_sync_runs_enrichment.sql"),
    ),
    (
        "015",
        include_str!("../../migrations/015_commit_shas_and_closes_watermark.sql"),
    ),
    (
        "016",
        include_str!("../../migrations/016_mr_file_changes.sql"),
    ),
    ("017", include_str!("../../migrations/017_who_indexes.sql")),
    (
        "018",
        include_str!("../../migrations/018_fix_assignees_composite_index.sql"),
    ),
    (
        "019",
        include_str!("../../migrations/019_list_performance.sql"),
    ),
    (
        "020",
        include_str!("../../migrations/020_mr_diffs_watermark.sql"),
    ),
    (
        "021",
        include_str!("../../migrations/021_work_item_status.sql"),
    ),
    (
        "022",
        include_str!("../../migrations/022_notes_query_index.sql"),
    ),
    (
        "023",
        include_str!("../../migrations/023_issue_detail_fields.sql"),
    ),
    (
        "024",
        include_str!("../../migrations/024_note_documents.sql"),
    ),
    (
        "025",
        include_str!("../../migrations/025_note_dirty_backfill.sql"),
    ),
];
|
|
|
|
/// Open (or create) the SQLite database at `db_path` and configure it.
///
/// Registers the sqlite-vec extension for all future connections, creates the
/// parent directory if missing, and applies the project's standard PRAGMAs
/// (WAL journaling, NORMAL sync, FK enforcement, 5s busy timeout, in-memory
/// temp store, 64 MB cache, 256 MB mmap). Does NOT run migrations — callers
/// pair this with `run_migrations`.
///
/// # Errors
/// Returns an error if the parent directory cannot be created, the database
/// cannot be opened, or any PRAGMA fails to apply.
pub fn create_connection(db_path: &Path) -> Result<Connection> {
    // SAFETY: `sqlite3_vec_init` is an extern "C" function provided by the sqlite-vec
    // crate with the exact signature expected by `sqlite3_auto_extension`. The transmute
    // converts the concrete function pointer to the `Option<unsafe extern "C" fn()>` type
    // that the FFI expects. This is safe because:
    // 1. The function is a C-ABI init callback with a stable signature.
    // 2. SQLite calls it once per new connection, matching sqlite-vec's contract.
    // 3. `sqlite3_auto_extension` is idempotent for the same function pointer.
    #[allow(clippy::missing_transmute_annotations)]
    unsafe {
        rusqlite::ffi::sqlite3_auto_extension(Some(std::mem::transmute(
            sqlite3_vec_init as *const (),
        )));
    }

    // Ensure the containing directory exists before SQLite tries to open the file.
    if let Some(parent) = db_path.parent() {
        fs::create_dir_all(parent)?;
    }

    let conn = Connection::open(db_path)?;

    // PRAGMA order matters loosely: journal_mode/synchronous first so later
    // statements already run under WAL. Values here are mirrored by
    // `verify_pragmas` below — keep the two in sync.
    conn.pragma_update(None, "journal_mode", "WAL")?;
    conn.pragma_update(None, "synchronous", "NORMAL")?;
    conn.pragma_update(None, "foreign_keys", "ON")?;
    conn.pragma_update(None, "busy_timeout", 5000)?;
    conn.pragma_update(None, "temp_store", "MEMORY")?;
    // Negative cache_size is in KiB per SQLite convention: 64 MB page cache.
    conn.pragma_update(None, "cache_size", -64000)?;
    conn.pragma_update(None, "mmap_size", 268_435_456)?;

    debug!(db_path = %db_path.display(), "Database connection created");

    Ok(conn)
}
|
|
|
|
pub fn run_migrations(conn: &Connection) -> Result<()> {
|
|
let has_version_table: bool = conn
|
|
.query_row(
|
|
"SELECT COUNT(*) > 0 FROM sqlite_master WHERE type='table' AND name='schema_version'",
|
|
[],
|
|
|row| row.get(0),
|
|
)
|
|
.unwrap_or(false);
|
|
|
|
let current_version: i32 = if has_version_table {
|
|
conn.query_row(
|
|
"SELECT COALESCE(MAX(version), 0) FROM schema_version",
|
|
[],
|
|
|row| row.get(0),
|
|
)
|
|
.unwrap_or(0)
|
|
} else {
|
|
0
|
|
};
|
|
|
|
for (version_str, sql) in MIGRATIONS {
|
|
let version: i32 = version_str.parse().expect("Invalid migration version");
|
|
|
|
if version <= current_version {
|
|
debug!(version, "Migration already applied");
|
|
continue;
|
|
}
|
|
|
|
let savepoint_name = format!("migration_{}", version);
|
|
conn.execute_batch(&format!("SAVEPOINT {}", savepoint_name))
|
|
.map_err(|e| LoreError::MigrationFailed {
|
|
version,
|
|
message: format!("Failed to create savepoint: {}", e),
|
|
source: Some(e),
|
|
})?;
|
|
|
|
match conn.execute_batch(sql) {
|
|
Ok(()) => {
|
|
// Framework-managed version bookkeeping: ensures the version is
|
|
// always recorded even if a migration .sql omits the INSERT.
|
|
// Uses OR REPLACE so legacy migrations that self-register are harmless.
|
|
conn.execute(
|
|
"INSERT OR REPLACE INTO schema_version (version, applied_at, description) \
|
|
VALUES (?1, strftime('%s', 'now') * 1000, ?2)",
|
|
rusqlite::params![version, version_str],
|
|
)
|
|
.map_err(|e| LoreError::MigrationFailed {
|
|
version,
|
|
message: format!("Failed to record schema version: {e}"),
|
|
source: Some(e),
|
|
})?;
|
|
|
|
conn.execute_batch(&format!("RELEASE {}", savepoint_name))
|
|
.map_err(|e| LoreError::MigrationFailed {
|
|
version,
|
|
message: format!("Failed to release savepoint: {}", e),
|
|
source: Some(e),
|
|
})?;
|
|
}
|
|
Err(e) => {
|
|
let _ = conn.execute_batch(&format!("ROLLBACK TO {}", savepoint_name));
|
|
let _ = conn.execute_batch(&format!("RELEASE {}", savepoint_name));
|
|
return Err(LoreError::MigrationFailed {
|
|
version,
|
|
message: e.to_string(),
|
|
source: Some(e),
|
|
});
|
|
}
|
|
}
|
|
|
|
info!(version, "Migration applied");
|
|
}
|
|
|
|
Ok(())
|
|
}
|
|
|
|
#[allow(dead_code)]
|
|
pub fn run_migrations_from_dir(conn: &Connection, migrations_dir: &Path) -> Result<()> {
|
|
let has_version_table: bool = conn
|
|
.query_row(
|
|
"SELECT COUNT(*) > 0 FROM sqlite_master WHERE type='table' AND name='schema_version'",
|
|
[],
|
|
|row| row.get(0),
|
|
)
|
|
.unwrap_or(false);
|
|
|
|
let current_version: i32 = if has_version_table {
|
|
conn.query_row(
|
|
"SELECT COALESCE(MAX(version), 0) FROM schema_version",
|
|
[],
|
|
|row| row.get(0),
|
|
)
|
|
.unwrap_or(0)
|
|
} else {
|
|
0
|
|
};
|
|
|
|
let mut migrations: Vec<_> = fs::read_dir(migrations_dir)?
|
|
.filter_map(|entry| entry.ok())
|
|
.filter(|entry| entry.path().extension().is_some_and(|ext| ext == "sql"))
|
|
.collect();
|
|
|
|
migrations.sort_by_key(|entry| entry.file_name());
|
|
|
|
for entry in migrations {
|
|
let filename = entry.file_name();
|
|
let filename_str = filename.to_string_lossy();
|
|
|
|
let version: i32 = match filename_str.split('_').next().and_then(|v| v.parse().ok()) {
|
|
Some(v) => v,
|
|
None => continue,
|
|
};
|
|
|
|
if version <= current_version {
|
|
continue;
|
|
}
|
|
|
|
let sql = fs::read_to_string(entry.path())?;
|
|
|
|
let savepoint_name = format!("migration_{}", version);
|
|
conn.execute_batch(&format!("SAVEPOINT {}", savepoint_name))
|
|
.map_err(|e| LoreError::MigrationFailed {
|
|
version,
|
|
message: format!("Failed to create savepoint: {}", e),
|
|
source: Some(e),
|
|
})?;
|
|
|
|
match conn.execute_batch(&sql) {
|
|
Ok(()) => {
|
|
conn.execute_batch(&format!("RELEASE {}", savepoint_name))
|
|
.map_err(|e| LoreError::MigrationFailed {
|
|
version,
|
|
message: format!("Failed to release savepoint: {}", e),
|
|
source: Some(e),
|
|
})?;
|
|
}
|
|
Err(e) => {
|
|
let _ = conn.execute_batch(&format!("ROLLBACK TO {}", savepoint_name));
|
|
let _ = conn.execute_batch(&format!("RELEASE {}", savepoint_name));
|
|
return Err(LoreError::MigrationFailed {
|
|
version,
|
|
message: e.to_string(),
|
|
source: Some(e),
|
|
});
|
|
}
|
|
}
|
|
|
|
info!(version, file = %filename_str, "Migration applied");
|
|
}
|
|
|
|
Ok(())
|
|
}
|
|
|
|
pub fn verify_pragmas(conn: &Connection) -> (bool, Vec<String>) {
|
|
let mut issues = Vec::new();
|
|
|
|
let journal_mode: String = conn
|
|
.pragma_query_value(None, "journal_mode", |row| row.get(0))
|
|
.unwrap_or_default();
|
|
if journal_mode != "wal" {
|
|
issues.push(format!("journal_mode is {journal_mode}, expected 'wal'"));
|
|
}
|
|
|
|
let foreign_keys: i32 = conn
|
|
.pragma_query_value(None, "foreign_keys", |row| row.get(0))
|
|
.unwrap_or(0);
|
|
if foreign_keys != 1 {
|
|
issues.push(format!("foreign_keys is {foreign_keys}, expected 1"));
|
|
}
|
|
|
|
let busy_timeout: i32 = conn
|
|
.pragma_query_value(None, "busy_timeout", |row| row.get(0))
|
|
.unwrap_or(0);
|
|
if busy_timeout != 5000 {
|
|
issues.push(format!("busy_timeout is {busy_timeout}, expected 5000"));
|
|
}
|
|
|
|
let synchronous: i32 = conn
|
|
.pragma_query_value(None, "synchronous", |row| row.get(0))
|
|
.unwrap_or(0);
|
|
if synchronous != 1 {
|
|
issues.push(format!("synchronous is {synchronous}, expected 1 (NORMAL)"));
|
|
}
|
|
|
|
(issues.is_empty(), issues)
|
|
}
|
|
|
|
pub fn get_schema_version(conn: &Connection) -> i32 {
|
|
let has_version_table: bool = conn
|
|
.query_row(
|
|
"SELECT COUNT(*) > 0 FROM sqlite_master WHERE type='table' AND name='schema_version'",
|
|
[],
|
|
|row| row.get(0),
|
|
)
|
|
.unwrap_or(false);
|
|
|
|
if !has_version_table {
|
|
return 0;
|
|
}
|
|
|
|
conn.query_row(
|
|
"SELECT COALESCE(MAX(version), 0) FROM schema_version",
|
|
[],
|
|
|row| row.get(0),
|
|
)
|
|
.unwrap_or(0)
|
|
}
|
|
|
|
#[cfg(test)]
mod tests {
    use super::*;

    /// Fresh in-memory database with every embedded migration applied.
    fn setup_migrated_db() -> Connection {
        let conn = create_connection(Path::new(":memory:")).unwrap();
        run_migrations(&conn).unwrap();
        conn
    }

    /// True if an index with this exact name exists in sqlite_master.
    fn index_exists(conn: &Connection, index_name: &str) -> bool {
        conn.query_row(
            "SELECT COUNT(*) > 0 FROM sqlite_master WHERE type='index' AND name=?1",
            [index_name],
            |row| row.get(0),
        )
        .unwrap_or(false)
    }

    /// True if `table` has a column named `column` (via PRAGMA table_info;
    /// column 1 of the pragma result is the column name).
    fn column_exists(conn: &Connection, table: &str, column: &str) -> bool {
        let sql = format!("PRAGMA table_info({})", table);
        let mut stmt = conn.prepare(&sql).unwrap();
        let columns: Vec<String> = stmt
            .query_map([], |row| row.get::<_, String>(1))
            .unwrap()
            .filter_map(|r| r.ok())
            .collect();
        columns.contains(&column.to_string())
    }

    #[test]
    fn test_migration_022_indexes_exist() {
        let conn = setup_migrated_db();

        // New indexes from migration 022
        assert!(
            index_exists(&conn, "idx_notes_user_created"),
            "idx_notes_user_created should exist"
        );
        assert!(
            index_exists(&conn, "idx_notes_project_created"),
            "idx_notes_project_created should exist"
        );
        assert!(
            index_exists(&conn, "idx_notes_author_id"),
            "idx_notes_author_id should exist"
        );

        // Discussion JOIN indexes (idx_discussions_issue_id is new;
        // idx_discussions_mr_id already existed from migration 006 but
        // IF NOT EXISTS makes it safe)
        assert!(
            index_exists(&conn, "idx_discussions_issue_id"),
            "idx_discussions_issue_id should exist"
        );
        assert!(
            index_exists(&conn, "idx_discussions_mr_id"),
            "idx_discussions_mr_id should exist"
        );

        // author_id column on notes
        assert!(
            column_exists(&conn, "notes", "author_id"),
            "notes.author_id column should exist"
        );
    }

    // -- Helper: insert a minimal project for FK satisfaction --
    // Returns the new row id.
    fn insert_test_project(conn: &Connection) -> i64 {
        conn.execute(
            "INSERT INTO projects (gitlab_project_id, path_with_namespace, web_url) \
             VALUES (1000, 'test/project', 'https://example.com/test/project')",
            [],
        )
        .unwrap();
        conn.last_insert_rowid()
    }

    // -- Helper: insert a minimal issue --
    // Returns the new row id; `project_id` satisfies the FK to projects.
    fn insert_test_issue(conn: &Connection, project_id: i64) -> i64 {
        conn.execute(
            "INSERT INTO issues (gitlab_id, project_id, iid, state, author_username, \
             created_at, updated_at, last_seen_at) \
             VALUES (100, ?1, 1, 'opened', 'alice', 1000, 1000, 1000)",
            [project_id],
        )
        .unwrap();
        conn.last_insert_rowid()
    }

    // -- Helper: insert a minimal discussion --
    // Attached to an Issue noteable; returns the new row id.
    fn insert_test_discussion(conn: &Connection, project_id: i64, issue_id: i64) -> i64 {
        conn.execute(
            "INSERT INTO discussions (gitlab_discussion_id, project_id, issue_id, \
             noteable_type, last_seen_at) \
             VALUES ('disc-001', ?1, ?2, 'Issue', 1000)",
            rusqlite::params![project_id, issue_id],
        )
        .unwrap();
        conn.last_insert_rowid()
    }

    // -- Helper: insert a minimal non-system note --
    // `is_system` is stored as 0/1; returns the new row id.
    #[allow(clippy::too_many_arguments)]
    fn insert_test_note(
        conn: &Connection,
        gitlab_id: i64,
        discussion_id: i64,
        project_id: i64,
        is_system: bool,
    ) -> i64 {
        conn.execute(
            "INSERT INTO notes (gitlab_id, discussion_id, project_id, is_system, \
             author_username, body, created_at, updated_at, last_seen_at) \
             VALUES (?1, ?2, ?3, ?4, 'alice', 'note body', 1000, 1000, 1000)",
            rusqlite::params![gitlab_id, discussion_id, project_id, is_system as i32],
        )
        .unwrap();
        conn.last_insert_rowid()
    }

    // -- Helper: insert a document --
    // Fixed content/hash; returns the new row id.
    fn insert_test_document(
        conn: &Connection,
        source_type: &str,
        source_id: i64,
        project_id: i64,
    ) -> i64 {
        conn.execute(
            "INSERT INTO documents (source_type, source_id, project_id, content_text, content_hash) \
             VALUES (?1, ?2, ?3, 'test content', 'hash123')",
            rusqlite::params![source_type, source_id, project_id],
        )
        .unwrap();
        conn.last_insert_rowid()
    }

    // Migration 024 rebuilds documents/dirty_sources with 'note' added to the
    // CHECK constraint on source_type; both tables must accept it afterwards.
    #[test]
    fn test_migration_024_allows_note_source_type() {
        let conn = setup_migrated_db();
        let pid = insert_test_project(&conn);

        // Should succeed — 'note' is now allowed
        conn.execute(
            "INSERT INTO documents (source_type, source_id, project_id, content_text, content_hash) \
             VALUES ('note', 1, ?1, 'note content', 'hash-note')",
            [pid],
        )
        .expect("INSERT with source_type='note' into documents should succeed");

        // dirty_sources should also accept 'note'
        conn.execute(
            "INSERT INTO dirty_sources (source_type, source_id, queued_at) \
             VALUES ('note', 1, 1000)",
            [],
        )
        .expect("INSERT with source_type='note' into dirty_sources should succeed");
    }

    #[test]
    fn test_migration_024_preserves_existing_data() {
        // Run migrations up to 023 only, insert data, then apply 024
        // Migration 024 is at index 23 (0-based). Use hardcoded index so adding
        // later migrations doesn't silently shift what this test exercises.
        let conn = create_connection(Path::new(":memory:")).unwrap();

        // Apply migrations 001-023 (indices 0..23)
        run_migrations_up_to(&conn, 23);

        let pid = insert_test_project(&conn);

        // Insert a document with existing source_type
        conn.execute(
            "INSERT INTO documents (source_type, source_id, project_id, content_text, content_hash, title) \
             VALUES ('issue', 1, ?1, 'issue content', 'hash-issue', 'Test Issue')",
            [pid],
        )
        .unwrap();
        let doc_id: i64 = conn.last_insert_rowid();

        // Insert junction data
        conn.execute(
            "INSERT INTO document_labels (document_id, label_name) VALUES (?1, 'bug')",
            [doc_id],
        )
        .unwrap();
        conn.execute(
            "INSERT INTO document_paths (document_id, path) VALUES (?1, 'src/main.rs')",
            [doc_id],
        )
        .unwrap();

        // Insert dirty_sources row
        conn.execute(
            "INSERT INTO dirty_sources (source_type, source_id, queued_at) VALUES ('issue', 1, 1000)",
            [],
        )
        .unwrap();

        // Now apply migration 024 (index 23) — the table-rebuild migration
        run_single_migration(&conn, 23);

        // Verify document still exists with correct data
        let (st, content, title): (String, String, String) = conn
            .query_row(
                "SELECT source_type, content_text, title FROM documents WHERE id = ?1",
                [doc_id],
                |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?)),
            )
            .unwrap();
        assert_eq!(st, "issue");
        assert_eq!(content, "issue content");
        assert_eq!(title, "Test Issue");

        // Verify junction data preserved
        let label_count: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM document_labels WHERE document_id = ?1",
                [doc_id],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(label_count, 1);

        let path_count: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM document_paths WHERE document_id = ?1",
                [doc_id],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(path_count, 1);

        // Verify dirty_sources preserved
        let dirty_count: i64 = conn
            .query_row("SELECT COUNT(*) FROM dirty_sources", [], |row| row.get(0))
            .unwrap();
        assert_eq!(dirty_count, 1);
    }

    // The 024 table rebuild must recreate (not drop) the FTS sync triggers.
    #[test]
    fn test_migration_024_fts_triggers_intact() {
        let conn = setup_migrated_db();
        let pid = insert_test_project(&conn);

        // Insert a document after migration — FTS trigger should fire
        let doc_id = insert_test_document(&conn, "note", 1, pid);

        // Verify FTS entry exists
        let fts_count: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM documents_fts WHERE documents_fts MATCH 'test'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        assert!(fts_count > 0, "FTS trigger should have created an entry");

        // Verify update trigger works
        conn.execute(
            "UPDATE documents SET content_text = 'updated content' WHERE id = ?1",
            [doc_id],
        )
        .unwrap();

        let fts_updated: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM documents_fts WHERE documents_fts MATCH 'updated'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        assert!(
            fts_updated > 0,
            "FTS update trigger should reflect new content"
        );

        // Verify delete trigger works
        conn.execute("DELETE FROM documents WHERE id = ?1", [doc_id])
            .unwrap();

        let fts_after_delete: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM documents_fts WHERE documents_fts MATCH 'updated'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(
            fts_after_delete, 0,
            "FTS delete trigger should remove the entry"
        );
    }

    // Smoke check: rebuilt tables exist, are queryable, and start empty.
    #[test]
    fn test_migration_024_row_counts_preserved() {
        let conn = setup_migrated_db();

        // After full migration, tables should exist and be queryable
        let doc_count: i64 = conn
            .query_row("SELECT COUNT(*) FROM documents", [], |row| row.get(0))
            .unwrap();
        assert_eq!(doc_count, 0, "Fresh DB should have 0 documents");

        let dirty_count: i64 = conn
            .query_row("SELECT COUNT(*) FROM dirty_sources", [], |row| row.get(0))
            .unwrap();
        assert_eq!(dirty_count, 0, "Fresh DB should have 0 dirty_sources");
    }

    // The table rebuild in 024 must not corrupt the file or break FKs.
    #[test]
    fn test_migration_024_integrity_checks_pass() {
        let conn = setup_migrated_db();

        // PRAGMA integrity_check
        let integrity: String = conn
            .query_row("PRAGMA integrity_check", [], |row| row.get(0))
            .unwrap();
        assert_eq!(integrity, "ok", "Database integrity check should pass");

        // PRAGMA foreign_key_check (returns rows only if there are violations)
        let fk_violations: i64 = conn
            .query_row("SELECT COUNT(*) FROM pragma_foreign_key_check", [], |row| {
                row.get(0)
            })
            .unwrap();
        assert_eq!(fk_violations, 0, "No foreign key violations should exist");
    }

    // Defense trigger: deleting a non-system note removes its document.
    #[test]
    fn test_migration_024_note_delete_trigger_cleans_document() {
        let conn = setup_migrated_db();
        let pid = insert_test_project(&conn);
        let issue_id = insert_test_issue(&conn, pid);
        let disc_id = insert_test_discussion(&conn, pid, issue_id);
        let note_id = insert_test_note(&conn, 200, disc_id, pid, false);

        // Create a document for this note
        insert_test_document(&conn, "note", note_id, pid);

        let doc_before: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM documents WHERE source_type = 'note' AND source_id = ?1",
                [note_id],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(doc_before, 1);

        // Delete the note — trigger should remove the document
        conn.execute("DELETE FROM notes WHERE id = ?1", [note_id])
            .unwrap();

        let doc_after: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM documents WHERE source_type = 'note' AND source_id = ?1",
                [note_id],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(
            doc_after, 0,
            "notes_ad_cleanup trigger should delete the document"
        );
    }

    // Defense trigger: flipping a note to is_system=1 removes its document.
    #[test]
    fn test_migration_024_note_system_flip_trigger_cleans_document() {
        let conn = setup_migrated_db();
        let pid = insert_test_project(&conn);
        let issue_id = insert_test_issue(&conn, pid);
        let disc_id = insert_test_discussion(&conn, pid, issue_id);
        let note_id = insert_test_note(&conn, 201, disc_id, pid, false);

        // Create a document for this note
        insert_test_document(&conn, "note", note_id, pid);

        let doc_before: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM documents WHERE source_type = 'note' AND source_id = ?1",
                [note_id],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(doc_before, 1);

        // Flip is_system from 0 to 1 — trigger should remove the document
        conn.execute("UPDATE notes SET is_system = 1 WHERE id = ?1", [note_id])
            .unwrap();

        let doc_after: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM documents WHERE source_type = 'note' AND source_id = ?1",
                [note_id],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(
            doc_after, 0,
            "notes_au_system_cleanup trigger should delete the document"
        );
    }

    // Trigger guard: the delete-cleanup trigger must ignore system notes.
    #[test]
    fn test_migration_024_system_note_delete_trigger_does_not_fire() {
        let conn = setup_migrated_db();
        let pid = insert_test_project(&conn);
        let issue_id = insert_test_issue(&conn, pid);
        let disc_id = insert_test_discussion(&conn, pid, issue_id);

        // Insert a system note (is_system = true)
        let note_id = insert_test_note(&conn, 202, disc_id, pid, true);

        // Manually insert a document (shouldn't exist for system notes in practice,
        // but we test the trigger guard)
        insert_test_document(&conn, "note", note_id, pid);

        let doc_before: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM documents WHERE source_type = 'note' AND source_id = ?1",
                [note_id],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(doc_before, 1);

        // Delete system note — trigger has WHEN old.is_system = 0 so it should NOT fire
        conn.execute("DELETE FROM notes WHERE id = ?1", [note_id])
            .unwrap();

        let doc_after: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM documents WHERE source_type = 'note' AND source_id = ?1",
                [note_id],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(
            doc_after, 1,
            "notes_ad_cleanup trigger should NOT fire for system notes"
        );
    }

    /// Run migrations only up to version `up_to` (inclusive).
    ///
    /// Bypasses the savepoint machinery of `run_migrations`; relies on the
    /// MIGRATIONS invariant that index i holds version i + 1, so
    /// `MIGRATIONS[..up_to]` applies versions 1..=up_to.
    fn run_migrations_up_to(conn: &Connection, up_to: usize) {
        // schema_version may be created by migration 001; IF NOT EXISTS keeps
        // this pre-creation harmless either way.
        conn.execute_batch(
            "CREATE TABLE IF NOT EXISTS schema_version ( \
             version INTEGER PRIMARY KEY, applied_at INTEGER NOT NULL, description TEXT);",
        )
        .unwrap();

        for (version_str, sql) in &MIGRATIONS[..up_to] {
            let version: i32 = version_str.parse().unwrap();
            conn.execute_batch(sql).unwrap();
            conn.execute(
                "INSERT OR REPLACE INTO schema_version (version, applied_at, description) \
                 VALUES (?1, strftime('%s', 'now') * 1000, ?2)",
                rusqlite::params![version, version_str],
            )
            .unwrap();
        }
    }

    /// Run a single migration by index (0-based), i.e. version `index + 1`,
    /// recording it in schema_version afterwards.
    fn run_single_migration(conn: &Connection, index: usize) {
        let (version_str, sql) = MIGRATIONS[index];
        let version: i32 = version_str.parse().unwrap();
        conn.execute_batch(sql).unwrap();
        conn.execute(
            "INSERT OR REPLACE INTO schema_version (version, applied_at, description) \
             VALUES (?1, strftime('%s', 'now') * 1000, ?2)",
            rusqlite::params![version, version_str],
        )
        .unwrap();
    }

    // Migration 025 backfill: non-system notes without documents get queued;
    // system notes are excluded.
    #[test]
    fn test_migration_025_backfills_existing_notes() {
        let conn = create_connection(Path::new(":memory:")).unwrap();
        // Run all migrations through 024 (index 0..24)
        run_migrations_up_to(&conn, 24);

        let pid = insert_test_project(&conn);
        let issue_id = insert_test_issue(&conn, pid);
        let disc_id = insert_test_discussion(&conn, pid, issue_id);

        // Insert 5 non-system notes
        for i in 1..=5 {
            insert_test_note(&conn, 300 + i, disc_id, pid, false);
        }
        // Insert 2 system notes
        for i in 1..=2 {
            insert_test_note(&conn, 400 + i, disc_id, pid, true);
        }

        // Run migration 025
        run_single_migration(&conn, 24);

        let dirty_count: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM dirty_sources WHERE source_type = 'note'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(
            dirty_count, 5,
            "Migration 025 should backfill 5 non-system notes"
        );

        // Verify system notes were not backfilled
        // NOTE(review): despite the name, this holds ALL queued note ids from
        // dirty_sources, not just system-note ids.
        let system_note_ids: Vec<i64> = {
            let mut stmt = conn
                .prepare(
                    "SELECT source_id FROM dirty_sources WHERE source_type = 'note' ORDER BY source_id",
                )
                .unwrap();
            stmt.query_map([], |row| row.get(0))
                .unwrap()
                .collect::<std::result::Result<Vec<_>, _>>()
                .unwrap()
        };
        // System note ids should not appear
        let all_system_note_ids: Vec<i64> = {
            let mut stmt = conn
                .prepare("SELECT id FROM notes WHERE is_system = 1 ORDER BY id")
                .unwrap();
            stmt.query_map([], |row| row.get(0))
                .unwrap()
                .collect::<std::result::Result<Vec<_>, _>>()
                .unwrap()
        };
        for sys_id in &all_system_note_ids {
            assert!(
                !system_note_ids.contains(sys_id),
                "System note id {} should not be in dirty_sources",
                sys_id
            );
        }
    }

    // Migration 025 backfill skips notes that already have a document.
    #[test]
    fn test_migration_025_idempotent_with_existing_documents() {
        let conn = create_connection(Path::new(":memory:")).unwrap();
        run_migrations_up_to(&conn, 24);

        let pid = insert_test_project(&conn);
        let issue_id = insert_test_issue(&conn, pid);
        let disc_id = insert_test_discussion(&conn, pid, issue_id);

        // Insert 3 non-system notes
        let note_ids: Vec<i64> = (1..=3)
            .map(|i| insert_test_note(&conn, 500 + i, disc_id, pid, false))
            .collect();

        // Create documents for 2 of 3 notes (simulating already-generated docs)
        insert_test_document(&conn, "note", note_ids[0], pid);
        insert_test_document(&conn, "note", note_ids[1], pid);

        // Run migration 025
        run_single_migration(&conn, 24);

        let dirty_count: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM dirty_sources WHERE source_type = 'note'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(
            dirty_count, 1,
            "Only the note without a document should be backfilled"
        );

        // Verify the correct note was queued
        let queued_id: i64 = conn
            .query_row(
                "SELECT source_id FROM dirty_sources WHERE source_type = 'note'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(queued_id, note_ids[2]);
    }

    // Migration 025 backfill must not clobber rows already in the dirty queue.
    #[test]
    fn test_migration_025_skips_notes_already_in_dirty_queue() {
        let conn = create_connection(Path::new(":memory:")).unwrap();
        run_migrations_up_to(&conn, 24);

        let pid = insert_test_project(&conn);
        let issue_id = insert_test_issue(&conn, pid);
        let disc_id = insert_test_discussion(&conn, pid, issue_id);

        // Insert 3 non-system notes
        let note_ids: Vec<i64> = (1..=3)
            .map(|i| insert_test_note(&conn, 600 + i, disc_id, pid, false))
            .collect();

        // Pre-queue one note in dirty_sources
        conn.execute(
            "INSERT INTO dirty_sources (source_type, source_id, queued_at) VALUES ('note', ?1, 999)",
            [note_ids[0]],
        )
        .unwrap();

        // Run migration 025
        run_single_migration(&conn, 24);

        let dirty_count: i64 = conn
            .query_row(
                "SELECT COUNT(*) FROM dirty_sources WHERE source_type = 'note'",
                [],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(
            dirty_count, 3,
            "All 3 notes should be in dirty_sources (1 pre-existing + 2 new)"
        );

        // Verify the pre-existing entry preserved its original queued_at
        let original_queued_at: i64 = conn
            .query_row(
                "SELECT queued_at FROM dirty_sources WHERE source_type = 'note' AND source_id = ?1",
                [note_ids[0]],
                |row| row.get(0),
            )
            .unwrap();
        assert_eq!(
            original_queued_at, 999,
            "ON CONFLICT DO NOTHING should preserve the original queued_at"
        );
    }
}
|