Move inline #[cfg(test)] mod tests { ... } blocks from 22 source files
into dedicated _tests.rs companion files, wired via:
#[cfg(test)]
#[path = "module_tests.rs"]
mod tests;
This keeps implementation-focused source files leaner and more scannable
while preserving full access to private items through `use super::*;`.
Modules extracted:
core: db, note_parser, payloads, project, references, sync_run,
timeline_collect, timeline_expand, timeline_seed
cli: list (55 tests), who (75 tests)
documents: extractor (43 tests), regenerator
embedding: change_detector, chunking
gitlab: graphql (wiremock async tests), transformers/issue
ingestion: dirty_tracker, discussions, issues, mr_diffs
Also adds conflicts_with("explain_score") to the --detail flag in the
who command to prevent mutually exclusive flags from being combined.
All 629 unit tests pass. No behavior changes.
Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
633 lines · 19 KiB · Rust
use super::*;
|
|
|
|
fn setup_migrated_db() -> Connection {
|
|
let conn = create_connection(Path::new(":memory:")).unwrap();
|
|
run_migrations(&conn).unwrap();
|
|
conn
|
|
}
|
|
|
|
fn index_exists(conn: &Connection, index_name: &str) -> bool {
|
|
conn.query_row(
|
|
"SELECT COUNT(*) > 0 FROM sqlite_master WHERE type='index' AND name=?1",
|
|
[index_name],
|
|
|row| row.get(0),
|
|
)
|
|
.unwrap_or(false)
|
|
}
|
|
|
|
fn column_exists(conn: &Connection, table: &str, column: &str) -> bool {
|
|
let sql = format!("PRAGMA table_info({})", table);
|
|
let mut stmt = conn.prepare(&sql).unwrap();
|
|
let columns: Vec<String> = stmt
|
|
.query_map([], |row| row.get::<_, String>(1))
|
|
.unwrap()
|
|
.filter_map(|r| r.ok())
|
|
.collect();
|
|
columns.contains(&column.to_string())
|
|
}
|
|
|
|
#[test]
fn test_migration_022_indexes_exist() {
    let conn = setup_migrated_db();

    // Indexes introduced (or re-asserted via IF NOT EXISTS) by migration 022.
    // idx_discussions_mr_id already existed from migration 006, but the
    // IF NOT EXISTS form makes re-creating it safe, so it belongs here too.
    let expected_indexes = [
        "idx_notes_user_created",
        "idx_notes_project_created",
        "idx_notes_author_id",
        "idx_discussions_issue_id",
        "idx_discussions_mr_id",
    ];
    for name in expected_indexes {
        assert!(index_exists(&conn, name), "{} should exist", name);
    }

    // Migration 022 also adds the author_id column to notes.
    assert!(
        column_exists(&conn, "notes", "author_id"),
        "notes.author_id column should exist"
    );
}
// -- Helper: insert a minimal project for FK satisfaction --
|
|
fn insert_test_project(conn: &Connection) -> i64 {
|
|
conn.execute(
|
|
"INSERT INTO projects (gitlab_project_id, path_with_namespace, web_url) \
|
|
VALUES (1000, 'test/project', 'https://example.com/test/project')",
|
|
[],
|
|
)
|
|
.unwrap();
|
|
conn.last_insert_rowid()
|
|
}
|
|
|
|
// -- Helper: insert a minimal issue --
|
|
fn insert_test_issue(conn: &Connection, project_id: i64) -> i64 {
|
|
conn.execute(
|
|
"INSERT INTO issues (gitlab_id, project_id, iid, state, author_username, \
|
|
created_at, updated_at, last_seen_at) \
|
|
VALUES (100, ?1, 1, 'opened', 'alice', 1000, 1000, 1000)",
|
|
[project_id],
|
|
)
|
|
.unwrap();
|
|
conn.last_insert_rowid()
|
|
}
|
|
|
|
// -- Helper: insert a minimal discussion --
|
|
fn insert_test_discussion(conn: &Connection, project_id: i64, issue_id: i64) -> i64 {
|
|
conn.execute(
|
|
"INSERT INTO discussions (gitlab_discussion_id, project_id, issue_id, \
|
|
noteable_type, last_seen_at) \
|
|
VALUES ('disc-001', ?1, ?2, 'Issue', 1000)",
|
|
rusqlite::params![project_id, issue_id],
|
|
)
|
|
.unwrap();
|
|
conn.last_insert_rowid()
|
|
}
|
|
|
|
// -- Helper: insert a minimal non-system note --
|
|
#[allow(clippy::too_many_arguments)]
|
|
fn insert_test_note(
|
|
conn: &Connection,
|
|
gitlab_id: i64,
|
|
discussion_id: i64,
|
|
project_id: i64,
|
|
is_system: bool,
|
|
) -> i64 {
|
|
conn.execute(
|
|
"INSERT INTO notes (gitlab_id, discussion_id, project_id, is_system, \
|
|
author_username, body, created_at, updated_at, last_seen_at) \
|
|
VALUES (?1, ?2, ?3, ?4, 'alice', 'note body', 1000, 1000, 1000)",
|
|
rusqlite::params![gitlab_id, discussion_id, project_id, is_system as i32],
|
|
)
|
|
.unwrap();
|
|
conn.last_insert_rowid()
|
|
}
|
|
|
|
// -- Helper: insert a document --
|
|
fn insert_test_document(
|
|
conn: &Connection,
|
|
source_type: &str,
|
|
source_id: i64,
|
|
project_id: i64,
|
|
) -> i64 {
|
|
conn.execute(
|
|
"INSERT INTO documents (source_type, source_id, project_id, content_text, content_hash) \
|
|
VALUES (?1, ?2, ?3, 'test content', 'hash123')",
|
|
rusqlite::params![source_type, source_id, project_id],
|
|
)
|
|
.unwrap();
|
|
conn.last_insert_rowid()
|
|
}
|
|
|
|
#[test]
fn test_migration_024_allows_note_source_type() {
    let conn = setup_migrated_db();
    let pid = insert_test_project(&conn);

    // After migration 024 the documents CHECK constraint admits 'note'.
    let doc_insert = conn.execute(
        "INSERT INTO documents (source_type, source_id, project_id, content_text, content_hash) \
         VALUES ('note', 1, ?1, 'note content', 'hash-note')",
        [pid],
    );
    doc_insert.expect("INSERT with source_type='note' into documents should succeed");

    // ...and dirty_sources accepts 'note' as well.
    let dirty_insert = conn.execute(
        "INSERT INTO dirty_sources (source_type, source_id, queued_at) \
         VALUES ('note', 1, 1000)",
        [],
    );
    dirty_insert.expect("INSERT with source_type='note' into dirty_sources should succeed");
}
/// Migration 024 is a table-rebuild migration; pre-existing rows in
/// documents, the junction tables, and dirty_sources must survive it intact.
#[test]
fn test_migration_024_preserves_existing_data() {
    // Run migrations up to 023 only, insert data, then apply 024.
    // Migration 024 is at index 23 (0-based). Use hardcoded index so adding
    // later migrations doesn't silently shift what this test exercises.
    let conn = create_connection(Path::new(":memory:")).unwrap();

    // Apply migrations 001-023 (indices 0..23)
    run_migrations_up_to(&conn, 23);

    let pid = insert_test_project(&conn);

    // Insert a document with a source_type ('issue') that was already legal
    // before migration 024.
    conn.execute(
        "INSERT INTO documents (source_type, source_id, project_id, content_text, content_hash, title) \
         VALUES ('issue', 1, ?1, 'issue content', 'hash-issue', 'Test Issue')",
        [pid],
    )
    .unwrap();
    let doc_id: i64 = conn.last_insert_rowid();

    // Insert junction data referencing the document.
    conn.execute(
        "INSERT INTO document_labels (document_id, label_name) VALUES (?1, 'bug')",
        [doc_id],
    )
    .unwrap();
    conn.execute(
        "INSERT INTO document_paths (document_id, path) VALUES (?1, 'src/main.rs')",
        [doc_id],
    )
    .unwrap();

    // Insert a dirty_sources row (also rebuilt by 024).
    conn.execute(
        "INSERT INTO dirty_sources (source_type, source_id, queued_at) VALUES ('issue', 1, 1000)",
        [],
    )
    .unwrap();

    // Now apply migration 024 (index 23) -- the table-rebuild migration.
    run_single_migration(&conn, 23);

    // Verify the document still exists with correct data after the rebuild.
    let (st, content, title): (String, String, String) = conn
        .query_row(
            "SELECT source_type, content_text, title FROM documents WHERE id = ?1",
            [doc_id],
            |row| Ok((row.get(0)?, row.get(1)?, row.get(2)?)),
        )
        .unwrap();
    assert_eq!(st, "issue");
    assert_eq!(content, "issue content");
    assert_eq!(title, "Test Issue");

    // Verify junction data preserved (same document_id still resolves).
    let label_count: i64 = conn
        .query_row(
            "SELECT COUNT(*) FROM document_labels WHERE document_id = ?1",
            [doc_id],
            |row| row.get(0),
        )
        .unwrap();
    assert_eq!(label_count, 1);

    let path_count: i64 = conn
        .query_row(
            "SELECT COUNT(*) FROM document_paths WHERE document_id = ?1",
            [doc_id],
            |row| row.get(0),
        )
        .unwrap();
    assert_eq!(path_count, 1);

    // Verify dirty_sources preserved.
    let dirty_count: i64 = conn
        .query_row("SELECT COUNT(*) FROM dirty_sources", [], |row| row.get(0))
        .unwrap();
    assert_eq!(dirty_count, 1);
}
/// After the migration-024 table rebuild, the FTS insert/update/delete
/// triggers on `documents` must still fire and keep `documents_fts` in sync.
#[test]
fn test_migration_024_fts_triggers_intact() {
    let conn = setup_migrated_db();
    let pid = insert_test_project(&conn);

    // Insert a document after migration -- the FTS insert trigger should fire.
    // (Content text is 'test content' from the helper, hence the MATCH 'test'.)
    let doc_id = insert_test_document(&conn, "note", 1, pid);

    // Verify FTS entry exists.
    let fts_count: i64 = conn
        .query_row(
            "SELECT COUNT(*) FROM documents_fts WHERE documents_fts MATCH 'test'",
            [],
            |row| row.get(0),
        )
        .unwrap();
    assert!(fts_count > 0, "FTS trigger should have created an entry");

    // Verify the update trigger works: changing content_text must be
    // reflected in the FTS index.
    conn.execute(
        "UPDATE documents SET content_text = 'updated content' WHERE id = ?1",
        [doc_id],
    )
    .unwrap();

    let fts_updated: i64 = conn
        .query_row(
            "SELECT COUNT(*) FROM documents_fts WHERE documents_fts MATCH 'updated'",
            [],
            |row| row.get(0),
        )
        .unwrap();
    assert!(
        fts_updated > 0,
        "FTS update trigger should reflect new content"
    );

    // Verify the delete trigger works: removing the document must drop its
    // FTS entry.
    conn.execute("DELETE FROM documents WHERE id = ?1", [doc_id])
        .unwrap();

    let fts_after_delete: i64 = conn
        .query_row(
            "SELECT COUNT(*) FROM documents_fts WHERE documents_fts MATCH 'updated'",
            [],
            |row| row.get(0),
        )
        .unwrap();
    assert_eq!(
        fts_after_delete, 0,
        "FTS delete trigger should remove the entry"
    );
}
#[test]
fn test_migration_024_row_counts_preserved() {
    let conn = setup_migrated_db();

    // On a freshly migrated DB the rebuilt tables exist, are queryable,
    // and start empty.
    let count = |table: &str| -> i64 {
        conn.query_row(&format!("SELECT COUNT(*) FROM {}", table), [], |row| {
            row.get(0)
        })
        .unwrap()
    };

    assert_eq!(count("documents"), 0, "Fresh DB should have 0 documents");
    assert_eq!(count("dirty_sources"), 0, "Fresh DB should have 0 dirty_sources");
}
#[test]
fn test_migration_024_integrity_checks_pass() {
    let conn = setup_migrated_db();

    // SQLite's structural self-check must come back "ok".
    let integrity: String = conn
        .query_row("PRAGMA integrity_check", [], |row| row.get(0))
        .unwrap();
    assert_eq!(integrity, "ok", "Database integrity check should pass");

    // pragma_foreign_key_check yields one row per violation, so a COUNT of
    // zero means every FK is consistent after the table rebuild.
    let fk_violations: i64 = conn
        .query_row("SELECT COUNT(*) FROM pragma_foreign_key_check", [], |row| {
            row.get(0)
        })
        .unwrap();
    assert_eq!(fk_violations, 0, "No foreign key violations should exist");
}
#[test]
fn test_migration_024_note_delete_trigger_cleans_document() {
    let conn = setup_migrated_db();
    let pid = insert_test_project(&conn);
    let issue_id = insert_test_issue(&conn, pid);
    let disc_id = insert_test_discussion(&conn, pid, issue_id);
    let note_id = insert_test_note(&conn, 200, disc_id, pid, false);

    // Create a document backed by this note.
    insert_test_document(&conn, "note", note_id, pid);

    // Counts documents whose source is the given note.
    let docs_for_note = |id: i64| -> i64 {
        conn.query_row(
            "SELECT COUNT(*) FROM documents WHERE source_type = 'note' AND source_id = ?1",
            [id],
            |row| row.get(0),
        )
        .unwrap()
    };

    assert_eq!(docs_for_note(note_id), 1);

    // Delete the (non-system) note -- the cleanup trigger must remove the
    // document that was derived from it.
    conn.execute("DELETE FROM notes WHERE id = ?1", [note_id])
        .unwrap();

    assert_eq!(
        docs_for_note(note_id),
        0,
        "notes_ad_cleanup trigger should delete the document"
    );
}
#[test]
fn test_migration_024_note_system_flip_trigger_cleans_document() {
    let conn = setup_migrated_db();
    let pid = insert_test_project(&conn);
    let issue_id = insert_test_issue(&conn, pid);
    let disc_id = insert_test_discussion(&conn, pid, issue_id);
    let note_id = insert_test_note(&conn, 201, disc_id, pid, false);

    // Create a document backed by this note.
    insert_test_document(&conn, "note", note_id, pid);

    // Counts documents whose source is the given note.
    let docs_for_note = |id: i64| -> i64 {
        conn.query_row(
            "SELECT COUNT(*) FROM documents WHERE source_type = 'note' AND source_id = ?1",
            [id],
            |row| row.get(0),
        )
        .unwrap()
    };

    assert_eq!(docs_for_note(note_id), 1);

    // Flip is_system from 0 to 1 -- the update-cleanup trigger must remove
    // the document, since system notes don't get documents.
    conn.execute("UPDATE notes SET is_system = 1 WHERE id = ?1", [note_id])
        .unwrap();

    assert_eq!(
        docs_for_note(note_id),
        0,
        "notes_au_system_cleanup trigger should delete the document"
    );
}
#[test]
fn test_migration_024_system_note_delete_trigger_does_not_fire() {
    let conn = setup_migrated_db();
    let pid = insert_test_project(&conn);
    let issue_id = insert_test_issue(&conn, pid);
    let disc_id = insert_test_discussion(&conn, pid, issue_id);

    // Insert a system note (is_system = true).
    let note_id = insert_test_note(&conn, 202, disc_id, pid, true);

    // Manually insert a document (shouldn't exist for system notes in
    // practice, but we want to exercise the trigger's WHEN guard).
    insert_test_document(&conn, "note", note_id, pid);

    // Counts documents whose source is the given note.
    let docs_for_note = |id: i64| -> i64 {
        conn.query_row(
            "SELECT COUNT(*) FROM documents WHERE source_type = 'note' AND source_id = ?1",
            [id],
            |row| row.get(0),
        )
        .unwrap()
    };

    assert_eq!(docs_for_note(note_id), 1);

    // Delete the system note -- the trigger is guarded by
    // WHEN old.is_system = 0, so the document must be left alone.
    conn.execute("DELETE FROM notes WHERE id = ?1", [note_id])
        .unwrap();

    assert_eq!(
        docs_for_note(note_id),
        1,
        "notes_ad_cleanup trigger should NOT fire for system notes"
    );
}
/// Apply only the first `up_to` entries of MIGRATIONS (indices 0..up_to).
///
/// Migration versions are 1-based while the MIGRATIONS array is 0-based, so
/// this applies versions 1 through `up_to` inclusive. Each applied migration
/// is recorded in schema_version, mirroring what run_migrations does, so a
/// later run_single_migration call sees a consistent version table.
fn run_migrations_up_to(conn: &Connection, up_to: usize) {
    // schema_version may not exist yet on a raw connection; create it first.
    conn.execute_batch(
        "CREATE TABLE IF NOT EXISTS schema_version ( \
         version INTEGER PRIMARY KEY, applied_at INTEGER NOT NULL, description TEXT);",
    )
    .unwrap();

    for (version_str, sql) in &MIGRATIONS[..up_to] {
        let version: i32 = version_str.parse().unwrap();
        conn.execute_batch(sql).unwrap();
        // Record the applied migration with a millisecond timestamp.
        conn.execute(
            "INSERT OR REPLACE INTO schema_version (version, applied_at, description) \
             VALUES (?1, strftime('%s', 'now') * 1000, ?2)",
            rusqlite::params![version, version_str],
        )
        .unwrap();
    }
}
/// Run a single migration by index (0-based).
|
|
fn run_single_migration(conn: &Connection, index: usize) {
|
|
let (version_str, sql) = MIGRATIONS[index];
|
|
let version: i32 = version_str.parse().unwrap();
|
|
conn.execute_batch(sql).unwrap();
|
|
conn.execute(
|
|
"INSERT OR REPLACE INTO schema_version (version, applied_at, description) \
|
|
VALUES (?1, strftime('%s', 'now') * 1000, ?2)",
|
|
rusqlite::params![version, version_str],
|
|
)
|
|
.unwrap();
|
|
}
|
|
|
|
/// Migration 025 backfills dirty_sources with one 'note' row per existing
/// non-system note, so documents get generated for them; system notes must
/// be skipped.
#[test]
fn test_migration_025_backfills_existing_notes() {
    let conn = create_connection(Path::new(":memory:")).unwrap();
    // Run all migrations through 024 (index 0..24)
    run_migrations_up_to(&conn, 24);

    let pid = insert_test_project(&conn);
    let issue_id = insert_test_issue(&conn, pid);
    let disc_id = insert_test_discussion(&conn, pid, issue_id);

    // Insert 5 non-system notes
    for i in 1..=5 {
        insert_test_note(&conn, 300 + i, disc_id, pid, false);
    }
    // Insert 2 system notes
    for i in 1..=2 {
        insert_test_note(&conn, 400 + i, disc_id, pid, true);
    }

    // Run migration 025
    run_single_migration(&conn, 24);

    let dirty_count: i64 = conn
        .query_row(
            "SELECT COUNT(*) FROM dirty_sources WHERE source_type = 'note'",
            [],
            |row| row.get(0),
        )
        .unwrap();
    assert_eq!(
        dirty_count, 5,
        "Migration 025 should backfill 5 non-system notes"
    );

    // The note ids that actually got queued in dirty_sources.
    // (Previously misnamed `system_note_ids`, which was the opposite of what
    // the vector holds.)
    let queued_note_ids: Vec<i64> = {
        let mut stmt = conn
            .prepare(
                "SELECT source_id FROM dirty_sources WHERE source_type = 'note' ORDER BY source_id",
            )
            .unwrap();
        stmt.query_map([], |row| row.get(0))
            .unwrap()
            .collect::<std::result::Result<Vec<_>, _>>()
            .unwrap()
    };
    // The ids of all system notes; none of them may appear in the queue.
    let all_system_note_ids: Vec<i64> = {
        let mut stmt = conn
            .prepare("SELECT id FROM notes WHERE is_system = 1 ORDER BY id")
            .unwrap();
        stmt.query_map([], |row| row.get(0))
            .unwrap()
            .collect::<std::result::Result<Vec<_>, _>>()
            .unwrap()
    };
    for sys_id in &all_system_note_ids {
        assert!(
            !queued_note_ids.contains(sys_id),
            "System note id {} should not be in dirty_sources",
            sys_id
        );
    }
}
/// Notes that already have a generated document must NOT be re-queued by the
/// migration-025 backfill; only document-less notes are enqueued.
#[test]
fn test_migration_025_idempotent_with_existing_documents() {
    let conn = create_connection(Path::new(":memory:")).unwrap();
    // Stop just before migration 025 (index 24).
    run_migrations_up_to(&conn, 24);

    let pid = insert_test_project(&conn);
    let issue_id = insert_test_issue(&conn, pid);
    let disc_id = insert_test_discussion(&conn, pid, issue_id);

    // Insert 3 non-system notes
    let note_ids: Vec<i64> = (1..=3)
        .map(|i| insert_test_note(&conn, 500 + i, disc_id, pid, false))
        .collect();

    // Create documents for 2 of 3 notes (simulating already-generated docs);
    // note_ids[2] is deliberately left without one.
    insert_test_document(&conn, "note", note_ids[0], pid);
    insert_test_document(&conn, "note", note_ids[1], pid);

    // Run migration 025
    run_single_migration(&conn, 24);

    let dirty_count: i64 = conn
        .query_row(
            "SELECT COUNT(*) FROM dirty_sources WHERE source_type = 'note'",
            [],
            |row| row.get(0),
        )
        .unwrap();
    assert_eq!(
        dirty_count, 1,
        "Only the note without a document should be backfilled"
    );

    // Verify the correct note was queued (the single row must be note_ids[2]).
    let queued_id: i64 = conn
        .query_row(
            "SELECT source_id FROM dirty_sources WHERE source_type = 'note'",
            [],
            |row| row.get(0),
        )
        .unwrap();
    assert_eq!(queued_id, note_ids[2]);
}
/// Notes already sitting in dirty_sources must not be duplicated or have
/// their queued_at overwritten by the migration-025 backfill (it uses
/// ON CONFLICT DO NOTHING semantics).
#[test]
fn test_migration_025_skips_notes_already_in_dirty_queue() {
    let conn = create_connection(Path::new(":memory:")).unwrap();
    // Stop just before migration 025 (index 24).
    run_migrations_up_to(&conn, 24);

    let pid = insert_test_project(&conn);
    let issue_id = insert_test_issue(&conn, pid);
    let disc_id = insert_test_discussion(&conn, pid, issue_id);

    // Insert 3 non-system notes
    let note_ids: Vec<i64> = (1..=3)
        .map(|i| insert_test_note(&conn, 600 + i, disc_id, pid, false))
        .collect();

    // Pre-queue one note in dirty_sources with a sentinel queued_at (999)
    // so we can detect whether the backfill overwrote it.
    conn.execute(
        "INSERT INTO dirty_sources (source_type, source_id, queued_at) VALUES ('note', ?1, 999)",
        [note_ids[0]],
    )
    .unwrap();

    // Run migration 025
    run_single_migration(&conn, 24);

    let dirty_count: i64 = conn
        .query_row(
            "SELECT COUNT(*) FROM dirty_sources WHERE source_type = 'note'",
            [],
            |row| row.get(0),
        )
        .unwrap();
    assert_eq!(
        dirty_count, 3,
        "All 3 notes should be in dirty_sources (1 pre-existing + 2 new)"
    );

    // Verify the pre-existing entry preserved its original queued_at.
    let original_queued_at: i64 = conn
        .query_row(
            "SELECT queued_at FROM dirty_sources WHERE source_type = 'note' AND source_id = ?1",
            [note_ids[0]],
            |row| row.get(0),
        )
        .unwrap();
    assert_eq!(
        original_queued_at, 999,
        "ON CONFLICT DO NOTHING should preserve the original queued_at"
    );
}