2 Commits

Author SHA1 Message Date
teernisse
2da1a228b3 feat(timeline): collect and render full discussion threads
Implements the downstream consumption of matched discussions from the seed
phase, completing the discussion thread feature across collect, CLI, and
integration tests.

Collect phase (timeline_collect.rs):
- New collect_discussion_threads() function assembles full threads by
  querying notes for each matched discussion_id, filtering out system notes
  (is_system = 0), ordering chronologically, and capping at THREAD_MAX_NOTES
  with a synthetic "[N more notes not shown]" summary note
- build_entity_lookup() creates a (type, id) -> (iid, path) map from seed
  and expanded entities to provide display metadata for thread events
- Thread timestamp is set to the first note's created_at for correct
  chronological interleaving with other timeline events
- collect_events() gains a matched_discussions parameter; threads are
  collected after entity events and before evidence note merging

CLI rendering (cli/commands/timeline.rs):
- Human mode: threads render with box-drawing borders, bold @author tags,
  date-stamped notes, and word-wrapped bodies (60 char width)
- Robot mode: DiscussionThread serializes as discussion_thread kind with
  note_count, full notes array (note_id, author, body, ISO created_at)
- THREAD tag in yellow for human event tag styling
- TimelineMeta gains discussion_threads_included count

Tests:
- 8 new collect tests: basic thread assembly, system note filtering, empty
  thread skipping, body truncation to THREAD_NOTE_MAX_CHARS, note cap with
  synthetic summary, timestamp from first note, chronological sort position,
  and deduplication of duplicate discussion_ids
- Integration tests updated for new collect_events signature

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-13 14:18:36 -05:00
teernisse
0e65202778 feat(timeline): add DiscussionThread types and seed-phase discussion matching
Introduces the foundation for full discussion thread support in the
timeline pipeline. Adds three new domain types to timeline.rs:

- ThreadNote: individual note within a thread (id, author, body, timestamp)
- MatchedDiscussion: tracks discussions matched during seeding with their
  parent entity (issue or MR) for downstream collection
- DiscussionThread variant on TimelineEventType: carries a full thread of
  notes, sorted between NoteEvidence and CrossReferenced

Moves truncate_to_chars() from timeline_seed.rs to timeline.rs as pub(crate)
for reuse by the collect phase. Adds THREAD_NOTE_MAX_CHARS (2000) and
THREAD_MAX_NOTES (50) constants.

Upgrades the seed SQL in resolve_documents_to_entities() to resolve note
documents to their parent discussion via an additional LEFT JOIN chain
(notes -> discussions), using COALESCE to unify the entity resolution path
for both discussion and note source types. SeedResult gains a
matched_discussions field that captures deduplicated discussion matches.

Tests cover: discussion matching from discussion docs, note-to-parent
resolution, deduplication of same discussion across multiple docs, and
correct parent entity type (issue vs MR).

Co-Authored-By: Claude Opus 4.6 <noreply@anthropic.com>
2026-02-13 14:18:18 -05:00
7 changed files with 868 additions and 64 deletions

View File

@@ -86,6 +86,7 @@ pub async fn run_timeline(config: &Config, params: &TimelineParams) -> Result<Ti
&seed_result.seed_entities, &seed_result.seed_entities,
&expand_result.expanded_entities, &expand_result.expanded_entities,
&seed_result.evidence_notes, &seed_result.evidence_notes,
&seed_result.matched_discussions,
since_ms, since_ms,
params.limit, params.limit,
)?; )?;
@@ -162,6 +163,25 @@ fn print_timeline_event(event: &TimelineEvent) {
); );
} }
} }
// Show full discussion thread
if let TimelineEventType::DiscussionThread { notes, .. } = &event.event_type {
let bar = "\u{2500}".repeat(44);
println!(" \u{2500}\u{2500} Discussion {bar}");
for note in notes {
let note_date = format_date(note.created_at);
let author = note
.author
.as_deref()
.map(|a| format!("@{a}"))
.unwrap_or_else(|| "unknown".to_owned());
println!(" {} ({note_date}):", style(author).bold());
for line in wrap_text(&note.body, 60) {
println!(" {line}");
}
}
println!(" {}", "\u{2500}".repeat(60));
}
} }
fn print_timeline_footer(result: &TimelineResult) { fn print_timeline_footer(result: &TimelineResult) {
@@ -206,6 +226,7 @@ fn format_event_tag(event_type: &TimelineEventType) -> String {
TimelineEventType::MilestoneRemoved { .. } => style("MILESTONE-").magenta().to_string(), TimelineEventType::MilestoneRemoved { .. } => style("MILESTONE-").magenta().to_string(),
TimelineEventType::Merged => style("MERGED").cyan().to_string(), TimelineEventType::Merged => style("MERGED").cyan().to_string(),
TimelineEventType::NoteEvidence { .. } => style("NOTE").dim().to_string(), TimelineEventType::NoteEvidence { .. } => style("NOTE").dim().to_string(),
TimelineEventType::DiscussionThread { .. } => style("THREAD").yellow().to_string(),
TimelineEventType::CrossReferenced { .. } => style("REF").dim().to_string(), TimelineEventType::CrossReferenced { .. } => style("REF").dim().to_string(),
} }
} }
@@ -232,6 +253,28 @@ fn truncate_summary(s: &str, max: usize) -> String {
} }
} }
/// Greedy word-wrap: split `text` on whitespace and pack words into lines of
/// at most `width` characters.
///
/// Width is measured in characters (not bytes), so multibyte UTF-8 text wraps
/// at the intended visual width instead of early. A single word longer than
/// `width` is emitted on its own line, unbroken — same as the byte-based
/// original. Returns an empty vec for empty/whitespace-only input.
fn wrap_text(text: &str, width: usize) -> Vec<String> {
    let mut lines = Vec::new();
    let mut current = String::new();
    // Track char count separately so we never rescan `current`.
    let mut current_chars = 0usize;
    for word in text.split_whitespace() {
        let word_chars = word.chars().count();
        if current.is_empty() {
            current.push_str(word);
            current_chars = word_chars;
        } else if current_chars + 1 + word_chars <= width {
            current.push(' ');
            current.push_str(word);
            current_chars += 1 + word_chars;
        } else {
            // Line full: flush it and start a new one with this word.
            lines.push(std::mem::take(&mut current));
            current.push_str(word);
            current_chars = word_chars;
        }
    }
    if !current.is_empty() {
        lines.push(current);
    }
    lines
}
fn wrap_snippet(text: &str, width: usize) -> Vec<String> { fn wrap_snippet(text: &str, width: usize) -> Vec<String> {
let mut lines = Vec::new(); let mut lines = Vec::new();
let mut current = String::new(); let mut current = String::new();
@@ -276,6 +319,7 @@ pub fn print_timeline_json_with_meta(
total_entities: result.seed_entities.len() + result.expanded_entities.len(), total_entities: result.seed_entities.len() + result.expanded_entities.len(),
total_events: total_events_before_limit, total_events: total_events_before_limit,
evidence_notes_included: count_evidence_notes(&result.events), evidence_notes_included: count_evidence_notes(&result.events),
discussion_threads_included: count_discussion_threads(&result.events),
unresolved_references: result.unresolved_references.len(), unresolved_references: result.unresolved_references.len(),
showing: result.events.len(), showing: result.events.len(),
}, },
@@ -473,6 +517,22 @@ fn event_type_to_json(event_type: &TimelineEventType) -> (String, serde_json::Va
"discussion_id": discussion_id, "discussion_id": discussion_id,
}), }),
), ),
TimelineEventType::DiscussionThread {
discussion_id,
notes,
} => (
"discussion_thread".to_owned(),
serde_json::json!({
"discussion_id": discussion_id,
"note_count": notes.len(),
"notes": notes.iter().map(|n| serde_json::json!({
"note_id": n.note_id,
"author": n.author,
"body": n.body,
"created_at": ms_to_iso(n.created_at),
})).collect::<Vec<_>>(),
}),
),
TimelineEventType::CrossReferenced { target } => ( TimelineEventType::CrossReferenced { target } => (
"cross_referenced".to_owned(), "cross_referenced".to_owned(),
serde_json::json!({ "target": target }), serde_json::json!({ "target": target }),
@@ -488,6 +548,7 @@ struct TimelineMetaJson {
total_entities: usize, total_entities: usize,
total_events: usize, total_events: usize,
evidence_notes_included: usize, evidence_notes_included: usize,
discussion_threads_included: usize,
unresolved_references: usize, unresolved_references: usize,
showing: usize, showing: usize,
} }
@@ -498,3 +559,10 @@ fn count_evidence_notes(events: &[TimelineEvent]) -> usize {
.filter(|e| matches!(e.event_type, TimelineEventType::NoteEvidence { .. })) .filter(|e| matches!(e.event_type, TimelineEventType::NoteEvidence { .. }))
.count() .count()
} }
/// Number of events in `events` that carry a full discussion thread.
fn count_discussion_threads(events: &[TimelineEvent]) -> usize {
    let mut total = 0;
    for event in events {
        if matches!(event.event_type, TimelineEventType::DiscussionThread { .. }) {
            total += 1;
        }
    }
    total
}

View File

@@ -49,6 +49,21 @@ impl Ord for TimelineEvent {
} }
} }
/// Maximum characters per note body in a discussion thread.
pub const THREAD_NOTE_MAX_CHARS: usize = 2000;
/// Maximum notes per discussion thread before truncation.
pub const THREAD_MAX_NOTES: usize = 50;
/// A single note within a discussion thread.
///
/// `Ord` is derived, and `note_id` is the first field, so notes compare by
/// note id before any other field.
#[derive(Debug, Clone, PartialEq, Eq, PartialOrd, Ord, Serialize)]
pub struct ThreadNote {
    // Database id of the note; the collect phase uses -1 for the synthetic
    // "[N more notes not shown]" summary entry.
    pub note_id: i64,
    // Username of the note author, when recorded.
    pub author: Option<String>,
    // Note body, truncated to THREAD_NOTE_MAX_CHARS by the collect phase.
    pub body: String,
    // Creation time in Unix milliseconds (rendered via ms_to_iso in robot mode).
    pub created_at: i64,
}
/// Per spec Section 3.3. Serde tagged enum for JSON output. /// Per spec Section 3.3. Serde tagged enum for JSON output.
/// ///
/// Variant declaration order defines the sort order within a timestamp+entity /// Variant declaration order defines the sort order within a timestamp+entity
@@ -78,11 +93,39 @@ pub enum TimelineEventType {
snippet: String, snippet: String,
discussion_id: Option<i64>, discussion_id: Option<i64>,
}, },
DiscussionThread {
discussion_id: i64,
notes: Vec<ThreadNote>,
},
CrossReferenced { CrossReferenced {
target: String, target: String,
}, },
} }
/// Truncate a string to at most `max_chars` characters on a safe UTF-8 boundary.
pub(crate) fn truncate_to_chars(s: &str, max_chars: usize) -> String {
    // `nth(max_chars)` yields the byte offset of the first character past the
    // limit; when it doesn't exist, the string is already short enough.
    match s.char_indices().nth(max_chars) {
        Some((cut, _)) => s[..cut].to_owned(),
        None => s.to_owned(),
    }
}
/// A discussion matched during the seed phase, to be collected as a full thread.
#[derive(Debug, Clone)]
pub struct MatchedDiscussion {
    // Database id of the matched discussion.
    pub discussion_id: i64,
    // Type of the parent entity the discussion belongs to ("issue" or
    // "merge_request"); used to look up display metadata in the collect phase.
    pub entity_type: String,
    // Database id of the parent entity.
    pub entity_id: i64,
    // Database id of the project that owns the discussion.
    pub project_id: i64,
}
/// Internal entity reference used across pipeline stages. /// Internal entity reference used across pipeline stages.
#[derive(Debug, Clone, Serialize)] #[derive(Debug, Clone, Serialize)]
pub struct EntityRef { pub struct EntityRef {
@@ -250,7 +293,7 @@ mod tests {
#[test] #[test]
fn test_timeline_event_type_variant_count() { fn test_timeline_event_type_variant_count() {
// Verify all 9 variants serialize without panic // Verify all 10 variants serialize without panic
let variants: Vec<TimelineEventType> = vec![ let variants: Vec<TimelineEventType> = vec![
TimelineEventType::Created, TimelineEventType::Created,
TimelineEventType::StateChanged { TimelineEventType::StateChanged {
@@ -274,13 +317,96 @@ mod tests {
snippet: "text".to_owned(), snippet: "text".to_owned(),
discussion_id: None, discussion_id: None,
}, },
TimelineEventType::DiscussionThread {
discussion_id: 1,
notes: vec![ThreadNote {
note_id: 1,
author: Some("alice".to_owned()),
body: "hello".to_owned(),
created_at: 1000,
}],
},
TimelineEventType::CrossReferenced { TimelineEventType::CrossReferenced {
target: "!567".to_owned(), target: "!567".to_owned(),
}, },
]; ];
assert_eq!(variants.len(), 9); assert_eq!(variants.len(), 10);
for v in &variants { for v in &variants {
serde_json::to_value(v).unwrap(); serde_json::to_value(v).unwrap();
} }
} }
#[test]
fn test_discussion_thread_serializes_tagged() {
    // Two-note thread; verify the serde-tagged JSON shape end to end.
    let notes = vec![
        ThreadNote {
            note_id: 1,
            author: Some("alice".to_owned()),
            body: "first note".to_owned(),
            created_at: 1000,
        },
        ThreadNote {
            note_id: 2,
            author: Some("bob".to_owned()),
            body: "second note".to_owned(),
            created_at: 2000,
        },
    ];
    let json = serde_json::to_value(TimelineEventType::DiscussionThread {
        discussion_id: 42,
        notes,
    })
    .unwrap();
    assert_eq!(json["kind"], "discussion_thread");
    assert_eq!(json["discussion_id"], 42);
    assert_eq!(json["notes"].as_array().unwrap().len(), 2);
    assert_eq!(json["notes"][0]["note_id"], 1);
    assert_eq!(json["notes"][0]["author"], "alice");
    assert_eq!(json["notes"][0]["body"], "first note");
    assert_eq!(json["notes"][1]["note_id"], 2);
}
#[test]
fn test_discussion_thread_sort_order() {
    // DiscussionThread should sort after NoteEvidence, before CrossReferenced
    let evidence = TimelineEventType::NoteEvidence {
        note_id: 1,
        snippet: "a".to_owned(),
        discussion_id: None,
    };
    let thread = TimelineEventType::DiscussionThread {
        discussion_id: 1,
        notes: Vec::new(),
    };
    let reference = TimelineEventType::CrossReferenced {
        target: "!1".to_owned(),
    };
    assert!(evidence < thread);
    assert!(thread < reference);
}
#[test]
fn test_thread_note_ord() {
    // ThreadNote derives Ord — note_id is the first field, so ordering is by note_id
    let first = ThreadNote {
        note_id: 1,
        author: Some("alice".to_owned()),
        body: "first".to_owned(),
        created_at: 1000,
    };
    let second = ThreadNote {
        note_id: 2,
        author: Some("bob".to_owned()),
        body: "second".to_owned(),
        created_at: 2000,
    };
    assert!(first < second);
}
#[test]
fn test_truncate_to_chars() {
    // Short strings pass through untouched.
    assert_eq!(truncate_to_chars("hello", 200), "hello");
    // Long strings are cut to exactly `max_chars` characters.
    let long = "a".repeat(300);
    assert_eq!(truncate_to_chars(&long, 200).chars().count(), 200);
    // Multibyte input is cut on a character boundary, never mid-codepoint —
    // this is the case the byte-slice-plus-char_indices logic exists for.
    let accented = "é".repeat(300);
    let cut = truncate_to_chars(&accented, 200);
    assert_eq!(cut.chars().count(), 200);
    assert_eq!(cut, "é".repeat(200));
}
} }

View File

@@ -1,20 +1,27 @@
use rusqlite::Connection; use rusqlite::Connection;
use std::collections::HashSet;
use crate::core::error::{LoreError, Result}; use crate::core::error::{LoreError, Result};
use crate::core::timeline::{EntityRef, ExpandedEntityRef, TimelineEvent, TimelineEventType}; use crate::core::timeline::{
EntityRef, ExpandedEntityRef, MatchedDiscussion, THREAD_MAX_NOTES, THREAD_NOTE_MAX_CHARS,
ThreadNote, TimelineEvent, TimelineEventType, truncate_to_chars,
};
/// Collect all events for seed and expanded entities, interleave chronologically. /// Collect all events for seed and expanded entities, interleave chronologically.
/// ///
/// Steps 4-5 of the timeline pipeline: /// Steps 4-5 of the timeline pipeline:
/// 1. For each entity, collect Created, StateChanged, Label, Milestone, Merged events /// 1. For each entity, collect Created, StateChanged, Label, Milestone, Merged events
/// 2. Merge in evidence notes from the seed phase /// 2. Collect discussion threads from matched discussions
/// 3. Sort chronologically with stable tiebreak /// 3. Merge in evidence notes from the seed phase
/// 4. Apply --since filter and --limit /// 4. Sort chronologically with stable tiebreak
/// 5. Apply --since filter and --limit
pub fn collect_events( pub fn collect_events(
conn: &Connection, conn: &Connection,
seed_entities: &[EntityRef], seed_entities: &[EntityRef],
expanded_entities: &[ExpandedEntityRef], expanded_entities: &[ExpandedEntityRef],
evidence_notes: &[TimelineEvent], evidence_notes: &[TimelineEvent],
matched_discussions: &[MatchedDiscussion],
since_ms: Option<i64>, since_ms: Option<i64>,
limit: usize, limit: usize,
) -> Result<(Vec<TimelineEvent>, usize)> { ) -> Result<(Vec<TimelineEvent>, usize)> {
@@ -30,6 +37,10 @@ pub fn collect_events(
collect_entity_events(conn, &expanded.entity_ref, false, &mut all_events)?; collect_entity_events(conn, &expanded.entity_ref, false, &mut all_events)?;
} }
// Collect discussion threads
let entity_lookup = build_entity_lookup(seed_entities, expanded_entities);
collect_discussion_threads(conn, matched_discussions, &entity_lookup, &mut all_events)?;
// Add evidence notes from seed phase // Add evidence notes from seed phase
all_events.extend(evidence_notes.iter().cloned()); all_events.extend(evidence_notes.iter().cloned());
@@ -369,6 +380,117 @@ fn entity_id_column(entity: &EntityRef) -> Result<(&'static str, i64)> {
} }
} }
/// Lookup key: (entity_type, entity_id) -> (iid, project_path)
type EntityLookup = std::collections::HashMap<(String, i64), (i64, String)>;
/// Build the display-metadata map over both seed and expanded entities.
/// Expanded entries come second, so on a key collision they win — matching
/// the insert order of the loop-based formulation.
fn build_entity_lookup(seeds: &[EntityRef], expanded: &[ExpandedEntityRef]) -> EntityLookup {
    seeds
        .iter()
        .chain(expanded.iter().map(|exp| &exp.entity_ref))
        .map(|e| {
            (
                (e.entity_type.clone(), e.entity_id),
                (e.entity_iid, e.project_path.clone()),
            )
        })
        .collect()
}
/// Collect full discussion threads for matched discussions.
///
/// For each deduplicated discussion id, loads its non-system notes in
/// chronological order, truncates each body to `THREAD_NOTE_MAX_CHARS`, caps
/// the thread at `THREAD_MAX_NOTES` (appending a synthetic summary note with
/// `note_id = -1`), and pushes a `DiscussionThread` event timestamped at the
/// first note so it interleaves correctly with other timeline events.
///
/// Discussions whose parent entity is absent from `entity_lookup`, or whose
/// notes are all system notes, are skipped.
fn collect_discussion_threads(
    conn: &Connection,
    matched_discussions: &[MatchedDiscussion],
    entity_lookup: &EntityLookup,
    events: &mut Vec<TimelineEvent>,
) -> Result<()> {
    if matched_discussions.is_empty() {
        return Ok(());
    }
    // Prepare the notes query once and reuse it for every discussion instead
    // of re-compiling the SQL on each loop iteration.
    let mut stmt = conn.prepare(
        "SELECT id, author_username, body, created_at FROM notes
         WHERE discussion_id = ?1 AND is_system = 0
         ORDER BY created_at ASC",
    )?;
    // Deduplicate by discussion_id
    let mut seen = HashSet::new();
    for disc in matched_discussions {
        if !seen.insert(disc.discussion_id) {
            continue;
        }
        // Display metadata (iid, project path) comes from the seed/expanded
        // entity set assembled earlier in the pipeline.
        let (iid, project_path) =
            match entity_lookup.get(&(disc.entity_type.clone(), disc.entity_id)) {
                Some(val) => val.clone(),
                None => continue, // entity not in seed or expanded set
            };
        let rows = stmt.query_map(rusqlite::params![disc.discussion_id], |row| {
            Ok((
                row.get::<_, i64>(0)?,            // id
                row.get::<_, Option<String>>(1)?, // author_username
                row.get::<_, Option<String>>(2)?, // body
                row.get::<_, i64>(3)?,            // created_at
            ))
        })?;
        let mut notes = Vec::new();
        for row_result in rows {
            let (note_id, author, body, created_at) = row_result?;
            // NULL bodies become empty strings; long bodies are cut on a
            // safe UTF-8 boundary.
            let body = truncate_to_chars(body.as_deref().unwrap_or(""), THREAD_NOTE_MAX_CHARS);
            notes.push(ThreadNote {
                note_id,
                author,
                body,
                created_at,
            });
        }
        // Skip empty threads (all notes were system notes)
        if notes.is_empty() {
            continue;
        }
        // Thread timestamp is the first note's created_at for correct
        // chronological interleaving with other timeline events.
        let first_created_at = notes[0].created_at;
        // Cap notes per thread
        let total_notes = notes.len();
        if total_notes > THREAD_MAX_NOTES {
            notes.truncate(THREAD_MAX_NOTES);
            // Synthetic summary note (note_id = -1) records how many notes
            // were dropped; it reuses the last kept note's timestamp.
            notes.push(ThreadNote {
                note_id: -1,
                author: None,
                body: format!("[{} more notes not shown]", total_notes - THREAD_MAX_NOTES),
                created_at: notes.last().map_or(first_created_at, |n| n.created_at),
            });
        }
        let note_count = notes.len();
        // Attribute the event to the thread starter.
        let actor = notes.first().and_then(|n| n.author.clone());
        events.push(TimelineEvent {
            timestamp: first_created_at,
            entity_type: disc.entity_type.clone(),
            entity_id: disc.entity_id,
            entity_iid: iid,
            project_path,
            event_type: TimelineEventType::DiscussionThread {
                discussion_id: disc.discussion_id,
                notes,
            },
            summary: format!("Discussion ({note_count} notes)"),
            actor,
            url: None,
            is_seed: true,
        });
    }
    Ok(())
}
#[cfg(test)] #[cfg(test)]
#[path = "timeline_collect_tests.rs"] #[path = "timeline_collect_tests.rs"]
mod tests; mod tests;

View File

@@ -101,7 +101,7 @@ fn test_collect_creation_event() {
let issue_id = insert_issue(&conn, project_id, 1); let issue_id = insert_issue(&conn, project_id, 1);
let seeds = vec![make_entity_ref("issue", issue_id, 1)]; let seeds = vec![make_entity_ref("issue", issue_id, 1)];
let (events, _) = collect_events(&conn, &seeds, &[], &[], None, 100).unwrap(); let (events, _) = collect_events(&conn, &seeds, &[], &[], &[], None, 100).unwrap();
assert_eq!(events.len(), 1); assert_eq!(events.len(), 1);
assert!(matches!(events[0].event_type, TimelineEventType::Created)); assert!(matches!(events[0].event_type, TimelineEventType::Created));
assert_eq!(events[0].timestamp, 1000); assert_eq!(events[0].timestamp, 1000);
@@ -119,7 +119,7 @@ fn test_collect_state_events() {
insert_state_event(&conn, project_id, Some(issue_id), None, "reopened", 4000); insert_state_event(&conn, project_id, Some(issue_id), None, "reopened", 4000);
let seeds = vec![make_entity_ref("issue", issue_id, 1)]; let seeds = vec![make_entity_ref("issue", issue_id, 1)];
let (events, _) = collect_events(&conn, &seeds, &[], &[], None, 100).unwrap(); let (events, _) = collect_events(&conn, &seeds, &[], &[], &[], None, 100).unwrap();
// Created + 2 state changes = 3 // Created + 2 state changes = 3
assert_eq!(events.len(), 3); assert_eq!(events.len(), 3);
@@ -144,7 +144,7 @@ fn test_collect_merged_dedup() {
insert_state_event(&conn, project_id, None, Some(mr_id), "merged", 5000); insert_state_event(&conn, project_id, None, Some(mr_id), "merged", 5000);
let seeds = vec![make_entity_ref("merge_request", mr_id, 10)]; let seeds = vec![make_entity_ref("merge_request", mr_id, 10)];
let (events, _) = collect_events(&conn, &seeds, &[], &[], None, 100).unwrap(); let (events, _) = collect_events(&conn, &seeds, &[], &[], &[], None, 100).unwrap();
// Should have Created + Merged (not Created + StateChanged{merged} + Merged) // Should have Created + Merged (not Created + StateChanged{merged} + Merged)
let merged_count = events let merged_count = events
@@ -169,7 +169,7 @@ fn test_collect_null_label_fallback() {
insert_label_event(&conn, project_id, Some(issue_id), None, "add", None, 2000); insert_label_event(&conn, project_id, Some(issue_id), None, "add", None, 2000);
let seeds = vec![make_entity_ref("issue", issue_id, 1)]; let seeds = vec![make_entity_ref("issue", issue_id, 1)];
let (events, _) = collect_events(&conn, &seeds, &[], &[], None, 100).unwrap(); let (events, _) = collect_events(&conn, &seeds, &[], &[], &[], None, 100).unwrap();
let label_event = events.iter().find(|e| { let label_event = events.iter().find(|e| {
matches!(&e.event_type, TimelineEventType::LabelAdded { label } if label == "[deleted label]") matches!(&e.event_type, TimelineEventType::LabelAdded { label } if label == "[deleted label]")
@@ -186,7 +186,7 @@ fn test_collect_null_milestone_fallback() {
insert_milestone_event(&conn, project_id, Some(issue_id), None, "add", None, 2000); insert_milestone_event(&conn, project_id, Some(issue_id), None, "add", None, 2000);
let seeds = vec![make_entity_ref("issue", issue_id, 1)]; let seeds = vec![make_entity_ref("issue", issue_id, 1)];
let (events, _) = collect_events(&conn, &seeds, &[], &[], None, 100).unwrap(); let (events, _) = collect_events(&conn, &seeds, &[], &[], &[], None, 100).unwrap();
let ms_event = events.iter().find(|e| { let ms_event = events.iter().find(|e| {
matches!(&e.event_type, TimelineEventType::MilestoneSet { milestone } if milestone == "[deleted milestone]") matches!(&e.event_type, TimelineEventType::MilestoneSet { milestone } if milestone == "[deleted milestone]")
@@ -206,7 +206,7 @@ fn test_collect_since_filter() {
let seeds = vec![make_entity_ref("issue", issue_id, 1)]; let seeds = vec![make_entity_ref("issue", issue_id, 1)];
// Since 4000: should exclude Created (1000) and closed (3000) // Since 4000: should exclude Created (1000) and closed (3000)
let (events, _) = collect_events(&conn, &seeds, &[], &[], Some(4000), 100).unwrap(); let (events, _) = collect_events(&conn, &seeds, &[], &[], &[], Some(4000), 100).unwrap();
assert_eq!(events.len(), 1); assert_eq!(events.len(), 1);
assert_eq!(events[0].timestamp, 5000); assert_eq!(events[0].timestamp, 5000);
} }
@@ -233,7 +233,7 @@ fn test_collect_chronological_sort() {
make_entity_ref("issue", issue_id, 1), make_entity_ref("issue", issue_id, 1),
make_entity_ref("merge_request", mr_id, 10), make_entity_ref("merge_request", mr_id, 10),
]; ];
let (events, _) = collect_events(&conn, &seeds, &[], &[], None, 100).unwrap(); let (events, _) = collect_events(&conn, &seeds, &[], &[], &[], None, 100).unwrap();
// Verify chronological order // Verify chronological order
for window in events.windows(2) { for window in events.windows(2) {
@@ -259,7 +259,7 @@ fn test_collect_respects_limit() {
} }
let seeds = vec![make_entity_ref("issue", issue_id, 1)]; let seeds = vec![make_entity_ref("issue", issue_id, 1)];
let (events, total) = collect_events(&conn, &seeds, &[], &[], None, 5).unwrap(); let (events, total) = collect_events(&conn, &seeds, &[], &[], &[], None, 5).unwrap();
assert_eq!(events.len(), 5); assert_eq!(events.len(), 5);
// 20 state changes + 1 created = 21 total before limit // 20 state changes + 1 created = 21 total before limit
assert_eq!(total, 21); assert_eq!(total, 21);
@@ -289,7 +289,7 @@ fn test_collect_evidence_notes_included() {
}]; }];
let seeds = vec![make_entity_ref("issue", issue_id, 1)]; let seeds = vec![make_entity_ref("issue", issue_id, 1)];
let (events, _) = collect_events(&conn, &seeds, &[], &evidence, None, 100).unwrap(); let (events, _) = collect_events(&conn, &seeds, &[], &evidence, &[], None, 100).unwrap();
let note_event = events.iter().find(|e| { let note_event = events.iter().find(|e| {
matches!( matches!(
@@ -311,7 +311,7 @@ fn test_collect_merged_fallback_to_state_event() {
insert_state_event(&conn, project_id, None, Some(mr_id), "merged", 5000); insert_state_event(&conn, project_id, None, Some(mr_id), "merged", 5000);
let seeds = vec![make_entity_ref("merge_request", mr_id, 10)]; let seeds = vec![make_entity_ref("merge_request", mr_id, 10)];
let (events, _) = collect_events(&conn, &seeds, &[], &[], None, 100).unwrap(); let (events, _) = collect_events(&conn, &seeds, &[], &[], &[], None, 100).unwrap();
let merged = events let merged = events
.iter() .iter()
@@ -319,3 +319,386 @@ fn test_collect_merged_fallback_to_state_event() {
assert!(merged.is_some()); assert!(merged.is_some());
assert_eq!(merged.unwrap().timestamp, 5000); assert_eq!(merged.unwrap().timestamp, 5000);
} }
// ─── Discussion thread tests ────────────────────────────────────────────────
/// Insert a discussion row attached to either an issue or a merge request;
/// returns the new rowid.
fn insert_discussion(
    db: &Connection,
    project_id: i64,
    issue_id: Option<i64>,
    mr_id: Option<i64>,
) -> i64 {
    let noteable_type = match issue_id {
        Some(_) => "Issue",
        None => "MergeRequest",
    };
    db.execute(
        "INSERT INTO discussions (gitlab_discussion_id, project_id, issue_id, merge_request_id, noteable_type, last_seen_at) VALUES (?1, ?2, ?3, ?4, ?5, 0)",
        rusqlite::params![format!("disc_{}", rand::random::<u32>()), project_id, issue_id, mr_id, noteable_type],
    )
    .unwrap();
    db.last_insert_rowid()
}
/// Insert a note row for `discussion_id`; returns the new rowid.
#[allow(clippy::too_many_arguments)]
fn insert_note(
    db: &Connection,
    discussion_id: i64,
    project_id: i64,
    author: &str,
    body: &str,
    is_system: bool,
    created_at: i64,
) -> i64 {
    // Random gitlab_id keeps rows unique across repeated inserts.
    let gitlab_id = i64::from(rand::random::<u32>());
    db.execute(
        "INSERT INTO notes (gitlab_id, discussion_id, project_id, is_system, author_username, body, created_at, updated_at, last_seen_at) VALUES (?1, ?2, ?3, ?4, ?5, ?6, ?7, ?7, ?7)",
        rusqlite::params![gitlab_id, discussion_id, project_id, is_system as i32, author, body, created_at],
    )
    .unwrap();
    db.last_insert_rowid()
}
/// Build a MatchedDiscussion pointing at the given parent entity.
fn make_matched_discussion(
    discussion_id: i64,
    entity_type: &str,
    entity_id: i64,
    project_id: i64,
) -> MatchedDiscussion {
    MatchedDiscussion {
        discussion_id,
        entity_type: String::from(entity_type),
        entity_id,
        project_id,
    }
}
#[test]
fn test_collect_discussion_thread_basic() {
    let db = setup_test_db();
    let project_id = insert_project(&db);
    let issue_id = insert_issue(&db, project_id, 1);
    let disc_id = insert_discussion(&db, project_id, Some(issue_id), None);
    insert_note(&db, disc_id, project_id, "alice", "First note", false, 2000);
    insert_note(&db, disc_id, project_id, "bob", "Reply here", false, 3000);
    insert_note(&db, disc_id, project_id, "alice", "Follow up", false, 4000);
    let seeds = [make_entity_ref("issue", issue_id, 1)];
    let discussions = [make_matched_discussion(disc_id, "issue", issue_id, project_id)];
    let (timeline, _) = collect_events(&db, &seeds, &[], &[], &discussions, None, 100).unwrap();
    let found = timeline
        .iter()
        .find(|e| matches!(&e.event_type, TimelineEventType::DiscussionThread { .. }));
    assert!(found.is_some(), "Should have a DiscussionThread event");
    match &found.unwrap().event_type {
        TimelineEventType::DiscussionThread {
            discussion_id,
            notes,
        } => {
            assert_eq!(*discussion_id, disc_id);
            assert_eq!(notes.len(), 3);
            assert_eq!(notes[0].author.as_deref(), Some("alice"));
            assert_eq!(notes[0].body, "First note");
            assert_eq!(notes[1].author.as_deref(), Some("bob"));
            assert_eq!(notes[2].body, "Follow up");
        }
        _ => panic!("Expected DiscussionThread variant"),
    }
}
#[test]
fn test_collect_discussion_thread_skips_system_notes() {
    let db = setup_test_db();
    let project_id = insert_project(&db);
    let issue_id = insert_issue(&db, project_id, 1);
    let disc_id = insert_discussion(&db, project_id, Some(issue_id), None);
    // One system note sandwiched between two user notes.
    insert_note(&db, disc_id, project_id, "alice", "User note", false, 2000);
    insert_note(&db, disc_id, project_id, "system", "added label ~bug", true, 3000);
    insert_note(&db, disc_id, project_id, "bob", "Another user note", false, 4000);
    let seeds = [make_entity_ref("issue", issue_id, 1)];
    let discussions = [make_matched_discussion(disc_id, "issue", issue_id, project_id)];
    let (timeline, _) = collect_events(&db, &seeds, &[], &[], &discussions, None, 100).unwrap();
    let found = timeline
        .iter()
        .find(|e| matches!(&e.event_type, TimelineEventType::DiscussionThread { .. }));
    assert!(found.is_some());
    match &found.unwrap().event_type {
        TimelineEventType::DiscussionThread { notes, .. } => {
            assert_eq!(notes.len(), 2, "System notes should be filtered out");
            assert_eq!(notes[0].body, "User note");
            assert_eq!(notes[1].body, "Another user note");
        }
        _ => panic!("Expected DiscussionThread"),
    }
}
#[test]
fn test_collect_discussion_thread_empty_after_system_filter() {
    let db = setup_test_db();
    let project_id = insert_project(&db);
    let issue_id = insert_issue(&db, project_id, 1);
    let disc_id = insert_discussion(&db, project_id, Some(issue_id), None);
    // Only system notes
    insert_note(&db, disc_id, project_id, "system", "added label", true, 2000);
    insert_note(&db, disc_id, project_id, "system", "removed label", true, 3000);
    let seeds = [make_entity_ref("issue", issue_id, 1)];
    let discussions = [make_matched_discussion(disc_id, "issue", issue_id, project_id)];
    let (timeline, _) = collect_events(&db, &seeds, &[], &[], &discussions, None, 100).unwrap();
    let thread_count = timeline
        .iter()
        .filter(|e| matches!(&e.event_type, TimelineEventType::DiscussionThread { .. }))
        .count();
    assert_eq!(
        thread_count, 0,
        "All-system-note discussion should produce no thread"
    );
}
#[test]
fn test_collect_discussion_thread_body_truncation() {
    let db = setup_test_db();
    let project_id = insert_project(&db);
    let issue_id = insert_issue(&db, project_id, 1);
    let disc_id = insert_discussion(&db, project_id, Some(issue_id), None);
    // 10k-char body, well past the per-note cap.
    insert_note(&db, disc_id, project_id, "alice", &"x".repeat(10_000), false, 2000);
    let seeds = [make_entity_ref("issue", issue_id, 1)];
    let discussions = [make_matched_discussion(disc_id, "issue", issue_id, project_id)];
    let (timeline, _) = collect_events(&db, &seeds, &[], &[], &discussions, None, 100).unwrap();
    let found = timeline
        .iter()
        .find(|e| matches!(&e.event_type, TimelineEventType::DiscussionThread { .. }))
        .unwrap();
    match &found.event_type {
        TimelineEventType::DiscussionThread { notes, .. } => assert!(
            notes[0].body.chars().count() <= crate::core::timeline::THREAD_NOTE_MAX_CHARS,
            "Body should be truncated to THREAD_NOTE_MAX_CHARS"
        ),
        _ => panic!("Expected DiscussionThread"),
    }
}
#[test]
fn test_collect_discussion_thread_note_cap() {
    let db = setup_test_db();
    let project_id = insert_project(&db);
    let issue_id = insert_issue(&db, project_id, 1);
    let disc_id = insert_discussion(&db, project_id, Some(issue_id), None);
    // Insert 60 notes, exceeding THREAD_MAX_NOTES (50)
    for i in 0..60 {
        insert_note(&db, disc_id, project_id, "alice", &format!("Note {i}"), false, 2000 + i * 100);
    }
    let seeds = [make_entity_ref("issue", issue_id, 1)];
    let discussions = [make_matched_discussion(disc_id, "issue", issue_id, project_id)];
    let (timeline, _) = collect_events(&db, &seeds, &[], &[], &discussions, None, 100).unwrap();
    let found = timeline
        .iter()
        .find(|e| matches!(&e.event_type, TimelineEventType::DiscussionThread { .. }))
        .unwrap();
    match &found.event_type {
        TimelineEventType::DiscussionThread { notes, .. } => {
            // 50 notes + 1 synthetic summary = 51
            assert_eq!(
                notes.len(),
                crate::core::timeline::THREAD_MAX_NOTES + 1,
                "Should cap at THREAD_MAX_NOTES + synthetic summary"
            );
            assert!(notes.last().unwrap().body.contains("more notes not shown"));
        }
        _ => panic!("Expected DiscussionThread"),
    }
}
#[test]
fn test_collect_discussion_thread_timestamp_is_first_note() {
    let db = setup_test_db();
    let project_id = insert_project(&db);
    let issue_id = insert_issue(&db, project_id, 1);
    let disc_id = insert_discussion(&db, project_id, Some(issue_id), None);
    insert_note(&db, disc_id, project_id, "alice", "First", false, 5000);
    insert_note(&db, disc_id, project_id, "bob", "Second", false, 8000);
    let seeds = [make_entity_ref("issue", issue_id, 1)];
    let discussions = [make_matched_discussion(disc_id, "issue", issue_id, project_id)];
    let (timeline, _) = collect_events(&db, &seeds, &[], &[], &discussions, None, 100).unwrap();
    let found = timeline
        .iter()
        .find(|e| matches!(&e.event_type, TimelineEventType::DiscussionThread { .. }))
        .unwrap();
    assert_eq!(
        found.timestamp, 5000,
        "Thread timestamp should be first note's created_at"
    );
}
#[test]
fn test_collect_discussion_thread_sort_position() {
    let db = setup_test_db();
    let project = insert_project(&db);
    let issue = insert_issue(&db, project, 1);
    let discussion = insert_discussion(&db, project, Some(issue), None);

    // Note at t=2000 (between Created at t=1000 and state change at t=3000)
    insert_note(&db, discussion, project, "alice", "discussion", false, 2000);
    insert_state_event(&db, project, Some(issue), None, "closed", 3000);

    let seeds = [make_entity_ref("issue", issue, 1)];
    let matched = [make_matched_discussion(discussion, "issue", issue, project)];
    let (events, _) = collect_events(&db, &seeds, &[], &[], &matched, None, 100).unwrap();

    // Expected order: Created(1000), DiscussionThread(2000), StateChanged(3000)
    assert!(events.len() >= 3);
    assert!(matches!(events[0].event_type, TimelineEventType::Created));
    assert!(matches!(
        events[1].event_type,
        TimelineEventType::DiscussionThread { .. }
    ));
    assert!(matches!(
        events[2].event_type,
        TimelineEventType::StateChanged { .. }
    ));
}
#[test]
fn test_collect_discussion_thread_dedup() {
    let db = setup_test_db();
    let project = insert_project(&db);
    let issue = insert_issue(&db, project, 1);
    let discussion = insert_discussion(&db, project, Some(issue), None);
    insert_note(&db, discussion, project, "alice", "hello", false, 2000);

    let seeds = [make_entity_ref("issue", issue, 1)];
    // Same discussion_id twice
    let matched = [
        make_matched_discussion(discussion, "issue", issue, project),
        make_matched_discussion(discussion, "issue", issue, project),
    ];
    let (events, _) = collect_events(&db, &seeds, &[], &[], &matched, None, 100).unwrap();

    let mut thread_count = 0;
    for ev in &events {
        if matches!(&ev.event_type, TimelineEventType::DiscussionThread { .. }) {
            thread_count += 1;
        }
    }
    assert_eq!(
        thread_count, 1,
        "Duplicate discussion_id should produce one thread"
    );
}

View File

@@ -4,7 +4,10 @@ use rusqlite::Connection;
use tracing::debug; use tracing::debug;
use crate::core::error::Result; use crate::core::error::Result;
use crate::core::timeline::{EntityRef, TimelineEvent, TimelineEventType, resolve_entity_ref}; use crate::core::timeline::{
EntityRef, MatchedDiscussion, TimelineEvent, TimelineEventType, resolve_entity_ref,
truncate_to_chars,
};
use crate::embedding::ollama::OllamaClient; use crate::embedding::ollama::OllamaClient;
use crate::search::{FtsQueryMode, SearchFilters, SearchMode, search_hybrid, to_fts_query}; use crate::search::{FtsQueryMode, SearchFilters, SearchMode, search_hybrid, to_fts_query};
@@ -12,6 +15,8 @@ use crate::search::{FtsQueryMode, SearchFilters, SearchMode, search_hybrid, to_f
pub struct SeedResult { pub struct SeedResult {
pub seed_entities: Vec<EntityRef>, pub seed_entities: Vec<EntityRef>,
pub evidence_notes: Vec<TimelineEvent>, pub evidence_notes: Vec<TimelineEvent>,
/// Discussions matched during seeding, to be collected as full threads.
pub matched_discussions: Vec<MatchedDiscussion>,
/// The search mode actually used (hybrid with fallback info). /// The search mode actually used (hybrid with fallback info).
pub search_mode: String, pub search_mode: String,
} }
@@ -38,6 +43,7 @@ pub async fn seed_timeline(
return Ok(SeedResult { return Ok(SeedResult {
seed_entities: Vec::new(), seed_entities: Vec::new(),
evidence_notes: Vec::new(), evidence_notes: Vec::new(),
matched_discussions: Vec::new(),
search_mode: "lexical".to_owned(), search_mode: "lexical".to_owned(),
}); });
} }
@@ -76,7 +82,7 @@ pub async fn seed_timeline(
debug!(warning = %w, "hybrid search warning during timeline seeding"); debug!(warning = %w, "hybrid search warning during timeline seeding");
} }
let seed_entities = resolve_documents_to_entities( let (seed_entities, matched_discussions) = resolve_documents_to_entities(
conn, conn,
&hybrid_results &hybrid_results
.iter() .iter()
@@ -91,19 +97,21 @@ pub async fn seed_timeline(
Ok(SeedResult { Ok(SeedResult {
seed_entities, seed_entities,
evidence_notes, evidence_notes,
matched_discussions,
search_mode, search_mode,
}) })
} }
/// Resolve a list of document IDs to deduplicated entity refs. /// Resolve a list of document IDs to deduplicated entity refs and matched discussions.
/// Discussion documents are resolved to their parent entity (issue or MR). /// Discussion and note documents are resolved to their parent entity (issue or MR).
/// Returns (entities, matched_discussions).
fn resolve_documents_to_entities( fn resolve_documents_to_entities(
conn: &Connection, conn: &Connection,
document_ids: &[i64], document_ids: &[i64],
max_entities: usize, max_entities: usize,
) -> Result<Vec<EntityRef>> { ) -> Result<(Vec<EntityRef>, Vec<MatchedDiscussion>)> {
if document_ids.is_empty() { if document_ids.is_empty() {
return Ok(Vec::new()); return Ok((Vec::new(), Vec::new()));
} }
let placeholders: String = document_ids let placeholders: String = document_ids
@@ -114,9 +122,13 @@ fn resolve_documents_to_entities(
let sql = format!( let sql = format!(
r" r"
SELECT d.source_type, d.source_id, d.project_id, SELECT d.source_type, d.source_id, d.project_id,
disc.issue_id, disc.merge_request_id COALESCE(disc.issue_id, note_disc.issue_id) AS issue_id,
COALESCE(disc.merge_request_id, note_disc.merge_request_id) AS mr_id,
COALESCE(disc.id, note_disc.id) AS discussion_id
FROM documents d FROM documents d
LEFT JOIN discussions disc ON disc.id = d.source_id AND d.source_type = 'discussion' LEFT JOIN discussions disc ON disc.id = d.source_id AND d.source_type = 'discussion'
LEFT JOIN notes n ON n.id = d.source_id AND d.source_type = 'note'
LEFT JOIN discussions note_disc ON note_disc.id = n.discussion_id AND d.source_type = 'note'
WHERE d.id IN ({placeholders}) WHERE d.id IN ({placeholders})
ORDER BY CASE d.id {order_clause} END ORDER BY CASE d.id {order_clause} END
", ",
@@ -135,37 +147,55 @@ fn resolve_documents_to_entities(
.collect(); .collect();
let rows = stmt.query_map(params.as_slice(), |row| { let rows = stmt.query_map(params.as_slice(), |row| {
Ok(( Ok((
row.get::<_, String>(0)?, row.get::<_, String>(0)?, // source_type
row.get::<_, i64>(1)?, row.get::<_, i64>(1)?, // source_id
row.get::<_, i64>(2)?, row.get::<_, i64>(2)?, // project_id
row.get::<_, Option<i64>>(3)?, row.get::<_, Option<i64>>(3)?, // issue_id (coalesced)
row.get::<_, Option<i64>>(4)?, row.get::<_, Option<i64>>(4)?, // mr_id (coalesced)
row.get::<_, Option<i64>>(5)?, // discussion_id (coalesced)
)) ))
})?; })?;
let mut seen = HashSet::new(); let mut seen_entities = HashSet::new();
let mut seen_discussions = HashSet::new();
let mut entities = Vec::new(); let mut entities = Vec::new();
let mut matched_discussions = Vec::new();
for row_result in rows { for row_result in rows {
let (source_type, source_id, proj_id, disc_issue_id, disc_mr_id) = row_result?; let (source_type, source_id, proj_id, disc_issue_id, disc_mr_id, discussion_id) =
row_result?;
let (entity_type, entity_id) = match source_type.as_str() { let (entity_type, entity_id) = match source_type.as_str() {
"issue" => ("issue".to_owned(), source_id), "issue" => ("issue".to_owned(), source_id),
"merge_request" => ("merge_request".to_owned(), source_id), "merge_request" => ("merge_request".to_owned(), source_id),
"discussion" => { "discussion" | "note" => {
if let Some(issue_id) = disc_issue_id { if let Some(issue_id) = disc_issue_id {
("issue".to_owned(), issue_id) ("issue".to_owned(), issue_id)
} else if let Some(mr_id) = disc_mr_id { } else if let Some(mr_id) = disc_mr_id {
("merge_request".to_owned(), mr_id) ("merge_request".to_owned(), mr_id)
} else { } else {
continue; // orphaned discussion continue; // orphaned discussion/note
} }
} }
_ => continue, _ => continue,
}; };
// Capture matched discussion (deduplicated)
if let Some(disc_id) = discussion_id
&& (source_type == "discussion" || source_type == "note")
&& seen_discussions.insert(disc_id)
{
matched_discussions.push(MatchedDiscussion {
discussion_id: disc_id,
entity_type: entity_type.clone(),
entity_id,
project_id: proj_id,
});
}
// Entity dedup
let key = (entity_type.clone(), entity_id); let key = (entity_type.clone(), entity_id);
if !seen.insert(key) { if !seen_entities.insert(key) {
continue; continue;
} }
@@ -179,7 +209,7 @@ fn resolve_documents_to_entities(
} }
} }
Ok(entities) Ok((entities, matched_discussions))
} }
/// Find evidence notes: FTS5-matched discussion notes that provide context. /// Find evidence notes: FTS5-matched discussion notes that provide context.
@@ -275,21 +305,6 @@ fn find_evidence_notes(
Ok(events) Ok(events)
} }
/// Truncate a string to at most `max_chars` characters on a safe UTF-8 boundary.
fn truncate_to_chars(s: &str, max_chars: usize) -> String {
let char_count = s.chars().count();
if char_count <= max_chars {
return s.to_owned();
}
let byte_end = s
.char_indices()
.nth(max_chars)
.map(|(i, _)| i)
.unwrap_or(s.len());
s[..byte_end].to_owned()
}
#[cfg(test)] #[cfg(test)]
#[path = "timeline_seed_tests.rs"] #[path = "timeline_seed_tests.rs"]
mod tests; mod tests;

View File

@@ -316,23 +316,110 @@ async fn test_seed_respects_project_filter() {
assert_eq!(result.seed_entities[0].project_path, "group/project"); assert_eq!(result.seed_entities[0].project_path, "group/project");
} }
#[test] // ─── Matched discussion tests ───────────────────────────────────────────────
fn test_truncate_to_chars_short() {
assert_eq!(truncate_to_chars("hello", 200), "hello"); #[tokio::test]
async fn test_seed_captures_matched_discussions_from_discussion_doc() {
let conn = setup_test_db();
let project_id = insert_test_project(&conn);
let issue_id = insert_test_issue(&conn, project_id, 1);
let disc_id = insert_discussion(&conn, project_id, Some(issue_id), None);
insert_document(
&conn,
"discussion",
disc_id,
project_id,
"deployment pipeline authentication",
);
let result = seed_timeline(&conn, None, "deployment", None, None, 50, 10)
.await
.unwrap();
assert_eq!(result.matched_discussions.len(), 1);
assert_eq!(result.matched_discussions[0].discussion_id, disc_id);
assert_eq!(result.matched_discussions[0].entity_type, "issue");
assert_eq!(result.matched_discussions[0].entity_id, issue_id);
} }
#[test] #[tokio::test]
fn test_truncate_to_chars_long() { async fn test_seed_captures_matched_discussions_from_note_doc() {
let long = "a".repeat(300); let conn = setup_test_db();
let result = truncate_to_chars(&long, 200); let project_id = insert_test_project(&conn);
assert_eq!(result.chars().count(), 200); let issue_id = insert_test_issue(&conn, project_id, 1);
let disc_id = insert_discussion(&conn, project_id, Some(issue_id), None);
let note_id = insert_note(&conn, disc_id, project_id, "note about deployment", false);
insert_document(
&conn,
"note",
note_id,
project_id,
"deployment configuration details",
);
let result = seed_timeline(&conn, None, "deployment", None, None, 50, 10)
.await
.unwrap();
assert_eq!(
result.matched_discussions.len(),
1,
"Note doc should resolve to parent discussion"
);
assert_eq!(result.matched_discussions[0].discussion_id, disc_id);
assert_eq!(result.matched_discussions[0].entity_type, "issue");
} }
#[test] #[tokio::test]
fn test_truncate_to_chars_multibyte() { async fn test_seed_deduplicates_matched_discussions() {
let s = "\u{1F600}".repeat(300); // emoji let conn = setup_test_db();
let result = truncate_to_chars(&s, 200); let project_id = insert_test_project(&conn);
assert_eq!(result.chars().count(), 200); let issue_id = insert_test_issue(&conn, project_id, 1);
// Verify valid UTF-8 let disc_id = insert_discussion(&conn, project_id, Some(issue_id), None);
assert!(std::str::from_utf8(result.as_bytes()).is_ok());
// Two docs referencing the same discussion
insert_document(
&conn,
"discussion",
disc_id,
project_id,
"deployment pipeline first doc",
);
let note_id = insert_note(&conn, disc_id, project_id, "deployment note", false);
insert_document(
&conn,
"note",
note_id,
project_id,
"deployment pipeline second doc",
);
let result = seed_timeline(&conn, None, "deployment", None, None, 50, 10)
.await
.unwrap();
assert_eq!(
result.matched_discussions.len(),
1,
"Same discussion_id from two docs should deduplicate"
);
}
#[tokio::test]
async fn test_seed_matched_discussions_have_correct_parent_entity() {
let conn = setup_test_db();
let project_id = insert_test_project(&conn);
let mr_id = insert_test_mr(&conn, project_id, 99);
let disc_id = insert_discussion(&conn, project_id, None, Some(mr_id));
insert_document(
&conn,
"discussion",
disc_id,
project_id,
"deployment pipeline for merge request",
);
let result = seed_timeline(&conn, None, "deployment", None, None, 50, 10)
.await
.unwrap();
assert_eq!(result.matched_discussions.len(), 1);
assert_eq!(result.matched_discussions[0].entity_type, "merge_request");
assert_eq!(result.matched_discussions[0].entity_id, mr_id);
} }

View File

@@ -177,6 +177,7 @@ async fn pipeline_seed_expand_collect_end_to_end() {
&seed_result.seed_entities, &seed_result.seed_entities,
&expand_result.expanded_entities, &expand_result.expanded_entities,
&seed_result.evidence_notes, &seed_result.evidence_notes,
&seed_result.matched_discussions,
None, None,
1000, 1000,
) )
@@ -233,6 +234,7 @@ async fn pipeline_empty_query_produces_empty_result() {
&seed_result.seed_entities, &seed_result.seed_entities,
&expand_result.expanded_entities, &expand_result.expanded_entities,
&seed_result.evidence_notes, &seed_result.evidence_notes,
&seed_result.matched_discussions,
None, None,
1000, 1000,
) )
@@ -270,6 +272,7 @@ async fn pipeline_since_filter_excludes_old_events() {
&seed_result.seed_entities, &seed_result.seed_entities,
&expand_result.expanded_entities, &expand_result.expanded_entities,
&seed_result.evidence_notes, &seed_result.evidence_notes,
&seed_result.matched_discussions,
Some(5000), Some(5000),
1000, 1000,
) )