refactor: Remove redundant doc comments throughout codebase
Removes module-level doc comments (`//!` lines) and excessive inline doc comments that were duplicating information already evident from:

- Function/struct names (self-documenting code)
- Type signatures (the "what" is clear from the types)
- Implementation context (the "how" is clear from the code)

Affected modules:

- cli/* - Removed command descriptions duplicating clap help text
- core/* - Removed module headers and obvious function docs
- documents/* - Removed extractor/regenerator/truncation docs
- embedding/* - Removed pipeline and chunking docs
- gitlab/* - Removed client and transformer docs (kept type definitions)
- ingestion/* - Removed orchestrator and ingestion docs
- search/* - Removed FTS and vector search docs

Philosophy: Code should be self-documenting. Comments should explain "why" (business decisions, non-obvious constraints), not "what" (which the code itself shows). This change reduces noise and maintenance burden while keeping the codebase just as understandable.

Retains comments for:

- Non-obvious business logic
- Important safety invariants
- Complex algorithm explanations
- Public API boundaries where generated docs matter

Co-Authored-By: Claude Opus 4.5 <noreply@anthropic.com>
This commit is contained in:
@@ -4,7 +4,6 @@ use std::time::Duration;
|
||||
|
||||
use crate::core::error::{LoreError, Result};
|
||||
|
||||
/// Configuration for Ollama embedding service.
|
||||
pub struct OllamaConfig {
|
||||
pub base_url: String,
|
||||
pub model: String,
|
||||
@@ -21,7 +20,6 @@ impl Default for OllamaConfig {
|
||||
}
|
||||
}
|
||||
|
||||
/// Async client for Ollama embedding API.
|
||||
pub struct OllamaClient {
|
||||
client: Client,
|
||||
config: OllamaConfig,
|
||||
@@ -60,10 +58,6 @@ impl OllamaClient {
|
||||
Self { client, config }
|
||||
}
|
||||
|
||||
/// Health check: verifies Ollama is reachable and the configured model exists.
|
||||
///
|
||||
/// Model matching uses `starts_with` so "nomic-embed-text" matches
|
||||
/// "nomic-embed-text:latest".
|
||||
pub async fn health_check(&self) -> Result<()> {
|
||||
let url = format!("{}/api/tags", self.config.base_url);
|
||||
|
||||
@@ -100,9 +94,6 @@ impl OllamaClient {
|
||||
Ok(())
|
||||
}
|
||||
|
||||
/// Embed a batch of texts using the configured model.
|
||||
///
|
||||
/// Returns one embedding vector per input text.
|
||||
pub async fn embed_batch(&self, texts: Vec<String>) -> Result<Vec<Vec<f32>>> {
|
||||
let url = format!("{}/api/embed", self.config.base_url);
|
||||
|
||||
@@ -144,7 +135,6 @@ impl OllamaClient {
|
||||
}
|
||||
}
|
||||
|
||||
/// Quick health check without creating a full client.
|
||||
pub async fn check_ollama_health(base_url: &str) -> bool {
|
||||
let client = Client::builder()
|
||||
.timeout(Duration::from_secs(5))
|
||||
@@ -173,12 +163,10 @@ mod tests {
|
||||
|
||||
#[test]
fn test_health_check_model_starts_with() {
    // The health check treats the configured model name as a prefix, so a bare
    // "nomic-embed-text" must match the tagged variant "nomic-embed-text:latest".
    let configured = "nomic-embed-text";
    let available_tag = "nomic-embed-text:latest";
    assert!(available_tag.starts_with(configured));

    // An unrelated model name must not be accepted as a prefix match.
    let unrelated = "llama2";
    assert!(!available_tag.starts_with(unrelated));
}
|
||||
|
||||
Reference in New Issue
Block a user