feat(runtime): replace tokio+reqwest with asupersync async runtime

- Add HTTP adapter layer (src/http.rs) wrapping asupersync h1 client
- Migrate gitlab client, graphql, and ollama to HTTP adapter
- Swap entrypoint from #[tokio::main] to RuntimeBuilder::new().block_on()
- Rewrite signal handler for asupersync (RuntimeHandle::spawn + ctrl_c())
- Migrate rate limiter sleeps to asupersync::time::sleep(wall_now(), d)
- Add asupersync-native HTTP integration tests
- Convert timeline_seed_tests to RuntimeBuilder pattern

Phases 1-3 of asupersync migration (atomic: code won't compile without all pieces).
This commit is contained in:
teernisse
2026-03-06 15:23:55 -05:00
parent bf977eca1a
commit e8d6c5b15f
16 changed files with 1974 additions and 1189 deletions

View File

@@ -1,9 +1,8 @@
use reqwest::Client;
use serde::{Deserialize, Serialize};
use std::time::Duration;
use tracing::warn;
use crate::core::error::{LoreError, Result};
use crate::http::Client;
pub struct OllamaConfig {
pub base_url: String,
@@ -51,17 +50,7 @@ struct ModelInfo {
impl OllamaClient {
/// Builds an `OllamaClient` from the given configuration.
///
/// The HTTP client is constructed with the configured request timeout;
/// `Client::with_timeout` is infallible, so no fallback path is needed
/// (unlike the former reqwest builder, which could fail).
pub fn new(config: OllamaConfig) -> Self {
    // NOTE(review): assumes `timeout_secs` applies per-request — confirm
    // against crate::http::Client's contract.
    let client = Client::with_timeout(Duration::from_secs(config.timeout_secs));
    Self { client, config }
}
@@ -70,22 +59,17 @@ impl OllamaClient {
let response =
self.client
.get(&url)
.send()
.get(&url, &[])
.await
.map_err(|e| LoreError::OllamaUnavailable {
base_url: self.config.base_url.clone(),
detail: Some(format!("{e:?}")),
})?;
let tags: TagsResponse =
response
.json()
.await
.map_err(|e| LoreError::OllamaUnavailable {
base_url: self.config.base_url.clone(),
detail: Some(format!("{e:?}")),
})?;
let tags: TagsResponse = response.json().map_err(|e| LoreError::OllamaUnavailable {
base_url: self.config.base_url.clone(),
detail: Some(format!("{e:?}")),
})?;
let model_found = tags.models.iter().any(|m| {
m.name == self.config.model || m.name.starts_with(&format!("{}:", self.config.model))
@@ -110,49 +94,36 @@ impl OllamaClient {
let response = self
.client
.post(&url)
.json(&request)
.send()
.post_json(&url, &[], &request)
.await
.map_err(|e| LoreError::OllamaUnavailable {
base_url: self.config.base_url.clone(),
detail: Some(format!("{e:?}")),
})?;
let status = response.status();
if !status.is_success() {
let body = response.text().await.unwrap_or_default();
if !response.is_success() {
let status = response.status;
let body = response.text().unwrap_or_default();
return Err(LoreError::EmbeddingFailed {
document_id: 0,
reason: format!("HTTP {}: {}", status, body),
reason: format!("HTTP {status}: {body}"),
});
}
let embed_response: EmbedResponse =
response
.json()
.await
.map_err(|e| LoreError::EmbeddingFailed {
document_id: 0,
reason: format!("Failed to parse embed response: {}", e),
})?;
response.json().map_err(|e| LoreError::EmbeddingFailed {
document_id: 0,
reason: format!("Failed to parse embed response: {e}"),
})?;
Ok(embed_response.embeddings)
}
}
pub async fn check_ollama_health(base_url: &str) -> bool {
let client = Client::builder()
.timeout(Duration::from_secs(5))
.build()
.ok();
let Some(client) = client else {
return false;
};
let client = Client::with_timeout(Duration::from_secs(5));
let url = format!("{base_url}/api/tags");
client.get(&url).send().await.is_ok()
client.get(&url, &[]).await.is_ok_and(|r| r.is_success())
}
#[cfg(test)]