Constellation, Spacedust, Slingshot, UFOs: atproto crates and services for microcosm

Merge pull request #19 from at-microcosm/ufos-jetstream-backfill

UFOs: jetstream backfill

+6 -9
Cargo.lock
···
[[package]]
name = "atrium-api"
- version = "0.25.2"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "0d4eb9b4787aba546015c8ccda1d3924c157cee13d67848997fba74ac8144a07"
+ version = "0.25.3"
+ source = "git+https://github.com/uniphil/atrium?branch=fix%2Fnsid-allow-nonleading-name-digits#c4364f318d337bbc3e3e3aaf97c9f971e95f5f7e"
dependencies = [
"atrium-common",
"atrium-xrpc",
···
[[package]]
name = "atrium-common"
- version = "0.1.1"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "ba30d2f9e1a8b3db8fc97d0a5f91ee5a28f8acdddb771ad74c1b08eda357ca3d"
+ version = "0.1.2"
+ source = "git+https://github.com/uniphil/atrium?branch=fix%2Fnsid-allow-nonleading-name-digits#c4364f318d337bbc3e3e3aaf97c9f971e95f5f7e"
dependencies = [
"dashmap",
"lru",
···
[[package]]
name = "atrium-xrpc"
- version = "0.12.2"
- source = "registry+https://github.com/rust-lang/crates.io-index"
- checksum = "18a9e526cb2ed3e0a2ca78c3ce2a943d9041a68e067dadf42923b523771e07df"
+ version = "0.12.3"
+ source = "git+https://github.com/uniphil/atrium?branch=fix%2Fnsid-allow-nonleading-name-digits#c4364f318d337bbc3e3e3aaf97c9f971e95f5f7e"
dependencies = [
"http",
"serde",
+1 -1
jetstream/Cargo.toml
···
[dependencies]
async-trait = "0.1.83"
- atrium-api = { version = "0.25.2", default-features = false, features = [
+ atrium-api = { git = "https://github.com/uniphil/atrium", branch = "fix/nsid-allow-nonleading-name-digits", default-features = false, features = [
"namespace-appbsky",
] }
tokio = { version = "1.44.2", features = ["full", "sync", "time"] }
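Note: the manifest points `atrium-api` straight at the fork, and the `atrium-common` / `atrium-xrpc` lock entries above follow along as transitive git dependencies. A hypothetical alternative (not what this PR does) would be a workspace-level `[patch.crates-io]`, which redirects all three crates in one place:

```toml
# Sketch only — assumes a workspace-root Cargo.toml; the PR instead edits the
# jetstream crate's [dependencies] entry directly.
[patch.crates-io]
atrium-api = { git = "https://github.com/uniphil/atrium", branch = "fix/nsid-allow-nonleading-name-digits" }
atrium-common = { git = "https://github.com/uniphil/atrium", branch = "fix/nsid-allow-nonleading-name-digits" }
atrium-xrpc = { git = "https://github.com/uniphil/atrium", branch = "fix/nsid-allow-nonleading-name-digits" }
```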
+2 -2
jetstream/src/lib.rs
···
if let Some(last) = last_cursor {
if event_cursor <= *last {
- log::warn!("event cursor {event_cursor:?} was older than the last one: {last:?}. dropping event.");
+ log::warn!("event cursor {event_cursor:?} was not newer than the last one: {last:?}. dropping event.");
continue;
}
}
···
if let Some(last) = last_cursor {
if event_cursor <= *last {
- log::warn!("event cursor {event_cursor:?} was older than the last one: {last:?}. dropping event.");
+ log::warn!("event cursor {event_cursor:?} was not newer than the last one: {last:?}. dropping event.");
continue;
}
}
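The rewording matters because the guard is `<=`, not `<`: an event whose cursor equals the last one is dropped too, so "older" was inaccurate. A minimal standalone sketch of the guard (cursors simplified to `u64`; the real code compares the crate's `Cursor` type):

```rust
/// Returns true when an event should be dropped: it is *not newer* than the
/// last cursor, which covers duplicates as well as genuinely older events.
fn is_stale(event_cursor: u64, last_cursor: Option<u64>) -> bool {
    matches!(last_cursor, Some(last) if event_cursor <= last)
}

fn main() {
    assert!(is_stale(5, Some(5)));  // equal: dropped, but not "older"
    assert!(is_stale(4, Some(5)));  // older: dropped
    assert!(!is_stale(6, Some(5))); // newer: kept
    assert!(!is_stale(6, None));    // no previous cursor: kept
}
```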
+32 -12
ufos/src/consumer.rs
···
use std::mem;
use std::time::Duration;
use tokio::sync::mpsc::{channel, Receiver, Sender};
+ use tokio::time::{timeout, Interval};
use crate::error::{BatchInsertError, FirehoseEventError};
use crate::{DeleteAccount, EventBatch, UFOsCommit};
···
batch_sender: Sender<LimitedBatch>,
current_batch: CurrentBatch,
sketch_secret: SketchSecretPrefix,
+ rate_limit: Interval,
}
pub async fn consume(
···
.await?;
let (batch_sender, batch_reciever) = channel::<LimitedBatch>(BATCH_QUEUE_SIZE);
let mut batcher = Batcher::new(jetstream_receiver, batch_sender, sketch_secret);
- tokio::task::spawn(async move { batcher.run().await });
+ tokio::task::spawn(async move {
+     let r = batcher.run().await;
+     log::info!("batcher ended: {r:?}");
+ });
Ok(batch_reciever)
}
···
batch_sender: Sender<LimitedBatch>,
sketch_secret: SketchSecretPrefix,
) -> Self {
+ let mut rate_limit = tokio::time::interval(std::time::Duration::from_millis(5));
+ rate_limit.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
Self {
jetstream_receiver,
batch_sender,
current_batch: Default::default(),
sketch_secret,
+ rate_limit,
}
}
pub async fn run(&mut self) -> anyhow::Result<()> {
+ // TODO: report errors *from here* probably, since this gets shipped off into a spawned task that might just vanish
loop {
- if let Some(event) = self.jetstream_receiver.recv().await {
-     self.handle_event(event).await?
- } else {
-     anyhow::bail!("channel closed");
+ match timeout(Duration::from_millis(9_000), self.jetstream_receiver.recv()).await {
+     Err(_elapsed) => self.no_events_step().await?,
+     Ok(Some(event)) => self.handle_event(event).await?,
+     Ok(None) => anyhow::bail!("channel closed"),
}
}
}
+ async fn no_events_step(&mut self) -> anyhow::Result<()> {
+     let empty = self.current_batch.batch.is_empty();
+     log::info!("no events received, stepping batcher (empty? {empty})");
+     if !empty {
+         self.send_current_batch_now(true, "no events step").await?;
+     }
+     Ok(())
+ }
+
async fn handle_event(&mut self, event: JetstreamEvent) -> anyhow::Result<()> {
if let Some(earliest) = &self.current_batch.initial_cursor {
if event.cursor.duration_since(earliest)? > Duration::from_secs_f64(MAX_BATCH_SPAN_SECS)
{
- self.send_current_batch_now(false).await?;
+ self.send_current_batch_now(false, "time since event")
+     .await?;
}
} else {
self.current_batch.initial_cursor = Some(event.cursor);
···
if event.cursor.duration_since(earliest)?.as_secs_f64() > MIN_BATCH_SPAN_SECS
&& self.batch_sender.capacity() == BATCH_QUEUE_SIZE
{
- self.send_current_batch_now(true).await?;
+ self.send_current_batch_now(true, "available queue").await?;
}
}
Ok(())
···
);
if let Err(BatchInsertError::BatchFull(commit)) = optimistic_res {
- self.send_current_batch_now(false).await?;
+ self.send_current_batch_now(false, "handle commit").await?;
self.current_batch.batch.insert_commit_by_nsid(
&collection,
commit,
···
async fn handle_delete_account(&mut self, did: Did, cursor: Cursor) -> anyhow::Result<()> {
if self.current_batch.batch.account_removes.len() >= MAX_ACCOUNT_REMOVES {
- self.send_current_batch_now(false).await?;
+ self.send_current_batch_now(false, "delete account").await?;
}
self.current_batch
.batch
···
// holds up all consumer progress until it can send to the channel
// use this when the current batch is too full to add more to it
- async fn send_current_batch_now(&mut self, small: bool) -> anyhow::Result<()> {
+ async fn send_current_batch_now(&mut self, small: bool, referrer: &str) -> anyhow::Result<()> {
let beginning = match self.current_batch.initial_cursor.map(|c| c.elapsed()) {
None => "unknown".to_string(),
Some(Ok(t)) => format!("{:?}", t),
Some(Err(e)) => format!("+{:?}", e.duration()),
};
- log::info!(
-     "sending batch now from {beginning}, {}, queue capacity: {}",
+ log::trace!(
+     "sending batch now from {beginning}, {}, queue capacity: {}, referrer: {referrer}",
if small { "small" } else { "full" },
self.batch_sender.capacity(),
);
let current = mem::take(&mut self.current_batch);
+ self.rate_limit.tick().await;
self.batch_sender
.send_timeout(current.batch, Duration::from_secs_f64(SEND_TIMEOUT_S))
.await?;
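Two patterns are at work in this hunk: wrapping `recv()` in `tokio::time::timeout` so a quiet firehose (9s without events) flushes partial batches instead of blocking forever, and a 5ms `Interval` acting as a send throttle. A self-contained sketch of the same shape (channel and event types are stand-ins, not the crate's):

```rust
use std::time::Duration;
use tokio::sync::mpsc;
use tokio::time::{interval, timeout, MissedTickBehavior};

#[tokio::main]
async fn main() {
    let (tx, mut rx) = mpsc::channel::<u64>(16);
    tx.send(1).await.unwrap();
    drop(tx); // after the buffered event, recv() yields None (channel closed)

    // Throttle: one tick per 5ms; Delay pushes missed ticks later rather
    // than bursting to catch up.
    let mut rate_limit = interval(Duration::from_millis(5));
    rate_limit.set_missed_tick_behavior(MissedTickBehavior::Delay);

    loop {
        match timeout(Duration::from_millis(9_000), rx.recv()).await {
            // Idle: nothing arrived within the window; flush a partial batch here.
            Err(_elapsed) => println!("no events, stepping batcher"),
            Ok(Some(event)) => {
                // Batch the event; before each send, await the throttle.
                rate_limit.tick().await;
                println!("batched event {event}");
            }
            Ok(None) => break, // all senders dropped
        }
    }
}
```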
+24 -8
ufos/src/file_consumer.rs
···
async fn read_jsonl(f: File, sender: Sender<JetstreamEvent>) -> Result<()> {
let mut lines = BufReader::new(f).lines();
while let Some(line) = lines.next_line().await? {
- let event: JetstreamEvent =
-     serde_json::from_str(&line).map_err(JetstreamEventError::ReceivedMalformedJSON)?;
- if sender.send(event).await.is_err() {
-     log::warn!("All receivers for the jsonl fixture have been dropped, bye.");
-     return Err(JetstreamEventError::ReceiverClosedError.into());
+ match serde_json::from_str::<JetstreamEvent>(&line) {
+     Ok(event) => match sender.send(event).await {
+         Ok(_) => {}
+         Err(e) => {
+             log::warn!("All receivers for the jsonl fixture have been dropped, bye: {e:?}");
+             return Err(JetstreamEventError::ReceiverClosedError.into());
+         }
+     },
+     Err(parse_err) => {
+         log::warn!("failed to parse event: {parse_err:?} from event:\n{line}");
+         continue;
+     }
}
}
- Ok(())
+ log::info!("reached end of jsonl file, looping on noop to keep server alive.");
+ loop {
+     tokio::time::sleep(std::time::Duration::from_secs_f64(10.)).await;
+ }
}
pub async fn consume(
···
let (jsonl_sender, jsonl_receiver) = channel::<JetstreamEvent>(16);
let (batch_sender, batch_reciever) = channel::<LimitedBatch>(BATCH_QUEUE_SIZE);
let mut batcher = Batcher::new(jsonl_receiver, batch_sender, sketch_secret);
- tokio::task::spawn(async move { read_jsonl(f, jsonl_sender).await });
- tokio::task::spawn(async move { batcher.run().await });
+ tokio::task::spawn(async move {
+     let r = read_jsonl(f, jsonl_sender).await;
+     log::info!("read_jsonl finished: {r:?}");
+ });
+ tokio::task::spawn(async move {
+     let r = batcher.run().await;
+     log::info!("batcher finished: {r:?}");
+ });
Ok(batch_reciever)
}
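The fixture reader now tolerates bad lines (skip and log) and, at end of file, parks instead of returning, so the channel stays open and the server keeps serving whatever was ingested. The skip-and-log half in isolation (hypothetical `Event` type and string input standing in for the fixture file):

```rust
use serde::Deserialize;

// Stand-in for JetstreamEvent; just enough shape to parse.
#[derive(Debug, Deserialize)]
struct Event {
    cursor: u64,
}

fn main() {
    let jsonl = "{\"cursor\":1}\nnot json at all\n{\"cursor\":2}";
    for line in jsonl.lines() {
        match serde_json::from_str::<Event>(line) {
            Ok(event) => println!("got {event:?}"),
            // Malformed input no longer aborts the whole read; it is logged
            // and skipped, mirroring the file_consumer change.
            Err(parse_err) => eprintln!("failed to parse event: {parse_err} from:\n{line}"),
        }
    }
}
```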
+1
ufos/src/lib.rs
···
endpoint: String,
started_at: u64,
latest_cursor: Option<u64>,
+ rollup_cursor: Option<u64>,
},
}
+143 -4
ufos/src/main.rs
···
use clap::Parser;
use jetstream::events::Cursor;
use std::path::PathBuf;
+ use std::time::{Duration, SystemTime};
use ufos::consumer;
use ufos::file_consumer;
use ufos::server;
···
use ufos::storage_fjall::FjallStorage;
use ufos::storage_mem::MemStorage;
use ufos::store_types::SketchSecretPrefix;
+ use ufos::ConsumerInfo;
#[cfg(not(target_env = "msvc"))]
use tikv_jemallocator::Jemalloc;
···
#[arg(long)]
data: PathBuf,
/// DEBUG: don't start the jetstream consumer or its write loop
- /// todo: restore this
#[arg(long, action)]
pause_writer: bool,
+ /// Adjust runtime settings like background task intervals for efficient backfill
+ #[arg(long, action)]
+ backfill: bool,
/// DEBUG: force the rw loop to fall behind by pausing it
/// todo: restore this
#[arg(long, action)]
···
/// DEBUG: use an in-memory store instead of fjall
#[arg(long, action)]
in_mem: bool,
+ /// reset the rollup cursor, scrape through missed things in the past (backfill)
+ #[arg(long, action)]
+ reroll: bool,
/// DEBUG: interpret jetstream as a file fixture
#[arg(long, action)]
jetstream_fixture: bool,
···
args.jetstream,
args.jetstream_fixture,
args.pause_writer,
+ args.backfill,
+ args.reroll,
read_store,
write_store,
cursor,
···
args.jetstream,
args.jetstream_fixture,
args.pause_writer,
+ args.backfill,
+ args.reroll,
read_store,
write_store,
cursor,
···
Ok(())
}
+ #[allow(clippy::too_many_arguments)]
async fn go<B: StoreBackground>(
jetstream: String,
jetstream_fixture: bool,
pause_writer: bool,
- read_store: impl StoreReader + 'static,
+ backfill: bool,
+ reroll: bool,
+ read_store: impl StoreReader + 'static + Clone,
mut write_store: impl StoreWriter<B> + 'static,
cursor: Option<Cursor>,
sketch_secret: SketchSecretPrefix,
) -> anyhow::Result<()> {
println!("starting server with storage...");
- let serving = server::serve(read_store);
+ let serving = server::serve(read_store.clone());
if pause_writer {
log::info!("not starting jetstream or the write loop.");
···
consumer::consume(&jetstream, cursor, false, sketch_secret).await?
};
- let rolling = write_store.background_tasks()?.run();
+ let rolling = write_store.background_tasks(reroll)?.run(backfill);
let storing = write_store.receive_batches(batches);
+
+ let stating = do_update_stuff(read_store);
tokio::select! {
z = serving => log::warn!("serve task ended: {z:?}"),
z = rolling => log::warn!("rollup task ended: {z:?}"),
z = storing => log::warn!("storage task ended: {z:?}"),
+ z = stating => log::warn!("status task ended: {z:?}"),
};
println!("bye!");
Ok(())
}
+
+ async fn do_update_stuff(read_store: impl StoreReader) {
+     let started_at = std::time::SystemTime::now();
+     let mut first_cursor = None;
+     let mut first_rollup = None;
+     let mut last_at = std::time::SystemTime::now();
+     let mut last_cursor = None;
+     let mut last_rollup = None;
+     loop {
+         tokio::time::sleep(std::time::Duration::from_secs_f64(4.)).await;
+         match read_store.get_consumer_info().await {
+             Err(e) => log::warn!("failed to get jetstream consumer info: {e:?}"),
+             Ok(ConsumerInfo::Jetstream {
+                 latest_cursor,
+                 rollup_cursor,
+                 ..
+             }) => {
+                 let now = std::time::SystemTime::now();
+                 let latest_cursor = latest_cursor.map(Cursor::from_raw_u64);
+                 let rollup_cursor = rollup_cursor.map(Cursor::from_raw_u64);
+                 backfill_info(
+                     latest_cursor,
+                     rollup_cursor,
+                     last_cursor,
+                     last_rollup,
+                     last_at,
+                     first_cursor,
+                     first_rollup,
+                     started_at,
+                     now,
+                 );
+                 first_cursor = first_cursor.or(latest_cursor);
+                 first_rollup = first_rollup.or(rollup_cursor);
+                 last_cursor = latest_cursor;
+                 last_rollup = rollup_cursor;
+                 last_at = now;
+             }
+         }
+     }
+ }
+
+ #[allow(clippy::too_many_arguments)]
+ fn backfill_info(
+     latest_cursor: Option<Cursor>,
+     rollup_cursor: Option<Cursor>,
+     last_cursor: Option<Cursor>,
+     last_rollup: Option<Cursor>,
+     last_at: SystemTime,
+     first_cursor: Option<Cursor>,
+     first_rollup: Option<Cursor>,
+     started_at: SystemTime,
+     now: SystemTime,
+ ) {
+     let nice_duration = |dt: Duration| {
+         let secs = dt.as_secs_f64();
+         if secs < 1. {
+             return format!("{:.0}ms", secs * 1000.);
+         }
+         if secs < 60. {
+             return format!("{secs:.02}s");
+         }
+         let mins = (secs / 60.).floor();
+         let rsecs = secs - (mins * 60.);
+         if mins < 60. {
+             return format!("{mins:.0}m{rsecs:.0}s");
+         }
+         let hrs = (mins / 60.).floor();
+         let rmins = mins - (hrs * 60.);
+         if hrs < 24. {
+             return format!("{hrs:.0}h{rmins:.0}m{rsecs:.0}s");
+         }
+         let days = (hrs / 24.).floor();
+         let rhrs = hrs - (days * 24.);
+         format!("{days:.0}d{rhrs:.0}h{rmins:.0}m{rsecs:.0}s")
+     };
+
+     let nice_dt_two_maybes = |earlier: Option<Cursor>, later: Option<Cursor>| match (earlier, later)
+     {
+         (Some(earlier), Some(later)) => match later.duration_since(&earlier) {
+             Ok(dt) => nice_duration(dt),
+             Err(e) => {
+                 let rev_dt = e.duration();
+                 format!("+{}", nice_duration(rev_dt))
+             }
+         },
+         _ => "unknown".to_string(),
+     };
+
+     let rate = |mlatest: Option<Cursor>, msince: Option<Cursor>, real: Duration| {
+         mlatest
+             .zip(msince)
+             .map(|(latest, since)| {
+                 latest
+                     .duration_since(&since)
+                     .unwrap_or(Duration::from_millis(1))
+             })
+             .map(|dtc| format!("{:.2}", dtc.as_secs_f64() / real.as_secs_f64()))
+             .unwrap_or("??".into())
+     };
+
+     let dt_real = now
+         .duration_since(last_at)
+         .unwrap_or(Duration::from_millis(1));
+
+     let dt_real_total = now
+         .duration_since(started_at)
+         .unwrap_or(Duration::from_millis(1));
+
+     let cursor_rate = rate(latest_cursor, last_cursor, dt_real);
+     let cursor_avg = rate(latest_cursor, first_cursor, dt_real_total);
+
+     let rollup_rate = rate(rollup_cursor, last_rollup, dt_real);
+     let rollup_avg = rate(rollup_cursor, first_rollup, dt_real_total);
+
+     log::info!(
+         "cursor: {} behind (→{}, {cursor_rate}x, {cursor_avg}x avg). rollup: {} behind (→{}, {rollup_rate}x, {rollup_avg}x avg).",
+         latest_cursor.map(|c| c.elapsed().map(nice_duration).unwrap_or("++".to_string())).unwrap_or("?".to_string()),
+         nice_dt_two_maybes(last_cursor, latest_cursor),
+         rollup_cursor.map(|c| c.elapsed().map(nice_duration).unwrap_or("++".to_string())).unwrap_or("?".to_string()),
+         nice_dt_two_maybes(last_rollup, rollup_cursor),
+     );
+ }
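For reference, what `nice_duration` produces at each magnitude, with the closure pulled out as a free function (same body as above) so the cases can be checked directly:

```rust
use std::time::Duration;

// Free-function copy of the `nice_duration` closure, for a quick check.
fn nice_duration(dt: Duration) -> String {
    let secs = dt.as_secs_f64();
    if secs < 1. {
        return format!("{:.0}ms", secs * 1000.);
    }
    if secs < 60. {
        return format!("{secs:.02}s");
    }
    let mins = (secs / 60.).floor();
    let rsecs = secs - (mins * 60.);
    if mins < 60. {
        return format!("{mins:.0}m{rsecs:.0}s");
    }
    let hrs = (mins / 60.).floor();
    let rmins = mins - (hrs * 60.);
    if hrs < 24. {
        return format!("{hrs:.0}h{rmins:.0}m{rsecs:.0}s");
    }
    let days = (hrs / 24.).floor();
    let rhrs = hrs - (days * 24.);
    format!("{days:.0}d{rhrs:.0}h{rmins:.0}m{rsecs:.0}s")
}

fn main() {
    assert_eq!(nice_duration(Duration::from_millis(250)), "250ms");
    assert_eq!(nice_duration(Duration::from_secs(42)), "42.00s");
    assert_eq!(nice_duration(Duration::from_secs(3_700)), "1h1m40s");
    assert_eq!(nice_duration(Duration::from_secs(90_000)), "1d1h0m0s");
}
```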
+4 -3
ufos/src/storage.rs
···
where
Self: 'static,
{
- fn background_tasks(&mut self) -> StorageResult<B>;
+ fn background_tasks(&mut self, reroll: bool) -> StorageResult<B>;
fn receive_batches<const LIMIT: usize>(
mut self,
···
fn step_rollup(&mut self) -> StorageResult<(usize, HashSet<Nsid>)>;
- fn trim_collection(&mut self, collection: &Nsid, limit: usize) -> StorageResult<()>;
+ fn trim_collection(&mut self, collection: &Nsid, limit: usize)
+     -> StorageResult<(usize, usize)>;
fn delete_account(&mut self, did: &Did) -> StorageResult<usize>;
}
#[async_trait]
pub trait StoreBackground: Send + Sync {
- async fn run(mut self) -> StorageResult<()>;
+ async fn run(mut self, backfill: bool) -> StorageResult<()>;
}
#[async_trait]
+41 -27
ufos/src/storage_fjall.rs
···
let keyspace = {
let config = Config::new(path);
- #[cfg(not(test))]
- let config = config.fsync_ms(Some(4_000));
+ // #[cfg(not(test))]
+ // let config = config.fsync_ms(Some(4_000));
config.open()?
};
···
)?;
} else {
return Err(StorageError::InitError(format!(
- "stored js_endpoint {stored:?} differs from provided {endpoint:?}, refusing to start.")));
+ "stored js_endpoint {stored:?} differs from provided {endpoint:?}, refusing to start without --jetstream-force.")));
}
}
stored_secret
···
get_snapshot_static_neu::<JetstreamCursorKey, JetstreamCursorValue>(&global)?
.map(|c| c.to_raw_u64());
+ let rollup_cursor =
+     get_snapshot_static_neu::<NewRollupCursorKey, NewRollupCursorValue>(&global)?
+         .map(|c| c.to_raw_u64());
+
Ok(ConsumerInfo::Jetstream {
endpoint,
started_at,
latest_cursor,
+ rollup_cursor,
})
}
···
}
impl StoreWriter<FjallBackground> for FjallWriter {
- fn background_tasks(&mut self) -> StorageResult<FjallBackground> {
+ fn background_tasks(&mut self, reroll: bool) -> StorageResult<FjallBackground> {
if self.bg_taken.swap(true, Ordering::SeqCst) {
Err(StorageError::BackgroundAlreadyStarted)
} else {
+ if reroll {
+     insert_static_neu::<NewRollupCursorKey>(&self.global, Cursor::from_start())?;
+ }
Ok(FjallBackground(self.clone()))
}
}
···
Ok((cursors_stepped, dirty_nsids))
}
- fn trim_collection(&mut self, collection: &Nsid, limit: usize) -> StorageResult<()> {
+ fn trim_collection(
+     &mut self,
+     collection: &Nsid,
+     limit: usize,
+ ) -> StorageResult<(usize, usize)> {
let mut dangling_feed_keys_cleaned = 0;
let mut records_deleted = 0;
···
let mut live_records_found = 0;
let mut latest_expired_feed_cursor = None;
let mut batch = self.keyspace.batch();
- for kv in self.feeds.range(live_range).rev() {
+ for (i, kv) in self.feeds.range(live_range).rev().enumerate() {
+     if i > 1_000_000 {
+         log::info!("stopping collection trim early: already scanned 1M elements");
+         break;
+     }
let (key_bytes, val_bytes) = kv?;
let feed_key = db_complete::<NsidRecordFeedKey>(&key_bytes)?;
let feed_val = db_complete::<NsidRecordFeedVal>(&val_bytes)?;
···
batch.commit()?;
- log::info!("trim_collection ({collection:?}) removed {dangling_feed_keys_cleaned} dangling feed entries and {records_deleted} records");
- Ok(())
+ log::trace!("trim_collection ({collection:?}) removed {dangling_feed_keys_cleaned} dangling feed entries and {records_deleted} records");
+ Ok((dangling_feed_keys_cleaned, records_deleted))
fn delete_account(&mut self, did: &Did) -> Result<usize, StorageError> {
···
#[async_trait]
impl StoreBackground for FjallBackground {
- async fn run(mut self) -> StorageResult<()> {
+ async fn run(mut self, backfill: bool) -> StorageResult<()> {
let mut dirty_nsids = HashSet::new();
- let mut rollup = tokio::time::interval(Duration::from_millis(81));
+ let mut rollup =
+     tokio::time::interval(Duration::from_millis(if backfill { 1 } else { 81 }));
rollup.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
- let mut trim = tokio::time::interval(Duration::from_millis(6_000));
+ let mut trim =
+     tokio::time::interval(Duration::from_millis(if backfill { 3_000 } else { 6_000 }));
trim.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Skip);
loop {
···
rollup.reset_after(Duration::from_millis(1_200)); // we're caught up, take a break
dirty_nsids.extend(dirty);
- log::info!("rolled up {n} items ({} collections now dirty)", dirty_nsids.len());
+ log::trace!("rolled up {n} items ({} collections now dirty)", dirty_nsids.len());
},
_ = trim.tick() => {
- log::info!("trimming {} nsids: {dirty_nsids:?}", dirty_nsids.len());
+ let n = dirty_nsids.len();
+ log::trace!("trimming {n} nsids: {dirty_nsids:?}");
let t0 = Instant::now();
+ let (mut total_danglers, mut total_deleted) = (0, 0);
for collection in &dirty_nsids {
- self.0.trim_collection(collection, 512).inspect_err(|e| log::error!("trim error: {e:?}"))?;
+ let (danglers, deleted) = self.0.trim_collection(collection, 512).inspect_err(|e| log::error!("trim error: {e:?}"))?;
+ total_danglers += danglers;
+ total_deleted += deleted;
+ if total_deleted > 1_000_000 {
+     log::info!("trim stopped early, more than 1M records already deleted.");
+     break;
+ }
- log::info!("finished trimming in {:?}", t0.elapsed());
+ log::info!("finished trimming {n} nsids in {:?}: {total_danglers} dangling and {total_deleted} total removed.", t0.elapsed());
dirty_nsids.clear();
},
};
···
////////// temp stuff to remove:
-
- // fn summarize_batch<const LIMIT: usize>(batch: &EventBatch<LIMIT>) -> String {
- //     format!(
- //         "batch of {: >3} samples from {: >4} records in {: >2} collections from ~{: >4} DIDs, {} acct removes, cursor {: <12?}",
- //         batch.total_records(),
- //         batch.total_seen(),
- //         batch.total_collections(),
- //         batch.estimate_dids(),
- //         batch.account_removes(),
- //         batch.latest_cursor().map(|c| c.elapsed()),
- //     )
- // }
#[cfg(test)]
mod tests {
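The backfill flag only changes cadence (rollup 81ms → 1ms, trim 6s → 3s); the two intervals keep their different missed-tick behaviors. A small sketch of why those behaviors differ, per tokio's documented semantics:

```rust
use std::time::Duration;
use tokio::time::{interval, MissedTickBehavior};

#[tokio::main]
async fn main() {
    // Delay: if a tick is missed because work ran long, later ticks slide
    // back; there is never a burst of catch-up ticks. Used for rollup.
    let mut rollup = interval(Duration::from_millis(81));
    rollup.set_missed_tick_behavior(MissedTickBehavior::Delay);

    // Skip: missed ticks are simply dropped and the original schedule is
    // kept. Used for the periodic trim, where catching up is pointless.
    let mut trim = interval(Duration::from_millis(6_000));
    trim.set_missed_tick_behavior(MissedTickBehavior::Skip);

    // The first tick on either interval completes immediately.
    tokio::select! {
        _ = rollup.tick() => println!("rollup tick"),
        _ = trim.tick() => println!("trim tick"),
    }
}
```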
+9 -4
ufos/src/storage_mem.rs
···
get_snapshot_static_neu::<JetstreamCursorKey, JetstreamCursorValue>(&global)?
.map(|c| c.to_raw_u64());
+ let rollup_cursor =
+     get_snapshot_static_neu::<NewRollupCursorKey, JetstreamCursorValue>(&global)?
+         .map(|c| c.to_raw_u64());
+
Ok(ConsumerInfo::Jetstream {
endpoint,
started_at,
latest_cursor,
+ rollup_cursor,
})
}
···
}
impl StoreWriter<MemBackground> for MemWriter {
- fn background_tasks(&mut self) -> StorageResult<MemBackground> {
+ fn background_tasks(&mut self, _reroll: bool) -> StorageResult<MemBackground> {
Ok(MemBackground {})
}
···
collection: &Nsid,
limit: usize,
// TODO: could add a start cursor limit to avoid iterating deleted stuff at the start (/end)
- ) -> StorageResult<()> {
+ ) -> StorageResult<(usize, usize)> {
let mut dangling_feed_keys_cleaned = 0;
let mut records_deleted = 0;
···
batch.commit()?;
log::info!("trim_collection ({collection:?}) removed {dangling_feed_keys_cleaned} dangling feed entries and {records_deleted} records");
- Ok(())
+ Ok((dangling_feed_keys_cleaned, records_deleted))
fn delete_account(&mut self, did: &Did) -> Result<usize, StorageError> {
···
#[async_trait]
impl StoreBackground for MemBackground {
- async fn run(mut self) -> StorageResult<()> {
+ async fn run(mut self, _backfill: bool) -> StorageResult<()> {
// noop for mem (is there a nicer way to do this?)
loop {
tokio::time::sleep(std::time::Duration::from_secs_f64(10.)).await;