use crate::db_types::{db_complete, DbBytes, DbStaticStr, StaticStr};
use crate::error::StorageError;
use crate::storage::{StorageResult, StorageWhatever, StoreBackground, StoreReader, StoreWriter};
use crate::store_types::{
    AllTimeRollupKey, CommitCounts, CountsValue, DeleteAccountQueueKey, DeleteAccountQueueVal,
    HourTruncatedCursor, HourlyRollupKey, JetstreamCursorKey, JetstreamCursorValue,
    JetstreamEndpointKey, JetstreamEndpointValue, LiveCountsKey, NewRollupCursorKey,
    NewRollupCursorValue, NsidRecordFeedKey, NsidRecordFeedVal, RecordLocationKey,
    RecordLocationMeta, RecordLocationVal, RecordRawValue, SketchSecretPrefix, TakeoffKey,
    TakeoffValue, WeekTruncatedCursor, WeeklyRollupKey, WithCollection,
};
use crate::{
    CommitAction, ConsumerInfo, Did, EventBatch, Nsid, NsidCount, OrderCollectionsBy, UFOsRecord,
};
use async_trait::async_trait;
use jetstream::events::Cursor;
use lsm_tree::range::prefix_to_range;
use std::collections::{BTreeMap, HashMap, HashSet};
use std::ops::Bound; // needed for matching prefix_to_range's bounds below
use std::path::Path;
use std::sync::{Arc, Mutex, RwLock}; // Arc: every shared handle in this module uses it
use std::time::SystemTime;

const MAX_BATCHED_CLEANUP_SIZE: usize = 1024; // try to commit progress for longer feeds
const MAX_BATCHED_ACCOUNT_DELETE_RECORDS: usize = 1024;
const MAX_BATCHED_ROLLUP_COUNTS: usize = 256;

/// new data format, roughly:
///
/// Partition: 'global'
///
/// - Global sequence counter (is the jetstream cursor -- monotonic with many gaps)
///   - key: "js_cursor" (literal)
/// - Jetstream server endpoint (persisted because the cursor can't be used on another instance without data loss)
///   - key: "js_endpoint" (literal)
///   - val: string (URL of the instance)
/// - Takeoff time
///   - key: "takeoff" (literal)
///   - val: u64 (micros timestamp, not from jetstream for now so not precise)
/// - Rollup cursor (bg work: roll stats into hourlies, delete accounts, old record deletes)
///   - key: "rollup_cursor" (literal)
///   - val: u64 (tracks behind js_cursor)
///
/// Partition: 'feed'
///
/// - Per-collection list of record references ordered by jetstream cursor
///   - key: nullstr || u64 (collection nsid null-terminated, jetstream cursor)
///   - val: nullstr || nullstr || nullstr (did, rkey, rev. rev is mostly a sanity-check for now.)
///
/// Partition: 'records'
///
/// - Actual records by their atproto location
///   - key: nullstr || nullstr || nullstr (did, collection, rkey)
///   - val: u64 || bool || nullstr || rawval (js_cursor, is_update, rev, actual record)
///
/// Partition: 'rollups'
///
/// - Live (batched) records counts and dids estimate per collection
///   - key: "live_counts" || u64 || nullstr (js_cursor, nsid)
///   - val: u64 || HLL (count (not cursor), estimator)
/// - Hourly total record counts and dids estimate per collection
///   - key: "hourly_counts" || u64 || nullstr (hour, nsid)
///   - val: u64 || HLL (count (not cursor), estimator)
/// - Weekly total record counts and dids estimate per collection
///   - key: "weekly_counts" || u64 || nullstr (week, nsid)
///   - val: u64 || HLL (count (not cursor), estimator)
/// - All-time total record counts and dids estimate per collection
///   - key: "ever_counts" || nullstr (nsid)
///   - val: u64 || HLL (count (not cursor), estimator)
/// - TODO: sorted indexes for all-times?
///
/// Partition: 'queues'
///
/// - Delete account queue
///   - key: "delete_acount" || u64 (js_cursor)
///   - val: nullstr (did)
///
/// TODO: moderation actions
/// TODO: account privacy preferences. Might wait for the protocol-level (PDS-level?) stuff to land. Will probably do lazy fetching + caching on read.
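///
/// Illustrative 'feed' entry, with made-up values (the exact byte encodings
/// live in store_types; nullstr here means a null-terminated string):
///
///   key: "app.bsky.feed.post\0" ++ u64(js_cursor)
///   val: "did:plc:abc123\0" ++ "somerkey\0" ++ "rev-1\0"
///
/// Keys sort lexicographically, so one collection's entries sit adjacent in
/// cursor order; the prefix scans and reversed (newest-first) feed iteration
/// below rely on exactly that.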

pub struct MemStorage {}

#[derive(Debug, Default)]
pub struct MemConfig {
    /// drop the db when the storage is dropped
    /// this is only meant for tests
    pub temp: bool,
}

struct BatchSentinel {}

#[derive(Clone)]
struct MemKeyspace {
    keyspace_guard: Arc<RwLock<BatchSentinel>>,
}

impl MemKeyspace {
    pub fn open() -> Self {
        Self {
            keyspace_guard: Arc::new(RwLock::new(BatchSentinel {})),
        }
    }

    pub fn open_partition(&self, _name: &str) -> StorageResult<MemPartion> {
        Ok(MemPartion {
            // name: name.to_string(),
            keyspace_guard: self.keyspace_guard.clone(),
            contents: Default::default(),
        })
    }

    pub fn batch(&self) -> MemBatch {
        MemBatch {
            keyspace_guard: self.keyspace_guard.clone(),
            tasks: Vec::new(),
        }
    }

    pub fn instant(&self) -> u64 {
        // ... (body elided)
    }
}

enum BatchTask {
    Insert { p: MemPartion, key: Vec<u8>, val: Vec<u8> },
    Remove { p: MemPartion, key: Vec<u8> },
}

pub struct MemBatch {
    keyspace_guard: Arc<RwLock<BatchSentinel>>,
    tasks: Vec<BatchTask>,
}

impl MemBatch {
    pub fn insert(&mut self, p: &MemPartion, key: &[u8], val: &[u8]) {
        self.tasks.push(BatchTask::Insert {
            p: p.clone(),
            key: key.to_vec(),
            val: val.to_vec(),
        });
    }

    pub fn remove(&mut self, p: &MemPartion, key: &[u8]) {
        self.tasks.push(BatchTask::Remove {
            p: p.clone(),
            key: key.to_vec(),
        });
    }

    pub fn len(&self) -> usize {
        self.tasks.len()
    }

    pub fn commit(&mut self) -> StorageResult<()> {
        let _guard = self.keyspace_guard.write().unwrap();
        for task in &mut self.tasks {
            match task {
                BatchTask::Insert { p, key, val } => p
                    .contents
                    .try_lock()
                    .unwrap()
                    .insert(key.to_vec(), val.to_vec()),
                BatchTask::Remove { p, key } => p.contents.try_lock().unwrap().remove(key),
            };
        }
        Ok(())
    }
}
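
// Concurrency sketch, as reconstructed from this excerpt: point reads and
// single-key writes take the keyspace RwLock as *readers* (each partition's
// own Mutex serializes access to its BTreeMap), while MemBatch::commit takes
// it as a *writer*, so a committed batch becomes visible atomically with
// respect to readers -- loosely imitating a real keyspace's write batches.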

#[derive(Clone)]
struct MemPartion {
    // name: String,
    keyspace_guard: Arc<RwLock<BatchSentinel>>,
    contents: Arc<Mutex<BTreeMap<Vec<u8>, Vec<u8>>>>,
}

impl MemPartion {
    pub fn get(&self, key: &[u8]) -> StorageResult<Option<Vec<u8>>> {
        let _guard = self.keyspace_guard.read().unwrap();
        Ok(self.contents.lock().unwrap().get(key).cloned())
    }

    pub fn prefix(&self, pre: &[u8]) -> Vec<StorageResult<(Vec<u8>, Vec<u8>)>> {
        // let prefix_bytes = prefix.to_db_bytes()?;
        let (_, Bound::Excluded(range_end)) = prefix_to_range(pre) else {
            panic!("prefix_to_range returned an unexpected bound for this prefix");
        };
        return self.range(pre.to_vec()..range_end.to_vec());
    }
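
    // Conceptually, prefix_to_range(b"ab") yields the half-open range
    // b"ab"..b"ac": the excluded end is the smallest byte string sorting after
    // every key that starts with the prefix. (Illustrative; an all-0xFF prefix
    // has no such excluded end, which is the case the panic above rules out.)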

    pub fn range(&self, r: std::ops::Range<Vec<u8>>) -> Vec<StorageResult<(Vec<u8>, Vec<u8>)>> {
        let _guard = self.keyspace_guard.read().unwrap();
        self.contents
            .lock()
            .unwrap()
            .range(r)
            .map(|(k, v)| Ok((k.clone(), v.clone())))
            .collect()
    }

    pub fn insert(&self, key: &[u8], val: &[u8]) -> StorageResult<()> {
        let _guard = self.keyspace_guard.read().unwrap();
        self.contents
            .lock()
            .unwrap()
            .insert(key.to_vec(), val.to_vec());
        Ok(())
    }

    // pub fn remove(&self, key: &[u8]) -> StorageResult<()> {
    //     let _guard = self.keyspace_guard.read().unwrap();
    //     ...
    // }

    pub fn snapshot_at(&self, _instant: u64) -> Self {
        // the in-mem store keeps no history; a "snapshot" is just a shared handle
        self.clone()
    }

    pub fn snapshot(&self) -> Self {
        self.clone()
    }
}

impl StorageWhatever<MemReader, MemWriter, MemBackground, MemConfig> for MemStorage {
    fn init(
        _path: impl AsRef<Path>,
        endpoint: String,
        force_endpoint: bool,
        _config: MemConfig,
    ) -> StorageResult<(MemReader, MemWriter, Option<Cursor>, SketchSecretPrefix)> {
        let keyspace = MemKeyspace::open();

        let global = keyspace.open_partition("global")?;
        let feeds = keyspace.open_partition("feeds")?;
        let records = keyspace.open_partition("records")?;
        let rollups = keyspace.open_partition("rollups")?;
        let queues = keyspace.open_partition("queues")?;

        let js_cursor = get_static_neu::<JetstreamCursorKey, JetstreamCursorValue>(&global)?;

        if js_cursor.is_some() {
            let stored_endpoint =
                get_static_neu::<JetstreamEndpointKey, JetstreamEndpointValue>(&global)?;

            let JetstreamEndpointValue(stored) = stored_endpoint.ok_or(StorageError::InitError(
                "found cursor but missing js_endpoint, refusing to start.".to_string(),
            ))?;

            if stored != endpoint {
                if force_endpoint {
                    log::warn!("forcing a jetstream switch from {stored:?} to {endpoint:?}");
                    insert_static_neu::<JetstreamEndpointKey>(
                        &global,
                        JetstreamEndpointValue(endpoint.to_string()),
                    )?;
                } else {
                    return Err(StorageError::InitError(format!(
                        "stored js_endpoint {stored:?} differs from provided {endpoint:?}, refusing to start.")));
                }
            }
        } else {
            insert_static_neu::<JetstreamEndpointKey>(
                &global,
                JetstreamEndpointValue(endpoint.to_string()),
            )?;
            insert_static_neu::<TakeoffKey>(&global, Cursor::at(SystemTime::now()))?;
            insert_static_neu::<NewRollupCursorKey>(&global, Cursor::from_start())?;
        }

        let reader = MemReader {
            keyspace: keyspace.clone(),
            global: global.clone(),
            feeds: feeds.clone(),
            records: records.clone(),
            rollups: rollups.clone(),
        };
        let writer = MemWriter {
            keyspace,
            global,
            feeds,
            records,
            rollups,
            queues,
        };
        let secret_prefix = [0u8; 16]; // in-mem store is always deterministic: no secret
        Ok((reader, writer, js_cursor, secret_prefix))
    }
}

type MemRKV = StorageResult<(Vec<u8>, Vec<u8>)>;

#[derive(Clone)]
pub struct MemReader {
    keyspace: MemKeyspace,
    global: MemPartion,
    feeds: MemPartion,
    records: MemPartion,
    rollups: MemPartion,
}

/// An iterator that knows how to skip over deleted/invalidated records
struct RecordIterator {
    db_iter: Box<dyn Iterator<Item = MemRKV>>,
    records: MemPartion,
    limit: usize,
    fetched: usize,
}

impl RecordIterator {
    fn new(
        feeds: &MemPartion,
        records: MemPartion,
        collection: &Nsid,
        limit: usize,
    ) -> StorageResult<Self> {
        let prefix = NsidRecordFeedKey::from_prefix_to_db_bytes(collection)?;
        let db_iter = feeds.prefix(&prefix).into_iter().rev();
        Ok(Self {
            db_iter: Box::new(db_iter),
            records,
            limit,
            fetched: 0,
        })
    }

    fn get_record(&self, db_next: MemRKV) -> StorageResult<Option<UFOsRecord>> {
        let (key_bytes, val_bytes) = db_next?;
        let feed_key = db_complete::<NsidRecordFeedKey>(&key_bytes)?;
        let feed_val = db_complete::<NsidRecordFeedVal>(&val_bytes)?;
        let location_key: RecordLocationKey = (&feed_key, &feed_val).into();

        let Some(location_val_bytes) = self.records.get(&location_key.to_db_bytes()?)? else {
            // record was deleted (hopefully)
            return Ok(None);
        };

        let (meta, n) = RecordLocationMeta::from_db_bytes(&location_val_bytes)?;

        if meta.cursor() != feed_key.cursor() {
            // older/different version
            return Ok(None);
        }
        if meta.rev != feed_val.rev() {
            log::warn!("record lookup: cursor match but rev did not...? excluding.");
            return Ok(None);
        }
        let Some(raw_value_bytes) = location_val_bytes.get(n..) else {
            log::warn!(
                "record lookup: found record but could not get bytes to decode the record??"
            );
            return Ok(None);
        };
        let rawval = db_complete::<RecordRawValue>(raw_value_bytes)?;
        Ok(Some(UFOsRecord {
            collection: feed_key.collection().clone(),
            cursor: feed_key.cursor(),
            did: feed_val.did().clone(),
            rkey: feed_val.rkey().clone(),
            rev: meta.rev.to_string(),
            record: rawval.try_into()?,
            is_update: meta.is_update,
        }))
    }
}

impl Iterator for RecordIterator {
    type Item = StorageResult<Option<UFOsRecord>>;
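
    // Three-layer yield semantics: None from next() means the underlying feed
    // iterator is exhausted; Some(Ok(None)) means the requested limit was hit;
    // deleted and stale entries are skipped rather than yielded.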

    fn next(&mut self) -> Option<Self::Item> {
        if self.fetched == self.limit {
            return Some(Ok(None));
        }
        let record = loop {
            let db_next = self.db_iter.next()?; // None short-circuits here
            match self.get_record(db_next) {
                Err(e) => return Some(Err(e)),
                Ok(Some(record)) => break record,
                Ok(None) => continue,
            }
        };
        self.fetched += 1;
        Some(Ok(Some(record)))
    }
}

impl MemReader {
    fn get_storage_stats(&self) -> StorageResult<serde_json::Value> {
        let rollup_cursor =
            get_static_neu::<NewRollupCursorKey, NewRollupCursorValue>(&self.global)?
                .map(|c| c.to_raw_u64());
        Ok(serde_json::json!({
            "rollup_cursor": rollup_cursor,
            // ... (other fields elided)
        }))
    }

    fn get_consumer_info(&self) -> StorageResult<ConsumerInfo> {
        let global = self.global.snapshot();

        let endpoint =
            get_snapshot_static_neu::<JetstreamEndpointKey, JetstreamEndpointValue>(&global)?
                .ok_or(StorageError::BadStateError(
                    "Could not find jetstream endpoint".to_string(),
                ))?;

        let started_at = get_snapshot_static_neu::<TakeoffKey, TakeoffValue>(&global)?
            .ok_or(StorageError::BadStateError(
                "Could not find jetstream takeoff time".to_string(),
            ))?;

        let latest_cursor =
            get_snapshot_static_neu::<JetstreamCursorKey, JetstreamCursorValue>(&global)?
                .map(|c| c.to_raw_u64());

        let rollup_cursor =
            get_snapshot_static_neu::<NewRollupCursorKey, JetstreamCursorValue>(&global)?
                .map(|c| c.to_raw_u64());

        Ok(ConsumerInfo::Jetstream {
            // ... (fields elided)
        })
    }

    fn get_counts_by_collection(&self, collection: &Nsid) -> StorageResult<(u64, u64)> {
        // 0. grab a snapshot in case rollups happen while we're working
        let instant = self.keyspace.instant();
        let global = self.global.snapshot_at(instant);
        let rollups = self.rollups.snapshot_at(instant);

        // 1. all-time counts
        let all_time_key = AllTimeRollupKey::new(collection).to_db_bytes()?;
        let mut total_counts = rollups
            .get(&all_time_key)?
            .as_deref()
            .map(db_complete::<CountsValue>)
            .transpose()?
            .unwrap_or_default();

        // 2. live counts that haven't been rolled into all-time yet.
        let rollup_cursor =
            get_snapshot_static_neu::<NewRollupCursorKey, NewRollupCursorValue>(&global)?.ok_or(
                StorageError::BadStateError("Could not find current rollup cursor".to_string()),
            )?;
        let full_range = LiveCountsKey::range_from_cursor(rollup_cursor)?;
        for kv in rollups.range(full_range) {
            let (key_bytes, val_bytes) = kv?;
            let key = db_complete::<LiveCountsKey>(&key_bytes)?;
            if key.collection() == collection {
                let counts = db_complete::<CountsValue>(&val_bytes)?;
                total_counts.merge(&counts);
            }
        }

        Ok((
            total_counts.counts().creates,
            total_counts.dids().estimate() as u64,
        ))
    }

    fn get_records_by_collections(
        &self,
        collections: HashSet<Nsid>,
        limit: usize,
        _expand_each_collection: bool,
    ) -> StorageResult<Vec<UFOsRecord>> {
        if collections.is_empty() {
            return Ok(vec![]);
        }
        let mut record_iterators = Vec::new();
        for collection in collections {
            let iter = RecordIterator::new(&self.feeds, self.records.clone(), &collection, limit)?;
            record_iterators.push(iter.peekable());
        }
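
        // Merge strategy: repeatedly peek every per-collection iterator and
        // pop whichever head record has the newest (largest) cursor, so
        // `merged` comes out globally newest-first across the collections.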

        let mut merged = Vec::new();
        loop {
            let mut latest: Option<(Cursor, usize)> = None; // (newest head cursor, iterator index)
            for (i, iter) in record_iterators.iter_mut().enumerate() {
                let Some(it) = iter.peek_mut() else {
                    continue;
                };
                let it = match it {
                    Ok(v) => v,
                    Err(e) => Err(std::mem::replace(e, StorageError::Stolen))?,
                };
                let Some(rec) = it else {
                    continue;
                };
                if let Some((cursor, _)) = latest {
                    if rec.cursor > cursor {
                        latest = Some((rec.cursor, i))
                    }
                } else {
                    latest = Some((rec.cursor, i));
                }
            }
            let Some((_, idx)) = latest else {
                break;
            };
            // unwraps are safe: we just peeked Some(Ok(Some(_))) at this index
            merged.push(record_iterators[idx].next().unwrap().unwrap().unwrap());
        }
        Ok(merged)
    }
}

#[async_trait]
impl StoreReader for MemReader {
    fn name(&self) -> String {
        "in-memory store".into()
    }

    async fn get_storage_stats(&self) -> StorageResult<serde_json::Value> {
        let s = self.clone();
        tokio::task::spawn_blocking(move || MemReader::get_storage_stats(&s)).await?
    }

    async fn get_consumer_info(&self) -> StorageResult<ConsumerInfo> {
        let s = self.clone();
        tokio::task::spawn_blocking(move || MemReader::get_consumer_info(&s)).await?
    }

    async fn get_collections(
        &self,
        // ... (param elided)
        _: OrderCollectionsBy,
        _: Option<HourTruncatedCursor>,
        _: Option<HourTruncatedCursor>,
    ) -> StorageResult<(Vec<NsidCount>, Option<Vec<u8>>)> {
        // ... (body elided)
    }

    async fn get_timeseries(
        &self,
        // ... (param elided)
        _: HourTruncatedCursor,
        _: Option<HourTruncatedCursor>,
        // ... (param elided)
    ) -> StorageResult<(Vec<HourTruncatedCursor>, HashMap<Nsid, Vec<CountsValue>>)> {
        // ... (body elided)
    }

    async fn get_counts_by_collection(&self, collection: &Nsid) -> StorageResult<(u64, u64)> {
        let s = self.clone();
        let collection = collection.clone();
        tokio::task::spawn_blocking(move || MemReader::get_counts_by_collection(&s, &collection))
            .await?
    }

    async fn get_records_by_collections(
        &self,
        collections: HashSet<Nsid>,
        limit: usize,
        expand_each_collection: bool,
    ) -> StorageResult<Vec<UFOsRecord>> {
        let s = self.clone();
        tokio::task::spawn_blocking(move || {
            MemReader::get_records_by_collections(&s, collections, limit, expand_each_collection)
        })
        .await?
    }
}

pub struct MemWriter {
    keyspace: MemKeyspace,
    global: MemPartion,
    feeds: MemPartion,
    records: MemPartion,
    rollups: MemPartion,
    queues: MemPartion,
}

impl MemWriter {
    fn rollup_delete_account(
        &mut self,
        cursor: Cursor,
        key_bytes: &[u8],
        val_bytes: &[u8],
    ) -> StorageResult<usize> {
        let did = db_complete::<DeleteAccountQueueVal>(val_bytes)?;
        self.delete_account(&did)?;
        let mut batch = self.keyspace.batch();
        batch.remove(&self.queues, key_bytes);
        insert_batch_static_neu::<NewRollupCursorKey>(&mut batch, &self.global, cursor)?;
        batch.commit()?;
        Ok(1)
    }

    fn rollup_live_counts(
        &mut self,
        timelies: impl Iterator<Item = Result<(Vec<u8>, Vec<u8>), StorageError>>,
        cursor_exclusive_limit: Option<Cursor>,
        rollup_limit: usize,
    ) -> StorageResult<usize> {
        // current strategy is to buffer counts in mem before writing the rollups
        // we *could* read+write every single batch to rollup.. but their merge is associative so
        // ...so save the db some work up front? is this worth it? who knows...

        #[derive(Eq, Hash, PartialEq)]
        enum Rollup {
            Hourly(HourTruncatedCursor),
            Weekly(WeekTruncatedCursor),
            AllTime,
        }

        let mut batch = self.keyspace.batch();
        let mut cursors_advanced = 0;
        let mut last_cursor = Cursor::from_start();
        let mut counts_by_rollup: HashMap<(Nsid, Rollup), CountsValue> = HashMap::new();
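
        // Each live entry funnels into three buffered slots: (nsid, Hourly),
        // (nsid, Weekly), and (nsid, AllTime), keyed by the hour-/week-
        // truncated cursor. CountsValue merges are associative, so summing in
        // memory first and writing each slot once matches folding every batch
        // into the db directly.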

        for (i, kv) in timelies.enumerate() {
            if i >= rollup_limit {
                break;
            }

            let (key_bytes, val_bytes) = kv?;
            let key = db_complete::<LiveCountsKey>(&key_bytes)
                .inspect_err(|e| log::warn!("rollup_live_counts: bad key: {e:?}"))?;

            if cursor_exclusive_limit
                .map(|limit| key.cursor() > limit)
                .unwrap_or(false)
            {
                break;
            }

            batch.remove(&self.rollups, &key_bytes);
            let val = db_complete::<CountsValue>(&val_bytes)
                .inspect_err(|e| log::warn!("rollup_live_counts: bad value: {e:?}"))?;

            counts_by_rollup
                .entry((
                    key.collection().clone(),
                    Rollup::Hourly(key.cursor().into()),
                ))
                .or_default()
                .merge(&val);
            counts_by_rollup
                .entry((
                    key.collection().clone(),
                    Rollup::Weekly(key.cursor().into()),
                ))
                .or_default()
                .merge(&val);
            counts_by_rollup
                .entry((key.collection().clone(), Rollup::AllTime))
                .or_default()
                .merge(&val);

            cursors_advanced += 1;
            last_cursor = key.cursor();
        }

        for ((nsid, rollup), counts) in counts_by_rollup {
            let key_bytes = match rollup {
                Rollup::Hourly(hourly_cursor) => {
                    HourlyRollupKey::new(hourly_cursor, &nsid).to_db_bytes()?
                }
                Rollup::Weekly(weekly_cursor) => {
                    WeeklyRollupKey::new(weekly_cursor, &nsid).to_db_bytes()?
                }
                Rollup::AllTime => AllTimeRollupKey::new(&nsid).to_db_bytes()?,
            };

            let mut rolled: CountsValue = self
                .rollups
                .get(&key_bytes)?
                .as_deref()
                .map(db_complete::<CountsValue>)
                .transpose()
                .inspect_err(|e| log::warn!("failed to decode stored rollup value: {e:?}"))?
                .unwrap_or_default();

            // try to round-trip before inserting, for funsies
            let tripppin = counts.to_db_bytes()?;
            let (and_back, n) = CountsValue::from_db_bytes(&tripppin)?;
            assert_eq!(n, tripppin.len());
            assert_eq!(counts.prefix, and_back.prefix);
            assert_eq!(counts.dids().estimate(), and_back.dids().estimate());
            if counts.counts().creates > 20_000_000 {
                panic!("implausibly large create count in live batch: {counts:?}")
            }
            // assert_eq!(rolled, and_back);

            rolled.merge(&counts);

            // try to round-trip before inserting, for funsies
            let tripppin = rolled.to_db_bytes()?;
            let (and_back, n) = CountsValue::from_db_bytes(&tripppin)?;
            assert_eq!(n, tripppin.len());
            assert_eq!(rolled.prefix, and_back.prefix);
            assert_eq!(rolled.dids().estimate(), and_back.dids().estimate());
            if rolled.counts().creates > 20_000_000 {
                panic!("implausibly large create count after merge: {rolled:?}")
            }
            // assert_eq!(rolled, and_back);

            batch.insert(&self.rollups, &key_bytes, &rolled.to_db_bytes()?);
        }

        insert_batch_static_neu::<NewRollupCursorKey>(&mut batch, &self.global, last_cursor)
            .inspect_err(|e| log::warn!("failed to queue rollup-cursor update: {e:?}"))?;
        batch.commit()?;

        Ok(cursors_advanced)
    }

impl StoreWriter<MemBackground> for MemWriter {
    fn background_tasks(&mut self, _reroll: bool) -> StorageResult<MemBackground> {
        Ok(MemBackground {})
    }

    fn insert_batch<const LIMIT: usize>(
        &mut self,
        event_batch: EventBatch<LIMIT>,
    ) -> StorageResult<()> {
        if event_batch.is_empty() {
            return Ok(());
        }

        let mut batch = self.keyspace.batch();

        // would be nice not to have to iterate everything at once here
        let latest = event_batch.latest_cursor().unwrap();
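
        // Per the layout documented at the top of this file: a delete ("Cut")
        // drops the record at its atproto location, while a create/update
        // ("Put") writes both a feed entry (collection + cursor -> did/rkey/rev)
        // and the record itself at its location.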

        for (nsid, commits) in event_batch.commits_by_nsid {
            for commit in commits.commits {
                let location_key: RecordLocationKey = (&commit, &nsid).into();

                match commit.action {
                    CommitAction::Cut => {
                        batch.remove(&self.records, &location_key.to_db_bytes()?);
                    }
                    CommitAction::Put(put_action) => {
                        let feed_key = NsidRecordFeedKey::from_pair(nsid.clone(), commit.cursor);
                        let feed_val: NsidRecordFeedVal =
                            (&commit.did, &commit.rkey, commit.rev.as_str()).into();
                        batch.insert(
                            &self.feeds,
                            &feed_key.to_db_bytes()?,
                            &feed_val.to_db_bytes()?,
                        );

                        let location_val: RecordLocationVal =
                            (commit.cursor, commit.rev.as_str(), put_action).into();
                        batch.insert(
                            &self.records,
                            &location_key.to_db_bytes()?,
                            &location_val.to_db_bytes()?,
                        );
                    }
                }
            }

            let live_counts_key: LiveCountsKey = (latest, &nsid).into();
            let counts_value = CountsValue::new(
                CommitCounts {
                    creates: commits.creates as u64,
                    updates: commits.updates as u64,
                    deletes: commits.deletes as u64,
                },
                commits.dids_estimate,
            );
            batch.insert(
                &self.rollups,
                &live_counts_key.to_db_bytes()?,
                &counts_value.to_db_bytes()?,
            );
        }

        for remove in event_batch.account_removes {
            let queue_key = DeleteAccountQueueKey::new(remove.cursor);
            let queue_val: DeleteAccountQueueVal = remove.did;
            batch.insert(
                &self.queues,
                &queue_key.to_db_bytes()?,
                &queue_val.to_db_bytes()?,
            );
        }

        batch.insert(
            &self.global,
            &DbStaticStr::<JetstreamCursorKey>::default().to_db_bytes()?,
            &latest.to_db_bytes()?,
        );

        batch.commit()?;

        Ok(())
    }

    fn step_rollup(&mut self) -> StorageResult<(usize, HashSet<Nsid>)> {
        let mut dirty_nsids = HashSet::new();

        let rollup_cursor =
            get_static_neu::<NewRollupCursorKey, NewRollupCursorValue>(&self.global)?
                .ok_or(StorageError::BadStateError(
                    "Could not find current rollup cursor".to_string(),
                ))
                .inspect_err(|e| log::warn!("failed getting rollup cursor: {e:?}"))?;

        let live_counts_range = LiveCountsKey::range_from_cursor(rollup_cursor)
            .inspect_err(|e| log::warn!("live counts range: {e:?}"))?;
        let mut timely_iter = self.rollups.range(live_counts_range).into_iter().peekable();

        let timely_next = timely_iter
            .peek_mut()
            .map(|kv| -> StorageResult<LiveCountsKey> {
                match kv {
                    Err(e) => Err(std::mem::replace(e, StorageError::Stolen))?,
                    Ok((key_bytes, _)) => {
                        let key = db_complete::<LiveCountsKey>(key_bytes).inspect_err(|e| {
                            log::warn!("failed getting key for next timely: {e:?}")
                        })?;
                        Ok(key)
                    }
                }
            })
            .transpose()
            .inspect_err(|e| log::warn!("failed to peek the next live-counts entry: {e:?}"))?;

        let delete_accounts_range =
            DeleteAccountQueueKey::new(rollup_cursor).range_to_prefix_end()?;
        let next_delete = self
            .queues
            .range(delete_accounts_range)
            .into_iter()
            .next()
            .transpose()
            .inspect_err(|e| log::warn!("range for next delete: {e:?}"))?
            .map(|(key_bytes, val_bytes)| {
                db_complete::<DeleteAccountQueueKey>(&key_bytes)
                    .inspect_err(|e| {
                        log::warn!("failed to decode the next delete-account key: {e:?}")
                    })
                    .map(|k| (k.suffix, key_bytes, val_bytes))
            })
            .transpose()
            .inspect_err(|e| log::warn!("failed getting next delete: {e:?}"))?;

        let cursors_stepped = match (timely_next, next_delete) {
            (Some(timely), Some((delete_cursor, delete_key_bytes, delete_val_bytes))) => {
                if timely.cursor() < delete_cursor {
                    let n = self
                        .rollup_live_counts(
                            timely_iter,
                            Some(delete_cursor),
                            MAX_BATCHED_ROLLUP_COUNTS,
                        )
                        .inspect_err(|e| log::warn!("rolling up live counts: {e:?}"))?;
                    dirty_nsids.insert(timely.collection().clone());
                    n
                } else {
                    self.rollup_delete_account(delete_cursor, &delete_key_bytes, &delete_val_bytes)
                        .inspect_err(|e| log::warn!("deleting account: {e:?}"))?
                }
            }
            (Some(timely), None) => {
                let n = self
                    .rollup_live_counts(timely_iter, None, MAX_BATCHED_ROLLUP_COUNTS)
                    .inspect_err(|e| {
                        log::warn!("rolling up live counts (no pending deletes): {e:?}")
                    })?;
                dirty_nsids.insert(timely.collection().clone());
                n
            }
            (None, Some((delete_cursor, delete_key_bytes, delete_val_bytes))) => self
                .rollup_delete_account(delete_cursor, &delete_key_bytes, &delete_val_bytes)
                .inspect_err(|e| log::warn!("deleting account (no pending counts): {e:?}"))?,
            (None, None) => 0,
        };

        Ok((cursors_stepped, dirty_nsids))
    }

    fn trim_collection(
        &mut self,
        collection: &Nsid,
        limit: usize,
        _: bool, // (param name elided in this excerpt)
        // TODO: could add a start cursor limit to avoid iterating deleted stuff at the start (/end)
    ) -> StorageResult<(usize, usize, bool)> {
        let mut dangling_feed_keys_cleaned = 0;
        let mut records_deleted = 0;
        let mut found = 0;

        let mut batch = self.keyspace.batch();

        let prefix = NsidRecordFeedKey::from_prefix_to_db_bytes(collection)?;
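
        // Walk this collection's feed newest-first: entries whose record is
        // missing or superseded are dangling and get cleaned up; after `limit`
        // live records have been kept, anything older is deleted outright
        // (both the feed entry and the record).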

        for kv in self.feeds.prefix(&prefix).into_iter().rev() {
            let (key_bytes, val_bytes) = kv?;
            let feed_key = db_complete::<NsidRecordFeedKey>(&key_bytes)?;
            let feed_val = db_complete::<NsidRecordFeedVal>(&val_bytes)?;
            let location_key: RecordLocationKey = (&feed_key, &feed_val).into();
            let location_key_bytes = location_key.to_db_bytes()?;

            let Some(location_val_bytes) = self.records.get(&location_key_bytes)? else {
                // record was deleted (hopefully)
                // (feed entries are keyed by the feed key, not the location key)
                batch.remove(&self.feeds, &key_bytes);
                dangling_feed_keys_cleaned += 1;
                continue;
            };

            let (meta, _) = RecordLocationMeta::from_db_bytes(&location_val_bytes)?;

            if meta.cursor() != feed_key.cursor() {
                // older/different version
                batch.remove(&self.feeds, &key_bytes);
                dangling_feed_keys_cleaned += 1;
                continue;
            }
            if meta.rev != feed_val.rev() {
                log::warn!("record lookup: cursor match but rev did not...? removing.");
                batch.remove(&self.feeds, &key_bytes);
                dangling_feed_keys_cleaned += 1;
                continue;
            }

            if batch.len() >= MAX_BATCHED_CLEANUP_SIZE {
                batch.commit()?;
                batch = self.keyspace.batch();
            }

            found += 1;
            if found <= limit {
                continue;
            }

            batch.remove(&self.feeds, &key_bytes);
            batch.remove(&self.records, &location_key_bytes);
            records_deleted += 1;
        }
        batch.commit()?;

        log::info!("trim_collection ({collection:?}) removed {dangling_feed_keys_cleaned} dangling feed entries and {records_deleted} records");
        Ok((dangling_feed_keys_cleaned, records_deleted, false))
    }

    fn delete_account(&mut self, did: &Did) -> Result<usize, StorageError> {
        let mut records_deleted = 0;
        let mut batch = self.keyspace.batch();
        let prefix = RecordLocationKey::from_prefix_to_db_bytes(did)?;
        for kv in self.records.prefix(&prefix) {
            let (key_bytes, _) = kv?;
            batch.remove(&self.records, &key_bytes);
            records_deleted += 1;
            if batch.len() >= MAX_BATCHED_ACCOUNT_DELETE_RECORDS {
                batch.commit()?;
                batch = self.keyspace.batch();
            }
        }
        batch.commit()?;
        Ok(records_deleted)
    }
}

pub struct MemBackground;

#[async_trait]
impl StoreBackground for MemBackground {
    async fn run(mut self, _backfill: bool) -> StorageResult<()> {
        // noop for mem (is there a nicer way to do this?)
        loop {
            tokio::time::sleep(std::time::Duration::from_secs_f64(10.)).await;
        }
    }
}

/// Get a value from a fixed key
fn get_static_neu<K: StaticStr, V: DbBytes>(global: &MemPartion) -> StorageResult<Option<V>> {
    let key_bytes = DbStaticStr::<K>::default().to_db_bytes()?;
    let value = global
        .get(&key_bytes)?
        .map(|value_bytes| db_complete(&value_bytes))
        .transpose()?;
    Ok(value)
}

/// Get a value from a fixed key (snapshot-partition variant)
fn get_snapshot_static_neu<K: StaticStr, V: DbBytes>(
    global: &MemPartion,
) -> StorageResult<Option<V>> {
    let key_bytes = DbStaticStr::<K>::default().to_db_bytes()?;
    let value = global
        .get(&key_bytes)?
        .map(|value_bytes| db_complete(&value_bytes))
        .transpose()?;
    Ok(value)
}

/// Set a value to a fixed key
fn insert_static_neu<K: StaticStr>(global: &MemPartion, value: impl DbBytes) -> StorageResult<()> {
    let key_bytes = DbStaticStr::<K>::default().to_db_bytes()?;
    let value_bytes = value.to_db_bytes()?;
    global.insert(&key_bytes, &value_bytes)?;
    Ok(())
}

/// Set a value to a fixed key, via a write batch
fn insert_batch_static_neu<K: StaticStr>(
    batch: &mut MemBatch,
    global: &MemPartion,
    value: impl DbBytes,
) -> StorageResult<()> {
    let key_bytes = DbStaticStr::<K>::default().to_db_bytes()?;
    let value_bytes = value.to_db_bytes()?;
    batch.insert(global, &key_bytes, &value_bytes);
    Ok(())
}
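
// Usage sketch: these helpers pin a partition value to a compile-time key
// string. For example, get_static_neu::<JetstreamCursorKey, JetstreamCursorValue>(&global)
// reads the "js_cursor" entry (key literal per the layout doc up top) and
// decodes it as a cursor value.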

#[derive(Debug, serde::Serialize, schemars::JsonSchema)]
pub struct StorageInfo {
    pub keyspace_disk_space: u64,
    pub keyspace_journal_count: usize,
    pub keyspace_sequence: u64,
    pub global_approximate_len: usize,
}

#[cfg(test)]
mod tests {
    use super::*;
    use crate::{DeleteAccount, RecordKey, UFOsCommit};
    use jetstream::events::{CommitEvent, CommitOp};
    use jetstream::exports::Cid;
    use serde_json::value::RawValue;

    fn fjall_db() -> (MemReader, MemWriter) {
        let (read, write, _, _) = MemStorage::init(
            tempfile::tempdir().unwrap(),
            "offline test (no real jetstream endpoint)".to_string(),
            false,
            MemConfig { temp: true },
        )
        .unwrap();
        (read, write)
    }

    const TEST_BATCH_LIMIT: usize = 16;

    #[derive(Debug, Default)]
    struct TestBatch {
        pub batch: EventBatch<TEST_BATCH_LIMIT>,
    }

    impl TestBatch {
        #[allow(clippy::too_many_arguments)]
        pub fn create(
            &mut self,
            did: &str,
            collection: &str,
            rkey: &str,
            record: &str,
            rev: Option<&str>,
            cid: Option<Cid>,
            cursor: u64,
        ) -> Nsid {
            let did = Did::new(did.to_string()).unwrap();
            let collection = Nsid::new(collection.to_string()).unwrap();
            let record = RawValue::from_string(record.to_string()).unwrap();
            let cid = cid.unwrap_or(
                "bafyreidofvwoqvd2cnzbun6dkzgfucxh57tirf3ohhde7lsvh4fu3jehgy"
                    .parse()
                    .unwrap(),
            );

            let event = CommitEvent {
                collection: collection.clone(),
                rkey: RecordKey::new(rkey.to_string()).unwrap(),
                rev: rev.unwrap_or("asdf").to_string(),
                operation: CommitOp::Create,
                record: Some(record),
                cid: Some(cid),
            };

            let (commit, collection) =
                UFOsCommit::from_commit_info(event, did.clone(), Cursor::from_raw_u64(cursor))
                    .unwrap();

            self.batch
                .commits_by_nsid
                .entry(collection.clone())
                .or_default()
                .truncating_insert(commit, &[0u8; 16])
                .unwrap();

            collection
        }
-
#[allow(clippy::too_many_arguments)]
1146
-
rev: Option<&str>,
1150
-
let did = Did::new(did.to_string()).unwrap();
1151
-
let collection = Nsid::new(collection.to_string()).unwrap();
1152
-
let record = RawValue::from_string(record.to_string()).unwrap();
1153
-
let cid = cid.unwrap_or(
1154
-
"bafyreidofvwoqvd2cnzbun6dkzgfucxh57tirf3ohhde7lsvh4fu3jehgy"
1159
-
let event = CommitEvent {
1161
-
rkey: RecordKey::new(rkey.to_string()).unwrap(),
1162
-
rev: rev.unwrap_or("asdf").to_string(),
1163
-
operation: CommitOp::Update,
1164
-
record: Some(record),
1168
-
let (commit, collection) =
1169
-
UFOsCommit::from_commit_info(event, did.clone(), Cursor::from_raw_u64(cursor))
1174
-
.entry(collection.clone())
1176
-
.truncating_insert(commit, &[0u8; 16])
1181
-
#[allow(clippy::too_many_arguments)]
1187
-
rev: Option<&str>,
1190
-
let did = Did::new(did.to_string()).unwrap();
1191
-
let collection = Nsid::new(collection.to_string()).unwrap();
1192
-
let event = CommitEvent {
1194
-
rkey: RecordKey::new(rkey.to_string()).unwrap(),
1195
-
rev: rev.unwrap_or("asdf").to_string(),
1196
-
operation: CommitOp::Delete,
1201
-
let (commit, collection) =
1202
-
UFOsCommit::from_commit_info(event, did, Cursor::from_raw_u64(cursor)).unwrap();
1206
-
.entry(collection.clone())
1208
-
.truncating_insert(commit, &[0u8; 16])
1213
-
pub fn delete_account(&mut self, did: &str, cursor: u64) -> Did {
1214
-
let did = Did::new(did.to_string()).unwrap();
1215
-
self.batch.account_removes.push(DeleteAccount {
1217
-
cursor: Cursor::from_raw_u64(cursor),

    #[test]
    fn test_hello() -> anyhow::Result<()> {
        let (read, mut write) = fjall_db();
        write.insert_batch::<TEST_BATCH_LIMIT>(EventBatch::default())?;
        let (records, dids) =
            read.get_counts_by_collection(&Nsid::new("a.b.c".to_string()).unwrap())?;
        assert_eq!(records, 0);
        assert_eq!(dids, 0);
        Ok(())
    }

    #[test]
    fn test_insert_one() -> anyhow::Result<()> {
        let (read, mut write) = fjall_db();

        let mut batch = TestBatch::default();
        let collection = batch.create(
            "did:plc:inze6wrmsm7pjl7yta3oig77",
            // ... (remaining args elided)
        );
        write.insert_batch(batch.batch)?;

        let (records, dids) = read.get_counts_by_collection(&collection)?;
        assert_eq!(records, 1);
        assert_eq!(dids, 1);
        let (records, dids) =
            read.get_counts_by_collection(&Nsid::new("d.e.f".to_string()).unwrap())?;
        assert_eq!(records, 0);
        assert_eq!(dids, 0);

        let records = read.get_records_by_collections(HashSet::from([collection]), 2, false)?;
        assert_eq!(records.len(), 1);
        let rec = &records[0];
        assert_eq!(rec.record.get(), "{}");
        assert!(!rec.is_update);

        let records = read.get_records_by_collections(
            HashSet::from([Nsid::new("d.e.f".to_string()).unwrap()]),
            // ... (limit and expand args elided)
        )?;
        assert_eq!(records.len(), 0);

        Ok(())
    }
-
fn test_get_multi_collection() -> anyhow::Result<()> {
1276
-
let (read, mut write) = fjall_db();
1278
-
let mut batch = TestBatch::default();
1280
-
"did:plc:inze6wrmsm7pjl7yta3oig77",
1289
-
"did:plc:inze6wrmsm7pjl7yta3oig77",
1292
-
r#""in between""#,
1298
-
"did:plc:inze6wrmsm7pjl7yta3oig77",
1306
-
write.insert_batch(batch.batch)?;
1308
-
let records = read.get_records_by_collections(
1310
-
Nsid::new("a.a.a".to_string()).unwrap(),
1311
-
Nsid::new("a.a.b".to_string()).unwrap(),
1312
-
Nsid::new("a.a.c".to_string()).unwrap(),
1317
-
assert_eq!(records.len(), 3);
1318
-
assert_eq!(records[0].record.get(), r#""last""#);
1320
-
records[0].collection,
1321
-
Nsid::new("a.a.a".to_string()).unwrap()
1323
-
assert_eq!(records[1].record.get(), r#""in between""#);
1325
-
records[1].collection,
1326
-
Nsid::new("a.a.b".to_string()).unwrap()
1328
-
assert_eq!(records[2].record.get(), r#""earliest""#);
1330
-
records[2].collection,
1331
-
Nsid::new("a.a.a".to_string()).unwrap()
1338
-
fn test_update_one() -> anyhow::Result<()> {
1339
-
let (read, mut write) = fjall_db();
1341
-
let mut batch = TestBatch::default();
1342
-
let collection = batch.create(
1343
-
"did:plc:inze6wrmsm7pjl7yta3oig77",
1351
-
write.insert_batch(batch.batch)?;
1353
-
let mut batch = TestBatch::default();
1355
-
"did:plc:inze6wrmsm7pjl7yta3oig77",
1358
-
r#"{"ch": "ch-ch-ch-changes"}"#,
1363
-
write.insert_batch(batch.batch)?;
1365
-
let (records, dids) = read.get_counts_by_collection(&collection)?;
1366
-
assert_eq!(records, 1);
1367
-
assert_eq!(dids, 1);
1369
-
let records = read.get_records_by_collections(HashSet::from([collection]), 2, false)?;
1370
-
assert_eq!(records.len(), 1);
1371
-
let rec = &records[0];
1372
-
assert_eq!(rec.record.get(), r#"{"ch": "ch-ch-ch-changes"}"#);
1373
-
assert!(rec.is_update);

    #[test]
    fn test_delete_one() -> anyhow::Result<()> {
        let (read, mut write) = fjall_db();

        let mut batch = TestBatch::default();
        let collection = batch.create(
            "did:plc:inze6wrmsm7pjl7yta3oig77",
            // ... (remaining args elided)
        );
        write.insert_batch(batch.batch)?;

        let mut batch = TestBatch::default();
        batch.delete(
            "did:plc:inze6wrmsm7pjl7yta3oig77",
            // ... (remaining args elided)
        );
        write.insert_batch(batch.batch)?;

        let (records, dids) = read.get_counts_by_collection(&collection)?;
        assert_eq!(records, 1);
        assert_eq!(dids, 1);

        let records = read.get_records_by_collections(HashSet::from([collection]), 2, false)?;
        assert_eq!(records.len(), 0);

        Ok(())
    }

    #[test]
    fn test_collection_trim() -> anyhow::Result<()> {
        let (read, mut write) = fjall_db();

        let mut batch = TestBatch::default();
        batch.create(
            "did:plc:inze6wrmsm7pjl7yta3oig77",
            // ... (remaining args elided)
        );
        let mut last_b_cursor;
        for i in 1..=10 {
            // (loop reconstructed: ten 'a.a.b' records are asserted below)
            last_b_cursor = 11_000 + i;
            batch.create(
                &format!("did:plc:inze6wrmsm7pjl7yta3oig7{}", i % 3),
                "a.a.b",
                &format!("rkey-bbb-{i}"),
                &format!(r#"{{"n": {i}}}"#),
                Some(&format!("rev-bbb-{i}")),
                None,
                last_b_cursor,
            );
        }
        batch.create(
            "did:plc:inze6wrmsm7pjl7yta3oig77",
            // ... (remaining args elided)
        );
        write.insert_batch(batch.batch)?;

        let records = read.get_records_by_collections(
            HashSet::from([Nsid::new("a.a.a".to_string()).unwrap()]),
            // ... (limit and expand args elided)
        )?;
        assert_eq!(records.len(), 1);
        let records = read.get_records_by_collections(
            HashSet::from([Nsid::new("a.a.b".to_string()).unwrap()]),
            // ... (limit and expand args elided)
        )?;
        assert_eq!(records.len(), 10);
        let records = read.get_records_by_collections(
            HashSet::from([Nsid::new("a.a.c".to_string()).unwrap()]),
            // ... (limit and expand args elided)
        )?;
        assert_eq!(records.len(), 1);
        let records = read.get_records_by_collections(
            HashSet::from([Nsid::new("a.a.d".to_string()).unwrap()]),
            // ... (limit and expand args elided)
        )?;
        assert_eq!(records.len(), 0);

        write.trim_collection(&Nsid::new("a.a.a".to_string()).unwrap(), 6, false)?;
        write.trim_collection(&Nsid::new("a.a.b".to_string()).unwrap(), 6, false)?;
        write.trim_collection(&Nsid::new("a.a.c".to_string()).unwrap(), 6, false)?;
        write.trim_collection(&Nsid::new("a.a.d".to_string()).unwrap(), 6, false)?;

        let records = read.get_records_by_collections(
            HashSet::from([Nsid::new("a.a.a".to_string()).unwrap()]),
            // ... (limit and expand args elided)
        )?;
        assert_eq!(records.len(), 1);
        let records = read.get_records_by_collections(
            HashSet::from([Nsid::new("a.a.b".to_string()).unwrap()]),
            // ... (limit and expand args elided)
        )?;
        assert_eq!(records.len(), 6);
        let records = read.get_records_by_collections(
            HashSet::from([Nsid::new("a.a.c".to_string()).unwrap()]),
            // ... (limit and expand args elided)
        )?;
        assert_eq!(records.len(), 1);
        let records = read.get_records_by_collections(
            HashSet::from([Nsid::new("a.a.d".to_string()).unwrap()]),
            // ... (limit and expand args elided)
        )?;
        assert_eq!(records.len(), 0);

        Ok(())
    }

    #[test]
    fn test_delete_account() -> anyhow::Result<()> {
        let (read, mut write) = fjall_db();

        let mut batch = TestBatch::default();
        batch.create(
            "did:plc:person-a",
            // ... (remaining args elided)
        );
        for i in 1..=2 {
            // (loop reconstructed: two 'person-b' records are asserted below)
            batch.create(
                "did:plc:person-b",
                "a.a.a",
                &format!("rkey-bbb-{i}"),
                &format!(r#"{{"n": {i}}}"#),
                Some(&format!("rev-bbb-{i}")),
                None,
                // ... (cursor elided)
            );
        }
        write.insert_batch(batch.batch)?;

        let records = read.get_records_by_collections(
            HashSet::from([Nsid::new("a.a.a".to_string()).unwrap()]),
            // ... (limit and expand args elided)
        )?;
        assert_eq!(records.len(), 3);

        let records_deleted =
            write.delete_account(&Did::new("did:plc:person-b".to_string()).unwrap())?;
        assert_eq!(records_deleted, 2);

        let records = read.get_records_by_collections(
            HashSet::from([Nsid::new("a.a.a".to_string()).unwrap()]),
            // ... (limit and expand args elided)
        )?;
        assert_eq!(records.len(), 1);

        Ok(())
    }
-
fn rollup_delete_account_removes_record() -> anyhow::Result<()> {
1560
-
let (read, mut write) = fjall_db();
1562
-
let mut batch = TestBatch::default();
1564
-
"did:plc:person-a",
1572
-
write.insert_batch(batch.batch)?;
1574
-
let mut batch = TestBatch::default();
1575
-
batch.delete_account("did:plc:person-a", 9_999); // queue it before the rollup
1576
-
write.insert_batch(batch.batch)?;
1578
-
write.step_rollup()?;
1580
-
let records = read.get_records_by_collections(
1581
-
HashSet::from([Nsid::new("a.a.a".to_string()).unwrap()]),
1585
-
assert_eq!(records.len(), 0);
1591
-
fn rollup_delete_live_count_step() -> anyhow::Result<()> {
1592
-
let (read, mut write) = fjall_db();
1594
-
let mut batch = TestBatch::default();
1596
-
"did:plc:person-a",
1604
-
write.insert_batch(batch.batch)?;
1606
-
let (n, _) = write.step_rollup()?;
1609
-
let mut batch = TestBatch::default();
1610
-
batch.delete_account("did:plc:person-a", 10_001);
1611
-
write.insert_batch(batch.batch)?;
1613
-
let records = read.get_records_by_collections(
1614
-
HashSet::from([Nsid::new("a.a.a".to_string()).unwrap()]),
1618
-
assert_eq!(records.len(), 1);
1620
-
let (n, _) = write.step_rollup()?;
1623
-
let records = read.get_records_by_collections(
1624
-
HashSet::from([Nsid::new("a.a.a".to_string()).unwrap()]),
1628
-
assert_eq!(records.len(), 0);
1630
-
let mut batch = TestBatch::default();
1631
-
batch.delete_account("did:plc:person-a", 9_999);
1632
-
write.insert_batch(batch.batch)?;
1634
-
let (n, _) = write.step_rollup()?;
1641
-
fn rollup_multiple_count_batches() -> anyhow::Result<()> {
1642
-
let (_read, mut write) = fjall_db();
1644
-
let mut batch = TestBatch::default();
1646
-
"did:plc:person-a",
1654
-
write.insert_batch(batch.batch)?;
1656
-
let mut batch = TestBatch::default();
1658
-
"did:plc:person-a",
1666
-
write.insert_batch(batch.batch)?;
1668
-
let (n, _) = write.step_rollup()?;
1671
-
let (n, _) = write.step_rollup()?;
1678
-
fn counts_before_and_after_rollup() -> anyhow::Result<()> {
1679
-
let (read, mut write) = fjall_db();
1681
-
let mut batch = TestBatch::default();
1683
-
"did:plc:person-a",
1692
-
"did:plc:person-b",
1700
-
write.insert_batch(batch.batch)?;
1702
-
let mut batch = TestBatch::default();
1703
-
batch.delete_account("did:plc:person-a", 11_000);
1704
-
write.insert_batch(batch.batch)?;
1706
-
let mut batch = TestBatch::default();
1708
-
"did:plc:person-a",
1716
-
write.insert_batch(batch.batch)?;
1718
-
// before any rollup
1719
-
let (records, dids) =
1720
-
read.get_counts_by_collection(&Nsid::new("a.a.a".to_string()).unwrap())?;
1721
-
assert_eq!(records, 3);
1722
-
assert_eq!(dids, 2);
1724
-
// first batch rolled up
1725
-
let (n, _) = write.step_rollup()?;
1728
-
let (records, dids) =
1729
-
read.get_counts_by_collection(&Nsid::new("a.a.a".to_string()).unwrap())?;
1730
-
assert_eq!(records, 3);
1731
-
assert_eq!(dids, 2);
1733
-
// delete account rolled up
1734
-
let (n, _) = write.step_rollup()?;
1737
-
let (records, dids) =
1738
-
read.get_counts_by_collection(&Nsid::new("a.a.a".to_string()).unwrap())?;
1739
-
assert_eq!(records, 3);
1740
-
assert_eq!(dids, 2);
1742
-
// second batch rolled up
1743
-
let (n, _) = write.step_rollup()?;
1746
-
let (records, dids) =
1747
-
read.get_counts_by_collection(&Nsid::new("a.a.a".to_string()).unwrap())?;
1748
-
assert_eq!(records, 3);
1749
-
assert_eq!(dids, 2);
1751
-
// no more rollups left
1752
-
let (n, _) = write.step_rollup()?;