Constellation, Spacedust, Slingshot, UFOs: atproto crates and services for microcosm

track all partitions' l0_run_count in metrics

Changed files: +30 -1

ufos/src/main.rs (+1)
···
interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
loop {
    interval.tick().await;
+   read_store.update_metrics();
    match read_store.get_consumer_info().await {
        Err(e) => log::warn!("failed to get jetstream consumer info: {e:?}"),
        Ok(ConsumerInfo::Jetstream {
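For context outside the truncated hunk, here is a self-contained sketch of the refresh loop this change hooks into: a tokio interval with `Delay` missed-tick behavior that calls the reader's `update_metrics` hook on every tick. The interval period, `FakeStore`, and its methods are illustrative stand-ins, not the repo's actual types.

```rust
use std::time::Duration;

struct FakeStore;

impl FakeStore {
    fn update_metrics(&self) {
        // In ufos this refreshes the storage gauges; printing stands in here.
        println!("refreshing gauges");
    }
}

#[tokio::main]
async fn main() {
    let read_store = FakeStore;
    let mut interval = tokio::time::interval(Duration::from_secs(5));
    // Delay: if a tick is missed (e.g. a slow iteration), wait for the next
    // period instead of bursting to catch up.
    interval.set_missed_tick_behavior(tokio::time::MissedTickBehavior::Delay);
    loop {
        interval.tick().await;
        read_store.update_metrics();
    }
}
```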

ufos/src/storage.rs (+2)
···
pub trait StoreReader: Send + Sync {
    fn name(&self) -> String;
+   fn update_metrics(&self) {}
+
    async fn get_storage_stats(&self) -> StorageResult<serde_json::Value>;
    async fn get_consumer_info(&self) -> StorageResult<ConsumerInfo>;
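The trait change gives `update_metrics` a default empty body, so only backends that actually have internal counters need to override it. A standalone sketch of that pattern (simplified names, not the crate's real `StoreReader`):

```rust
trait Reader {
    fn name(&self) -> String;
    /// Optional metrics hook; the default is a no-op.
    fn update_metrics(&self) {}
}

struct MemReader;
impl Reader for MemReader {
    fn name(&self) -> String {
        "mem".into()
    }
    // `update_metrics` is not overridden; the trait default `{}` applies.
}

struct LsmReader {
    l0_runs: usize,
}
impl Reader for LsmReader {
    fn name(&self) -> String {
        "lsm".into()
    }
    fn update_metrics(&self) {
        // The real implementation sets a gauge here; printing stands in.
        println!("{}: l0_run_count = {}", self.name(), self.l0_runs);
    }
}

fn main() {
    let readers: Vec<Box<dyn Reader>> =
        vec![Box::new(MemReader), Box::new(LsmReader { l0_runs: 3 })];
    for reader in &readers {
        reader.update_metrics();
    }
}
```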

ufos/src/storage_fjall.rs (+27 -1)
···
    Batch as FjallBatch, Config, Keyspace, PartitionCreateOptions, PartitionHandle, Snapshot,
};
use jetstream::events::Cursor;
-use metrics::{counter, describe_counter, describe_histogram, histogram, Unit};
+use lsm_tree::AbstractTree;
+use metrics::{
+    counter, describe_counter, describe_gauge, describe_histogram, gauge, histogram, Unit,
+};
use std::collections::{HashMap, HashSet};
use std::iter::Peekable;
use std::ops::Bound;
···
    feeds: feeds.clone(),
    records: records.clone(),
    rollups: rollups.clone(),
+   queues: queues.clone(),
};
+reader.describe_metrics();
let writer = FjallWriter {
    bg_taken: Arc::new(AtomicBool::new(false)),
    keyspace,
···
    feeds: PartitionHandle,
    records: PartitionHandle,
    rollups: PartitionHandle,
+   queues: PartitionHandle,
}
/// An iterator that knows how to skip over deleted/invalidated records
···
type CollectionSerieses = HashMap<Nsid, Vec<CountsValue>>;
impl FjallReader {
+   fn describe_metrics(&self) {
+       describe_gauge!(
+           "storage_fjall_l0_run_count",
+           Unit::Count,
+           "number of L0 runs in a partition"
+       );
+   }
+
    fn get_storage_stats(&self) -> StorageResult<serde_json::Value> {
        let rollup_cursor =
            get_static_neu::<NewRollupCursorKey, NewRollupCursorValue>(&self.global)?
···
impl StoreReader for FjallReader {
    fn name(&self) -> String {
        "fjall storage v2".into()
+   }
+
+   fn update_metrics(&self) {
+       gauge!("storage_fjall_l0_run_count", "partition" => "global")
+           .set(self.global.tree.l0_run_count() as f64);
+       gauge!("storage_fjall_l0_run_count", "partition" => "feeds")
+           .set(self.feeds.tree.l0_run_count() as f64);
+       gauge!("storage_fjall_l0_run_count", "partition" => "records")
+           .set(self.records.tree.l0_run_count() as f64);
+       gauge!("storage_fjall_l0_run_count", "partition" => "rollups")
+           .set(self.rollups.tree.l0_run_count() as f64);
+       gauge!("storage_fjall_l0_run_count", "partition" => "queues")
+           .set(self.queues.tree.l0_run_count() as f64);
    }

    async fn get_storage_stats(&self) -> StorageResult<serde_json::Value> {
        let s = self.clone();
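To see what these gauges look like at scrape time, here is a hedged sketch using the `metrics` and `metrics-exporter-prometheus` crates; the exporter wiring and the sample run counts are illustrative, and the repo may expose its metrics differently. Using one gauge name with a `partition` label, as the diff does, yields one `storage_fjall_l0_run_count{partition="..."}` series per partition rather than five separately named gauges.

```rust
use metrics::{describe_gauge, gauge, Unit};
use metrics_exporter_prometheus::PrometheusBuilder;

fn main() {
    // Install an in-process recorder so the scrape output can be rendered directly.
    let handle = PrometheusBuilder::new()
        .install_recorder()
        .expect("failed to install Prometheus recorder");

    describe_gauge!(
        "storage_fjall_l0_run_count",
        Unit::Count,
        "number of L0 runs in a partition"
    );

    // Stand-in values; in the real code these come from `tree.l0_run_count()`.
    for (partition, runs) in [("global", 1.0), ("feeds", 2.0), ("rollups", 4.0)] {
        gauge!("storage_fjall_l0_run_count", "partition" => partition).set(runs);
    }

    // Prints one storage_fjall_l0_run_count{partition="..."} line per label value.
    println!("{}", handle.render());
}
```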