Constellation, Spacedust, Slingshot, UFOs: atproto crates and services for microcosm

stats by prefix

with children by type (subsequent prefixes or full NSIDs)
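A rough sketch of what the children payload looks like on the wire, mirroring the `PrefixChild` serde attributes added in `ufos/src/lib.rs` below (the standalone struct-variant shape and all values here are hypothetical; the real enum wraps `NsidCount`/`PrefixCount`):

```rust
use serde::Serialize;

// sketch only: mirrors the diff's #[serde(tag = "type", rename_all = "camelCase")]
#[derive(Serialize)]
#[serde(tag = "type", rename_all = "camelCase")]
enum PrefixChild {
    // rename_all affects the *variant* names used in the "type" tag;
    // field names stay snake_case
    Collection { nsid: String, creates: u64, dids_estimate: u64 },
    Prefix { prefix: String, creates: u64, dids_estimate: u64 },
}

fn main() {
    let child = PrefixChild::Prefix {
        prefix: "app.bsky.feed".into(), // hypothetical example value
        creates: 123,
        dids_estimate: 45,
    };
    // prints: {"type":"prefix","prefix":"app.bsky.feed","creates":123,"dids_estimate":45}
    println!("{}", serde_json::to_string(&child).unwrap());
}
```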

+12 -5
ufos/src/db_types.rs
···
pub enum EncodingError {
    #[error("failed to parse Atrium string type: {0}")]
    BadAtriumStringType(&'static str),
+    #[error("Not enough NSID segments for a usable prefix")]
+    NotEnoughNsidSegments,
    #[error("failed to bincode-encode: {0}")]
    BincodeEncodeFailed(#[from] EncodeError),
    #[error("failed to bincode-decode: {0}")]
···
    fn from_db_bytes(bytes: &[u8]) -> Result<(Self, usize), EncodingError>
    where
        Self: Sized;
+    fn as_prefix_range_end(&self) -> EncodingResult<Vec<u8>> {
+        let bytes = self.to_db_bytes()?;
+        let (_, Bound::Excluded(range_end)) = prefix_to_range(&bytes) else {
+            return Err(EncodingError::BadRangeBound);
+        };
+        Ok(range_end.to_vec())
+    }
}
pub trait SubPrefixBytes<T> {
···
        self.prefix.to_db_bytes()
    }
    pub fn prefix_range_end(prefix: &P) -> EncodingResult<Vec<u8>> {
-        let prefix_bytes = prefix.to_db_bytes()?;
-        let (_, Bound::Excluded(range_end)) = prefix_to_range(&prefix_bytes) else {
-            return Err(EncodingError::BadRangeBound);
-        };
-        Ok(range_end.to_vec())
+        prefix.as_prefix_range_end()
    }
    pub fn range_end(&self) -> EncodingResult<Vec<u8>> {
        Self::prefix_range_end(&self.prefix)
···
impl<const N: usize> UseBincodePlz for [u8; N] {}
+// bare bytes (NOT prefix-encoded!)
impl DbBytes for Vec<u8> {
    fn to_db_bytes(&self) -> EncodingResult<Vec<u8>> {
        Ok(self.to_vec())
    }
+    // greedy, consumes ALL remaining bytes
    fn from_db_bytes(bytes: &[u8]) -> Result<(Self, usize), EncodingError> {
        Ok((bytes.to_owned(), bytes.len()))
    }
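For context on `as_prefix_range_end` above: the change leans on `prefix_to_range` to turn prefix bytes into an exclusive upper bound for range scans. A minimal sketch of the standard technique, assuming (not confirmed by this diff) that `prefix_to_range` uses the usual increment-the-last-byte construction:

```rust
/// Sketch only: the smallest byte string strictly greater than every key
/// starting with `prefix`. Increment the last non-0xFF byte and drop
/// everything after it; an all-0xFF prefix has no finite bound, which would
/// correspond to the `BadRangeBound` error path above.
fn prefix_range_end(prefix: &[u8]) -> Option<Vec<u8>> {
    let mut end = prefix.to_vec();
    while let Some(&last) = end.last() {
        if last < 0xFF {
            *end.last_mut().unwrap() += 1;
            return Some(end); // e.g. b"app.bsky." -> b"app.bsky/" (b'.' + 1 == b'/')
        }
        end.pop(); // trailing 0xFF bytes can't be incremented
    }
    None // empty or all-0xFF prefix
}
```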
+41
ufos/src/lib.rs
···
pub mod storage_fjall;
pub mod store_types;
+use crate::db_types::{EncodingError, EncodingResult};
use crate::error::BatchInsertError;
use crate::store_types::SketchSecretPrefix;
use cardinality_estimator_safe::{Element, Sketch};
···
pub struct NsidCount {
    nsid: String,
    creates: u64,
+    // TODO: add updates and deletes
    dids_estimate: u64,
+}
+
+#[derive(Debug, Serialize, JsonSchema)]
+pub struct PrefixCount {
+    prefix: String,
+    creates: u64,
+    // TODO: add updates and deletes
+    dids_estimate: u64,
+}
+
+#[derive(Debug, Serialize, JsonSchema)]
+#[serde(tag = "type", rename_all = "camelCase")]
+pub enum PrefixChild {
+    Collection(NsidCount),
+    Prefix(PrefixCount),
+}
+
+#[derive(Debug, Serialize, JsonSchema)]
+pub struct NsidPrefix(String);
+impl NsidPrefix {
+    pub fn new(pre: &str) -> EncodingResult<Self> {
+        // it's a valid prefix if appending `.name` makes it a valid NSID
+        Nsid::new(format!("{pre}.name")).map_err(EncodingError::BadAtriumStringType)?;
+        // hack (shouldn't really be here): reject prefixes that aren't at least 2 segments long
+        if !pre.contains('.') {
+            return Err(EncodingError::NotEnoughNsidSegments);
+        }
+        Ok(Self(pre.to_string()))
+    }
+    pub fn is_group_of(&self, other: &Nsid) -> bool {
+        assert!(
+            other.as_str().starts_with(&self.0),
+            "must be a prefix of other"
+        );
+        self.0 == other.domain_authority()
+    }
+    pub fn as_str(&self) -> &str {
+        self.0.as_str()
+    }
}
#[derive(Debug, Serialize, JsonSchema)]
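Roughly how the new `NsidPrefix` rules behave, as a hypothetical unit test (not part of the diff; assumes atrium's `Nsid` is in scope alongside `NsidPrefix`):

```rust
#[test]
fn nsid_prefix_semantics_sketch() {
    // single-segment prefixes are rejected ("app.name" also fails NSID
    // validation: no valid domain authority)
    assert!(NsidPrefix::new("app").is_err());

    let post = Nsid::new("app.bsky.feed.post".to_string()).unwrap();

    // "app.bsky.feed" IS the domain authority of app.bsky.feed.post, so
    // that NSID is a direct member ("full NSID" child) of the group
    let group = NsidPrefix::new("app.bsky.feed").unwrap();
    assert!(group.is_group_of(&post));

    // "app.bsky" is a prefix but not the domain authority, so the NSID
    // would roll up under the child prefix "app.bsky.feed" instead
    let shallower = NsidPrefix::new("app.bsky").unwrap();
    assert!(!shallower.is_group_of(&post));
}
```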
+135 -1
ufos/src/server/mod.rs
···
use crate::index_html::INDEX_HTML;
use crate::storage::StoreReader;
use crate::store_types::{HourTruncatedCursor, WeekTruncatedCursor};
-use crate::{ConsumerInfo, Cursor, JustCount, Nsid, NsidCount, OrderCollectionsBy, UFOsRecord};
+use crate::{
+    ConsumerInfo, Cursor, JustCount, Nsid, NsidCount, NsidPrefix, OrderCollectionsBy, PrefixChild,
+    UFOsRecord,
+};
use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _};
use chrono::{DateTime, Utc};
use collections_query::MultiCollectionQuery;
···
    .into()
}
+#[derive(Debug, Serialize, JsonSchema)]
+struct PrefixResponse {
+    /// Note that total may not include counts beyond the current page (TODO)
+    total: JustCount,
+    children: Vec<PrefixChild>,
+    /// Include in a follow-up request to get the next page of results, if more are available
+    cursor: Option<String>,
+}
+#[derive(Debug, Deserialize, JsonSchema)]
+struct PrefixQuery {
+    /// NSID prefix to filter collections by
+    ///
+    /// The final segment of a collection NSID is the `name`, and everything before it is called its `group`. e.g.:
+    ///
+    /// - `app.bsky.feed.post` and `app.bsky.feed.like` are both in the _lexicon group_ "`app.bsky.feed`".
+    prefix: String,
+    /// The maximum number of collections to return in one request.
+    ///
+    /// The number of items actually returned may be less than the limit. If paginating, this does **not** indicate that no
+    /// more items are available! Check if the `cursor` in the response is `null` to determine the end of items.
+    ///
+    /// Default: `100` normally, `32` if `order` is specified.
+    #[schemars(range(min = 1, max = 200))]
+    limit: Option<usize>,
+    /// Get a paginated response with more collections.
+    ///
+    /// Always omit the cursor for the first request. If more collections than the limit are available, the response will contain a non-null `cursor` to include with the next request.
+    ///
+    /// `cursor` is mutually exclusive with `order`.
+    cursor: Option<String>,
+    /// Limit collections and statistics to those seen after this UTC datetime
+    ///
+    /// Default: all-time
+    since: Option<DateTime<Utc>>,
+    /// Limit collections and statistics to those seen before this UTC datetime
+    ///
+    /// Default: now
+    until: Option<DateTime<Utc>>,
+    /// Get a limited, sorted list
+    ///
+    /// Mutually exclusive with `cursor` -- sorted results cannot be paged.
+    order: Option<CollectionsQueryOrder>,
+}
+/// Prefix-filter collections list
+///
+/// This endpoint enumerates all collection NSIDs for a lexicon group.
+///
+/// ## To fetch a full list:
+///
+/// Omit the `order` parameter and page through the results using the `cursor`. There have been a lot of collections seen in the ATmosphere, well over 400 at time of writing, so you *will* need to make a series of paginated requests with `cursor`s to get them all.
+///
+/// The set of collections across multiple requests is not guaranteed to be a perfectly consistent snapshot:
+///
+/// - all collection NSIDs observed before the first request will be included in the results
+///
+/// - *new* NSIDs observed in the firehose *while paging* might be included or excluded from the final set
+///
+/// - no duplicate NSIDs will occur in the combined results
+///
+/// In practice this is close enough for most use-cases to not worry about.
+///
+/// ## To fetch the top collection NSIDs:
+///
+/// Specify the `order` parameter (must be either `records-created` or `did-estimate`). Note that ordered results cannot be paged.
+///
+/// All statistics are bucketed hourly, so the most granular effective time boundary for `since` and `until` is one hour.
+#[endpoint {
+    method = GET,
+    path = "/prefix"
+}]
+async fn get_prefix(
+    ctx: RequestContext<Context>,
+    query: Query<PrefixQuery>,
+) -> OkCorsResponse<PrefixResponse> {
+    let Context { storage, .. } = ctx.context();
+    let q = query.into_inner();
+
+    let prefix = NsidPrefix::new(&q.prefix).map_err(|e| {
+        HttpError::for_bad_request(
+            None,
+            format!("{:?} was not a valid NSID prefix: {e:?}", q.prefix),
+        )
+    })?;
+
+    if q.cursor.is_some() && q.order.is_some() {
+        let msg = "`cursor` is mutually exclusive with `order`. ordered results cannot be paged.";
+        return Err(HttpError::for_bad_request(None, msg.to_string()));
+    }
+
+    let order = if let Some(ref o) = q.order {
+        o.into()
+    } else {
+        let cursor = q
+            .cursor
+            .and_then(|c| if c.is_empty() { None } else { Some(c) })
+            .map(|c| URL_SAFE_NO_PAD.decode(&c))
+            .transpose()
+            .map_err(|e| HttpError::for_bad_request(None, format!("invalid cursor: {e:?}")))?;
+        OrderCollectionsBy::Lexi { cursor }
+    };
+
+    let limit = match (q.limit, q.order) {
+        (Some(limit), _) => limit,
+        (None, Some(_)) => 32,
+        (None, None) => 100,
+    };
+
+    if !(1..=200).contains(&limit) {
+        let msg = format!("limit not in 1..=200: {}", limit);
+        return Err(HttpError::for_bad_request(None, msg));
+    }
+
+    let since = q.since.map(dt_to_cursor).transpose()?;
+    let until = q.until.map(dt_to_cursor).transpose()?;
+
+    let (total, children, next_cursor) = storage
+        .get_prefix(prefix, limit, order, since, until)
+        .await
+        .map_err(|e| HttpError::for_internal_error(format!("oh shoot: {e:?}")))?;
+
+    let next_cursor = next_cursor.map(|c| URL_SAFE_NO_PAD.encode(c));
+
+    OkCors(PrefixResponse {
+        total,
+        children,
+        cursor: next_cursor,
+    })
+    .into()
+}
+
#[derive(Debug, Deserialize, JsonSchema)]
struct CollectionTimeseriesQuery {
    collection: String, // JsonSchema not implemented for Nsid :(
···
    api.register(get_records_by_collections).unwrap();
    api.register(get_collection_stats).unwrap();
    api.register(get_collections).unwrap();
+    api.register(get_prefix).unwrap();
    api.register(get_timeseries).unwrap();
    let context = Context {
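An illustration (hypothetical bytes) of how the opaque page cursor round-trips through the handler above: the server base64url-encodes the raw cursor bytes without padding, and an empty `cursor` query value is treated the same as omitting it:

```rust
use base64::{engine::general_purpose::URL_SAFE_NO_PAD, Engine as _};

fn main() {
    // stand-in for the db-encoded child string returned by storage
    let raw: &[u8] = b"app.bsky.feed";
    let token = URL_SAFE_NO_PAD.encode(raw);
    // the client echoes `token` back as ?cursor=... on the next request
    let decoded = URL_SAFE_NO_PAD.decode(&token).expect("valid base64url");
    assert_eq!(decoded, raw);
}
```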
+11 -2
ufos/src/storage.rs
···
use crate::store_types::{CountsValue, HourTruncatedCursor, SketchSecretPrefix};
use crate::{
-    error::StorageError, ConsumerInfo, Cursor, EventBatch, JustCount, NsidCount,
-    OrderCollectionsBy, UFOsRecord,
+    error::StorageError, ConsumerInfo, Cursor, EventBatch, JustCount, NsidCount, NsidPrefix,
+    OrderCollectionsBy, PrefixChild, UFOsRecord,
};
use async_trait::async_trait;
use jetstream::exports::{Did, Nsid};
···
        since: Option<HourTruncatedCursor>,
        until: Option<HourTruncatedCursor>,
    ) -> StorageResult<(Vec<NsidCount>, Option<Vec<u8>>)>;
+
+    async fn get_prefix(
+        &self,
+        prefix: NsidPrefix,
+        limit: usize,
+        order: OrderCollectionsBy,
+        since: Option<HourTruncatedCursor>,
+        until: Option<HourTruncatedCursor>,
+    ) -> StorageResult<(JustCount, Vec<PrefixChild>, Option<Vec<u8>>)>;
    async fn get_timeseries(
        &self,
+199 -3
ufos/src/storage_fjall.rs
···
-use crate::db_types::{db_complete, DbBytes, DbStaticStr, EncodingResult, StaticStr};
+use crate::db_types::{
+    db_complete, DbBytes, DbStaticStr, EncodingResult, StaticStr, SubPrefixBytes,
+};
use crate::error::StorageError;
use crate::storage::{StorageResult, StorageWhatever, StoreBackground, StoreReader, StoreWriter};
use crate::store_types::{
···
    WEEK_IN_MICROS,
};
use crate::{
-    nice_duration, CommitAction, ConsumerInfo, Did, EventBatch, JustCount, Nsid, NsidCount,
-    OrderCollectionsBy, UFOsRecord,
+    nice_duration, CommitAction, ConsumerInfo, Did, EncodingError, EventBatch, JustCount, Nsid,
+    NsidCount, NsidPrefix, OrderCollectionsBy, PrefixChild, PrefixCount, UFOsRecord,
};
use async_trait::async_trait;
use fjall::{
···
        }
    }
+    fn get_lexi_prefix(
+        &self,
+        snapshot: Snapshot,
+        prefix: NsidPrefix,
+        limit: usize,
+        cursor: Option<Vec<u8>>,
+        buckets: Vec<CursorBucket>,
+    ) -> StorageResult<(JustCount, Vec<PrefixChild>, Option<Vec<u8>>)> {
+        let prefix_sub = String::sub_prefix(prefix.as_str())?;
+        let cursor_child = cursor
+            .as_deref()
+            .map(|encoded_bytes| {
+                let decoded: String = db_complete(encoded_bytes)?;
+                let as_sub_prefix = String::sub_prefix(&decoded)?;
+                Ok::<_, EncodingError>(as_sub_prefix)
+            })
+            .transpose()?;
+        let mut iters: Vec<NsidCounter> = Vec::with_capacity(buckets.len());
+        for bucket in &buckets {
+            let it: NsidCounter = match bucket {
+                CursorBucket::Hour(t) => {
+                    let start = cursor_child
+                        .as_ref()
+                        .map(|child| HourlyRollupKey::after_nsid_prefix(*t, child))
+                        .unwrap_or_else(|| HourlyRollupKey::after_nsid_prefix(*t, &prefix_sub))?;
+                    let end = HourlyRollupKey::nsid_prefix_end(*t, &prefix_sub)?;
+                    get_lexi_iter::<HourlyRollupKey>(&snapshot, start, end)?
+                }
+                CursorBucket::Week(t) => {
+                    let start = cursor_child
+                        .as_ref()
+                        .map(|child| WeeklyRollupKey::after_nsid_prefix(*t, child))
+                        .unwrap_or_else(|| WeeklyRollupKey::after_nsid_prefix(*t, &prefix_sub))?;
+                    let end = WeeklyRollupKey::nsid_prefix_end(*t, &prefix_sub)?;
+                    get_lexi_iter::<WeeklyRollupKey>(&snapshot, start, end)?
+                }
+                CursorBucket::AllTime => {
+                    let start = cursor_child
+                        .as_ref()
+                        .map(|child| AllTimeRollupKey::after_nsid_prefix(child))
+                        .unwrap_or_else(|| AllTimeRollupKey::after_nsid_prefix(&prefix_sub))?;
+                    let end = AllTimeRollupKey::nsid_prefix_end(&prefix_sub)?;
+                    get_lexi_iter::<AllTimeRollupKey>(&snapshot, start, end)?
+                }
+            };
+            iters.push(it);
+        }
+
+        // with apologies
+        let mut iters: Vec<_> = iters
+            .into_iter()
+            .map(|it| {
+                it.map(|bla| bla.map(|(nsid, v)| (Child::from_prefix(&nsid, &prefix), v)))
+                    .peekable()
+            })
+            .collect();
+
+        let mut items = Vec::new();
+        let mut prefix_count = CountsValue::default();
+        #[derive(Debug, Clone, PartialEq)]
+        enum Child {
+            FullNsid(String),
+            ChildPrefix(String),
+        }
+        impl Child {
+            fn from_prefix(nsid: &Nsid, prefix: &NsidPrefix) -> Self {
+                if prefix.is_group_of(nsid) {
+                    Child::FullNsid(nsid.to_string())
+                } else {
+                    let suffix = nsid
+                        .as_str()
+                        .strip_prefix(&format!("{}.", prefix.0))
+                        .unwrap();
+                    let (segment, _) = suffix.split_once('.').unwrap();
+                    Child::ChildPrefix(format!("{}.{segment}", prefix.0))
+                }
+            }
+            fn is_before(&self, other: &Child) -> bool {
+                match (self, other) {
+                    (Child::FullNsid(s), Child::ChildPrefix(o)) if s == o => true,
+                    (Child::ChildPrefix(s), Child::FullNsid(o)) if s == o => false,
+                    (Child::FullNsid(s), Child::FullNsid(o)) => s < o,
+                    (Child::ChildPrefix(s), Child::ChildPrefix(o)) => s < o,
+                    (Child::FullNsid(s), Child::ChildPrefix(o)) => s < o,
+                    (Child::ChildPrefix(s), Child::FullNsid(o)) => s < o,
+                }
+            }
+            fn into_inner(self) -> String {
+                match self {
+                    Child::FullNsid(s) => s,
+                    Child::ChildPrefix(s) => s,
+                }
+            }
+        }
+        let mut current_child: Option<Child> = None;
+        for _ in 0..limit {
+            // double-scan the iters for each element: this could be eliminated but we're starting simple.
+            // first scan: find the lowest nsid
+            // second scan: take + merge, and advance all iters with lowest nsid
+            let mut lowest: Option<Child> = None;
+            for iter in &mut iters {
+                if let Some(bla) = iter.peek_mut() {
+                    let (child, _) = match bla {
+                        Ok(v) => v,
+                        Err(e) => Err(std::mem::replace(e, StorageError::Stolen))?,
+                    };
+
+                    lowest = match lowest {
+                        Some(ref current) if current.is_before(child) => lowest,
+                        _ => Some(child.clone()),
+                    };
+                }
+            }
+            current_child = lowest.clone();
+            let Some(child) = lowest else { break };
+
+            let mut merged = CountsValue::default();
+            for iter in &mut iters {
+                // unwrap: potential fjall error was already checked & bailed over when peeking in the first loop
+                while let Some(Ok((_, get_counts))) =
+                    iter.next_if(|v| v.as_ref().unwrap().0 == child)
+                {
+                    let counts = get_counts()?;
+                    prefix_count.merge(&counts);
+                    merged.merge(&counts);
+                }
+            }
+            items.push(match child {
+                Child::FullNsid(nsid) => PrefixChild::Collection(NsidCount {
+                    nsid,
+                    creates: merged.counts().creates,
+                    dids_estimate: merged.dids().estimate() as u64,
+                }),
+                Child::ChildPrefix(prefix) => PrefixChild::Prefix(PrefixCount {
+                    prefix,
+                    creates: merged.counts().creates,
+                    dids_estimate: merged.dids().estimate() as u64,
+                }),
+            });
+        }
+
+        // TODO: could serialize the prefix count (with sketch) into the cursor so that uniqs can actually count up?
+        // ....er the sketch is probably too big
+        // TODO: this is probably buggy on child-type boundaries bleh
+        let next_cursor = current_child
+            .map(|s| s.into_inner().to_db_bytes())
+            .transpose()?;
+
+        Ok(((&prefix_count).into(), items, next_cursor))
+    }
+
+    fn get_prefix(
+        &self,
+        prefix: NsidPrefix,
+        limit: usize,
+        order: OrderCollectionsBy,
+        since: Option<HourTruncatedCursor>,
+        until: Option<HourTruncatedCursor>,
+    ) -> StorageResult<(JustCount, Vec<PrefixChild>, Option<Vec<u8>>)> {
+        let snapshot = self.rollups.snapshot();
+        let buckets = if let (None, None) = (since, until) {
+            vec![CursorBucket::AllTime]
+        } else {
+            let mut lower = self.get_earliest_hour(Some(&snapshot))?;
+            if let Some(specified) = since {
+                if specified > lower {
+                    lower = specified;
+                }
+            }
+            let upper = until.unwrap_or_else(|| Cursor::at(SystemTime::now()).into());
+            CursorBucket::buckets_spanning(lower, upper)
+        };
+        match order {
+            OrderCollectionsBy::Lexi { cursor } => {
+                self.get_lexi_prefix(snapshot, prefix, limit, cursor, buckets)
+            }
+            _ => todo!(),
+        }
+    }
+
    /// - step: output series time step, in seconds
    fn get_timeseries(
        &self,
···
        let s = self.clone();
        tokio::task::spawn_blocking(move || {
            FjallReader::get_collections(&s, limit, order, since, until)
+        })
+        .await?
+    }
+    async fn get_prefix(
+        &self,
+        prefix: NsidPrefix,
+        limit: usize,
+        order: OrderCollectionsBy,
+        since: Option<HourTruncatedCursor>,
+        until: Option<HourTruncatedCursor>,
+    ) -> StorageResult<(JustCount, Vec<PrefixChild>, Option<Vec<u8>>)> {
+        let s = self.clone();
+        tokio::task::spawn_blocking(move || {
+            FjallReader::get_prefix(&s, prefix, limit, order, since, until)
        })
        .await?
    }
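The tie-breaking in `Child::is_before` above matters because a collection and a child prefix can render to the same string: with prefix `app.bsky`, a collection named `app.bsky.feed` (if one existed) and the rolled-up prefix of `app.bsky.feed.post` both display as `app.bsky.feed`. A hypothetical check of that ordering (`Child` is function-local in the diff, so this is illustrative only):

```rust
#[test]
fn child_ordering_prefers_full_nsid_on_ties() {
    let full = Child::FullNsid("app.bsky.feed".into());
    let pre = Child::ChildPrefix("app.bsky.feed".into());
    // equal strings: the full NSID sorts first, the prefix does not
    assert!(full.is_before(&pre));
    assert!(!pre.is_before(&full));

    // otherwise plain lexicographic string order applies
    let later = Child::FullNsid("app.bsky.graph".into());
    assert!(pre.is_before(&later)); // "...feed" < "...graph"
}
```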
+66 -6
ufos/src/store_types.rs
···
pub type HourlyRollupStaticPrefix = DbStaticStr<_HourlyRollupStaticStr>;
pub type HourlyRollupKeyHourPrefix = DbConcat<HourlyRollupStaticPrefix, HourTruncatedCursor>;
pub type HourlyRollupKey = DbConcat<HourlyRollupKeyHourPrefix, Nsid>;
+pub type HourlyRollupPre = DbConcat<HourlyRollupKeyHourPrefix, Vec<u8>>; // bit hack but
impl HourlyRollupKey {
    pub fn new(cursor: HourTruncatedCursor, nsid: &Nsid) -> Self {
        Self::from_pair(
···
            nsid.clone(),
        )
    }
+    pub fn new_nsid_prefix(cursor: HourTruncatedCursor, pre: &[u8]) -> HourlyRollupPre {
+        HourlyRollupPre::from_pair(
+            DbConcat::from_pair(Default::default(), cursor),
+            pre.to_vec(),
+        )
+    }
    pub fn cursor(&self) -> HourTruncatedCursor {
        self.prefix.suffix
    }
···
    pub fn after_nsid(hour: HourTruncatedCursor, nsid: &Nsid) -> EncodingResult<Bound<Vec<u8>>> {
        Ok(Bound::Excluded(Self::new(hour, nsid).to_db_bytes()?))
    }
+    pub fn after_nsid_prefix(
+        hour: HourTruncatedCursor,
+        pre: &[u8],
+    ) -> EncodingResult<Bound<Vec<u8>>> {
+        Ok(Bound::Excluded(
+            Self::new_nsid_prefix(hour, pre).to_db_bytes()?,
+        ))
+    }
    pub fn end(hour: HourTruncatedCursor) -> EncodingResult<Bound<Vec<u8>>> {
        let prefix = HourlyRollupKeyHourPrefix::from_pair(Default::default(), hour);
        Ok(Bound::Excluded(Self::prefix_range_end(&prefix)?))
    }
+    pub fn nsid_prefix_end(
+        hour: HourTruncatedCursor,
+        pre: &[u8],
+    ) -> EncodingResult<Bound<Vec<u8>>> {
+        Ok(Bound::Excluded(
+            Self::new_nsid_prefix(hour, pre).as_prefix_range_end()?,
+        ))
+    }
}
impl WithCollection for HourlyRollupKey {
    fn collection(&self) -> &Nsid {
···
pub type WeeklyRollupStaticPrefix = DbStaticStr<_WeeklyRollupStaticStr>;
pub type WeeklyRollupKeyWeekPrefix = DbConcat<WeeklyRollupStaticPrefix, WeekTruncatedCursor>;
pub type WeeklyRollupKey = DbConcat<WeeklyRollupKeyWeekPrefix, Nsid>;
+pub type WeeklyRollupPre = DbConcat<WeeklyRollupKeyWeekPrefix, Vec<u8>>;
impl WeeklyRollupKey {
    pub fn new(cursor: WeekTruncatedCursor, nsid: &Nsid) -> Self {
        Self::from_pair(
···
            nsid.clone(),
        )
    }
+    pub fn new_nsid_prefix(cursor: WeekTruncatedCursor, pre: &[u8]) -> WeeklyRollupPre {
+        WeeklyRollupPre::from_pair(
+            DbConcat::from_pair(Default::default(), cursor),
+            pre.to_vec(),
+        )
+    }
    pub fn cursor(&self) -> WeekTruncatedCursor {
        self.prefix.suffix
    }
-    pub fn start(hour: WeekTruncatedCursor) -> EncodingResult<Bound<Vec<u8>>> {
-        let prefix = WeeklyRollupKeyWeekPrefix::from_pair(Default::default(), hour);
+    pub fn start(week: WeekTruncatedCursor) -> EncodingResult<Bound<Vec<u8>>> {
+        let prefix = WeeklyRollupKeyWeekPrefix::from_pair(Default::default(), week);
        let prefix_bytes = Self::from_prefix_to_db_bytes(&prefix)?;
        Ok(Bound::Included(prefix_bytes))
    }
-    pub fn after_nsid(hour: WeekTruncatedCursor, nsid: &Nsid) -> EncodingResult<Bound<Vec<u8>>> {
-        Ok(Bound::Excluded(Self::new(hour, nsid).to_db_bytes()?))
+    pub fn after_nsid(week: WeekTruncatedCursor, nsid: &Nsid) -> EncodingResult<Bound<Vec<u8>>> {
+        Ok(Bound::Excluded(Self::new(week, nsid).to_db_bytes()?))
    }
-    pub fn end(hour: WeekTruncatedCursor) -> EncodingResult<Bound<Vec<u8>>> {
-        let prefix = WeeklyRollupKeyWeekPrefix::from_pair(Default::default(), hour);
+    pub fn after_nsid_prefix(
+        week: WeekTruncatedCursor,
+        prefix: &[u8],
+    ) -> EncodingResult<Bound<Vec<u8>>> {
+        Ok(Bound::Excluded(
+            Self::new_nsid_prefix(week, prefix).to_db_bytes()?,
+        ))
+    }
+    pub fn end(week: WeekTruncatedCursor) -> EncodingResult<Bound<Vec<u8>>> {
+        let prefix = WeeklyRollupKeyWeekPrefix::from_pair(Default::default(), week);
        Ok(Bound::Excluded(Self::prefix_range_end(&prefix)?))
    }
+    pub fn nsid_prefix_end(
+        week: WeekTruncatedCursor,
+        prefix: &[u8],
+    ) -> EncodingResult<Bound<Vec<u8>>> {
+        Ok(Bound::Excluded(
+            Self::new_nsid_prefix(week, prefix).as_prefix_range_end()?,
+        ))
+    }
}
impl WithCollection for WeeklyRollupKey {
    fn collection(&self) -> &Nsid {
···
static_str!("ever_counts", _AllTimeRollupStaticStr);
pub type AllTimeRollupStaticPrefix = DbStaticStr<_AllTimeRollupStaticStr>;
pub type AllTimeRollupKey = DbConcat<AllTimeRollupStaticPrefix, Nsid>;
+pub type AllTimeRollupPre = DbConcat<AllTimeRollupStaticPrefix, Vec<u8>>;
impl AllTimeRollupKey {
    pub fn new(nsid: &Nsid) -> Self {
        Self::from_pair(Default::default(), nsid.clone())
    }
+    pub fn new_nsid_prefix(pre: &[u8]) -> AllTimeRollupPre {
+        AllTimeRollupPre::from_pair(Default::default(), pre.to_vec())
+    }
    pub fn start() -> EncodingResult<Bound<Vec<u8>>> {
        Ok(Bound::Included(Self::from_prefix_to_db_bytes(
            &Default::default(),
···
    pub fn after_nsid(nsid: &Nsid) -> EncodingResult<Bound<Vec<u8>>> {
        Ok(Bound::Excluded(Self::new(nsid).to_db_bytes()?))
    }
+    pub fn after_nsid_prefix(prefix: &[u8]) -> EncodingResult<Bound<Vec<u8>>> {
+        Ok(Bound::Excluded(
+            Self::new_nsid_prefix(prefix).to_db_bytes()?,
+        ))
+    }
    pub fn end() -> EncodingResult<Bound<Vec<u8>>> {
        Ok(Bound::Excluded(
            Self::prefix_range_end(&Default::default())?,
+        ))
+    }
+    pub fn nsid_prefix_end(prefix: &[u8]) -> EncodingResult<Bound<Vec<u8>>> {
+        Ok(Bound::Excluded(
+            Self::new_nsid_prefix(prefix).as_prefix_range_end()?,
        ))
    }
}
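A sketch of how the new bound helpers are meant to be combined, mirroring their use in `get_lexi_prefix` in `ufos/src/storage_fjall.rs` (the free function here is hypothetical; assumes the ufos crate's types are in scope):

```rust
use std::ops::Bound;

/// Exclusive (start, end) bounds covering every all-time rollup key whose
/// NSID starts with the already-encoded prefix bytes. The start is the bare
/// prefix itself (safe to exclude: no full key equals it), and the end comes
/// from as_prefix_range_end's increment-past-the-prefix bound.
fn all_time_bounds_for(prefix_sub: &[u8]) -> EncodingResult<(Bound<Vec<u8>>, Bound<Vec<u8>>)> {
    let start = AllTimeRollupKey::after_nsid_prefix(prefix_sub)?;
    let end = AllTimeRollupKey::nsid_prefix_end(prefix_sub)?;
    Ok((start, end))
}
```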