Constellation, Spacedust, Slingshot, UFOs: atproto crates and services for microcosm

new clippies
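Every hunk below is the same mechanical fix: clippy's `uninlined_format_args` lint asks that variables be captured directly in the format string instead of passed as trailing positional arguments. A minimal before/after sketch (inline identifier capture has been stable since Rust 1.58):

```rust
fn main() {
    let limit = 500;
    // Old style: positional argument, flagged by clippy::uninlined_format_args.
    let old = format!("limit not in 1..=200: {}", limit);
    // New style: the identifier is captured directly in the format string.
    let new = format!("limit not in 1..=200: {limit}");
    assert_eq!(old, new);
}
```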

Changed files
+12 -12
+1 -1
.github/workflows/checks.yml
···
- name: get nightly toolchain for jetstream fmt
run: rustup toolchain install nightly --allow-downgrade -c rustfmt
- name: fmt
- run: cargo fmt --package links --package constellation --package ufos -- --check
+ run: cargo fmt --package links --package constellation --package ufos --package spacedust --package who-am-i -- --check
- name: fmt jetstream (nightly)
run: cargo +nightly fmt --package jetstream -- --check
- name: clippy
+1 -1
spacedust/src/server.rs
···
) -> Result<MultiSubscribeQuery, HttpError> {
let raw_query = ctx.request.uri().query().unwrap_or("");
let q = serde_qs::from_str(raw_query).map_err(|e| {
- HttpError::for_bad_request(None, format!("unable to parse query string: {}", e))
+ HttpError::for_bad_request(None, format!("unable to parse query string: {e}"))
})?;
Ok(q)
}
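For context on what this handler does: `serde_qs::from_str` deserializes a raw query string, including bracketed sequence syntax, into a typed struct. A minimal sketch; the fields below are hypothetical stand-ins for spacedust's `MultiSubscribeQuery`:

```rust
use serde::Deserialize;

// Hypothetical stand-in for spacedust's MultiSubscribeQuery.
#[derive(Debug, Deserialize)]
struct MultiSubscribeQuery {
    subjects: Vec<String>,
    limit: Option<u32>,
}

fn main() -> Result<(), serde_qs::Error> {
    // serde_qs understands bracketed sequences like subjects[0]=...
    let q: MultiSubscribeQuery =
        serde_qs::from_str("subjects[0]=at://did:plc:example/app.bsky.feed.post/1&limit=50")?;
    println!("{q:?}");
    Ok(())
}
```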
+1 -1
ufos/src/consumer.rs
···
let beginning = match self.current_batch.initial_cursor.map(|c| c.elapsed()) {
None => "unknown".to_string(),
- Some(Ok(t)) => format!("{:?}", t),
+ Some(Ok(t)) => format!("{t:?}"),
Some(Err(e)) => format!("+{:?}", e.duration()),
};
log::trace!(
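The three-way match above mirrors `std::time::SystemTime::elapsed()`, which returns `Err` when the timestamp lies in the future of the local clock; the error's `duration()` says by how much. A standalone sketch, assuming the batch cursor behaves like a `SystemTime`:

```rust
use std::time::{Duration, SystemTime};

fn describe_age(initial_cursor: Option<SystemTime>) -> String {
    match initial_cursor.map(|c| c.elapsed()) {
        None => "unknown".to_string(),
        // Cursor in the past: elapsed() gives its age.
        Some(Ok(t)) => format!("{t:?}"),
        // Cursor ahead of the local clock: the error says how far.
        Some(Err(e)) => format!("+{:?}", e.duration()),
    }
}

fn main() {
    let five_seconds_ago = SystemTime::now() - Duration::from_secs(5);
    println!("{}", describe_age(Some(five_seconds_ago)));
}
```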
+2 -2
ufos/src/db_types.rs
···
] {
let serialized = s.to_string().to_db_bytes()?;
let prefixed = String::sub_prefix(pre)?;
- assert_eq!(serialized.starts_with(&prefixed), is_pre, "{}", desc);
+ assert_eq!(serialized.starts_with(&prefixed), is_pre, "{desc}");
}
Ok(())
}
···
] {
let serialized = Nsid::new(s.to_string()).unwrap().to_db_bytes()?;
let prefixed = Nsid::sub_prefix(pre)?;
- assert_eq!(serialized.starts_with(&prefixed), is_pre, "{}", desc);
+ assert_eq!(serialized.starts_with(&prefixed), is_pre, "{desc}");
}
Ok(())
}
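Both hunks touch the same table-driven test pattern: serialize a value, build a prefix, and assert whether the bytes start with it, using the row's description as the assertion message. A generic sketch with hypothetical stand-ins for `to_db_bytes`/`sub_prefix`:

```rust
// Hypothetical stand-ins for ufos's DB key encoding helpers.
fn to_db_bytes(s: &str) -> Vec<u8> {
    s.as_bytes().to_vec()
}
fn sub_prefix(pre: &str) -> Vec<u8> {
    pre.as_bytes().to_vec()
}

fn main() {
    // (value, prefix, expected, description) rows, as in the tests above.
    for (s, pre, is_pre, desc) in [
        ("app.bsky.feed.post", "app.bsky", true, "matching prefix"),
        ("app.bsky.feed.post", "com.example", false, "non-matching prefix"),
    ] {
        let serialized = to_db_bytes(s);
        let prefixed = sub_prefix(pre);
        // The inlined `{desc}` is exactly what the clippy fix switches to.
        assert_eq!(serialized.starts_with(&prefixed), is_pre, "{desc}");
    }
}
```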
+1 -1
ufos/src/server/collections_query.rs
···
) -> Result<MultiCollectionQuery, HttpError> {
let raw_query = ctx.request.uri().query().unwrap_or("");
let q = serde_qs::from_str(raw_query).map_err(|e| {
- HttpError::for_bad_request(None, format!("unable to parse query string: {}", e))
+ HttpError::for_bad_request(None, format!("unable to parse query string: {e}"))
})?;
Ok(q)
}
+5 -5
ufos/src/server/mod.rs
···
};
if !(1..=200).contains(&limit) {
- let msg = format!("limit not in 1..=200: {}", limit);
+ let msg = format!("limit not in 1..=200: {limit}");
return Err(HttpError::for_bad_request(None, msg));
}
···
};
if !(1..=200).contains(&limit) {
- let msg = format!("limit not in 1..=200: {}", limit);
+ let msg = format!("limit not in 1..=200: {limit}");
return Err(HttpError::for_bad_request(None, msg));
}
···
let step = if let Some(secs) = q.step {
if secs < 3600 {
- let msg = format!("step is too small: {}", secs);
+ let msg = format!("step is too small: {secs}");
Err(HttpError::for_bad_request(None, msg))?;
}
(secs / 3600) * 3600 // truncate to hour
···
};
let nsid = Nsid::new(q.collection).map_err(|e| {
- HttpError::for_bad_request(None, format!("collection was not a valid NSID: {:?}", e))
+ HttpError::for_bad_request(None, format!("collection was not a valid NSID: {e:?}"))
})?;
let (range_cursors, series) = storage
···
..Default::default()
})
.start()
- .map_err(|error| format!("failed to start server: {}", error))?
+ .map_err(|error| format!("failed to start server: {error}"))?
.await
}
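The validation style in this file is worth noting: bounds checks via `RangeInclusive::contains`, and hour alignment via flooring integer division. A minimal standalone sketch of both checks:

```rust
fn validate_limit(limit: u64) -> Result<u64, String> {
    if !(1..=200).contains(&limit) {
        return Err(format!("limit not in 1..=200: {limit}"));
    }
    Ok(limit)
}

fn truncate_step(secs: u64) -> Result<u64, String> {
    if secs < 3600 {
        return Err(format!("step is too small: {secs}"));
    }
    // Integer division floors, so this rounds down to a whole hour.
    Ok((secs / 3600) * 3600)
}

fn main() {
    assert_eq!(validate_limit(50), Ok(50));
    assert!(validate_limit(500).is_err());
    assert_eq!(truncate_step(7325), Ok(7200)); // 2h02m05s -> 2h
}
```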
+1 -1
ufos/src/storage_fjall.rs
···
let dt = t0.elapsed();
- log::trace!("finished trimming {n} nsids in {:?}: {total_danglers} dangling and {total_deleted} total removed.", dt);
+ log::trace!("finished trimming {n} nsids in {dt:?}: {total_danglers} dangling and {total_deleted} total removed.");
histogram!("storage_trim_dirty_nsids").record(completed.len() as f64);
histogram!("storage_trim_duration").record(dt.as_micros() as f64);
counter!("storage_trim_removed", "dangling" => "true").increment(total_danglers as u64);