Constellation, Spacedust, Slingshot, UFOs: atproto crates and services for microcosm

wow fmt

+1 -1
Makefile
···
	cargo test --all-features
fmt:
-	cargo fmt --package links --package constellation --package ufos
+	cargo fmt --package links --package constellation --package ufos --package spacedust --package who-am-i
	cargo +nightly fmt --package jetstream
clippy:
+12 -6
spacedust/src/consumer.rs
···
-use std::sync::Arc;
-use tokio_util::sync::CancellationToken;
use crate::ClientMessage;
use crate::error::ConsumerError;
use crate::removable_delay_queue;
···
    events::{CommitOp, Cursor, EventKind},
};
use links::collect_links;
+use std::sync::Arc;
use tokio::sync::broadcast;
+use tokio_util::sync::CancellationToken;
const MAX_LINKS_PER_EVENT: usize = 100;
···
        };
        // TODO: something a bit more robust
-        let at_uri = format!("at://{}/{}/{}", &*event.did, &*commit.collection, &*commit.rkey);
+        let at_uri = format!(
+            "at://{}/{}/{}",
+            &*event.did, &*commit.collection, &*commit.rkey
+        );
        // TODO: keep a buffer and remove quick deletes to debounce notifs
        // for now we just drop all deletes eek
        if commit.operation == CommitOp::Delete {
-            d.remove_range((at_uri.clone(), 0)..=(at_uri.clone(), MAX_LINKS_PER_EVENT)).await;
+            d.remove_range((at_uri.clone(), 0)..=(at_uri.clone(), MAX_LINKS_PER_EVENT))
+                .await;
            continue;
        }
        let Some(ref record) = commit.record else {
···
            if i >= MAX_LINKS_PER_EVENT {
                // todo: indicate if the link limit was reached (-> links omitted)
                log::warn!("consumer: event has too many links, ignoring the rest");
-                metrics::counter!("consumer_dropped_links", "reason" => "too_many_links").increment(1);
+                metrics::counter!("consumer_dropped_links", "reason" => "too_many_links")
+                    .increment(1);
                break;
            }
            let client_message = match ClientMessage::new_link(link, &at_uri, commit) {
···
                Err(e) => {
                    // TODO indicate to clients that a link has been dropped
                    log::warn!("consumer: failed to serialize link to json: {e:?}");
-                    metrics::counter!("consumer_dropped_links", "reason" => "failed_to_serialize").increment(1);
+                    metrics::counter!("consumer_dropped_links", "reason" => "failed_to_serialize")
+                        .increment(1);
                    continue;
                }
            };
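Why the delete branch can use a single `remove_range`: the queue keys are `(at_uri, index)` tuples (see the `Output<(String, usize), T>` signature in delay.rs below), and tuples compare lexicographically, so all of one record's pending link notifications occupy a contiguous key range. A self-contained illustration of that ordering property (toy code, not from this repo):

```rust
use std::collections::BTreeMap;

fn main() {
    // Tuple keys sort lexicographically, so every pending link for one at-uri
    // is contiguous and a single range removal can clear them all.
    let mut items: BTreeMap<(String, usize), &str> = BTreeMap::new();
    let uri = "at://did:plc:example/app.bsky.feed.post/3kabc".to_string();
    items.insert((uri.clone(), 0), "link 0");
    items.insert((uri.clone(), 1), "link 1");
    items.insert(("at://unrelated".to_string(), 0), "other record");

    // Mirrors d.remove_range((at_uri, 0)..=(at_uri, MAX_LINKS_PER_EVENT)):
    let doomed: Vec<_> = items
        .range((uri.clone(), 0)..=(uri.clone(), 100))
        .map(|(k, _)| k.clone())
        .collect();
    for k in &doomed {
        items.remove(k);
    }
    assert_eq!(items.len(), 1); // only the unrelated record's entry survives
}
```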
+2 -2
spacedust/src/delay.rs
···
+use crate::error::DelayError;
use crate::removable_delay_queue;
-use tokio_util::sync::CancellationToken;
use tokio::sync::broadcast;
-use crate::error::DelayError;
+use tokio_util::sync::CancellationToken;
pub async fn to_broadcast<T>(
    source: removable_delay_queue::Output<(String, usize), T>,
+19 -8
spacedust/src/lib.rs
···
pub mod consumer;
pub mod delay;
pub mod error;
+pub mod removable_delay_queue;
pub mod server;
pub mod subscriber;
-pub mod removable_delay_queue;
-use links::CollectedLink;
use jetstream::events::CommitEvent;
+use links::CollectedLink;
-use tokio_tungstenite::tungstenite::Message;
use serde::{Deserialize, Serialize};
use server::MultiSubscribeQuery;
+use tokio_tungstenite::tungstenite::Message;
#[derive(Debug)]
pub struct FilterableProperties {
···
}
impl ClientMessage {
-    pub fn new_link(link: CollectedLink, at_uri: &str, commit: &CommitEvent) -> Result<Self, serde_json::Error> {
+    pub fn new_link(
+        link: CollectedLink,
+        at_uri: &str,
+        commit: &CommitEvent,
+    ) -> Result<Self, serde_json::Error> {
        let subject_did = link.target.did();
        let subject = link.target.into_string();
···
        let message = Message::Text(client_event_json.into());
-        let properties = FilterableProperties { subject, subject_did, source };
-        Ok(ClientMessage { message, properties })
+        let properties = FilterableProperties {
+            subject,
+            subject_did,
+            source,
+        };
+        Ok(ClientMessage {
+            message,
+            properties,
+        })
    }
}
#[derive(Debug, Serialize)]
-#[serde(rename_all="snake_case")]
+#[serde(rename_all = "snake_case")]
pub struct ClientEvent {
-    kind: &'static str,   // "link"
+    kind: &'static str, // "link"
    origin: &'static str, // "live", "replay", "backfill"
    link: ClientLinkEvent,
}
+11 -6
spacedust/src/main.rs
···
-
use spacedust::error::MainTaskError;
use spacedust::consumer;
-
use spacedust::server;
use spacedust::delay;
use spacedust::removable_delay_queue::removable_delay_queue;
use clap::Parser;
use metrics_exporter_prometheus::PrometheusBuilder;
use tokio::sync::broadcast;
use tokio_util::sync::CancellationToken;
-
use std::time::Duration;
/// Aggregate links in the at-mosphere
#[derive(Parser, Debug, Clone)]
···
args.jetstream,
None,
args.jetstream_no_zstd,
-
consumer_shutdown
)
-
.await?;
Ok(())
});
let delay_shutdown = shutdown.clone();
tasks.spawn(async move {
-
delay::to_broadcast(delay_queue_receiver, consumer_delayed_sender, delay_shutdown).await?;
Ok(())
});
···
use spacedust::consumer;
use spacedust::delay;
+
use spacedust::error::MainTaskError;
use spacedust::removable_delay_queue::removable_delay_queue;
+
use spacedust::server;
use clap::Parser;
use metrics_exporter_prometheus::PrometheusBuilder;
+
use std::time::Duration;
use tokio::sync::broadcast;
use tokio_util::sync::CancellationToken;
/// Aggregate links in the at-mosphere
#[derive(Parser, Debug, Clone)]
···
args.jetstream,
None,
args.jetstream_no_zstd,
+
consumer_shutdown,
)
+
.await?;
Ok(())
});
let delay_shutdown = shutdown.clone();
tasks.spawn(async move {
+
delay::to_broadcast(
+
delay_queue_receiver,
+
consumer_delayed_sender,
+
delay_shutdown,
+
)
+
.await?;
Ok(())
});
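For orientation: this hunk wires spacedust's delay stage. `removable_delay_queue` hands the consumer an input handle (enqueue, plus `remove_range` on deletes) and hands `delay::to_broadcast` the output handle, which it drains into the broadcast channel the websocket server fans out to subscribers. A rough sketch of that wiring; the delay duration, channel capacity, and element type here are placeholder assumptions, not values from this diff:

```rust
// Sketch of the delay stage (placeholder delay/capacity/element type).
use spacedust::{delay, removable_delay_queue::removable_delay_queue};
use std::time::Duration;
use tokio::sync::broadcast;
use tokio_util::sync::CancellationToken;

async fn delay_stage(shutdown: CancellationToken) {
    // `_input` would go to the consumer task: it enqueues link notifications
    // and calls remove_range when a delete for the same at-uri arrives.
    let (_input, delay_queue_receiver) =
        removable_delay_queue::<(String, usize), String>(Duration::from_secs(30));
    let (consumer_delayed_sender, _keep_channel_open) = broadcast::channel(1024);
    let delay_shutdown = shutdown.clone();
    // As in the spawn above: emit matured items into the broadcast channel.
    let _ = delay::to_broadcast(delay_queue_receiver, consumer_delayed_sender, delay_shutdown)
        .await;
}
```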
+15 -11
spacedust/src/removable_delay_queue.rs
···
-use std::ops::RangeBounds;
use std::collections::{BTreeMap, VecDeque};
+use std::ops::RangeBounds;
-use std::time::{Duration, Instant};
-use tokio::sync::Mutex;
use std::sync::Arc;
+use std::time::{Duration, Instant};
use thiserror::Error;
+use tokio::sync::Mutex;
#[derive(Debug, Error)]
pub enum EnqueueError<T> {
···
#[derive(Debug)]
struct Queue<K: Key, T> {
    queue: VecDeque<(Instant, K)>,
-    items: BTreeMap<K, T>
+    items: BTreeMap<K, T>,
}
pub struct Input<K: Key, T> {
···
    pub async fn remove_range(&self, range: impl RangeBounds<K>) {
        let n = {
            let mut q = self.q.lock().await;
-            let keys = q.items.range(range).map(|(k, _)| k).cloned().collect::<Vec<_>>();
+            let keys = q
+                .items
+                .range(range)
+                .map(|(k, _)| k)
+                .cloned()
+                .collect::<Vec<_>>();
            for k in &keys {
                q.items.remove(k);
            }
···
            } else {
                let overshoot = now.saturating_duration_since(expected_release);
                metrics::counter!("delay_queue_emit_total", "early" => "no").increment(1);
-                metrics::histogram!("delay_queue_emit_overshoot").record(overshoot.as_secs_f64());
+                metrics::histogram!("delay_queue_emit_overshoot")
+                    .record(overshoot.as_secs_f64());
            }
-            return Some(item)
+            return Some(item);
        } else if Arc::strong_count(&self.q) == 1 {
            return None;
        }
        // the queue is *empty*, so we need to wait at least as long as the current delay
        tokio::time::sleep(self.delay).await;
        metrics::counter!("delay_queue_entirely_empty_total").increment(1);
-        };
+        }
    }
}
-pub fn removable_delay_queue<K: Key, T>(
-    delay: Duration,
-) -> (Input<K, T>, Output<K, T>) {
+pub fn removable_delay_queue<K: Key, T>(delay: Duration) -> (Input<K, T>, Output<K, T>) {
    let q: Arc<Mutex<Queue<K, T>>> = Arc::new(Mutex::new(Queue {
        queue: VecDeque::new(),
        items: BTreeMap::new(),
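The contract visible in this hunk: items mature `delay` after enqueue (overshoot beyond the requested delay is recorded in the `delay_queue_emit_overshoot` histogram), `remove_range` cancels pending items by key range, the output yields `None` once every `Input` handle is dropped (the `Arc::strong_count` check), and an entirely empty queue sleeps a full delay period before re-checking. A usage sketch; the enqueue and receive method names (`insert`, `next`) do not appear in this hunk and are assumptions:

```rust
// Usage sketch; `insert` and `next` are assumed names, everything else is
// visible in this file's diff.
let (input, output) =
    removable_delay_queue::<(String, usize), String>(Duration::from_secs(30));

// producer (assumed name):
// input.insert(("at://example/record".to_string(), 0), item).await?;

// a delete arriving within the window cancels the whole key range for the uri:
input
    .remove_range(
        ("at://example/record".to_string(), 0)..=("at://example/record".to_string(), 100),
    )
    .await;

// consumer (assumed name); None means every Input handle was dropped:
// while let Some(item) = output.next().await { /* forward to broadcast */ }
```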
+19 -17
spacedust/src/server.rs
···
+use crate::ClientMessage;
use crate::error::ServerError;
use crate::subscriber::Subscriber;
-use metrics::{histogram, counter};
-use std::sync::Arc;
-use crate::ClientMessage;
+use dropshot::{
+    ApiDescription, ApiEndpointBodyContentType, Body, ConfigDropshot, ConfigLogging,
+    ConfigLoggingLevel, ExtractorMetadata, HttpError, HttpResponse, Query, RequestContext,
+    ServerBuilder, ServerContext, SharedExtractor, WebsocketConnection, channel, endpoint,
+};
use http::{
-    header::{ORIGIN, USER_AGENT},
    Response, StatusCode,
-};
-use dropshot::{
-    Body,
-    ApiDescription, ConfigDropshot, ConfigLogging, ConfigLoggingLevel, Query, RequestContext,
-    ServerBuilder, WebsocketConnection, channel, endpoint, HttpResponse,
-    ApiEndpointBodyContentType, ExtractorMetadata, HttpError, ServerContext,
-    SharedExtractor,
+    header::{ORIGIN, USER_AGENT},
};
+use metrics::{counter, histogram};
+use std::sync::Arc;
+use async_trait::async_trait;
use schemars::JsonSchema;
use serde::{Deserialize, Serialize};
+use std::collections::HashSet;
use tokio::sync::broadcast;
use tokio::time::Instant;
use tokio_tungstenite::tungstenite::protocol::{Role, WebSocketConfig};
use tokio_util::sync::CancellationToken;
-use async_trait::async_trait;
-use std::collections::HashSet;
const INDEX_HTML: &str = include_str!("../static/index.html");
const FAVICON: &[u8] = include_bytes!("../static/favicon.ico");
···
pub async fn serve(
    b: broadcast::Sender<Arc<ClientMessage>>,
    d: broadcast::Sender<Arc<ClientMessage>>,
-    shutdown: CancellationToken
+    shutdown: CancellationToken,
) -> Result<(), ServerError> {
    let config_logging = ConfigLogging::StderrTerminal {
        level: ConfigLoggingLevel::Info,
···
    );
    let sub_shutdown = shutdown.clone();
-    let ctx = Context { spec, b, d, shutdown: sub_shutdown };
+    let ctx = Context {
+        spec,
+        b,
+        d,
+        shutdown: sub_shutdown,
+    };
    let server = ServerBuilder::new(api, ctx, log)
        .config(ConfigDropshot {
···
}
// TODO: cors for HttpError
-
/// Serve index page as html
#[endpoint {
···
        upgraded.into_inner(),
        Role::Server,
        Some(WebSocketConfig::default().max_message_size(
-            Some(10 * 2_usize.pow(20)) // 10MiB, matching jetstream
+            Some(10 * 2_usize.pow(20)), // 10MiB, matching jetstream
        )),
    )
    .await;
+23 -26
spacedust/src/subscriber.rs
···
use crate::error::SubscriberUpdateError;
-use std::sync::Arc;
-use tokio::time::interval;
-use std::time::Duration;
-use futures::StreamExt;
+use crate::server::MultiSubscribeQuery;
use crate::{ClientMessage, FilterableProperties, SubscriberSourcedMessage};
-use crate::server::MultiSubscribeQuery;
+use dropshot::WebsocketConnectionRaw;
use futures::SinkExt;
+use futures::StreamExt;
use std::error::Error;
+use std::sync::Arc;
+use std::time::Duration;
use tokio::sync::broadcast::{self, error::RecvError};
+use tokio::time::interval;
use tokio_tungstenite::{WebSocketStream, tungstenite::Message};
use tokio_util::sync::CancellationToken;
-use dropshot::WebsocketConnectionRaw;
const PING_PERIOD: Duration = Duration::from_secs(30);
···
}
impl Subscriber {
-    pub fn new(
-        query: MultiSubscribeQuery,
-        shutdown: CancellationToken,
-    ) -> Self {
+    pub fn new(query: MultiSubscribeQuery, shutdown: CancellationToken) -> Self {
        Self { query, shutdown }
    }
    pub async fn start(
        mut self,
        ws: WebSocketStream<WebsocketConnectionRaw>,
-        mut receiver: broadcast::Receiver<Arc<ClientMessage>>
+        mut receiver: broadcast::Receiver<Arc<ClientMessage>>,
    ) -> Result<(), Box<dyn Error>> {
        let mut ping_state = None;
        let (mut ws_sender, mut ws_receiver) = ws.split();
···
                        // TODO: send client an explanation
                        self.shutdown.cancel();
                    }
+                    log::trace!("subscriber updated with opts: {:?}", self.query);
                },
                Some(Ok(m)) => log::trace!("subscriber sent an unexpected message: {m:?}"),
                Some(Err(e)) => {
···
        Ok(())
    }
-    fn filter(
-        &self,
-        properties: &FilterableProperties,
-    ) -> bool {
+    fn filter(&self, properties: &FilterableProperties) -> bool {
        let query = &self.query;
        // subject + subject DIDs are logical OR
-        if !(
-            query.wanted_subjects.is_empty() && query.wanted_subject_dids.is_empty() ||
-            query.wanted_subjects.contains(&properties.subject) ||
-            properties.subject_did.as_ref().map(|did| query.wanted_subject_dids.contains(did)).unwrap_or(false)
-        ) { // wowwww ^^ fix that
-            return false
+        if !(query.wanted_subjects.is_empty() && query.wanted_subject_dids.is_empty()
+            || query.wanted_subjects.contains(&properties.subject)
+            || properties
+                .subject_did
+                .as_ref()
+                .map(|did| query.wanted_subject_dids.contains(did))
+                .unwrap_or(false))
+        {
+            // wowwww ^^ fix that
+            return false;
        }
        // subjects together with sources are logical AND
        if !(query.wanted_sources.is_empty() || query.wanted_sources.contains(&properties.source)) {
-            return false
+            return false;
        }
        true
    }
}
-
-
impl MultiSubscribeQuery {
    pub fn update_from_raw(&mut self, s: &str) -> Result<(), SubscriberUpdateError> {
-        let SubscriberSourcedMessage::OptionsUpdate(opts) = serde_json::from_str(s)
-            .map_err(SubscriberUpdateError::FailedToParseMessage)?;
+        let SubscriberSourcedMessage::OptionsUpdate(opts) =
+            serde_json::from_str(s).map_err(SubscriberUpdateError::FailedToParseMessage)?;
        if opts.wanted_sources.len() > 1_000 {
            return Err(SubscriberUpdateError::TooManySourcesWanted);
        }
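The two comments in `filter` carry the semantics: subjects and subject DIDs form a single OR group, an empty set means "match everything" for its group, and the source check ANDs with it. A standalone restatement with plain `HashSet`s in place of `MultiSubscribeQuery`/`FilterableProperties` (the real field types aren't shown in this hunk):

```rust
use std::collections::HashSet;

// Same predicate as Subscriber::filter, with plain types for illustration.
fn passes(
    wanted_subjects: &HashSet<String>,
    wanted_subject_dids: &HashSet<String>,
    wanted_sources: &HashSet<String>,
    subject: &str,
    subject_did: Option<&str>,
    source: &str,
) -> bool {
    // subject + subject DIDs are logical OR (empty sets match everything)
    let subject_ok = (wanted_subjects.is_empty() && wanted_subject_dids.is_empty())
        || wanted_subjects.contains(subject)
        || subject_did
            .map(|did| wanted_subject_dids.contains(did))
            .unwrap_or(false);
    // subjects together with sources are logical AND
    let source_ok = wanted_sources.is_empty() || wanted_sources.contains(source);
    subject_ok && source_ok
}

fn main() {
    let none: HashSet<String> = HashSet::new();
    let subjects: HashSet<String> = ["at://a".to_string()].into();
    // no filters set: everything passes
    assert!(passes(&none, &none, &none, "at://b", None, "any.source"));
    // subject filter set: a non-matching subject is rejected
    assert!(!passes(&subjects, &none, &none, "at://b", None, "any.source"));
}
```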
+1 -1
who-am-i/Cargo.toml
···
edition = "2024"

[dependencies]
-atrium-api = { version = "0.25.4", default-features = false, features = ["tokio", "agent"] }
+atrium-api = { version = "0.25.4", default-features = false }
atrium-identity = "0.1.5"
atrium-oauth = "0.1.3"
clap = { version = "4.5.40", features = ["derive"] }
+1 -1
who-am-i/src/lib.rs
···
mod server;
pub use dns_resolver::HickoryDnsTxtResolver;
+pub use oauth::{Client, authorize, client};
pub use server::serve;
-pub use oauth::{Client, client, authorize};
+1 -1
who-am-i/src/main.rs
···
-use who_am_i::serve;
use tokio_util::sync::CancellationToken;
+use who_am_i::serve;
#[tokio::main]
async fn main() {
+14 -17
who-am-i/src/oauth.rs
···
+use crate::HickoryDnsTxtResolver;
use atrium_identity::{
    did::{CommonDidResolver, CommonDidResolverConfig, DEFAULT_PLC_DIRECTORY_URL},
    handle::{AtprotoHandleResolver, AtprotoHandleResolverConfig},
};
use atrium_oauth::{
-    AuthorizeOptions,
+    AtprotoLocalhostClientMetadata, AuthorizeOptions, DefaultHttpClient, KnownScope, OAuthClient,
+    OAuthClientConfig, OAuthResolverConfig, Scope,
    store::{session::MemorySessionStore, state::MemoryStateStore},
-    AtprotoLocalhostClientMetadata, DefaultHttpClient, KnownScope, OAuthClient, OAuthClientConfig,
-    OAuthResolverConfig, Scope,
};
use std::sync::Arc;
-use crate::HickoryDnsTxtResolver;
pub type Client = OAuthClient<
    MemoryStateStore,
···
    let config = OAuthClientConfig {
        client_metadata: AtprotoLocalhostClientMetadata {
            redirect_uris: Some(vec![String::from("http://127.0.0.1:9997/authorized")]),
-            scopes: Some(vec![
-                Scope::Known(KnownScope::Atproto),
-            ]),
+            scopes: Some(vec![Scope::Known(KnownScope::Atproto)]),
        },
        keys: None,
        resolver: OAuthResolverConfig {
···
}
pub async fn authorize(client: &Client, handle: &str) -> String {
-    let Ok(url) = client.authorize(
-        handle,
-        AuthorizeOptions {
-            scopes: vec![
-                Scope::Known(KnownScope::Atproto),
-            ],
-            ..Default::default()
-        },
-    )
-    .await else {
+    let Ok(url) = client
+        .authorize(
+            handle,
+            AuthorizeOptions {
+                scopes: vec![Scope::Known(KnownScope::Atproto)],
+                ..Default::default()
+            },
+        )
+        .await
+    else {
        panic!("failed to authorize");
    };
    url
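Given the `pub use oauth::{Client, authorize, client}` re-export in lib.rs above, the flow this file supports is: build the localhost client, then turn a handle into an authorization URL. A minimal sketch (the handle is a placeholder; per the `let Ok(..) else` above, `authorize` panics on failure):

```rust
use who_am_i::{authorize, client};

#[tokio::main]
async fn main() {
    // Localhost OAuth client as configured above: atproto scope,
    // redirect_uri http://127.0.0.1:9997/authorized.
    let oauth_client = client();
    // Placeholder handle; resolution goes through the DNS TXT / DID resolvers
    // configured above, returning the URL to send the user to.
    let url = authorize(&oauth_client, "alice.example.com").await;
    println!("authorize at: {url}");
}
```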
+16 -18
who-am-i/src/server.rs
···
-
use atrium_api::agent::SessionManager;
-use std::error::Error;
-use metrics::{histogram, counter};
-use std::sync::Arc;
+use dropshot::{
+    ApiDescription, Body, ConfigDropshot, ConfigLogging, ConfigLoggingLevel, HttpError,
+    HttpResponse, HttpResponseSeeOther, Query, RequestContext, ServerBuilder, ServerContext,
+    endpoint, http_response_see_other,
+};
use http::{
-    header::{ORIGIN, USER_AGENT},
    Response, StatusCode,
+    header::{ORIGIN, USER_AGENT},
};
-use dropshot::{
-    Body, HttpResponseSeeOther, http_response_see_other,
-    ApiDescription, ConfigDropshot, ConfigLogging, ConfigLoggingLevel, RequestContext,
-    ServerBuilder, endpoint, HttpResponse, HttpError, ServerContext, Query,
-};
+use metrics::{counter, histogram};
+use std::error::Error;
+use std::sync::Arc;
use atrium_oauth::CallbackParams;
use schemars::JsonSchema;
···
use tokio::time::Instant;
use tokio_util::sync::CancellationToken;
-use crate::{Client, client, authorize};
+use crate::{Client, authorize, client};
const INDEX_HTML: &str = include_str!("../static/index.html");
const FAVICON: &[u8] = include_bytes!("../static/favicon.ico");
-pub async fn serve(
-    shutdown: CancellationToken
-) -> Result<(), Box<dyn Error + Send + Sync>> {
+pub async fn serve(shutdown: CancellationToken) -> Result<(), Box<dyn Error + Send + Sync>> {
    let config_logging = ConfigLogging::StderrTerminal {
        level: ConfigLoggingLevel::Info,
    };
-    let log = config_logging
-        .to_logger("example-basic")?;
+    let log = config_logging.to_logger("example-basic")?;
    let mut api = ApiDescription::new();
    api.register(index).unwrap();
···
        .json()?,
    );
-    let ctx = Context { spec, client: client().into() };
+    let ctx = Context {
+        spec,
+        client: client().into(),
+    };
    let server = ServerBuilder::new(api, ctx, log)
        .config(ConfigDropshot {
···
}
// TODO: cors for HttpError
-
/// Serve index page as html
#[endpoint {