- [x] `at://` parsing and struct
- [x] TID codecs
- [x] XRPC client
- [x] DID & handle resolution service with a cache
- [ ] Structs with validation for the common lexicons
- [ ] Probably codegen for doing this with other lexicons
- [ ] Extended XRPC client with support for validated inputs/outputs
- [ ] OAuth stuff

testing linkify: https://oppi.li oppi.li

let's try `ul` items now:

- foo
- bar

and ol:

1. foo
2. bar
3. baz

```rust
mod client;
mod error;
mod fs;
mod resolver;

use atrium_api::{client::AtpServiceClient, com, types};
use atrium_common::resolver::Resolver;
use atrium_identity::identity_resolver::ResolvedIdentity;
use atrium_repo::{Repository, blockstore::CarStore};
use atrium_xrpc_client::isahc::IsahcClient;
use fuser::MountOption;
use futures::{StreamExt, stream};
use indicatif::{MultiProgress, ProgressBar, ProgressStyle};
use std::{
    collections::HashMap,
    io::{Cursor, Write},
    path::PathBuf,
    sync::Arc,
};
use xdg::BaseDirectories;

fn main() {
    let rt = tokio::runtime::Runtime::new().unwrap();

    let matches = clap::command!()
        .arg(
            clap::Arg::new("handles")
                .index(1)
                .required(true)
                .num_args(1..)
                .help("One or more handles to download and mount"),
        )
        .arg(
            clap::Arg::new("mountpoint")
                .short('m')
                .action(clap::ArgAction::Set)
                .value_parser(clap::value_parser!(PathBuf)),
        )
        .get_matches();

    let handles = matches
        .get_many::<String>("handles")
        .unwrap()
        .cloned()
        .collect::<Vec<_>>();
    let mountpoint = matches
        .get_one::<PathBuf>("mountpoint")
        .map(ToOwned::to_owned)
        .unwrap_or(PathBuf::from("mnt"));
    let _ = std::fs::create_dir_all(&mountpoint);

    let resolver = Arc::new(resolver::id_resolver());
    let bars = Arc::new(MultiProgress::new());
    let repos = rt.block_on(
        stream::iter(handles)
            .then(|handle| {
                let h = handle.clone();
                let r = Arc::clone(&resolver);
                let b = Arc::clone(&bars);
                async move {
                    let id = r.resolve(&h).await?;
                    let bytes = cached_download(&id, &b).await?;
                    let repo = build_repo(bytes).await?;
                    Ok::<_, error::Error>((id.did, repo))
                }
            })
            .collect::<Vec<_>>(),
    );

    let (success, errors): (Vec<_>, Vec<_>) = repos.into_iter().partition(|r| r.is_ok());
    for e in errors {
        eprintln!("{:?}", e.as_ref().unwrap_err());
    }
    let repos = success
        .into_iter()
        .map(|s| s.unwrap())
        .collect::<HashMap<_, _>>();

    // construct the fs
    let mut fs = fs::PdsFs::new();
    for (did, repo) in repos {
        rt.block_on(fs.add(did, repo))
    }

    // mount
    let options = vec![MountOption::RO, MountOption::FSName("pdsfs".to_string())];
    let join_handle = fuser::spawn_mount2(fs, &mountpoint, &options).unwrap();
    println!("mounted at {mountpoint:?}");
    print!("hit enter to unmount and exit...");
    std::io::stdout().flush().unwrap();

    // Wait for user input
    let mut input = String::new();
    std::io::stdin().read_line(&mut input).unwrap();

    join_handle.join();
    std::fs::remove_dir(&mountpoint).unwrap();
    println!("unmounted {mountpoint:?}");
}

async fn cached_download(
    id: &ResolvedIdentity,
    m: &MultiProgress,
) -> Result<Vec<u8>, error::Error> {
    let mut pb = ProgressBar::new_spinner();
    pb.set_style(
        ProgressStyle::default_spinner()
            .template("{spinner:.green} [{elapsed_precise}] {msg}")
            .unwrap()
            .tick_strings(&["⠋", "⠙", "⠹", "⠸", "⠼", "⠴", "⠦", "⠧", "⠇", "⠏"]),
    );
    pb.enable_steady_tick(std::time::Duration::from_millis(100));
    pb = m.add(pb);

    let dirs = BaseDirectories::new();
    let dir = dirs
        .get_cache_home()
        .expect("$HOME is absent")
        .join("pdsfs");
    tokio::fs::create_dir_all(&dir).await?;

    let file = dir.join(&id.did);
    let exists = std::fs::exists(&file)?;
    let bytes = if !exists {
        pb.set_message(format!("downloading CAR file for...{}", id.did));
        download_car_file(id, &pb).await?
    } else {
        pb.set_message(format!("using cached CAR file for...{}", id.did));
        tokio::fs::read(&file).await?
    };

    // write to disk
    if !exists {
        tokio::fs::write(&file, &bytes).await?;
    }

    pb.finish();
    Ok(bytes)
}

async fn download_car_file(
    id: &ResolvedIdentity,
    pb: &ProgressBar,
) -> Result<Vec<u8>, error::Error> {
    // download the entire car file first before mounting it as a fusefs
    let client = AtpServiceClient::new(IsahcClient::new(&id.pds));
    let did = types::string::Did::new(id.did.clone()).unwrap();
    let bytes = client
        .service
        .com
        .atproto
        .sync
        .get_repo(com::atproto::sync::get_repo::Parameters::from(
            com::atproto::sync::get_repo::ParametersData { did, since: None },
        ))
        .await?;

    pb.finish_with_message(format!("download complete for \t...\t{}", id.did));

    Ok(bytes)
}

async fn build_repo(
    bytes: Vec<u8>,
) -> Result<Repository<CarStore<Cursor<Vec<u8>>>>, error::Error> {
    let store = CarStore::open(Cursor::new(bytes)).await?;
    let root = store.roots().next().unwrap();
    let repo = Repository::open(store, root).await?;

    Ok(repo)
}
```

```
foo
bar
```
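For reference, here is a rough sketch of how the program above might be invoked, going by its clap definition (the handles shown are placeholders, and the assumption that the crate builds a single binary runnable via `cargo run` is mine; when `-m` is omitted, the code falls back to a `mnt` directory in the current working directory):

```shell
# hypothetical invocation: download two repos and mount them read-only under /tmp/pds
cargo run --release -- alice.example.com bob.example.com -m /tmp/pds
```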
### macOS users will have to set up a Nix builder first

In order to build Tangled's dev VM on macOS, you will first need to set up a
Linux Nix builder. The recommended way to do so is to run a
[`darwin.linux-builder`](https://nixos.org/manual/nixpkgs/unstable/#sec-darwin-builder)
VM and to register it in `nix.conf` as a builder for Linux with the same
architecture as your Mac (`aarch64-linux` if you are using Apple Silicon).

> IMPORTANT: You must build `darwin.linux-builder` somewhere other than inside
> the tangled repo so that it doesn't conflict with the other VM. For example,
> you can do
>
> ```shell
> cd $(mktemp -d buildervm.XXXXX) && nix run nixpkgs#darwin.linux-builder
> ```
>
> to store the builder VM in a temporary dir.
>
> You should read and follow [all the other instructions][darwin builder vm]
> to avoid subtle problems.

Alternatively, you can use any other method to set up a Linux machine with
`nix` installed that you can `sudo ssh` into (in other words, the root user on
your Mac has to be able to ssh into the Linux machine without entering a
password) and that has the same architecture as your Mac. See the
[remote builder instructions](https://nix.dev/manual/nix/2.28/advanced-topics/distributed-builds.html#requirements)
for how to register such a builder in `nix.conf`; a rough example entry is
sketched below.

> WARNING: If you'd like to use
> [`nixos-lima`](https://github.com/nixos-lima/nixos-lima) or
> [Orbstack](https://orbstack.dev/), note that setting them up so that
> `sudo ssh` works can be tricky. It seems to be
> [possible with Orbstack](https://github.com/orgs/orbstack/discussions/1669).
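As a rough illustration, a builder registration in `nix.conf` might look like the following. The user, host name, key path, and host key are placeholder assumptions; use the values the linked Nix and nixpkgs manuals tell you to generate for your own setup.

```
# illustrative sketch of a nix.conf builder entry, not a copy-paste config
# fields: store URI, system, ssh key, max jobs, speed factor,
#         supported features, mandatory features, base64-encoded host key
builders = ssh-ng://builder@linux-builder aarch64-linux /etc/nix/builder_ed25519 4 - - - <base64-host-key>
builders-use-substitutes = true
```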