Monorepo for wisp.place, a static site hosting service built on top of the AT Protocol.

add splitting to cli

Changed files: +2696 -177

cli/Cargo.lock (+42 -51)
···
[[package]]
name = "axum"
-version = "0.7.9"
+version = "0.8.7"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "edca88bc138befd0323b20752846e6587272d3b03b0343c8ea28a6f819e6e71f"
+checksum = "5b098575ebe77cb6d14fc7f32749631a6e44edbef6b796f89b020e99ba20d425"
dependencies = [
-"async-trait",
"axum-core",
"bytes",
+"form_urlencoded",
"futures-util",
"http",
"http-body",
···
"mime",
"percent-encoding",
"pin-project-lite",
-"rustversion",
-"serde",
+"serde_core",
"serde_json",
"serde_path_to_error",
"serde_urlencoded",
"sync_wrapper",
"tokio",
-"tower 0.5.2",
+"tower",
"tower-layer",
"tower-service",
"tracing",
···
[[package]]
name = "axum-core"
-version = "0.4.5"
+version = "0.5.5"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "09f2bd6146b97ae3359fa0cc6d6b376d9539582c7b4220f041a33ec24c226199"
+checksum = "59446ce19cd142f8833f856eb31f3eb097812d1479ab224f54d72428ca21ea22"
dependencies = [
-"async-trait",
"bytes",
-"futures-util",
+"futures-core",
"http",
"http-body",
"http-body-util",
"mime",
"pin-project-lite",
-"rustversion",
"sync_wrapper",
"tower-layer",
"tower-service",
···
"miette",
"multibase",
"multihash",
-"n0-future",
+"n0-future 0.1.3",
"ouroboros",
"p256",
"rand 0.9.2",
···
[[package]]
name = "matchit"
-version = "0.7.3"
+version = "0.8.4"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "0e7465ac9959cc2b1404e8e2367b43684a6d13790fe23056cc8c6c5a6b7bcb94"
+checksum = "47e1ffaa40ddd1f3ed91f717a33c8c0ee23fff369e3aa8772b9605cc1d22f4c3"
[[package]]
name = "memchr"
···
version = "0.1.3"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "7bb0e5d99e681ab3c938842b96fcb41bf8a7bb4bfdb11ccbd653a7e83e06c794"
+dependencies = [
+"cfg_aliases",
+"derive_more",
+"futures-buffered",
+"futures-lite",
+"futures-util",
+"js-sys",
+"pin-project",
+"send_wrapper",
+"tokio",
+"tokio-util",
+"wasm-bindgen",
+"wasm-bindgen-futures",
+"web-time",
+]
+
+[[package]]
+name = "n0-future"
+version = "0.3.1"
+source = "registry+https://github.com/rust-lang/crates.io-index"
+checksum = "8c0709ac8235ce13b82bc4d180ee3c42364b90c1a8a628c3422d991d75a728b5"
dependencies = [
"cfg_aliases",
"derive_more",
···
"tokio",
"tokio-rustls",
"tokio-util",
-"tower 0.5.2",
-"tower-http 0.6.6",
+"tower",
+"tower-http",
"tower-service",
"url",
"wasm-bindgen",
···
[[package]]
name = "tower"
-version = "0.4.13"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "b8fa9be0de6cf49e536ce1851f987bd21a43b771b09473c3549a6c853db37c1c"
-dependencies = [
-"tower-layer",
-"tower-service",
-"tracing",
-]
-
-[[package]]
-name = "tower"
version = "0.5.2"
source = "registry+https://github.com/rust-lang/crates.io-index"
checksum = "d039ad9159c98b70ecfd540b2573b97f7f52c3e8d9f8ad57a24b916a536975f9"
···
[[package]]
name = "tower-http"
-version = "0.5.2"
+version = "0.6.6"
source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "1e9cd434a998747dd2c4276bc96ee2e0c7a2eadf3cae88e52be55a05fa9053f5"
+checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2"
dependencies = [
"async-compression",
"bitflags",
···
"http-body-util",
"http-range-header",
"httpdate",
+"iri-string",
"mime",
"mime_guess",
"percent-encoding",
"pin-project-lite",
"tokio",
"tokio-util",
+"tower",
"tower-layer",
"tower-service",
"tracing",
-]
-
-[[package]]
-name = "tower-http"
-version = "0.6.6"
-source = "registry+https://github.com/rust-lang/crates.io-index"
-checksum = "adc82fd73de2a9722ac5da747f12383d2bfdb93591ee6c58486e0097890f05f2"
-dependencies = [
-"bitflags",
-"bytes",
-"futures-util",
-"http",
-"http-body",
-"iri-string",
-"pin-project-lite",
-"tower 0.5.2",
-"tower-layer",
-"tower-service",
[[package]]
···
[[package]]
name = "wisp-cli"
-version = "0.2.0"
+version = "0.3.0"
dependencies = [
"axum",
"base64 0.22.1",
···
"mime_guess",
"multibase",
"multihash",
-"n0-future",
+"n0-future 0.3.1",
"reqwest",
"rustversion",
"serde",
···
"sha2",
"shellexpand",
"tokio",
-"tower 0.4.13",
-"tower-http 0.5.2",
+"tower",
+"tower-http",
"url",
"walkdir",
cli/Cargo.toml (+5 -5)
···
[package]
name = "wisp-cli"
-version = "0.2.0"
+version = "0.3.0"
edition = "2024"
[features]
···
multihash = "0.19.3"
multibase = "0.9"
sha2 = "0.10"
-axum = "0.7"
-tower-http = { version = "0.5", features = ["fs", "compression-gzip"] }
-tower = "0.4"
-n0-future = "0.1"
+axum = "0.8.7"
+tower-http = { version = "0.6.6", features = ["fs", "compression-gzip"] }
+tower = "0.5.2"
+n0-future = "0.3.1"
chrono = "0.4"
url = "2.5"
cli/lexicons/place/wisp/fs.json (-51)
···
-{
-  "lexicon": 1,
-  "id": "place.wisp.fs",
-  "defs": {
-    "main": {
-      "type": "record",
-      "description": "Virtual filesystem manifest for a Wisp site",
-      "record": {
-        "type": "object",
-        "required": ["site", "root", "createdAt"],
-        "properties": {
-          "site": { "type": "string" },
-          "root": { "type": "ref", "ref": "#directory" },
-          "fileCount": { "type": "integer", "minimum": 0, "maximum": 1000 },
-          "createdAt": { "type": "string", "format": "datetime" }
-        }
-      }
-    },
-    "file": {
-      "type": "object",
-      "required": ["type", "blob"],
-      "properties": {
-        "type": { "type": "string", "const": "file" },
-        "blob": { "type": "blob", "accept": ["*/*"], "maxSize": 1000000, "description": "Content blob ref" },
-        "encoding": { "type": "string", "enum": ["gzip"], "description": "Content encoding (e.g., gzip for compressed files)" },
-        "mimeType": { "type": "string", "description": "Original MIME type before compression" },
-        "base64": { "type": "boolean", "description": "True if blob content is base64-encoded (used to bypass PDS content sniffing)" }
-      }
-    },
-    "directory": {
-      "type": "object",
-      "required": ["type", "entries"],
-      "properties": {
-        "type": { "type": "string", "const": "directory" },
-        "entries": {
-          "type": "array",
-          "maxLength": 500,
-          "items": { "type": "ref", "ref": "#entry" }
-        }
-      }
-    },
-    "entry": {
-      "type": "object",
-      "required": ["name", "node"],
-      "properties": {
-        "name": { "type": "string", "maxLength": 255 },
-        "node": { "type": "union", "refs": ["#file", "#directory"] }
-      }
-    }
-  }
-}
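Note: the deleted lexicon above now lives as generated Rust (cli/src/place_wisp/fs.rs below, extended with a new #subfs union variant). As a hedged, illustrative sketch of what the schema describes, the following builds a minimal place.wisp.fs manifest using the generated builders that appear later in this diff. The DID, rkey, and site name are made-up example values, and builder availability is assumed from this diff, not verified against the crate.

// Illustrative only, not part of this commit: a manifest whose "assets"
// subtree points at a separate place.wisp.subfs record by AT-URI.
use jacquard::CowStr;
use jacquard_common::types::string::{AtUri, Datetime};

use crate::place_wisp::fs::{Directory, Entry, EntryNode, Fs, Subfs};

fn example_manifest() -> Fs<'static> {
    // Made-up AT-URI; AtUri::new_cow validates the string (see fs.rs below).
    let subject = AtUri::new_cow(CowStr::from(
        "at://did:plc:example/place.wisp.subfs/3lexamplerkey",
    ))
    .expect("valid AT-URI");

    let root = Directory::new()
        .r#type(CowStr::from("directory"))
        .entries(vec![
            Entry::new()
                .name(CowStr::from("assets"))
                .node(EntryNode::Subfs(Box::new(
                    Subfs::new()
                        .r#type(CowStr::from("subfs"))
                        .subject(subject)
                        // flat: false keeps "assets" as a real subdirectory
                        .flat(false)
                        .build(),
                )))
                .build(),
        ])
        .build();

    Fs::new()
        .site(CowStr::from("my-site"))
        .root(root)
        .file_count(0)
        .created_at(Datetime::now())
        .build()
}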
cli/src/blob_map.rs (+5 -1)
···
// BlobRef is an enum with Blob variant, which has a ref field (CidLink)
let blob_ref = &file_node.blob;
let cid_string = blob_ref.blob().r#ref.to_string();
-
+// Store with full path (mirrors TypeScript implementation)
blob_map.insert(
full_path,
···
EntryNode::Directory(subdir) => {
let sub_map = extract_blob_map_recursive(subdir, full_path);
blob_map.extend(sub_map);
+}
+EntryNode::Subfs(_) => {
+// Subfs nodes don't contain blobs directly - they reference other records
+// Skip them in blob map extraction
}
EntryNode::Unknown(_) => {
// Skip unknown node types
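Note: for orientation, blob_map.rs flattens the manifest tree into path → (BlobRef, mime) lookups so unchanged files can be reused across deploys; subfs references carry no blobs inline and are merged separately. A self-contained sketch of that traversal, using stand-in types rather than the generated jacquard ones:

// Simplified stand-ins for the generated place.wisp.fs node types.
use std::collections::HashMap;

enum Node {
    File { cid: String },
    Directory { entries: Vec<(String, Node)> },
    Subfs { subject: String }, // reference to another record; no blobs inline
}

fn extract_blob_map(node: &Node, prefix: String, out: &mut HashMap<String, String>) {
    match node {
        Node::File { cid } => {
            // Store with full path, like the real implementation
            out.insert(prefix, cid.clone());
        }
        Node::Directory { entries } => {
            for (name, child) in entries {
                let path = if prefix.is_empty() {
                    name.clone()
                } else {
                    format!("{}/{}", prefix, name)
                };
                extract_blob_map(child, path, out);
            }
        }
        // Subfs nodes are skipped here; their blobs come from fetching the
        // referenced records (see merge_subfs_blob_maps in the deploy path).
        Node::Subfs { .. } => {}
    }
}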
cli/src/lib.rs (+9)
···
+// @generated by jacquard-lexicon. DO NOT EDIT.
+//
+// This file was automatically generated from Lexicon schemas.
+// Any manual changes will be overwritten on the next regeneration.
+
+pub mod builder_types;
+
+#[cfg(feature = "place_wisp")]
+pub mod place_wisp;
cli/src/main.rs (+195 -12)
···
mod download;
mod pull;
mod serve;
+mod subfs_utils;
use clap::{Parser, Subcommand};
use jacquard::CowStr;
···
println!("Deploying site '{}'...", site_name);
// Try to fetch existing manifest for incremental updates
-let existing_blob_map: HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)> = {
+let (existing_blob_map, old_subfs_uris): (HashMap<String, (jacquard_common::types::blob::BlobRef<'static>, String)>, Vec<(String, String)>) = {
use jacquard_common::types::string::AtUri;
-
+// Get the DID for this session
let session_info = agent.session_info().await;
if let Some((did, _)) = session_info {
···
match response.into_output() {
Ok(record_output) => {
let existing_manifest = record_output.value;
-let blob_map = blob_map::extract_blob_map(&existing_manifest.root);
-println!("Found existing manifest with {} files, checking for changes...", blob_map.len());
-blob_map
+let mut blob_map = blob_map::extract_blob_map(&existing_manifest.root);
+println!("Found existing manifest with {} files in main record", blob_map.len());
+
+// Extract subfs URIs from main record
+let subfs_uris = subfs_utils::extract_subfs_uris(&existing_manifest.root, String::new());
+
+if !subfs_uris.is_empty() {
+println!("Found {} subfs records, fetching for blob reuse...", subfs_uris.len());
+
+// Merge blob maps from all subfs records
+match subfs_utils::merge_subfs_blob_maps(agent, subfs_uris.clone(), &mut blob_map).await {
+Ok(merged_count) => {
+println!("Total blob map: {} files (main + {} from subfs)", blob_map.len(), merged_count);
+}
+Err(e) => {
+eprintln!("⚠️ Failed to merge some subfs blob maps: {}", e);
+}
+}
+
+(blob_map, subfs_uris)
+} else {
+(blob_map, Vec::new())
+}
}
Err(_) => {
println!("No existing manifest found, uploading all files...");
-HashMap::new()
+(HashMap::new(), Vec::new())
}
}
}
Err(_) => {
// Record doesn't exist yet - this is a new site
println!("No existing manifest found, uploading all files...");
-HashMap::new()
+(HashMap::new(), Vec::new())
}
}
} else {
println!("No existing manifest found (invalid URI), uploading all files...");
-HashMap::new()
+(HashMap::new(), Vec::new())
}
} else {
println!("No existing manifest found (could not get DID), uploading all files...");
-HashMap::new()
+(HashMap::new(), Vec::new())
}
};
···
let (root_dir, total_files, reused_count) = build_directory(agent, &path, &existing_blob_map, String::new()).await?;
let uploaded_count = total_files - reused_count;
-// Create the Fs record
+// Check if we need to split into subfs records
+const MAX_MANIFEST_SIZE: usize = 140 * 1024; // 140KB (PDS limit is 150KB)
+const FILE_COUNT_THRESHOLD: usize = 250; // Start splitting at this many files
+const TARGET_FILE_COUNT: usize = 200; // Keep main manifest under this
+
+let mut working_directory = root_dir;
+let mut current_file_count = total_files;
+let mut new_subfs_uris: Vec<(String, String)> = Vec::new();
+
+// Estimate initial manifest size
+let mut manifest_size = subfs_utils::estimate_directory_size(&working_directory);
+
+if total_files >= FILE_COUNT_THRESHOLD || manifest_size > MAX_MANIFEST_SIZE {
+println!("\n⚠️ Large site detected ({} files, {:.1}KB manifest), splitting into subfs records...",
+total_files, manifest_size as f64 / 1024.0);
+
+let mut attempts = 0;
+const MAX_SPLIT_ATTEMPTS: usize = 50;
+
+while (manifest_size > MAX_MANIFEST_SIZE || current_file_count > TARGET_FILE_COUNT) && attempts < MAX_SPLIT_ATTEMPTS {
+attempts += 1;
+
+// Find large directories to split
+let directories = subfs_utils::find_large_directories(&working_directory, String::new());
+
+if let Some(largest_dir) = directories.first() {
+println!(" Split #{}: {} ({} files, {:.1}KB)",
+attempts, largest_dir.path, largest_dir.file_count, largest_dir.size as f64 / 1024.0);
+
+// Create a subfs record for this directory
+use jacquard_common::types::string::Tid;
+let subfs_tid = Tid::now_0();
+let subfs_rkey = subfs_tid.to_string();
+
+let subfs_manifest = crate::place_wisp::subfs::SubfsRecord::new()
+.root(convert_fs_dir_to_subfs_dir(largest_dir.directory.clone()))
+.file_count(Some(largest_dir.file_count as i64))
+.created_at(Datetime::now())
+.build();
+
+// Upload subfs record
+let subfs_output = agent.put_record(
+RecordKey::from(Rkey::new(&subfs_rkey).into_diagnostic()?),
+subfs_manifest
+).await.into_diagnostic()?;
+
+let subfs_uri = subfs_output.uri.to_string();
+println!(" ✅ Created subfs: {}", subfs_uri);
+
+// Replace directory with subfs node (flat: false to preserve structure)
+working_directory = subfs_utils::replace_directory_with_subfs(
+working_directory,
+&largest_dir.path,
+&subfs_uri,
+false // Preserve directory structure
+)?;
+
+new_subfs_uris.push((subfs_uri, largest_dir.path.clone()));
+current_file_count -= largest_dir.file_count;
+
+// Recalculate manifest size
+manifest_size = subfs_utils::estimate_directory_size(&working_directory);
+println!(" → Manifest now {:.1}KB with {} files ({} subfs total)",
+manifest_size as f64 / 1024.0, current_file_count, new_subfs_uris.len());
+
+if manifest_size <= MAX_MANIFEST_SIZE && current_file_count <= TARGET_FILE_COUNT {
+println!("✅ Manifest now fits within limits");
+break;
+}
+} else {
+println!(" No more subdirectories to split - stopping");
+break;
+}
+}
+
+if attempts >= MAX_SPLIT_ATTEMPTS {
+return Err(miette::miette!(
+"Exceeded maximum split attempts ({}). Manifest still too large: {:.1}KB with {} files",
+MAX_SPLIT_ATTEMPTS,
+manifest_size as f64 / 1024.0,
+current_file_count
+));
+}
+
+println!("✅ Split complete: {} subfs records, {} files in main manifest, {:.1}KB",
+new_subfs_uris.len(), current_file_count, manifest_size as f64 / 1024.0);
+} else {
+println!("Manifest created ({} files, {:.1}KB) - no splitting needed",
+total_files, manifest_size as f64 / 1024.0);
+}
+
+// Create the final Fs record
let fs_record = Fs::new()
.site(CowStr::from(site_name.clone()))
-.root(root_dir)
-.file_count(total_files as i64)
+.root(working_directory)
+.file_count(current_file_count as i64)
.created_at(Datetime::now())
.build();
···
println!("\n✓ Deployed site '{}': {}", site_name, output.uri);
println!(" Total files: {} ({} reused, {} uploaded)", total_files, reused_count, uploaded_count);
println!(" Available at: https://sites.wisp.place/{}/{}", did, site_name);
+
+// Clean up old subfs records
+if !old_subfs_uris.is_empty() {
+println!("\nCleaning up {} old subfs records...", old_subfs_uris.len());
+
+let mut deleted_count = 0;
+let mut failed_count = 0;
+
+for (uri, _path) in old_subfs_uris {
+match subfs_utils::delete_subfs_record(agent, &uri).await {
+Ok(_) => {
+deleted_count += 1;
+println!(" 🗑️ Deleted old subfs: {}", uri);
+}
+Err(e) => {
+failed_count += 1;
+eprintln!(" ⚠️ Failed to delete {}: {}", uri, e);
+}
+}
+}
+
+if failed_count > 0 {
+eprintln!("⚠️ Cleanup completed with {} deleted, {} failed", deleted_count, failed_count);
+} else {
+println!("✅ Cleanup complete: {} old subfs records deleted", deleted_count);
+}
+}
Ok(())
}
···
))
}
+/// Convert fs::Directory to subfs::Directory
+/// They have the same structure, but different types
+fn convert_fs_dir_to_subfs_dir(fs_dir: place_wisp::fs::Directory<'static>) -> place_wisp::subfs::Directory<'static> {
+use place_wisp::subfs::{Directory as SubfsDirectory, Entry as SubfsEntry, EntryNode as SubfsEntryNode, File as SubfsFile};
+
+let subfs_entries: Vec<SubfsEntry> = fs_dir.entries.into_iter().map(|entry| {
+let node = match entry.node {
+place_wisp::fs::EntryNode::File(file) => {
+SubfsEntryNode::File(Box::new(SubfsFile::new()
+.r#type(file.r#type)
+.blob(file.blob)
+.encoding(file.encoding)
+.mime_type(file.mime_type)
+.base64(file.base64)
+.build()))
+}
+place_wisp::fs::EntryNode::Directory(dir) => {
+SubfsEntryNode::Directory(Box::new(convert_fs_dir_to_subfs_dir(*dir)))
+}
+place_wisp::fs::EntryNode::Subfs(subfs) => {
+// Nested subfs in the directory we're converting
+// Note: subfs::Subfs doesn't have the 'flat' field - that's only in fs::Subfs
+SubfsEntryNode::Subfs(Box::new(place_wisp::subfs::Subfs::new()
+.r#type(subfs.r#type)
+.subject(subfs.subject)
+.build()))
+}
+place_wisp::fs::EntryNode::Unknown(unknown) => {
+SubfsEntryNode::Unknown(unknown)
+}
+};
+
+SubfsEntry::new()
+.name(entry.name)
+.node(node)
+.build()
+}).collect();
+
+SubfsDirectory::new()
+.r#type(fs_dir.r#type)
+.entries(subfs_entries)
+.build()
+}
+
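Note: the split loop added to run() above is the heart of this change: estimate the manifest size, carve the largest directory out into its own place.wisp.subfs record, and repeat (bounded) until the main record fits under the PDS limit. Below is a toy, self-contained sketch of the same greedy strategy; the types, size accounting, and URI format are stand-ins, not the real subfs_utils API.

// Toy sketch only: greedy splitting until the manifest fits.
struct Dir {
    name: String,
    files: usize,
    bytes: usize,
    children: Vec<Dir>,
}

const MAX_MANIFEST_SIZE: usize = 140 * 1024; // mirrors the constant in main.rs
const TARGET_FILE_COUNT: usize = 200;

fn split_until_fits(mut root: Dir) -> (Dir, Vec<String>) {
    let mut subfs_uris = Vec::new();
    for _attempt in 0..50 {
        // bounded, like MAX_SPLIT_ATTEMPTS
        if root.bytes <= MAX_MANIFEST_SIZE && root.files <= TARGET_FILE_COUNT {
            break; // manifest fits, stop splitting
        }
        // Carve out the heaviest immediate child. (The real code scans the
        // whole tree with find_large_directories and leaves a subfs stub in
        // place of the removed directory instead of dropping it.)
        let Some(idx) = (0..root.children.len()).max_by_key(|&i| root.children[i].bytes)
        else {
            break; // nothing left to split
        };
        let carved = root.children.remove(idx);
        root.files -= carved.files;
        root.bytes -= carved.bytes;
        // Stand-in for uploading a place.wisp.subfs record via put_record.
        subfs_uris.push(format!("at://did:plc:example/place.wisp.subfs/{}", carved.name));
    }
    (root, subfs_uris)
}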
cli/src/place_wisp.rs (+2 -1)
···
// This file was automatically generated from Lexicon schemas.
// Any manual changes will be overwritten on the next regeneration.
-pub mod fs;
+pub mod fs;
+pub mod subfs;
cli/src/place_wisp/fs.rs (+261 -1)
···
description: None,
refs: vec![
::jacquard_common::CowStr::new_static("#file"),
-::jacquard_common::CowStr::new_static("#directory")
+::jacquard_common::CowStr::new_static("#directory"),
+::jacquard_common::CowStr::new_static("#subfs")
],
closed: None,
}),
···
}),
}),
);
+map.insert(
+::jacquard_common::smol_str::SmolStr::new_static("subfs"),
+::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
+description: None,
+required: Some(
+vec![
+::jacquard_common::smol_str::SmolStr::new_static("type"),
+::jacquard_common::smol_str::SmolStr::new_static("subject")
+],
+),
+nullable: None,
+properties: {
+#[allow(unused_mut)]
+let mut map = ::std::collections::BTreeMap::new();
+map.insert(
+::jacquard_common::smol_str::SmolStr::new_static("flat"),
+::jacquard_lexicon::lexicon::LexObjectProperty::Boolean(::jacquard_lexicon::lexicon::LexBoolean {
+description: None,
+default: None,
+r#const: None,
+}),
+);
+map.insert(
+::jacquard_common::smol_str::SmolStr::new_static("subject"),
+::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
+description: Some(
+::jacquard_common::CowStr::new_static(
+"AT-URI pointing to a place.wisp.subfs record containing this subtree.",
+),
+),
+format: Some(
+::jacquard_lexicon::lexicon::LexStringFormat::AtUri,
+),
+default: None,
+min_length: None,
+max_length: None,
+min_graphemes: None,
+max_graphemes: None,
+r#enum: None,
+r#const: None,
+known_values: None,
+}),
+);
+map.insert(
+::jacquard_common::smol_str::SmolStr::new_static("type"),
+::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
+description: None,
+format: None,
+default: None,
+min_length: None,
+max_length: None,
+min_graphemes: None,
+max_graphemes: None,
+r#enum: None,
+r#const: None,
+known_values: None,
+}),
+);
+map
+},
+}),
+);
map
},
}
···
File(Box<crate::place_wisp::fs::File<'a>>),
#[serde(rename = "place.wisp.fs#directory")]
Directory(Box<crate::place_wisp::fs::Directory<'a>>),
+#[serde(rename = "place.wisp.fs#subfs")]
+Subfs(Box<crate::place_wisp::fs::Subfs<'a>>),
}
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Entry<'a> {
···
});
+Ok(())
+}
+}
+
+#[jacquard_derive::lexicon]
+#[derive(
+serde::Serialize,
+serde::Deserialize,
+Debug,
+Clone,
+PartialEq,
+Eq,
+jacquard_derive::IntoStatic
+)]
+#[serde(rename_all = "camelCase")]
+pub struct Subfs<'a> {
+/// If true, the subfs record's root entries are merged (flattened) into the parent directory, replacing the subfs entry. If false (default), the subfs entries are placed in a subdirectory with the subfs entry's name. Flat merging is useful for splitting large directories across multiple records while maintaining a flat structure.
+#[serde(skip_serializing_if = "std::option::Option::is_none")]
+pub flat: Option<bool>,
+/// AT-URI pointing to a place.wisp.subfs record containing this subtree.
+#[serde(borrow)]
+pub subject: jacquard_common::types::string::AtUri<'a>,
+#[serde(borrow)]
+pub r#type: jacquard_common::CowStr<'a>,
+}
+
+pub mod subfs_state {
+
+pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
+#[allow(unused)]
+use ::core::marker::PhantomData;
+mod sealed {
+pub trait Sealed {}
+}
+/// State trait tracking which required fields have been set
+pub trait State: sealed::Sealed {
+type Type;
+type Subject;
+}
+/// Empty state - all required fields are unset
+pub struct Empty(());
+impl sealed::Sealed for Empty {}
+impl State for Empty {
+type Type = Unset;
+type Subject = Unset;
+}
+///State transition - sets the `type` field to Set
+pub struct SetType<S: State = Empty>(PhantomData<fn() -> S>);
+impl<S: State> sealed::Sealed for SetType<S> {}
+impl<S: State> State for SetType<S> {
+type Type = Set<members::r#type>;
+type Subject = S::Subject;
+}
+///State transition - sets the `subject` field to Set
+pub struct SetSubject<S: State = Empty>(PhantomData<fn() -> S>);
+impl<S: State> sealed::Sealed for SetSubject<S> {}
+impl<S: State> State for SetSubject<S> {
+type Type = S::Type;
+type Subject = Set<members::subject>;
+}
+/// Marker types for field names
+#[allow(non_camel_case_types)]
+pub mod members {
+///Marker type for the `type` field
+pub struct r#type(());
+///Marker type for the `subject` field
+pub struct subject(());
+}
+}
+
+/// Builder for constructing an instance of this type
+pub struct SubfsBuilder<'a, S: subfs_state::State> {
+_phantom_state: ::core::marker::PhantomData<fn() -> S>,
+__unsafe_private_named: (
+::core::option::Option<bool>,
+::core::option::Option<jacquard_common::types::string::AtUri<'a>>,
+::core::option::Option<jacquard_common::CowStr<'a>>,
+),
+_phantom: ::core::marker::PhantomData<&'a ()>,
+}
+
+impl<'a> Subfs<'a> {
+/// Create a new builder for this type
+pub fn new() -> SubfsBuilder<'a, subfs_state::Empty> {
+SubfsBuilder::new()
+}
+}
+
+impl<'a> SubfsBuilder<'a, subfs_state::Empty> {
+/// Create a new builder with all fields unset
+pub fn new() -> Self {
+SubfsBuilder {
+_phantom_state: ::core::marker::PhantomData,
+__unsafe_private_named: (None, None, None),
+_phantom: ::core::marker::PhantomData,
+}
+}
+}
+
+impl<'a, S: subfs_state::State> SubfsBuilder<'a, S> {
+/// Set the `flat` field (optional)
+pub fn flat(mut self, value: impl Into<Option<bool>>) -> Self {
+self.__unsafe_private_named.0 = value.into();
+self
+}
+/// Set the `flat` field to an Option value (optional)
+pub fn maybe_flat(mut self, value: Option<bool>) -> Self {
+self.__unsafe_private_named.0 = value;
+self
+}
+}
+
+impl<'a, S> SubfsBuilder<'a, S>
+where
+S: subfs_state::State,
+S::Subject: subfs_state::IsUnset,
+{
+/// Set the `subject` field (required)
+pub fn subject(
+mut self,
+value: impl Into<jacquard_common::types::string::AtUri<'a>>,
+) -> SubfsBuilder<'a, subfs_state::SetSubject<S>> {
+self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
+SubfsBuilder {
+_phantom_state: ::core::marker::PhantomData,
+__unsafe_private_named: self.__unsafe_private_named,
+_phantom: ::core::marker::PhantomData,
+}
+}
+}
+
+impl<'a, S> SubfsBuilder<'a, S>
+where
+S: subfs_state::State,
+S::Type: subfs_state::IsUnset,
+{
+/// Set the `type` field (required)
+pub fn r#type(
+mut self,
+value: impl Into<jacquard_common::CowStr<'a>>,
+) -> SubfsBuilder<'a, subfs_state::SetType<S>> {
+self.__unsafe_private_named.2 = ::core::option::Option::Some(value.into());
+SubfsBuilder {
+_phantom_state: ::core::marker::PhantomData,
+__unsafe_private_named: self.__unsafe_private_named,
+_phantom: ::core::marker::PhantomData,
+}
+}
+}
+
+impl<'a, S> SubfsBuilder<'a, S>
+where
+S: subfs_state::State,
+S::Type: subfs_state::IsSet,
+S::Subject: subfs_state::IsSet,
+{
+/// Build the final struct
+pub fn build(self) -> Subfs<'a> {
+Subfs {
+flat: self.__unsafe_private_named.0,
+subject: self.__unsafe_private_named.1.unwrap(),
+r#type: self.__unsafe_private_named.2.unwrap(),
+extra_data: Default::default(),
+}
+}
+/// Build the final struct with custom extra_data
+pub fn build_with_data(
+self,
+extra_data: std::collections::BTreeMap<
+jacquard_common::smol_str::SmolStr,
+jacquard_common::types::value::Data<'a>,
+>,
+) -> Subfs<'a> {
+Subfs {
+flat: self.__unsafe_private_named.0,
+subject: self.__unsafe_private_named.1.unwrap(),
+r#type: self.__unsafe_private_named.2.unwrap(),
+extra_data: Some(extra_data),
+}
+}
+}
+
+impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Subfs<'a> {
+fn nsid() -> &'static str {
+"place.wisp.fs"
+}
+fn def_name() -> &'static str {
+"subfs"
+}
+fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
+lexicon_doc_place_wisp_fs()
+}
+fn validate(
+&self,
+) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
Ok(())
cli/src/place_wisp/subfs.rs (+1408)
···
+
// @generated by jacquard-lexicon. DO NOT EDIT.
+
//
+
// Lexicon: place.wisp.subfs
+
//
+
// This file was automatically generated from Lexicon schemas.
+
// Any manual changes will be overwritten on the next regeneration.
+
+
#[jacquard_derive::lexicon]
+
#[derive(
+
serde::Serialize,
+
serde::Deserialize,
+
Debug,
+
Clone,
+
PartialEq,
+
Eq,
+
jacquard_derive::IntoStatic
+
)]
+
#[serde(rename_all = "camelCase")]
+
pub struct Directory<'a> {
+
#[serde(borrow)]
+
pub entries: Vec<crate::place_wisp::subfs::Entry<'a>>,
+
#[serde(borrow)]
+
pub r#type: jacquard_common::CowStr<'a>,
+
}
+
+
pub mod directory_state {
+
+
pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
+
#[allow(unused)]
+
use ::core::marker::PhantomData;
+
mod sealed {
+
pub trait Sealed {}
+
}
+
/// State trait tracking which required fields have been set
+
pub trait State: sealed::Sealed {
+
type Type;
+
type Entries;
+
}
+
/// Empty state - all required fields are unset
+
pub struct Empty(());
+
impl sealed::Sealed for Empty {}
+
impl State for Empty {
+
type Type = Unset;
+
type Entries = Unset;
+
}
+
///State transition - sets the `type` field to Set
+
pub struct SetType<S: State = Empty>(PhantomData<fn() -> S>);
+
impl<S: State> sealed::Sealed for SetType<S> {}
+
impl<S: State> State for SetType<S> {
+
type Type = Set<members::r#type>;
+
type Entries = S::Entries;
+
}
+
///State transition - sets the `entries` field to Set
+
pub struct SetEntries<S: State = Empty>(PhantomData<fn() -> S>);
+
impl<S: State> sealed::Sealed for SetEntries<S> {}
+
impl<S: State> State for SetEntries<S> {
+
type Type = S::Type;
+
type Entries = Set<members::entries>;
+
}
+
/// Marker types for field names
+
#[allow(non_camel_case_types)]
+
pub mod members {
+
///Marker type for the `type` field
+
pub struct r#type(());
+
///Marker type for the `entries` field
+
pub struct entries(());
+
}
+
}
+
+
/// Builder for constructing an instance of this type
+
pub struct DirectoryBuilder<'a, S: directory_state::State> {
+
_phantom_state: ::core::marker::PhantomData<fn() -> S>,
+
__unsafe_private_named: (
+
::core::option::Option<Vec<crate::place_wisp::subfs::Entry<'a>>>,
+
::core::option::Option<jacquard_common::CowStr<'a>>,
+
),
+
_phantom: ::core::marker::PhantomData<&'a ()>,
+
}
+
+
impl<'a> Directory<'a> {
+
/// Create a new builder for this type
+
pub fn new() -> DirectoryBuilder<'a, directory_state::Empty> {
+
DirectoryBuilder::new()
+
}
+
}
+
+
impl<'a> DirectoryBuilder<'a, directory_state::Empty> {
+
/// Create a new builder with all fields unset
+
pub fn new() -> Self {
+
DirectoryBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: (None, None),
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S> DirectoryBuilder<'a, S>
+
where
+
S: directory_state::State,
+
S::Entries: directory_state::IsUnset,
+
{
+
/// Set the `entries` field (required)
+
pub fn entries(
+
mut self,
+
value: impl Into<Vec<crate::place_wisp::subfs::Entry<'a>>>,
+
) -> DirectoryBuilder<'a, directory_state::SetEntries<S>> {
+
self.__unsafe_private_named.0 = ::core::option::Option::Some(value.into());
+
DirectoryBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: self.__unsafe_private_named,
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S> DirectoryBuilder<'a, S>
+
where
+
S: directory_state::State,
+
S::Type: directory_state::IsUnset,
+
{
+
/// Set the `type` field (required)
+
pub fn r#type(
+
mut self,
+
value: impl Into<jacquard_common::CowStr<'a>>,
+
) -> DirectoryBuilder<'a, directory_state::SetType<S>> {
+
self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
+
DirectoryBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: self.__unsafe_private_named,
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S> DirectoryBuilder<'a, S>
+
where
+
S: directory_state::State,
+
S::Type: directory_state::IsSet,
+
S::Entries: directory_state::IsSet,
+
{
+
/// Build the final struct
+
pub fn build(self) -> Directory<'a> {
+
Directory {
+
entries: self.__unsafe_private_named.0.unwrap(),
+
r#type: self.__unsafe_private_named.1.unwrap(),
+
extra_data: Default::default(),
+
}
+
}
+
/// Build the final struct with custom extra_data
+
pub fn build_with_data(
+
self,
+
extra_data: std::collections::BTreeMap<
+
jacquard_common::smol_str::SmolStr,
+
jacquard_common::types::value::Data<'a>,
+
>,
+
) -> Directory<'a> {
+
Directory {
+
entries: self.__unsafe_private_named.0.unwrap(),
+
r#type: self.__unsafe_private_named.1.unwrap(),
+
extra_data: Some(extra_data),
+
}
+
}
+
}
+
+
fn lexicon_doc_place_wisp_subfs() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
+
::jacquard_lexicon::lexicon::LexiconDoc {
+
lexicon: ::jacquard_lexicon::lexicon::Lexicon::Lexicon1,
+
id: ::jacquard_common::CowStr::new_static("place.wisp.subfs"),
+
revision: None,
+
description: None,
+
defs: {
+
let mut map = ::std::collections::BTreeMap::new();
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("directory"),
+
::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
+
description: None,
+
required: Some(
+
vec![
+
::jacquard_common::smol_str::SmolStr::new_static("type"),
+
::jacquard_common::smol_str::SmolStr::new_static("entries")
+
],
+
),
+
nullable: None,
+
properties: {
+
#[allow(unused_mut)]
+
let mut map = ::std::collections::BTreeMap::new();
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("entries"),
+
::jacquard_lexicon::lexicon::LexObjectProperty::Array(::jacquard_lexicon::lexicon::LexArray {
+
description: None,
+
items: ::jacquard_lexicon::lexicon::LexArrayItem::Ref(::jacquard_lexicon::lexicon::LexRef {
+
description: None,
+
r#ref: ::jacquard_common::CowStr::new_static("#entry"),
+
}),
+
min_length: None,
+
max_length: Some(500usize),
+
}),
+
);
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("type"),
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
+
description: None,
+
format: None,
+
default: None,
+
min_length: None,
+
max_length: None,
+
min_graphemes: None,
+
max_graphemes: None,
+
r#enum: None,
+
r#const: None,
+
known_values: None,
+
}),
+
);
+
map
+
},
+
}),
+
);
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("entry"),
+
::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
+
description: None,
+
required: Some(
+
vec![
+
::jacquard_common::smol_str::SmolStr::new_static("name"),
+
::jacquard_common::smol_str::SmolStr::new_static("node")
+
],
+
),
+
nullable: None,
+
properties: {
+
#[allow(unused_mut)]
+
let mut map = ::std::collections::BTreeMap::new();
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("name"),
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
+
description: None,
+
format: None,
+
default: None,
+
min_length: None,
+
max_length: Some(255usize),
+
min_graphemes: None,
+
max_graphemes: None,
+
r#enum: None,
+
r#const: None,
+
known_values: None,
+
}),
+
);
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("node"),
+
::jacquard_lexicon::lexicon::LexObjectProperty::Union(::jacquard_lexicon::lexicon::LexRefUnion {
+
description: None,
+
refs: vec![
+
::jacquard_common::CowStr::new_static("#file"),
+
::jacquard_common::CowStr::new_static("#directory"),
+
::jacquard_common::CowStr::new_static("#subfs")
+
],
+
closed: None,
+
}),
+
);
+
map
+
},
+
}),
+
);
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("file"),
+
::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
+
description: None,
+
required: Some(
+
vec![
+
::jacquard_common::smol_str::SmolStr::new_static("type"),
+
::jacquard_common::smol_str::SmolStr::new_static("blob")
+
],
+
),
+
nullable: None,
+
properties: {
+
#[allow(unused_mut)]
+
let mut map = ::std::collections::BTreeMap::new();
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("base64"),
+
::jacquard_lexicon::lexicon::LexObjectProperty::Boolean(::jacquard_lexicon::lexicon::LexBoolean {
+
description: None,
+
default: None,
+
r#const: None,
+
}),
+
);
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("blob"),
+
::jacquard_lexicon::lexicon::LexObjectProperty::Blob(::jacquard_lexicon::lexicon::LexBlob {
+
description: None,
+
accept: None,
+
max_size: None,
+
}),
+
);
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("encoding"),
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
+
description: Some(
+
::jacquard_common::CowStr::new_static(
+
"Content encoding (e.g., gzip for compressed files)",
+
),
+
),
+
format: None,
+
default: None,
+
min_length: None,
+
max_length: None,
+
min_graphemes: None,
+
max_graphemes: None,
+
r#enum: None,
+
r#const: None,
+
known_values: None,
+
}),
+
);
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("mimeType"),
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
+
description: Some(
+
::jacquard_common::CowStr::new_static(
+
"Original MIME type before compression",
+
),
+
),
+
format: None,
+
default: None,
+
min_length: None,
+
max_length: None,
+
min_graphemes: None,
+
max_graphemes: None,
+
r#enum: None,
+
r#const: None,
+
known_values: None,
+
}),
+
);
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("type"),
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
+
description: None,
+
format: None,
+
default: None,
+
min_length: None,
+
max_length: None,
+
min_graphemes: None,
+
max_graphemes: None,
+
r#enum: None,
+
r#const: None,
+
known_values: None,
+
}),
+
);
+
map
+
},
+
}),
+
);
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("main"),
+
::jacquard_lexicon::lexicon::LexUserType::Record(::jacquard_lexicon::lexicon::LexRecord {
+
description: Some(
+
::jacquard_common::CowStr::new_static(
+
"Virtual filesystem subtree referenced by place.wisp.fs records. When a subfs entry is expanded, its root entries are merged (flattened) into the parent directory, allowing large directories to be split across multiple records while maintaining a flat structure.",
+
),
+
),
+
key: None,
+
record: ::jacquard_lexicon::lexicon::LexRecordRecord::Object(::jacquard_lexicon::lexicon::LexObject {
+
description: None,
+
required: Some(
+
vec![
+
::jacquard_common::smol_str::SmolStr::new_static("root"),
+
::jacquard_common::smol_str::SmolStr::new_static("createdAt")
+
],
+
),
+
nullable: None,
+
properties: {
+
#[allow(unused_mut)]
+
let mut map = ::std::collections::BTreeMap::new();
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static(
+
"createdAt",
+
),
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
+
description: None,
+
format: Some(
+
::jacquard_lexicon::lexicon::LexStringFormat::Datetime,
+
),
+
default: None,
+
min_length: None,
+
max_length: None,
+
min_graphemes: None,
+
max_graphemes: None,
+
r#enum: None,
+
r#const: None,
+
known_values: None,
+
}),
+
);
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static(
+
"fileCount",
+
),
+
::jacquard_lexicon::lexicon::LexObjectProperty::Integer(::jacquard_lexicon::lexicon::LexInteger {
+
description: None,
+
default: None,
+
minimum: Some(0i64),
+
maximum: Some(1000i64),
+
r#enum: None,
+
r#const: None,
+
}),
+
);
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("root"),
+
::jacquard_lexicon::lexicon::LexObjectProperty::Ref(::jacquard_lexicon::lexicon::LexRef {
+
description: None,
+
r#ref: ::jacquard_common::CowStr::new_static("#directory"),
+
}),
+
);
+
map
+
},
+
}),
+
}),
+
);
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("subfs"),
+
::jacquard_lexicon::lexicon::LexUserType::Object(::jacquard_lexicon::lexicon::LexObject {
+
description: None,
+
required: Some(
+
vec![
+
::jacquard_common::smol_str::SmolStr::new_static("type"),
+
::jacquard_common::smol_str::SmolStr::new_static("subject")
+
],
+
),
+
nullable: None,
+
properties: {
+
#[allow(unused_mut)]
+
let mut map = ::std::collections::BTreeMap::new();
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("subject"),
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
+
description: Some(
+
::jacquard_common::CowStr::new_static(
+
"AT-URI pointing to another place.wisp.subfs record for nested subtrees. When expanded, the referenced record's root entries are merged (flattened) into the parent directory, allowing recursive splitting of large directory structures.",
+
),
+
),
+
format: Some(
+
::jacquard_lexicon::lexicon::LexStringFormat::AtUri,
+
),
+
default: None,
+
min_length: None,
+
max_length: None,
+
min_graphemes: None,
+
max_graphemes: None,
+
r#enum: None,
+
r#const: None,
+
known_values: None,
+
}),
+
);
+
map.insert(
+
::jacquard_common::smol_str::SmolStr::new_static("type"),
+
::jacquard_lexicon::lexicon::LexObjectProperty::String(::jacquard_lexicon::lexicon::LexString {
+
description: None,
+
format: None,
+
default: None,
+
min_length: None,
+
max_length: None,
+
min_graphemes: None,
+
max_graphemes: None,
+
r#enum: None,
+
r#const: None,
+
known_values: None,
+
}),
+
);
+
map
+
},
+
}),
+
);
+
map
+
},
+
}
+
}
+
+
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Directory<'a> {
+
fn nsid() -> &'static str {
+
"place.wisp.subfs"
+
}
+
fn def_name() -> &'static str {
+
"directory"
+
}
+
fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
+
lexicon_doc_place_wisp_subfs()
+
}
+
fn validate(
+
&self,
+
) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
+
{
+
let value = &self.entries;
+
#[allow(unused_comparisons)]
+
if value.len() > 500usize {
+
return Err(::jacquard_lexicon::validation::ConstraintError::MaxLength {
+
path: ::jacquard_lexicon::validation::ValidationPath::from_field(
+
"entries",
+
),
+
max: 500usize,
+
actual: value.len(),
+
});
+
}
+
}
+
Ok(())
+
}
+
}
+
+
#[jacquard_derive::lexicon]
+
#[derive(
+
serde::Serialize,
+
serde::Deserialize,
+
Debug,
+
Clone,
+
PartialEq,
+
Eq,
+
jacquard_derive::IntoStatic
+
)]
+
#[serde(rename_all = "camelCase")]
+
pub struct Entry<'a> {
+
#[serde(borrow)]
+
pub name: jacquard_common::CowStr<'a>,
+
#[serde(borrow)]
+
pub node: EntryNode<'a>,
+
}
+
+
pub mod entry_state {
+
+
pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
+
#[allow(unused)]
+
use ::core::marker::PhantomData;
+
mod sealed {
+
pub trait Sealed {}
+
}
+
/// State trait tracking which required fields have been set
+
pub trait State: sealed::Sealed {
+
type Name;
+
type Node;
+
}
+
/// Empty state - all required fields are unset
+
pub struct Empty(());
+
impl sealed::Sealed for Empty {}
+
impl State for Empty {
+
type Name = Unset;
+
type Node = Unset;
+
}
+
///State transition - sets the `name` field to Set
+
pub struct SetName<S: State = Empty>(PhantomData<fn() -> S>);
+
impl<S: State> sealed::Sealed for SetName<S> {}
+
impl<S: State> State for SetName<S> {
+
type Name = Set<members::name>;
+
type Node = S::Node;
+
}
+
///State transition - sets the `node` field to Set
+
pub struct SetNode<S: State = Empty>(PhantomData<fn() -> S>);
+
impl<S: State> sealed::Sealed for SetNode<S> {}
+
impl<S: State> State for SetNode<S> {
+
type Name = S::Name;
+
type Node = Set<members::node>;
+
}
+
/// Marker types for field names
+
#[allow(non_camel_case_types)]
+
pub mod members {
+
///Marker type for the `name` field
+
pub struct name(());
+
///Marker type for the `node` field
+
pub struct node(());
+
}
+
}
+
+
/// Builder for constructing an instance of this type
+
pub struct EntryBuilder<'a, S: entry_state::State> {
+
_phantom_state: ::core::marker::PhantomData<fn() -> S>,
+
__unsafe_private_named: (
+
::core::option::Option<jacquard_common::CowStr<'a>>,
+
::core::option::Option<EntryNode<'a>>,
+
),
+
_phantom: ::core::marker::PhantomData<&'a ()>,
+
}
+
+
impl<'a> Entry<'a> {
+
/// Create a new builder for this type
+
pub fn new() -> EntryBuilder<'a, entry_state::Empty> {
+
EntryBuilder::new()
+
}
+
}
+
+
impl<'a> EntryBuilder<'a, entry_state::Empty> {
+
/// Create a new builder with all fields unset
+
pub fn new() -> Self {
+
EntryBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: (None, None),
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S> EntryBuilder<'a, S>
+
where
+
S: entry_state::State,
+
S::Name: entry_state::IsUnset,
+
{
+
/// Set the `name` field (required)
+
pub fn name(
+
mut self,
+
value: impl Into<jacquard_common::CowStr<'a>>,
+
) -> EntryBuilder<'a, entry_state::SetName<S>> {
+
self.__unsafe_private_named.0 = ::core::option::Option::Some(value.into());
+
EntryBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: self.__unsafe_private_named,
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S> EntryBuilder<'a, S>
+
where
+
S: entry_state::State,
+
S::Node: entry_state::IsUnset,
+
{
+
/// Set the `node` field (required)
+
pub fn node(
+
mut self,
+
value: impl Into<EntryNode<'a>>,
+
) -> EntryBuilder<'a, entry_state::SetNode<S>> {
+
self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
+
EntryBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: self.__unsafe_private_named,
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S> EntryBuilder<'a, S>
+
where
+
S: entry_state::State,
+
S::Name: entry_state::IsSet,
+
S::Node: entry_state::IsSet,
+
{
+
/// Build the final struct
+
pub fn build(self) -> Entry<'a> {
+
Entry {
+
name: self.__unsafe_private_named.0.unwrap(),
+
node: self.__unsafe_private_named.1.unwrap(),
+
extra_data: Default::default(),
+
}
+
}
+
/// Build the final struct with custom extra_data
+
pub fn build_with_data(
+
self,
+
extra_data: std::collections::BTreeMap<
+
jacquard_common::smol_str::SmolStr,
+
jacquard_common::types::value::Data<'a>,
+
>,
+
) -> Entry<'a> {
+
Entry {
+
name: self.__unsafe_private_named.0.unwrap(),
+
node: self.__unsafe_private_named.1.unwrap(),
+
extra_data: Some(extra_data),
+
}
+
}
+
}
+
+
#[jacquard_derive::open_union]
+
#[derive(
+
serde::Serialize,
+
serde::Deserialize,
+
Debug,
+
Clone,
+
PartialEq,
+
Eq,
+
jacquard_derive::IntoStatic
+
)]
+
#[serde(tag = "$type")]
+
#[serde(bound(deserialize = "'de: 'a"))]
+
pub enum EntryNode<'a> {
+
#[serde(rename = "place.wisp.subfs#file")]
+
File(Box<crate::place_wisp::subfs::File<'a>>),
+
#[serde(rename = "place.wisp.subfs#directory")]
+
Directory(Box<crate::place_wisp::subfs::Directory<'a>>),
+
#[serde(rename = "place.wisp.subfs#subfs")]
+
Subfs(Box<crate::place_wisp::subfs::Subfs<'a>>),
+
}
+
+
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Entry<'a> {
+
fn nsid() -> &'static str {
+
"place.wisp.subfs"
+
}
+
fn def_name() -> &'static str {
+
"entry"
+
}
+
fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
+
lexicon_doc_place_wisp_subfs()
+
}
+
fn validate(
+
&self,
+
) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
+
{
+
let value = &self.name;
+
#[allow(unused_comparisons)]
+
if <str>::len(value.as_ref()) > 255usize {
+
return Err(::jacquard_lexicon::validation::ConstraintError::MaxLength {
+
path: ::jacquard_lexicon::validation::ValidationPath::from_field(
+
"name",
+
),
+
max: 255usize,
+
actual: <str>::len(value.as_ref()),
+
});
+
}
+
}
+
Ok(())
+
}
+
}
+
+
#[jacquard_derive::lexicon]
+
#[derive(
+
serde::Serialize,
+
serde::Deserialize,
+
Debug,
+
Clone,
+
PartialEq,
+
Eq,
+
jacquard_derive::IntoStatic
+
)]
+
#[serde(rename_all = "camelCase")]
+
pub struct File<'a> {
+
/// True if blob content is base64-encoded (used to bypass PDS content sniffing)
+
#[serde(skip_serializing_if = "std::option::Option::is_none")]
+
pub base64: Option<bool>,
+
/// Content blob ref
+
#[serde(borrow)]
+
pub blob: jacquard_common::types::blob::BlobRef<'a>,
+
/// Content encoding (e.g., gzip for compressed files)
+
#[serde(skip_serializing_if = "std::option::Option::is_none")]
+
#[serde(borrow)]
+
pub encoding: Option<jacquard_common::CowStr<'a>>,
+
/// Original MIME type before compression
+
#[serde(skip_serializing_if = "std::option::Option::is_none")]
+
#[serde(borrow)]
+
pub mime_type: Option<jacquard_common::CowStr<'a>>,
+
#[serde(borrow)]
+
pub r#type: jacquard_common::CowStr<'a>,
+
}
+
+
pub mod file_state {
+
+
pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
+
#[allow(unused)]
+
use ::core::marker::PhantomData;
+
mod sealed {
+
pub trait Sealed {}
+
}
+
/// State trait tracking which required fields have been set
+
pub trait State: sealed::Sealed {
+
type Type;
+
type Blob;
+
}
+
/// Empty state - all required fields are unset
+
pub struct Empty(());
+
impl sealed::Sealed for Empty {}
+
impl State for Empty {
+
type Type = Unset;
+
type Blob = Unset;
+
}
+
///State transition - sets the `type` field to Set
+
pub struct SetType<S: State = Empty>(PhantomData<fn() -> S>);
+
impl<S: State> sealed::Sealed for SetType<S> {}
+
impl<S: State> State for SetType<S> {
+
type Type = Set<members::r#type>;
+
type Blob = S::Blob;
+
}
+
///State transition - sets the `blob` field to Set
+
pub struct SetBlob<S: State = Empty>(PhantomData<fn() -> S>);
+
impl<S: State> sealed::Sealed for SetBlob<S> {}
+
impl<S: State> State for SetBlob<S> {
+
type Type = S::Type;
+
type Blob = Set<members::blob>;
+
}
+
/// Marker types for field names
+
#[allow(non_camel_case_types)]
+
pub mod members {
+
///Marker type for the `type` field
+
pub struct r#type(());
+
///Marker type for the `blob` field
+
pub struct blob(());
+
}
+
}
+
+
/// Builder for constructing an instance of this type
+
pub struct FileBuilder<'a, S: file_state::State> {
+
_phantom_state: ::core::marker::PhantomData<fn() -> S>,
+
__unsafe_private_named: (
+
::core::option::Option<bool>,
+
::core::option::Option<jacquard_common::types::blob::BlobRef<'a>>,
+
::core::option::Option<jacquard_common::CowStr<'a>>,
+
::core::option::Option<jacquard_common::CowStr<'a>>,
+
::core::option::Option<jacquard_common::CowStr<'a>>,
+
),
+
_phantom: ::core::marker::PhantomData<&'a ()>,
+
}
+
+
impl<'a> File<'a> {
+
/// Create a new builder for this type
+
pub fn new() -> FileBuilder<'a, file_state::Empty> {
+
FileBuilder::new()
+
}
+
}
+
+
impl<'a> FileBuilder<'a, file_state::Empty> {
+
/// Create a new builder with all fields unset
+
pub fn new() -> Self {
+
FileBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: (None, None, None, None, None),
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S: file_state::State> FileBuilder<'a, S> {
+
/// Set the `base64` field (optional)
+
pub fn base64(mut self, value: impl Into<Option<bool>>) -> Self {
+
self.__unsafe_private_named.0 = value.into();
+
self
+
}
+
/// Set the `base64` field to an Option value (optional)
+
pub fn maybe_base64(mut self, value: Option<bool>) -> Self {
+
self.__unsafe_private_named.0 = value;
+
self
+
}
+
}
+
+
impl<'a, S> FileBuilder<'a, S>
+
where
+
S: file_state::State,
+
S::Blob: file_state::IsUnset,
+
{
+
/// Set the `blob` field (required)
+
pub fn blob(
+
mut self,
+
value: impl Into<jacquard_common::types::blob::BlobRef<'a>>,
+
) -> FileBuilder<'a, file_state::SetBlob<S>> {
+
self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
+
FileBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: self.__unsafe_private_named,
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S: file_state::State> FileBuilder<'a, S> {
+
/// Set the `encoding` field (optional)
+
pub fn encoding(
+
mut self,
+
value: impl Into<Option<jacquard_common::CowStr<'a>>>,
+
) -> Self {
+
self.__unsafe_private_named.2 = value.into();
+
self
+
}
+
/// Set the `encoding` field to an Option value (optional)
+
pub fn maybe_encoding(mut self, value: Option<jacquard_common::CowStr<'a>>) -> Self {
+
self.__unsafe_private_named.2 = value;
+
self
+
}
+
}
+
+
impl<'a, S: file_state::State> FileBuilder<'a, S> {
+
/// Set the `mimeType` field (optional)
+
pub fn mime_type(
+
mut self,
+
value: impl Into<Option<jacquard_common::CowStr<'a>>>,
+
) -> Self {
+
self.__unsafe_private_named.3 = value.into();
+
self
+
}
+
/// Set the `mimeType` field to an Option value (optional)
+
pub fn maybe_mime_type(
+
mut self,
+
value: Option<jacquard_common::CowStr<'a>>,
+
) -> Self {
+
self.__unsafe_private_named.3 = value;
+
self
+
}
+
}
+
+
impl<'a, S> FileBuilder<'a, S>
+
where
+
S: file_state::State,
+
S::Type: file_state::IsUnset,
+
{
+
/// Set the `type` field (required)
+
pub fn r#type(
+
mut self,
+
value: impl Into<jacquard_common::CowStr<'a>>,
+
) -> FileBuilder<'a, file_state::SetType<S>> {
+
self.__unsafe_private_named.4 = ::core::option::Option::Some(value.into());
+
FileBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: self.__unsafe_private_named,
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S> FileBuilder<'a, S>
+
where
+
S: file_state::State,
+
S::Type: file_state::IsSet,
+
S::Blob: file_state::IsSet,
+
{
+
/// Build the final struct
+
pub fn build(self) -> File<'a> {
+
File {
+
base64: self.__unsafe_private_named.0,
+
blob: self.__unsafe_private_named.1.unwrap(),
+
encoding: self.__unsafe_private_named.2,
+
mime_type: self.__unsafe_private_named.3,
+
r#type: self.__unsafe_private_named.4.unwrap(),
+
extra_data: Default::default(),
+
}
+
}
+
/// Build the final struct with custom extra_data
+
pub fn build_with_data(
+
self,
+
extra_data: std::collections::BTreeMap<
+
jacquard_common::smol_str::SmolStr,
+
jacquard_common::types::value::Data<'a>,
+
>,
+
) -> File<'a> {
+
File {
+
base64: self.__unsafe_private_named.0,
+
blob: self.__unsafe_private_named.1.unwrap(),
+
encoding: self.__unsafe_private_named.2,
+
mime_type: self.__unsafe_private_named.3,
+
r#type: self.__unsafe_private_named.4.unwrap(),
+
extra_data: Some(extra_data),
+
}
+
}
+
}
+
+
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for File<'a> {
+
fn nsid() -> &'static str {
+
"place.wisp.subfs"
+
}
+
fn def_name() -> &'static str {
+
"file"
+
}
+
fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
+
lexicon_doc_place_wisp_subfs()
+
}
+
fn validate(
+
&self,
+
) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
+
Ok(())
+
}
+
}
+
+
/// Virtual filesystem subtree referenced by place.wisp.fs records. When a subfs entry is expanded, its root entries are merged (flattened) into the parent directory, allowing large directories to be split across multiple records while maintaining a flat structure.
+
#[jacquard_derive::lexicon]
+
#[derive(
+
serde::Serialize,
+
serde::Deserialize,
+
Debug,
+
Clone,
+
PartialEq,
+
Eq,
+
jacquard_derive::IntoStatic
+
)]
+
#[serde(rename_all = "camelCase")]
+
pub struct SubfsRecord<'a> {
+
pub created_at: jacquard_common::types::string::Datetime,
+
#[serde(skip_serializing_if = "std::option::Option::is_none")]
+
pub file_count: Option<i64>,
+
#[serde(borrow)]
+
pub root: crate::place_wisp::subfs::Directory<'a>,
+
}
+
+
pub mod subfs_record_state {
+
+
pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
+
#[allow(unused)]
+
use ::core::marker::PhantomData;
+
mod sealed {
+
pub trait Sealed {}
+
}
+
/// State trait tracking which required fields have been set
+
pub trait State: sealed::Sealed {
+
type Root;
+
type CreatedAt;
+
}
+
/// Empty state - all required fields are unset
+
pub struct Empty(());
+
impl sealed::Sealed for Empty {}
+
impl State for Empty {
+
type Root = Unset;
+
type CreatedAt = Unset;
+
}
+
///State transition - sets the `root` field to Set
+
pub struct SetRoot<S: State = Empty>(PhantomData<fn() -> S>);
+
impl<S: State> sealed::Sealed for SetRoot<S> {}
+
impl<S: State> State for SetRoot<S> {
+
type Root = Set<members::root>;
+
type CreatedAt = S::CreatedAt;
+
}
+
///State transition - sets the `created_at` field to Set
+
pub struct SetCreatedAt<S: State = Empty>(PhantomData<fn() -> S>);
+
impl<S: State> sealed::Sealed for SetCreatedAt<S> {}
+
impl<S: State> State for SetCreatedAt<S> {
+
type Root = S::Root;
+
type CreatedAt = Set<members::created_at>;
+
}
+
/// Marker types for field names
+
#[allow(non_camel_case_types)]
+
pub mod members {
+
///Marker type for the `root` field
+
pub struct root(());
+
///Marker type for the `created_at` field
+
pub struct created_at(());
+
}
+
}
+
+
/// Builder for constructing an instance of this type
+
pub struct SubfsRecordBuilder<'a, S: subfs_record_state::State> {
+
_phantom_state: ::core::marker::PhantomData<fn() -> S>,
+
__unsafe_private_named: (
+
::core::option::Option<jacquard_common::types::string::Datetime>,
+
::core::option::Option<i64>,
+
::core::option::Option<crate::place_wisp::subfs::Directory<'a>>,
+
),
+
_phantom: ::core::marker::PhantomData<&'a ()>,
+
}
+
+
impl<'a> SubfsRecord<'a> {
+
/// Create a new builder for this type
+
pub fn new() -> SubfsRecordBuilder<'a, subfs_record_state::Empty> {
+
SubfsRecordBuilder::new()
+
}
+
}
+
+
impl<'a> SubfsRecordBuilder<'a, subfs_record_state::Empty> {
+
/// Create a new builder with all fields unset
+
pub fn new() -> Self {
+
SubfsRecordBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: (None, None, None),
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S> SubfsRecordBuilder<'a, S>
+
where
+
S: subfs_record_state::State,
+
S::CreatedAt: subfs_record_state::IsUnset,
+
{
+
/// Set the `createdAt` field (required)
+
pub fn created_at(
+
mut self,
+
value: impl Into<jacquard_common::types::string::Datetime>,
+
) -> SubfsRecordBuilder<'a, subfs_record_state::SetCreatedAt<S>> {
+
self.__unsafe_private_named.0 = ::core::option::Option::Some(value.into());
+
SubfsRecordBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: self.__unsafe_private_named,
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S: subfs_record_state::State> SubfsRecordBuilder<'a, S> {
+
/// Set the `fileCount` field (optional)
+
pub fn file_count(mut self, value: impl Into<Option<i64>>) -> Self {
+
self.__unsafe_private_named.1 = value.into();
+
self
+
}
+
/// Set the `fileCount` field to an Option value (optional)
+
pub fn maybe_file_count(mut self, value: Option<i64>) -> Self {
+
self.__unsafe_private_named.1 = value;
+
self
+
}
+
}
+
+
impl<'a, S> SubfsRecordBuilder<'a, S>
+
where
+
S: subfs_record_state::State,
+
S::Root: subfs_record_state::IsUnset,
+
{
+
/// Set the `root` field (required)
+
pub fn root(
+
mut self,
+
value: impl Into<crate::place_wisp::subfs::Directory<'a>>,
+
) -> SubfsRecordBuilder<'a, subfs_record_state::SetRoot<S>> {
+
self.__unsafe_private_named.2 = ::core::option::Option::Some(value.into());
+
SubfsRecordBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: self.__unsafe_private_named,
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S> SubfsRecordBuilder<'a, S>
+
where
+
S: subfs_record_state::State,
+
S::Root: subfs_record_state::IsSet,
+
S::CreatedAt: subfs_record_state::IsSet,
+
{
+
/// Build the final struct
+
pub fn build(self) -> SubfsRecord<'a> {
+
SubfsRecord {
+
created_at: self.__unsafe_private_named.0.unwrap(),
+
file_count: self.__unsafe_private_named.1,
+
root: self.__unsafe_private_named.2.unwrap(),
+
extra_data: Default::default(),
+
}
+
}
+
/// Build the final struct with custom extra_data
+
pub fn build_with_data(
+
self,
+
extra_data: std::collections::BTreeMap<
+
jacquard_common::smol_str::SmolStr,
+
jacquard_common::types::value::Data<'a>,
+
>,
+
) -> SubfsRecord<'a> {
+
SubfsRecord {
+
created_at: self.__unsafe_private_named.0.unwrap(),
+
file_count: self.__unsafe_private_named.1,
+
root: self.__unsafe_private_named.2.unwrap(),
+
extra_data: Some(extra_data),
+
}
+
}
+
}
+
+
impl<'a> SubfsRecord<'a> {
+
pub fn uri(
+
uri: impl Into<jacquard_common::CowStr<'a>>,
+
) -> Result<
+
jacquard_common::types::uri::RecordUri<'a, SubfsRecordRecord>,
+
jacquard_common::types::uri::UriError,
+
> {
+
jacquard_common::types::uri::RecordUri::try_from_uri(
+
jacquard_common::types::string::AtUri::new_cow(uri.into())?,
+
)
+
}
+
}
+
+
/// Typed wrapper for GetRecord response with this collection's record type.
+
#[derive(
+
serde::Serialize,
+
serde::Deserialize,
+
Debug,
+
Clone,
+
PartialEq,
+
Eq,
+
jacquard_derive::IntoStatic
+
)]
+
#[serde(rename_all = "camelCase")]
+
pub struct SubfsRecordGetRecordOutput<'a> {
+
#[serde(skip_serializing_if = "std::option::Option::is_none")]
+
#[serde(borrow)]
+
pub cid: std::option::Option<jacquard_common::types::string::Cid<'a>>,
+
#[serde(borrow)]
+
pub uri: jacquard_common::types::string::AtUri<'a>,
+
#[serde(borrow)]
+
pub value: SubfsRecord<'a>,
+
}
+
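For orientation, the getRecord response shape this wrapper deserializes, with illustrative values (the DID, CID, and entries below are hypothetical):

    {
      "uri": "at://did:plc:example/place.wisp.subfs/assets",
      "cid": "bafyrei…",
      "value": {
        "$type": "place.wisp.subfs",
        "createdAt": "2024-01-01T00:00:00.000Z",
        "fileCount": 2,
        "root": { "type": "directory", "entries": [ … ] }
      }
    }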
+
impl From<SubfsRecordGetRecordOutput<'_>> for SubfsRecord<'_> {
+
fn from(output: SubfsRecordGetRecordOutput<'_>) -> Self {
+
use jacquard_common::IntoStatic;
+
output.value.into_static()
+
}
+
}
+
+
impl jacquard_common::types::collection::Collection for SubfsRecord<'_> {
+
const NSID: &'static str = "place.wisp.subfs";
+
type Record = SubfsRecordRecord;
+
}
+
+
/// Marker type for deserializing records from this collection.
+
#[derive(Debug, serde::Serialize, serde::Deserialize)]
+
pub struct SubfsRecordRecord;
+
impl jacquard_common::xrpc::XrpcResp for SubfsRecordRecord {
+
const NSID: &'static str = "place.wisp.subfs";
+
const ENCODING: &'static str = "application/json";
+
type Output<'de> = SubfsRecordGetRecordOutput<'de>;
+
type Err<'de> = jacquard_common::types::collection::RecordError<'de>;
+
}
+
+
impl jacquard_common::types::collection::Collection for SubfsRecordRecord {
+
const NSID: &'static str = "place.wisp.subfs";
+
type Record = SubfsRecordRecord;
+
}
+
+
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for SubfsRecord<'a> {
+
fn nsid() -> &'static str {
+
"place.wisp.subfs"
+
}
+
fn def_name() -> &'static str {
+
"main"
+
}
+
fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
+
lexicon_doc_place_wisp_subfs()
+
}
+
fn validate(
+
&self,
+
) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
+
if let Some(ref value) = self.file_count {
+
if *value > 1000i64 {
+
return Err(::jacquard_lexicon::validation::ConstraintError::Maximum {
+
path: ::jacquard_lexicon::validation::ValidationPath::from_field(
+
"file_count",
+
),
+
max: 1000i64,
+
actual: *value,
+
});
+
}
+
}
+
if let Some(ref value) = self.file_count {
+
if *value < 0i64 {
+
return Err(::jacquard_lexicon::validation::ConstraintError::Minimum {
+
path: ::jacquard_lexicon::validation::ValidationPath::from_field(
+
"file_count",
+
),
+
min: 0i64,
+
actual: *value,
+
});
+
}
+
}
+
Ok(())
+
}
+
}
+
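A minimal usage sketch of the generated builder together with the validation above; `Datetime::now()` is an assumed constructor (jacquard may expose a different API) and `root_dir` stands for a `Directory` value built elsewhere:

    use jacquard_lexicon::schema::LexiconSchema; // brings validate() into scope

    let record = SubfsRecordBuilder::new()
        .created_at(jacquard_common::types::string::Datetime::now()) // assumed API
        .root(root_dir)
        .file_count(42)
        .build(); // only compiles once both `root` and `createdAt` are set
    // validate() enforces the lexicon constraint 0 <= fileCount <= 1000.
    assert!(record.validate().is_ok());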
+
#[jacquard_derive::lexicon]
+
#[derive(
+
serde::Serialize,
+
serde::Deserialize,
+
Debug,
+
Clone,
+
PartialEq,
+
Eq,
+
jacquard_derive::IntoStatic
+
)]
+
#[serde(rename_all = "camelCase")]
+
pub struct Subfs<'a> {
+
/// AT-URI pointing to another place.wisp.subfs record for nested subtrees. When expanded, the referenced record's root entries are merged (flattened) into the parent directory, allowing recursive splitting of large directory structures.
+
#[serde(borrow)]
+
pub subject: jacquard_common::types::string::AtUri<'a>,
+
#[serde(borrow)]
+
pub r#type: jacquard_common::CowStr<'a>,
+
}
+
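To illustrate the flat merge described on `subject`, with hypothetical paths: if the referenced place.wisp.subfs record's root holds `a.html` and `css/site.css`, expansion hoists those entries into the mounting directory:

    before expansion               after expansion (flat merge)
      docs/                          docs/
        [subfs -> at://…/assets]       a.html
                                       css/
                                         site.css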
+
pub mod subfs_state {
+
+
pub use crate::builder_types::{Set, Unset, IsSet, IsUnset};
+
#[allow(unused)]
+
use ::core::marker::PhantomData;
+
mod sealed {
+
pub trait Sealed {}
+
}
+
/// State trait tracking which required fields have been set
+
pub trait State: sealed::Sealed {
+
type Type;
+
type Subject;
+
}
+
/// Empty state - all required fields are unset
+
pub struct Empty(());
+
impl sealed::Sealed for Empty {}
+
impl State for Empty {
+
type Type = Unset;
+
type Subject = Unset;
+
}
+
/// State transition - sets the `type` field to Set
+
pub struct SetType<S: State = Empty>(PhantomData<fn() -> S>);
+
impl<S: State> sealed::Sealed for SetType<S> {}
+
impl<S: State> State for SetType<S> {
+
type Type = Set<members::r#type>;
+
type Subject = S::Subject;
+
}
+
/// State transition - sets the `subject` field to Set
+
pub struct SetSubject<S: State = Empty>(PhantomData<fn() -> S>);
+
impl<S: State> sealed::Sealed for SetSubject<S> {}
+
impl<S: State> State for SetSubject<S> {
+
type Type = S::Type;
+
type Subject = Set<members::subject>;
+
}
+
/// Marker types for field names
+
#[allow(non_camel_case_types)]
+
pub mod members {
+
/// Marker type for the `type` field
+
pub struct r#type(());
+
/// Marker type for the `subject` field
+
pub struct subject(());
+
}
+
}
+
+
/// Builder for constructing an instance of this type
+
pub struct SubfsBuilder<'a, S: subfs_state::State> {
+
_phantom_state: ::core::marker::PhantomData<fn() -> S>,
+
__unsafe_private_named: (
+
::core::option::Option<jacquard_common::types::string::AtUri<'a>>,
+
::core::option::Option<jacquard_common::CowStr<'a>>,
+
),
+
_phantom: ::core::marker::PhantomData<&'a ()>,
+
}
+
+
impl<'a> Subfs<'a> {
+
/// Create a new builder for this type
+
pub fn new() -> SubfsBuilder<'a, subfs_state::Empty> {
+
SubfsBuilder::new()
+
}
+
}
+
+
impl<'a> SubfsBuilder<'a, subfs_state::Empty> {
+
/// Create a new builder with all fields unset
+
pub fn new() -> Self {
+
SubfsBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: (None, None),
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S> SubfsBuilder<'a, S>
+
where
+
S: subfs_state::State,
+
S::Subject: subfs_state::IsUnset,
+
{
+
/// Set the `subject` field (required)
+
pub fn subject(
+
mut self,
+
value: impl Into<jacquard_common::types::string::AtUri<'a>>,
+
) -> SubfsBuilder<'a, subfs_state::SetSubject<S>> {
+
self.__unsafe_private_named.0 = ::core::option::Option::Some(value.into());
+
SubfsBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: self.__unsafe_private_named,
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S> SubfsBuilder<'a, S>
+
where
+
S: subfs_state::State,
+
S::Type: subfs_state::IsUnset,
+
{
+
/// Set the `type` field (required)
+
pub fn r#type(
+
mut self,
+
value: impl Into<jacquard_common::CowStr<'a>>,
+
) -> SubfsBuilder<'a, subfs_state::SetType<S>> {
+
self.__unsafe_private_named.1 = ::core::option::Option::Some(value.into());
+
SubfsBuilder {
+
_phantom_state: ::core::marker::PhantomData,
+
__unsafe_private_named: self.__unsafe_private_named,
+
_phantom: ::core::marker::PhantomData,
+
}
+
}
+
}
+
+
impl<'a, S> SubfsBuilder<'a, S>
+
where
+
S: subfs_state::State,
+
S::Type: subfs_state::IsSet,
+
S::Subject: subfs_state::IsSet,
+
{
+
/// Build the final struct
+
pub fn build(self) -> Subfs<'a> {
+
Subfs {
+
subject: self.__unsafe_private_named.0.unwrap(),
+
r#type: self.__unsafe_private_named.1.unwrap(),
+
extra_data: Default::default(),
+
}
+
}
+
/// Build the final struct with custom extra_data
+
pub fn build_with_data(
+
self,
+
extra_data: std::collections::BTreeMap<
+
jacquard_common::smol_str::SmolStr,
+
jacquard_common::types::value::Data<'a>,
+
>,
+
) -> Subfs<'a> {
+
Subfs {
+
subject: self.__unsafe_private_named.0.unwrap(),
+
r#type: self.__unsafe_private_named.1.unwrap(),
+
extra_data: Some(extra_data),
+
}
+
}
+
}
+
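A hedged sketch of the typestate enforcement above (the at:// URI is illustrative, and the snippet assumes a surrounding function that returns a Result):

    let uri = jacquard_common::types::string::AtUri::new_cow(
        jacquard_common::CowStr::from("at://did:plc:example/place.wisp.subfs/assets"),
    )?;
    let node = Subfs::new()
        .subject(uri)
        .r#type(jacquard_common::CowStr::from("subfs"))
        .build();
    // Omitting a required setter does not compile, e.g.:
    // Subfs::new().r#type(CowStr::from("subfs")).build(); // error: `Subject` is still Unset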
+
impl<'a> ::jacquard_lexicon::schema::LexiconSchema for Subfs<'a> {
+
fn nsid() -> &'static str {
+
"place.wisp.subfs"
+
}
+
fn def_name() -> &'static str {
+
"subfs"
+
}
+
fn lexicon_doc() -> ::jacquard_lexicon::lexicon::LexiconDoc<'static> {
+
lexicon_doc_place_wisp_subfs()
+
}
+
fn validate(
+
&self,
+
) -> ::std::result::Result<(), ::jacquard_lexicon::validation::ConstraintError> {
+
Ok(())
+
}
+
}
+433 -55
cli/src/pull.rs
···
use crate::download;
use crate::metadata::SiteMetadata;
use crate::place_wisp::fs::*;
+
use crate::subfs_utils;
use jacquard::CowStr;
use jacquard::prelude::IdentityResolver;
use jacquard_common::types::string::Did;
···
let fs_record: Fs = from_data(&record_output.value).into_diagnostic()?;
let file_count = fs_record.file_count.map(|c| c.to_string()).unwrap_or_else(|| "?".to_string());
-
println!("Found site '{}' with {} files", fs_record.site, file_count);
+
println!("Found site '{}' with {} files (in main record)", fs_record.site, file_count);
+
+
// Check for and expand subfs nodes
+
let expanded_root = expand_subfs_in_pull(&fs_record.root, &pds_url, did.as_str()).await?;
+
let total_file_count = subfs_utils::count_files_in_directory(&expanded_root);
+
+
if total_file_count as i64 != fs_record.file_count.unwrap_or(0) {
+
println!("Total files after expanding subfs: {}", total_file_count);
+
}
// Load existing metadata for incremental updates
let existing_metadata = SiteMetadata::load(&output_dir)?;
···
.map(|m| m.file_cids.clone())
.unwrap_or_default();
-
// Extract blob map from the new manifest
-
let new_blob_map = blob_map::extract_blob_map(&fs_record.root);
+
// Extract blob map from the expanded manifest
+
let new_blob_map = blob_map::extract_blob_map(&expanded_root);
let new_file_cids: HashMap<String, String> = new_blob_map
.iter()
.map(|(path, (_blob_ref, cid))| (path.clone(), cid.clone()))
···
}
}
-
// Check if we need to update (but only if output directory actually exists with files)
+
// Check if we need to update (verify files actually exist, not just metadata)
if let Some(metadata) = &existing_metadata {
if metadata.record_cid == record_cid {
-
// Verify that the output directory actually exists and has content
-
let has_content = output_dir.exists() &&
-
output_dir.read_dir()
-
.map(|mut entries| entries.any(|e| {
-
if let Ok(entry) = e {
-
!entry.file_name().to_string_lossy().starts_with(".wisp-metadata")
-
} else {
-
false
+
// Verify that the output directory actually exists and has the expected files
+
let has_all_files = output_dir.exists() && {
+
// Count top-level files on disk (excluding metadata); this shallow check is conservative, since a miss only triggers a re-download
+
let mut actual_file_count = 0;
+
if let Ok(entries) = std::fs::read_dir(&output_dir) {
+
for entry in entries.flatten() {
+
let name = entry.file_name();
+
if !name.to_string_lossy().starts_with(".wisp-metadata") {
+
if entry.path().is_file() {
+
actual_file_count += 1;
+
}
}
-
}))
-
.unwrap_or(false);
-
-
if has_content {
+
}
+
}
+
+
// Compare with expected file count from metadata
+
let expected_count = metadata.file_cids.len();
+
actual_file_count > 0 && actual_file_count >= expected_count
+
};
+
+
if has_all_files {
println!("Site is already up to date!");
return Ok(());
+
} else {
+
println!("Site metadata exists but files are missing, re-downloading...");
}
}
}
···
let mut downloaded = 0;
let mut reused = 0;
-
// Download files recursively
+
// Download files recursively (using expanded root)
let download_result = download_directory(
-
&fs_record.root,
+
&expanded_root,
&temp_dir,
&pds_url,
did.as_str(),
···
Ok(())
}
-
/// Recursively download a directory
+
/// Recursively download a directory with concurrent downloads
fn download_directory<'a>(
dir: &'a Directory<'_>,
output_dir: &'a Path,
···
reused: &'a mut usize,
) -> std::pin::Pin<Box<dyn std::future::Future<Output = miette::Result<()>> + Send + 'a>> {
Box::pin(async move {
+
use futures::stream::{self, StreamExt};
+
+
// Collect download tasks and directory tasks separately
+
struct DownloadTask {
+
path: String,
+
output_path: PathBuf,
+
blob: jacquard_common::types::blob::BlobRef<'static>,
+
base64: bool,
+
gzip: bool,
+
}
+
+
struct CopyTask {
+
path: String,
+
from: PathBuf,
+
to: PathBuf,
+
}
+
+
let mut download_tasks = Vec::new();
+
let mut copy_tasks = Vec::new();
+
let mut dir_tasks = Vec::new();
+
for entry in &dir.entries {
let entry_name = entry.name.as_str();
let current_path = if path_prefix.is_empty() {
···
let output_path = output_dir.join(entry_name);
// Check if file CID matches existing
-
if let Some((_blob_ref, new_cid)) = new_blob_map.get(&current_path) {
+
let should_copy = if let Some((_blob_ref, new_cid)) = new_blob_map.get(&current_path) {
if let Some(existing_cid) = existing_file_cids.get(&current_path) {
if existing_cid == new_cid {
-
// File unchanged, copy from existing directory
let existing_path = existing_output_dir.join(&current_path);
if existing_path.exists() {
-
std::fs::copy(&existing_path, &output_path).into_diagnostic()?;
-
*reused += 1;
-
println!(" ✓ Reused {}", current_path);
-
continue;
+
copy_tasks.push(CopyTask {
+
path: current_path.clone(),
+
from: existing_path,
+
to: output_path.clone(),
+
});
+
true
+
} else {
+
false
}
+
} else {
+
false
}
+
} else {
+
false
}
-
}
-
-
// File is new or changed, download it
-
println!(" ↓ Downloading {}", current_path);
-
let data = download::download_and_decompress_blob(
-
pds_url,
-
&file.blob,
-
did,
-
file.base64.unwrap_or(false),
-
file.encoding.as_ref().map(|e| e.as_str() == "gzip").unwrap_or(false),
-
)
-
.await?;
+
} else {
+
false
+
};
-
std::fs::write(&output_path, data).into_diagnostic()?;
-
*downloaded += 1;
+
if !should_copy {
+
use jacquard_common::IntoStatic;
+
// File needs to be downloaded
+
download_tasks.push(DownloadTask {
+
path: current_path,
+
output_path,
+
blob: file.blob.clone().into_static(),
+
base64: file.base64.unwrap_or(false),
+
gzip: file.encoding.as_ref().map(|e| e.as_str() == "gzip").unwrap_or(false),
+
});
+
}
}
EntryNode::Directory(subdir) => {
let subdir_path = output_dir.join(entry_name);
-
std::fs::create_dir_all(&subdir_path).into_diagnostic()?;
-
-
download_directory(
-
subdir,
-
&subdir_path,
-
pds_url,
-
did,
-
new_blob_map,
-
existing_file_cids,
-
existing_output_dir,
-
current_path,
-
downloaded,
-
reused,
-
)
-
.await?;
+
dir_tasks.push((subdir.as_ref().clone(), subdir_path, current_path));
+
}
+
EntryNode::Subfs(_) => {
+
println!(" ⚠ Skipping subfs node at {} (should have been expanded)", current_path);
}
EntryNode::Unknown(_) => {
-
// Skip unknown node types
println!(" ⚠ Skipping unknown node type for {}", current_path);
}
}
}
+
// Execute copy tasks (fast, do them all)
+
for task in copy_tasks {
+
std::fs::copy(&task.from, &task.to).into_diagnostic()?;
+
*reused += 1;
+
println!(" ✓ Reused {}", task.path);
+
}
+
+
// Execute download tasks with concurrency limit (20 concurrent downloads)
+
const DOWNLOAD_CONCURRENCY: usize = 20;
+
+
let pds_url_clone = pds_url.clone();
+
let did_str = did.to_string();
+
+
let download_results: Vec<miette::Result<(String, PathBuf, Vec<u8>)>> = stream::iter(download_tasks)
+
.map(|task| {
+
let pds = pds_url_clone.clone();
+
let did_copy = did_str.clone();
+
+
async move {
+
println!(" ↓ Downloading {}", task.path);
+
let data = download::download_and_decompress_blob(
+
&pds,
+
&task.blob,
+
&did_copy,
+
task.base64,
+
task.gzip,
+
)
+
.await?;
+
+
Ok::<_, miette::Report>((task.path, task.output_path, data))
+
}
+
})
+
.buffer_unordered(DOWNLOAD_CONCURRENCY)
+
.collect()
+
.await;
+
+
// Write downloaded files to disk
+
for result in download_results {
+
let (path, output_path, data) = result?;
+
std::fs::write(&output_path, data).into_diagnostic()?;
+
*downloaded += 1;
+
println!(" ✓ Downloaded {}", path);
+
}
+
+
// Recursively process directories
+
for (subdir, subdir_path, current_path) in dir_tasks {
+
std::fs::create_dir_all(&subdir_path).into_diagnostic()?;
+
+
download_directory(
+
&subdir,
+
&subdir_path,
+
pds_url,
+
did,
+
new_blob_map,
+
existing_file_cids,
+
existing_output_dir,
+
current_path,
+
downloaded,
+
reused,
+
)
+
.await?;
+
}
+
Ok(())
})
}
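The bounded fan-out above is the standard `futures` buffering pattern; a self-contained sketch, assuming the `futures` and `tokio` crates are available:

    use futures::stream::{self, StreamExt};

    #[tokio::main]
    async fn main() {
        // At most 3 futures run concurrently; results arrive in completion order.
        let doubled: Vec<u32> = stream::iter(1..=10u32)
            .map(|n| async move { n * 2 })
            .buffer_unordered(3)
            .collect()
            .await;
        assert_eq!(doubled.len(), 10);
    }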
+
/// Expand subfs nodes in a directory tree by fetching and merging subfs records (RECURSIVELY)
+
async fn expand_subfs_in_pull<'a>(
+
directory: &Directory<'a>,
+
pds_url: &Url,
+
_did: &str,
+
) -> miette::Result<Directory<'static>> {
+
use crate::place_wisp::subfs::SubfsRecord;
+
use jacquard_common::types::value::from_data;
+
use jacquard_common::IntoStatic;
+
+
// Recursively fetch ALL subfs records (including nested ones)
+
let mut all_subfs_map: HashMap<String, crate::place_wisp::subfs::Directory> = HashMap::new();
+
let mut to_fetch = subfs_utils::extract_subfs_uris(directory, String::new());
+
+
if to_fetch.is_empty() {
+
return Ok((*directory).clone().into_static());
+
}
+
+
println!("Found {} subfs records, fetching recursively...", to_fetch.len());
+
let client = reqwest::Client::new();
+
+
// Keep fetching until we've resolved all subfs (including nested ones)
+
let mut iteration = 0;
+
const MAX_ITERATIONS: usize = 10; // Prevent infinite loops
+
+
while !to_fetch.is_empty() && iteration < MAX_ITERATIONS {
+
iteration += 1;
+
println!(" Iteration {}: fetching {} subfs records...", iteration, to_fetch.len());
+
+
let mut fetch_tasks = Vec::new();
+
+
for (uri, path) in to_fetch.clone() {
+
let client = client.clone();
+
let pds_url = pds_url.clone();
+
+
fetch_tasks.push(async move {
+
let parts: Vec<&str> = uri.trim_start_matches("at://").split('/').collect();
+
if parts.len() < 3 {
+
return Err(miette::miette!("Invalid subfs URI: {}", uri));
+
}
+
+
let _did = parts[0];
+
let collection = parts[1];
+
let rkey = parts[2];
+
+
if collection != "place.wisp.subfs" {
+
return Err(miette::miette!("Expected place.wisp.subfs collection, got: {}", collection));
+
}
+
+
use jacquard::api::com_atproto::repo::get_record::GetRecord;
+
use jacquard_common::types::string::Rkey as RkeyType;
+
use jacquard_common::types::ident::AtIdentifier;
+
use jacquard_common::types::string::{RecordKey, Did as DidType};
+
+
let rkey_parsed = RkeyType::new(rkey).into_diagnostic()?;
+
let did_parsed = DidType::new(_did).into_diagnostic()?;
+
+
let request = GetRecord::new()
+
.repo(AtIdentifier::Did(did_parsed))
+
.collection(CowStr::from("place.wisp.subfs"))
+
.rkey(RecordKey::from(rkey_parsed))
+
.build();
+
+
let response = client
+
.xrpc(pds_url)
+
.send(&request)
+
.await
+
.into_diagnostic()?;
+
+
let record_output = response.into_output().into_diagnostic()?;
+
let subfs_record: SubfsRecord = from_data(&record_output.value).into_diagnostic()?;
+
let subfs_record_static = subfs_record.into_static();
+
+
Ok::<_, miette::Report>((path, subfs_record_static))
+
});
+
}
+
+
let results: Vec<_> = futures::future::join_all(fetch_tasks).await;
+
+
// Process results and find nested subfs
+
let mut newly_fetched = Vec::new();
+
for result in results {
+
match result {
+
Ok((path, record)) => {
+
println!(" ✓ Fetched subfs at {}", path);
+
+
// Check for nested subfs in this record
+
let nested_subfs = extract_subfs_from_subfs_dir(&record.root, path.clone());
+
newly_fetched.extend(nested_subfs);
+
+
all_subfs_map.insert(path, record.root);
+
}
+
Err(e) => {
+
eprintln!(" ⚠️ Failed to fetch subfs: {}", e);
+
}
+
}
+
}
+
+
// Update to_fetch with only the NEW subfs we haven't fetched yet
+
to_fetch = newly_fetched
+
.into_iter()
+
.filter(|(uri, _)| !all_subfs_map.iter().any(|(k, _)| k == uri))
+
.collect();
+
}
+
+
if iteration >= MAX_ITERATIONS {
+
return Err(miette::miette!("Max iterations reached while fetching nested subfs"));
+
}
+
+
println!(" Total subfs records fetched: {}", all_subfs_map.len());
+
+
// Now replace all subfs nodes with their content
+
Ok(replace_subfs_with_content(directory.clone(), &all_subfs_map, String::new()))
+
}
+
+
/// Extract subfs URIs from a subfs::Directory
+
fn extract_subfs_from_subfs_dir(
+
directory: &crate::place_wisp::subfs::Directory,
+
current_path: String,
+
) -> Vec<(String, String)> {
+
let mut uris = Vec::new();
+
+
for entry in &directory.entries {
+
let full_path = if current_path.is_empty() {
+
entry.name.to_string()
+
} else {
+
format!("{}/{}", current_path, entry.name)
+
};
+
+
match &entry.node {
+
crate::place_wisp::subfs::EntryNode::Subfs(subfs_node) => {
+
uris.push((subfs_node.subject.to_string(), full_path.clone()));
+
}
+
crate::place_wisp::subfs::EntryNode::Directory(subdir) => {
+
let nested = extract_subfs_from_subfs_dir(subdir, full_path);
+
uris.extend(nested);
+
}
+
_ => {}
+
}
+
}
+
+
uris
+
}
+
+
/// Recursively replace subfs nodes with their actual content
+
fn replace_subfs_with_content(
+
directory: Directory,
+
subfs_map: &HashMap<String, crate::place_wisp::subfs::Directory>,
+
current_path: String,
+
) -> Directory<'static> {
+
use jacquard_common::IntoStatic;
+
+
let new_entries: Vec<Entry<'static>> = directory
+
.entries
+
.into_iter()
+
.flat_map(|entry| {
+
let full_path = if current_path.is_empty() {
+
entry.name.to_string()
+
} else {
+
format!("{}/{}", current_path, entry.name)
+
};
+
+
match entry.node {
+
EntryNode::Subfs(subfs_node) => {
+
// Check if we have this subfs record
+
if let Some(subfs_dir) = subfs_map.get(&full_path) {
+
let flat = subfs_node.flat.unwrap_or(true); // Default to flat merge
+
+
if flat {
+
// Flat merge: hoist subfs entries into parent
+
println!(" Merging subfs {} (flat)", full_path);
+
let converted_entries: Vec<Entry<'static>> = subfs_dir
+
.entries
+
.iter()
+
.map(|subfs_entry| convert_subfs_entry_to_fs(subfs_entry.clone().into_static()))
+
.collect();
+
+
converted_entries
+
} else {
+
// Nested: create a directory with the subfs name
+
println!(" Merging subfs {} (nested)", full_path);
+
let converted_entries: Vec<Entry<'static>> = subfs_dir
+
.entries
+
.iter()
+
.map(|subfs_entry| convert_subfs_entry_to_fs(subfs_entry.clone().into_static()))
+
.collect();
+
+
vec![Entry::new()
+
.name(entry.name.into_static())
+
.node(EntryNode::Directory(Box::new(
+
Directory::new()
+
.r#type(CowStr::from("directory"))
+
.entries(converted_entries)
+
.build()
+
)))
+
.build()]
+
}
+
} else {
+
// Subfs not found, skip with warning
+
eprintln!(" ⚠️ Subfs not found: {}", full_path);
+
vec![]
+
}
+
}
+
EntryNode::Directory(dir) => {
+
// Recursively process subdirectories
+
vec![Entry::new()
+
.name(entry.name.into_static())
+
.node(EntryNode::Directory(Box::new(
+
replace_subfs_with_content(*dir, subfs_map, full_path)
+
)))
+
.build()]
+
}
+
EntryNode::File(_) => {
+
vec![entry.into_static()]
+
}
+
EntryNode::Unknown(_) => {
+
vec![entry.into_static()]
+
}
+
}
+
})
+
.collect();
+
+
Directory::new()
+
.r#type(CowStr::from("directory"))
+
.entries(new_entries)
+
.build()
+
}
+
+
/// Convert a subfs entry to a fs entry (they have the same structure but different types)
+
fn convert_subfs_entry_to_fs(subfs_entry: crate::place_wisp::subfs::Entry<'static>) -> Entry<'static> {
+
use jacquard_common::IntoStatic;
+
+
let node = match subfs_entry.node {
+
crate::place_wisp::subfs::EntryNode::File(file) => {
+
EntryNode::File(Box::new(
+
File::new()
+
.r#type(file.r#type.into_static())
+
.blob(file.blob.into_static())
+
.encoding(file.encoding.map(|e| e.into_static()))
+
.mime_type(file.mime_type.map(|m| m.into_static()))
+
.base64(file.base64)
+
.build()
+
))
+
}
+
crate::place_wisp::subfs::EntryNode::Directory(dir) => {
+
let converted_entries: Vec<Entry<'static>> = dir
+
.entries
+
.into_iter()
+
.map(|e| convert_subfs_entry_to_fs(e.into_static()))
+
.collect();
+
+
EntryNode::Directory(Box::new(
+
Directory::new()
+
.r#type(dir.r#type.into_static())
+
.entries(converted_entries)
+
.build()
+
))
+
}
+
crate::place_wisp::subfs::EntryNode::Subfs(_nested_subfs) => {
+
// Nested subfs should have been expanded already - if we get here, it means expansion failed
+
// Treat it like a directory reference that should have been expanded
+
eprintln!(" ⚠️ Warning: unexpanded nested subfs at path, treating as empty directory");
+
EntryNode::Directory(Box::new(
+
Directory::new()
+
.r#type(CowStr::from("directory"))
+
.entries(vec![])
+
.build()
+
))
+
}
+
crate::place_wisp::subfs::EntryNode::Unknown(unknown) => {
+
EntryNode::Unknown(unknown)
+
}
+
};
+
+
Entry::new()
+
.name(subfs_entry.name.into_static())
+
.node(node)
+
.build()
+
}
+
+336
cli/src/subfs_utils.rs
···
+
use jacquard_common::types::string::AtUri;
+
use jacquard_common::types::blob::BlobRef;
+
use jacquard_common::IntoStatic;
+
use jacquard::client::{Agent, AgentSession, AgentSessionExt};
+
use jacquard::prelude::IdentityResolver;
+
use miette::IntoDiagnostic;
+
use std::collections::HashMap;
+
+
use crate::place_wisp::fs::{Directory as FsDirectory, EntryNode as FsEntryNode};
+
use crate::place_wisp::subfs::SubfsRecord;
+
+
/// Extract all subfs URIs from a directory tree with their mount paths
+
pub fn extract_subfs_uris(directory: &FsDirectory, current_path: String) -> Vec<(String, String)> {
+
let mut uris = Vec::new();
+
+
for entry in &directory.entries {
+
let full_path = if current_path.is_empty() {
+
entry.name.to_string()
+
} else {
+
format!("{}/{}", current_path, entry.name)
+
};
+
+
match &entry.node {
+
FsEntryNode::Subfs(subfs_node) => {
+
// Found a subfs node - store its URI and mount path
+
uris.push((subfs_node.subject.to_string(), full_path.clone()));
+
}
+
FsEntryNode::Directory(subdir) => {
+
// Recursively search subdirectories
+
let sub_uris = extract_subfs_uris(subdir, full_path);
+
uris.extend(sub_uris);
+
}
+
FsEntryNode::File(_) => {
+
// Files don't contain subfs
+
}
+
FsEntryNode::Unknown(_) => {
+
// Skip unknown nodes
+
}
+
}
+
}
+
+
uris
+
}
+
+
/// Fetch a subfs record from the PDS
+
pub async fn fetch_subfs_record(
+
agent: &Agent<impl AgentSession + IdentityResolver>,
+
uri: &str,
+
) -> miette::Result<SubfsRecord<'static>> {
+
// Parse URI: at://did/collection/rkey
+
let parts: Vec<&str> = uri.trim_start_matches("at://").split('/').collect();
+
+
if parts.len() < 3 {
+
return Err(miette::miette!("Invalid subfs URI: {}", uri));
+
}
+
+
let _did = parts[0];
+
let collection = parts[1];
+
let _rkey = parts[2];
+
+
if collection != "place.wisp.subfs" {
+
return Err(miette::miette!("Expected place.wisp.subfs collection, got: {}", collection));
+
}
+
+
// Construct AT-URI for fetching
+
let at_uri = AtUri::new(uri).into_diagnostic()?;
+
+
// Fetch the record
+
let response = agent.get_record::<SubfsRecord>(&at_uri).await.into_diagnostic()?;
+
let record_output = response.into_output().into_diagnostic()?;
+
+
Ok(record_output.value.into_static())
+
}
+
+
/// Merge blob maps from subfs records into the main blob map
+
/// Returns the total number of blobs merged from all subfs records
+
pub async fn merge_subfs_blob_maps(
+
agent: &Agent<impl AgentSession + IdentityResolver>,
+
subfs_uris: Vec<(String, String)>,
+
main_blob_map: &mut HashMap<String, (BlobRef<'static>, String)>,
+
) -> miette::Result<usize> {
+
let mut total_merged = 0;
+
+
println!("Fetching {} subfs records for blob reuse...", subfs_uris.len());
+
+
// Fetch all subfs records in parallel (but with some concurrency limit)
+
use futures::stream::{self, StreamExt};
+
+
let subfs_results: Vec<_> = stream::iter(subfs_uris)
+
.map(|(uri, mount_path)| async move {
+
match fetch_subfs_record(agent, &uri).await {
+
Ok(record) => Some((record, mount_path)),
+
Err(e) => {
+
eprintln!(" ⚠️ Failed to fetch subfs {}: {}", uri, e);
+
None
+
}
+
}
+
})
+
.buffer_unordered(5)
+
.collect()
+
.await;
+
+
// Convert subfs Directory to fs Directory for blob extraction
+
// Note: We need to extract blobs from the subfs record's root
+
for result in subfs_results {
+
if let Some((subfs_record, mount_path)) = result {
+
// Extract blobs from this subfs record's root
+
// The blob_map module works with fs::Directory, but subfs::Directory has the same structure
+
// We need to convert or work directly with the entries
+
+
let subfs_blob_map = extract_subfs_blobs(&subfs_record.root, mount_path.clone());
+
let count = subfs_blob_map.len();
+
+
for (path, blob_info) in subfs_blob_map {
+
main_blob_map.insert(path, blob_info);
+
}
+
+
total_merged += count;
+
println!(" ✓ Merged {} blobs from subfs at {}", count, mount_path);
+
}
+
}
+
+
Ok(total_merged)
+
}
+
+
/// Extract blobs from a subfs directory (works with subfs::Directory)
+
/// Returns a map of file paths to their blob refs and CIDs
+
fn extract_subfs_blobs(
+
directory: &crate::place_wisp::subfs::Directory,
+
current_path: String,
+
) -> HashMap<String, (BlobRef<'static>, String)> {
+
let mut blob_map = HashMap::new();
+
+
for entry in &directory.entries {
+
let full_path = if current_path.is_empty() {
+
entry.name.to_string()
+
} else {
+
format!("{}/{}", current_path, entry.name)
+
};
+
+
match &entry.node {
+
crate::place_wisp::subfs::EntryNode::File(file_node) => {
+
let blob_ref = &file_node.blob;
+
let cid_string = blob_ref.blob().r#ref.to_string();
+
blob_map.insert(
+
full_path,
+
(blob_ref.clone().into_static(), cid_string)
+
);
+
}
+
crate::place_wisp::subfs::EntryNode::Directory(subdir) => {
+
let sub_map = extract_subfs_blobs(subdir, full_path);
+
blob_map.extend(sub_map);
+
}
+
crate::place_wisp::subfs::EntryNode::Subfs(_nested_subfs) => {
+
// Nested subfs - these should be resolved recursively in the main flow
+
// For now, we skip them (they'll be fetched separately)
+
eprintln!(" ⚠️ Found nested subfs at {}, skipping (should be fetched separately)", full_path);
+
}
+
crate::place_wisp::subfs::EntryNode::Unknown(_) => {
+
// Skip unknown nodes
+
}
+
}
+
}
+
+
blob_map
+
}
+
+
/// Count total files in a directory tree
+
pub fn count_files_in_directory(directory: &FsDirectory) -> usize {
+
let mut count = 0;
+
+
for entry in &directory.entries {
+
match &entry.node {
+
FsEntryNode::File(_) => count += 1,
+
FsEntryNode::Directory(subdir) => {
+
count += count_files_in_directory(subdir);
+
}
+
FsEntryNode::Subfs(_) => {
+
// Subfs nodes don't count towards the main manifest file count
+
}
+
FsEntryNode::Unknown(_) => {}
+
}
+
}
+
+
count
+
}
+
+
/// Estimate JSON size of a directory tree
+
pub fn estimate_directory_size(directory: &FsDirectory) -> usize {
+
// Serialize to JSON and measure
+
match serde_json::to_string(directory) {
+
Ok(json) => json.len(),
+
Err(_) => 0,
+
}
+
}
+
+
/// Information about a directory that could be split into a subfs record
+
#[derive(Debug)]
+
pub struct SplittableDirectory {
+
pub path: String,
+
pub directory: FsDirectory<'static>,
+
pub size: usize,
+
pub file_count: usize,
+
}
+
+
/// Find large directories that could be split into subfs records
+
/// Returns directories sorted by size (largest first)
+
pub fn find_large_directories(directory: &FsDirectory, current_path: String) -> Vec<SplittableDirectory> {
+
let mut result = Vec::new();
+
+
for entry in &directory.entries {
+
if let FsEntryNode::Directory(subdir) = &entry.node {
+
let dir_path = if current_path.is_empty() {
+
entry.name.to_string()
+
} else {
+
format!("{}/{}", current_path, entry.name)
+
};
+
+
let size = estimate_directory_size(subdir);
+
let file_count = count_files_in_directory(subdir);
+
+
result.push(SplittableDirectory {
+
path: dir_path.clone(),
+
directory: (*subdir.clone()).into_static(),
+
size,
+
file_count,
+
});
+
+
// Recursively find subdirectories
+
let subdirs = find_large_directories(subdir, dir_path);
+
result.extend(subdirs);
+
}
+
}
+
+
// Sort by size (largest first)
+
result.sort_by(|a, b| b.size.cmp(&a.size));
+
+
result
+
}
+
+
/// Replace a directory with a subfs node in the tree
+
pub fn replace_directory_with_subfs(
+
directory: FsDirectory<'static>,
+
target_path: &str,
+
subfs_uri: &str,
+
flat: bool,
+
) -> miette::Result<FsDirectory<'static>> {
+
use jacquard_common::CowStr;
+
use crate::place_wisp::fs::{Entry, Subfs};
+
+
let path_parts: Vec<&str> = target_path.split('/').collect();
+
+
if path_parts.is_empty() {
+
return Err(miette::miette!("Cannot replace root directory"));
+
}
+
+
// Parse the subfs URI and make it owned/'static
+
let at_uri = AtUri::new_cow(jacquard_common::CowStr::from(subfs_uri.to_string())).into_diagnostic()?;
+
+
// If this is a root-level directory
+
if path_parts.len() == 1 {
+
let target_name = path_parts[0];
+
let new_entries: Vec<Entry> = directory.entries.into_iter().map(|entry| {
+
if entry.name == target_name {
+
// Replace this directory with a subfs node
+
Entry::new()
+
.name(entry.name)
+
.node(FsEntryNode::Subfs(Box::new(
+
Subfs::new()
+
.r#type(CowStr::from("subfs"))
+
.subject(at_uri.clone())
+
.flat(Some(flat))
+
.build()
+
)))
+
.build()
+
} else {
+
entry
+
}
+
}).collect();
+
+
return Ok(FsDirectory::new()
+
.r#type(CowStr::from("directory"))
+
.entries(new_entries)
+
.build());
+
}
+
+
// Recursively navigate to parent directory
+
let first_part = path_parts[0];
+
let remaining_path = path_parts[1..].join("/");
+
+
let new_entries: Vec<Entry> = directory.entries.into_iter().filter_map(|entry| {
+
if entry.name == first_part {
+
if let FsEntryNode::Directory(subdir) = entry.node {
+
// Recursively process this subdirectory
+
match replace_directory_with_subfs((*subdir).into_static(), &remaining_path, subfs_uri, flat) {
+
Ok(updated_subdir) => {
+
Some(Entry::new()
+
.name(entry.name)
+
.node(FsEntryNode::Directory(Box::new(updated_subdir)))
+
.build())
+
}
+
Err(_) => None, // Skip entries that fail to update
+
}
+
} else {
+
Some(entry)
+
}
+
} else {
+
Some(entry)
+
}
+
}).collect();
+
+
Ok(FsDirectory::new()
+
.r#type(CowStr::from("directory"))
+
.entries(new_entries)
+
.build())
+
}
+
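A hedged sketch of how the splitting helpers above compose on the push side; `create_subfs_record` is a hypothetical stand-in for the actual record-creation call, and `MAX_MANIFEST_BYTES` an assumed size budget:

    const MAX_MANIFEST_BYTES: usize = 100 * 1024; // assumed budget for the main record

    let mut root = manifest_root; // FsDirectory<'static> built during push
    while estimate_directory_size(&root) > MAX_MANIFEST_BYTES {
        // Candidates come back largest-first; split the biggest directory.
        let Some(candidate) =
            find_large_directories(&root, String::new()).into_iter().next()
        else { break };
        let uri = create_subfs_record(&agent, &candidate.directory).await?; // hypothetical
        root = replace_directory_with_subfs(root, &candidate.path, &uri, true)?;
    }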
+
/// Delete a subfs record from the PDS
+
pub async fn delete_subfs_record(
+
agent: &Agent<impl AgentSession + IdentityResolver>,
+
uri: &str,
+
) -> miette::Result<()> {
+
use jacquard_common::types::uri::RecordUri;
+
+
// Construct AT-URI and convert to RecordUri
+
let at_uri = AtUri::new(uri).into_diagnostic()?;
+
let record_uri: RecordUri<'_, crate::place_wisp::subfs::SubfsRecordRecord> = RecordUri::try_from_uri(at_uri).into_diagnostic()?;
+
+
let rkey = record_uri.rkey()
+
.ok_or_else(|| miette::miette!("Invalid subfs URI: missing rkey"))?
+
.clone();
+
+
agent.delete_record::<SubfsRecord>(rkey).await.into_diagnostic()?;
+
+
Ok(())
+
}